summaryrefslogtreecommitdiffstats
path: root/src/spdk/scripts
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-21 11:54:28 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-21 11:54:28 +0000
commite6918187568dbd01842d8d1d2c808ce16a894239 (patch)
tree64f88b554b444a49f656b6c656111a145cbbaa28 /src/spdk/scripts
parentInitial commit. (diff)
downloadceph-e6918187568dbd01842d8d1d2c808ce16a894239.tar.xz
ceph-e6918187568dbd01842d8d1d2c808ce16a894239.zip
Adding upstream version 18.2.2.upstream/18.2.2
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/scripts')
-rw-r--r--src/spdk/scripts/ceph/ceph.conf64
-rwxr-xr-xsrc/spdk/scripts/ceph/start.sh139
-rwxr-xr-xsrc/spdk/scripts/ceph/stop.sh13
-rwxr-xr-xsrc/spdk/scripts/check_format.sh500
-rw-r--r--src/spdk/scripts/common.sh239
-rwxr-xr-xsrc/spdk/scripts/config_converter.py727
-rwxr-xr-xsrc/spdk/scripts/detect_cc.sh186
-rwxr-xr-xsrc/spdk/scripts/dpdk_mem_info.py406
-rwxr-xr-xsrc/spdk/scripts/eofnl38
-rwxr-xr-xsrc/spdk/scripts/fio.py164
-rw-r--r--src/spdk/scripts/gdb_macros.py289
-rwxr-xr-xsrc/spdk/scripts/gen_ftl.sh62
-rwxr-xr-xsrc/spdk/scripts/gen_nvme.sh43
-rwxr-xr-xsrc/spdk/scripts/genconfig.py46
-rwxr-xr-xsrc/spdk/scripts/histogram.py44
-rwxr-xr-xsrc/spdk/scripts/iostat.py356
-rw-r--r--src/spdk/scripts/perf/nvme/README12
-rw-r--r--src/spdk/scripts/perf/nvme/fio_test.conf20
-rwxr-xr-xsrc/spdk/scripts/perf/nvme/run_fio_test.py166
-rwxr-xr-xsrc/spdk/scripts/perf/nvme/run_fio_test.sh19
-rw-r--r--src/spdk/scripts/perf/nvmf/README.md159
-rw-r--r--src/spdk/scripts/perf/nvmf/common.py42
-rw-r--r--src/spdk/scripts/perf/nvmf/config.json37
-rwxr-xr-xsrc/spdk/scripts/perf/nvmf/run_nvmf.py941
-rw-r--r--src/spdk/scripts/perf/vhost/fio_test.conf20
-rw-r--r--src/spdk/scripts/perf/vhost/run_vhost_test.py219
-rwxr-xr-xsrc/spdk/scripts/pkgdep.sh160
-rwxr-xr-xsrc/spdk/scripts/pkgdep/arch.sh77
l---------src/spdk/scripts/pkgdep/centos.sh1
-rwxr-xr-xsrc/spdk/scripts/pkgdep/clear-linux-os.sh33
-rwxr-xr-xsrc/spdk/scripts/pkgdep/debian.sh58
l---------src/spdk/scripts/pkgdep/fedora.sh1
-rwxr-xr-xsrc/spdk/scripts/pkgdep/freebsd.sh17
-rwxr-xr-xsrc/spdk/scripts/pkgdep/rhel.sh73
-rwxr-xr-xsrc/spdk/scripts/pkgdep/sles.sh34
l---------src/spdk/scripts/pkgdep/ubuntu.sh1
-rw-r--r--src/spdk/scripts/posix.txt82
-rwxr-xr-xsrc/spdk/scripts/prep_benchmarks.sh73
-rwxr-xr-xsrc/spdk/scripts/qat_setup.sh96
-rwxr-xr-xsrc/spdk/scripts/rpc.py2507
-rw-r--r--src/spdk/scripts/rpc/__init__.py201
-rw-r--r--src/spdk/scripts/rpc/app.py78
-rw-r--r--src/spdk/scripts/rpc/bdev.py1105
-rw-r--r--src/spdk/scripts/rpc/blobfs.py57
-rw-r--r--src/spdk/scripts/rpc/client.py183
-rw-r--r--src/spdk/scripts/rpc/env_dpdk.py8
-rw-r--r--src/spdk/scripts/rpc/helpers.py16
-rw-r--r--src/spdk/scripts/rpc/idxd.py8
-rw-r--r--src/spdk/scripts/rpc/ioat.py17
-rw-r--r--src/spdk/scripts/rpc/iscsi.py558
-rw-r--r--src/spdk/scripts/rpc/log.py75
-rw-r--r--src/spdk/scripts/rpc/lvol.py228
-rw-r--r--src/spdk/scripts/rpc/nbd.py25
-rw-r--r--src/spdk/scripts/rpc/net.py35
-rw-r--r--src/spdk/scripts/rpc/notify.py30
-rw-r--r--src/spdk/scripts/rpc/nvme.py87
-rw-r--r--src/spdk/scripts/rpc/nvmf.py483
-rw-r--r--src/spdk/scripts/rpc/pmem.py35
-rw-r--r--src/spdk/scripts/rpc/sock.py41
-rw-r--r--src/spdk/scripts/rpc/subsystem.py12
-rw-r--r--src/spdk/scripts/rpc/trace.py33
-rw-r--r--src/spdk/scripts/rpc/vhost.py190
-rw-r--r--src/spdk/scripts/rpc/vmd.py3
-rwxr-xr-xsrc/spdk/scripts/rpc_http_proxy.py124
-rwxr-xr-xsrc/spdk/scripts/rxe_cfg_small.sh265
-rwxr-xr-xsrc/spdk/scripts/setup.sh885
-rwxr-xr-xsrc/spdk/scripts/spdkcli.py84
-rw-r--r--src/spdk/scripts/spdkcli/__init__.py1
-rw-r--r--src/spdk/scripts/spdkcli/ui_node.py861
-rw-r--r--src/spdk/scripts/spdkcli/ui_node_iscsi.py639
-rw-r--r--src/spdk/scripts/spdkcli/ui_node_nvmf.py363
-rw-r--r--src/spdk/scripts/spdkcli/ui_root.py560
-rw-r--r--src/spdk/scripts/vagrant/README.md237
-rw-r--r--src/spdk/scripts/vagrant/Vagrantfile291
-rw-r--r--src/spdk/scripts/vagrant/Vagrantfile_openstack_vm82
-rw-r--r--src/spdk/scripts/vagrant/Vagrantfile_vhost_vm138
-rw-r--r--src/spdk/scripts/vagrant/autorun-spdk.conf31
-rwxr-xr-xsrc/spdk/scripts/vagrant/create_nvme_img.sh77
-rwxr-xr-xsrc/spdk/scripts/vagrant/create_openstack_vm.sh33
-rwxr-xr-xsrc/spdk/scripts/vagrant/create_vbox.sh334
-rwxr-xr-xsrc/spdk/scripts/vagrant/create_vhost_vm.sh132
-rw-r--r--src/spdk/scripts/vagrant/local.conf51
-rwxr-xr-xsrc/spdk/scripts/vagrant/run-autorun.sh245
-rwxr-xr-xsrc/spdk/scripts/vagrant/update.sh98
84 files changed, 17203 insertions, 0 deletions
diff --git a/src/spdk/scripts/ceph/ceph.conf b/src/spdk/scripts/ceph/ceph.conf
new file mode 100644
index 000000000..9cf918e5d
--- /dev/null
+++ b/src/spdk/scripts/ceph/ceph.conf
@@ -0,0 +1,64 @@
+[global]
+ debug_lockdep = 0/0
+ debug_context = 0/0
+ debug_crush = 0/0
+ debug_buffer = 0/0
+ debug_timer = 0/0
+ debug_filer = 0/0
+ debug_objecter = 0/0
+ debug_rados = 0/0
+ debug_rbd = 0/0
+ debug_ms = 0/0
+ debug_monc = 0/0
+ debug_tp = 0/0
+ debug_auth = 0/0
+ debug_finisher = 0/0
+ debug_heartbeatmap = 0/0
+ debug_perfcounter = 0/0
+ debug_asok = 0/0
+ debug_throttle = 0/0
+ debug_mon = 0/0
+ debug_paxos = 0/0
+ debug_rgw = 0/0
+
+ perf = true
+ mutex_perf_counter = false
+ throttler_perf_counter = false
+ rbd cache = false
+ mon_allow_pool_delete = true
+
+ osd_pool_default_size = 1
+
+[mon]
+ mon_max_pool_pg_num=166496
+ mon_osd_max_split_count = 10000
+ mon_pg_warn_max_per_osd = 10000
+
+[osd]
+ osd_op_threads = 64
+ filestore_queue_max_ops=5000
+ filestore_queue_committing_max_ops=5000
+ journal_max_write_entries=1000
+ journal_queue_max_ops=3000
+ objecter_inflight_ops=102400
+ filestore_wbthrottle_enable=false
+ filestore_queue_max_bytes=1048576000
+ filestore_queue_committing_max_bytes=1048576000
+ journal_max_write_bytes=1048576000
+ journal_queue_max_bytes=1048576000
+ ms_dispatch_throttle_bytes=1048576000
+	objecter_inflight_op_bytes=1048576000
+ filestore_max_sync_interval=10
+ osd_client_message_size_cap = 0
+ osd_client_message_cap = 0
+ osd_enable_op_tracker = false
+ filestore_fd_cache_size = 10240
+ filestore_fd_cache_shards = 64
+ filestore_op_threads = 16
+ osd_op_num_shards = 48
+ osd_op_num_threads_per_shard = 2
+ osd_pg_object_context_cache_count = 10240
+ filestore_odsync_write = True
+ journal_dynamic_throttle = True
+
+[osd.0]
diff --git a/src/spdk/scripts/ceph/start.sh b/src/spdk/scripts/ceph/start.sh
new file mode 100755
index 000000000..edff469f3
--- /dev/null
+++ b/src/spdk/scripts/ceph/start.sh
@@ -0,0 +1,139 @@
+#!/usr/bin/env bash
+# create mon
+
+set -x
+set -e
+
+script_dir=$(readlink -f $(dirname $0))
+
+base_dir=/var/tmp/ceph
+mon_ip=$1
+mon_dir=${base_dir}/mon.a
+pid_dir=${base_dir}/pid
+ceph_conf=${base_dir}/ceph.conf
+mnt_dir=${base_dir}/mnt
+image=/var/tmp/ceph_raw.img
+dev=/dev/loop200
+
+umount ${dev}p2 || true
+losetup -d $dev || true
+
+# partition osd
+if [ -d $base_dir ]; then
+ rm -rf $base_dir
+fi
+mkdir ${base_dir}
+cp ${script_dir}/ceph.conf $ceph_conf
+
+if [ ! -e $image ]; then
+ fallocate -l 4G $image
+fi
+
+mknod ${dev} b 7 200 || true
+losetup ${dev} ${image} || true
+
+PARTED="parted -s"
+SGDISK="sgdisk"
+
+echo "Partitioning ${dev}"
+${PARTED} ${dev} mktable gpt
+sleep 2
+
+${PARTED} ${dev} mkpart primary 0% 2GiB
+${PARTED} ${dev} mkpart primary 2GiB 100%
+
+partno=0
+echo "Setting name on ${dev}"
+${SGDISK} -c 1:osd-device-${partno}-journal ${dev}
+${SGDISK} -c 2:osd-device-${partno}-data ${dev}
+kpartx ${dev}
+
+# later versions of ceph-12 have a lot of changes; to be compatible with the new
+# version, use ceph-deploy.
+ceph_version=$(ceph -v | awk '{print $3}')
+ceph_maj=${ceph_version%%.*}
+if [ $ceph_maj -gt 12 ]; then
+ update_config=true
+ rm -f /var/log/ceph/ceph-mon.a.log || true
+ set_min_mon_release="--set-min-mon-release 14"
+ ceph_osd_extra_config="--check-needs-journal --no-mon-config"
+else
+ update_config=false
+ set_min_mon_release=""
+ ceph_osd_extra_config=""
+fi
+
+# prep osds
+
+mnt_pt=${mnt_dir}/osd-device-0-data
+mkdir -p ${mnt_pt}
+mkfs.xfs -f /dev/disk/by-partlabel/osd-device-0-data
+mount /dev/disk/by-partlabel/osd-device-0-data ${mnt_pt}
+cat << EOL >> $ceph_conf
+osd data = ${mnt_pt}
+osd journal = /dev/disk/by-partlabel/osd-device-0-journal
+
+# add mon address
+[mon.a]
+mon addr = ${mon_ip}:12046
+EOL
+
+# create mon
+rm -rf "${mon_dir:?}/"*
+mkdir -p ${mon_dir}
+mkdir -p ${pid_dir}
+rm -f /etc/ceph/ceph.client.admin.keyring
+
+ceph-authtool --create-keyring --gen-key --name=mon. ${base_dir}/keyring --cap mon 'allow *'
+ceph-authtool --gen-key --name=client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *' ${base_dir}/keyring
+
+monmaptool --create --clobber --add a ${mon_ip}:12046 --print ${base_dir}/monmap $set_min_mon_release
+
+sh -c "ulimit -c unlimited && exec ceph-mon --mkfs -c ${ceph_conf} -i a --monmap=${base_dir}/monmap --keyring=${base_dir}/keyring --mon-data=${mon_dir}"
+
+if [ $update_config = true ]; then
+ sed -i 's/mon addr = /mon addr = v2:/g' $ceph_conf
+fi
+
+cp ${base_dir}/keyring ${mon_dir}/keyring
+
+cp $ceph_conf /etc/ceph/ceph.conf
+
+cp ${base_dir}/keyring /etc/ceph/keyring
+cp ${base_dir}/keyring /etc/ceph/ceph.client.admin.keyring
+chmod a+r /etc/ceph/ceph.client.admin.keyring
+
+ceph-run sh -c "ulimit -n 16384 && ulimit -c unlimited && exec ceph-mon -c ${ceph_conf} -i a --keyring=${base_dir}/keyring --pid-file=${base_dir}/pid/root@$(hostname).pid --mon-data=${mon_dir}" || true
+
+# after ceph-mon creation, ceph -s should work.
+if [ $update_config = true ]; then
+ # start to get whole log.
+ ceph-conf --name mon.a --show-config-value log_file
+
+ # add fsid to ceph config file.
+ fsid=$(ceph -s | grep id | awk '{print $2}')
+ sed -i 's/perf = true/perf = true\n\tfsid = '$fsid' \n/g' $ceph_conf
+
+ # unify the filesystem with the old versions.
+ sed -i 's/perf = true/perf = true\n\tosd objectstore = filestore\n/g' $ceph_conf
+ cat ${ceph_conf}
+fi
+
+# create osd
+
+i=0
+
+mkdir -p ${mnt_dir}
+
+uuid=$(uuidgen)
+ceph -c ${ceph_conf} osd create ${uuid} $i
+ceph-osd -c ${ceph_conf} -i $i --mkfs --mkkey --osd-uuid ${uuid} ${ceph_osd_extra_config}
+ceph -c ${ceph_conf} osd crush add osd.${i} 1.0 host=$(hostname) root=default
+ceph -c ${ceph_conf} -i ${mnt_dir}/osd-device-${i}-data/keyring auth add osd.${i} osd "allow *" mon "allow profile osd" mgr "allow *"
+
+# start osd
+pkill -9 ceph-osd || true
+sleep 2
+
+mkdir -p ${pid_dir}
+env -i TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES=134217728 ceph-osd -c ${ceph_conf} -i 0 --pid-file=${pid_dir}/ceph-osd.0.pid
diff --git a/src/spdk/scripts/ceph/stop.sh b/src/spdk/scripts/ceph/stop.sh
new file mode 100755
index 000000000..40bcf8a7e
--- /dev/null
+++ b/src/spdk/scripts/ceph/stop.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+
+set -x
+
+base_dir=/var/tmp/ceph
+image=${base_dir}/ceph_raw.img
+dev=/dev/loop200
+
+pkill -9 ceph
+sleep 3
+umount ${dev}p2
+losetup -d $dev
+rm -rf $base_dir
diff --git a/src/spdk/scripts/check_format.sh b/src/spdk/scripts/check_format.sh
new file mode 100755
index 000000000..f4df5c41b
--- /dev/null
+++ b/src/spdk/scripts/check_format.sh
@@ -0,0 +1,500 @@
+#!/usr/bin/env bash
+
+readonly BASEDIR=$(readlink -f $(dirname $0))/..
+cd $BASEDIR
+
+# exit on errors
+set -e
+
+if ! hash nproc 2> /dev/null; then
+
+ function nproc() {
+ echo 8
+ }
+
+fi
+
+function version_lt() {
+ [ $(echo -e "$1\n$2" | sort -V | head -1) != "$1" ]
+}
+
+function array_contains_string() {
+ name="$1[@]"
+ array=("${!name}")
+
+ for element in "${array[@]}"; do
+ if [ "$element" = "$2" ]; then
+ return $(true)
+ fi
+ done
+
+ return $(false)
+}
+
+rc=0
+
+echo -n "Checking file permissions..."
+
+while read -r perm _res0 _res1 path; do
+ if [ ! -f "$path" ]; then
+ continue
+ fi
+
+ # Skip symlinks
+ if [[ -L $path ]]; then
+ continue
+ fi
+ fname=$(basename -- "$path")
+
+ case ${fname##*.} in
+ c | h | cpp | cc | cxx | hh | hpp | md | html | js | json | svg | Doxyfile | yml | LICENSE | README | conf | in | Makefile | mk | gitignore | go | txt)
+ # These file types should never be executable
+ if [ "$perm" -eq 100755 ]; then
+ echo "ERROR: $path is marked executable but is a code file."
+ rc=1
+ fi
+ ;;
+ *)
+ shebang=$(head -n 1 $path | cut -c1-3)
+
+ # git only tracks the execute bit, so will only ever return 755 or 644 as the permission.
+ if [ "$perm" -eq 100755 ]; then
+ # If the file has execute permission, it should start with a shebang.
+ if [ "$shebang" != "#!/" ]; then
+ echo "ERROR: $path is marked executable but does not start with a shebang."
+ rc=1
+ fi
+ else
+			# If the file does not have execute permissions, it should not start with a shebang.
+ if [ "$shebang" = "#!/" ]; then
+ echo "ERROR: $path is not marked executable but starts with a shebang."
+ rc=1
+ fi
+ fi
+ ;;
+ esac
+
+done <<< "$(git grep -I --name-only --untracked -e . | git ls-files -s)"
+
+if [ $rc -eq 0 ]; then
+ echo " OK"
+fi
+
+if hash astyle; then
+ echo -n "Checking coding style..."
+ if [ "$(astyle -V)" \< "Artistic Style Version 3" ]; then
+ echo -n " Your astyle version is too old so skipping coding style checks. Please update astyle to at least 3.0.1 version..."
+ else
+ rm -f astyle.log
+ touch astyle.log
+ # Exclude rte_vhost code imported from DPDK - we want to keep the original code
+ # as-is to enable ongoing work to synch with a generic upstream DPDK vhost library,
+ # rather than making diffs more complicated by a lot of changes to follow SPDK
+ # coding standards.
+ git ls-files '*.[ch]' '*.cpp' '*.cc' '*.cxx' '*.hh' '*.hpp' \
+ | grep -v rte_vhost | grep -v cpp_headers \
+ | xargs -P$(nproc) -n10 astyle --options=.astylerc >> astyle.log
+ if grep -q "^Formatted" astyle.log; then
+ echo " errors detected"
+ git diff --ignore-submodules=all
+ sed -i -e 's/ / /g' astyle.log
+ grep --color=auto "^Formatted.*" astyle.log
+ echo "Incorrect code style detected in one or more files."
+ echo "The files have been automatically formatted."
+ echo "Remember to add the files to your commit."
+ rc=1
+ else
+ echo " OK"
+ fi
+ rm -f astyle.log
+ fi
+else
+ echo "You do not have astyle installed so your code style is not being checked!"
+fi
+
+GIT_VERSION=$(git --version | cut -d' ' -f3)
+
+if version_lt "1.9.5" "${GIT_VERSION}"; then
+ # git <1.9.5 doesn't support pathspec magic exclude
+ echo " Your git version is too old to perform all tests. Please update git to at least 1.9.5 version..."
+ exit 0
+fi
+
+echo -n "Checking comment style..."
+
+git grep --line-number -e '\/[*][^ *-]' -- '*.[ch]' > comment.log || true
+git grep --line-number -e '[^ ][*]\/' -- '*.[ch]' ':!lib/rte_vhost*/*' >> comment.log || true
+git grep --line-number -e '^[*]' -- '*.[ch]' >> comment.log || true
+git grep --line-number -e '\s\/\/' -- '*.[ch]' >> comment.log || true
+git grep --line-number -e '^\/\/' -- '*.[ch]' >> comment.log || true
+
+if [ -s comment.log ]; then
+ echo " Incorrect comment formatting detected"
+ cat comment.log
+ rc=1
+else
+ echo " OK"
+fi
+rm -f comment.log
+
+echo -n "Checking for spaces before tabs..."
+git grep --line-number $' \t' -- './*' ':!*.patch' > whitespace.log || true
+if [ -s whitespace.log ]; then
+ echo " Spaces before tabs detected"
+ cat whitespace.log
+ rc=1
+else
+ echo " OK"
+fi
+rm -f whitespace.log
+
+echo -n "Checking trailing whitespace in output strings..."
+
+git grep --line-number -e ' \\n"' -- '*.[ch]' > whitespace.log || true
+
+if [ -s whitespace.log ]; then
+ echo " Incorrect trailing whitespace detected"
+ cat whitespace.log
+ rc=1
+else
+ echo " OK"
+fi
+rm -f whitespace.log
+
+echo -n "Checking for use of forbidden library functions..."
+
+git grep --line-number -w '\(atoi\|atol\|atoll\|strncpy\|strcpy\|strcat\|sprintf\|vsprintf\)' -- './*.c' ':!lib/rte_vhost*/**' > badfunc.log || true
+if [ -s badfunc.log ]; then
+ echo " Forbidden library functions detected"
+ cat badfunc.log
+ rc=1
+else
+ echo " OK"
+fi
+rm -f badfunc.log
+
+echo -n "Checking for use of forbidden CUnit macros..."
+
+git grep --line-number -w 'CU_ASSERT_FATAL' -- 'test/*' ':!test/spdk_cunit.h' > badcunit.log || true
+if [ -s badcunit.log ]; then
+ echo " Forbidden CU_ASSERT_FATAL usage detected - use SPDK_CU_ASSERT_FATAL instead"
+ cat badcunit.log
+ rc=1
+else
+ echo " OK"
+fi
+rm -f badcunit.log
+
+echo -n "Checking blank lines at end of file..."
+
+if ! git grep -I -l -e . -z './*' ':!*.patch' \
+ | xargs -0 -P$(nproc) -n1 scripts/eofnl > eofnl.log; then
+ echo " Incorrect end-of-file formatting detected"
+ cat eofnl.log
+ rc=1
+else
+ echo " OK"
+fi
+rm -f eofnl.log
+
+echo -n "Checking for POSIX includes..."
+git grep -I -i -f scripts/posix.txt -- './*' ':!include/spdk/stdinc.h' ':!include/linux/**' ':!lib/rte_vhost*/**' ':!scripts/posix.txt' ':!*.patch' > scripts/posix.log || true
+if [ -s scripts/posix.log ]; then
+ echo "POSIX includes detected. Please include spdk/stdinc.h instead."
+ cat scripts/posix.log
+ rc=1
+else
+ echo " OK"
+fi
+rm -f scripts/posix.log
+
+echo -n "Checking for proper function naming conventions..."
+# commit_to_compare = HEAD - 1.
+commit_to_compare="$(git log --pretty=oneline --skip=1 -n 1 | awk '{print $1}')"
+failed_naming_conventions=false
+changed_c_libs=()
+declared_symbols=()
+
+# Build an array of all the modified C files.
+mapfile -t changed_c_libs < <(git diff --name-only HEAD $commit_to_compare -- lib/**/*.c module/**/*.c)
+# Matching groups are 1. qualifiers / return type. 2. function name 3. argument list / comments and stuff after that.
+# Capture just the names of newly added (or modified) function definitions.
+mapfile -t declared_symbols < <(git diff -U0 $commit_to_compare HEAD -- include/spdk*/*.h | sed -En 's/(^[+].*)(spdk[a-z,A-Z,0-9,_]*)(\(.*)/\2/p')
+
+for c_file in "${changed_c_libs[@]}"; do
+ lib_map_file="mk/spdk_blank.map"
+ defined_symbols=()
+ removed_symbols=()
+ exported_symbols=()
+ if ls "$(dirname $c_file)"/*.map &> /dev/null; then
+ lib_map_file="$(ls "$(dirname $c_file)"/*.map)"
+ fi
+ # Matching groups are 1. leading +sign. 2, function name 3. argument list / anything after that.
+ # Capture just the names of newly added (or modified) functions that start with "spdk_"
+ mapfile -t defined_symbols < <(git diff -U0 $commit_to_compare HEAD -- $c_file | sed -En 's/(^[+])(spdk[a-z,A-Z,0-9,_]*)(\(.*)/\2/p')
+ # Capture the names of removed symbols to catch edge cases where we just move definitions around.
+ mapfile -t removed_symbols < <(git diff -U0 $commit_to_compare HEAD -- $c_file | sed -En 's/(^[-])(spdk[a-z,A-Z,0-9,_]*)(\(.*)/\2/p')
+ for symbol in "${removed_symbols[@]}"; do
+ defined_symbols=("${defined_symbols[@]/$symbol/}")
+ done
+	# It's possible that we just modified a function's arguments so unfortunately we can't just look at changed lines in this function.
+ # matching groups are 1. All leading whitespace 2. function name. Capture just the symbol name.
+ mapfile -t exported_symbols < <(sed -En 's/(^[[:space:]]*)(spdk[a-z,A-Z,0-9,_]*);/\2/p' < $lib_map_file)
+ for defined_symbol in "${defined_symbols[@]}"; do
+ # if the list of defined symbols is equal to the list of removed symbols, then we are left with a single empty element. skip it.
+ if [ "$defined_symbol" = '' ]; then
+ continue
+ fi
+ not_exported=true
+ not_declared=true
+ if array_contains_string exported_symbols $defined_symbol; then
+ not_exported=false
+ fi
+
+ if array_contains_string declared_symbols $defined_symbol; then
+ not_declared=false
+ fi
+
+ if $not_exported || $not_declared; then
+ if ! $failed_naming_conventions; then
+ echo " found naming convention errors."
+ fi
+ echo "function $defined_symbol starts with spdk_ which is reserved for public API functions."
+ echo "Please add this function to its corresponding map file and a public header or remove the spdk_ prefix."
+ failed_naming_conventions=true
+ rc=1
+ fi
+ done
+done
+
+if ! $failed_naming_conventions; then
+ echo " OK"
+fi
+
+echo -n "Checking #include style..."
+git grep -I -i --line-number "#include <spdk/" -- '*.[ch]' > scripts/includes.log || true
+if [ -s scripts/includes.log ]; then
+ echo "Incorrect #include syntax. #includes of spdk/ files should use quotes."
+ cat scripts/includes.log
+ rc=1
+else
+ echo " OK"
+fi
+rm -f scripts/includes.log
+
+if hash pycodestyle 2> /dev/null; then
+ PEP8=pycodestyle
+elif hash pep8 2> /dev/null; then
+ PEP8=pep8
+fi
+
+if [ -n "${PEP8}" ]; then
+ echo -n "Checking Python style..."
+
+ PEP8_ARGS+=" --max-line-length=140"
+
+ error=0
+ git ls-files '*.py' | xargs -P$(nproc) -n1 $PEP8 $PEP8_ARGS > pep8.log || error=1
+ if [ $error -ne 0 ]; then
+ echo " Python formatting errors detected"
+ cat pep8.log
+ rc=1
+ else
+ echo " OK"
+ fi
+ rm -f pep8.log
+else
+ echo "You do not have pycodestyle or pep8 installed so your Python style is not being checked!"
+fi
+
+# find compatible shfmt binary
+shfmt_bins=$(compgen -c | grep '^shfmt' || true)
+for bin in $shfmt_bins; do
+ if version_lt "$("$bin" --version)" "3.1.0"; then
+ shfmt=$bin
+ break
+ fi
+done
+
+if [ -n "$shfmt" ]; then
+ shfmt_cmdline=() silly_plural=()
+
+ silly_plural[1]="s"
+
+ commits=() sh_files=() sh_files_repo=() sh_files_staged=()
+
+ mapfile -t sh_files_repo < <(git ls-files '*.sh')
+ # Fetch .sh files only from the commits that are targeted for merge
+ while read -r _ commit; do
+ commits+=("$commit")
+ done < <(git cherry -v origin/master)
+
+ mapfile -t sh_files < <(git diff --name-only HEAD origin/master "${sh_files_repo[@]}")
+ # In case of a call from a pre-commit git hook
+ mapfile -t sh_files_staged < <(
+ IFS="|"
+ git diff --cached --name-only "${sh_files_repo[@]}" | grep -v "${sh_files[*]}"
+ )
+
+ if ((${#sh_files[@]})); then
+ printf 'Checking .sh formatting style...'
+
+ if ((${#sh_files_staged[@]})); then
+ sh_files+=("${sh_files_staged[@]}")
+ fi
+
+ shfmt_cmdline+=(-i 0) # indent_style = tab|indent_size = 0
+ shfmt_cmdline+=(-bn) # binary_next_line = true
+ shfmt_cmdline+=(-ci) # switch_case_indent = true
+ shfmt_cmdline+=(-ln bash) # shell_variant = bash (default)
+ shfmt_cmdline+=(-d) # diffOut - print diff of the changes and exit with != 0
+ shfmt_cmdline+=(-sr) # redirect operators will be followed by a space
+
+ diff=${output_dir:-$PWD}/$shfmt.patch
+
+ # Explicitly tell shfmt to not look for .editorconfig. .editorconfig is also not looked up
+ # in case any formatting arguments has been passed on its cmdline.
+ if ! SHFMT_NO_EDITORCONFIG=true "$shfmt" "${shfmt_cmdline[@]}" "${sh_files[@]}" > "$diff"; then
+ # In case shfmt detects an actual syntax error it will write out a proper message on
+ # its stderr, hence the diff file should remain empty.
+ if [[ -s $diff ]]; then
+ diff_out=$(< "$diff")
+ fi
+
+ cat <<- ERROR_SHFMT
+
+ * Errors in style formatting have been detected.
+ ${diff_out:+* Please, review the generated patch at $diff
+
+ # _START_OF_THE_DIFF
+
+ ${diff_out:-ERROR}
+
+ # _END_OF_THE_DIFF
+ }
+
+ ERROR_SHFMT
+ rc=1
+ else
+ rm -f "$diff"
+ printf ' OK\n'
+ fi
+ fi
+else
+ echo "shfmt not detected, Bash style formatting check is skipped"
+fi
+
+if hash shellcheck 2> /dev/null; then
+ echo -n "Checking Bash style..."
+
+ shellcheck_v=$(shellcheck --version | grep -P "version: [0-9\.]+" | cut -d " " -f2)
+
+ # SHCK_EXCLUDE contains a list of all of the spellcheck errors found in SPDK scripts
+ # currently. New errors should only be added to this list if the cost of fixing them
+ # is deemed too high. For more information about the errors, go to:
+ # https://github.com/koalaman/shellcheck/wiki/Checks
+ # Error descriptions can also be found at: https://github.com/koalaman/shellcheck/wiki
+ # SPDK fails some error checks which have been deprecated in later versions of shellcheck.
+ # We will not try to fix these error checks, but instead just leave the error types here
+ # so that we can still run with older versions of shellcheck.
+ SHCK_EXCLUDE="SC1117"
+ # SPDK has decided to not fix violations of these errors.
+	# We are aware of the exclude list below and we want these errors to be excluded.
+ # SC1083: This {/} is literal. Check expression (missing ;/\n?) or quote it.
+ # SC1090: Can't follow non-constant source. Use a directive to specify location.
+ # SC1091: Not following: (error message here)
+ # SC2001: See if you can use ${variable//search/replace} instead.
+ # SC2010: Don't use ls | grep. Use a glob or a for loop with a condition to allow non-alphanumeric filenames.
+ # SC2015: Note that A && B || C is not if-then-else. C may run when A is true.
+ # SC2016: Expressions don't expand in single quotes, use double quotes for that.
+ # SC2034: foo appears unused. Verify it or export it.
+ # SC2046: Quote this to prevent word splitting.
+ # SC2086: Double quote to prevent globbing and word splitting.
+ # SC2119: Use foo "$@" if function's $1 should mean script's $1.
+ # SC2120: foo references arguments, but none are ever passed.
+ # SC2148: Add shebang to the top of your script.
+ # SC2153: Possible Misspelling: MYVARIABLE may not be assigned, but MY_VARIABLE is.
+ # SC2154: var is referenced but not assigned.
+ # SC2164: Use cd ... || exit in case cd fails.
+ # SC2174: When used with -p, -m only applies to the deepest directory.
+ # SC2206: Quote to prevent word splitting/globbing,
+ # or split robustly with mapfile or read -a.
+ # SC2207: Prefer mapfile or read -a to split command output (or quote to avoid splitting).
+ # SC2223: This default assignment may cause DoS due to globbing. Quote it.
+ SHCK_EXCLUDE="$SHCK_EXCLUDE,SC1083,SC1090,SC1091,SC2010,SC2015,SC2016,SC2034,SC2046,SC2086,\
+SC2119,SC2120,SC2148,SC2153,SC2154,SC2164,SC2174,SC2001,SC2206,SC2207,SC2223"
+
+ SHCK_FORMAT="diff"
+ SHCK_APPLY=true
+ if [ "$shellcheck_v" \< "0.7.0" ]; then
+ SHCK_FORMAT="tty"
+ SHCK_APPLY=false
+ fi
+ SHCH_ARGS=" -x -e $SHCK_EXCLUDE -f $SHCK_FORMAT"
+
+ error=0
+ git ls-files '*.sh' | xargs -P$(nproc) -n1 shellcheck $SHCH_ARGS &> shellcheck.log || error=1
+ if [ $error -ne 0 ]; then
+ echo " Bash formatting errors detected!"
+
+ # Some errors are not auto-fixable. Fall back to tty output.
+ if grep -q "Use another format to see them." shellcheck.log; then
+ SHCK_FORMAT="tty"
+ SHCK_APPLY=false
+ SHCH_ARGS=" -e $SHCK_EXCLUDE -f $SHCK_FORMAT"
+ git ls-files '*.sh' | xargs -P$(nproc) -n1 shellcheck $SHCH_ARGS > shellcheck.log || error=1
+ fi
+
+ cat shellcheck.log
+ if $SHCK_APPLY; then
+ git apply shellcheck.log
+ echo "Bash errors were automatically corrected."
+ echo "Please remember to add the changes to your commit."
+ fi
+ rc=1
+ else
+ echo " OK"
+ fi
+ rm -f shellcheck.log
+else
+ echo "You do not have shellcheck installed so your Bash style is not being checked!"
+fi
+
+# Check if any of the public interfaces were modified by this patch.
+# Warn the user to consider updating the changelog if any changes
+# are detected.
+echo -n "Checking whether CHANGELOG.md should be updated..."
+staged=$(git diff --name-only --cached .)
+working=$(git status -s --porcelain --ignore-submodules | grep -iv "??" | awk '{print $2}')
+files="$staged $working"
+if [[ "$files" = " " ]]; then
+ files=$(git diff-tree --no-commit-id --name-only -r HEAD)
+fi
+
+has_changelog=0
+for f in $files; do
+ if [[ $f == CHANGELOG.md ]]; then
+ # The user has a changelog entry, so exit.
+ has_changelog=1
+ break
+ fi
+done
+
+needs_changelog=0
+if [ $has_changelog -eq 0 ]; then
+ for f in $files; do
+ if [[ $f == include/spdk/* ]] || [[ $f == scripts/rpc.py ]] || [[ $f == etc/* ]]; then
+ echo ""
+ echo -n "$f was modified. Consider updating CHANGELOG.md."
+ needs_changelog=1
+ fi
+ done
+fi
+
+if [ $needs_changelog -eq 0 ]; then
+ echo " OK"
+else
+ echo ""
+fi
+
+exit $rc
diff --git a/src/spdk/scripts/common.sh b/src/spdk/scripts/common.sh
new file mode 100644
index 000000000..a70cfba0d
--- /dev/null
+++ b/src/spdk/scripts/common.sh
@@ -0,0 +1,239 @@
+# Common shell utility functions
+
+# Check if PCI device is on PCI_WHITELIST and not on PCI_BLACKLIST
+# Env:
+# if PCI_WHITELIST is empty assume the device is whitelisted
+# if PCI_BLACKLIST is empty assume the device is NOT blacklisted
+# Params:
+# $1 - PCI BDF
+function pci_can_use() {
+ local i
+
+ # The '\ ' part is important
+ if [[ " $PCI_BLACKLIST " =~ \ $1\ ]]; then
+ return 1
+ fi
+
+ if [[ -z "$PCI_WHITELIST" ]]; then
+ #no whitelist specified, bind all devices
+ return 0
+ fi
+
+ for i in $PCI_WHITELIST; do
+ if [ "$i" == "$1" ]; then
+ return 0
+ fi
+ done
+
+ return 1
+}
+
+cache_pci_init() {
+ local -gA pci_bus_cache
+ local -gA pci_ids_vendor
+ local -gA pci_ids_device
+
+ [[ -z ${pci_bus_cache[*]} || $CMD == reset ]] || return 1
+
+ pci_bus_cache=()
+ pci_bus_ids_vendor=()
+ pci_bus_ids_device=()
+}
+
+cache_pci() {
+ local pci=$1 class=$2 vendor=$3 device=$4
+
+ if [[ -n $class ]]; then
+ class=0x${class/0x/}
+ pci_bus_cache["$class"]="${pci_bus_cache["$class"]:+${pci_bus_cache["$class"]} }$pci"
+ fi
+ if [[ -n $vendor && -n $device ]]; then
+ vendor=0x${vendor/0x/} device=0x${device/0x/}
+ pci_bus_cache["$vendor"]="${pci_bus_cache["$vendor"]:+${pci_bus_cache["$vendor"]} }$pci"
+ pci_bus_cache["$device"]="${pci_bus_cache["$device"]:+${pci_bus_cache["$device"]} }$pci"
+ pci_bus_cache["$vendor:$device"]="${pci_bus_cache["$vendor:$device"]:+${pci_bus_cache["$vendor:$device"]} }$pci"
+
+ pci_ids_vendor["$pci"]=$vendor
+ pci_ids_device["$pci"]=$device
+ fi
+}
+
+cache_pci_bus_sysfs() {
+ [[ -e /sys/bus/pci/devices ]] || return 1
+
+ cache_pci_init || return 0
+
+ local pci
+ local class vendor device
+
+ for pci in /sys/bus/pci/devices/*; do
+ class=$(< "$pci/class") vendor=$(< "$pci/vendor") device=$(< "$pci/device")
+ cache_pci "${pci##*/}" "$class" "$vendor" "$device"
+ done
+}
+
+cache_pci_bus_lspci() {
+ hash lspci 2> /dev/null || return 1
+
+ cache_pci_init || return 0
+
+ local dev
+ while read -ra dev; do
+ dev=("${dev[@]//\"/}")
+ # lspci splits ls byte of the class (prog. interface) into a separate
+ # field if it's != 0. Look for it and normalize the value to fit with
+ # what kernel exposes under sysfs.
+ if [[ ${dev[*]} =~ -p([0-9]+) ]]; then
+ dev[1]+=${BASH_REMATCH[1]}
+ else
+ dev[1]+=00
+ fi
+ # pci class vendor device
+ cache_pci "${dev[@]::4}"
+ done < <(lspci -Dnmm)
+}
+
+cache_pci_bus_pciconf() {
+ hash pciconf 2> /dev/null || return 1
+
+ cache_pci_init || return 0
+
+ local class vd vendor device
+ local pci domain bus device function
+
+ while read -r pci class _ vd _; do
+ IFS=":" read -r domain bus device function _ <<< "${pci##*pci}"
+ pci=$(printf '%04x:%02x:%02x:%x' \
+ "$domain" "$bus" "$device" "$function")
+ class=$(printf '0x%06x' $((class)))
+ vendor=$(printf '0x%04x' $((vd & 0xffff)))
+ device=$(printf '0x%04x' $(((vd >> 16) & 0xffff)))
+
+ cache_pci "$pci" "$class" "$vendor" "$device"
+ done < <(pciconf -l)
+}
+
+cache_pci_bus() {
+ case "$(uname -s)" in
+ Linux) cache_pci_bus_lspci || cache_pci_bus_sysfs ;;
+ FreeBSD) cache_pci_bus_pciconf ;;
+ esac
+}
+
+iter_all_pci_sysfs() {
+ cache_pci_bus_sysfs || return 1
+
+ # default to class of the nvme devices
+ local find=${1:-0x010802} findx=$2
+ local pci pcis
+
+ [[ -n ${pci_bus_cache["$find"]} ]] || return 0
+ read -ra pcis <<< "${pci_bus_cache["$find"]}"
+
+ if ((findx)); then
+ printf '%s\n' "${pcis[@]::findx}"
+ else
+ printf '%s\n' "${pcis[@]}"
+ fi
+}
+
+# This function will ignore PCI_WHITELIST and PCI_BLACKLIST
+function iter_all_pci_class_code() {
+ local class
+ local subclass
+ local progif
+ class="$(printf %02x $((0x$1)))"
+ subclass="$(printf %02x $((0x$2)))"
+ progif="$(printf %02x $((0x$3)))"
+
+ if hash lspci &> /dev/null; then
+ if [ "$progif" != "00" ]; then
+ lspci -mm -n -D \
+ | grep -i -- "-p${progif}" \
+ | awk -v cc="\"${class}${subclass}\"" -F " " \
+ '{if (cc ~ $2) print $1}' | tr -d '"'
+ else
+ lspci -mm -n -D \
+ | awk -v cc="\"${class}${subclass}\"" -F " " \
+ '{if (cc ~ $2) print $1}' | tr -d '"'
+ fi
+ elif hash pciconf &> /dev/null; then
+ local addr=($(pciconf -l | grep -i "class=0x${class}${subclass}${progif}" \
+ | cut -d$'\t' -f1 | sed -e 's/^[a-zA-Z0-9_]*@pci//g' | tr ':' ' '))
+ printf "%04x:%02x:%02x:%x\n" ${addr[0]} ${addr[1]} ${addr[2]} ${addr[3]}
+ elif iter_all_pci_sysfs "$(printf '0x%06x' $((0x$progif | 0x$subclass << 8 | 0x$class << 16)))"; then
+ :
+ else
+ echo "Missing PCI enumeration utility" >&2
+ exit 1
+ fi
+}
+
+# This function will ignore PCI_WHITELIST and PCI_BLACKLIST
+function iter_all_pci_dev_id() {
+ local ven_id
+ local dev_id
+ ven_id="$(printf %04x $((0x$1)))"
+ dev_id="$(printf %04x $((0x$2)))"
+
+ if hash lspci &> /dev/null; then
+ lspci -mm -n -D | awk -v ven="\"$ven_id\"" -v dev="\"${dev_id}\"" -F " " \
+ '{if (ven ~ $3 && dev ~ $4) print $1}' | tr -d '"'
+ elif hash pciconf &> /dev/null; then
+ local addr=($(pciconf -l | grep -i "chip=0x${dev_id}${ven_id}" \
+ | cut -d$'\t' -f1 | sed -e 's/^[a-zA-Z0-9_]*@pci//g' | tr ':' ' '))
+ printf "%04x:%02x:%02x:%x\n" ${addr[0]} ${addr[1]} ${addr[2]} ${addr[3]}
+ elif iter_all_pci_sysfs "0x$ven_id:0x$dev_id"; then
+ :
+ else
+ echo "Missing PCI enumeration utility" >&2
+ exit 1
+ fi
+}
+
+function iter_pci_dev_id() {
+ local bdf=""
+
+ for bdf in $(iter_all_pci_dev_id "$@"); do
+ if pci_can_use "$bdf"; then
+ echo "$bdf"
+ fi
+ done
+}
+
+# This function will filter out PCI devices using PCI_WHITELIST and PCI_BLACKLIST
+# See function pci_can_use()
+function iter_pci_class_code() {
+ local bdf=""
+
+ for bdf in $(iter_all_pci_class_code "$@"); do
+ if pci_can_use "$bdf"; then
+ echo "$bdf"
+ fi
+ done
+}
+
+function nvme_in_userspace() {
+ # Check used drivers. If it's not vfio-pci or uio-pci-generic
+ # then most likely PCI_WHITELIST option was used for setup.sh
+ # and we do not want to use that disk.
+
+ local bdf bdfs
+ local nvmes
+
+ if [[ -n ${pci_bus_cache["0x010802"]} ]]; then
+ nvmes=(${pci_bus_cache["0x010802"]})
+ else
+ nvmes=($(iter_pci_class_code 01 08 02))
+ fi
+
+ for bdf in "${nvmes[@]}"; do
+ if [[ -e /sys/bus/pci/drivers/nvme/$bdf ]] \
+ || [[ $(uname -s) == FreeBSD && $(pciconf -l "pci$bdf") == nvme* ]]; then
+ continue
+ fi
+ bdfs+=("$bdf")
+ done
+ ((${#bdfs[@]})) || return 1
+ printf '%s\n' "${bdfs[@]}"
+}
diff --git a/src/spdk/scripts/config_converter.py b/src/spdk/scripts/config_converter.py
new file mode 100755
index 000000000..4fa65a9d7
--- /dev/null
+++ b/src/spdk/scripts/config_converter.py
@@ -0,0 +1,727 @@
+#!/usr/bin/env python3
+
+import configparser
+import re
+import sys
+import json
+from collections import OrderedDict
+
# Per-subsystem buckets of generated RPC items, keyed by RPC method name.
# OrderedDict keeps the methods in the order the generated JSON config
# must list them.
bdev_dict = OrderedDict()
bdev_dict["bdev_set_options"] = []
bdev_dict["bdev_split_create"] = []
bdev_dict["bdev_nvme_set_options"] = []
bdev_dict["bdev_nvme_attach_controller"] = []
bdev_dict["bdev_nvme_set_hotplug"] = []
bdev_dict["bdev_malloc_create"] = []
bdev_dict["bdev_aio_create"] = []
bdev_dict["bdev_pmem_create"] = []
bdev_dict["bdev_virtio_attach_controller"] = []

vhost_dict = OrderedDict()
vhost_dict["vhost_create_scsi_controller"] = []
vhost_dict["vhost_create_blk_controller"] = []
vhost_dict["vhost_create_nvme_controller"] = []

iscsi_dict = OrderedDict()
iscsi_dict["iscsi_set_options"] = []
iscsi_dict["iscsi_create_portal_group"] = []
iscsi_dict["iscsi_create_initiator_group"] = []
iscsi_dict["iscsi_create_target_node"] = []

nvmf_dict = OrderedDict()
nvmf_dict["nvmf_set_config"] = []
nvmf_dict["nvmf_set_max_subsystems"] = []
nvmf_dict["subsystems"] = []


# dictionary with new config that will be written to new json config file.
# Subsystems with a None value have no convertible ini counterpart and are
# emitted with "config": None.
subsystem = {
    "accel": None,
    "interface": None,
    "net_framework": None,
    "bdev": bdev_dict,
    "scsi": [],
    "nvmf": nvmf_dict,
    "nbd": [],
    "vhost": vhost_dict,
    "iscsi": iscsi_dict
}
+
+
class OptionOrderedDict(OrderedDict):
    """Ordered dict that accumulates repeated list-valued options.

    configparser stores multi-line/duplicated option values as lists
    while parsing; assigning a list to an existing key extends the
    stored value instead of replacing it, so repeated ini options are
    concatenated. Any other assignment behaves like a plain
    OrderedDict store.
    """

    def __setitem__(self, option, value):
        if option in self and isinstance(value, list):
            self[option].extend(value)
        else:
            super(OptionOrderedDict, self).__setitem__(option, value)
+
+
# Map ini-style yes/no strings to booleans (only these four spellings).
no_yes_map = {"no": False, "No": False, "Yes": True, "yes": True}
+
+
def generate_new_json_config():
    """Flatten the global ``subsystem`` buckets into SPDK's JSON layout.

    Returns a {"subsystems": [...]} dict with one entry per subsystem in
    the fixed order SPDK expects. The scsi config stays None unless at
    least one item was converted.
    """
    json_subsystem = [
        {'subsystem': "accel", 'config': None},
        {"subsystem": "interface", "config": None},
        {"subsystem": "net_framework", "config": None},
        {"subsystem": "bdev", "config": []},
        {"subsystem": "scsi", "config": None},
        {"subsystem": "nvmf", "config": []},
        {"subsystem": "nbd", "config": []},
        {"subsystem": "vhost", "config": []},
        {"subsystem": "iscsi", "config": []}
    ]
    by_name = {entry["subsystem"]: entry for entry in json_subsystem}

    # These buckets are dicts of per-RPC-method item lists.
    for name in ("bdev", "nvmf", "vhost", "iscsi"):
        for method in subsystem[name]:
            by_name[name]["config"].extend(subsystem[name][method])

    # scsi starts as None and only becomes a list when items exist.
    for item in subsystem['scsi']:
        if by_name["scsi"]["config"] is None:
            by_name["scsi"]["config"] = []
        by_name["scsi"]["config"].append(item)

    return {"subsystems": json_subsystem}
+
+
# Map an ini section family (digits stripped from the section name) to
# the subsystem bucket its generated RPC items belong to.
section_to_subsystem = {
    "Bdev": subsystem['bdev'],
    "AIO": subsystem['bdev'],
    "Malloc": subsystem['bdev'],
    "Nvme": subsystem['bdev'],
    "Pmem": subsystem['bdev'],
    "Split": subsystem['bdev'],
    "Nvmf": subsystem['nvmf'],
    "Subsystem": subsystem['nvmf'],
    "VhostScsi": subsystem['vhost'],
    "VhostBlk": subsystem['vhost'],
    "VhostNvme": subsystem['vhost'],
    "VirtioUser": subsystem['bdev'],
    "iSCSI": subsystem['iscsi'],
    "PortalGroup": subsystem['iscsi'],
    "InitiatorGroup": subsystem['iscsi'],
    "TargetNode": subsystem['iscsi']
}
+
+
def set_param(params, cfg_name, value):
    """Store an ini option value into the matching parameter slot.

    Each entry of ``params`` is ``[ini_name, rpc_name, type_tag, value]``;
    the entry whose ini_name equals ``cfg_name`` gets its value slot
    (index 3) updated. A few rpc names have special decoding rules and
    take precedence over the type tag.
    """
    for entry in params:
        if entry[0] != cfg_name:
            continue
        rpc_name, kind = entry[1], entry[2]
        if rpc_name == "disable_chap":
            entry[3] = value == "None"
        elif rpc_name == "require_chap":
            entry[3] = value in ["CHAP", "Mutual"]
        elif rpc_name == "mutual_chap":
            entry[3] = value == "Mutual"
        elif rpc_name == "chap_group":
            entry[3] = int(value.replace("AuthGroup", ""))
        elif kind == bool:
            entry[3] = value in ("yes", "true", "Yes")
        elif kind == "hex":
            # Hex masks are stored as decimal strings.
            entry[3] = str(int(value, 16))
        elif kind == int:
            entry[3] = int(value)
        elif kind == list:
            entry[3].append(value)
        elif kind == "dev_type":
            # Anything other than "blk" keeps the default (scsi).
            if value.lower() == "blk":
                entry[3] = "blk"
        else:
            entry[3] = kind(value.replace("\"", ""))
+
+
def to_json_params(params):
    """Collect the non-None parameter values into an {rpc_name: value} dict."""
    return {entry[1]: entry[3] for entry in params if entry[3] is not None}
+
+
def get_bdev_options_json(config, section):
    """Build the bdev_set_options RPC entry from the [Bdev] section."""
    opts = [
        ["BdevIoPoolSize", "bdev_io_pool_size", int, 65536],
        ["BdevIoCacheSize", "bdev_io_cache_size", int, 256]
    ]
    for name in config.options("Bdev"):
        set_param(opts, name, config.get("Bdev", name))

    return [{"params": to_json_params(opts), "method": "bdev_set_options"}]
+
+
def get_aio_bdev_json(config, section):
    """Convert [AIO] section lines into bdev_aio_create RPC entries.

    Each line of the "AIO" option is "<filename> <bdev name> [block size]";
    when several "AIO" options exist, only the last one is used (matching
    the original converter's behavior).
    """
    lines = None
    for name in config.options("AIO"):
        if name == "AIO":
            lines = config.get("AIO", name).split("\n")
    if lines is None:
        return []

    result = []
    for line in lines:
        fields = re.findall(r"\S+", line)
        entry = {"filename": fields[0], "name": fields[1]}
        if len(fields) == 3:
            entry["block_size"] = int(fields[2])
        result.append({"params": entry, "method": "bdev_aio_create"})

    return result
+
+
def get_malloc_bdev_json(config, section):
    """Convert the [Malloc] section into bdev_malloc_create RPC entries.

    Creates NumberOfLuns bdevs named Malloc0..MallocN-1, each
    LunSizeInMB megabytes large with BlockSize-byte blocks.
    """
    malloc_json = []
    params = [
        ['NumberOfLuns', '', int, -1],
        ['LunSizeInMB', '', int, 20],
        ['BlockSize', '', int, 512]
    ]
    for option in config.options("Malloc"):
        set_param(params, option, config.get("Malloc", option))
    num_luns, lun_size_mb, block_size = params[0][3], params[1][3], params[2][3]
    for lun in range(0, num_luns):
        malloc_json.append({
            "params": {
                "block_size": block_size,
                # Fix: use integer division. Under Python 3 "/" produced a
                # float (e.g. 40960.0), which is not a valid integer block
                # count for the bdev_malloc_create RPC.
                "num_blocks": lun_size_mb * 1024 * 1024 // block_size,
                "name": "Malloc%s" % lun
            },
            "method": "bdev_malloc_create"
        })

    return malloc_json
+
+
def get_nvme_bdev_json(config, section):
    """Convert the [Nvme] section into NVMe RPC entries.

    TransportID lines become bdev_nvme_attach_controller entries; the
    remaining options are split between bdev_nvme_set_hotplug
    (params[5:7]) and bdev_nvme_set_options (params[0:5]).
    """
    # Note: "AdminPollRate" intentionally appears twice - it feeds both
    # the options poll period and the hotplug poll period.
    params = [
        ["RetryCount", "retry_count", int, 4],
        ["TimeoutuSec", "timeout_us", int, 0],
        ["AdminPollRate", "nvme_adminq_poll_period_us", int, 1000000],
        ["ActionOnTimeout", "action_on_timeout", str, "none"],
        ["IOPollRate", "nvme_ioq_poll_period_us", int, 0],
        ["HotplugEnable", "enable", bool, False],
        ["AdminPollRate", "period_us", int, 1000]
    ]
    nvme_json = []
    for option in config.options("Nvme"):
        value = config.get("Nvme", option)
        if "TransportID" == option:
            # Line shape: TransportID "trtype:... traddr:..." <name>;
            # the last whitespace token is the controller name.
            entry = re.findall(r"\S+", value)
            nvme_name = entry[-1]
            trtype = re.findall(r"trtype:\S+", value)
            if trtype:
                trtype = trtype[0].replace("trtype:", "").replace("\"", "")
            traddr = re.findall(r"traddr:\S+", value)
            if traddr:
                traddr = traddr[0].replace("traddr:", "").replace("\"", "")
            nvme_json.append({
                "params": {
                    "trtype": trtype,
                    "name": nvme_name,
                    "traddr": traddr
                },
                "method": "bdev_nvme_attach_controller"
            })
        else:
            set_param(params, option, value)
    # The RPC expects a lower-case action name ("none"/"reset"/"abort").
    params[3][3] = params[3][3].lower()
    # NOTE(review): hotplug period is scaled by 100 here - presumably a
    # unit conversion inherited from the legacy config; confirm against
    # bdev_nvme_set_hotplug expectations.
    params[6][3] = params[6][3] * 100
    nvme_json.append({
        "params": to_json_params(params[5:7]),
        "method": "bdev_nvme_set_hotplug"
    })
    nvme_json.append({
        "params": to_json_params(params[0:5]),
        "method": "bdev_nvme_set_options"
    })
    return nvme_json
+
+
def get_pmem_bdev_json(config, section):
    """Convert [Pmem] "Blk <pmem file> <bdev name>" lines into
    bdev_pmem_create RPC entries."""
    result = []
    for name in config.options(section):
        if name != "Blk":
            continue
        for line in config.get(section, name).split("\n"):
            fields = re.findall(r"\S+", line)
            result.append({
                "params": {
                    "name": fields[1],
                    "pmem_file": fields[0]
                },
                "method": "bdev_pmem_create"
            })

    return result
+
+
def get_split_bdev_json(config, section):
    """Convert the [Split] section into bdev_split_create RPC entries.

    Each "Split" line is "<base bdev> <split count> [split size MB]";
    a missing size means 0 (auto-split the whole bdev evenly).
    """
    split_json = []
    value = []
    for option in config.options("Split"):
        if "Split" == option:
            value = config.get("Split", option)
    # A single config value arrives as a plain string; duplicated options
    # may already be a list (see OptionOrderedDict).
    if value and not isinstance(value, list):
        value = [value]
    for split in value:
        items = re.findall(r"\S+", split)
        split_size_mb = 0
        base_bdev = items[0]
        split_count = int(items[1])
        if len(items) == 3:
            # Fix: convert to int - the RPC expects a numeric size, but the
            # original code emitted the raw string from the config file.
            split_size_mb = int(items[2])
        split_json.append({
            "params": {
                "base_bdev": base_bdev,
                "split_size_mb": split_size_mb,
                "split_count": split_count
            },
            "method": "bdev_split_create"
        })

    return split_json
+
+
def get_nvmf_options_json(config, section):
    """Build nvmf_set_config and nvmf_set_max_subsystems RPC entries
    from the [Nvmf] section."""
    opts = [
        ["AcceptorPollRate", "acceptor_poll_rate", int, 10000],
        ["MaxSubsystems", "max_subsystems", int, 1024]
    ]
    for name in config.options("Nvmf"):
        set_param(opts, name, config.get("Nvmf", name))

    return [
        {"params": to_json_params([opts[0]]), "method": "nvmf_set_config"},
        {"params": to_json_params(opts[1:]), "method": "nvmf_set_max_subsystems"},
    ]
+
+
def get_nvmf_subsystem_json(config, section):
    """Convert one [SubsystemX] section into NVMe-oF subsystem RPCs.

    Emits nvmf_create_subsystem first, then one
    nvmf_subsystem_add_listener / add_host / add_ns entry per Listen,
    Host and Namespace line.
    """
    nvmf_subsystem_methods = []
    params = [
        # Last items are default values if given entry is not set
        ["Host", "hosts", list, []],
        ["NQN", "nqn", str, ""],
        ["AllowAnyHost", "allow_any_host", bool, False],
        ["SN", "serial_number", str, "00000000000000000000"],
        ["MN", "model_number", str, "SPDK bdev Controller"],
        ["MaxNamespaces", "max_namespaces", str, ""],
    ]
    listen_address = []
    namespaces = []
    nsid = 0
    searched_items = [param[0] for param in params]
    for option in config.options(section):
        value = config.get(section, option)
        if option in searched_items:
            set_param(params, option, value)
            continue
        if "Listen" == option:
            # "Listen <trtype> <addr>:<svcid>"; more than two colons in
            # the address means a bracketed IPv6 literal.
            items = re.findall(r"\S+", value)
            adrfam = "IPv4"
            if len(items[1].split(":")) > 2:
                adrfam = "IPv6"
            listen_address.append({
                "trtype": items[0],
                "adrfam": adrfam,
                "trsvcid": items[1].rsplit(":", 1)[-1],
                "traddr": items[1].rsplit(":", 1)[0].replace(
                    "]", "").replace("[", "")
            })
        if "Namespace" == option:
            # "Namespace <bdev> [nsid]"; nsids auto-increment when omitted.
            for item in value.split("\n"):
                items = re.findall(r"\S+", item)
                if len(items) == 2:
                    nsid = items[1]
                else:
                    nsid += 1
                namespaces.append({
                    "nsid": int(nsid),
                    "bdev_name": items[0],
                })
    # Get parameters: nqn, allow_any_host, serial_number
    # for nvmf_create_subsystem rpc method
    parameters = to_json_params(params[1:5])
    nvmf_subsystem_methods.append({
        "params": parameters,
        "method": "nvmf_create_subsystem"
    })
    for listen in listen_address:
        nvmf_subsystem_methods.append({
            "params": {
                "listen_address": listen,
                "nqn": parameters['nqn']
            },
            "method": "nvmf_subsystem_add_listener"
        })
    for host in to_json_params([params[0]])['hosts']:
        nvmf_subsystem_methods.append({
            "params": {
                "host": host,
                "nqn": parameters['nqn']
            },
            "method": "nvmf_subsystem_add_host"
        })
    for namespace in namespaces:
        nvmf_subsystem_methods.append({
            "params": {
                "namespace": namespace,
                "nqn": parameters['nqn']
            },
            "method": "nvmf_subsystem_add_ns"
        })

    # Define max_namespaces if it is set in old config
    if params[5][3]:
        nvmf_subsystem_methods[0]['params']['max_namespaces'] = int(params[5][3])

    return nvmf_subsystem_methods
+
+
def get_vhost_scsi_json(config, section):
    """Convert one [VhostScsiX] section into vhost SCSI RPC entries.

    Emits vhost_create_scsi_controller followed by one
    vhost_scsi_controller_add_target entry per "Target" line.
    """
    params = [
        ["Name", "ctrlr", str, None],
        ["Cpumask", "cpumask", "hex", "1"],
    ]
    targets = []
    vhost_scsi_json = []
    for option in config.options(section):
        value = config.get(section, option)
        if option in ["Name", "Cpumask"]:
            set_param(params, option, value)
        if "Target" == option:
            # "Target <scsi target num> <bdev name>". Note: this relies on
            # the Name option having been seen before the Target lines,
            # since params[0][3] is read here.
            for item in value.split("\n"):
                items = re.findall(r"\S+", item)
                targets.append({
                    "scsi_target_num": int(items[0]),
                    "ctrlr": params[0][3],
                    "bdev_name": items[1]
                })
    vhost_scsi_json.append({
        "params": to_json_params(params),
        "method": "vhost_create_scsi_controller"
    })
    for target in targets:
        vhost_scsi_json.append({
            "params": target,
            "method": "vhost_scsi_controller_add_target"
        })

    return vhost_scsi_json
+
+
def get_vhost_blk_json(config, section):
    """Build a vhost_create_blk_controller RPC entry from a [VhostBlkX] section."""
    opts = [
        ["ReadOnly", "readonly", bool, False],
        ["Dev", "dev_name", str, ""],
        ["Name", "ctrlr", str, ""],
        ["Cpumask", "cpumask", "hex", ""]
    ]
    for name in config.options(section):
        set_param(opts, name, config.get(section, name))

    return [{"method": "vhost_create_blk_controller",
             "params": to_json_params(opts)}]
+
+
def get_vhost_nvme_json(config, section):
    """Convert one [VhostNvmeX] section into vhost NVMe RPC entries.

    Emits vhost_create_nvme_controller followed by one
    vhost_nvme_controller_add_ns entry per Namespace line.
    """
    params = [
        ["Name", "ctrlr", str, ""],
        ["NumberOfQueues", "io_queues", int, -1],
        ["Cpumask", "cpumask", "hex", 0x1],
        ["Namespace", "bdev_name", list, []]
    ]
    for option in config.options(section):
        # Multi-line options (Namespace) are split so each line is fed to
        # set_param separately and accumulated into the list slot.
        values = config.get(section, option).split("\n")
        for value in values:
            set_param(params, option, value)
    vhost_nvme_json = []
    vhost_nvme_json.append({
        "params": to_json_params(params[:3]),
        "method": "vhost_create_nvme_controller"
    })
    for namespace in params[3][3]:
        vhost_nvme_json.append({
            "params": {
                "ctrlr": params[0][3],
                "bdev_name": namespace,
            },
            "method": "vhost_nvme_controller_add_ns"
        })

    return vhost_nvme_json
+
+
def get_virtio_user_json(config, section):
    """Convert one [VirtioUserX] section into a
    bdev_virtio_attach_controller RPC entry."""
    params = [
        ["Path", "traddr", str, ""],
        ["Queues", "vq_count", int, 1],
        ["Type", "dev_type", "dev_type", "scsi"],
        ["Name", "name", str, section],
        # Define parameters with default values.
        # These params are set by rpc commands and
        # do not occur in ini config file.
        # But they are visible in json config file
        # with default values even if not set by rpc.
        [None, "trtype", str, "user"],
        [None, "vq_size", int, 512]
    ]
    for option in config.options(section):
        set_param(params, option, config.get(section, option))
    # Rename e.g. "VirtioUser0" to "VirtioScsi0"/"VirtioBlk0" to match
    # the naming the RPC path would have produced.
    dev_name = "Scsi"
    if params[2][3] == "blk":
        dev_name = "Blk"
    params[3][3] = params[3][3].replace("User", dev_name)

    return [{
        "params": to_json_params(params),
        "method": "bdev_virtio_attach_controller"
    }]
+
+
def get_iscsi_options_json(config, section):
    """Build the iscsi_set_options RPC entry from the [iSCSI] section.

    Note: DiscoveryAuthMethod feeds three distinct CHAP flags; each is
    decoded from the same ini value by set_param().
    """
    opts = [
        ['AllowDuplicateIsid', 'allow_duplicated_isid', bool, False],
        ['DefaultTime2Retain', 'default_time2retain', int, 20],
        ['DiscoveryAuthMethod', 'mutual_chap', bool, False],
        ['MaxConnectionsPerSession', 'max_connections_per_session', int, 2],
        ['Timeout', 'nop_timeout', int, 60],
        ['DiscoveryAuthMethod', 'disable_chap', bool, False],
        ['DiscoveryAuthMethod', 'require_chap', bool, False],
        ['NodeBase', 'node_base', str, "iqn.2016-06.io.spdk"],
        ['AuthFile', 'auth_file', str, None],
        ['DiscoveryAuthGroup', 'chap_group', int, 0],
        ['MaxSessions', 'max_sessions', int, 128],
        ['ImmediateData', 'immediate_data', bool, True],
        ['ErrorRecoveryLevel', 'error_recovery_level', int, 0],
        ['NopInInterval', 'nop_in_interval', int, 30],
        ['DefaultTime2Wait', 'default_time2wait', int, 2],
        ['QueueDepth', 'max_queue_depth', int, 64],
        ['', 'first_burst_length', int, 8192]
    ]
    for name in config.options(section):
        set_param(opts, name, config.get(section, name))

    return [{"method": "iscsi_set_options", "params": to_json_params(opts)}]
+
+
def get_iscsi_portal_group_json(config, name):
    """Convert a [PortalGroupX] section into an iscsi_create_portal_group RPC.

    Each "Portal" line is "<label> <host>:<port>[@<cpumask>]"; the group
    tag is the number embedded in the section name.
    """
    portals = []
    for option in config.options(name):
        if option != "Portal":
            continue
        for line in config.get(name, option).split("\n"):
            fields = re.findall(r"\S+", line)
            addr = fields[1]
            host = addr.rsplit(":", 1)[0]
            port = addr.rsplit(":", 1)[1]
            if "@" in addr:
                # Strip a trailing "@<cpumask>" suffix from the port.
                port = port.split("@")[0]
            portals.append({"host": host, "port": port})

    return [{
        "params": {
            "portals": portals,
            "tag": int(re.findall(r'\d+', name)[0])
        },
        "method": "iscsi_create_portal_group"
    }]
+
+
def get_iscsi_initiator_group_json(config, name):
    """Convert an [InitiatorGroupX] section into an
    iscsi_create_initiator_group RPC; the tag is the number embedded in
    the section name."""
    options = config.options(name)
    initiators = [config.get(name, o) for o in options if o == "InitiatorName"]
    netmasks = [config.get(name, o) for o in options if o == "Netmask"]

    return [{
        "params": {
            "initiators": initiators,
            "tag": int(re.findall(r'\d+', name)[0]),
            "netmasks": netmasks
        },
        "method": "iscsi_create_initiator_group"
    }]
+
+
def get_iscsi_target_node_json(config, section):
    """Convert a [TargetNodeX] section into an iscsi_create_target_node RPC.

    Decodes target name/alias, PG<->IG mappings, CHAP auth flags,
    digest settings, LUNs and queue depth from the legacy options.
    """
    luns = []
    mutual_chap = False
    name = ""
    alias_name = ""
    require_chap = False
    chap_group = 1
    pg_ig_maps = []
    data_digest = False
    disable_chap = False
    header_digest = False
    queue_depth = 64

    for option in config.options(section):
        value = config.get(section, option)
        if "TargetName" == option:
            name = value
        if "TargetAlias" == option:
            alias_name = value.replace("\"", "")
        if "Mapping" == option:
            # "Mapping PortalGroup<N> InitiatorGroup<M>" - tags are the
            # embedded numbers.
            items = re.findall(r"\S+", value)
            pg_ig_maps.append({
                "ig_tag": int(re.findall(r'\d+', items[1])[0]),
                "pg_tag": int(re.findall(r'\d+', items[0])[0])
            })
        if "AuthMethod" == option:
            # CHAP/Mutual accumulate; Auto/None reset all three flags.
            items = re.findall(r"\S+", value)
            for item in items:
                if "CHAP" == item:
                    require_chap = True
                elif "Mutual" == item:
                    mutual_chap = True
                elif "Auto" == item:
                    disable_chap = False
                    require_chap = False
                    mutual_chap = False
                elif "None" == item:
                    disable_chap = True
                    require_chap = False
                    mutual_chap = False
        if "AuthGroup" == option:  # AuthGroup1
            items = re.findall(r"\S+", value)
            chap_group = int(re.findall(r'\d+', items[0])[0])
        if "UseDigest" == option:
            items = re.findall(r"\S+", value)
            for item in items:
                if "Header" == item:
                    header_digest = True
                elif "Data" == item:
                    data_digest = True
                elif "Auto" == item:
                    header_digest = False
                    data_digest = False

        if re.match(r"LUN\d+", option):
            # LUN ids are assigned sequentially in option order, not from
            # the number in the option name.
            luns.append({"lun_id": len(luns),
                         "bdev_name": value})
        if "QueueDepth" == option:
            queue_depth = int(value)

    params = {"alias_name": alias_name}
    params["name"] = "iqn.2016-06.io.spdk:%s" % name
    params["luns"] = luns
    params["pg_ig_maps"] = pg_ig_maps
    params["queue_depth"] = queue_depth
    params["chap_group"] = chap_group
    params["header_digest"] = header_digest
    params["mutual_chap"] = mutual_chap
    params["require_chap"] = require_chap
    params["data_digest"] = data_digest
    params["disable_chap"] = disable_chap

    target_json = {
        "params": params,
        "method": "iscsi_create_target_node"
    }

    return [target_json]
+
+
if __name__ == "__main__":
    # Read a legacy .ini config on stdin, dispatch each section to its
    # converter, then dump the equivalent JSON config on stdout.
    try:
        # Legacy configs use a space as the key/value delimiter and may
        # repeat options; OptionOrderedDict accumulates the repeats.
        config = configparser.ConfigParser(strict=False, delimiters=(' '),
                                           dict_type=OptionOrderedDict,
                                           allow_no_value=True)
        # Do not parse options and values. Capital letters are relevant.
        config.optionxform = str
        config.read_file(sys.stdin)
    except Exception as e:
        print("Exception while parsing config: %s" % e)
        exit(1)
    # Add missing sections to generate default configuration
    for section in ['Nvme', 'Nvmf', 'Bdev', 'iSCSI']:
        if section not in config.sections():
            config.add_section(section)

    for section in config.sections():
        match = re.match(r'(Bdev|Nvme|Malloc|VirtioUser\d+|Split|Pmem|AIO|'
                         r'iSCSI|PortalGroup\d+|InitiatorGroup\d+|'
                         r'TargetNode\d+|Nvmf|Subsystem\d+|VhostScsi\d+|'
                         r'VhostBlk\d+|VhostNvme\d+)', section)
        if match:
            # Strip the numeric suffix to dispatch on the section family.
            match_section = ''.join(letter for letter in match.group(0)
                                    if not letter.isdigit())
            if match_section == "Bdev":
                items = get_bdev_options_json(config, section)
            elif match_section == "AIO":
                items = get_aio_bdev_json(config, section)
            elif match_section == "Malloc":
                items = get_malloc_bdev_json(config, section)
            elif match_section == "Nvme":
                items = get_nvme_bdev_json(config, section)
            elif match_section == "Pmem":
                items = get_pmem_bdev_json(config, section)
            elif match_section == "Split":
                items = get_split_bdev_json(config, section)
            elif match_section == "Nvmf":
                items = get_nvmf_options_json(config, section)
            elif match_section == "Subsystem":
                items = get_nvmf_subsystem_json(config, section)
            elif match_section == "VhostScsi":
                items = get_vhost_scsi_json(config, section)
            elif match_section == "VhostBlk":
                items = get_vhost_blk_json(config, section)
            elif match_section == "VhostNvme":
                items = get_vhost_nvme_json(config, section)
            elif match_section == "VirtioUser":
                items = get_virtio_user_json(config, section)
            elif match_section == "iSCSI":
                items = get_iscsi_options_json(config, section)
            elif match_section == "PortalGroup":
                items = get_iscsi_portal_group_json(config, section)
            elif match_section == "InitiatorGroup":
                items = get_iscsi_initiator_group_json(config, section)
            elif match_section == "TargetNode":
                items = get_iscsi_target_node_json(config, section)
            for item in items:
                # VhostScsi/VhostNvme/Subsystem items are grouped under a
                # single bucket instead of being keyed per RPC method.
                if match_section == "VhostScsi":
                    section_to_subsystem[match_section]["vhost_create_scsi_controller"].append(item)
                elif match_section == "VhostNvme":
                    section_to_subsystem[match_section]["vhost_create_nvme_controller"].append(item)
                elif match_section == "Subsystem":
                    section_to_subsystem[match_section]["subsystems"].append(item)
                else:
                    section_to_subsystem[match_section][
                        item['method']].append(item)
        elif section == "Global":
            pass
        elif section == "Ioat":
            # Ioat doesn't support JSON config yet.
            pass
        elif section == "VirtioPci":
            print("Please use spdk target flags.")
            exit(1)
        else:
            print("An invalid section detected: %s.\n"
                  "Please revise your config file." % section)
            exit(1)
    json.dump(generate_new_json_config(), sys.stdout, indent=2)
    print("")
diff --git a/src/spdk/scripts/detect_cc.sh b/src/spdk/scripts/detect_cc.sh
new file mode 100755
index 000000000..a05ff9adf
--- /dev/null
+++ b/src/spdk/scripts/detect_cc.sh
@@ -0,0 +1,186 @@
+#!/usr/bin/env bash
+
+set -e
+
# Print all arguments, space-joined, as one line on stderr.
# Fix: use printf instead of echo - echo would swallow a leading "-n"/"-e"
# argument and mangle backslash escapes.
function err() {
	printf '%s\n' "$*" >&2
}
+
# Print the help text on stderr (via err) and return.
function usage() {
	err "Detect compiler and linker versions, generate mk/cc.mk"
	err ""
	err "Usage: ./detect_cc.sh [OPTION]..."
	err ""
	err "Defaults for the options are specified in brackets."
	err ""
	err "General:"
	err " -h, --help                Display this help and exit"
	err " --cc=path                 C compiler to use"
	err " --cxx=path                C++ compiler to use"
	err " --ld=path                 Linker to use"
	err " --lto=[y|n]               Attempt to configure for LTO"
	err " --cross-prefix=prefix     Use the given prefix for the cross compiler toolchain"
}
+
# Parse command-line options; values only override the environment when
# non-empty.
for i in "$@"; do
	case "$i" in
		-h | --help)
			usage
			exit 0
			;;
		--cc=*)
			if [[ -n "${i#*=}" ]]; then
				CC="${i#*=}"
			fi
			;;
		--cxx=*)
			if [[ -n "${i#*=}" ]]; then
				CXX="${i#*=}"
			fi
			;;
		--lto=*)
			if [[ -n "${i#*=}" ]]; then
				LTO="${i#*=}"
			fi
			;;
		--ld=*)
			if [[ -n "${i#*=}" ]]; then
				LD="${i#*=}"
			fi
			;;
		--cross-prefix=*)
			if [[ -n "${i#*=}" ]]; then
				CROSS_PREFIX="${i#*=}"
			fi
			;;
		--)
			break
			;;
		*)
			err "Unrecognized option $i"
			usage
			exit 1
			;;
	esac
done

OS=$(uname)

# Fall back to defaults for anything not set by the environment or the
# command line (":" keeps set -u happy while assigning).
: ${CC=cc}
: ${CXX=c++}
: ${LD=}
: ${LTO=n}
: ${CROSS_PREFIX=}

# Pick a platform-appropriate default linker when none was requested.
if [ -z "$LD" ]; then
	if [ "$OS" = "Linux" ]; then
		LD=ld
	fi
	if [ "$OS" = "FreeBSD" ]; then
		LD=ld.lld
	fi
fi

# Identify the compiler family (e.g. "gcc"/"clang") from the first
# "<word> version" token of `-v` output; warn when C and C++ differ.
CC_TYPE=$($CC -v 2>&1 | grep -o -E '\w+ version' | head -1 | awk '{ print $1 }')
CXX_TYPE=$($CXX -v 2>&1 | grep -o -E '\w+ version' | head -1 | awk '{ print $1 }')
if [ "$CC_TYPE" != "$CXX_TYPE" ]; then
	err "C compiler is $CC_TYPE but C++ compiler is $CXX_TYPE"
	err "This may result in errors"
fi

# Normalize the linker flavor to bfd/gold/lld from its --version banner.
LD_TYPE=$($LD --version 2>&1 | head -n1 | awk '{print $1, $2}')
case "$LD_TYPE" in
	"GNU ld"*)
		LD_TYPE=bfd
		;;
	"GNU gold"*)
		LD_TYPE=gold
		;;
	"LLD"*)
		LD_TYPE=lld
		;;
	*)
		err "Unsupported linker: $LD"
		exit 1
		;;
esac

# LTO needs an archiver that understands LTO objects: llvm-ar for clang
# (which also requires the gold linker here), gcc-ar otherwise.
CCAR="ar"
if [ "$LTO" = "y" ]; then
	if [ "$CC_TYPE" = "clang" ]; then
		if [ "$LD_TYPE" != "gold" ]; then
			err "Using LTO with clang requires the gold linker."
			exit 1
		fi
		CCAR="llvm-ar"
	else
		CCAR="gcc-ar"
	fi
fi

# When cross compiling, verify CC/CXX actually target CROSS_PREFIX; try
# prepending the prefix to the tool name before giving up.
if [ -n "$CROSS_PREFIX" ]; then
	expected_prefix=$($CC -dumpmachine)

	if [ ! "$expected_prefix" = "$CROSS_PREFIX" ]; then
		err "Cross prefix specified ($CROSS_PREFIX) does not match prefix of $CC ($expected_prefix)."

		# Try to fix this automatically. Maybe the user set CROSS_PREFIX but not CC.
		CC=$CROSS_PREFIX-$CC
		if hash $CC 2> /dev/null; then
			expected_prefix=$($CC -dumpmachine)

			if [ "$expected_prefix" = "$CROSS_PREFIX" ]; then
				err "Automatically changed CC to $CC"
			else
				err "Set CC to the appropriate compiler."
				exit 1
			fi
		else
			err "Set CC to the appropriate compiler."
			exit 1
		fi

	fi

	expected_prefix=$($CXX -dumpmachine)

	if [ ! "$expected_prefix" = "$CROSS_PREFIX" ]; then
		err "Cross prefix specified ($CROSS_PREFIX) does not match prefix of $CXX ($expected_prefix)."

		# Try to fix this automatically. Maybe the user set CROSS_PREFIX but not CXX.
		CXX=$CROSS_PREFIX-$CXX
		if hash $CXX 2> /dev/null; then
			expected_prefix=$($CXX -dumpmachine)

			if [ "$expected_prefix" = "$CROSS_PREFIX" ]; then
				err "Automatically changed CXX to $CXX"
			else
				err "Set CXX to the appropriate compiler."
				exit 1
			fi
		else
			err "Set CXX to the appropriate compiler."
			exit 1
		fi
	fi
fi
+
# Emit a make variable with an overridable default: DEFAULT_<name> always
# holds the detected value, and <name> is only assigned when make would
# otherwise use its built-in default.
function set_default() {
	local var=$1 value=$2

	printf 'DEFAULT_%s=%s\n' "$var" "$value"
	printf 'ifeq ($(origin %s),default)\n' "$var"
	printf '%s=%s\n' "$var" "$value"
	printf 'endif\n'
	printf '\n'
}
+
# Emit the mk/cc.mk contents on stdout: overridable defaults first, then
# the fixed tool/type variables.
set_default CC "$CC"
set_default CXX "$CXX"
set_default LD "$LD"

echo "CCAR=$CCAR"
echo "CC_TYPE=$CC_TYPE"
echo "LD_TYPE=$LD_TYPE"

if [ -n "$CROSS_PREFIX" ]; then
	echo "CROSS_PREFIX=$CROSS_PREFIX"
fi
diff --git a/src/spdk/scripts/dpdk_mem_info.py b/src/spdk/scripts/dpdk_mem_info.py
new file mode 100755
index 000000000..2039a5932
--- /dev/null
+++ b/src/spdk/scripts/dpdk_mem_info.py
@@ -0,0 +1,406 @@
+#!/usr/bin/env python3
+
+import argparse
+import os
+from enum import Enum
+
+
class memory:
    """Top-level container for parsed DPDK memory statistics.

    Holds the total DPDK memory size plus the heaps, mempools and
    memzones parsed from the stats dump, and cross-links/summarizes
    them.
    """

    def __init__(self, size):
        self.size = size
        self.heaps = []
        self.mempools = []
        self.memzones = []

    def get_size(self):
        return self.size

    def add_mempool(self, pool):
        self.mempools.append(pool)

    def add_memzone(self, zone):
        self.memzones.append(zone)

    def add_heap(self, heap):
        self.heaps.append(heap)

    def get_total_heap_size(self):
        # Sum of all heap sizes, in bytes.
        size = 0
        for heap in self.heaps:
            size = size + heap.size
        return size

    def get_total_mempool_size(self):
        # A mempool's size is the sum of its backing memzones.
        size = 0
        for pool in self.mempools:
            size = size + pool.get_memzone_size_sum()
        return size

    def get_total_memzone_size(self):
        # Only memzones not claimed by a mempool remain in self.memzones
        # after associate_memzones_and_mempools().
        size = 0
        for zone in self.memzones:
            size = size + zone.size
        return size

    def print_summary(self):
        """Print size-sorted overviews of heaps, mempools and memzones."""
        print("DPDK memory size {} in {} heap(s)"
              .format(B_to_MiB(self.size), len(self.heaps)))
        print("{} heaps totaling size {}".format(len(self.heaps), B_to_MiB(self.get_total_heap_size())))
        for x in sorted(self.heaps, key=lambda x: x.size, reverse=True):
            x.print_summary('   ')
        print("end heaps----------")
        print("{} mempools totaling size {}".format(len(self.mempools), B_to_MiB(self.get_total_mempool_size())))
        for x in sorted(self.mempools, key=lambda x: x.get_memzone_size_sum(), reverse=True):
            x.print_summary('   ')
        print("end mempools-------")
        print("{} memzones totaling size {}".format(len(self.memzones), B_to_MiB(self.get_total_memzone_size())))
        for x in sorted(self.memzones, key=lambda x: x.size, reverse=True):
            x.print_summary('   ')
        print("end memzones-------")

    def print_heap_summary(self, heap_id):
        """Print detailed stats for one heap, by id."""
        for heap in self.heaps:
            if heap_id == heap.id:
                heap.print_detailed_stats()
                break
        else:
            print("heap id {} is invalid. please see the summary for valid heaps.\n".format(heap_id))

    def print_mempool_summary(self, name):
        """Print detailed stats for one mempool, by name."""
        for pool in self.mempools:
            if name == pool.name:
                pool.print_detailed_stats()
                break
        else:
            print("mempool name {} is invalid. please see the summary for valid mempools.\n".format(name))

    def print_memzone_summary(self, name):
        """Print detailed stats for one memzone, by name."""
        for zone in self.memzones:
            if name == zone.name:
                zone.print_detailed_stats("")
                break
        else:
            print("memzone name {} is invalid. please see the summary for valid memzone.\n".format(name))

    def associate_heap_elements_and_memzones(self):
        """Attach each memzone to the heap element that contains it.

        Matching elements move from busy_malloc_elements to
        busy_memzone_elements.
        """
        for zone in self.memzones:
            for heap_obj in self.heaps:
                # Fix: iterate over a snapshot. The original code removed
                # elements from the very list it was iterating, which made
                # the loop skip the element following every match.
                for element in list(heap_obj.busy_malloc_elements):
                    if element.check_memzone_compatibility(zone):
                        heap_obj.busy_memzone_elements.append(element)
                        heap_obj.busy_malloc_elements.remove(element)

    def associate_memzones_and_mempools(self):
        """Claim memzones for the mempools they back (by name substring)."""
        for pool in self.mempools:
            for zone in self.memzones:
                if pool.name in zone.name:
                    pool.add_memzone(zone)

        # Claimed memzones are removed from the global list so the
        # summary does not double-count them.
        for pool in self.mempools:
            for zone in pool.memzones:
                if zone in self.memzones:
                    self.memzones.remove(zone)
+
+
# Allocation state of a heap element, as reported in the DPDK stats dump.
class heap_elem_status(Enum):
    FREE = 0
    BUSY = 1
+
+
class heap_element:
    """One allocation element inside a DPDK malloc heap."""

    def __init__(self, size, status, addr):
        # status is a heap_elem_status value; memzone is attached later by
        # memory.associate_heap_elements_and_memzones().
        self.status = status
        self.size = size
        self.addr = addr
        self.memzone = None

    def print_summary(self, header):
        line = "{}element at address: {} with size: {:>15}"
        print(line.format(header, hex(self.addr), B_to_MiB(self.size)))

    def check_memzone_compatibility(self, memzone):
        """Attach and report whether this element fully contains memzone."""
        element_end = self.addr + self.size
        zone_end = memzone.address + memzone.size
        if self.addr <= memzone.address and element_end >= zone_end:
            self.memzone = memzone
            return True
        return False
+
+
class heap:
    """One DPDK malloc heap and its elements.

    Elements are partitioned into free, plain-malloc and memzone-backed
    lists; the last is filled in later by
    memory.associate_heap_elements_and_memzones().
    """

    def __init__(self, id, size, num_allocations):
        self.id = id
        self.size = size
        self.num_allocations = num_allocations
        self.free_elements = []
        self.busy_malloc_elements = []
        self.busy_memzone_elements = []

    def add_element(self, element):
        # New elements start out as free or plain malloc; memzone
        # association happens later.
        if element.status == heap_elem_status.FREE:
            self.free_elements.append(element)
        else:
            self.busy_malloc_elements.append(element)

    def print_element_stats(self, list_to_print, list_type, header):
        # Print one element list, largest first, with any attached memzone.
        print("{}list of {} elements. size: {}".format(header, list_type, B_to_MiB(self.get_element_size(list_to_print))))
        for x in sorted(list_to_print, key=lambda x: x.size, reverse=True):
            x.print_summary("{}  ".format(header))
            if x.memzone is not None:
                x.memzone.print_summary("    {}associated memzone info: ".format(header))

    def get_element_size(self, list_to_check):
        # Total byte size of the given element list.
        size = 0
        for element in list_to_check:
            size = size + element.size
        return size

    def print_summary(self, header):
        print("{}size: {:>15} heap id: {}".format(header, B_to_MiB(self.size), self.id))

    def print_detailed_stats(self):
        print("heap id: {} total size: {} number of busy elements: {} number of free elements: {}"
              .format(self.id, B_to_MiB(self.size), len(self.busy_malloc_elements), len(self.free_elements)))
        self.print_element_stats(self.free_elements, "free", "  ")
        self.print_element_stats(self.busy_malloc_elements, "standard malloc", "  ")
        self.print_element_stats(self.busy_memzone_elements, "memzone associated", "  ")
+
+
class mempool:
    """A DPDK mempool together with the memzones that back its objects."""

    def __init__(self, name, num_objs, num_populated_objs, obj_size):
        self.name = name
        self.num_objs = num_objs
        self.num_populated_objs = num_populated_objs
        self.obj_size = obj_size
        # Filled in by memory.associate_memzones_and_mempools().
        self.memzones = []

    def add_memzone(self, memzone):
        self.memzones.append(memzone)

    def get_memzone_size_sum(self):
        """Total size of the backing memzones, in bytes."""
        return sum(zone.size for zone in self.memzones)

    def print_summary(self, header):
        print("{}size: {:>15} name: {}"
              .format(header, B_to_MiB(self.get_memzone_size_sum()), self.name))

    def print_detailed_stats(self):
        print("size: {:>15} name: {} comprised of {} memzone(s):"
              .format(B_to_MiB(self.get_memzone_size_sum()), self.name, len(self.memzones)))
        for zone in sorted(self.memzones, key=lambda z: z.size, reverse=True):
            zone.print_detailed_stats("  ")
+
+
class memzone:
    """A named DPDK memzone spanning one or more physical segments."""

    def __init__(self, name, size, address):
        self.name = name
        self.size = size
        self.address = address
        self.segments = []

    def add_segment(self, segment):
        self.segments.append(segment)

    def print_summary(self, header):
        line = "{}size: {:>15} name: {}"
        print(line.format(header, B_to_MiB(self.size), self.name))

    def print_detailed_stats(self, header):
        self.print_summary(header)
        print("{}located at address {}".format(header, hex(self.address)))
        print("{}spanning {} segment(s):".format(header, len(self.segments)))
        for seg in sorted(self.segments, key=lambda s: s.size, reverse=True):
            seg.print_summary('  ')
+
+
class segment:
    """A single contiguous memory segment backing part of a memzone."""

    def __init__(self, size, address):
        self.size = size
        self.address = address

    def print_summary(self, header):
        line = "{}address: {} length: {:>15}"
        print(line.format(header, hex(self.address), B_to_MiB(self.size)))
+
+
# States of the line-oriented parser in parse_mem_stats(); each value
# corresponds to one section of the DPDK stats dump.
class parse_state(Enum):
    PARSE_MEMORY_SIZE = 0
    PARSE_MEMZONES = 1
    PARSE_MEMZONE_SEGMENTS = 2
    PARSE_MEMPOOLS = 3
    PARSE_MEMPOOL_INFO = 4
    PARSE_HEAPS = 5
    PARSE_HEAP_ELEMENTS = 6
+
+
def B_to_MiB(raw_value):
    """Format a byte count as a fixed-precision MiB string."""
    mib = raw_value / (1024.0 * 1024.0)
    return "{:6f} {}".format(mib, "MiB")
+
+
def parse_zone(line):
    """Parse a "Zone N: name:<X>, len:0x..., virt:0x..., ..." dump line
    into a memzone object."""
    _zone_label, info = line.split(':', 1)
    name_part, len_part, addr_part, _rest = info.split(',', 3)

    _key, name = name_part.split(':', 1)
    name = name.replace("<", "").replace(">", "")
    _key, length = len_part.split(':', 1)
    _key, addr = addr_part.split(':', 1)

    # int(..., 0) accepts the 0x prefix used in the dump.
    return memzone(name, int(length, 0), int(addr, 0))
+
+
def parse_segment(line):
    """Parse an 'addr: 0x... iova: ... len: 0x... pagesz: ...' dump line
    into a segment object."""
    _, addr_field, _, len_field, _ = line.split(':')
    addr, _ = addr_field.strip().split(' ')
    length, _ = len_field.strip().split(' ')

    return segment(int(length, 0), int(addr, 0))
+
+
def parse_mempool_name(line):
    """Extract the pool name from a 'mempool <name>@0xADDR' dump line."""
    # Unpacking also validates the expected two whitespace-separated fields.
    head, tail = line.split()
    label, addr = line.split('@')
    label = label.replace("<", "").replace(">", "")
    prefix, pool_name = label.split()

    return pool_name
+
+
def parse_mem_stats(stat_path):
    """Parse an SPDK/DPDK memory-dump text file into a `memory` object.

    The file (produced by the env_dpdk_get_mem_stats RPC) is consumed with
    a line-oriented state machine: total size, then memzones with their
    segments, then mempools, then malloc heaps with their elements.
    """
    state = parse_state.PARSE_MEMORY_SIZE
    with open(stat_path, "r") as stats:

        line = stats.readline()
        while line != '':
            # Stage 1: find the total DPDK memory size header.
            if state == parse_state.PARSE_MEMORY_SIZE:
                if "DPDK memory size" in line:
                    mem_size = int(line.replace("DPDK memory size ", ""))
                    memory_struct = memory(mem_size)
                    state = parse_state.PARSE_MEMZONES
                line = stats.readline()

            # Stage 2: each "Zone" line opens a new memzone.
            if state == parse_state.PARSE_MEMZONES:
                if line.find("Zone") == 0:
                    zone = parse_zone(line)
                    state = parse_state.PARSE_MEMZONE_SEGMENTS
                line = stats.readline()

            # Stage 3: "addr:" lines are segments of the current zone; a new
            # "Zone" line or the mempool header closes it.
            if state == parse_state.PARSE_MEMZONE_SEGMENTS:
                if line.find("Zone") == 0:
                    memory_struct.add_memzone(zone)
                    state = parse_state.PARSE_MEMZONES
                    continue
                elif line.lstrip().find("addr:") == 0:
                    segment = parse_segment(line)
                    zone.add_segment(segment)
                elif "DPDK mempools." in line:
                    state = parse_state.PARSE_MEMPOOLS
                    continue
                line = stats.readline()

            # Stage 4: a "mempool" line starts collecting key=value fields.
            if state == parse_state.PARSE_MEMPOOLS:
                mempool_info = {}
                if line.find("mempool") == 0:
                    mempool_info['name'] = parse_mempool_name(line)
                    state = parse_state.PARSE_MEMPOOL_INFO
                line = stats.readline()

            # Stage 5: accumulate fields until the next mempool or the
            # malloc-stats header, then materialize the mempool object.
            if state == parse_state.PARSE_MEMPOOL_INFO:
                if line.find("mempool") == 0:
                    try:
                        new_mempool = mempool(mempool_info['name'], int(mempool_info['size'], 0),
                                              int(mempool_info['populated_size'], 0), int(mempool_info['total_obj_size'], 0))
                        memory_struct.add_mempool(new_mempool)
                    except KeyError:
                        print("proper key values not provided for mempool.")
                    state = parse_state.PARSE_MEMPOOLS
                    continue
                elif "cache" in line:
                    # Per-lcore cache lines carry no fields we track.
                    pass
                elif "DPDK malloc stats." in line:
                    try:
                        new_mempool = mempool(mempool_info['name'], int(mempool_info['size'], 0),
                                              int(mempool_info['populated_size'], 0), int(mempool_info['total_obj_size'], 0))
                        memory_struct.add_mempool(new_mempool)
                    except KeyError:
                        print("proper key values not provided for mempool.")
                    # Skip forward to the heap section header.
                    while "DPDK malloc heaps." not in line:
                        line = stats.readline()
                    state = parse_state.PARSE_HEAPS
                else:
                    try:
                        field, value = line.strip().split('=')
                        mempool_info[field] = value
                    except Exception as e:
                        # Lines that are not key=value pairs are ignored.
                        pass
                line = stats.readline()

            # Stage 6: heaps come as three consecutive lines
            # (id, size, allocation count); empty heaps are skipped.
            if state == parse_state.PARSE_HEAPS:
                trash, heap_id = line.strip().split(':')
                line = stats.readline()
                trash, heap_size = line.split(':')
                line = stats.readline()
                trash, num_allocations = line.split(':')
                if int(heap_size, 0) == 0:
                    pass
                else:
                    new_heap = heap(heap_id.lstrip(), int(heap_size, 0), int(num_allocations, 0))
                    memory_struct.add_heap(new_heap)
                    state = parse_state.PARSE_HEAP_ELEMENTS

                line = stats.readline()

            # Stage 7: malloc elements of the current heap; each element is
            # two lines (address+status, then length).
            if state == parse_state.PARSE_HEAP_ELEMENTS:
                if line.find("Heap id") == 0:
                    state = parse_state.PARSE_HEAPS
                    continue
                elif line.find("Malloc element at") == 0:
                    trash, address, status = line.rsplit(maxsplit=2)
                    line = stats.readline()
                    trash, length, trash = line.split(maxsplit=2)
                    line = stats.readline()
                    if "FREE" in status:
                        element = heap_element(int(length, 0), heap_elem_status.FREE, int(address, 0))
                    else:
                        element = heap_element(int(length, 0), heap_elem_status.BUSY, int(address, 0))
                    new_heap.add_element(element)
                line = stats.readline()

    # Cross-link the three views of memory before returning.
    memory_struct.associate_heap_elements_and_memzones()
    memory_struct.associate_memzones_and_mempools()
    return memory_struct
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description='Dumps memory stats for DPDK. If no arguments are provided, it dumps a general summary.')
+ parser.add_argument('-f', dest="stats_file", help='path to a dpdk memory stats file.', default='/tmp/spdk_mem_dump.txt')
+ parser.add_argument('-m', '--heap', dest="heap", help='Print detailed information about the given heap.', default=None)
+ parser.add_argument('-p', '--mempool', dest="mempool", help='Print detailed information about the given mempool.', default=None)
+ parser.add_argument('-z', '--memzone', dest="memzone", help='Print detailed information about the given memzone.', default=None)
+
+ args = parser.parse_args()
+
+ if not os.path.exists(args.stats_file):
+ print("Error, specified stats file does not exist. Please make sure you have run the"
+ "env_dpdk_get_mem_stats rpc on the spdk app you want to analyze.")
+ exit(1)
+
+ mem_info = parse_mem_stats(args.stats_file)
+
+ summary = True
+ if args.heap is not None:
+ mem_info.print_heap_summary(args.heap)
+ summary = False
+ if args.mempool is not None:
+ mem_info.print_mempool_summary(args.mempool)
+ summary = False
+ if args.memzone is not None:
+ mem_info.print_memzone_summary(args.memzone)
+ summary = False
+
+ if summary:
+ mem_info.print_summary()
diff --git a/src/spdk/scripts/eofnl b/src/spdk/scripts/eofnl
new file mode 100755
index 000000000..59544561d
--- /dev/null
+++ b/src/spdk/scripts/eofnl
@@ -0,0 +1,38 @@
#!/usr/bin/env bash
# Verify that a file ends in exactly one newline and has clean line endings,
# repairing simple problems (missing newline, CRLF endings, trailing
# whitespace) in place.  Exits non-zero whenever something was reported.

target="$1"

if [[ -z "$target" ]]; then
	echo "usage: $0 <file>"
	exit 1
fi

# Nothing to check for files that do not exist (e.g. deleted in a patch).
[[ -f "$target" ]] || exit 0

# Missing final newline: the last byte is not '\n', so the command
# substitution is non-empty.
if [[ $(tail -c1 "$target") ]]; then
	echo "$target: No newline at end of file"
	echo '' >> "$target"
	exit 1
fi

# Extra trailing newline: the last two bytes are both '\n', so the command
# substitution (which strips trailing newlines) is empty.
if [[ ! $(tail -c2 "$target") ]]; then
	echo "$target: Extra trailing newline"
	exit 1
fi

# Carriage returns anywhere mean DOS-style line endings.
if grep -q $'\r' "$target"; then
	echo "$target: DOS-style newlines"
	dos2unix "$target" &> /dev/null
	exit 1
fi

# Spaces or tabs immediately before a newline.
if grep -q $'[\t ]$' "$target"; then
	echo "$target: Trailing whitespace"
	sed -i $'s/[ \t]*$//' "$target"
	exit 1
fi

exit 0
diff --git a/src/spdk/scripts/fio.py b/src/spdk/scripts/fio.py
new file mode 100755
index 000000000..56816436a
--- /dev/null
+++ b/src/spdk/scripts/fio.py
@@ -0,0 +1,164 @@
+#!/usr/bin/env python3
+
+from subprocess import check_call, call, check_output, Popen, PIPE, CalledProcessError
+import re
+import sys
+import signal
+import os.path
+import time
+import argparse
+
+fio_template = """
+[global]
+thread=1
+invalidate=1
+rw=%(testtype)s
+time_based=1
+runtime=%(runtime)s
+ioengine=libaio
+direct=1
+bs=%(blocksize)d
+iodepth=%(iodepth)d
+norandommap=%(norandommap)d
+numjobs=%(numjobs)s
+%(verify)s
+verify_dump=1
+
+"""
+
+verify_template = """
+do_verify=1
+verify=crc32c-intel
+"""
+
+
+fio_job_template = """
+[job%(jobnumber)d]
+filename=%(device)s
+
+"""
+
+
def interrupt_handler(signum, frame):
    # SIGINT/SIGTERM handler: stop the global fio subprocess, then exit 0.
    fio.terminate()
    print("FIO terminated")
    sys.exit(0)
+
+
def main(io_size, protocol, queue_depth, test_type, runtime, num_jobs, verify):
    """Discover target block devices for `protocol`, tune them, generate a
    fio job file and run fio on it, exiting with fio's return code."""
    global fio

    if protocol == "nvmf":
        devices = get_nvmf_target_devices()
    elif protocol == "iscsi":
        devices = get_iscsi_target_devices()

    configure_devices(devices)
    try:
        fio_executable = check_output("which fio", shell=True).split()[0]
    except CalledProcessError as e:
        sys.stderr.write(str(e))
        sys.stderr.write("\nCan't find the fio binary, please install it.\n")
        sys.exit(1)

    device_paths = ['/dev/' + dev for dev in devices]
    print("Device paths:")
    print(device_paths)
    sys.stdout.flush()
    # Install handlers so Ctrl-C / kill tears down the fio child cleanly.
    signal.signal(signal.SIGTERM, interrupt_handler)
    signal.signal(signal.SIGINT, interrupt_handler)
    # Feed the generated job file to fio over stdin ('-').
    fio = Popen([fio_executable, '-'], stdin=PIPE)
    fio.communicate(create_fio_config(io_size, queue_depth, device_paths, test_type, runtime, num_jobs, verify).encode())
    fio.stdin.close()
    rc = fio.wait()
    print("FIO completed with code %d\n" % rc)
    sys.stdout.flush()
    sys.exit(rc)
+
+
def get_iscsi_target_devices():
    """Return the kernel scsi disk names (e.g. 'sda') of attached iSCSI sessions."""
    session_dump = check_output('iscsiadm -m session -P 3', shell=True).decode("ascii")
    return re.findall("Attached scsi disk (sd[a-z]+)", session_dump)
+
+
def get_nvmf_target_devices():
    """Return the kernel nvme namespace block devices (e.g. 'nvme0n1')."""
    block_listing = check_output('lsblk -l -o NAME', shell=True).decode()
    return re.findall("(nvme[0-9]+n[0-9]+)\n", block_listing)
+
+
def create_fio_config(size, q_depth, devices, test, run_time, num_jobs, verify):
    """Render the complete fio job-file text for the given workload."""
    # Verification implies a randommap (norandommap=0); otherwise skip it.
    if verify:
        verifyfio = verify_template
        norandommap = 0
    else:
        verifyfio = ""
        norandommap = 1
    config_text = fio_template % {"blocksize": size, "iodepth": q_depth,
                                  "testtype": test, "runtime": run_time,
                                  "norandommap": norandommap, "verify": verifyfio,
                                  "numjobs": num_jobs}
    for job_index, device in enumerate(devices):
        config_text += fio_job_template % {"jobnumber": job_index, "device": device}
    return config_text
+
+
def set_device_parameter(devices, filename_template, value):
    """Write `value` into a per-device sysfs attribute file.

    filename_template must contain a '%s' placeholder which is substituted
    with each device name.  Returns True if every write succeeded, False if
    any device's file could not be opened or rejected the value.
    """
    valid_value = True

    for dev in devices:
        filename = filename_template % dev
        # Bug fix: open() used to sit outside the try block, so a missing
        # sysfs file raised instead of returning False, and a rejected
        # write leaked the file handle (f.close() was inside the try after
        # the write).  A context manager closes the handle on every path.
        try:
            with open(filename, 'r+b') as f:
                f.write(value.encode())
        except OSError:
            valid_value = False
            continue

    return valid_value
+
+
def configure_devices(devices):
    """Tune kernel block-layer settings for each test device via sysfs:
    disable merges, raise nr_requests, maximize queue_depth, and select a
    no-op I/O scheduler."""

    for dev in devices:
        # Wait up to ~3s for the device's sysfs queue directory to appear
        # (devices may still be enumerating right after login/connect).
        retry = 30
        while retry > 0:
            if os.path.exists("/sys/block/%s/queue/nomerges" % dev):
                break
            else:
                retry = retry - 1
                time.sleep(0.1)

    set_device_parameter(devices, "/sys/block/%s/queue/nomerges", "2")
    set_device_parameter(devices, "/sys/block/%s/queue/nr_requests", "128")
    requested_qd = 128
    qd = requested_qd
    # Walk the queue depth down until the devices accept it.
    # NOTE(review): set_device_parameter() catches OSError internally, so it
    # is unclear this IOError path can ever trigger — verify.
    while qd > 0:
        try:
            set_device_parameter(devices, "/sys/block/%s/device/queue_depth", str(qd))
            break
        except IOError:
            qd = qd - 1
    if qd == 0:
        print("Could not set block device queue depths.")
    elif qd < requested_qd:
        print("Requested queue_depth {} but only {} is supported.".format(str(requested_qd), str(qd)))
    # Newer kernels (blk-mq) call the no-op scheduler "none" instead of "noop".
    if not set_device_parameter(devices, "/sys/block/%s/queue/scheduler", "noop"):
        set_device_parameter(devices, "/sys/block/%s/queue/scheduler", "none")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="fio.py")
+ parser.add_argument("-i", "--io-size", type=int, help="The desired I/O size in bytes.", required=True)
+ parser.add_argument("-p", "--protocol", type=str, help="The protocol we are testing against. One of iscsi or nvmf.", required=True)
+ parser.add_argument("-d", "--queue-depth", type=int, help="The desired queue depth for each job.", required=True)
+ parser.add_argument("-t", "--test-type", type=str, help="The fio I/O pattern to run. e.g. read, randwrite, randrw.", required=True)
+ parser.add_argument("-r", "--runtime", type=int, help="Time in seconds to run the workload.", required=True)
+ parser.add_argument("-n", "--num-jobs", type=int, help="The number of fio jobs to run in your workload. default 1.", default=1)
+ parser.add_argument("-v", "--verify", action="store_true", help="Supply this argument to verify the I/O.", default=False)
+ args = parser.parse_args()
+
+ if args.protocol.lower() != "nvmf" and args.protocol.lower() != "iscsi":
+ parser.error("Protocol must be one of the following: nvmf, iscsi.")
+
+ main(args.io_size, args.protocol, args.queue_depth, args.test_type, args.runtime, args.num_jobs, args.verify)
diff --git a/src/spdk/scripts/gdb_macros.py b/src/spdk/scripts/gdb_macros.py
new file mode 100644
index 000000000..74234de7b
--- /dev/null
+++ b/src/spdk/scripts/gdb_macros.py
@@ -0,0 +1,289 @@
+import gdb
+
+
class SpdkTailqList(object):
    """Iterates a BSD-style TAILQ living in the gdb inferior.

    list_pointer: gdb expression evaluating to the list head.
    list_member: Python wrapper class applied to each node.
    tailq_name_list: chain of struct member names leading from a node to
    its embedded tailq entry (e.g. ['internal', 'link']).
    """

    def __init__(self, list_pointer, list_member, tailq_name_list):
        self.list_pointer = list_pointer
        self.tailq_name_list = tailq_name_list
        self.list_member = list_member
        self.list = gdb.parse_and_eval(self.list_pointer)

    def __iter__(self):
        curr = self.list['tqh_first']
        while curr:
            yield self.list_member(curr)
            # Descend to the node's embedded tailq entry, then follow its
            # next pointer.
            for tailq_name in self.tailq_name_list:
                curr = curr[tailq_name]
            curr = curr['tqe_next']
+
+
class SpdkNormalTailqList(SpdkTailqList):
    """TAILQ whose node link member is literally named 'tailq'."""

    def __init__(self, list_pointer, list_member):
        super(SpdkNormalTailqList, self).__init__(list_pointer, list_member,
                                                  ['tailq'])
+
+
class SpdkArr(object):
    """Iterates a fixed-size pointer array in the inferior, skipping NULL
    slots and wrapping each element in element_type."""

    def __init__(self, arr_pointer, num_elements, element_type):
        self.arr_pointer = arr_pointer
        self.num_elements = num_elements
        self.element_type = element_type

    def __iter__(self):
        for i in range(0, self.num_elements):
            curr = (self.arr_pointer + i).dereference()
            if (curr == 0x0):
                # Empty slot — no object stored here.
                continue
            yield self.element_type(curr)
+
+
class SpdkPrintCommand(gdb.Command):
    """Base class for gdb commands that pretty-print an iterable of SPDK
    wrapper objects, separated by dashed divider lines."""

    def __init__(self, name, element_list):
        self.element_list = element_list
        gdb.Command.__init__(self, name,
                             gdb.COMMAND_DATA,
                             gdb.COMPLETE_SYMBOL,
                             True)

    def print_element_list(self, element_list):
        # Print a divider between elements but not before the first one.
        first = True
        for element in element_list:
            if first:
                first = False
            else:
                print("---------------")
            print("\n" + str(element) + "\n")

    def invoke(self, arg, from_tty):
        # gdb command entry point; arguments are ignored.
        self.print_element_list(self.element_list)
+
+
class SpdkObject(object):
    """Thin wrapper around a gdb.Value; subclasses set `type_name` to the
    C struct type they represent."""

    def __init__(self, gdb_obj):
        self.obj = gdb_obj

    def get_name(self):
        # Default: read the struct's 'name' member; subclasses override.
        return self.obj['name']

    def __str__(self):
        s = "SPDK object of type %s at %s" % (self.type_name, str(self.obj))
        s += '\n((%s*) %s)' % (self.type_name, str(self.obj))
        s += '\nname %s' % self.get_name()
        return s
+
+
class IoDevice(SpdkObject):
    # Wraps a 'struct io_device' value.
    type_name = 'struct io_device'
+
+
class IoDevices(SpdkTailqList):
    """All registered io_devices (the global g_io_devices TAILQ)."""

    def __init__(self):
        super(IoDevices, self).__init__('g_io_devices', IoDevice, ['tailq'])
+
+
class spdk_print_io_devices(SpdkPrintCommand):
    """gdb command: print every registered io_device."""

    def __init__(self):
        io_devices = IoDevices()
        name = 'spdk_print_io_devices'
        super(spdk_print_io_devices, self).__init__(name, io_devices)
+
+
class Bdev(SpdkObject):
    # Wraps a 'struct spdk_bdev' value.
    type_name = 'struct spdk_bdev'
+
+
class BdevMgrBdevs(SpdkTailqList):
    """All bdevs tracked by the bdev manager; the tailq link is nested at
    bdev->internal.link."""

    def __init__(self):
        tailq_name_list = ['internal', 'link']
        super(BdevMgrBdevs, self).__init__('g_bdev_mgr->bdevs', Bdev, tailq_name_list)
+
+
class spdk_print_bdevs(SpdkPrintCommand):
    """gdb command: print every registered bdev."""
    name = 'spdk_print_bdevs'

    def __init__(self):
        bdevs = BdevMgrBdevs()
        super(spdk_print_bdevs, self).__init__(self.name, bdevs)
+
+
class spdk_find_bdev(spdk_print_bdevs):
    """gdb command: print only bdevs whose name contains the argument as a
    substring."""

    name = 'spdk_find_bdev'

    def invoke(self, arg, from_tty):
        # Echo the query string back before printing matches.
        print(arg)
        bdev_query = [bdev for bdev in self.element_list
                      if str(bdev.get_name()).find(arg) != -1]
        if bdev_query == []:
            print("Cannot find bdev with name %s" % arg)
            return

        self.print_element_list(bdev_query)
+
+
class NvmfSubsystem(SpdkObject):
    """Wraps a 'struct spdk_nvmf_subsystem', casting from a raw pointer."""

    type_name = 'struct spdk_nvmf_subsystem'

    def __init__(self, ptr):
        self.ptr = ptr
        gdb_obj = self.ptr.cast(gdb.lookup_type(self.type_name).pointer())
        super(NvmfSubsystem, self).__init__(gdb_obj)

    def get_name(self):
        # Subsystems are identified by their NQN, not a 'name' member.
        return self.obj['subnqn']

    def get_id(self):
        return int(self.obj['id'])

    def get_ns_list(self):
        """Return the subsystem's non-NULL namespaces as spdk_nvmf_ns pointers."""
        max_nsid = int(self.obj['max_nsid'])
        ns_list = []
        for i in range(0, max_nsid):
            nsptr = (self.obj['ns'] + i).dereference()
            if nsptr == 0x0:
                continue
            ns = nsptr.cast(gdb.lookup_type('struct spdk_nvmf_ns').pointer())
            ns_list.append(ns)
        return ns_list

    def __str__(self):
        s = super(NvmfSubsystem, self).__str__()
        s += '\nnqn %s' % self.get_name()
        s += '\nID %d' % self.get_id()
        for ns in self.get_ns_list():
            # Bug fix: this was `s + '\t%s' % str(ns)` — the result was
            # computed and discarded, so namespaces were never printed.
            s += '\t%s' % str(ns)
        return s
+
+
class SpdkNvmfTgtSubsystems(SpdkArr):
    """The nvmf target's subsystem pointer array (g_spdk_nvmf_tgt)."""

    def get_num_subsystems(self):
        # The field moved out of 'opts' in SPDK 18.11; probe both layouts.
        try:  # version >= 18.11
            return int(self.spdk_nvmf_tgt['max_subsystems'])
        except RuntimeError:  # version < 18.11
            return int(self.spdk_nvmf_tgt['opts']['max_subsystems'])

    def __init__(self):
        self.spdk_nvmf_tgt = gdb.parse_and_eval("g_spdk_nvmf_tgt")
        subsystems = gdb.parse_and_eval("g_spdk_nvmf_tgt->subsystems")
        super(SpdkNvmfTgtSubsystems, self).__init__(subsystems,
                                                    self.get_num_subsystems(),
                                                    NvmfSubsystem)
+
+
class spdk_print_nvmf_subsystems(SpdkPrintCommand):
    """gdb command: print every nvmf subsystem of the target."""

    def __init__(self):
        name = 'spdk_print_nvmf_subsystems'
        nvmf_tgt_subsystems = SpdkNvmfTgtSubsystems()
        super(spdk_print_nvmf_subsystems, self).__init__(name, nvmf_tgt_subsystems)
+
+
class IoChannel(SpdkObject):
    """Wraps a 'struct spdk_io_channel' value."""

    type_name = 'struct spdk_io_channel'

    def get_ref(self):

        return int(self.obj['ref'])

    def get_device(self):
        return self.obj['dev']

    def get_device_name(self):
        return self.obj['dev']['name']

    def get_name(self):
        # Channels have no name of their own.
        return ""

    def __str__(self):
        s = super(IoChannel, self).__str__() + '\n'
        s += 'ref %d\n' % self.get_ref()
        s += 'device %s (%s)\n' % (self.get_device(), self.get_device_name())
        return s
+
+
+# TODO - create TailqList type that gets a gdb object instead of a pointer
+class IoChannels(SpdkTailqList):
+
+ def __init__(self, list_obj):
+ self.tailq_name_list = ['tailq']
+ self.list_member = IoChannel
+ self.list = list_obj
+
+
class SpdkThread(SpdkObject):
    """Wraps a 'struct spdk_thread' and pretty-prints its io_channels."""

    type_name = 'struct spdk_thread'

    def __init__(self, gdb_obj):
        super(SpdkThread, self).__init__(gdb_obj)
        self.io_channels = IoChannels(self.obj['io_channels'])

    def __str__(self):
        s = super(SpdkThread, self).__str__() + '\n'
        s += "IO Channels:\n"
        for io_channel in self.get_io_channels():
            channel_lines = str(io_channel).split('\n')
            # Bug fix: `line is not ''` compared identity, not equality
            # (SyntaxWarning on Python >= 3.8 and not guaranteed to work);
            # use != for the intended "skip empty lines" behavior.
            s += '\n'.join('\t%s' % line for line in channel_lines if line != '')
            s += '\n'
            s += '\t---------------\n'
            s += '\n'
        return s

    def get_io_channels(self):
        return self.io_channels
+
+
class SpdkThreads(SpdkNormalTailqList):
    """All SPDK threads (the global g_threads TAILQ)."""

    def __init__(self):
        super(SpdkThreads, self).__init__('g_threads', SpdkThread)
+
+
class spdk_print_threads(SpdkPrintCommand):
    """gdb command: print every SPDK thread and its io_channels."""

    def __init__(self):
        name = "spdk_print_threads"
        threads = SpdkThreads()
        super(spdk_print_threads, self).__init__(name, threads)
+
+
class spdk_load_macros(gdb.Command):
    """gdb command that (re)registers all SPDK inspection commands.

    Run once automatically at script load; run with --reload after the
    inferior's state changes so the cached lists are re-evaluated.
    """

    def __init__(self):
        gdb.Command.__init__(self, 'spdk_load_macros',
                             gdb.COMMAND_DATA,
                             gdb.COMPLETE_SYMBOL,
                             True)
        self.loaded = False

    def invoke(self, arg, from_tty):
        if arg == '--reload':
            print('Reloading spdk information')
            reload = True
        else:
            reload = False

        # Re-instantiating re-registers the commands with fresh data.
        if self.loaded and not reload:
            return

        spdk_print_threads()
        spdk_print_bdevs()
        spdk_print_io_devices()
        spdk_print_nvmf_subsystems()
        spdk_find_bdev()
+
+
+spdk_load_macros()
diff --git a/src/spdk/scripts/gen_ftl.sh b/src/spdk/scripts/gen_ftl.sh
new file mode 100755
index 000000000..acde64962
--- /dev/null
+++ b/src/spdk/scripts/gen_ftl.sh
@@ -0,0 +1,62 @@
+#!/usr/bin/env bash
+
+set -e
+
+rootdir=$(readlink -f $(dirname $0))/..
+
function usage() {
	# One printf call: each argument is emitted on its own line.
	printf '%s\n' \
		"Usage: [-j] $0 -n BDEV_NAME -d BASE_BDEV [-u UUID] [-c CACHE]" \
		"UUID is required when restoring device state" \
		"" \
		"BDEV_NAME - name of the bdev" \
		"BASE_BDEV - name of the bdev to be used as underlying device" \
		"UUID - bdev's uuid (used when in restore mode)" \
		"CACHE - name of the bdev to be used as write buffer cache"
}
+
function create_json_config() {
	# Emit a bdev_ftl_create JSON config.
	# $1=name  $2=base_bdev  $3=uuid  $4=cache (optional)
	printf '%s\n' '{' '"subsystem": "bdev",' '"config": [' '{' \
		'"method": "bdev_ftl_create",' '"params": {'
	printf '"name": "%s",\n"base_bdev": "%s",\n' "$1" "$2"
	# With a cache bdev the uuid line needs a trailing comma.
	if [ -n "$4" ]; then
		printf '"uuid": "%s",\n"cache": "%s"\n' "$3" "$4"
	else
		printf '"uuid": "%s"\n' "$3"
	fi
	printf '%s\n' '}' '}' ']' '}'
}
+
# Default uuid: all zeros means "create a new device" (no restore).
uuid=00000000-0000-0000-0000-000000000000

while getopts ":c:d:hn:u:" arg; do
	case "$arg" in
		n) name=$OPTARG ;;
		d) base_bdev=$OPTARG ;;
		u) uuid=$OPTARG ;;
		c) cache=$OPTARG ;;
		h)
			usage
			exit 0
			;;
		*)
			usage
			exit 1
			;;
	esac
done

# Name and base bdev are mandatory.
if [[ -z "$name" || -z "$base_bdev" ]]; then
	usage
	exit 1
fi

# Bug fix: quote the expansions so values containing whitespace or glob
# characters are passed intact.  An empty $cache still behaves as before:
# create_json_config tests $4 with -n, which is false for the empty string.
create_json_config "$name" "$base_bdev" "$uuid" "$cache"
diff --git a/src/spdk/scripts/gen_nvme.sh b/src/spdk/scripts/gen_nvme.sh
new file mode 100755
index 000000000..31817b271
--- /dev/null
+++ b/src/spdk/scripts/gen_nvme.sh
@@ -0,0 +1,43 @@
+#!/usr/bin/env bash
+
+set -e
+
+rootdir=$(readlink -f $(dirname $0))/..
+source "$rootdir/scripts/common.sh"
+
function create_classic_config() {
	# Emit the legacy INI-style [Nvme] section, one TransportID per device.
	echo "[Nvme]"
	local index
	for ((index = 0; index < ${#bdfs[@]}; index++)); do
		echo "  TransportID \"trtype:PCIe traddr:${bdfs[index]}\" Nvme$index"
	done
}
+
function create_json_config() {
	# Emit one bdev_nvme_attach_controller entry per discovered PCI address.
	echo "{"
	echo '"subsystem": "bdev",'
	echo '"config": ['
	for ((i = 0; i < ${#bdfs[@]}; i++)); do
		echo '{'
		echo '"params": {'
		echo '"trtype": "PCIe",'
		echo "\"name\": \"Nvme$i\","
		echo "\"traddr\": \"${bdfs[i]}\""
		echo '},'
		echo '"method": "bdev_nvme_attach_controller"'
		# The last entry must not carry a trailing comma.  Bug fix: quote
		# the lookahead (with a safe default) so the -z test is always
		# well-formed; the unquoted form only worked because `[ -z ]` with
		# a missing operand happens to evaluate true.
		if [ -z "${bdfs[i + 1]:-}" ]; then
			echo '}'
		else
			echo '},'
		fi
	done
	echo ']'
	echo '}'
}
+
# Enumerate NVMe PCI addresses usable from userspace (helper sourced from
# scripts/common.sh).
bdfs=($(nvme_in_userspace))

# --json selects the JSON-RPC config format; default is the legacy INI form.
if [ "$1" = "--json" ]; then
	create_json_config
else
	create_classic_config
fi
diff --git a/src/spdk/scripts/genconfig.py b/src/spdk/scripts/genconfig.py
new file mode 100755
index 000000000..cd4f17510
--- /dev/null
+++ b/src/spdk/scripts/genconfig.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python3
+
+import os
+import re
+import sys
+
comment = re.compile(r'^\s*#')
assign = re.compile(r'^\s*([a-zA-Z0-9_]+)\s*(\?)?=\s*([^#]*)')

# Overrides: environment variables first, then VAR=value pairs given on
# the command line take precedence over mk/config.mk defaults.
args = os.environ.copy()
for arg in sys.argv:
    m = assign.match(arg)
    if m:
        var = m.group(1).strip()
        val = m.group(3).strip()
        args[var] = val

defs = {}
try:
    with open("mk/config.mk") as f:
        for line in f:
            line = line.strip()
            if not comment.match(line):
                m = assign.match(line)
                if m:
                    var = m.group(1).strip()
                    default = m.group(3).strip()
                    val = default
                    if var in args:
                        val = args[var]
                    # y/n options become 1/0; other values pass through.
                    if default.lower() == 'y' or default.lower() == 'n':
                        if val.lower() == 'y':
                            defs["SPDK_{0}".format(var)] = 1
                        else:
                            defs["SPDK_{0}".format(var)] = 0
                    else:
                        # Bug fix: escape embedded double quotes for the
                        # generated C header.  The original used '\"',
                        # which is the same string as '"' and made the
                        # replace a no-op.
                        strval = val.replace('"', '\\"')
                        defs["SPDK_{0}".format(var)] = strval
except IOError:
    # Bug fix: the diagnostic goes to stderr so it cannot corrupt the
    # generated header being written to stdout.
    print("mk/config.mk not found", file=sys.stderr)

# Emit a stable, sorted header: disabled options become #undef.
for key, value in sorted(defs.items()):
    if value == 0:
        print("#undef {0}".format(key))
    else:
        print("#define {0} {1}".format(key, value))
diff --git a/src/spdk/scripts/histogram.py b/src/spdk/scripts/histogram.py
new file mode 100755
index 000000000..9d0b10fc0
--- /dev/null
+++ b/src/spdk/scripts/histogram.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python3
+
+import sys
+import json
+import base64
+import struct
+
# Read the JSON histogram (as produced by SPDK's histogram RPCs) from stdin.
buf = sys.stdin.readlines()
json = json.loads(" ".join(buf))  # NOTE: rebinding shadows the json module
histogram = base64.b64decode(json["histogram"])
bucket_shift = json["bucket_shift"]
tsc_rate = json["tsc_rate"]

print("Latency histogram")
print("==============================================================================")
print(" Range in us Cumulative IO count")

so_far = 0
bucket = 0
# Starts at 1 (not 0) so an all-empty histogram cannot divide by zero;
# this skews percentages by at most one count.
total = 1

# First pass: total IO count across all 64-bit little-endian counters.
for i in range(0, 64 - bucket_shift):
    for j in range(0, (1 << bucket_shift)):
        index = (((i << bucket_shift) + j) * 8)
        total += int.from_bytes(histogram[index:index + 8], 'little')

# Second pass: reconstruct each bucket's tick range and print non-empty ones.
for i in range(0, 64 - bucket_shift):
    for j in range(0, (1 << bucket_shift)):
        index = (((i << bucket_shift) + j)*8)
        count = int.from_bytes(histogram[index:index + 8], 'little')
        so_far += count
        last_bucket = bucket

        # Bucket boundaries grow exponentially; sub-buckets are linear
        # within each power-of-two range.
        if i > 0:
            bucket = (1 << (i + bucket_shift - 1))
            bucket += ((j+1) << (i - 1))
        else:
            bucket = j+1

        # Convert TSC ticks to microseconds.
        start = last_bucket * 1000 * 1000 / tsc_rate
        end = bucket * 1000 * 1000 / tsc_rate
        so_far_pct = so_far * 100.0 / total
        if count > 0:
            print("%9.3f - %9.3f: %9.4f%% (%9u)" % (start, end, so_far_pct, count))
diff --git a/src/spdk/scripts/iostat.py b/src/spdk/scripts/iostat.py
new file mode 100755
index 000000000..f683ce865
--- /dev/null
+++ b/src/spdk/scripts/iostat.py
@@ -0,0 +1,356 @@
+#!/usr/bin/env python3
+
+import logging
+import sys
+import argparse
+import time
+import rpc
+
+
+SPDK_CPU_STAT = "/proc/stat"
+SPDK_UPTIME = "/proc/uptime"
+
+SPDK_CPU_STAT_HEAD = ['cpu_stat:', 'user_stat', 'nice_stat',
+ 'system_stat', 'iowait_stat', 'steal_stat', 'idle_stat']
+SPDK_BDEV_KB_STAT_HEAD = ['Device', 'tps', 'KB_read/s',
+ 'KB_wrtn/s', 'KB_dscd/s', 'KB_read', 'KB_wrtn', 'KB_dscd']
+SPDK_BDEV_MB_STAT_HEAD = ['Device', 'tps', 'MB_read/s',
+ 'MB_wrtn/s', 'MB_dscd/s', 'MB_read', 'MB_wrtn', 'MB_dscd']
+
+SPDK_MAX_SECTORS = 0xffffffff
+
+
class BdevStat:
    """Snapshot of one bdev's iostat RPC entry.

    Byte counters are stored as 512-byte sector counts; any field absent
    from the RPC payload (or any attribute never assigned) reads as 0 via
    __getattr__.
    """

    # RPC keys whose byte values are converted to sectors (>> 9).
    _SECTOR_FIELDS = {
        'bytes_read': 'rd_sectors',
        'bytes_written': 'wr_sectors',
        'bytes_unmapped': 'dc_sectors',
    }

    # RPC keys copied through unchanged.
    _PLAIN_FIELDS = {
        'name': 'bdev_name',
        'num_read_ops': 'rd_ios',
        'num_write_ops': 'wr_ios',
        'num_unmap_ops': 'dc_ios',
        'read_latency_ticks': 'rd_ticks',
        'write_latency_ticks': 'wr_ticks',
        'unmap_latency_ticks': 'dc_ticks',
        'queue_depth': 'ios_pgr',
        'io_time': 'tot_ticks',
        'weighted_io_time': 'rq_ticks',
    }

    def __init__(self, dictionary):
        if dictionary is None:
            return
        for key, value in dictionary.items():
            if key in self._SECTOR_FIELDS:
                setattr(self, self._SECTOR_FIELDS[key], value >> 9)
            elif key in self._PLAIN_FIELDS:
                setattr(self, self._PLAIN_FIELDS[key], value)

        # Merge counters are not reported by SPDK; keep them for parity
        # with kernel iostat layouts.
        self.rd_merges = 0
        self.wr_merges = 0
        self.dc_merges = 0
        self.upt = 0.0

    def __getattr__(self, name):
        # Missing attributes default to 0 so arithmetic never raises.
        return 0
+
+
def uptime():
    # First field of /proc/uptime is seconds since boot, as a float.
    with open(SPDK_UPTIME, 'r') as f:
        return float(f.readline().split()[0])
+
+
def _stat_format(data, header, leave_first=False):
    """Print `data` (list of rows) as an aligned table under `header`.

    With leave_first=True the first header cell labels the row and each
    data row is shifted one column right.  Rows are stringified in place.
    """
    list_size = len(data)
    header_len = len(header)

    if list_size == 0:
        raise AssertionError
    list_len = len(data[0])

    # All rows must be the same width; convert every cell to str.
    for ll in data:
        if len(ll) != list_len:
            raise AssertionError
        for i, r in enumerate(ll):
            ll[i] = str(r)

    if (leave_first and list_len + 1 != header_len) or \
            (not leave_first and list_len != header_len):
        raise AssertionError

    item_sizes = [0 for i in range(header_len)]

    # Column width = max(header cell, widest data cell in that column).
    for i in range(0, list_len):
        if leave_first and i == 0:
            item_sizes[i] = len(header[i + 1])

        data_len = 0
        for x in data:
            data_len = max(data_len, len(x[i]))
        index = i + 1 if leave_first else i
        item_sizes[index] = max(len(header[index]), data_len)

    _format = ' '.join('%%-%ss' % item_sizes[i] for i in range(0, header_len))
    print(_format % tuple(header))
    if leave_first:
        print('\n'.join(_format % ('', *tuple(ll)) for ll in data))
    else:
        print('\n'.join(_format % tuple(ll) for ll in data))

    print()
    sys.stdout.flush()
+
+
def read_cpu_stat(last_cpu_info, cpu_info):
    """Print CPU utilization percentages derived from /proc/stat jiffies.

    With a previous sample, percentages cover the interval between the two
    samples; otherwise they cover the whole time since boot.
    Field order in cpu_info follows /proc/stat: user, nice, system, idle,
    iowait, irq, softirq, steal — assumed layout, TODO confirm.
    """
    jiffies = 0
    for i in range(0, 7):
        jiffies += cpu_info[i] - \
            (last_cpu_info[i] if last_cpu_info else 0)

    if last_cpu_info:
        info_stat = [
            "{:.2%}".format((cpu_info[0] - last_cpu_info[0]) / jiffies),
            "{:.2%}".format((cpu_info[1] - last_cpu_info[1]) / jiffies),
            # system + irq + softirq are folded into one "system" column.
            "{:.2%}".format(((cpu_info[2] + cpu_info[5] + cpu_info[6]) -
                             (last_cpu_info[2] + last_cpu_info[5] + last_cpu_info[6])) / jiffies),
            "{:.2%}".format((cpu_info[4] - last_cpu_info[4]) / jiffies),
            "{:.2%}".format((cpu_info[7] - last_cpu_info[7]) / jiffies),
            "{:.2%}".format((cpu_info[3] - last_cpu_info[3]) / jiffies),
        ]
    else:
        info_stat = [
            "{:.2%}".format(cpu_info[0] / jiffies),
            "{:.2%}".format(cpu_info[1] / jiffies),
            "{:.2%}".format((cpu_info[2] + cpu_info[5]
                             + cpu_info[6]) / jiffies),
            "{:.2%}".format(cpu_info[4] / jiffies),
            "{:.2%}".format(cpu_info[7] / jiffies),
            "{:.2%}".format(cpu_info[3] / jiffies),
        ]

    _stat_format([info_stat], SPDK_CPU_STAT_HEAD, True)
+
+
def check_positive(value):
    """argparse type callback: parse `value` as int and require it be > 0."""
    parsed = int(value)
    if parsed <= 0:
        raise argparse.ArgumentTypeError("%s should be positive int value" % parsed)
    return parsed
+
+
def get_cpu_stat():
    """Return the aggregate 'cpu' line of /proc/stat as a list of ints."""
    with open(SPDK_CPU_STAT, "r") as cpu_file:
        cpu_dump_info = []
        line = cpu_file.readline()
        while line:
            line = line.strip()
            if "cpu " in line:
                # line[5:] assumes the aggregate line is "cpu" followed by
                # two spaces and single-space-separated counters — TODO
                # confirm this holds on all kernels.
                cpu_dump_info = [int(data) for data in line[5:].split(' ')]
                break

            line = cpu_file.readline()
    return cpu_dump_info
+
+
def read_bdev_stat(last_stat, stat, mb, use_upt):
    """Print per-bdev throughput/tps derived from an iostat RPC payload.

    last_stat: previous list of BdevStat (or None for a since-boot view).
    stat: raw RPC response dict ('bdevs', 'ticks', 'tick_rate').
    mb: report MiB instead of KiB.  use_upt: measure the interval with
    /proc/uptime instead of SPDK tick counters.
    Returns the new list of BdevStat for the next iteration.
    """
    if use_upt:
        upt_cur = uptime()
    else:
        upt_cur = stat['ticks']
        upt_rate = stat['tick_rate']

    info_stats = []
    # Sectors are 512 bytes: 2048 per MiB, 2 per KiB.
    unit = 2048 if mb else 2

    bdev_stats = []
    if last_stat:
        for bdev in stat['bdevs']:
            _stat = BdevStat(bdev)
            _stat.upt = upt_cur
            bdev_stats.append(_stat)
            # Match this bdev against the previous sample by name.
            _last_stat = None
            for last_bdev in last_stat:
                if (_stat.bdev_name == last_bdev.bdev_name):
                    _last_stat = last_bdev
                    break

            # get the interval time
            if use_upt:
                upt = _stat.upt - _last_stat.upt
            else:
                upt = (_stat.upt - _last_stat.upt) / upt_rate

            # Sector counters may wrap at 32 bits; mask when a wrap is
            # detected (counter decreased while the old value still fit).
            rd_sec = _stat.rd_sectors - _last_stat.rd_sectors
            if (_stat.rd_sectors < _last_stat.rd_sectors) and (_last_stat.rd_sectors <= SPDK_MAX_SECTORS):
                rd_sec &= SPDK_MAX_SECTORS

            wr_sec = _stat.wr_sectors - _last_stat.wr_sectors
            if (_stat.wr_sectors < _last_stat.wr_sectors) and (_last_stat.wr_sectors <= SPDK_MAX_SECTORS):
                wr_sec &= SPDK_MAX_SECTORS

            dc_sec = _stat.dc_sectors - _last_stat.dc_sectors
            if (_stat.dc_sectors < _last_stat.dc_sectors) and (_last_stat.dc_sectors <= SPDK_MAX_SECTORS):
                dc_sec &= SPDK_MAX_SECTORS

            tps = ((_stat.rd_ios + _stat.dc_ios + _stat.wr_ios) -
                   (_last_stat.rd_ios + _last_stat.dc_ios + _last_stat.wr_ios)) / upt

            info_stat = [
                _stat.bdev_name,
                "{:.2f}".format(tps),
                "{:.2f}".format(
                    (_stat.rd_sectors - _last_stat.rd_sectors) / upt / unit),
                "{:.2f}".format(
                    (_stat.wr_sectors - _last_stat.wr_sectors) / upt / unit),
                "{:.2f}".format(
                    (_stat.dc_sectors - _last_stat.dc_sectors) / upt / unit),
                "{:.2f}".format(rd_sec / unit),
                "{:.2f}".format(wr_sec / unit),
                "{:.2f}".format(dc_sec / unit),
            ]
            info_stats.append(info_stat)
    else:
        # No previous sample: rates are averaged over the whole uptime.
        for bdev in stat['bdevs']:
            _stat = BdevStat(bdev)
            _stat.upt = upt_cur
            bdev_stats.append(_stat)

            if use_upt:
                upt = _stat.upt
            else:
                upt = _stat.upt / upt_rate

            tps = (_stat.rd_ios + _stat.dc_ios + _stat.wr_ios) / upt
            info_stat = [
                _stat.bdev_name,
                "{:.2f}".format(tps),
                "{:.2f}".format(_stat.rd_sectors / upt / unit),
                "{:.2f}".format(_stat.wr_sectors / upt / unit),
                "{:.2f}".format(_stat.dc_sectors / upt / unit),
                "{:.2f}".format(_stat.rd_sectors / unit),
                "{:.2f}".format(_stat.wr_sectors / unit),
                "{:.2f}".format(_stat.dc_sectors / unit),
            ]
            info_stats.append(info_stat)

    _stat_format(
        info_stats, SPDK_BDEV_MB_STAT_HEAD if mb else SPDK_BDEV_KB_STAT_HEAD)
    return bdev_stats
+
+
def get_bdev_stat(client, name):
    # RPC round-trip; presumably name=None returns stats for every bdev —
    # verify against rpc.bdev.bdev_get_iostat.
    return rpc.bdev.bdev_get_iostat(client, name=name)
+
+
def io_stat_display(args, cpu_info, stat):
    """Print one round of statistics and return (cpu_sample, bdev_sample)
    for use as the baseline of the next round.  Which tables are shown
    depends on args.cpu_stat / args.bdev_stat (both shown by default)."""
    if args.cpu_stat and not args.bdev_stat:
        _cpu_info = get_cpu_stat()
        read_cpu_stat(cpu_info, _cpu_info)
        return _cpu_info, None

    if args.bdev_stat and not args.cpu_stat:
        _stat = get_bdev_stat(args.client, args.name)
        bdev_stats = read_bdev_stat(
            stat, _stat, args.mb_display, args.use_uptime)
        return None, bdev_stats

    _cpu_info = get_cpu_stat()
    read_cpu_stat(cpu_info, _cpu_info)

    _stat = get_bdev_stat(args.client, args.name)
    bdev_stats = read_bdev_stat(stat, _stat, args.mb_display, args.use_uptime)
    return _cpu_info, bdev_stats
+
+
def io_stat_display_loop(args):
    """Poll and print stats every args.interval seconds until
    args.time_in_second has elapsed (both 0 means a single round)."""
    interval = args.interval
    time_in_second = args.time_in_second
    # Each successive call reuses this client via args.client.
    args.client = rpc.client.JSONRPCClient(
        args.server_addr, args.port, args.timeout, log_level=getattr(logging, args.verbose.upper()))

    last_cpu_stat = None
    bdev_stats = None

    cur = 0
    while True:
        last_cpu_stat, bdev_stats = io_stat_display(
            args, last_cpu_stat, bdev_stats)

        time.sleep(interval)
        cur += interval
        if cur >= time_in_second:
            break
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description='SPDK iostats command line interface')
+
+ parser.add_argument('-c', '--cpu-status', dest='cpu_stat',
+ action='store_true', help="Only display cpu status",
+ required=False, default=False)
+
+ parser.add_argument('-d', '--bdev-status', dest='bdev_stat',
+ action='store_true', help="Only display Blockdev io stats",
+ required=False, default=False)
+
+ parser.add_argument('-k', '--kb-display', dest='kb_display',
+ action='store_true', help="Display drive stats in KiB",
+ required=False, default=False)
+
+ parser.add_argument('-m', '--mb-display', dest='mb_display',
+ action='store_true', help="Display drive stats in MiB",
+ required=False, default=False)
+
+ parser.add_argument('-u', '--use-uptime', dest='use_uptime',
+ action='store_true', help='Use uptime or spdk ticks(default) as \
+ the interval variable to calculate iostat changes.',
+ required=False, default=False)
+
+ parser.add_argument('-i', '--interval', dest='interval',
+ type=check_positive, help='Time interval (in seconds) on which \
+ to poll I/O stats. Used in conjunction with -t',
+ required=False, default=0)
+
+ parser.add_argument('-t', '--time', dest='time_in_second',
+ type=check_positive, help='The number of second to display stats \
+ before returning. Used in conjunction with -i',
+ required=False, default=0)
+
+ parser.add_argument('-s', "--server", dest='server_addr',
+ help='RPC domain socket path or IP address',
+ default='/var/tmp/spdk.sock')
+
+ parser.add_argument('-p', "--port", dest='port',
+ help='RPC port number (if server_addr is IP address)',
+ default=4420, type=int)
+
+ parser.add_argument('-b', '--name', dest='name',
+ help="Name of the Blockdev. Example: Nvme0n1", required=False)
+
+ parser.add_argument('-o', '--timeout', dest='timeout',
+ help='Timeout as a floating point number expressed in seconds \
+ waiting for response. Default: 60.0',
+ default=60.0, type=float)
+
+ parser.add_argument('-v', dest='verbose', action='store_const', const="INFO",
+ help='Set verbose mode to INFO', default="ERROR")
+
+ args = parser.parse_args()
+ if ((args.interval == 0 and args.time_in_second != 0) or
+ (args.interval != 0 and args.time_in_second == 0)):
+ raise argparse.ArgumentTypeError(
+ "interval and time_in_second should be greater than 0 at the same time")
+
+ if args.kb_display and args.mb_display:
+ parser.print_help()
+ exit()
+
+ io_stat_display_loop(args)
diff --git a/src/spdk/scripts/perf/nvme/README b/src/spdk/scripts/perf/nvme/README
new file mode 100644
index 000000000..6468399a7
--- /dev/null
+++ b/src/spdk/scripts/perf/nvme/README
@@ -0,0 +1,12 @@
+These scripts are used to perform benchmark testing with fio.
+The run_fio_test.py is the main script that runs the performance test and parses the test results.
+Users can populate test parameters for different fio workloads in the lists (q_depth, io_size, workload_type, mix, core_mask and run_time) at the top of the run_fio_test.py script.
+The run_fio_test.py puts the test results in a csv file named <hostname>_<num ssds>_perf_output.csv.
+The run_fio_test.sh script demonstrates how to invoke the run_fio_test.py script with the
+input parameters: path_to_fio_conf, path_to_ioengine and num_ssds. The run_fio_test.sh script will
+call the SPDK setup.sh script to unbind NVMe devices from the kernel driver and bind them to the uio driver.
+We include a sample fio configuration file that includes the parameters we use in our test environment.
+The run_fio_test.py will append the NVMe devices to the end of the configuration file. The number of
+NVMe devices used is specified using the num_ssds parameter.
+
+Usage: ./run_fio_test.sh
diff --git a/src/spdk/scripts/perf/nvme/fio_test.conf b/src/spdk/scripts/perf/nvme/fio_test.conf
new file mode 100644
index 000000000..a03c6a1e9
--- /dev/null
+++ b/src/spdk/scripts/perf/nvme/fio_test.conf
@@ -0,0 +1,20 @@
+[global]
+ioengine=${IOENGINE}
+thread=1
+group_reporting=1
+direct=1
+verify=0
+norandommap=1
+cpumask=1
+percentile_list=50:90:99:99.5:99.9:99.99:99.999
+
+[perf_test]
+stonewall
+description="Run NVMe driver performance test for a given workload"
+bs=${BLK_SIZE}
+rw=${RW}
+rwmixread=${MIX}
+iodepth=${IODEPTH}
+time_based=1
+ramp_time=10s
+runtime=${RUNTIME}
diff --git a/src/spdk/scripts/perf/nvme/run_fio_test.py b/src/spdk/scripts/perf/nvme/run_fio_test.py
new file mode 100755
index 000000000..79d9e566d
--- /dev/null
+++ b/src/spdk/scripts/perf/nvme/run_fio_test.py
@@ -0,0 +1,166 @@
+#!/usr/bin/env python3
+
+# This script runs fio benchmark test on the local nvme device using the SPDK NVMe driver.
+# Prework: Run script/setup.sh to bind SSDs to SPDK driver.
+# Prework: Change any fio configurations in the template fio config file fio_test.conf
+# Output: A csv file <hostname>_<num ssds>_perf_output.csv
+
+import subprocess
+from subprocess import check_call, call, check_output, Popen, PIPE
+import random
+import os
+import sys
+import re
+import signal
+import getopt
+from datetime import datetime
+from itertools import *
+import csv
+import itertools
+from shutil import copyfile
+import json
+
+# Populate test parameters into these lists to run different workloads
+# The configuration below runs QD 1 & 128. To add QD 32 set q_depth=['1', '32', '128']
+q_depth = ['1', '128']
+# io_size specifies the size in bytes of the IO workload.
+# To add 64K IOs set io_size = ['4096', '65536']
+io_size = ['4096']
+workload_type = ['randrw']
+mix = ['100']
+core_mask = ['0x1']
+# run_time parameter specifies how long to run each test.
+# Set run_time = ['600'] to run the test for 10 minutes
+run_time = ['60']
+# iter_num parameter is used to run the test multiple times.
+# set iter_num = ['1', '2', '3'] to repeat each test 3 times
+iter_num = ['1']
+
+
+def run_fio(io_size_bytes, qd, rw_mix, cpu_mask, run_num, workload, run_time_sec):
+ print("Running Test: IO Size={} QD={} Mix={} CPU Mask={}".format(io_size_bytes, qd, rw_mix, cpu_mask))
+ string = "s_" + str(io_size_bytes) + "_q_" + str(qd) + "_m_" + str(rw_mix) + "_c_" + str(cpu_mask) + "_run_" + str(run_num)
+
+ # Call fio
+ path_to_fio_conf = config_file_for_test
+ path_to_ioengine = sys.argv[2]
+ command = "BLK_SIZE=" + str(io_size_bytes) + " RW=" + str(workload) + " MIX=" + str(rw_mix) \
+ + " IODEPTH=" + str(qd) + " RUNTIME=" + str(run_time_sec) + " IOENGINE=" + path_to_ioengine \
+ + " fio " + str(path_to_fio_conf) + " -output=" + string + " -output-format=json"
+ output = subprocess.check_output(command, shell=True)
+
+ print("Finished Test: IO Size={} QD={} Mix={} CPU Mask={}".format(io_size_bytes, qd, rw_mix, cpu_mask))
+ return
+
+
+def parse_results(io_size_bytes, qd, rw_mix, cpu_mask, run_num, workload, run_time_sec):
+ results_array = []
+
+ # If json file has results for multiple fio jobs pick the results from the right job
+ job_pos = 0
+
+ # generate the next result line that will be added to the output csv file
+ results = str(io_size_bytes) + "," + str(qd) + "," + str(rw_mix) + "," \
+ + str(workload) + "," + str(cpu_mask) + "," + str(run_time_sec) + "," + str(run_num)
+
+ # Read the results of this run from the test result file
+ string = "s_" + str(io_size_bytes) + "_q_" + str(qd) + "_m_" + str(rw_mix) + "_c_" + str(cpu_mask) + "_run_" + str(run_num)
+ with open(string) as json_file:
+ data = json.load(json_file)
+ job_name = data['jobs'][job_pos]['jobname']
+ # print "FIO job name: ", job_name
+ if 'lat_ns' in data['jobs'][job_pos]['read']:
+ lat = 'lat_ns'
+ lat_units = 'ns'
+ else:
+ lat = 'lat'
+ lat_units = 'us'
+ read_iops = float(data['jobs'][job_pos]['read']['iops'])
+ read_bw = float(data['jobs'][job_pos]['read']['bw'])
+ read_avg_lat = float(data['jobs'][job_pos]['read'][lat]['mean'])
+ read_min_lat = float(data['jobs'][job_pos]['read'][lat]['min'])
+ read_max_lat = float(data['jobs'][job_pos]['read'][lat]['max'])
+ write_iops = float(data['jobs'][job_pos]['write']['iops'])
+ write_bw = float(data['jobs'][job_pos]['write']['bw'])
+ write_avg_lat = float(data['jobs'][job_pos]['write'][lat]['mean'])
+ write_min_lat = float(data['jobs'][job_pos]['write'][lat]['min'])
+ write_max_lat = float(data['jobs'][job_pos]['write'][lat]['max'])
+ print("%-10s" % "IO Size", "%-10s" % "QD", "%-10s" % "Mix",
+ "%-10s" % "Workload Type", "%-10s" % "CPU Mask",
+ "%-10s" % "Run Time", "%-10s" % "Run Num",
+ "%-15s" % "Read IOps",
+ "%-10s" % "Read MBps", "%-15s" % "Read Avg. Lat(" + lat_units + ")",
+ "%-15s" % "Read Min. Lat(" + lat_units + ")", "%-15s" % "Read Max. Lat(" + lat_units + ")",
+ "%-15s" % "Write IOps",
+ "%-10s" % "Write MBps", "%-15s" % "Write Avg. Lat(" + lat_units + ")",
+ "%-15s" % "Write Min. Lat(" + lat_units + ")", "%-15s" % "Write Max. Lat(" + lat_units + ")")
+ print("%-10s" % io_size_bytes, "%-10s" % qd, "%-10s" % rw_mix,
+ "%-10s" % workload, "%-10s" % cpu_mask, "%-10s" % run_time_sec,
+ "%-10s" % run_num, "%-15s" % read_iops, "%-10s" % read_bw,
+ "%-15s" % read_avg_lat, "%-15s" % read_min_lat, "%-15s" % read_max_lat,
+ "%-15s" % write_iops, "%-10s" % write_bw, "%-15s" % write_avg_lat,
+ "%-15s" % write_min_lat, "%-15s" % write_max_lat)
+ results = results + "," + str(read_iops) + "," + str(read_bw) + "," \
+ + str(read_avg_lat) + "," + str(read_min_lat) + "," + str(read_max_lat) \
+ + "," + str(write_iops) + "," + str(write_bw) + "," + str(write_avg_lat) \
+ + "," + str(write_min_lat) + "," + str(write_max_lat)
+ with open(result_file_name, "a") as result_file:
+ result_file.write(results + "\n")
+ results_array = []
+ return
+
+
+def get_nvme_devices_count():
+ output = check_output('lspci | grep -i Non | wc -l', shell=True)
+ return int(output)
+
+
+def get_nvme_devices_bdf():
+ output = check_output('lspci | grep -i Non | awk \'{print $1}\'', shell=True).decode("utf-8")
+ output = output.split()
+ return output
+
+
+def add_filename_to_conf(conf_file_name, bdf):
+ filestring = "filename=trtype=PCIe traddr=0000." + bdf.replace(":", ".") + " ns=1"
+ with open(conf_file_name, "a") as conf_file:
+ conf_file.write(filestring + "\n")
+
+
+if len(sys.argv) != 4:
+ print("usage: " % sys.argv[0] % " path_to_fio_conf path_to_ioengine num_ssds")
+ sys.exit()
+
+num_ssds = int(sys.argv[3])
+if num_ssds > get_nvme_devices_count():
+ print("System does not have {} NVMe SSDs.".format(num_ssds))
+ sys.exit()
+
+host_name = os.uname()[1]
+result_file_name = host_name + "_" + sys.argv[3] + "ssds_perf_output.csv"
+
+bdf = get_nvme_devices_bdf()
+config_file_for_test = sys.argv[1] + "_" + sys.argv[3] + "ssds"
+copyfile(sys.argv[1], config_file_for_test)
+
+# Add the number of threads to the fio config file
+with open(config_file_for_test, "a") as conf_file:
+ conf_file.write("numjobs=" + str(1) + "\n")
+
+# Add the NVMe bdf to the fio config file
+for i in range(0, num_ssds):
+ add_filename_to_conf(config_file_for_test, bdf[i])
+
+# Set up for output
+columns = "IO_Size,Q_Depth,Workload_Mix,Workload_Type,Core_Mask,Run_Time,Run,Read_IOPS,Read_bw(KiB/s), \
+ Read_Avg_lat(us),Read_Min_Lat(us),Read_Max_Lat(us),Write_IOPS,Write_bw(KiB/s),Write_Avg_lat(us), \
+ Write_Min_Lat(us),Write_Max_Lat(us)"
+
+with open(result_file_name, "w+") as result_file:
+ result_file.write(columns + "\n")
+
+for i, (s, q, m, w, c, t) in enumerate(itertools.product(io_size, q_depth, mix, workload_type, core_mask, run_time)):
+ run_fio(s, q, m, c, i, w, t)
+ parse_results(s, q, m, c, i, w, t)
+
+result_file.close()
diff --git a/src/spdk/scripts/perf/nvme/run_fio_test.sh b/src/spdk/scripts/perf/nvme/run_fio_test.sh
new file mode 100755
index 000000000..bc888d0f4
--- /dev/null
+++ b/src/spdk/scripts/perf/nvme/run_fio_test.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+
+# Bind devices to NVMe driver
+$rootdir/scripts/setup.sh
+
+# Run Performance Test with 1 SSD
+$testdir/run_fio_test.py $testdir/fio_test.conf $rootdir/build/fio/spdk_nvme 1
+
+# 2 SSDs test run
+$testdir/run_fio_test.py $testdir/fio_test.conf $rootdir/build/fio/spdk_nvme 2
+
+# 4 SSDs test run
+$testdir/run_fio_test.py $testdir/fio_test.conf $rootdir/build/fio/spdk_nvme 4
+
+# 8 SSDs test run
+$testdir/run_fio_test.py $testdir/fio_test.conf $rootdir/build/fio/spdk_nvme 8
diff --git a/src/spdk/scripts/perf/nvmf/README.md b/src/spdk/scripts/perf/nvmf/README.md
new file mode 100644
index 000000000..6cd65790e
--- /dev/null
+++ b/src/spdk/scripts/perf/nvmf/README.md
@@ -0,0 +1,159 @@
+## Running NVMe-OF Performance Test Cases
+
+In order to reproduce the test cases described in [SPDK NVMe-OF Performance Test Cases](https://ci.spdk.io/download/performance-reports/SPDK_nvmeof_perf_report_18.04.pdf) follow the instructions below.
+
+Currently RDMA NIC IP address assignment must be done manually before running the tests.
+
+# Prepare the configuration file
+
+Configure the target, initiators, and FIO workload in the json configuration file.
+
+## General
+
+Options which apply to both target and all initiator servers such as "password" and "username" fields.
+All servers are required to have the same user credentials for running the test.
+Test results can be found in /tmp/results directory.
+
+### transport
+
+Transport layer to use between Target and Initiator servers - rdma or tcp.
+
+## Target
+
+Configure the target server information.
+
+### nic_ips
+
+List of IP addresses that will be used in this test.
+NVMe namespaces will be split between provided IP addresses.
+So for example providing 2 IP's with 16 NVMe drives present will result in each IP managing
+8 NVMe subsystems.
+
+### mode
+
+"spdk" or "kernel" values allowed.
+
+### use_null_block
+
+Use null block device instead of present NVMe drives. Used for latency measurements as described
+in Test Case 3 of performance report.
+
+### num_cores
+
+List of CPU cores to assign for running SPDK NVMe-OF Target process. Can specify exact core numbers or ranges, eg:
+[0, 1, 10-15].
+
+### nvmet_bin
+
+Path to nvmetcli application executable. If not provided then system-wide package will be used
+by default. Not used if "mode" is set to "spdk".
+
+### num_shared_buffers
+
+Number of shared buffers to use when creating transport layer.
+
+## Initiator
+
+Describes initiator arguments. There can be more than one initiator section in the configuration file.
+For the sake of easier results parsing from multiple initiators please use only digits and letters
+in initiator section name.
+
+### ip
+
+Management IP address used for SSH communication with initiator server.
+
+### nic_ips
+
+List of target IP addresses to which the initiator should try to connect.
+
+### mode
+
+"spdk" or "kernel" values allowed.
+
+### cpus_allowed
+
+List of CPU cores to assign for running SPDK NVMe-OF initiator process.
+Can specify exact core numbers: 0,5
+or ranges: 10-15
+or binding to CPUs 0, 5, and 8 to 15: `cpus_allowed=0,5,8-15`.
+If not specified then will use num_cores option.
+If specified with num_cores then cpu_allowed parameter has higher priority than num_cores.
+
+### num_cores
+
+Applies only to the SPDK initiator. Number of CPU cores to use for running the FIO job.
+If not specified then by default each connected subsystem gets its own CPU core.
+
+### nvmecli_dir
+
+Path to directory with nvme-cli application. If not provided then system-wide package will be used
+by default. Not used if "mode" is set to "spdk".
+
+### fio_bin
+
+Path to the fio binary that will be used to compile SPDK and run the test.
+If not specified, then the script will use /usr/src/fio/fio as the default.
+
+### extra_params
+
+Space separated string with additional settings for "nvme connect" command
+other than -t, -s, -n and -a.
+
+## fio
+
+Fio job parameters.
+
+- bs: block size
+- qd: io depth - Per connected fio filename target
+- rw: workload mode
+- rwmixread: percentage of reads in readwrite workloads
+- run_time: time (in seconds) to run workload
+- ramp_time: time (in seconds) to run workload before statistics are gathered
+- run_num: how many times to run given workload in loop
+
+# Running Test
+
+Before running the test script use the setup.sh script to bind the devices you want to
+use in the test to the VFIO/UIO driver.
+Run the script on the NVMe-oF target system:
+
+ cd spdk
+ sudo PYTHONPATH=$PYTHONPATH:$PWD/scripts scripts/perf/nvmf/run_nvmf.py
+The script uses the config.json configuration file in the scripts/perf/nvmf directory by default. You can
+specify a different configuration file at runtime as shown below:
+sudo PYTHONPATH=$PYTHONPATH:$PWD/scripts scripts/perf/nvmf/run_nvmf.py /path/to/config.json
+
+The script uses another spdk script (scripts/rpc.py) so we pass the path to rpc.py by setting the Python path
+as a runtime environment parameter.
+
+# Test Results
+
+When the test completes, you will find a csv file (nvmf_results.csv) containing the results in the target node
+directory /tmp/results.
+
+# Processor Counter Monitor (PCM)
+PCM Tools provides a number of command-line utilities for real-time monitoring.
+Before using PCM Tools in nvmf perf scripts it needs to be installed on Target machine.
+PCM source and instructions are available on https://github.com/opcm/pcm.
+To enable PCM in perf test you need to add Target setting in config.json file:
+```
+"pcm_settings": ["pcm_directory", "measure_cpu", "measure_memory", delay_time, measure_interval, sample_count]
+```
+example:
+```
+"pcm_settings": ["/tmp/pcm", true, true, 10, 1, 30]
+```
+Example above will run PCM measure for cpu and memory, with start delay 10s, sample every 1 second,
+and 30 samples for the CPU measurement. PCM memory does not support a sample count.
+
+# Bandwidth monitor (bwm-ng)
+bwm-ng is a command-line utility for real-time bandwidth monitoring.
+Before using bwm-ng in nvmf perf scripts it needs to be installed on Target machine.
+To enable bandwidth monitor in perf test you need to add Target setting in config.json file:
+```
+"bandwidth_settings": [bool, sample_count]
+```
+example:
+```
+"bandwidth_settings": [true, 30]
+```
diff --git a/src/spdk/scripts/perf/nvmf/common.py b/src/spdk/scripts/perf/nvmf/common.py
new file mode 100644
index 000000000..8c0d435f3
--- /dev/null
+++ b/src/spdk/scripts/perf/nvmf/common.py
@@ -0,0 +1,42 @@
+import os
+import re
+import json
+from itertools import product, chain
+from subprocess import check_output, Popen
+
+
+def get_used_numa_nodes():
+ used_numa_nodes = set()
+ for bdf in get_nvme_devices_bdf():
+ with open("/sys/bus/pci/devices/%s/numa_node" % bdf, "r") as numa_file:
+ output = numa_file.read()
+ used_numa_nodes.add(int(output))
+ return used_numa_nodes
+
+
+def get_nvme_devices_count():
+ output = get_nvme_devices_bdf()
+ return len(output)
+
+
+def get_nvme_devices_bdf():
+ print("Getting BDFs for NVMe section")
+ output = check_output("rootdir=$PWD; \
+ source test/common/autotest_common.sh; \
+ get_nvme_bdfs 01 08 02",
+ executable="/bin/bash", shell=True)
+ output = [str(x, encoding="utf-8") for x in output.split()]
+ print("Done getting BDFs")
+ return output
+
+
+def get_nvme_devices():
+ print("Getting kernel NVMe names")
+ output = check_output("lsblk -o NAME -nlp", shell=True).decode(encoding="utf-8")
+ output = [x for x in output.split("\n") if "nvme" in x]
+ print("Done getting kernel NVMe names")
+ return output
+
+
+def nvmet_command(nvmet_bin, command):
+ return check_output("%s %s" % (nvmet_bin, command), shell=True).decode(encoding="utf-8")
diff --git a/src/spdk/scripts/perf/nvmf/config.json b/src/spdk/scripts/perf/nvmf/config.json
new file mode 100644
index 000000000..d8b16be93
--- /dev/null
+++ b/src/spdk/scripts/perf/nvmf/config.json
@@ -0,0 +1,37 @@
+{
+ "general": {
+ "username": "uname",
+ "password": "pass",
+ "transport": "transport_type"
+ },
+ "target": {
+ "nic_ips": ["192.0.1.1", "192.0.2.1"],
+ "mode": "spdk",
+ "use_null_block": false,
+ "nvmet_dir": "/path/to/nvmetcli",
+ "num_cores": "1",
+ "num_shared_buffers": 4096
+ },
+ "initiator1": {
+ "ip": "10.0.0.1",
+ "nic_ips": ["192.0.1.1"],
+ "mode": "spdk",
+ "nvmecli_dir": "/path/to/nvmecli",
+ "fio_dir": "/path/to/fio binary",
+ "extra_params": "Extra nvme connect params"
+ },
+ "initiator2": {
+ "ip": "10.0.0.2",
+ "nic_ips": ["192.0.2.1"],
+ "mode": "spdk"
+ },
+ "fio": {
+ "bs": ["4k"],
+ "qd": [128],
+ "rw": ["randrw"],
+ "rwmixread": 100,
+ "run_time": 5,
+ "ramp_time": 1,
+ "run_num": 3
+ }
+}
diff --git a/src/spdk/scripts/perf/nvmf/run_nvmf.py b/src/spdk/scripts/perf/nvmf/run_nvmf.py
new file mode 100755
index 000000000..05b94ec09
--- /dev/null
+++ b/src/spdk/scripts/perf/nvmf/run_nvmf.py
@@ -0,0 +1,941 @@
+#!/usr/bin/env python3
+
+import os
+import re
+import sys
+import json
+import paramiko
+import zipfile
+import threading
+import subprocess
+import itertools
+import time
+import uuid
+import rpc
+import rpc.client
+import pandas as pd
+from collections import OrderedDict
+from common import *
+
+
+class Server:
+ def __init__(self, name, username, password, mode, nic_ips, transport):
+ self.name = name
+ self.mode = mode
+ self.username = username
+ self.password = password
+ self.nic_ips = nic_ips
+ self.transport = transport.lower()
+
+ if not re.match("^[A-Za-z0-9]*$", name):
+ self.log_print("Please use a name which contains only letters or numbers")
+ sys.exit(1)
+
+ def log_print(self, msg):
+ print("[%s] %s" % (self.name, msg), flush=True)
+
+
+class Target(Server):
+ def __init__(self, name, username, password, mode, nic_ips, transport="rdma",
+ use_null_block=False, sar_settings=None, pcm_settings=None,
+ bandwidth_settings=None):
+
+ super(Target, self).__init__(name, username, password, mode, nic_ips, transport)
+ self.null_block = bool(use_null_block)
+ self.enable_sar = False
+ self.enable_pcm_memory = False
+ self.enable_pcm = False
+ self.enable_bandwidth = False
+
+ if sar_settings:
+ self.enable_sar, self.sar_delay, self.sar_interval, self.sar_count = sar_settings
+
+ if pcm_settings:
+ self.pcm_dir, self.enable_pcm, self.enable_pcm_memory, self.pcm_delay, self.pcm_interval, self.pcm_count = pcm_settings
+
+ if bandwidth_settings:
+ self.enable_bandwidth, self.bandwidth_count = bandwidth_settings
+
+ self.script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
+ self.spdk_dir = os.path.abspath(os.path.join(self.script_dir, "../../../"))
+
+ def zip_spdk_sources(self, spdk_dir, dest_file):
+ self.log_print("Zipping SPDK source directory")
+ fh = zipfile.ZipFile(dest_file, "w", zipfile.ZIP_DEFLATED)
+ for root, directories, files in os.walk(spdk_dir, followlinks=True):
+ for file in files:
+ fh.write(os.path.relpath(os.path.join(root, file)))
+ fh.close()
+ self.log_print("Done zipping")
+
+ def read_json_stats(self, file):
+ with open(file, "r") as json_data:
+ data = json.load(json_data)
+ job_pos = 0 # job_post = 0 because using aggregated results
+
+ # Check if latency is in nano or microseconds to choose correct dict key
+ def get_lat_unit(key_prefix, dict_section):
+ # key prefix - lat, clat or slat.
+ # dict section - portion of json containing latency bucket in question
+ # Return dict key to access the bucket and unit as string
+ for k, v in dict_section.items():
+ if k.startswith(key_prefix):
+ return k, k.split("_")[1]
+
+ read_iops = float(data["jobs"][job_pos]["read"]["iops"])
+ read_bw = float(data["jobs"][job_pos]["read"]["bw"])
+ lat_key, lat_unit = get_lat_unit("lat", data["jobs"][job_pos]["read"])
+ read_avg_lat = float(data["jobs"][job_pos]["read"][lat_key]["mean"])
+ read_min_lat = float(data["jobs"][job_pos]["read"][lat_key]["min"])
+ read_max_lat = float(data["jobs"][job_pos]["read"][lat_key]["max"])
+ clat_key, clat_unit = get_lat_unit("clat", data["jobs"][job_pos]["read"])
+ read_p99_lat = float(data["jobs"][job_pos]["read"][clat_key]["percentile"]["99.000000"])
+ read_p99_9_lat = float(data["jobs"][job_pos]["read"][clat_key]["percentile"]["99.900000"])
+ read_p99_99_lat = float(data["jobs"][job_pos]["read"][clat_key]["percentile"]["99.990000"])
+ read_p99_999_lat = float(data["jobs"][job_pos]["read"][clat_key]["percentile"]["99.999000"])
+
+ if "ns" in lat_unit:
+ read_avg_lat, read_min_lat, read_max_lat = [x / 1000 for x in [read_avg_lat, read_min_lat, read_max_lat]]
+ if "ns" in clat_unit:
+ read_p99_lat = read_p99_lat / 1000
+ read_p99_9_lat = read_p99_9_lat / 1000
+ read_p99_99_lat = read_p99_99_lat / 1000
+ read_p99_999_lat = read_p99_999_lat / 1000
+
+ write_iops = float(data["jobs"][job_pos]["write"]["iops"])
+ write_bw = float(data["jobs"][job_pos]["write"]["bw"])
+ lat_key, lat_unit = get_lat_unit("lat", data["jobs"][job_pos]["write"])
+ write_avg_lat = float(data["jobs"][job_pos]["write"][lat_key]["mean"])
+ write_min_lat = float(data["jobs"][job_pos]["write"][lat_key]["min"])
+ write_max_lat = float(data["jobs"][job_pos]["write"][lat_key]["max"])
+ clat_key, clat_unit = get_lat_unit("clat", data["jobs"][job_pos]["write"])
+ write_p99_lat = float(data["jobs"][job_pos]["write"][clat_key]["percentile"]["99.000000"])
+ write_p99_9_lat = float(data["jobs"][job_pos]["write"][clat_key]["percentile"]["99.900000"])
+ write_p99_99_lat = float(data["jobs"][job_pos]["write"][clat_key]["percentile"]["99.990000"])
+ write_p99_999_lat = float(data["jobs"][job_pos]["write"][clat_key]["percentile"]["99.999000"])
+
+ if "ns" in lat_unit:
+ write_avg_lat, write_min_lat, write_max_lat = [x / 1000 for x in [write_avg_lat, write_min_lat, write_max_lat]]
+ if "ns" in clat_unit:
+ write_p99_lat = write_p99_lat / 1000
+ write_p99_9_lat = write_p99_9_lat / 1000
+ write_p99_99_lat = write_p99_99_lat / 1000
+ write_p99_999_lat = write_p99_999_lat / 1000
+
+ return [read_iops, read_bw, read_avg_lat, read_min_lat, read_max_lat,
+ read_p99_lat, read_p99_9_lat, read_p99_99_lat, read_p99_999_lat,
+ write_iops, write_bw, write_avg_lat, write_min_lat, write_max_lat,
+ write_p99_lat, write_p99_9_lat, write_p99_99_lat, write_p99_999_lat]
+
+ def parse_results(self, results_dir, initiator_count=None, run_num=None):
+ files = os.listdir(results_dir)
+ fio_files = filter(lambda x: ".fio" in x, files)
+ json_files = [x for x in files if ".json" in x]
+
+ headers = ["read_iops", "read_bw", "read_avg_lat_us", "read_min_lat_us", "read_max_lat_us",
+ "read_p99_lat_us", "read_p99.9_lat_us", "read_p99.99_lat_us", "read_p99.999_lat_us",
+ "write_iops", "write_bw", "write_avg_lat_us", "write_min_lat_us", "write_max_lat_us",
+ "write_p99_lat_us", "write_p99.9_lat_us", "write_p99.99_lat_us", "write_p99.999_lat_us"]
+
+ aggr_headers = ["iops", "bw", "avg_lat_us", "min_lat_us", "max_lat_us",
+ "p99_lat_us", "p99.9_lat_us", "p99.99_lat_us", "p99.999_lat_us"]
+
+ header_line = ",".join(["Name", *headers])
+ aggr_header_line = ",".join(["Name", *aggr_headers])
+
+ # Create empty results file
+ csv_file = "nvmf_results.csv"
+ with open(os.path.join(results_dir, csv_file), "w") as fh:
+ fh.write(aggr_header_line + "\n")
+ rows = set()
+
+ for fio_config in fio_files:
+ self.log_print("Getting FIO stats for %s" % fio_config)
+ job_name, _ = os.path.splitext(fio_config)
+
+ # Look in the filename for rwmixread value. Function arguments do
+ # not have that information.
+ # TODO: Improve this function by directly using workload params instead
+ # of regexing through filenames.
+ if "read" in job_name:
+ rw_mixread = 1
+ elif "write" in job_name:
+ rw_mixread = 0
+ else:
+ rw_mixread = float(re.search(r"m_(\d+)", job_name).group(1)) / 100
+
+ # If "_CPU" exists in name - ignore it
+            # Initiators for the same job could have different num_cores parameter
+ job_name = re.sub(r"_\d+CPU", "", job_name)
+ job_result_files = [x for x in json_files if job_name in x]
+ self.log_print("Matching result files for current fio config:")
+ for j in job_result_files:
+ self.log_print("\t %s" % j)
+
+ # There may have been more than 1 initiator used in test, need to check that
+ # Result files are created so that string after last "_" separator is server name
+ inits_names = set([os.path.splitext(x)[0].split("_")[-1] for x in job_result_files])
+ inits_avg_results = []
+ for i in inits_names:
+ self.log_print("\tGetting stats for initiator %s" % i)
+ # There may have been more than 1 test run for this job, calculate average results for initiator
+ i_results = [x for x in job_result_files if i in x]
+ i_results_filename = re.sub(r"run_\d+_", "", i_results[0].replace("json", "csv"))
+
+ separate_stats = []
+ for r in i_results:
+ stats = self.read_json_stats(os.path.join(results_dir, r))
+ separate_stats.append(stats)
+ self.log_print(stats)
+
+ init_results = [sum(x) for x in zip(*separate_stats)]
+ init_results = [x / len(separate_stats) for x in init_results]
+ inits_avg_results.append(init_results)
+
+ self.log_print("\tAverage results for initiator %s" % i)
+ self.log_print(init_results)
+ with open(os.path.join(results_dir, i_results_filename), "w") as fh:
+ fh.write(header_line + "\n")
+ fh.write(",".join([job_name, *["{0:.3f}".format(x) for x in init_results]]) + "\n")
+
+ # Sum results of all initiators running this FIO job.
+            # Latency results are an average of latencies from across all initiators.
+ inits_avg_results = [sum(x) for x in zip(*inits_avg_results)]
+ inits_avg_results = OrderedDict(zip(headers, inits_avg_results))
+ for key in inits_avg_results:
+ if "lat" in key:
+ inits_avg_results[key] /= len(inits_names)
+
+ # Aggregate separate read/write values into common labels
+ # Take rw_mixread into consideration for mixed read/write workloads.
+ aggregate_results = OrderedDict()
+ for h in aggr_headers:
+ read_stat, write_stat = [float(value) for key, value in inits_avg_results.items() if h in key]
+ if "lat" in h:
+ _ = rw_mixread * read_stat + (1 - rw_mixread) * write_stat
+ else:
+ _ = read_stat + write_stat
+ aggregate_results[h] = "{0:.3f}".format(_)
+
+ rows.add(",".join([job_name, *aggregate_results.values()]))
+
+ # Save results to file
+ for row in rows:
+ with open(os.path.join(results_dir, csv_file), "a") as fh:
+ fh.write(row + "\n")
+ self.log_print("You can find the test results in the file %s" % os.path.join(results_dir, csv_file))
+
+ def measure_sar(self, results_dir, sar_file_name):
+ self.log_print("Waiting %d delay before measuring SAR stats" % self.sar_delay)
+ time.sleep(self.sar_delay)
+ out = subprocess.check_output("sar -P ALL %s %s" % (self.sar_interval, self.sar_count), shell=True).decode(encoding="utf-8")
+ with open(os.path.join(results_dir, sar_file_name), "w") as fh:
+ for line in out.split("\n"):
+ if "Average" in line and "CPU" in line:
+ self.log_print("Summary CPU utilization from SAR:")
+ self.log_print(line)
+ if "Average" in line and "all" in line:
+ self.log_print(line)
+ fh.write(out)
+
+ def measure_pcm_memory(self, results_dir, pcm_file_name):
+ time.sleep(self.pcm_delay)
+ pcm_memory = subprocess.Popen("%s/pcm-memory.x %s -csv=%s/%s" % (self.pcm_dir, self.pcm_interval,
+ results_dir, pcm_file_name), shell=True)
+ time.sleep(self.pcm_count)
+ pcm_memory.kill()
+
+ def measure_pcm(self, results_dir, pcm_file_name):
+ time.sleep(self.pcm_delay)
+ subprocess.run("%s/pcm.x %s -i=%s -csv=%s/%s" % (self.pcm_dir, self.pcm_interval, self.pcm_count,
+ results_dir, pcm_file_name), shell=True, check=True)
+ df = pd.read_csv(os.path.join(results_dir, pcm_file_name), header=[0, 1])
+ df = df.rename(columns=lambda x: re.sub(r'Unnamed:[\w\s]*$', '', x))
+ skt = df.loc[:, df.columns.get_level_values(1).isin({'UPI0', 'UPI1', 'UPI2'})]
+ skt_pcm_file_name = "_".join(["skt", pcm_file_name])
+ skt.to_csv(os.path.join(results_dir, skt_pcm_file_name), index=False)
+
+ def measure_bandwidth(self, results_dir, bandwidth_file_name):
+ bwm = subprocess.run("bwm-ng -o csv -F %s/%s -a 1 -t 1000 -c %s" % (results_dir, bandwidth_file_name,
+ self.bandwidth_count), shell=True, check=True)
+
+
+class Initiator(Server):
+ def __init__(self, name, username, password, mode, nic_ips, ip, transport="rdma", cpu_frequency=None,
+ nvmecli_bin="nvme", workspace="/tmp/spdk", cpus_allowed=None,
+ cpus_allowed_policy="shared", fio_bin="/usr/src/fio/fio"):
+
+ super(Initiator, self).__init__(name, username, password, mode, nic_ips, transport)
+
+ self.ip = ip
+ self.spdk_dir = workspace
+ if os.getenv('SPDK_WORKSPACE'):
+ self.spdk_dir = os.getenv('SPDK_WORKSPACE')
+ self.fio_bin = fio_bin
+ self.cpus_allowed = cpus_allowed
+ self.cpus_allowed_policy = cpus_allowed_policy
+ self.cpu_frequency = cpu_frequency
+ self.nvmecli_bin = nvmecli_bin
+ self.ssh_connection = paramiko.SSHClient()
+ self.ssh_connection.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ self.ssh_connection.connect(self.ip, username=self.username, password=self.password)
+ self.remote_call("sudo rm -rf %s/nvmf_perf" % self.spdk_dir)
+ self.remote_call("mkdir -p %s" % self.spdk_dir)
+ self.set_cpu_frequency()
+
+ def __del__(self):
+ self.ssh_connection.close()
+
+ def put_file(self, local, remote_dest):
+ ftp = self.ssh_connection.open_sftp()
+ ftp.put(local, remote_dest)
+ ftp.close()
+
+ def get_file(self, remote, local_dest):
+ ftp = self.ssh_connection.open_sftp()
+ ftp.get(remote, local_dest)
+ ftp.close()
+
+ def remote_call(self, cmd):
+ stdin, stdout, stderr = self.ssh_connection.exec_command(cmd)
+ out = stdout.read().decode(encoding="utf-8")
+ err = stderr.read().decode(encoding="utf-8")
+ return out, err
+
+ def copy_result_files(self, dest_dir):
+ self.log_print("Copying results")
+
+ if not os.path.exists(dest_dir):
+ os.mkdir(dest_dir)
+
+ # Get list of result files from initiator and copy them back to target
+ stdout, stderr = self.remote_call("ls %s/nvmf_perf" % self.spdk_dir)
+ file_list = stdout.strip().split("\n")
+
+ for file in file_list:
+ self.get_file(os.path.join(self.spdk_dir, "nvmf_perf", file),
+ os.path.join(dest_dir, file))
+ self.log_print("Done copying results")
+
+ def discover_subsystems(self, address_list, subsys_no):
+ num_nvmes = range(0, subsys_no)
+ nvme_discover_output = ""
+ for ip, subsys_no in itertools.product(address_list, num_nvmes):
+ self.log_print("Trying to discover: %s:%s" % (ip, 4420 + subsys_no))
+ nvme_discover_cmd = ["sudo",
+ "%s" % self.nvmecli_bin,
+ "discover", "-t %s" % self.transport,
+ "-s %s" % (4420 + subsys_no),
+ "-a %s" % ip]
+ nvme_discover_cmd = " ".join(nvme_discover_cmd)
+
+ stdout, stderr = self.remote_call(nvme_discover_cmd)
+ if stdout:
+ nvme_discover_output = nvme_discover_output + stdout
+
+ subsystems = re.findall(r'trsvcid:\s(\d+)\s+' # get svcid number
+ r'subnqn:\s+([a-zA-Z0-9\.\-\:]+)\s+' # get NQN id
+ r'traddr:\s+(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})', # get IP address
+ nvme_discover_output) # from nvme discovery output
+ subsystems = filter(lambda x: x[-1] in address_list, subsystems)
+ subsystems = list(set(subsystems))
+ subsystems.sort(key=lambda x: x[1])
+ self.log_print("Found matching subsystems on target side:")
+ for s in subsystems:
+ self.log_print(s)
+
+ return subsystems
+
+ def gen_fio_config(self, rw, rwmixread, block_size, io_depth, subsys_no, num_jobs=None, ramp_time=0, run_time=10):
+ fio_conf_template = """
+[global]
+ioengine={ioengine}
+{spdk_conf}
+thread=1
+group_reporting=1
+direct=1
+percentile_list=50:90:99:99.5:99.9:99.99:99.999
+
+norandommap=1
+rw={rw}
+rwmixread={rwmixread}
+bs={block_size}
+time_based=1
+ramp_time={ramp_time}
+runtime={run_time}
+"""
+ if "spdk" in self.mode:
+ subsystems = self.discover_subsystems(self.nic_ips, subsys_no)
+ bdev_conf = self.gen_spdk_bdev_conf(subsystems)
+ self.remote_call("echo '%s' > %s/bdev.conf" % (bdev_conf, self.spdk_dir))
+ ioengine = "%s/build/fio/spdk_bdev" % self.spdk_dir
+ spdk_conf = "spdk_conf=%s/bdev.conf" % self.spdk_dir
+ else:
+ ioengine = "libaio"
+ spdk_conf = ""
+ out, err = self.remote_call("sudo nvme list | grep -E 'SPDK|Linux' | awk '{print $1}'")
+ subsystems = [x for x in out.split("\n") if "nvme" in x]
+
+ if self.cpus_allowed is not None:
+ self.log_print("Limiting FIO workload execution on specific cores %s" % self.cpus_allowed)
+ cpus_num = 0
+ cpus = self.cpus_allowed.split(",")
+ for cpu in cpus:
+ if "-" in cpu:
+ a, b = cpu.split("-")
+ a = int(a)
+ b = int(b)
+ cpus_num += len(range(a, b))
+ else:
+ cpus_num += 1
+ threads = range(0, cpus_num)
+ elif hasattr(self, 'num_cores'):
+ self.log_print("Limiting FIO workload execution to %s cores" % self.num_cores)
+ threads = range(0, int(self.num_cores))
+ else:
+ threads = range(0, len(subsystems))
+
+ if "spdk" in self.mode:
+ filename_section = self.gen_fio_filename_conf(subsystems, threads, io_depth, num_jobs)
+ else:
+ filename_section = self.gen_fio_filename_conf(threads, io_depth, num_jobs)
+
+ fio_config = fio_conf_template.format(ioengine=ioengine, spdk_conf=spdk_conf,
+ rw=rw, rwmixread=rwmixread, block_size=block_size,
+ ramp_time=ramp_time, run_time=run_time)
+ if num_jobs:
+ fio_config = fio_config + "numjobs=%s \n" % num_jobs
+ if self.cpus_allowed is not None:
+ fio_config = fio_config + "cpus_allowed=%s \n" % self.cpus_allowed
+ fio_config = fio_config + "cpus_allowed_policy=%s \n" % self.cpus_allowed_policy
+ fio_config = fio_config + filename_section
+
+ fio_config_filename = "%s_%s_%s_m_%s" % (block_size, io_depth, rw, rwmixread)
+ if hasattr(self, "num_cores"):
+ fio_config_filename += "_%sCPU" % self.num_cores
+ fio_config_filename += ".fio"
+
+ self.remote_call("mkdir -p %s/nvmf_perf" % self.spdk_dir)
+ self.remote_call("echo '%s' > %s/nvmf_perf/%s" % (fio_config, self.spdk_dir, fio_config_filename))
+ self.log_print("Created FIO Config:")
+ self.log_print(fio_config)
+
+ return os.path.join(self.spdk_dir, "nvmf_perf", fio_config_filename)
+
+    def set_cpu_frequency(self):
+        # Pin the remote CPUs to a fixed frequency via cpupower, when
+        # self.cpu_frequency is configured. Requires the userspace governor
+        # (i.e. intel_pstate disabled); on failure the whole script exits.
+        if self.cpu_frequency is not None:
+            try:
+                self.remote_call('sudo cpupower frequency-set -g userspace')
+                self.remote_call('sudo cpupower frequency-set -f %s' % self.cpu_frequency)
+                cmd = "sudo cpupower frequency-info"
+                output, error = self.remote_call(cmd)
+                self.log_print(output)
+                self.log_print(error)
+            except Exception:
+                self.log_print("ERROR: cpu_frequency will not work when intel_pstate is enabled!")
+                sys.exit()
+        else:
+            self.log_print("WARNING: you have disabled intel_pstate and using default cpu governance.")
+
+    def run_fio(self, fio_config_file, run_num=None):
+        # Execute FIO on the remote initiator with the given job file.
+        # When run_num is set, the job is repeated run_num times and each
+        # repetition gets a "_run_<i>" suffix in its JSON output filename.
+        job_name, _ = os.path.splitext(fio_config_file)
+        self.log_print("Starting FIO run for job: %s" % job_name)
+        self.log_print("Using FIO: %s" % self.fio_bin)
+
+        if run_num:
+            for i in range(1, run_num + 1):
+                output_filename = job_name + "_run_" + str(i) + "_" + self.name + ".json"
+                cmd = "sudo %s %s --output-format=json --output=%s" % (self.fio_bin, fio_config_file, output_filename)
+                output, error = self.remote_call(cmd)
+                self.log_print(output)
+                self.log_print(error)
+        else:
+            output_filename = job_name + "_" + self.name + ".json"
+            cmd = "sudo %s %s --output-format=json --output=%s" % (self.fio_bin, fio_config_file, output_filename)
+            output, error = self.remote_call(cmd)
+            self.log_print(output)
+            self.log_print(error)
+        self.log_print("FIO run finished. Results in: %s" % output_filename)
+
+
+class KernelTarget(Target):
+ def __init__(self, name, username, password, mode, nic_ips, transport="rdma",
+ use_null_block=False, sar_settings=None, pcm_settings=None,
+ bandwidth_settings=None, nvmet_bin="nvmetcli", **kwargs):
+
+ super(KernelTarget, self).__init__(name, username, password, mode, nic_ips, transport,
+ use_null_block, sar_settings, pcm_settings, bandwidth_settings)
+ self.nvmet_bin = nvmet_bin
+
+ def __del__(self):
+ nvmet_command(self.nvmet_bin, "clear")
+
+ def kernel_tgt_gen_nullblock_conf(self, address):
+ nvmet_cfg = {
+ "ports": [],
+ "hosts": [],
+ "subsystems": [],
+ }
+
+ nvmet_cfg["subsystems"].append({
+ "allowed_hosts": [],
+ "attr": {
+ "allow_any_host": "1",
+ "serial": "SPDK0001",
+ "version": "1.3"
+ },
+ "namespaces": [
+ {
+ "device": {
+ "path": "/dev/nullb0",
+ "uuid": "%s" % uuid.uuid4()
+ },
+ "enable": 1,
+ "nsid": 1
+ }
+ ],
+ "nqn": "nqn.2018-09.io.spdk:cnode1"
+ })
+
+ nvmet_cfg["ports"].append({
+ "addr": {
+ "adrfam": "ipv4",
+ "traddr": address,
+ "trsvcid": "4420",
+ "trtype": "%s" % self.transport,
+ },
+ "portid": 1,
+ "referrals": [],
+ "subsystems": ["nqn.2018-09.io.spdk:cnode1"]
+ })
+ with open("kernel.conf", 'w') as fh:
+ fh.write(json.dumps(nvmet_cfg, indent=2))
+
+ def kernel_tgt_gen_subsystem_conf(self, nvme_list, address_list):
+
+ nvmet_cfg = {
+ "ports": [],
+ "hosts": [],
+ "subsystems": [],
+ }
+
+ # Split disks between NIC IP's
+ disks_per_ip = int(len(nvme_list) / len(address_list))
+ disk_chunks = [nvme_list[i * disks_per_ip:disks_per_ip + disks_per_ip * i] for i in range(0, len(address_list))]
+
+ subsys_no = 1
+ port_no = 0
+ for ip, chunk in zip(address_list, disk_chunks):
+ for disk in chunk:
+ nvmet_cfg["subsystems"].append({
+ "allowed_hosts": [],
+ "attr": {
+ "allow_any_host": "1",
+ "serial": "SPDK00%s" % subsys_no,
+ "version": "1.3"
+ },
+ "namespaces": [
+ {
+ "device": {
+ "path": disk,
+ "uuid": "%s" % uuid.uuid4()
+ },
+ "enable": 1,
+ "nsid": subsys_no
+ }
+ ],
+ "nqn": "nqn.2018-09.io.spdk:cnode%s" % subsys_no
+ })
+
+ nvmet_cfg["ports"].append({
+ "addr": {
+ "adrfam": "ipv4",
+ "traddr": ip,
+ "trsvcid": "%s" % (4420 + port_no),
+ "trtype": "%s" % self.transport
+ },
+ "portid": subsys_no,
+ "referrals": [],
+ "subsystems": ["nqn.2018-09.io.spdk:cnode%s" % subsys_no]
+ })
+ subsys_no += 1
+ port_no += 1
+
+ with open("kernel.conf", "w") as fh:
+ fh.write(json.dumps(nvmet_cfg, indent=2))
+ pass
+
+ def tgt_start(self):
+ self.log_print("Configuring kernel NVMeOF Target")
+
+ if self.null_block:
+ print("Configuring with null block device.")
+ if len(self.nic_ips) > 1:
+ print("Testing with null block limited to single RDMA NIC.")
+ print("Please specify only 1 IP address.")
+ exit(1)
+ self.subsys_no = 1
+ self.kernel_tgt_gen_nullblock_conf(self.nic_ips[0])
+ else:
+ print("Configuring with NVMe drives.")
+ nvme_list = get_nvme_devices()
+ self.kernel_tgt_gen_subsystem_conf(nvme_list, self.nic_ips)
+ self.subsys_no = len(nvme_list)
+
+ nvmet_command(self.nvmet_bin, "clear")
+ nvmet_command(self.nvmet_bin, "restore kernel.conf")
+ self.log_print("Done configuring kernel NVMeOF Target")
+
+
+class SPDKTarget(Target):
+
+ def __init__(self, name, username, password, mode, nic_ips, transport="rdma",
+ use_null_block=False, sar_settings=None, pcm_settings=None,
+ bandwidth_settings=None, num_shared_buffers=4096, num_cores=1, **kwargs):
+
+ super(SPDKTarget, self).__init__(name, username, password, mode, nic_ips, transport,
+ use_null_block, sar_settings, pcm_settings, bandwidth_settings)
+ self.num_cores = num_cores
+ self.num_shared_buffers = num_shared_buffers
+
+ def spdk_tgt_configure(self):
+ self.log_print("Configuring SPDK NVMeOF target via RPC")
+ numa_list = get_used_numa_nodes()
+
+ # Create RDMA transport layer
+ rpc.nvmf.nvmf_create_transport(self.client, trtype=self.transport, num_shared_buffers=self.num_shared_buffers)
+ self.log_print("SPDK NVMeOF transport layer:")
+ rpc.client.print_dict(rpc.nvmf.nvmf_get_transports(self.client))
+
+ if self.null_block:
+ nvme_section = self.spdk_tgt_add_nullblock()
+ subsystems_section = self.spdk_tgt_add_subsystem_conf(self.nic_ips, req_num_disks=1)
+ else:
+ nvme_section = self.spdk_tgt_add_nvme_conf()
+ subsystems_section = self.spdk_tgt_add_subsystem_conf(self.nic_ips)
+ self.log_print("Done configuring SPDK NVMeOF Target")
+
+ def spdk_tgt_add_nullblock(self):
+ self.log_print("Adding null block bdev to config via RPC")
+ rpc.bdev.bdev_null_create(self.client, 102400, 4096, "Nvme0n1")
+ self.log_print("SPDK Bdevs configuration:")
+ rpc.client.print_dict(rpc.bdev.bdev_get_bdevs(self.client))
+
+ def spdk_tgt_add_nvme_conf(self, req_num_disks=None):
+ self.log_print("Adding NVMe bdevs to config via RPC")
+
+ bdfs = get_nvme_devices_bdf()
+ bdfs = [b.replace(":", ".") for b in bdfs]
+
+ if req_num_disks:
+ if req_num_disks > len(bdfs):
+ self.log_print("ERROR: Requested number of disks is more than available %s" % len(bdfs))
+ sys.exit(1)
+ else:
+ bdfs = bdfs[0:req_num_disks]
+
+ for i, bdf in enumerate(bdfs):
+ rpc.bdev.bdev_nvme_attach_controller(self.client, name="Nvme%s" % i, trtype="PCIe", traddr=bdf)
+
+ self.log_print("SPDK Bdevs configuration:")
+ rpc.client.print_dict(rpc.bdev.bdev_get_bdevs(self.client))
+
+ def spdk_tgt_add_subsystem_conf(self, ips=None, req_num_disks=None):
+ self.log_print("Adding subsystems to config")
+ if not req_num_disks:
+ req_num_disks = get_nvme_devices_count()
+
+ # Distribute bdevs between provided NICs
+ num_disks = range(0, req_num_disks)
+ if len(num_disks) == 1:
+ disks_per_ip = 1
+ else:
+ disks_per_ip = int(len(num_disks) / len(ips))
+ disk_chunks = [num_disks[i * disks_per_ip:disks_per_ip + disks_per_ip * i] for i in range(0, len(ips))]
+
+ # Create subsystems, add bdevs to namespaces, add listeners
+ for ip, chunk in zip(ips, disk_chunks):
+ for c in chunk:
+ nqn = "nqn.2018-09.io.spdk:cnode%s" % c
+ serial = "SPDK00%s" % c
+ bdev_name = "Nvme%sn1" % c
+ rpc.nvmf.nvmf_create_subsystem(self.client, nqn, serial,
+ allow_any_host=True, max_namespaces=8)
+ rpc.nvmf.nvmf_subsystem_add_ns(self.client, nqn, bdev_name)
+
+ rpc.nvmf.nvmf_subsystem_add_listener(self.client, nqn,
+ trtype=self.transport,
+ traddr=ip,
+ trsvcid="4420",
+ adrfam="ipv4")
+
+ self.log_print("SPDK NVMeOF subsystem configuration:")
+ rpc.client.print_dict(rpc.nvmf.nvmf_get_subsystems(self.client))
+
+ def tgt_start(self):
+ if self.null_block:
+ self.subsys_no = 1
+ else:
+ self.subsys_no = get_nvme_devices_count()
+ self.log_print("Starting SPDK NVMeOF Target process")
+ nvmf_app_path = os.path.join(self.spdk_dir, "build/bin/nvmf_tgt")
+ command = " ".join([nvmf_app_path, "-m", self.num_cores])
+ proc = subprocess.Popen(command, shell=True)
+ self.pid = os.path.join(self.spdk_dir, "nvmf.pid")
+
+ with open(self.pid, "w") as fh:
+ fh.write(str(proc.pid))
+ self.nvmf_proc = proc
+ self.log_print("SPDK NVMeOF Target PID=%s" % self.pid)
+ self.log_print("Waiting for spdk to initilize...")
+ while True:
+ if os.path.exists("/var/tmp/spdk.sock"):
+ break
+ time.sleep(1)
+ self.client = rpc.client.JSONRPCClient("/var/tmp/spdk.sock")
+
+ self.spdk_tgt_configure()
+
+ def __del__(self):
+ if hasattr(self, "nvmf_proc"):
+ try:
+ self.nvmf_proc.terminate()
+ self.nvmf_proc.wait()
+ except Exception as e:
+ self.log_print(e)
+ self.nvmf_proc.kill()
+ self.nvmf_proc.communicate()
+
+
+class KernelInitiator(Initiator):
+ def __init__(self, name, username, password, mode, nic_ips, ip, transport,
+ cpus_allowed=None, cpus_allowed_policy="shared",
+ cpu_frequency=None, fio_bin="/usr/src/fio/fio", **kwargs):
+
+ super(KernelInitiator, self).__init__(name, username, password, mode, nic_ips, ip, transport,
+ cpus_allowed=cpus_allowed, cpus_allowed_policy=cpus_allowed_policy,
+ cpu_frequency=cpu_frequency, fio_bin=fio_bin)
+
+ self.extra_params = ""
+ if kwargs["extra_params"]:
+ self.extra_params = kwargs["extra_params"]
+
+ def __del__(self):
+ self.ssh_connection.close()
+
+ def kernel_init_connect(self, address_list, subsys_no):
+ subsystems = self.discover_subsystems(address_list, subsys_no)
+ self.log_print("Below connection attempts may result in error messages, this is expected!")
+ for subsystem in subsystems:
+ self.log_print("Trying to connect %s %s %s" % subsystem)
+ self.remote_call("sudo %s connect -t %s -s %s -n %s -a %s %s" % (self.nvmecli_bin,
+ self.transport,
+ *subsystem,
+ self.extra_params))
+ time.sleep(2)
+
+ def kernel_init_disconnect(self, address_list, subsys_no):
+ subsystems = self.discover_subsystems(address_list, subsys_no)
+ for subsystem in subsystems:
+ self.remote_call("sudo %s disconnect -n %s" % (self.nvmecli_bin, subsystem[1]))
+ time.sleep(1)
+
+ def gen_fio_filename_conf(self, threads, io_depth, num_jobs=1):
+ out, err = self.remote_call("sudo nvme list | grep -E 'SPDK|Linux' | awk '{print $1}'")
+ nvme_list = [x for x in out.split("\n") if "nvme" in x]
+
+ filename_section = ""
+ nvme_per_split = int(len(nvme_list) / len(threads))
+ remainder = len(nvme_list) % len(threads)
+ iterator = iter(nvme_list)
+ result = []
+ for i in range(len(threads)):
+ result.append([])
+ for j in range(nvme_per_split):
+ result[i].append(next(iterator))
+ if remainder:
+ result[i].append(next(iterator))
+ remainder -= 1
+ for i, r in enumerate(result):
+ header = "[filename%s]" % i
+ disks = "\n".join(["filename=%s" % x for x in r])
+ job_section_qd = round((io_depth * len(r)) / num_jobs)
+ if job_section_qd == 0:
+ job_section_qd = 1
+ iodepth = "iodepth=%s" % job_section_qd
+ filename_section = "\n".join([filename_section, header, disks, iodepth])
+
+ return filename_section
+
+
+class SPDKInitiator(Initiator):
+    """Initiator using the SPDK bdev FIO plugin instead of the kernel driver."""
+
+    def __init__(self, name, username, password, mode, nic_ips, ip, transport="rdma",
+                 num_cores=1, cpus_allowed=None, cpus_allowed_policy="shared",
+                 cpu_frequency=None, fio_bin="/usr/src/fio/fio", **kwargs):
+        super(SPDKInitiator, self).__init__(name, username, password, mode, nic_ips, ip, transport,
+                                            cpus_allowed=cpus_allowed, cpus_allowed_policy=cpus_allowed_policy,
+                                            cpu_frequency=cpu_frequency, fio_bin=fio_bin)
+
+        self.num_cores = num_cores
+
+    def install_spdk(self, local_spdk_zip):
+        # Copy the SPDK sources zip to the remote host, build SPDK with the
+        # FIO plugin enabled, and run setup.sh to bind NVMe devices.
+        self.put_file(local_spdk_zip, "/tmp/spdk_drop.zip")
+        self.log_print("Copied sources zip from target")
+        self.remote_call("unzip -qo /tmp/spdk_drop.zip -d %s" % self.spdk_dir)
+
+        self.log_print("Sources unpacked")
+        self.log_print("Using fio binary %s" % self.fio_bin)
+        self.remote_call("cd %s; git submodule update --init; make clean; ./configure --with-rdma --with-fio=%s;"
+                         "make -j$(($(nproc)*2))" % (self.spdk_dir, os.path.dirname(self.fio_bin)))
+
+        self.log_print("SPDK built")
+        self.remote_call("sudo %s/scripts/setup.sh" % self.spdk_dir)
+
+    def gen_spdk_bdev_conf(self, remote_subsystem_list):
+        # Render an INI-style [Nvme] section with one TransportId line per
+        # remote subsystem tuple (trsvcid, subnqn, traddr).
+        header = "[Nvme]"
+        row_template = """  TransportId "trtype:{transport} adrfam:IPv4 traddr:{ip} trsvcid:{svc} subnqn:{nqn}" Nvme{i}"""
+
+        bdev_rows = [row_template.format(transport=self.transport,
+                                         svc=x[0],
+                                         nqn=x[1],
+                                         ip=x[2],
+                                         i=i) for i, x in enumerate(remote_subsystem_list)]
+        bdev_rows = "\n".join(bdev_rows)
+        bdev_section = "\n".join([header, bdev_rows])
+        return bdev_section
+
+    def gen_fio_filename_conf(self, subsystems, threads, io_depth, num_jobs=1):
+        # Build per-job [filename] FIO sections over the SPDK bdev names
+        # (Nvme<i>n1), capping the thread count at the subsystem count.
+        filename_section = ""
+        if len(threads) >= len(subsystems):
+            threads = range(0, len(subsystems))
+        filenames = ["Nvme%sn1" % x for x in range(0, len(subsystems))]
+        nvme_per_split = int(len(subsystems) / len(threads))
+        remainder = len(subsystems) % len(threads)
+        iterator = iter(filenames)
+        result = []
+        for i in range(len(threads)):
+            result.append([])
+            for j in range(nvme_per_split):
+                result[i].append(next(iterator))
+            if remainder:
+                result[i].append(next(iterator))
+                remainder -= 1
+        for i, r in enumerate(result):
+            header = "[filename%s]" % i
+            disks = "\n".join(["filename=%s" % x for x in r])
+            job_section_qd = round((io_depth * len(r)) / num_jobs)
+            if job_section_qd == 0:
+                job_section_qd = 1
+            iodepth = "iodepth=%s" % job_section_qd
+            filename_section = "\n".join([filename_section, header, disks, iodepth])
+
+        return filename_section
+
+
+if __name__ == "__main__":
+    # Entry point: read config.json (or the path given as argv[1]), build
+    # target/initiator objects, optionally distribute and build SPDK on the
+    # initiators, then run the FIO workload matrix and collect results.
+    spdk_zip_path = "/tmp/spdk.zip"
+    target_results_dir = "/tmp/results"
+
+    if (len(sys.argv) > 1):
+        config_file_path = sys.argv[1]
+    else:
+        script_full_dir = os.path.dirname(os.path.realpath(__file__))
+        config_file_path = os.path.join(script_full_dir, "config.json")
+
+    print("Using config file: %s" % config_file_path)
+    with open(config_file_path, "r") as config:
+        data = json.load(config)
+
+    initiators = []
+    fio_cases = []
+
+    # NOTE(review): if the config lacks a "target" or "fio" section, the
+    # variables bound below (target_obj, fio_* settings) are never defined
+    # and later use raises NameError — confirm the config schema requires them.
+    for k, v in data.items():
+        if "target" in k:
+            if data[k]["mode"] == "spdk":
+                target_obj = SPDKTarget(name=k, **data["general"], **v)
+            elif data[k]["mode"] == "kernel":
+                target_obj = KernelTarget(name=k, **data["general"], **v)
+        elif "initiator" in k:
+            if data[k]["mode"] == "spdk":
+                init_obj = SPDKInitiator(name=k, **data["general"], **v)
+            elif data[k]["mode"] == "kernel":
+                init_obj = KernelInitiator(name=k, **data["general"], **v)
+            initiators.append(init_obj)
+        elif "fio" in k:
+            fio_workloads = itertools.product(data[k]["bs"],
+                                              data[k]["qd"],
+                                              data[k]["rw"])
+
+            fio_run_time = data[k]["run_time"]
+            fio_ramp_time = data[k]["ramp_time"]
+            fio_rw_mix_read = data[k]["rwmixread"]
+            fio_run_num = data[k]["run_num"] if "run_num" in data[k].keys() else None
+            fio_num_jobs = data[k]["num_jobs"] if "num_jobs" in data[k].keys() else None
+        else:
+            continue
+
+    # Copy and install SPDK on remote initiators
+    if "skip_spdk_install" not in data["general"]:
+        target_obj.zip_spdk_sources(target_obj.spdk_dir, spdk_zip_path)
+        threads = []
+        for i in initiators:
+            if i.mode == "spdk":
+                t = threading.Thread(target=i.install_spdk, args=(spdk_zip_path,))
+                threads.append(t)
+                t.start()
+        for t in threads:
+            t.join()
+
+    target_obj.tgt_start()
+
+    # Poor mans threading
+    # Run FIO tests
+    for block_size, io_depth, rw in fio_workloads:
+        threads = []
+        configs = []
+        for i in initiators:
+            if i.mode == "kernel":
+                i.kernel_init_connect(i.nic_ips, target_obj.subsys_no)
+
+            cfg = i.gen_fio_config(rw, fio_rw_mix_read, block_size, io_depth, target_obj.subsys_no,
+                                   fio_num_jobs, fio_ramp_time, fio_run_time)
+            configs.append(cfg)
+
+        # One FIO thread per initiator, plus optional measurement threads
+        # (sar / pcm / pcm-memory / bandwidth) running on the target side.
+        for i, cfg in zip(initiators, configs):
+            t = threading.Thread(target=i.run_fio, args=(cfg, fio_run_num))
+            threads.append(t)
+        if target_obj.enable_sar:
+            sar_file_name = "_".join([str(block_size), str(rw), str(io_depth), "sar"])
+            sar_file_name = ".".join([sar_file_name, "txt"])
+            t = threading.Thread(target=target_obj.measure_sar, args=(target_results_dir, sar_file_name))
+            threads.append(t)
+
+        if target_obj.enable_pcm:
+            pcm_file_name = "_".join(["pcm_cpu", str(block_size), str(rw), str(io_depth)])
+            pcm_file_name = ".".join([pcm_file_name, "csv"])
+            t = threading.Thread(target=target_obj.measure_pcm, args=(target_results_dir, pcm_file_name,))
+            threads.append(t)
+
+        if target_obj.enable_pcm_memory:
+            pcm_file_name = "_".join(["pcm_memory", str(block_size), str(rw), str(io_depth)])
+            pcm_file_name = ".".join([pcm_file_name, "csv"])
+            t = threading.Thread(target=target_obj.measure_pcm_memory, args=(target_results_dir, pcm_file_name,))
+            threads.append(t)
+
+        if target_obj.enable_bandwidth:
+            bandwidth_file_name = "_".join(["bandwidth", str(block_size), str(rw), str(io_depth)])
+            bandwidth_file_name = ".".join([bandwidth_file_name, "csv"])
+            t = threading.Thread(target=target_obj.measure_bandwidth, args=(target_results_dir, bandwidth_file_name,))
+            threads.append(t)
+
+        for t in threads:
+            t.start()
+        for t in threads:
+            t.join()
+
+        for i in initiators:
+            if i.mode == "kernel":
+                i.kernel_init_disconnect(i.nic_ips, target_obj.subsys_no)
+            i.copy_result_files(target_results_dir)
+
+    target_obj.parse_results(target_results_dir)
diff --git a/src/spdk/scripts/perf/vhost/fio_test.conf b/src/spdk/scripts/perf/vhost/fio_test.conf
new file mode 100644
index 000000000..c480f1966
--- /dev/null
+++ b/src/spdk/scripts/perf/vhost/fio_test.conf
@@ -0,0 +1,20 @@
+[global]
+ioengine=libaio
+thread=1
+group_reporting=1
+direct=1
+verify=0
+norandommap=1
+
+[perf_test]
+stonewall
+description="Run NVMe driver performance test for a given workload"
+bs={blksize}
+rw={rw}
+rwmixread={rwmixread}
+iodepth={iodepth}
+time_based=1
+ramp_time={ramptime}
+runtime={runtime}
+numjobs={numjobs}
+filename=
diff --git a/src/spdk/scripts/perf/vhost/run_vhost_test.py b/src/spdk/scripts/perf/vhost/run_vhost_test.py
new file mode 100644
index 000000000..e6d86161f
--- /dev/null
+++ b/src/spdk/scripts/perf/vhost/run_vhost_test.py
@@ -0,0 +1,219 @@
+import os
+import sys
+import argparse
+import multiprocessing
+import subprocess
+from subprocess import check_call, call, check_output, Popen, PIPE
+
+
+def range_incl(a, b):
+    # Inclusive integer range [a, b] as a list.
+    return list(range(a, b + 1))
+
+
+def list_spdk_used_cpus(cpus):
+ cpu_list = []
+ for chunk in cpus.split(","):
+ if "-" in chunk:
+ _ = chunk.split("-")
+ _ = list(map(int, _))
+ cpu_list.extend(list(range_incl(*_)))
+ else:
+ cpu_list.append(int(chunk))
+ return cpu_list
+
+
+def gen_cpu_mask_config(output_dir, spdk_cpu_list, vm_count, vm_cpu_num):
+    # Combine the SPDK vhost section and the per-VM QEMU sections into a
+    # single "mask_config" file in output_dir; returns the file path.
+    spdk = gen_spdk_cpu_mask_config(spdk_cpu_list)
+    qemu = gen_qemu_cpu_mask_config(spdk_cpu_list, vm_count, vm_cpu_num)
+    file_path = os.path.join(output_dir, "mask_config")
+    with open(file_path, "w") as fh:
+        fh.write("".join([spdk, qemu]))
+    return file_path
+
+
+def gen_spdk_cpu_mask_config(spdk_cpu_list):
+ cpus = "vhost_0_reactor_mask=[%s]" % (spdk_cpu_list)
+
+ # Go through assigned CPUs and use the lowest CPU index as
+ # default primary core
+ cpu_indexes = list_spdk_used_cpus(spdk_cpu_list)
+ cpu_indexes.sort()
+ print(cpu_indexes)
+
+ pr_core = "vhost_0_master_core=%s" % (cpu_indexes[0])
+ return "\n".join([cpus, pr_core, "\n"])
+
+
+def get_host_cpus():
+    # Return [numa0_cpus, numa1_cpus] for the local machine, assuming a
+    # 2-socket server. Hyper-threading is detected by grepping lscpu's
+    # "Thread(s) per core" line for the value 2; with HT on, the sibling
+    # logical CPUs are assumed to occupy the upper half of the CPU index
+    # space (kernel's usual enumeration) — TODO confirm on target hardware.
+    cpu_num = multiprocessing.cpu_count()
+    cpu_list = list(range(0, cpu_num))
+    output = check_output("lscpu | grep 'per core'", shell=True)
+
+    # Assuming 2-socket server
+    if "2" in str(output):
+        ht_enabled = True
+        cpu_chunk = int(cpu_num/4)
+        numa0_cpus = cpu_list[0:cpu_chunk]
+        numa0_cpus.extend(cpu_list[2*cpu_chunk:3*cpu_chunk])
+        numa1_cpus = cpu_list[cpu_chunk:2*cpu_chunk]
+        numa1_cpus.extend(cpu_list[3*cpu_chunk:4*cpu_chunk])
+    else:
+        ht_enabled = False
+        cpu_chunk = int(cpu_num/2)
+        numa0_cpus = cpu_list[:cpu_chunk]
+        numa1_cpus = cpu_list[cpu_chunk:]
+    return [numa0_cpus, numa1_cpus]
+
+
+def gen_qemu_cpu_mask_config(spdk_cpu_list, vm_count, vm_cpu_num):
+    # Build VM_<i>_qemu_mask / VM_<i>_qemu_numa_node config lines for each
+    # VM, drawing CPUs from NUMA node 0 first, then node 1, and excluding
+    # the cores reserved for SPDK. Stops early when cores run out.
+    print("Creating masks for QEMU")
+    ret = ""
+
+    # Exclude SPDK cores from available CPU list
+    numa0_cpus, numa1_cpus = get_host_cpus()
+    spdk_cpus = list_spdk_used_cpus(spdk_cpu_list)
+    spdk_cpus.sort()
+
+    numa0_cpus = sorted(list(set(numa0_cpus) - set(spdk_cpus)))
+    numa1_cpus = sorted(list(set(numa1_cpus) - set(spdk_cpus)))
+
+    # Generate qemu cpu mask and numa param for VMs out of
+    # remaining free CPU cores.
+    # All CPUs assigned to a VM will come from the same NUMA node.
+    # Assuming 2 socket server.
+    used_numa = 0
+    available = numa0_cpus
+    for i in range(0, vm_count):
+        cpus = [str(x) for x in available[0:vm_cpu_num]]
+
+        # If there is not enough cores on first numa node for a VM
+        # then switch to next numa node
+        if len(cpus) < vm_cpu_num and used_numa == 0:
+            available = numa1_cpus
+            used_numa = 1
+            cpus = [str(x) for x in available[0:vm_cpu_num]]
+
+        # If not enough cores on second numa node - break and exit
+        if len(cpus) < vm_cpu_num and used_numa == 1:
+            print("There is not enough CPU Cores available on \
+            Numa node1 to create VM %s" % i)
+            break
+
+        cpus = ",".join(cpus)
+        cpus = "VM_%s_qemu_mask=%s" % (i, cpus)
+        numa = "VM_%s_qemu_numa_node=%s\n" % (i, used_numa)
+
+        # Remove used CPU cores from available list
+        available = available[vm_cpu_num:]
+        ret = "\n".join([ret, cpus, numa])
+
+    return ret
+
+
+def create_fio_cfg(template_dir, output_dir, **kwargs):
+ fio_tempalte = os.path.join(template_dir, "fio_test.conf")
+ with open("scripts/perf/vhost/fio_test.conf", "r") as fh:
+ cfg = fh.read()
+ cfg = cfg.format(**kwargs)
+
+ file_path = os.path.join(output_dir, "fio_job.cfg")
+ with open(file_path, "w") as fh:
+ fh.write(cfg)
+ return file_path
+
+
+# Top-level driver: parse CLI args, render the FIO job file and (optionally)
+# a CPU mask config, then assemble the vhost_perf.sh command line.
+script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
+parser = argparse.ArgumentParser()
+
+parser.add_argument('blksize', default="4k", type=str,
+                    help="Block size param for FIO. Default: 4k")
+parser.add_argument('iodepth', default="128", type=str,
+                    help="Iodepth param for FIO. Default: 128")
+parser.add_argument('rw', default="randread", type=str,
+                    help="RW param for FIO. Default: randread")
+parser.add_argument('-m', '--rwmixread', default="70", type=str,
+                    help="Percentage of reads in read-write mode. Default: 70")
+parser.add_argument('-n', '--numjobs', default="1", type=str,
+                    help="Numjobs value for FIO job. Default: 1")
+parser.add_argument('-r', '--runtime', default="10", type=str,
+                    help="Run time param for FIO (in seconds). Default: 10")
+parser.add_argument('-R', '--ramptime', default="10", type=str,
+                    help="Ramp time param for FIO (in seconds). Default: 10")
+parser.add_argument('-c', '--ctrl-type', default="spdk_vhost_scsi", type=str,
+                    help="Type of vhost controller to use in test.\
+                    Possible options: spdk_vhost_scsi, spdk_vhost_blk\
+                    Default: spdk_vhost_scsi")
+# NOTE(review): argparse `type=bool` is a known footgun — bool("False") is
+# True, so any non-empty string enables --split. Consider action="store_true".
+parser.add_argument('-s', '--split', default=False, type=bool,
+                    help="Use split vbdevs instead of logical volumes. Default: false")
+parser.add_argument('-d', '--max-disks', default=0, type=int,
+                    help="How many physical disks to use in test. Default: all disks.\
+                    Depending on the number of --vm-count disks may be split into\
+                    smaller logical bdevs (splits or logical volumes) so that\
+                    each virtual machine gets it's own bdev to work on.")
+parser.add_argument('-v', '--vm-count', default=1, type=int,
+                    help="How many VMs to run in test. Default: 1")
+parser.add_argument('-i', '--vm-image', default="$HOME/vhost_vm_image.qcow2",
+                    type=str, help="VM image to use for running VMs.")
+
+subparsers = parser.add_subparsers()
+cpu_cfg_create = subparsers.add_parser('create_cpu_cfg',
+                                       help="Generate a CPU config file for test.\
+                                       This option will attempt to automatically\
+                                       generate config file with SPDK/QEMU cpu lists.\
+                                       CPU cores on NUMA Node 0 will be used first\
+                                       (including logical cores when HT is enabled)\
+                                       and NUMA Node 1 will be used last.")
+cpu_cfg_create.add_argument('spdk_cpu_list', default=None,
+                            help="List of CPU cores to be used by SPDK vhost app.\
+                            Accepted format examples:\
+                            single cpus: 0,2,4\
+                            ranges (inclusive!): 0-2\
+                            mixed: 0,2-5,9")
+cpu_cfg_create.add_argument('vm_cpu_num', default=None, type=int)
+
+cpu_cfg_load = subparsers.add_parser('load_cpu_cfg',
+                                     help="Load and use a CPU config file for test\
+                                     Example configuration files can be found in:\
+                                     test/vhost/common/autotest.config")
+cpu_cfg_load.add_argument('custom_mask_file', default=None,
+                          help="Path to file with custom values for vhost's\
+                          reactor mask and master core, and each VM's qemu mask\
+                          and qemu numa node")
+
+args = parser.parse_args()
+fio_cfg_path = create_fio_cfg(script_dir, script_dir, **vars(args))
+
+# `in args` checks attribute presence on the Namespace, i.e. which
+# subcommand (create_cpu_cfg / load_cpu_cfg) was selected.
+cpu_cfg_arg = ""
+disk_arg = ""
+split_arg = ""
+if "spdk_cpu_list" in args:
+    cfg_path = gen_cpu_mask_config(script_dir, args.spdk_cpu_list, args.vm_count, args.vm_cpu_num)
+    cpu_cfg_arg = "--custom-cpu-cfg=%s" % cfg_path
+if "custom_mask_file" in args:
+    cpu_cfg_arg = "--custom-cpu-cfg=%s" % args.custom_mask_file
+if args.split is True:
+    split_arg = "--use-split"
+if args.max_disks > 0:
+    disk_arg = "--max-disks=%s" % args.max_disks
+
+
+command = " ".join(["test/vhost/perf_bench/vhost_perf.sh",
+                    "--vm-image=%s" % args.vm_image,
+                    "--vm-count=%s" % args.vm_count,
+                    "--ctrl-type=%s" % args.ctrl_type,
+                    "%s" % split_arg,
+                    "%s" % disk_arg,
+                    "--fio-job=%s" % fio_cfg_path,
+                    "%s" % cpu_cfg_arg])
+# TODO: Disabled for now.
+# Reason: initially this script was supposed to be a wrapper for .sh script and would
+# - generate FIO config
+# - generate SPDK/QEMU CPU mask configuration file
+# - run test script
+# Auto-generating CPU masks configuration needs some more work to be done
+# and increasing number of params makes .py script hard to use.
+# Will cleanup here soon.
+
+# print("INFO: Running perf test with command:")
+# print(command)
+# pr = check_output(command, shell=True)
diff --git a/src/spdk/scripts/pkgdep.sh b/src/spdk/scripts/pkgdep.sh
new file mode 100755
index 000000000..7d748c437
--- /dev/null
+++ b/src/spdk/scripts/pkgdep.sh
@@ -0,0 +1,160 @@
+#!/usr/bin/env bash
+# Please run this script as root.
+
+set -e
+
+# Print usage/help text and exit successfully.
+function usage() {
+	echo ""
+	echo "This script is intended to automate the installation of package dependencies to build SPDK."
+	echo "Please run this script as root user or with sudo -E."
+	echo ""
+	echo "$0"
+	echo "  -h --help"
+	echo "  -a --all"
+	echo "  -d --developer-tools        Install tools for developers (code styling, code coverage, etc.)"
+	echo "  -p --pmem                   Additional dependencies for reduce and pmdk"
+	echo "  -f --fuse                   Additional dependencies for FUSE and NVMe-CUSE"
+	echo "  -r --rdma                   Additional dependencies for RDMA transport in NVMe over Fabrics"
+	echo "  -b --docs                   Additional dependencies for building docs"
+	echo "  -u --uring                  Additional dependencies for io_uring"
+	echo ""
+	exit 0
+}
+
+# Enable every optional dependency group (used by -a/--all).
+function install_all_dependencies() {
+	INSTALL_DEV_TOOLS=true
+	INSTALL_PMEM=true
+	INSTALL_FUSE=true
+	INSTALL_RDMA=true
+	INSTALL_DOCS=true
+	INSTALL_LIBURING=true
+}
+
+# Build and install liburing from source unless the shared library is
+# already present under /usr/lib64. Clones the upstream repo on first run.
+function install_liburing() {
+	local GIT_REPO_LIBURING=https://github.com/axboe/liburing.git
+	local liburing_dir=/usr/local/src/liburing
+
+	if [[ -e /usr/lib64/liburing.so ]]; then
+		echo "liburing is already installed. skipping"
+	else
+		if [[ -d $liburing_dir ]]; then
+			echo "liburing source already present, not cloning"
+		else
+			# Fix: quote the expansion and use -p so missing parent
+			# directories (e.g. /usr/local/src) do not abort the script.
+			mkdir -p "$liburing_dir"
+			git clone "${GIT_REPO_LIBURING}" "$liburing_dir"
+		fi
+		(cd "$liburing_dir" && ./configure --libdir=/usr/lib64 && make install)
+	fi
+}
+
+# Install a pinned shfmt release binary (used for shell format checks).
+# Skips when the right version is already on PATH; a download failure is
+# non-fatal — the script prints a notice and continues.
+function install_shfmt() {
+	# Fetch version that has been tested
+	local shfmt_version=3.1.0
+	local shfmt=shfmt-$shfmt_version
+	local shfmt_dir=${SHFMT_DIR:-/opt/shfmt}
+	local shfmt_dir_out=${SHFMT_DIR_OUT:-/usr/bin}
+	local shfmt_url
+	local os
+
+	if hash "$shfmt" && [[ $("$shfmt" --version) == "v$shfmt_version" ]]; then
+		echo "$shfmt already installed"
+		return 0
+	fi 2> /dev/null
+
+	os=$(uname -s)
+
+	# Pick the prebuilt binary matching the host OS; other OSes are skipped.
+	case "$os" in
+		Linux) shfmt_url=https://github.com/mvdan/sh/releases/download/v$shfmt_version/shfmt_v${shfmt_version}_linux_amd64 ;;
+		FreeBSD) shfmt_url=https://github.com/mvdan/sh/releases/download/v$shfmt_version/shfmt_v${shfmt_version}_freebsd_amd64 ;;
+		*)
+			echo "Not supported OS (${os:-Unknown}), skipping"
+			return 0
+			;;
+	esac
+
+	mkdir -p "$shfmt_dir"
+	mkdir -p "$shfmt_dir_out"
+
+	echo "Fetching ${shfmt_url##*/}"...
+	local err
+	if err=$(curl -f -Lo"$shfmt_dir/$shfmt" "$shfmt_url" 2>&1); then
+		chmod +x "$shfmt_dir/$shfmt"
+		ln -sf "$shfmt_dir/$shfmt" "$shfmt_dir_out"
+	else
+		cat <<- CURL_ERR
+
+			* Fetching $shfmt_url failed, $shfmt will not be available for format check.
+			* Error:
+
+			$err
+
+		CURL_ERR
+		return 0
+	fi
+	echo "$shfmt installed"
+}
+
+# Default: install only the core build dependencies; optional groups are
+# toggled via CLI flags below.
+INSTALL_CRYPTO=false
+INSTALL_DEV_TOOLS=false
+INSTALL_PMEM=false
+INSTALL_FUSE=false
+INSTALL_RDMA=false
+INSTALL_DOCS=false
+INSTALL_LIBURING=false
+
+# Parse short options via getopts; long options arrive as '-' with the
+# long name in $OPTARG (the "getopts long option" idiom).
+while getopts 'abdfhipru-:' optchar; do
+	case "$optchar" in
+		-)
+			case "$OPTARG" in
+				help) usage ;;
+				all) install_all_dependencies ;;
+				developer-tools) INSTALL_DEV_TOOLS=true ;;
+				pmem) INSTALL_PMEM=true ;;
+				fuse) INSTALL_FUSE=true ;;
+				rdma) INSTALL_RDMA=true ;;
+				docs) INSTALL_DOCS=true ;;
+				uring) INSTALL_LIBURING=true ;;
+				*)
+					echo "Invalid argument '$OPTARG'"
+					usage
+					;;
+			esac
+			;;
+		h) usage ;;
+		a) install_all_dependencies ;;
+		d) INSTALL_DEV_TOOLS=true ;;
+		p) INSTALL_PMEM=true ;;
+		f) INSTALL_FUSE=true ;;
+		r) INSTALL_RDMA=true ;;
+		b) INSTALL_DOCS=true ;;
+		u) INSTALL_LIBURING=true ;;
+		*)
+			echo "Invalid argument '$OPTARG'"
+			usage
+			;;
+	esac
+done
+
+trap 'set +e; trap - ERR; echo "Error!"; exit 1;' ERR
+
+scriptsdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $scriptsdir/..)
+
+OS=$(uname -s)
+
+# Derive the distro ID from /etc/os-release, falling back to the kernel
+# name (lowercased) on systems without it (e.g. FreeBSD).
+if [[ -e /etc/os-release ]]; then
+	source /etc/os-release
+fi
+
+ID=${ID:-$OS} ID=${ID,,}
+
+#Link suse related OS to sles
+if [[ ${ID,,} == *"suse"* ]]; then
+	ID="sles"
+fi
+
+# Dispatch to the per-distro dependency script (pkgdep/<id>.sh).
+# NOTE(review): the unsupported-platform branch prints to stderr but does
+# not exit non-zero — confirm whether callers rely on the exit status.
+if [[ -e $scriptsdir/pkgdep/$ID.sh ]]; then
+	source "$scriptsdir/pkgdep/$ID.sh"
+else
+	printf 'Not supported platform detected (%s), aborting\n' "$ID" >&2
+fi
diff --git a/src/spdk/scripts/pkgdep/arch.sh b/src/spdk/scripts/pkgdep/arch.sh
new file mode 100755
index 000000000..53e5d8d7d
--- /dev/null
+++ b/src/spdk/scripts/pkgdep/arch.sh
@@ -0,0 +1,77 @@
+#!/usr/bin/env bash
+
+# Install main dependencies
+pacman -Sy --needed --noconfirm gcc make cunit libaio openssl \
+ libutil-linux libiscsi python ncurses ninja meson
+# Additional dependencies for SPDK CLI
+pacman -Sy --needed --noconfirm python-pexpect python-pip libffi
+pip install configshell_fb
+# Additional dependencies for DPDK
+pacman -Sy --needed --noconfirm numactl nasm
+# Additional dependencies for ISA-L used in compression
+pacman -Sy --needed --noconfirm autoconf automake libtool help2man
+if [[ $INSTALL_DEV_TOOLS == "true" ]]; then
+ # Tools for developers
+ pacman -Sy --needed --noconfirm git astyle autopep8 \
+ clang sg3_utils pciutils shellcheck
+	#fakeroot needed to install via makepkg
+ pacman -Sy --needed --noconfirm fakeroot
+ su - $SUDO_USER -c "pushd /tmp;
+ git clone https://aur.archlinux.org/perl-perlio-gzip.git;
+ cd perl-perlio-gzip;
+ yes y | makepkg -si --needed;
+ cd ..; rm -rf perl-perlio-gzip
+ popd"
+ # sed is to modify sources section in PKGBUILD
+ # By default it uses git:// which will fail behind proxy, so
+ # redirect it to http:// source instead
+ su - $SUDO_USER -c "pushd /tmp;
+ git clone https://aur.archlinux.org/lcov-git.git;
+ cd lcov-git;
+ sed -i 's/git:/git+http:/' PKGBUILD;
+ makepkg -si --needed --noconfirm;
+ cd .. && rm -rf lcov-git;
+ popd"
+ install_shfmt
+fi
+if [[ $INSTALL_PMEM == "true" ]]; then
+ # Additional dependencies for building pmem based backends
+ pacman -Sy --needed --noconfirm ndctl pkg-config
+ git clone https://github.com/pmem/pmdk.git /tmp/pmdk -b 1.6.1
+ make -C /tmp/pmdk -j$(nproc)
+ make install prefix=/usr -C /tmp/pmdk
+ echo "/usr/local/lib" > /etc/ld.so.conf.d/pmdk.conf
+ ldconfig
+ rm -rf /tmp/pmdk
+fi
+if [[ $INSTALL_FUSE == "true" ]]; then
+ # Additional dependencies for FUSE and NVMe-CUSE
+ pacman -Sy --needed --noconfirm fuse3
+fi
+if [[ $INSTALL_RDMA == "true" ]]; then
+ # Additional dependencies for RDMA transport in NVMe over Fabrics
+ if [[ -n "$http_proxy" ]]; then
+ gpg_options=" --keyserver hkp://pgp.mit.edu:11371 --keyserver-options \"http-proxy=$http_proxy\""
+ fi
+ su - $SUDO_USER -c "gpg $gpg_options --recv-keys 29F0D86B9C1019B1"
+ su - $SUDO_USER -c "pushd /tmp;
+ git clone https://aur.archlinux.org/rdma-core.git;
+ cd rdma-core;
+ makepkg -si --needed --noconfirm;
+ cd .. && rm -rf rdma-core;
+ popd"
+fi
+if [[ $INSTALL_DOCS == "true" ]]; then
+ # Additional dependencies for building docs
+ pacman -Sy --needed --noconfirm doxygen graphviz
+ pacman -S --noconfirm --needed gd ttf-font
+ su - $SUDO_USER -c "pushd /tmp;
+ git clone https://aur.archlinux.org/mscgen.git;
+ cd mscgen;
+ makepkg -si --needed --noconfirm;
+ cd .. && rm -rf mscgen;
+ popd"
+fi
+if [[ $INSTALL_LIBURING == "true" ]]; then
+ install_liburing
+fi
diff --git a/src/spdk/scripts/pkgdep/centos.sh b/src/spdk/scripts/pkgdep/centos.sh
new file mode 120000
index 000000000..0f75e52a1
--- /dev/null
+++ b/src/spdk/scripts/pkgdep/centos.sh
@@ -0,0 +1 @@
+rhel.sh \ No newline at end of file
diff --git a/src/spdk/scripts/pkgdep/clear-linux-os.sh b/src/spdk/scripts/pkgdep/clear-linux-os.sh
new file mode 100755
index 000000000..a79a67e79
--- /dev/null
+++ b/src/spdk/scripts/pkgdep/clear-linux-os.sh
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+
+# Install main dependencies
+swupd bundle-add -y c-basic make dev-utils openssl devpkg-libiscsi \
+ devpkg-ncurses python3-basic python-extras devpkg-open-iscsi \
+ storage-utils
+# Additional dependencies for ISA-L used in compression
+swupd bundle-add -y dev-utils-dev
+# Additional dependencies for DPDK
+swupd bundle-add -y nasm sysadmin-basic
+# Additional dependencies for SPDK CLI
+pip3 install pexpect
+pip3 install configshell_fb
+if [[ $INSTALL_DEV_TOOLS == "true" ]]; then
+ swupd bundle-add -y git os-testsuite-0day
+ install_shfmt
+fi
+if [[ $INSTALL_PMEM == "true" ]]; then
+ # Additional dependencies for building pmem based backends
+ swupd bundle-add -y devpkg-pmdk
+fi
+if [[ $INSTALL_FUSE == "true" ]]; then
+ # Additional dependencies for FUSE and NVMe-CUSE
+ swupd bundle-add -y devpkg-fuse
+fi
+if [[ $INSTALL_RDMA == "true" ]]; then
+ # Additional dependencies for RDMA transport in NVMe over Fabrics
+ swupd bundle-add -y devpkg-rdma-core network-basic-dev
+fi
+if [[ $INSTALL_DOCS == "true" ]]; then
+ # Additional dependencies for building docs
+ swupd bundle-add -y doxygen graphviz
+fi
diff --git a/src/spdk/scripts/pkgdep/debian.sh b/src/spdk/scripts/pkgdep/debian.sh
new file mode 100755
index 000000000..2513a0160
--- /dev/null
+++ b/src/spdk/scripts/pkgdep/debian.sh
@@ -0,0 +1,58 @@
+#!/usr/bin/env bash
+
+VERSION_ID_NUM=$(sed 's/\.//g' <<< $VERSION_ID)
+# Includes Ubuntu, Debian
+# Minimal install
+apt-get install -y gcc g++ make libcunit1-dev libaio-dev libssl-dev \
+ uuid-dev libiscsi-dev python libncurses5-dev libncursesw5-dev python3-pip
+pip3 install ninja
+pip3 install meson
+# Additional dependencies for SPDK CLI - not available on older Ubuntus
+apt-get install -y python3-configshell-fb python3-pexpect || echo \
+ "Note: Some SPDK CLI dependencies could not be installed."
+
+# Additional dependencies for DPDK
+if [[ $NAME == "Ubuntu" ]] && [[ $VERSION_ID_NUM -lt 1900 ]]; then
+ echo "Ubuntu $VERSION_ID needs NASM version 2.13.03 for DPDK but is not in the mainline repository."
+ echo "You can install it manually"
+else
+ apt-get install -y nasm
+fi
+apt-get install -y libnuma-dev
+# Additional dependencies for ISA-L used in compression
+apt-get install -y autoconf automake libtool help2man
+if [[ $INSTALL_DEV_TOOLS == "true" ]]; then
+ # Tools for developers
+ apt-get install -y git astyle pep8 lcov clang sg3-utils pciutils shellcheck
+ # Additional python style checker not available on ubuntu 16.04 or earlier.
+ apt-get install -y pycodestyle || true
+	# Additional dependencies for nvmf performance test script
+ apt-get install -y python3-paramiko
+ install_shfmt
+fi
+if [[ $INSTALL_PMEM == "true" ]]; then
+ # Additional dependencies for building pmem based backends
+ if [[ $NAME == "Ubuntu" ]] && [[ $VERSION_ID_NUM -gt 1800 ]]; then
+ apt-get install -y libpmem-dev
+ fi
+fi
+if [[ $INSTALL_FUSE == "true" ]]; then
+ # Additional dependencies for FUSE and NVMe-CUSE
+ if [[ $NAME == "Ubuntu" ]] && ((VERSION_ID_NUM > 1400 && VERSION_ID_NUM < 1900)); then
+ echo "Ubuntu $VERSION_ID does not have libfuse3-dev in mainline repository."
+ echo "You can install it manually"
+ else
+ apt-get install -y libfuse3-dev
+ fi
+fi
+if [[ $INSTALL_RDMA == "true" ]]; then
+ # Additional dependencies for RDMA transport in NVMe over Fabrics
+ apt-get install -y libibverbs-dev librdmacm-dev
+fi
+if [[ $INSTALL_DOCS == "true" ]]; then
+ # Additional dependencies for building docs
+ apt-get install -y doxygen mscgen graphviz
+fi
+if [[ $INSTALL_LIBURING == "true" ]]; then
+ install_liburing
+fi
diff --git a/src/spdk/scripts/pkgdep/fedora.sh b/src/spdk/scripts/pkgdep/fedora.sh
new file mode 120000
index 000000000..0f75e52a1
--- /dev/null
+++ b/src/spdk/scripts/pkgdep/fedora.sh
@@ -0,0 +1 @@
+rhel.sh \ No newline at end of file
diff --git a/src/spdk/scripts/pkgdep/freebsd.sh b/src/spdk/scripts/pkgdep/freebsd.sh
new file mode 100755
index 000000000..032170b6d
--- /dev/null
+++ b/src/spdk/scripts/pkgdep/freebsd.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+
+# Minimal install
+pkg install -y gmake cunit openssl git bash misc/e2fsprogs-libuuid python \
+ ncurses ninja meson
+# Additional dependencies for ISA-L used in compression
+pkg install -y autoconf automake libtool help2man
+if [[ $INSTALL_DEV_TOOLS == "true" ]]; then
+ # Tools for developers
+ pkg install -y devel/astyle bash py27-pycodestyle \
+ misc/e2fsprogs-libuuid sysutils/sg3_utils nasm
+ install_shfmt
+fi
+if [[ $INSTALL_DOCS == "true" ]]; then
+ # Additional dependencies for building docs
+ pkg install -y doxygen mscgen graphviz
+fi
diff --git a/src/spdk/scripts/pkgdep/rhel.sh b/src/spdk/scripts/pkgdep/rhel.sh
new file mode 100755
index 000000000..af5d4c0b3
--- /dev/null
+++ b/src/spdk/scripts/pkgdep/rhel.sh
@@ -0,0 +1,73 @@
+#!/usr/bin/env bash
+
+# Minimal install
+if echo "$ID $VERSION_ID" | grep -E -q 'centos 8'; then
+ # Add PowerTools needed for install CUnit-devel in Centos8
+ yum install -y yum-utils
+ yum config-manager --set-enabled PowerTools
+fi
+yum install -y gcc gcc-c++ make CUnit-devel libaio-devel openssl-devel \
+ libuuid-devel libiscsi-devel ncurses-devel
+if echo "$ID $VERSION_ID" | grep -E -q 'centos 8'; then
+ yum install -y python36
+ #Create hard link to use in SPDK as python
+ ln /etc/alternatives/python3 /usr/bin/python || true
+else
+ yum install -y python
+fi
+yum install -y python3-pip
+pip-3 install ninja
+pip-3 install meson
+
+# Additional dependencies for SPDK CLI - not available in rhel and centos
+if ! echo "$ID $VERSION_ID" | grep -E -q 'rhel 7|centos 7'; then
+ yum install -y python3-configshell python3-pexpect
+fi
+# Additional dependencies for ISA-L used in compression
+yum install -y autoconf automake libtool help2man
+# Additional dependencies for DPDK
+yum install -y numactl-devel nasm
+if [[ $INSTALL_DEV_TOOLS == "true" ]]; then
+ # Tools for developers
+ # Includes Fedora, CentOS 7, RHEL 7
+ # Add EPEL repository for CUnit-devel
+ if echo "$ID $VERSION_ID" | grep -E -q 'rhel 7|centos 7|centos 8'; then
+ if ! rpm --quiet -q epel-release; then
+ yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
+ fi
+
+ if [[ $ID = 'rhel' ]]; then
+ subscription-manager repos --enable "rhel-*-optional-rpms" --enable "rhel-*-extras-rpms"
+ elif [[ $ID = 'centos' ]]; then
+ yum --enablerepo=extras install -y epel-release
+ fi
+ fi
+ if echo "$ID $VERSION_ID" | grep -E -q 'centos 8'; then
+ yum install -y python3-pycodestyle
+ echo "Centos 8 does not have lcov and ShellCheck dependencies"
+ else
+ yum install -y python-pycodestyle lcov ShellCheck
+ fi
+ yum install -y git astyle sg3_utils pciutils
+ install_shfmt
+fi
+if [[ $INSTALL_PMEM == "true" ]]; then
+ # Additional dependencies for building pmem based backends
+ yum install -y libpmemblk-devel || true
+fi
+if [[ $INSTALL_FUSE == "true" ]]; then
+ # Additional dependencies for FUSE and NVMe-CUSE
+ yum install -y fuse3-devel
+fi
+if [[ $INSTALL_RDMA == "true" ]]; then
+ # Additional dependencies for RDMA transport in NVMe over Fabrics
+ yum install -y libibverbs-devel librdmacm-devel
+fi
+if [[ $INSTALL_DOCS == "true" ]]; then
+ # Additional dependencies for building docs
+ yum install -y mscgen || echo "Warning: couldn't install mscgen via yum. Please install mscgen manually."
+ yum install -y doxygen graphviz
+fi
+if [[ $INSTALL_LIBURING == "true" ]]; then
+ install_liburing
+fi
diff --git a/src/spdk/scripts/pkgdep/sles.sh b/src/spdk/scripts/pkgdep/sles.sh
new file mode 100755
index 000000000..dacf6d1b5
--- /dev/null
+++ b/src/spdk/scripts/pkgdep/sles.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+
+# Minimal install
+zypper install -y gcc gcc-c++ make cunit-devel libaio-devel libopenssl-devel \
+ libuuid-devel python-base ncurses-devel ninja meson
+# Additional dependencies for DPDK
+zypper install -y libnuma-devel nasm
+# Additional dependencies for ISA-L used in compression
+zypper install -y autoconf automake libtool help2man
+if [[ $INSTALL_DEV_TOOLS == "true" ]]; then
+ # Tools for developers
+ zypper install -y git-core lcov python-pycodestyle sg3_utils \
+ pciutils ShellCheck
+ install_shfmt
+fi
+if [[ $INSTALL_PMEM == "true" ]]; then
+ # Additional dependencies for building pmem based backends
+ zypper install -y libpmemblk-devel
+fi
+if [[ $INSTALL_FUSE == "true" ]]; then
+ # Additional dependencies for FUSE and NVMe-CUSE
+ zypper install -y fuse3-devel
+fi
+if [[ $INSTALL_RDMA == "true" ]]; then
+ # Additional dependencies for RDMA transport in NVMe over Fabrics
+ zypper install -y rdma-core-devel
+fi
+if [[ $INSTALL_DOCS == "true" ]]; then
+ # Additional dependencies for building docs
+ zypper install -y doxygen mscgen graphviz
+fi
+if [[ $INSTALL_LIBURING == "true" ]]; then
+ install_liburing
+fi
diff --git a/src/spdk/scripts/pkgdep/ubuntu.sh b/src/spdk/scripts/pkgdep/ubuntu.sh
new file mode 120000
index 000000000..0edcb8b83
--- /dev/null
+++ b/src/spdk/scripts/pkgdep/ubuntu.sh
@@ -0,0 +1 @@
+debian.sh \ No newline at end of file
diff --git a/src/spdk/scripts/posix.txt b/src/spdk/scripts/posix.txt
new file mode 100644
index 000000000..2d07f23df
--- /dev/null
+++ b/src/spdk/scripts/posix.txt
@@ -0,0 +1,82 @@
+<aio.h>
+<arpa/inet.h>
+<assert.h>
+<complex.h>
+<cpio.h>
+<ctype.h>
+<dirent.h>
+<dlfcn.h>
+<errno.h>
+<fcntl.h>
+<fenv.h>
+<float.h>
+<fmtmsg.h>
+<fnmatch.h>
+<ftw.h>
+<glob.h>
+<grp.h>
+<iconv.h>
+<inttypes.h>
+<iso646.h>
+<langinfo.h>
+<libgen.h>
+<limits.h>
+<locale.h>
+<math.h>
+<monetary.h>
+<mqueue.h>
+<ndbm.h>
+<net/if.h>
+<netdb.h>
+<netinet/in.h>
+<netinet/tcp.h>
+<nl_types.h>
+<poll.h>
+<pthread.h>
+<pwd.h>
+<regex.h>
+<sched.h>
+<search.h>
+<semaphore.h>
+<setjmp.h>
+<signal.h>
+<spawn.h>
+<stdarg.h>
+<stdbool.h>
+<stddef.h>
+<stdint.h>
+<stdio.h>
+<stdlib.h>
+<string.h>
+<strings.h>
+<stropts.h>
+<sys/ipc.h>
+<sys/mman.h>
+<sys/msg.h>
+<sys/resource.h>
+<sys/select.h>
+<sys/sem.h>
+<sys/shm.h>
+<sys/socket.h>
+<sys/stat.h>
+<sys/statvfs.h>
+<sys/time.h>
+<sys/times.h>
+<sys/types.h>
+<sys/uio.h>
+<sys/un.h>
+<sys/utsname.h>
+<sys/wait.h>
+<syslog.h>
+<tar.h>
+<termios.h>
+<tgmath.h>
+<time.h>
+<trace.h>
+<ulimit.h>
+<unistd.h>
+<utime.h>
+<utmpx.h>
+<wchar.h>
+<wctype.h>
+<wordexp.h>
diff --git a/src/spdk/scripts/prep_benchmarks.sh b/src/spdk/scripts/prep_benchmarks.sh
new file mode 100755
index 000000000..01d8c47da
--- /dev/null
+++ b/src/spdk/scripts/prep_benchmarks.sh
@@ -0,0 +1,73 @@
+#!/usr/bin/env bash
+
+function configure_performance() {
+ echo -n "Placing all CPUs in performance mode..."
+ for governor in /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor; do
+ echo -n performance > $governor
+ done
+ echo "Done"
+
+ if [ -f "/sys/devices/system/cpu/intel_pstate/no_turbo" ]; then
+ echo -n "Disabling Turbo Boost..."
+ echo -n 1 > /sys/devices/system/cpu/intel_pstate/no_turbo
+ echo "Done"
+ fi
+
+ echo -n "Disabling irqbalance service..."
+ service irqbalance stop 2> /dev/null
+ echo "Done"
+
+ echo -n "Moving all interrupts off of core 0..."
+ count=$(($(nproc) / 4))
+ cpumask="e"
+ for ((i = 1; i < count; i++)); do
+ if [ $((i % 8)) -eq 0 ]; then
+ cpumask=",$cpumask"
+ fi
+ cpumask="f$cpumask"
+ done
+ for file in /proc/irq/*/smp_affinity; do
+ echo "$cpumask" > $file 2> /dev/null
+ done
+ echo "Done"
+
+ echo -n "Configuring kernel blk-mq for NVMe SSDs..."
+ for queue in /sys/block/nvme*n*/queue; do
+ if [ -f "$queue/nomerges" ]; then
+ echo "1" > $queue/nomerges
+ fi
+
+ if [ -f "$queue/io_poll" ]; then
+ echo "1" > $queue/io_poll
+ fi
+
+ if [ -f "$queue/io_poll_delay" ]; then
+ echo "-1" > $queue/io_poll_delay
+ fi
+ done
+ echo "Done"
+}
+
+function reset_performance() {
+ echo -n "Placing all CPUs in powersave mode..."
+ for governor in /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor; do
+ echo -n powersave > $governor
+ done
+ echo "Done"
+
+ if [ -f "/sys/devices/system/cpu/intel_pstate/no_turbo" ]; then
+ echo -n "Enabling Turbo Boost..."
+ echo -n 0 > /sys/devices/system/cpu/intel_pstate/no_turbo
+ echo "Done"
+ fi
+
+ echo -n "Enabling irqbalance service..."
+ service irqbalance start 2> /dev/null
+ echo "Done"
+}
+
+if [ "$1" = "reset" ]; then
+ reset_performance
+else
+ configure_performance
+fi
diff --git a/src/spdk/scripts/qat_setup.sh b/src/spdk/scripts/qat_setup.sh
new file mode 100755
index 000000000..30e24ca09
--- /dev/null
+++ b/src/spdk/scripts/qat_setup.sh
@@ -0,0 +1,96 @@
+#!/usr/bin/env bash
+
+rootdir=$(readlink -f $(dirname $0))/..
+igb_driverdir=$rootdir/dpdk/build/build/kernel/igb_uio/
+allowed_drivers=("igb_uio" "uio_pci_generic")
+
+# This script requires an igb_uio kernel module binary located at $igb_driverdir/igb_uio.ko
+# Please also note that this script is not intended to be comprehensive or production quality.
+# It supports configuring a single card (the Intel QAT 8970) for use with the SPDK
+
+bad_driver=true
+driver_to_bind=uio_pci_generic
+num_vfs=16
+
+qat_pci_bdfs=($(lspci -Dd:37c8 | awk '{print $1}'))
+if [ ${#qat_pci_bdfs[@]} -eq 0 ]; then
+ echo "No QAT devices found. Exiting"
+ exit 0
+fi
+
+if [ -n "$1" ]; then
+ driver_to_bind=$1
+fi
+
+for driver in "${allowed_drivers[@]}"; do
+ if [ $driver == $driver_to_bind ]; then
+ bad_driver=false
+ fi
+done
+
+if $bad_driver; then
+ echo "Unrecognized driver. Please specify an accepted driver (listed below):"
+ echo "${allowed_drivers[@]}"
+ exit 1
+fi
+
+# try starting the qat service. If this doesn't work, just treat it as a warning for now.
+if ! service qat_service start; then
+ echo "failed to start the qat service. Something may be wrong with your 01.org driver."
+fi
+
+# configure virtual functions for the QAT cards.
+for qat_bdf in "${qat_pci_bdfs[@]}"; do
+ echo "$num_vfs" > /sys/bus/pci/drivers/c6xx/$qat_bdf/sriov_numvfs
+ num_vfs=$(cat /sys/bus/pci/drivers/c6xx/$qat_bdf/sriov_numvfs)
+ echo "$qat_bdf set to $num_vfs VFs"
+done
+
+# Confirm we have all of the virtual functions we asked for.
+
+qat_vf_bdfs=($(lspci -Dd:37c9 | awk '{print $1}'))
+if ((${#qat_vf_bdfs[@]} != ${#qat_pci_bdfs[@]} * num_vfs)); then
+ echo "Failed to prepare the VFs. Aborting"
+ exit 1
+fi
+
+# Unbind old driver if necessary.
+for vf in "${qat_vf_bdfs[@]}"; do
+ old_driver=$(basename $(readlink -f /sys/bus/pci/devices/${vf}/driver))
+ if [ $old_driver != "driver" ]; then
+ echo "unbinding driver $old_driver from qat VF at BDF $vf"
+ echo -n $vf > /sys/bus/pci/drivers/$old_driver/unbind
+ fi
+done
+
+modprobe uio
+
+# Insert the dpdk uio kernel module.
+if [ $driver_to_bind == "igb_uio" ]; then
+ if ! lsmod | grep -q igb_uio; then
+ if ! insmod $igb_driverdir/igb_uio.ko; then
+ echo "Unable to insert the igb_uio kernel module. Aborting."
+ exit 1
+ fi
+ fi
+elif [ "$driver_to_bind" == "uio_pci_generic" ]; then
+ modprobe uio_pci_generic
+else
+ echo "Unsure how to work with driver $driver_to_bind. Please configure it in qat_setup.sh"
+ exit 1
+fi
+
+echo -n "8086 37c9" > /sys/bus/pci/drivers/$driver_to_bind/new_id
+for vf in "${qat_vf_bdfs[@]}"; do
+ if ! ls -l /sys/bus/pci/devices/$vf/driver | grep -q $driver_to_bind; then
+ echo "unable to bind the driver to the device at bdf $vf"
+ if [ "$driver_to_bind" == "uio_pci_generic" ]; then
+ echo "Your kernel's uio_pci_generic module does not support binding to virtual functions."
+ echo "It likely is missing Linux git commit ID acec09e67 which is needed to bind"
+ echo "uio_pci_generic to virtual functions which have no legacy interrupt vector."
+ echo "Please rebuild spdk with --with-igb-uio-driver and re-run this script specifying the igb_uio driver."
+ fi
+ exit 1
+ fi
+done
+echo "Properly configured the qat device with driver $driver_to_bind."
diff --git a/src/spdk/scripts/rpc.py b/src/spdk/scripts/rpc.py
new file mode 100755
index 000000000..140bfefd9
--- /dev/null
+++ b/src/spdk/scripts/rpc.py
@@ -0,0 +1,2507 @@
+#!/usr/bin/env python3
+
+from rpc.client import print_dict, print_json, JSONRPCException
+from rpc.helpers import deprecated_aliases
+
+import logging
+import argparse
+import importlib
+import rpc
+import sys
+import shlex
+import json
+
+try:
+ from shlex import quote
+except ImportError:
+ from pipes import quote
+
+
+def print_array(a):
+ print(" ".join((quote(v) for v in a)))
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description='SPDK RPC command line interface', usage='%(prog)s [options]')
+ parser.add_argument('-s', dest='server_addr',
+ help='RPC domain socket path or IP address', default='/var/tmp/spdk.sock')
+ parser.add_argument('-p', dest='port',
+ help='RPC port number (if server_addr is IP address)',
+ default=5260, type=int)
+ parser.add_argument('-t', dest='timeout',
+ help='Timeout as a floating point number expressed in seconds waiting for response. Default: 60.0',
+ default=60.0, type=float)
+ parser.add_argument('-r', dest='conn_retries',
+ help='Retry connecting to the RPC server N times with 0.2s interval. Default: 0',
+ default=0, type=int)
+ parser.add_argument('-v', dest='verbose', action='store_const', const="INFO",
+ help='Set verbose mode to INFO', default="ERROR")
+ parser.add_argument('--verbose', dest='verbose', choices=['DEBUG', 'INFO', 'ERROR'],
+ help="""Set verbose level. """)
+ parser.add_argument('--dry_run', dest='dry_run', action='store_true', help="Display request and exit")
+ parser.set_defaults(dry_run=False)
+ parser.add_argument('--server', dest='is_server', action='store_true',
+ help="Start listening on stdin, parse each line as a regular rpc.py execution and create \
+ a separate connection for each command. Each command's output ends with either \
+ **STATUS=0 if the command succeeded or **STATUS=1 if it failed. --server is meant \
+ to be used in conjunction with bash coproc, where stdin and stdout are connected to \
+ pipes and can be used as a faster way to send RPC commands. If enabled, rpc.py \
+ must be executed without any other parameters.")
+ parser.set_defaults(is_server=False)
+ parser.add_argument('--plugin', dest='rpc_plugin', help='Module name of plugin with additional RPC commands')
+ subparsers = parser.add_subparsers(help='RPC methods', dest='called_rpc_name', metavar='')
+
+ def framework_start_init(args):
+ rpc.framework_start_init(args.client)
+
+ p = subparsers.add_parser('framework_start_init', aliases=['start_subsystem_init'],
+ help='Start initialization of subsystems')
+ p.set_defaults(func=framework_start_init)
+
+ def framework_wait_init(args):
+ rpc.framework_wait_init(args.client)
+
+ p = subparsers.add_parser('framework_wait_init', aliases=['wait_subsystem_init'],
+ help='Block until subsystems have been initialized')
+ p.set_defaults(func=framework_wait_init)
+
+ def rpc_get_methods(args):
+ print_dict(rpc.rpc_get_methods(args.client,
+ current=args.current,
+ include_aliases=args.include_aliases))
+
+ p = subparsers.add_parser('rpc_get_methods', aliases=['get_rpc_methods'],
+ help='Get list of supported RPC methods')
+ p.add_argument('-c', '--current', help='Get list of RPC methods only callable in the current state.', action='store_true')
+ p.add_argument('-i', '--include-aliases', help='include RPC aliases', action='store_true')
+ p.set_defaults(func=rpc_get_methods)
+
+ def spdk_get_version(args):
+ print_json(rpc.spdk_get_version(args.client))
+
+ p = subparsers.add_parser('spdk_get_version', aliases=['get_spdk_version'],
+ help='Get SPDK version')
+ p.set_defaults(func=spdk_get_version)
+
+ def save_config(args):
+ rpc.save_config(args.client,
+ sys.stdout,
+ indent=args.indent)
+
+ p = subparsers.add_parser('save_config', help="""Write current (live) configuration of SPDK subsystems and targets to stdout.
+ """)
+ p.add_argument('-i', '--indent', help="""Indent level. Value less than 0 mean compact mode. Default indent level is 2.
+ """, type=int, default=2)
+ p.set_defaults(func=save_config)
+
+ def load_config(args):
+ rpc.load_config(args.client, args.json_conf,
+ include_aliases=args.include_aliases)
+
+ p = subparsers.add_parser('load_config', help="""Configure SPDK subsystems and targets using JSON RPC.""")
+ p.add_argument('-i', '--include-aliases', help='include RPC aliases', action='store_true')
+ p.add_argument('-j', '--json_conf', help='Valid JSON configuration', default=sys.stdin)
+ p.set_defaults(func=load_config)
+
+ def save_subsystem_config(args):
+ rpc.save_subsystem_config(args.client,
+ sys.stdout,
+ indent=args.indent,
+ name=args.name)
+
+ p = subparsers.add_parser('save_subsystem_config', help="""Write current (live) configuration of SPDK subsystem to stdout.
+ """)
+ p.add_argument('-i', '--indent', help="""Indent level. Value less than 0 mean compact mode. Default indent level is 2.
+ """, type=int, default=2)
+ p.add_argument('-n', '--name', help='Name of subsystem', required=True)
+ p.set_defaults(func=save_subsystem_config)
+
+ def load_subsystem_config(args):
+ rpc.load_subsystem_config(args.client,
+ args.json_conf)
+
+ p = subparsers.add_parser('load_subsystem_config', help="""Configure SPDK subsystem using JSON RPC.""")
+ p.add_argument('-j', '--json_conf', help='Valid JSON configuration', default=sys.stdin)
+ p.set_defaults(func=load_subsystem_config)
+
+ # app
+ def spdk_kill_instance(args):
+ rpc.app.spdk_kill_instance(args.client,
+ sig_name=args.sig_name)
+
+ p = subparsers.add_parser('spdk_kill_instance', aliases=['kill_instance'],
+ help='Send signal to instance')
+ p.add_argument('sig_name', help='signal will be sent to server.')
+ p.set_defaults(func=spdk_kill_instance)
+
+ def framework_monitor_context_switch(args):
+ enabled = None
+ if args.enable:
+ enabled = True
+ if args.disable:
+ enabled = False
+ print_dict(rpc.app.framework_monitor_context_switch(args.client,
+ enabled=enabled))
+
+ p = subparsers.add_parser('framework_monitor_context_switch', aliases=['context_switch_monitor'],
+ help='Control whether the context switch monitor is enabled')
+ p.add_argument('-e', '--enable', action='store_true', help='Enable context switch monitoring')
+ p.add_argument('-d', '--disable', action='store_true', help='Disable context switch monitoring')
+ p.set_defaults(func=framework_monitor_context_switch)
+
+ def framework_get_reactors(args):
+ print_dict(rpc.app.framework_get_reactors(args.client))
+
+ p = subparsers.add_parser(
+ 'framework_get_reactors', help='Display list of all reactors')
+ p.set_defaults(func=framework_get_reactors)
+
+ # bdev
+ def bdev_set_options(args):
+ rpc.bdev.bdev_set_options(args.client,
+ bdev_io_pool_size=args.bdev_io_pool_size,
+ bdev_io_cache_size=args.bdev_io_cache_size,
+ bdev_auto_examine=args.bdev_auto_examine)
+
+ p = subparsers.add_parser('bdev_set_options', aliases=['set_bdev_options'],
+ help="""Set options of bdev subsystem""")
+ p.add_argument('-p', '--bdev-io-pool-size', help='Number of bdev_io structures in shared buffer pool', type=int)
+ p.add_argument('-c', '--bdev-io-cache-size', help='Maximum number of bdev_io structures cached per thread', type=int)
+ group = p.add_mutually_exclusive_group()
+ group.add_argument('-e', '--enable-auto-examine', dest='bdev_auto_examine', help='Allow to auto examine', action='store_true')
+ group.add_argument('-d', '--disable-auto-examine', dest='bdev_auto_examine', help='Not allow to auto examine', action='store_false')
+ p.set_defaults(bdev_auto_examine=True)
+ p.set_defaults(func=bdev_set_options)
+
+ def bdev_compress_create(args):
+ print_json(rpc.bdev.bdev_compress_create(args.client,
+ base_bdev_name=args.base_bdev_name,
+ pm_path=args.pm_path,
+ lb_size=args.lb_size))
+
+ p = subparsers.add_parser('bdev_compress_create', aliases=['construct_compress_bdev'],
+ help='Add a compress vbdev')
+ p.add_argument('-b', '--base_bdev_name', help="Name of the base bdev")
+ p.add_argument('-p', '--pm_path', help="Path to persistent memory")
+ p.add_argument('-l', '--lb_size', help="Compressed vol logical block size (optional, if used must be 512 or 4096)", type=int, default=0)
+ p.set_defaults(func=bdev_compress_create)
+
+ def bdev_compress_delete(args):
+ rpc.bdev.bdev_compress_delete(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('bdev_compress_delete', aliases=['delete_compress_bdev'],
+ help='Delete a compress disk')
+ p.add_argument('name', help='compress bdev name')
+ p.set_defaults(func=bdev_compress_delete)
+
+ def compress_set_pmd(args):
+ rpc.bdev.compress_set_pmd(args.client,
+ pmd=args.pmd)
+ p = subparsers.add_parser('compress_set_pmd', aliases=['set_compress_pmd'],
+ help='Set pmd option for a compress disk')
+ p.add_argument('-p', '--pmd', type=int, help='0 = auto-select, 1= QAT only, 2 = ISAL only')
+ p.set_defaults(func=compress_set_pmd)
+
+ def bdev_compress_get_orphans(args):
+ print_dict(rpc.bdev.bdev_compress_get_orphans(args.client,
+ name=args.name))
+ p = subparsers.add_parser(
+ 'bdev_compress_get_orphans', help='Display list of orphaned compress bdevs.')
+ p.add_argument('-b', '--name', help="Name of a comp bdev. Example: COMP_Nvme0n1", required=False)
+ p.set_defaults(func=bdev_compress_get_orphans)
+
+ def bdev_crypto_create(args):
+ print_json(rpc.bdev.bdev_crypto_create(args.client,
+ base_bdev_name=args.base_bdev_name,
+ name=args.name,
+ crypto_pmd=args.crypto_pmd,
+ key=args.key,
+ cipher=args.cipher,
+ key2=args.key2))
+ p = subparsers.add_parser('bdev_crypto_create', aliases=['construct_crypto_bdev'],
+ help='Add a crypto vbdev')
+ p.add_argument('base_bdev_name', help="Name of the base bdev")
+ p.add_argument('name', help="Name of the crypto vbdev")
+ p.add_argument('crypto_pmd', help="Name of the crypto device driver")
+ p.add_argument('key', help="Key")
+ p.add_argument('-c', '--cipher', help="cipher to use, AES_CBC or AES_XTS (QAT only)", default="AES_CBC")
+ p.add_argument('-k2', '--key2', help="2nd key for cipher AET_XTS", default=None)
+ p.set_defaults(func=bdev_crypto_create)
+
+ def bdev_crypto_delete(args):
+ rpc.bdev.bdev_crypto_delete(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('bdev_crypto_delete', aliases=['delete_crypto_bdev'],
+ help='Delete a crypto disk')
+ p.add_argument('name', help='crypto bdev name')
+ p.set_defaults(func=bdev_crypto_delete)
+
+ def bdev_ocf_create(args):
+ print_json(rpc.bdev.bdev_ocf_create(args.client,
+ name=args.name,
+ mode=args.mode,
+ cache_bdev_name=args.cache_bdev_name,
+ core_bdev_name=args.core_bdev_name))
+ p = subparsers.add_parser('bdev_ocf_create', aliases=['construct_ocf_bdev'],
+ help='Add an OCF block device')
+ p.add_argument('name', help='Name of resulting OCF bdev')
+ p.add_argument('mode', help='OCF cache mode', choices=['wb', 'wt', 'pt', 'wa', 'wi', 'wo'])
+ p.add_argument('cache_bdev_name', help='Name of underlying cache bdev')
+ p.add_argument('core_bdev_name', help='Name of unerlying core bdev')
+ p.set_defaults(func=bdev_ocf_create)
+
+ def bdev_ocf_delete(args):
+ rpc.bdev.bdev_ocf_delete(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('bdev_ocf_delete', aliases=['delete_ocf_bdev'],
+ help='Delete an OCF block device')
+ p.add_argument('name', help='Name of OCF bdev')
+ p.set_defaults(func=bdev_ocf_delete)
+
+ def bdev_ocf_get_stats(args):
+ print_dict(rpc.bdev.bdev_ocf_get_stats(args.client,
+ name=args.name))
+ p = subparsers.add_parser('bdev_ocf_get_stats', aliases=['get_ocf_stats'],
+ help='Get statistics of chosen OCF block device')
+ p.add_argument('name', help='Name of OCF bdev')
+ p.set_defaults(func=bdev_ocf_get_stats)
+
+ def bdev_ocf_get_bdevs(args):
+ print_dict(rpc.bdev.bdev_ocf_get_bdevs(args.client,
+ name=args.name))
+ p = subparsers.add_parser('bdev_ocf_get_bdevs', aliases=['get_ocf_bdevs'],
+ help='Get list of OCF devices including unregistered ones')
+ p.add_argument('name', nargs='?', default=None, help='name of OCF vbdev or name of cache device or name of core device (optional)')
+ p.set_defaults(func=bdev_ocf_get_bdevs)
+
+ def bdev_malloc_create(args):
+ num_blocks = (args.total_size * 1024 * 1024) // args.block_size
+ print_json(rpc.bdev.bdev_malloc_create(args.client,
+ num_blocks=int(num_blocks),
+ block_size=args.block_size,
+ name=args.name,
+ uuid=args.uuid))
+ p = subparsers.add_parser('bdev_malloc_create', aliases=['construct_malloc_bdev'],
+ help='Create a bdev with malloc backend')
+ p.add_argument('-b', '--name', help="Name of the bdev")
+ p.add_argument('-u', '--uuid', help="UUID of the bdev")
+ p.add_argument(
+ 'total_size', help='Size of malloc bdev in MB (float > 0)', type=float)
+ p.add_argument('block_size', help='Block size for this bdev', type=int)
+ p.set_defaults(func=bdev_malloc_create)
+
+ def bdev_malloc_delete(args):
+ rpc.bdev.bdev_malloc_delete(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('bdev_malloc_delete', aliases=['delete_malloc_bdev'],
+ help='Delete a malloc disk')
+ p.add_argument('name', help='malloc bdev name')
+ p.set_defaults(func=bdev_malloc_delete)
+
+ # --- null and aio bdev create/delete ---
+ def bdev_null_create(args):
+ # Same MiB -> block-count conversion as malloc, but total_size is an int
+ # here so no extra int() wrap is needed.
+ num_blocks = (args.total_size * 1024 * 1024) // args.block_size
+ # md_size / dif_type / dif_is_head_of_md are optional; unset flags pass
+ # None/False through to the RPC layer, which applies its defaults.
+ print_json(rpc.bdev.bdev_null_create(args.client,
+ num_blocks=num_blocks,
+ block_size=args.block_size,
+ name=args.name,
+ uuid=args.uuid,
+ md_size=args.md_size,
+ dif_type=args.dif_type,
+ dif_is_head_of_md=args.dif_is_head_of_md))
+
+ p = subparsers.add_parser('bdev_null_create', aliases=['construct_null_bdev'],
+ help='Add a bdev with null backend')
+ p.add_argument('name', help='Block device name')
+ p.add_argument('-u', '--uuid', help='UUID of the bdev')
+ p.add_argument(
+ 'total_size', help='Size of null bdev in MB (int > 0)', type=int)
+ p.add_argument('block_size', help='Block size for this bdev', type=int)
+ p.add_argument('-m', '--md-size', type=int,
+ help='Metadata size for this bdev. Default 0')
+ p.add_argument('-t', '--dif-type', type=int, choices=[0, 1, 2, 3],
+ help='Protection information type. Default: 0 - no protection')
+ p.add_argument('-d', '--dif-is-head-of-md', action='store_true',
+ help='Protection information is in the first 8 bytes of metadata. Default: in the last 8 bytes')
+ p.set_defaults(func=bdev_null_create)
+
+ def bdev_null_delete(args):
+ rpc.bdev.bdev_null_delete(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('bdev_null_delete', aliases=['delete_null_bdev'],
+ help='Delete a null bdev')
+ p.add_argument('name', help='null bdev name')
+ p.set_defaults(func=bdev_null_delete)
+
+ def bdev_aio_create(args):
+ print_json(rpc.bdev.bdev_aio_create(args.client,
+ filename=args.filename,
+ name=args.name,
+ block_size=args.block_size))
+
+ p = subparsers.add_parser('bdev_aio_create', aliases=['construct_aio_bdev'],
+ help='Add a bdev with aio backend')
+ p.add_argument('filename', help='Path to device or file (ex: /dev/sda)')
+ p.add_argument('name', help='Block device name')
+ # block_size is optional; 0 means "auto-detect" on the RPC side —
+ # NOTE(review): assumption from the default value, confirm against rpc.bdev.
+ p.add_argument('block_size', help='Block size for this bdev', type=int, nargs='?', default=0)
+ p.set_defaults(func=bdev_aio_create)
+
+ def bdev_aio_delete(args):
+ rpc.bdev.bdev_aio_delete(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('bdev_aio_delete', aliases=['delete_aio_bdev'],
+ help='Delete an aio disk')
+ p.add_argument('name', help='aio bdev name')
+ p.set_defaults(func=bdev_aio_delete)
+
+ # --- io_uring bdev create/delete (no legacy aliases; newer command set) ---
+ def bdev_uring_create(args):
+ print_json(rpc.bdev.bdev_uring_create(args.client,
+ filename=args.filename,
+ name=args.name,
+ block_size=args.block_size))
+
+ p = subparsers.add_parser('bdev_uring_create', help='Create a bdev with io_uring backend')
+ p.add_argument('filename', help='Path to device or file (ex: /dev/nvme0n1)')
+ p.add_argument('name', help='bdev name')
+ # Optional positional; defaults to 0 when omitted (mirrors bdev_aio_create).
+ p.add_argument('block_size', help='Block size for this bdev', type=int, nargs='?', default=0)
+ p.set_defaults(func=bdev_uring_create)
+
+ def bdev_uring_delete(args):
+ rpc.bdev.bdev_uring_delete(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('bdev_uring_delete', help='Delete a uring bdev')
+ p.add_argument('name', help='uring bdev name')
+ p.set_defaults(func=bdev_uring_delete)
+
+ def bdev_nvme_set_options(args):
+ rpc.bdev.bdev_nvme_set_options(args.client,
+ action_on_timeout=args.action_on_timeout,
+ timeout_us=args.timeout_us,
+ retry_count=args.retry_count,
+ arbitration_burst=args.arbitration_burst,
+ low_priority_weight=args.low_priority_weight,
+ medium_priority_weight=args.medium_priority_weight,
+ high_priority_weight=args.high_priority_weight,
+ nvme_adminq_poll_period_us=args.nvme_adminq_poll_period_us,
+ nvme_ioq_poll_period_us=args.nvme_ioq_poll_period_us,
+ io_queue_requests=args.io_queue_requests,
+ delay_cmd_submit=args.delay_cmd_submit)
+
+ p = subparsers.add_parser('bdev_nvme_set_options', aliases=['set_bdev_nvme_options'],
+ help='Set options for the bdev nvme type. This is startup command.')
+ p.add_argument('-a', '--action-on-timeout',
+ help="Action to take on command time out. Valid valies are: none, reset, abort")
+ p.add_argument('-t', '--timeout-us',
+ help="Timeout for each command, in microseconds. If 0, don't track timeouts.", type=int)
+ p.add_argument('-n', '--retry-count',
+ help='the number of attempts per I/O when an I/O fails', type=int)
+ p.add_argument('--arbitration-burst',
+ help='the value is expressed as a power of two', type=int)
+ p.add_argument('--low-priority-weight',
+ help='the maximum number of commands that the controller may launch at one time from a low priority queue', type=int)
+ p.add_argument('--medium-priority-weight',
+ help='the maximum number of commands that the controller may launch at one time from a medium priority queue', type=int)
+ p.add_argument('--high-priority-weight',
+ help='the maximum number of commands that the controller may launch at one time from a high priority queue', type=int)
+ p.add_argument('-p', '--nvme-adminq-poll-period-us',
+ help='How often the admin queue is polled for asynchronous events', type=int)
+ p.add_argument('-i', '--nvme-ioq-poll-period-us',
+ help='How often to poll I/O queues for completions', type=int)
+ p.add_argument('-s', '--io-queue-requests',
+ help='The number of requests allocated for each NVMe I/O queue. Default: 512', type=int)
+ p.add_argument('-d', '--disable-delay-cmd-submit',
+ help='Disable delaying NVMe command submission, i.e. no batching of multiple commands',
+ action='store_false', dest='delay_cmd_submit', default=True)
+ p.set_defaults(func=bdev_nvme_set_options)
+
+ # --- NVMe hotplug polling toggle ---
+ def bdev_nvme_set_hotplug(args):
+ rpc.bdev.bdev_nvme_set_hotplug(args.client, enable=args.enable, period_us=args.period_us)
+
+ p = subparsers.add_parser('bdev_nvme_set_hotplug', aliases=['set_bdev_nvme_hotplug'],
+ help='Set hotplug options for bdev nvme type.')
+ # -d and -e share dest='enable'; default is False, so hotplug stays
+ # disabled unless -e is given.
+ p.add_argument('-d', '--disable', dest='enable', default=False, action='store_false', help="Disable hotplug (default)")
+ p.add_argument('-e', '--enable', dest='enable', action='store_true', help="Enable hotplug")
+ p.add_argument('-r', '--period-us',
+ help='How often the hotplug is processed for insert and remove events', type=int)
+ p.set_defaults(func=bdev_nvme_set_hotplug)
+
+ # --- NVMe controller attach/detach, listing, and CUSE registration ---
+ def bdev_nvme_attach_controller(args):
+ # The RPC returns the list of bdev names created for the controller's
+ # namespaces, hence print_array rather than print_dict/print_json.
+ print_array(rpc.bdev.bdev_nvme_attach_controller(args.client,
+ name=args.name,
+ trtype=args.trtype,
+ traddr=args.traddr,
+ adrfam=args.adrfam,
+ trsvcid=args.trsvcid,
+ priority=args.priority,
+ subnqn=args.subnqn,
+ hostnqn=args.hostnqn,
+ hostaddr=args.hostaddr,
+ hostsvcid=args.hostsvcid,
+ prchk_reftag=args.prchk_reftag,
+ prchk_guard=args.prchk_guard))
+
+ p = subparsers.add_parser('bdev_nvme_attach_controller', aliases=['construct_nvme_bdev'],
+ help='Add bdevs with nvme backend')
+ p.add_argument('-b', '--name', help="Name of the NVMe controller, prefix for each bdev name", required=True)
+ p.add_argument('-t', '--trtype',
+ help='NVMe-oF target trtype: e.g., rdma, pcie', required=True)
+ p.add_argument('-a', '--traddr',
+ help='NVMe-oF target address: e.g., an ip address or BDF', required=True)
+ p.add_argument('-f', '--adrfam',
+ help='NVMe-oF target adrfam: e.g., ipv4, ipv6, ib, fc, intra_host')
+ p.add_argument('-s', '--trsvcid',
+ help='NVMe-oF target trsvcid: e.g., a port number')
+ p.add_argument('-p', '--priority',
+ help='NVMe-oF connection priority: e.g., a priority number')
+ p.add_argument('-n', '--subnqn', help='NVMe-oF target subnqn')
+ p.add_argument('-q', '--hostnqn', help='NVMe-oF host subnqn')
+ p.add_argument('-i', '--hostaddr',
+ help='NVMe-oF host address: e.g., an ip address')
+ p.add_argument('-c', '--hostsvcid',
+ help='NVMe-oF host svcid: e.g., a port number')
+ p.add_argument('-r', '--prchk-reftag',
+ help='Enable checking of PI reference tag for I/O processing.', action='store_true')
+ p.add_argument('-g', '--prchk-guard',
+ help='Enable checking of PI guard for I/O processing.', action='store_true')
+ p.set_defaults(func=bdev_nvme_attach_controller)
+
+ def bdev_nvme_get_controllers(args):
+ # NOTE(review): this one routes through rpc.nvme (not rpc.bdev) — looks
+ # intentional upstream; verify the module split before "fixing" it.
+ print_dict(rpc.nvme.bdev_nvme_get_controllers(args.client,
+ name=args.name))
+
+ p = subparsers.add_parser(
+ 'bdev_nvme_get_controllers', aliases=['get_nvme_controllers'],
+ help='Display current NVMe controllers list or required NVMe controller')
+ p.add_argument('-n', '--name', help="Name of the NVMe controller. Example: Nvme0", required=False)
+ p.set_defaults(func=bdev_nvme_get_controllers)
+
+ def bdev_nvme_detach_controller(args):
+ rpc.bdev.bdev_nvme_detach_controller(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('bdev_nvme_detach_controller', aliases=['delete_nvme_controller'],
+ help='Detach an NVMe controller and delete any associated bdevs')
+ p.add_argument('name', help="Name of the controller")
+ p.set_defaults(func=bdev_nvme_detach_controller)
+
+ def bdev_nvme_cuse_register(args):
+ rpc.bdev.bdev_nvme_cuse_register(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('bdev_nvme_cuse_register',
+ help='Register CUSE devices on NVMe controller')
+ p.add_argument('-n', '--name',
+ help='Name of the NVMe controller. Example: Nvme0', required=True)
+ p.set_defaults(func=bdev_nvme_cuse_register)
+
+ def bdev_nvme_cuse_unregister(args):
+ rpc.bdev.bdev_nvme_cuse_unregister(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('bdev_nvme_cuse_unregister',
+ help='Unregister CUSE devices on NVMe controller')
+ p.add_argument('-n', '--name',
+ help='Name of the NVMe controller. Example: Nvme0', required=True)
+ p.set_defaults(func=bdev_nvme_cuse_unregister)
+
+ # --- zoned-namespace emulation and Ceph RBD bdev management ---
+ def bdev_zone_block_create(args):
+ print_json(rpc.bdev.bdev_zone_block_create(args.client,
+ name=args.name,
+ base_bdev=args.base_bdev,
+ zone_capacity=args.zone_capacity,
+ optimal_open_zones=args.optimal_open_zones))
+
+ p = subparsers.add_parser('bdev_zone_block_create',
+ help='Create virtual zone namespace device with block device backend')
+ p.add_argument('-b', '--name', help="Name of the zone device", required=True)
+ p.add_argument('-n', '--base-bdev', help='Name of underlying, non-zoned bdev', required=True)
+ p.add_argument('-z', '--zone-capacity', help='Surfaced zone capacity in blocks', type=int, required=True)
+ p.add_argument('-o', '--optimal-open-zones', help='Number of zones required to reach optimal write speed', type=int, required=True)
+ p.set_defaults(func=bdev_zone_block_create)
+
+ def bdev_zone_block_delete(args):
+ rpc.bdev.bdev_zone_block_delete(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('bdev_zone_block_delete', help='Delete a virtual zone namespace device')
+ p.add_argument('name', help='Virtual zone bdev name')
+ p.set_defaults(func=bdev_zone_block_delete)
+
+ def bdev_rbd_create(args):
+ # Each --config occurrence is "key=value"; split on the FIRST '=' only so
+ # values may themselves contain '='. Malformed entries abort the command.
+ config = None
+ if args.config:
+ config = {}
+ for entry in args.config:
+ parts = entry.split('=', 1)
+ if len(parts) != 2:
+ raise Exception('--config %s not in key=value form' % entry)
+ config[parts[0]] = parts[1]
+ print_json(rpc.bdev.bdev_rbd_create(args.client,
+ name=args.name,
+ user=args.user,
+ config=config,
+ pool_name=args.pool_name,
+ rbd_name=args.rbd_name,
+ block_size=args.block_size))
+
+ p = subparsers.add_parser('bdev_rbd_create', aliases=['construct_rbd_bdev'],
+ help='Add a bdev with ceph rbd backend')
+ p.add_argument('-b', '--name', help="Name of the bdev", required=False)
+ p.add_argument('--user', help="Ceph user name (i.e. admin, not client.admin)", required=False)
+ p.add_argument('--config', action='append', metavar='key=value',
+ help="adds a key=value configuration option for rados_conf_set (default: rely on config file)")
+ p.add_argument('pool_name', help='rbd pool name')
+ p.add_argument('rbd_name', help='rbd image name')
+ p.add_argument('block_size', help='rbd block size', type=int)
+ p.set_defaults(func=bdev_rbd_create)
+
+ def bdev_rbd_delete(args):
+ rpc.bdev.bdev_rbd_delete(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('bdev_rbd_delete', aliases=['delete_rbd_bdev'],
+ help='Delete a rbd bdev')
+ p.add_argument('name', help='rbd bdev name')
+ p.set_defaults(func=bdev_rbd_delete)
+
+ def bdev_rbd_resize(args):
+ print_json(rpc.bdev.bdev_rbd_resize(args.client,
+ name=args.name,
+ new_size=int(args.new_size)))
+ rpc.bdev.bdev_rbd_resize(args.client,
+ name=args.name,
+ new_size=int(args.new_size))
+
+ p = subparsers.add_parser('bdev_rbd_resize',
+ help='Resize a rbd bdev')
+ p.add_argument('name', help='rbd bdev name')
+ p.add_argument('new_size', help='new bdev size for resize operation. The unit is MiB')
+ p.set_defaults(func=bdev_rbd_resize)
+
+ # --- delay, error-injection, and iSCSI-initiator bdev management ---
+ def bdev_delay_create(args):
+ # CLI flag names use "nine-nine"; the RPC parameters are p99_* —
+ # the mapping is done explicitly here.
+ print_json(rpc.bdev.bdev_delay_create(args.client,
+ base_bdev_name=args.base_bdev_name,
+ name=args.name,
+ avg_read_latency=args.avg_read_latency,
+ p99_read_latency=args.nine_nine_read_latency,
+ avg_write_latency=args.avg_write_latency,
+ p99_write_latency=args.nine_nine_write_latency))
+
+ p = subparsers.add_parser('bdev_delay_create',
+ help='Add a delay bdev on existing bdev')
+ p.add_argument('-b', '--base-bdev-name', help="Name of the existing bdev", required=True)
+ p.add_argument('-d', '--name', help="Name of the delay bdev", required=True)
+ p.add_argument('-r', '--avg-read-latency',
+ help="Average latency to apply before completing read ops (in microseconds)", required=True, type=int)
+ p.add_argument('-t', '--nine-nine-read-latency',
+ help="latency to apply to 1 in 100 read ops (in microseconds)", required=True, type=int)
+ p.add_argument('-w', '--avg-write-latency',
+ help="Average latency to apply before completing write ops (in microseconds)", required=True, type=int)
+ p.add_argument('-n', '--nine-nine-write-latency',
+ help="latency to apply to 1 in 100 write ops (in microseconds)", required=True, type=int)
+ p.set_defaults(func=bdev_delay_create)
+
+ def bdev_delay_delete(args):
+ rpc.bdev.bdev_delay_delete(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('bdev_delay_delete', help='Delete a delay bdev')
+ p.add_argument('name', help='delay bdev name')
+ p.set_defaults(func=bdev_delay_delete)
+
+ def bdev_delay_update_latency(args):
+ print_json(rpc.bdev.bdev_delay_update_latency(args.client,
+ delay_bdev_name=args.delay_bdev_name,
+ latency_type=args.latency_type,
+ latency_us=args.latency_us))
+ p = subparsers.add_parser('bdev_delay_update_latency',
+ help='Update one of the latency values for a given delay bdev')
+ p.add_argument('delay_bdev_name', help='The name of the given delay bdev')
+ p.add_argument('latency_type', help='one of: avg_read, avg_write, p99_read, p99_write. No other values accepted.')
+ p.add_argument('latency_us', help='new latency value in microseconds.', type=int)
+ p.set_defaults(func=bdev_delay_update_latency)
+
+ def bdev_error_create(args):
+ print_json(rpc.bdev.bdev_error_create(args.client,
+ base_name=args.base_name))
+
+ p = subparsers.add_parser('bdev_error_create', aliases=['construct_error_bdev'],
+ help='Add bdev with error injection backend')
+ p.add_argument('base_name', help='base bdev name')
+ p.set_defaults(func=bdev_error_create)
+
+ def bdev_error_delete(args):
+ rpc.bdev.bdev_error_delete(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('bdev_error_delete', aliases=['delete_error_bdev'],
+ help='Delete an error bdev')
+ p.add_argument('name', help='error bdev name')
+ p.set_defaults(func=bdev_error_delete)
+
+ def bdev_iscsi_create(args):
+ print_json(rpc.bdev.bdev_iscsi_create(args.client,
+ name=args.name,
+ url=args.url,
+ initiator_iqn=args.initiator_iqn))
+
+ p = subparsers.add_parser('bdev_iscsi_create', aliases=['construct_iscsi_bdev'],
+ help='Add bdev with iSCSI initiator backend')
+ p.add_argument('-b', '--name', help="Name of the bdev", required=True)
+ p.add_argument('-i', '--initiator-iqn', help="Initiator IQN", required=True)
+ p.add_argument('--url', help="iSCSI Lun URL", required=True)
+ p.set_defaults(func=bdev_iscsi_create)
+
+ # --- iSCSI bdev delete, pmem/passthru bdevs, and bdev listing/iostat ---
+ def bdev_iscsi_delete(args):
+ rpc.bdev.bdev_iscsi_delete(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('bdev_iscsi_delete', aliases=['delete_iscsi_bdev'],
+ help='Delete an iSCSI bdev')
+ p.add_argument('name', help='iSCSI bdev name')
+ p.set_defaults(func=bdev_iscsi_delete)
+
+ def bdev_pmem_create(args):
+ print_json(rpc.bdev.bdev_pmem_create(args.client,
+ pmem_file=args.pmem_file,
+ name=args.name))
+
+ p = subparsers.add_parser('bdev_pmem_create', aliases=['construct_pmem_bdev'],
+ help='Add a bdev with pmem backend')
+ p.add_argument('pmem_file', help='Path to pmemblk pool file')
+ p.add_argument('-n', '--name', help='Block device name', required=True)
+ p.set_defaults(func=bdev_pmem_create)
+
+ def bdev_pmem_delete(args):
+ rpc.bdev.bdev_pmem_delete(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('bdev_pmem_delete', aliases=['delete_pmem_bdev'],
+ help='Delete a pmem bdev')
+ p.add_argument('name', help='pmem bdev name')
+ p.set_defaults(func=bdev_pmem_delete)
+
+ def bdev_passthru_create(args):
+ print_json(rpc.bdev.bdev_passthru_create(args.client,
+ base_bdev_name=args.base_bdev_name,
+ name=args.name))
+
+ p = subparsers.add_parser('bdev_passthru_create', aliases=['construct_passthru_bdev'],
+ help='Add a pass through bdev on existing bdev')
+ p.add_argument('-b', '--base-bdev-name', help="Name of the existing bdev", required=True)
+ p.add_argument('-p', '--name', help="Name of the pass through bdev", required=True)
+ p.set_defaults(func=bdev_passthru_create)
+
+ def bdev_passthru_delete(args):
+ rpc.bdev.bdev_passthru_delete(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('bdev_passthru_delete', aliases=['delete_passthru_bdev'],
+ help='Delete a pass through bdev')
+ p.add_argument('name', help='pass through bdev name')
+ p.set_defaults(func=bdev_passthru_delete)
+
+ def bdev_get_bdevs(args):
+ # name=None (flag omitted) lists every bdev.
+ print_dict(rpc.bdev.bdev_get_bdevs(args.client,
+ name=args.name))
+
+ p = subparsers.add_parser('bdev_get_bdevs', aliases=['get_bdevs'],
+ help='Display current blockdev list or required blockdev')
+ p.add_argument('-b', '--name', help="Name of the Blockdev. Example: Nvme0n1", required=False)
+ p.set_defaults(func=bdev_get_bdevs)
+
+ def bdev_get_iostat(args):
+ print_dict(rpc.bdev.bdev_get_iostat(args.client,
+ name=args.name))
+
+ p = subparsers.add_parser('bdev_get_iostat', aliases=['get_bdevs_iostat'],
+ help='Display current I/O statistics of all the blockdevs or required blockdev.')
+ p.add_argument('-b', '--name', help="Name of the Blockdev. Example: Nvme0n1", required=False)
+ p.set_defaults(func=bdev_get_iostat)
+
+ # --- histograms, queue-depth sampling, QoS limits, error injection,
+ # --- and NVMe firmware update ---
+ def bdev_enable_histogram(args):
+ rpc.bdev.bdev_enable_histogram(args.client, name=args.name, enable=args.enable)
+
+ p = subparsers.add_parser('bdev_enable_histogram', aliases=['enable_bdev_histogram'],
+ help='Enable or disable histogram for specified bdev')
+ # default=True with store_true means passing -e is effectively redundant;
+ # only -d changes the outcome (sets enable=False).
+ p.add_argument('-e', '--enable', default=True, dest='enable', action='store_true', help='Enable histograms on specified device')
+ p.add_argument('-d', '--disable', dest='enable', action='store_false', help='Disable histograms on specified device')
+ p.add_argument('name', help='bdev name')
+ p.set_defaults(func=bdev_enable_histogram)
+
+ def bdev_get_histogram(args):
+ print_dict(rpc.bdev.bdev_get_histogram(args.client, name=args.name))
+
+ p = subparsers.add_parser('bdev_get_histogram', aliases=['get_bdev_histogram'],
+ help='Get histogram for specified bdev')
+ p.add_argument('name', help='bdev name')
+ p.set_defaults(func=bdev_get_histogram)
+
+ def bdev_set_qd_sampling_period(args):
+ rpc.bdev.bdev_set_qd_sampling_period(args.client,
+ name=args.name,
+ period=args.period)
+
+ p = subparsers.add_parser('bdev_set_qd_sampling_period', aliases=['set_bdev_qd_sampling_period'],
+ help="Enable or disable tracking of a bdev's queue depth.")
+ p.add_argument('name', help='Blockdev name. Example: Malloc0')
+ p.add_argument('period', help='Period with which to poll the block device queue depth in microseconds.'
+ ' If set to 0, polling will be disabled.',
+ type=int)
+ p.set_defaults(func=bdev_set_qd_sampling_period)
+
+ def bdev_set_qos_limit(args):
+ # All four limits are optional; omitted ones are sent as None so the
+ # target leaves those limits unchanged.
+ rpc.bdev.bdev_set_qos_limit(args.client,
+ name=args.name,
+ rw_ios_per_sec=args.rw_ios_per_sec,
+ rw_mbytes_per_sec=args.rw_mbytes_per_sec,
+ r_mbytes_per_sec=args.r_mbytes_per_sec,
+ w_mbytes_per_sec=args.w_mbytes_per_sec)
+
+ p = subparsers.add_parser('bdev_set_qos_limit', aliases=['set_bdev_qos_limit'],
+ help='Set QoS rate limit on a blockdev')
+ p.add_argument('name', help='Blockdev name to set QoS. Example: Malloc0')
+ p.add_argument('--rw_ios_per_sec',
+ help='R/W IOs per second limit (>=10000, example: 20000). 0 means unlimited.',
+ type=int, required=False)
+ p.add_argument('--rw_mbytes_per_sec',
+ help="R/W megabytes per second limit (>=10, example: 100). 0 means unlimited.",
+ type=int, required=False)
+ p.add_argument('--r_mbytes_per_sec',
+ help="Read megabytes per second limit (>=10, example: 100). 0 means unlimited.",
+ type=int, required=False)
+ p.add_argument('--w_mbytes_per_sec',
+ help="Write megabytes per second limit (>=10, example: 100). 0 means unlimited.",
+ type=int, required=False)
+ p.set_defaults(func=bdev_set_qos_limit)
+
+ def bdev_error_inject_error(args):
+ rpc.bdev.bdev_error_inject_error(args.client,
+ name=args.name,
+ io_type=args.io_type,
+ error_type=args.error_type,
+ num=args.num)
+
+ p = subparsers.add_parser('bdev_error_inject_error', aliases=['bdev_inject_error'],
+ help='bdev inject error')
+ p.add_argument('name', help="""the name of the error injection bdev""")
+ p.add_argument('io_type', help="""io_type: 'clear' 'read' 'write' 'unmap' 'flush' 'all'""")
+ p.add_argument('error_type', help="""error_type: 'failure' 'pending'""")
+ p.add_argument(
+ '-n', '--num', help='the number of commands you want to fail', type=int, default=1)
+ p.set_defaults(func=bdev_error_inject_error)
+
+ def bdev_nvme_apply_firmware(args):
+ print_dict(rpc.bdev.bdev_nvme_apply_firmware(args.client,
+ bdev_name=args.bdev_name,
+ filename=args.filename))
+
+ p = subparsers.add_parser('bdev_nvme_apply_firmware', aliases=['apply_firmware'],
+ help='Download and commit firmware to NVMe device')
+ p.add_argument('filename', help='filename of the firmware to download')
+ p.add_argument('bdev_name', help='name of the NVMe device')
+ p.set_defaults(func=bdev_nvme_apply_firmware)
+
+ # iSCSI
+ # --- global iSCSI subsystem options (startup-time) ---
+ def iscsi_set_options(args):
+ rpc.iscsi.iscsi_set_options(
+ args.client,
+ auth_file=args.auth_file,
+ node_base=args.node_base,
+ nop_timeout=args.nop_timeout,
+ nop_in_interval=args.nop_in_interval,
+ disable_chap=args.disable_chap,
+ require_chap=args.require_chap,
+ mutual_chap=args.mutual_chap,
+ chap_group=args.chap_group,
+ max_sessions=args.max_sessions,
+ max_queue_depth=args.max_queue_depth,
+ max_connections_per_session=args.max_connections_per_session,
+ default_time2wait=args.default_time2wait,
+ default_time2retain=args.default_time2retain,
+ first_burst_length=args.first_burst_length,
+ immediate_data=args.immediate_data,
+ error_recovery_level=args.error_recovery_level,
+ allow_duplicated_isid=args.allow_duplicated_isid)
+
+ p = subparsers.add_parser('iscsi_set_options', aliases=['set_iscsi_options'],
+ help="""Set options of iSCSI subsystem""")
+ p.add_argument('-f', '--auth-file', help='Path to CHAP shared secret file')
+ p.add_argument('-b', '--node-base', help='Prefix of the name of iSCSI target node')
+ p.add_argument('-o', '--nop-timeout', help='Timeout in seconds to nop-in request to the initiator', type=int)
+ p.add_argument('-n', '--nop-in-interval', help='Time interval in secs between nop-in requests by the target', type=int)
+ # --disable-chap / --require-chap exclusivity is documented in help text
+ # but enforced server-side, not by argparse.
+ p.add_argument('-d', '--disable-chap', help="""CHAP for discovery session should be disabled.
+ *** Mutually exclusive with --require-chap""", action='store_true')
+ p.add_argument('-r', '--require-chap', help="""CHAP for discovery session should be required.
+ *** Mutually exclusive with --disable-chap""", action='store_true')
+ p.add_argument('-m', '--mutual-chap', help='CHAP for discovery session should be mutual', action='store_true')
+ p.add_argument('-g', '--chap-group', help="""Authentication group ID for discovery session.
+ *** Authentication group must be precreated ***""", type=int)
+ p.add_argument('-a', '--max-sessions', help='Maximum number of sessions in the host.', type=int)
+ p.add_argument('-q', '--max-queue-depth', help='Max number of outstanding I/Os per queue.', type=int)
+ p.add_argument('-c', '--max-connections-per-session', help='Negotiated parameter, MaxConnections.', type=int)
+ p.add_argument('-w', '--default-time2wait', help='Negotiated parameter, DefaultTime2Wait.', type=int)
+ p.add_argument('-v', '--default-time2retain', help='Negotiated parameter, DefaultTime2Retain.', type=int)
+ p.add_argument('-s', '--first-burst-length', help='Negotiated parameter, FirstBurstLength.', type=int)
+ p.add_argument('-i', '--immediate-data', help='Negotiated parameter, ImmediateData.', action='store_true')
+ p.add_argument('-l', '--error-recovery-level', help='Negotiated parameter, ErrorRecoveryLevel', type=int)
+ p.add_argument('-p', '--allow-duplicated-isid', help='Allow duplicated initiator session ID.', action='store_true')
+ p.set_defaults(func=iscsi_set_options)
+
+ # --- iSCSI discovery authentication and CHAP auth-group management ---
+ def iscsi_set_discovery_auth(args):
+ rpc.iscsi.iscsi_set_discovery_auth(
+ args.client,
+ disable_chap=args.disable_chap,
+ require_chap=args.require_chap,
+ mutual_chap=args.mutual_chap,
+ chap_group=args.chap_group)
+
+ p = subparsers.add_parser('iscsi_set_discovery_auth', aliases=['set_iscsi_discovery_auth'],
+ help="""Set CHAP authentication for discovery session.""")
+ p.add_argument('-d', '--disable-chap', help="""CHAP for discovery session should be disabled.
+ *** Mutually exclusive with --require-chap""", action='store_true')
+ p.add_argument('-r', '--require-chap', help="""CHAP for discovery session should be required.
+ *** Mutually exclusive with --disable-chap""", action='store_true')
+ p.add_argument('-m', '--mutual-chap', help='CHAP for discovery session should be mutual', action='store_true')
+ p.add_argument('-g', '--chap-group', help="""Authentication group ID for discovery session.
+ *** Authentication group must be precreated ***""", type=int)
+ p.set_defaults(func=iscsi_set_discovery_auth)
+
+ def iscsi_create_auth_group(args):
+ # --secrets grammar: comma-separated secret entries; within an entry,
+ # space-separated "key:value" pairs become one dict per entry.
+ # Example: 'user:u1 secret:s1,user:u2 secret:s2' -> two secret dicts.
+ secrets = None
+ if args.secrets:
+ secrets = [dict(u.split(":") for u in a.split(" ")) for a in args.secrets.split(",")]
+
+ rpc.iscsi.iscsi_create_auth_group(args.client, tag=args.tag, secrets=secrets)
+
+ p = subparsers.add_parser('iscsi_create_auth_group', aliases=['add_iscsi_auth_group'],
+ help='Create authentication group for CHAP authentication.')
+ p.add_argument('tag', help='Authentication group tag (unique, integer > 0).', type=int)
+ p.add_argument('-c', '--secrets', help="""Comma-separated list of CHAP secrets
+<user:user_name secret:chap_secret muser:mutual_user_name msecret:mutual_chap_secret> enclosed in quotes.
+Format: 'user:u1 secret:s1 muser:mu1 msecret:ms1,user:u2 secret:s2 muser:mu2 msecret:ms2'""", required=False)
+ p.set_defaults(func=iscsi_create_auth_group)
+
+ def iscsi_delete_auth_group(args):
+ rpc.iscsi.iscsi_delete_auth_group(args.client, tag=args.tag)
+
+ p = subparsers.add_parser('iscsi_delete_auth_group', aliases=['delete_iscsi_auth_group'],
+ help='Delete an authentication group.')
+ p.add_argument('tag', help='Authentication group tag', type=int)
+ p.set_defaults(func=iscsi_delete_auth_group)
+
+ def iscsi_auth_group_add_secret(args):
+ rpc.iscsi.iscsi_auth_group_add_secret(
+ args.client,
+ tag=args.tag,
+ user=args.user,
+ secret=args.secret,
+ muser=args.muser,
+ msecret=args.msecret)
+
+ p = subparsers.add_parser('iscsi_auth_group_add_secret', aliases=['add_secret_to_iscsi_auth_group'],
+ help='Add a secret to an authentication group.')
+ p.add_argument('tag', help='Authentication group tag', type=int)
+ p.add_argument('-u', '--user', help='User name for one-way CHAP authentication', required=True)
+ p.add_argument('-s', '--secret', help='Secret for one-way CHAP authentication', required=True)
+ p.add_argument('-m', '--muser', help='User name for mutual CHAP authentication')
+ p.add_argument('-r', '--msecret', help='Secret for mutual CHAP authentication')
+ p.set_defaults(func=iscsi_auth_group_add_secret)
+
+ def iscsi_auth_group_remove_secret(args):
+ rpc.iscsi.iscsi_auth_group_remove_secret(args.client, tag=args.tag, user=args.user)
+
+ p = subparsers.add_parser('iscsi_auth_group_remove_secret', aliases=['delete_secret_from_iscsi_auth_group'],
+ help='Remove a secret from an authentication group.')
+ p.add_argument('tag', help='Authentication group tag', type=int)
+ p.add_argument('-u', '--user', help='User name for one-way CHAP authentication', required=True)
+ p.set_defaults(func=iscsi_auth_group_remove_secret)
+
+ def iscsi_get_auth_groups(args):
+ print_dict(rpc.iscsi.iscsi_get_auth_groups(args.client))
+
+ p = subparsers.add_parser('iscsi_get_auth_groups', aliases=['get_iscsi_auth_groups'],
+ help='Display current authentication group configuration')
+ p.set_defaults(func=iscsi_get_auth_groups)
+
+ # --- iSCSI configuration listings and target-node creation ---
+ def iscsi_get_portal_groups(args):
+ print_dict(rpc.iscsi.iscsi_get_portal_groups(args.client))
+
+ p = subparsers.add_parser(
+ 'iscsi_get_portal_groups', aliases=['get_portal_groups'],
+ help='Display current portal group configuration')
+ p.set_defaults(func=iscsi_get_portal_groups)
+
+ def iscsi_get_initiator_groups(args):
+ print_dict(rpc.iscsi.iscsi_get_initiator_groups(args.client))
+
+ p = subparsers.add_parser('iscsi_get_initiator_groups',
+ aliases=['get_initiator_groups'],
+ help='Display current initiator group configuration')
+ p.set_defaults(func=iscsi_get_initiator_groups)
+
+ def iscsi_get_target_nodes(args):
+ print_dict(rpc.iscsi.iscsi_get_target_nodes(args.client))
+
+ p = subparsers.add_parser('iscsi_get_target_nodes', aliases=['get_target_nodes'],
+ help='Display target nodes')
+ p.set_defaults(func=iscsi_get_target_nodes)
+
+ def iscsi_create_target_node(args):
+ # Parse 'bdev:lun_id' pairs (space-separated, colon-delimited) into the
+ # list-of-dicts shape the RPC expects.
+ luns = []
+ for u in args.bdev_name_id_pairs.strip().split(" "):
+ bdev_name, lun_id = u.split(":")
+ luns.append({"bdev_name": bdev_name, "lun_id": int(lun_id)})
+
+ # Same parsing for 'portal_group_tag:initiator_group_tag' mappings.
+ pg_ig_maps = []
+ for u in args.pg_ig_mappings.strip().split(" "):
+ pg, ig = u.split(":")
+ pg_ig_maps.append({"pg_tag": int(pg), "ig_tag": int(ig)})
+
+ rpc.iscsi.iscsi_create_target_node(
+ args.client,
+ luns=luns,
+ pg_ig_maps=pg_ig_maps,
+ name=args.name,
+ alias_name=args.alias_name,
+ queue_depth=args.queue_depth,
+ chap_group=args.chap_group,
+ disable_chap=args.disable_chap,
+ require_chap=args.require_chap,
+ mutual_chap=args.mutual_chap,
+ header_digest=args.header_digest,
+ data_digest=args.data_digest)
+
+ p = subparsers.add_parser('iscsi_create_target_node', aliases=['construct_target_node'],
+ help='Add a target node')
+ p.add_argument('name', help='Target node name (ASCII)')
+ p.add_argument('alias_name', help='Target node alias name (ASCII)')
+ p.add_argument('bdev_name_id_pairs', help="""Whitespace-separated list of <bdev name:LUN ID> pairs enclosed
+ in quotes. Format: 'bdev_name0:id0 bdev_name1:id1' etc
+ Example: 'Malloc0:0 Malloc1:1 Malloc5:2'
+ *** The bdevs must pre-exist ***
+ *** LUN0 (id = 0) is required ***
+ *** bdevs names cannot contain space or colon characters ***""")
+ p.add_argument('pg_ig_mappings', help="""List of (Portal_Group_Tag:Initiator_Group_Tag) mappings
+ Whitespace separated, quoted, mapping defined with colon
+ separated list of "tags" (int > 0)
+ Example: '1:1 2:2 2:1'
+ *** The Portal/Initiator Groups must be precreated ***""")
+ p.add_argument('queue_depth', help='Desired target queue depth', type=int)
+ p.add_argument('-g', '--chap-group', help="""Authentication group ID for this target node.
+ *** Authentication group must be precreated ***""", type=int, default=0)
+ p.add_argument('-d', '--disable-chap', help="""CHAP authentication should be disabled for this target node.
+ *** Mutually exclusive with --require-chap ***""", action='store_true')
+ p.add_argument('-r', '--require-chap', help="""CHAP authentication should be required for this target node.
+ *** Mutually exclusive with --disable-chap ***""", action='store_true')
+ p.add_argument(
+ '-m', '--mutual-chap', help='CHAP authentication should be mutual/bidirectional.', action='store_true')
+ p.add_argument('-H', '--header-digest',
+ help='Header Digest should be required for this target node.', action='store_true')
+ p.add_argument('-D', '--data-digest',
+ help='Data Digest should be required for this target node.', action='store_true')
+ p.set_defaults(func=iscsi_create_target_node)
+
+    # Thin wrapper: forward the parsed arguments to the iscsi_target_node_add_lun RPC.
+    def iscsi_target_node_add_lun(args):
+        rpc.iscsi.iscsi_target_node_add_lun(
+            args.client,
+            name=args.name,
+            bdev_name=args.bdev_name,
+            lun_id=args.lun_id)
+
+    p = subparsers.add_parser('iscsi_target_node_add_lun', aliases=['target_node_add_lun'],
+                              help='Add LUN to the target node')
+    p.add_argument('name', help='Target node name (ASCII)')
+    p.add_argument('bdev_name', help="""bdev name enclosed in quotes.
+    *** bdev name cannot contain space or colon characters ***""")
+    # lun_id defaults to None when -i is omitted; the target then picks the
+    # lowest free LUN ID.
+    p.add_argument('-i', dest='lun_id', help="""LUN ID (integer >= 0)
+    *** If LUN ID is omitted or -1, the lowest free one is assigned ***""", type=int, required=False)
+    p.set_defaults(func=iscsi_target_node_add_lun)
+
+    # Thin wrapper: set/replace the CHAP configuration of an existing target node.
+    def iscsi_target_node_set_auth(args):
+        rpc.iscsi.iscsi_target_node_set_auth(
+            args.client,
+            name=args.name,
+            chap_group=args.chap_group,
+            disable_chap=args.disable_chap,
+            require_chap=args.require_chap,
+            mutual_chap=args.mutual_chap)
+
+    p = subparsers.add_parser('iscsi_target_node_set_auth', aliases=['set_iscsi_target_node_auth'],
+                              help='Set CHAP authentication for the target node')
+    p.add_argument('name', help='Target node name (ASCII)')
+    p.add_argument('-g', '--chap-group', help="""Authentication group ID for this target node.
+    *** Authentication group must be precreated ***""", type=int, default=0)
+    p.add_argument('-d', '--disable-chap', help="""CHAP authentication should be disabled for this target node.
+    *** Mutually exclusive with --require-chap ***""", action='store_true')
+    p.add_argument('-r', '--require-chap', help="""CHAP authentication should be required for this target node.
+    *** Mutually exclusive with --disable-chap ***""", action='store_true')
+    p.add_argument('-m', '--mutual-chap', help='CHAP authentication should be mutual/bidirectional.',
+                   action='store_true')
+    p.set_defaults(func=iscsi_target_node_set_auth)
+
+    # Parse the quoted 'pg:ig' mapping list into [{'pg_tag': int, 'ig_tag': int}, ...]
+    # dicts and add them to the target node.  Raises ValueError on malformed pairs.
+    def iscsi_target_node_add_pg_ig_maps(args):
+        pg_ig_maps = []
+        for u in args.pg_ig_mappings.strip().split(" "):
+            pg, ig = u.split(":")
+            pg_ig_maps.append({"pg_tag": int(pg), "ig_tag": int(ig)})
+        rpc.iscsi.iscsi_target_node_add_pg_ig_maps(
+            args.client,
+            pg_ig_maps=pg_ig_maps,
+            name=args.name)
+
+    p = subparsers.add_parser('iscsi_target_node_add_pg_ig_maps',
+                              aliases=['add_pg_ig_maps'],
+                              help='Add PG-IG maps to the target node')
+    p.add_argument('name', help='Target node name (ASCII)')
+    p.add_argument('pg_ig_mappings', help="""List of (Portal_Group_Tag:Initiator_Group_Tag) mappings
+    Whitespace separated, quoted, mapping defined with colon
+    separated list of "tags" (int > 0)
+    Example: '1:1 2:2 2:1'
+    *** The Portal/Initiator Groups must be precreated ***""")
+    p.set_defaults(func=iscsi_target_node_add_pg_ig_maps)
+
+    # Mirror of the add handler above: same 'pg:ig' parsing, but removes the maps.
+    def iscsi_target_node_remove_pg_ig_maps(args):
+        pg_ig_maps = []
+        for u in args.pg_ig_mappings.strip().split(" "):
+            pg, ig = u.split(":")
+            pg_ig_maps.append({"pg_tag": int(pg), "ig_tag": int(ig)})
+        rpc.iscsi.iscsi_target_node_remove_pg_ig_maps(
+            args.client, pg_ig_maps=pg_ig_maps, name=args.name)
+
+    p = subparsers.add_parser('iscsi_target_node_remove_pg_ig_maps',
+                              aliases=['delete_pg_ig_maps'],
+                              help='Delete PG-IG maps from the target node')
+    p.add_argument('name', help='Target node name (ASCII)')
+    p.add_argument('pg_ig_mappings', help="""List of (Portal_Group_Tag:Initiator_Group_Tag) mappings
+    Whitespace separated, quoted, mapping defined with colon
+    separated list of "tags" (int > 0)
+    Example: '1:1 2:2 2:1'
+    *** The Portal/Initiator Groups must be precreated ***""")
+    p.set_defaults(func=iscsi_target_node_remove_pg_ig_maps)
+
+ def iscsi_create_portal_group(args):
+ portals = []
+ for p in args.portal_list.strip().split(' '):
+ ip, separator, port_cpumask = p.rpartition(':')
+ split_port_cpumask = port_cpumask.split('@')
+ if len(split_port_cpumask) == 1:
+ port = port_cpumask
+ portals.append({'host': ip, 'port': port})
+ else:
+ port = split_port_cpumask[0]
+ cpumask = split_port_cpumask[1]
+ portals.append({'host': ip, 'port': port})
+ print("WARNING: Specifying a portal group with a CPU mask is no longer supported. Ignoring it.")
+ rpc.iscsi.iscsi_create_portal_group(
+ args.client,
+ portals=portals,
+ tag=args.tag)
+
+ p = subparsers.add_parser('iscsi_create_portal_group', aliases=['add_portal_group'],
+ help='Add a portal group')
+ p.add_argument(
+ 'tag', help='Portal group tag (unique, integer > 0)', type=int)
+ p.add_argument('portal_list', help="""List of portals in host:port format, separated by whitespace
+ Example: '192.168.100.100:3260 192.168.100.100:3261 192.168.100.100:3262""")
+ p.set_defaults(func=iscsi_create_portal_group)
+
+    # Create an initiator group from two whitespace-separated quoted lists
+    # (hostnames/IPs and netmasks); both are required positionals.
+    def iscsi_create_initiator_group(args):
+        initiators = []
+        netmasks = []
+        for i in args.initiator_list.strip().split(' '):
+            initiators.append(i)
+        for n in args.netmask_list.strip().split(' '):
+            netmasks.append(n)
+        rpc.iscsi.iscsi_create_initiator_group(
+            args.client,
+            tag=args.tag,
+            initiators=initiators,
+            netmasks=netmasks)
+
+    p = subparsers.add_parser('iscsi_create_initiator_group', aliases=['add_initiator_group'],
+                              help='Add an initiator group')
+    p.add_argument(
+        'tag', help='Initiator group tag (unique, integer > 0)', type=int)
+    p.add_argument('initiator_list', help="""Whitespace-separated list of initiator hostnames or IP addresses,
+    enclosed in quotes.  Example: 'ANY' or '127.0.0.1 192.168.200.100'""")
+    p.add_argument('netmask_list', help="""Whitespace-separated list of initiator netmasks enclosed in quotes.
+    Example: '255.255.0.0 255.248.0.0' etc""")
+    p.set_defaults(func=iscsi_create_initiator_group)
+
+    # Add initiators and/or netmasks to an existing group.  Unlike the create
+    # handler, both lists are optional: None is passed for an omitted list so
+    # the RPC leaves that side unchanged.
+    def iscsi_initiator_group_add_initiators(args):
+        initiators = None
+        netmasks = None
+        if args.initiator_list:
+            initiators = []
+            for i in args.initiator_list.strip().split(' '):
+                initiators.append(i)
+        if args.netmask_list:
+            netmasks = []
+            for n in args.netmask_list.strip().split(' '):
+                netmasks.append(n)
+        rpc.iscsi.iscsi_initiator_group_add_initiators(
+            args.client,
+            tag=args.tag,
+            initiators=initiators,
+            netmasks=netmasks)
+
+    p = subparsers.add_parser('iscsi_initiator_group_add_initiators',
+                              aliases=['add_initiators_to_initiator_group'],
+                              help='Add initiators to an existing initiator group')
+    p.add_argument(
+        'tag', help='Initiator group tag (unique, integer > 0)', type=int)
+    p.add_argument('-n', dest='initiator_list', help="""Whitespace-separated list of initiator hostnames or IP addresses,
+    enclosed in quotes.  This parameter can be omitted.  Example: 'ANY' or '127.0.0.1 192.168.200.100'""", required=False)
+    p.add_argument('-m', dest='netmask_list', help="""Whitespace-separated list of initiator netmasks enclosed in quotes.
+    This parameter can be omitted.  Example: '255.255.0.0 255.248.0.0' etc""", required=False)
+    p.set_defaults(func=iscsi_initiator_group_add_initiators)
+
+    # Mirror of the add handler: removes the given initiators/netmasks, again
+    # passing None for any list the caller did not provide.
+    def iscsi_initiator_group_remove_initiators(args):
+        initiators = None
+        netmasks = None
+        if args.initiator_list:
+            initiators = []
+            for i in args.initiator_list.strip().split(' '):
+                initiators.append(i)
+        if args.netmask_list:
+            netmasks = []
+            for n in args.netmask_list.strip().split(' '):
+                netmasks.append(n)
+        rpc.iscsi.iscsi_initiator_group_remove_initiators(
+            args.client,
+            tag=args.tag,
+            initiators=initiators,
+            netmasks=netmasks)
+
+    p = subparsers.add_parser('iscsi_initiator_group_remove_initiators',
+                              aliases=['delete_initiators_from_initiator_group'],
+                              help='Delete initiators from an existing initiator group')
+    p.add_argument(
+        'tag', help='Initiator group tag (unique, integer > 0)', type=int)
+    p.add_argument('-n', dest='initiator_list', help="""Whitespace-separated list of initiator hostnames or IP addresses,
+    enclosed in quotes.  This parameter can be omitted.  Example: 'ANY' or '127.0.0.1 192.168.200.100'""", required=False)
+    p.add_argument('-m', dest='netmask_list', help="""Whitespace-separated list of initiator netmasks enclosed in quotes.
+    This parameter can be omitted.  Example: '255.255.0.0 255.248.0.0' etc""", required=False)
+    p.set_defaults(func=iscsi_initiator_group_remove_initiators)
+
+    # Delete a target node by its full IQN-style name.
+    def iscsi_delete_target_node(args):
+        rpc.iscsi.iscsi_delete_target_node(
+            args.client, target_node_name=args.target_node_name)
+
+    p = subparsers.add_parser('iscsi_delete_target_node', aliases=['delete_target_node'],
+                              help='Delete a target node')
+    p.add_argument('target_node_name',
+                   help='Target node name to be deleted. Example: iqn.2016-06.io.spdk:disk1.')
+    p.set_defaults(func=iscsi_delete_target_node)
+
+    # Delete a portal group by tag.
+    def iscsi_delete_portal_group(args):
+        rpc.iscsi.iscsi_delete_portal_group(args.client, tag=args.tag)
+
+    p = subparsers.add_parser('iscsi_delete_portal_group',
+                              aliases=['delete_portal_group'],
+                              help='Delete a portal group')
+    p.add_argument(
+        'tag', help='Portal group tag (unique, integer > 0)', type=int)
+    p.set_defaults(func=iscsi_delete_portal_group)
+
+    # Delete an initiator group by tag.
+    def iscsi_delete_initiator_group(args):
+        rpc.iscsi.iscsi_delete_initiator_group(args.client, tag=args.tag)
+
+    p = subparsers.add_parser('iscsi_delete_initiator_group',
+                              aliases=['delete_initiator_group'],
+                              help='Delete an initiator group')
+    p.add_argument(
+        'tag', help='Initiator group tag (unique, integer > 0)', type=int)
+    p.set_defaults(func=iscsi_delete_initiator_group)
+
+    # Configure CHAP for discovery sessions of one portal group (same flag set
+    # as iscsi_target_node_set_auth, keyed by tag instead of node name).
+    def iscsi_portal_group_set_auth(args):
+        rpc.iscsi.iscsi_portal_group_set_auth(
+            args.client,
+            tag=args.tag,
+            chap_group=args.chap_group,
+            disable_chap=args.disable_chap,
+            require_chap=args.require_chap,
+            mutual_chap=args.mutual_chap)
+
+    p = subparsers.add_parser('iscsi_portal_group_set_auth',
+                              help='Set CHAP authentication for discovery sessions specific for the portal group')
+    p.add_argument('tag', help='Portal group tag (unique, integer > 0)', type=int)
+    p.add_argument('-g', '--chap-group', help="""Authentication group ID for this portal group.
+    *** Authentication group must be precreated ***""", type=int, default=0)
+    p.add_argument('-d', '--disable-chap', help="""CHAP authentication should be disabled for this portal group.
+    *** Mutually exclusive with --require-chap ***""", action='store_true')
+    p.add_argument('-r', '--require-chap', help="""CHAP authentication should be required for this portal group.
+    *** Mutually exclusive with --disable-chap ***""", action='store_true')
+    p.add_argument('-m', '--mutual-chap', help='CHAP authentication should be mutual/bidirectional.',
+                   action='store_true')
+    p.set_defaults(func=iscsi_portal_group_set_auth)
+
+    # Read-only queries: fetch and pretty-print server-side state.
+    def iscsi_get_connections(args):
+        print_dict(rpc.iscsi.iscsi_get_connections(args.client))
+
+    p = subparsers.add_parser('iscsi_get_connections', aliases=['get_iscsi_connections'],
+                              help='Display iSCSI connections')
+    p.set_defaults(func=iscsi_get_connections)
+
+    def iscsi_get_options(args):
+        print_dict(rpc.iscsi.iscsi_get_options(args.client))
+
+    p = subparsers.add_parser('iscsi_get_options', aliases=['get_iscsi_global_params'],
+                              help='Display iSCSI global parameters')
+    p.set_defaults(func=iscsi_get_options)
+
+    def scsi_get_devices(args):
+        print_dict(rpc.iscsi.scsi_get_devices(args.client))
+
+    p = subparsers.add_parser('scsi_get_devices', aliases=['get_scsi_devices'],
+                              help='Display SCSI devices')
+    p.set_defaults(func=scsi_get_devices)
+
+    # trace
+    # Tracepoint group control: enable/disable a named group (or "all") in the
+    # target's tpoint_group_mask, and read back the current mask.
+    def trace_enable_tpoint_group(args):
+        rpc.trace.trace_enable_tpoint_group(args.client, name=args.name)
+
+    p = subparsers.add_parser('trace_enable_tpoint_group', aliases=['enable_tpoint_group'],
+                              help='enable trace on a specific tpoint group')
+    p.add_argument(
+        'name', help="""trace group name we want to enable in tpoint_group_mask.
+        (for example "bdev" for bdev trace group, "all" for all trace groups).""")
+    p.set_defaults(func=trace_enable_tpoint_group)
+
+    def trace_disable_tpoint_group(args):
+        rpc.trace.trace_disable_tpoint_group(args.client, name=args.name)
+
+    p = subparsers.add_parser('trace_disable_tpoint_group', aliases=['disable_tpoint_group'],
+                              help='disable trace on a specific tpoint group')
+    p.add_argument(
+        'name', help="""trace group name we want to disable in tpoint_group_mask.
+        (for example "bdev" for bdev trace group, "all" for all trace groups).""")
+    p.set_defaults(func=trace_disable_tpoint_group)
+
+    def trace_get_tpoint_group_mask(args):
+        print_dict(rpc.trace.trace_get_tpoint_group_mask(args.client))
+
+    p = subparsers.add_parser('trace_get_tpoint_group_mask', aliases=['get_tpoint_group_mask'],
+                              help='get trace point group mask')
+    p.set_defaults(func=trace_get_tpoint_group_mask)
+
+    # log
+    # Log control commands: per-component debug flags (set/clear/list) and the
+    # global log level vs. print level (level = what is logged, print level =
+    # what is also written to stderr).
+    def log_set_flag(args):
+        rpc.log.log_set_flag(args.client, flag=args.flag)
+
+    p = subparsers.add_parser('log_set_flag', help='set log flag', aliases=['set_log_flag'])
+    p.add_argument(
+        'flag', help='log flag we want to set. (for example "nvme").')
+    p.set_defaults(func=log_set_flag)
+
+    def log_clear_flag(args):
+        rpc.log.log_clear_flag(args.client, flag=args.flag)
+
+    p = subparsers.add_parser('log_clear_flag', help='clear log flag', aliases=['clear_log_flag'])
+    p.add_argument(
+        'flag', help='log flag we want to clear. (for example "nvme").')
+    p.set_defaults(func=log_clear_flag)
+
+    def log_get_flags(args):
+        print_dict(rpc.log.log_get_flags(args.client))
+
+    p = subparsers.add_parser('log_get_flags', help='get log flags', aliases=['get_log_flags'])
+    p.set_defaults(func=log_get_flags)
+
+    def log_set_level(args):
+        rpc.log.log_set_level(args.client, level=args.level)
+
+    p = subparsers.add_parser('log_set_level', aliases=['set_log_level'],
+                              help='set log level')
+    p.add_argument('level', help='log level we want to set. (for example "DEBUG").')
+    p.set_defaults(func=log_set_level)
+
+    def log_get_level(args):
+        print_dict(rpc.log.log_get_level(args.client))
+
+    p = subparsers.add_parser('log_get_level', aliases=['get_log_level'],
+                              help='get log level')
+    p.set_defaults(func=log_get_level)
+
+    def log_set_print_level(args):
+        rpc.log.log_set_print_level(args.client, level=args.level)
+
+    p = subparsers.add_parser('log_set_print_level', aliases=['set_log_print_level'],
+                              help='set log print level')
+    p.add_argument('level', help='log print level we want to set. (for example "DEBUG").')
+    p.set_defaults(func=log_set_print_level)
+
+    def log_get_print_level(args):
+        print_dict(rpc.log.log_get_print_level(args.client))
+
+    p = subparsers.add_parser('log_get_print_level', aliases=['get_log_print_level'],
+                              help='get log print level')
+    p.set_defaults(func=log_get_print_level)
+
+    # lvol
+    # Create an lvol store on a base bdev; the resulting store UUID is printed
+    # as JSON so scripts can capture it.
+    def bdev_lvol_create_lvstore(args):
+        print_json(rpc.lvol.bdev_lvol_create_lvstore(args.client,
+                                                     bdev_name=args.bdev_name,
+                                                     lvs_name=args.lvs_name,
+                                                     cluster_sz=args.cluster_sz,
+                                                     clear_method=args.clear_method))
+
+    p = subparsers.add_parser('bdev_lvol_create_lvstore', aliases=['construct_lvol_store'],
+                              help='Add logical volume store on base bdev')
+    p.add_argument('bdev_name', help='base bdev name')
+    p.add_argument('lvs_name', help='name for lvol store')
+    p.add_argument('-c', '--cluster-sz', help='size of cluster (in bytes)', type=int, required=False)
+    p.add_argument('--clear-method', help="""Change clear method for data region.
+    Available: none, unmap, write_zeroes""", required=False)
+    p.set_defaults(func=bdev_lvol_create_lvstore)
+
+    def bdev_lvol_rename_lvstore(args):
+        rpc.lvol.bdev_lvol_rename_lvstore(args.client,
+                                          old_name=args.old_name,
+                                          new_name=args.new_name)
+
+    p = subparsers.add_parser('bdev_lvol_rename_lvstore', aliases=['rename_lvol_store'],
+                              help='Change logical volume store name')
+    p.add_argument('old_name', help='old name')
+    p.add_argument('new_name', help='new name')
+    p.set_defaults(func=bdev_lvol_rename_lvstore)
+
+    # Create an lvol bdev.  The CLI takes the size in MiB and converts it to
+    # bytes here before calling the RPC.  The parent store is selected by
+    # either --uuid or --lvs-name.
+    def bdev_lvol_create(args):
+        print_json(rpc.lvol.bdev_lvol_create(args.client,
+                                             lvol_name=args.lvol_name,
+                                             size=args.size * 1024 * 1024,
+                                             thin_provision=args.thin_provision,
+                                             clear_method=args.clear_method,
+                                             uuid=args.uuid,
+                                             lvs_name=args.lvs_name))
+
+    p = subparsers.add_parser('bdev_lvol_create', aliases=['construct_lvol_bdev'],
+                              help='Add a bdev with an logical volume backend')
+    p.add_argument('-u', '--uuid', help='lvol store UUID', required=False)
+    p.add_argument('-l', '--lvs-name', help='lvol store name', required=False)
+    p.add_argument('-t', '--thin-provision', action='store_true', help='create lvol bdev as thin provisioned')
+    p.add_argument('-c', '--clear-method', help="""Change default data clusters clear method.
+    Available: none, unmap, write_zeroes""", required=False)
+    p.add_argument('lvol_name', help='name for this lvol')
+    p.add_argument('size', help='size in MiB for this bdev', type=int)
+    p.set_defaults(func=bdev_lvol_create)
+
+    def bdev_lvol_snapshot(args):
+        print_json(rpc.lvol.bdev_lvol_snapshot(args.client,
+                                               lvol_name=args.lvol_name,
+                                               snapshot_name=args.snapshot_name))
+
+    p = subparsers.add_parser('bdev_lvol_snapshot', aliases=['snapshot_lvol_bdev'],
+                              help='Create a snapshot of an lvol bdev')
+    p.add_argument('lvol_name', help='lvol bdev name')
+    p.add_argument('snapshot_name', help='lvol snapshot name')
+    p.set_defaults(func=bdev_lvol_snapshot)
+
+    def bdev_lvol_clone(args):
+        print_json(rpc.lvol.bdev_lvol_clone(args.client,
+                                            snapshot_name=args.snapshot_name,
+                                            clone_name=args.clone_name))
+
+    p = subparsers.add_parser('bdev_lvol_clone', aliases=['clone_lvol_bdev'],
+                              help='Create a clone of an lvol snapshot')
+    p.add_argument('snapshot_name', help='lvol snapshot name')
+    p.add_argument('clone_name', help='lvol clone name')
+    p.set_defaults(func=bdev_lvol_clone)
+
+    def bdev_lvol_rename(args):
+        rpc.lvol.bdev_lvol_rename(args.client,
+                                  old_name=args.old_name,
+                                  new_name=args.new_name)
+
+    p = subparsers.add_parser('bdev_lvol_rename', aliases=['rename_lvol_bdev'],
+                              help='Change lvol bdev name')
+    p.add_argument('old_name', help='lvol bdev name')
+    p.add_argument('new_name', help='new lvol name')
+    p.set_defaults(func=bdev_lvol_rename)
+
+    # Inflate: allocate all clusters of a thin-provisioned lvol.
+    def bdev_lvol_inflate(args):
+        rpc.lvol.bdev_lvol_inflate(args.client,
+                                   name=args.name)
+
+    p = subparsers.add_parser('bdev_lvol_inflate', aliases=['inflate_lvol_bdev'],
+                              help='Make thin provisioned lvol a thick provisioned lvol')
+    p.add_argument('name', help='lvol bdev name')
+    p.set_defaults(func=bdev_lvol_inflate)
+
+    def bdev_lvol_decouple_parent(args):
+        rpc.lvol.bdev_lvol_decouple_parent(args.client,
+                                           name=args.name)
+
+    p = subparsers.add_parser('bdev_lvol_decouple_parent', aliases=['decouple_parent_lvol_bdev'],
+                              help='Decouple parent of lvol')
+    p.add_argument('name', help='lvol bdev name')
+    p.set_defaults(func=bdev_lvol_decouple_parent)
+
+    # Resize: CLI size is in MiB, converted to bytes here (same convention as
+    # bdev_lvol_create).
+    def bdev_lvol_resize(args):
+        rpc.lvol.bdev_lvol_resize(args.client,
+                                  name=args.name,
+                                  size=args.size * 1024 * 1024)
+
+    p = subparsers.add_parser('bdev_lvol_resize', aliases=['resize_lvol_bdev'],
+                              help='Resize existing lvol bdev')
+    p.add_argument('name', help='lvol bdev name')
+    p.add_argument('size', help='new size in MiB for this bdev', type=int)
+    p.set_defaults(func=bdev_lvol_resize)
+
+    def bdev_lvol_set_read_only(args):
+        rpc.lvol.bdev_lvol_set_read_only(args.client,
+                                         name=args.name)
+
+    p = subparsers.add_parser('bdev_lvol_set_read_only', aliases=['set_read_only_lvol_bdev'],
+                              help='Mark lvol bdev as read only')
+    p.add_argument('name', help='lvol bdev name')
+    p.set_defaults(func=bdev_lvol_set_read_only)
+
+    def bdev_lvol_delete(args):
+        rpc.lvol.bdev_lvol_delete(args.client,
+                                  name=args.name)
+
+    p = subparsers.add_parser('bdev_lvol_delete', aliases=['destroy_lvol_bdev'],
+                              help='Destroy a logical volume')
+    p.add_argument('name', help='lvol bdev name')
+    p.set_defaults(func=bdev_lvol_delete)
+
+    # Store-level operations: the store is addressed by either UUID or name;
+    # whichever option the user omitted is forwarded as None.
+    def bdev_lvol_delete_lvstore(args):
+        rpc.lvol.bdev_lvol_delete_lvstore(args.client,
+                                          uuid=args.uuid,
+                                          lvs_name=args.lvs_name)
+
+    p = subparsers.add_parser('bdev_lvol_delete_lvstore', aliases=['destroy_lvol_store'],
+                              help='Destroy an logical volume store')
+    p.add_argument('-u', '--uuid', help='lvol store UUID', required=False)
+    p.add_argument('-l', '--lvs-name', help='lvol store name', required=False)
+    p.set_defaults(func=bdev_lvol_delete_lvstore)
+
+    def bdev_lvol_get_lvstores(args):
+        print_dict(rpc.lvol.bdev_lvol_get_lvstores(args.client,
+                                                   uuid=args.uuid,
+                                                   lvs_name=args.lvs_name))
+
+    p = subparsers.add_parser('bdev_lvol_get_lvstores', aliases=['get_lvol_stores'],
+                              help='Display current logical volume store list')
+    p.add_argument('-u', '--uuid', help='lvol store UUID', required=False)
+    p.add_argument('-l', '--lvs-name', help='lvol store name', required=False)
+    p.set_defaults(func=bdev_lvol_get_lvstores)
+
+    # List raid bdev names filtered by lifecycle category.
+    def bdev_raid_get_bdevs(args):
+        print_array(rpc.bdev.bdev_raid_get_bdevs(args.client,
+                                                 category=args.category))
+
+    p = subparsers.add_parser('bdev_raid_get_bdevs', aliases=['get_raid_bdevs'],
+                              help="""This is used to list all the raid bdev names based on the input category
+    requested. Category should be one of 'all', 'online', 'configuring' or 'offline'. 'all' means all the raid bdevs whether
+    they are online or configuring or offline. 'online' is the raid bdev which is registered with bdev layer. 'configuring'
+    is the raid bdev which does not have full configuration discovered yet. 'offline' is the raid bdev which is not registered
+    with bdev as of now and it has encountered any error or user has requested to offline the raid bdev""")
+    p.add_argument('category', help='all or online or configuring or offline')
+    p.set_defaults(func=bdev_raid_get_bdevs)
+
+    # Create a raid bdev from a quoted, whitespace-separated base bdev list.
+    # Note: -s/--strip-size is deprecated in favor of -z/--strip-size_kb; both
+    # are forwarded and the server decides which to honor.
+    def bdev_raid_create(args):
+        base_bdevs = []
+        for u in args.base_bdevs.strip().split(" "):
+            base_bdevs.append(u)
+
+        rpc.bdev.bdev_raid_create(args.client,
+                                  name=args.name,
+                                  strip_size=args.strip_size,
+                                  strip_size_kb=args.strip_size_kb,
+                                  raid_level=args.raid_level,
+                                  base_bdevs=base_bdevs)
+    p = subparsers.add_parser('bdev_raid_create', aliases=['construct_raid_bdev'],
+                              help='Create new raid bdev')
+    p.add_argument('-n', '--name', help='raid bdev name', required=True)
+    p.add_argument('-s', '--strip-size', help='strip size in KB (deprecated)', type=int)
+    p.add_argument('-z', '--strip-size_kb', help='strip size in KB', type=int)
+    p.add_argument('-r', '--raid-level', help='raid level, only raid level 0 is supported', required=True)
+    p.add_argument('-b', '--base-bdevs', help='base bdevs name, whitespace separated list in quotes', required=True)
+    p.set_defaults(func=bdev_raid_create)
+
+    def bdev_raid_delete(args):
+        rpc.bdev.bdev_raid_delete(args.client,
+                                  name=args.name)
+    p = subparsers.add_parser('bdev_raid_delete', aliases=['destroy_raid_bdev'],
+                              help='Delete existing raid bdev')
+    p.add_argument('name', help='raid bdev name')
+    p.set_defaults(func=bdev_raid_delete)
+
+    # split
+    # Split a base bdev into split_count children; prints the resulting split
+    # bdev names as an array.  split-size-mb of 0 means divide evenly.
+    def bdev_split_create(args):
+        print_array(rpc.bdev.bdev_split_create(args.client,
+                                               base_bdev=args.base_bdev,
+                                               split_count=args.split_count,
+                                               split_size_mb=args.split_size_mb))
+
+    p = subparsers.add_parser('bdev_split_create', aliases=['construct_split_vbdev'],
+                              help="""Add given disk name to split config. If bdev with base_name
+    name exist the split bdevs will be created right away, if not split bdevs will be created when base bdev became
+    available (during examination process).""")
+    p.add_argument('base_bdev', help='base bdev name')
+    p.add_argument('-s', '--split-size-mb', help='size in MiB for each bdev', type=int, default=0)
+    p.add_argument('split_count', help="""Optional - number of split bdevs to create. Total size * split_count must not
+    exceed the base bdev size.""", type=int)
+    p.set_defaults(func=bdev_split_create)
+
+    def bdev_split_delete(args):
+        rpc.bdev.bdev_split_delete(args.client,
+                                   base_bdev=args.base_bdev)
+
+    p = subparsers.add_parser('bdev_split_delete', aliases=['destruct_split_vbdev'],
+                              help="""Delete split config with all created splits.""")
+    p.add_argument('base_bdev', help='base bdev name')
+    p.set_defaults(func=bdev_split_delete)
+
+    # ftl
+    # Defrag limit levels accepted by --limit/--limit-threshold below.
+    ftl_valid_limits = ('crit', 'high', 'low', 'start')
+
+    def bdev_ftl_create(args):
+        # Expand "key:value,key:value" limit strings into keyword arguments of
+        # the form limit_<key>[<suffix>] for the RPC; unknown keys are rejected.
+        def parse_limits(limits, arg_dict, key_suffix=''):
+            for limit in limits.split(','):
+                key, value = limit.split(':', 1)
+                if key in ftl_valid_limits:
+                    arg_dict['limit_' + key + key_suffix] = int(value)
+                else:
+                    raise ValueError('Limit {} is not supported'.format(key))
+
+        arg_limits = {}
+        if args.limit_threshold:
+            parse_limits(args.limit_threshold, arg_limits, '_threshold')
+
+        if args.limit:
+            parse_limits(args.limit, arg_limits)
+
+        # The collected limit_* kwargs are splatted into the create call.
+        print_dict(rpc.bdev.bdev_ftl_create(args.client,
+                                            name=args.name,
+                                            base_bdev=args.base_bdev,
+                                            uuid=args.uuid,
+                                            cache=args.cache,
+                                            allow_open_bands=args.allow_open_bands,
+                                            overprovisioning=args.overprovisioning,
+                                            l2p_path=args.l2p_path,
+                                            use_append=args.use_append,
+                                            **arg_limits))
+
+    p = subparsers.add_parser('bdev_ftl_create', aliases=['construct_ftl_bdev'], help='Add FTL bdev')
+    p.add_argument('-b', '--name', help="Name of the bdev", required=True)
+    p.add_argument('-d', '--base_bdev', help='Name of zoned bdev used as underlying device',
+                   required=True)
+    p.add_argument('-u', '--uuid', help='UUID of restored bdev (not applicable when creating new '
+                   'instance): e.g. b286d19a-0059-4709-abcd-9f7732b1567d (optional)')
+    p.add_argument('-c', '--cache', help='Name of the bdev to be used as a write buffer cache (optional)')
+    p.add_argument('-o', '--allow_open_bands', help='Restoring after dirty shutdown without cache will'
+                   ' result in partial data recovery, instead of error', action='store_true')
+    p.add_argument('--overprovisioning', help='Percentage of device used for relocation, not exposed'
+                   ' to user (optional)', type=int)
+    p.add_argument('--l2p_path', help='Path to persistent memory file or device to store l2p onto, '
+                   'by default l2p is kept in DRAM and is volatile (optional)')
+    p.add_argument('--use_append', help='Use appends instead of writes', action='store_true')
+
+    limits = p.add_argument_group('Defrag limits', 'Configures defrag limits and thresholds for'
+                                  ' levels ' + str(ftl_valid_limits)[1:-1])
+    limits.add_argument('--limit', help='Percentage of allowed user versus internal writes at given'
+                        ' levels, e.g. crit:0,high:20,low:80')
+    limits.add_argument('--limit-threshold', help='Number of free bands triggering a given level of'
+                        ' write limiting e.g. crit:1,high:2,low:3,start:4')
+    p.set_defaults(func=bdev_ftl_create)
+
+    def bdev_ftl_delete(args):
+        print_dict(rpc.bdev.bdev_ftl_delete(args.client, name=args.name))
+
+    p = subparsers.add_parser('bdev_ftl_delete', aliases=['delete_ftl_bdev'],
+                              help='Delete FTL bdev')
+    p.add_argument('-b', '--name', help="Name of the bdev", required=True)
+    p.set_defaults(func=bdev_ftl_delete)
+
+    # vmd
+    def enable_vmd(args):
+        print_dict(rpc.vmd.enable_vmd(args.client))
+
+    p = subparsers.add_parser('enable_vmd', help='Enable VMD enumeration')
+    p.set_defaults(func=enable_vmd)
+
+    # nbd
+    # Export a bdev as a /dev/nbdX device; prints the device path chosen by the
+    # target (the nbd_device positional is optional).
+    def nbd_start_disk(args):
+        print(rpc.nbd.nbd_start_disk(args.client,
+                                     bdev_name=args.bdev_name,
+                                     nbd_device=args.nbd_device))
+
+    p = subparsers.add_parser('nbd_start_disk', aliases=['start_nbd_disk'],
+                              help='Export a bdev as an nbd disk')
+    p.add_argument('bdev_name', help='Blockdev name to be exported. Example: Malloc0.')
+    p.add_argument('nbd_device', help='Nbd device name to be assigned. Example: /dev/nbd0.', nargs='?')
+    p.set_defaults(func=nbd_start_disk)
+
+    def nbd_stop_disk(args):
+        rpc.nbd.nbd_stop_disk(args.client,
+                              nbd_device=args.nbd_device)
+
+    p = subparsers.add_parser('nbd_stop_disk', aliases=['stop_nbd_disk'],
+                              help='Stop an nbd disk')
+    p.add_argument('nbd_device', help='Nbd device name to be stopped. Example: /dev/nbd0.')
+    p.set_defaults(func=nbd_stop_disk)
+
+    # With -n omitted, nbd_device is None and the RPC lists all nbd disks.
+    def nbd_get_disks(args):
+        print_dict(rpc.nbd.nbd_get_disks(args.client,
+                                         nbd_device=args.nbd_device))
+
+    p = subparsers.add_parser('nbd_get_disks', aliases=['get_nbd_disks'],
+                              help='Display full or specified nbd device list')
+    p.add_argument('-n', '--nbd-device', help="Path of the nbd device. Example: /dev/nbd0", required=False)
+    p.set_defaults(func=nbd_get_disks)
+
+    # net
+    # Network-interface IP management on the target host, keyed by ifc index.
+    def net_interface_add_ip_address(args):
+        rpc.net.net_interface_add_ip_address(args.client, ifc_index=args.ifc_index, ip_addr=args.ip_addr)
+
+    p = subparsers.add_parser('net_interface_add_ip_address', aliases=['add_ip_address'],
+                              help='Add IP address')
+    p.add_argument('ifc_index', help='ifc index of the nic device.', type=int)
+    p.add_argument('ip_addr', help='ip address will be added.')
+    p.set_defaults(func=net_interface_add_ip_address)
+
+    def net_interface_delete_ip_address(args):
+        rpc.net.net_interface_delete_ip_address(args.client, ifc_index=args.ifc_index, ip_addr=args.ip_addr)
+
+    p = subparsers.add_parser('net_interface_delete_ip_address', aliases=['delete_ip_address'],
+                              help='Delete IP address')
+    p.add_argument('ifc_index', help='ifc index of the nic device.', type=int)
+    p.add_argument('ip_addr', help='ip address will be deleted.')
+    p.set_defaults(func=net_interface_delete_ip_address)
+
+    def net_get_interfaces(args):
+        print_dict(rpc.net.net_get_interfaces(args.client))
+
+    p = subparsers.add_parser(
+        'net_get_interfaces', aliases=['get_interfaces'], help='Display current interface list')
+    p.set_defaults(func=net_get_interfaces)
+
+ # NVMe-oF
+ def nvmf_set_max_subsystems(args):
+ rpc.nvmf.nvmf_set_max_subsystems(args.client,
+ max_subsystems=args.max_subsystems)
+
+ p = subparsers.add_parser('nvmf_set_max_subsystems', aliases=['set_nvmf_target_max_subsystems'],
+ help='Set the maximum number of NVMf target subsystems')
+ p.add_argument('-x', '--max-subsystems', help='Max number of NVMf subsystems', type=int, required=True)
+ p.set_defaults(func=nvmf_set_max_subsystems)
+
+ def nvmf_set_config(args):
+ rpc.nvmf.nvmf_set_config(args.client,
+ acceptor_poll_rate=args.acceptor_poll_rate,
+ conn_sched=args.conn_sched,
+ passthru_identify_ctrlr=args.passthru_identify_ctrlr)
+
+ p = subparsers.add_parser('nvmf_set_config', aliases=['set_nvmf_target_config'],
+ help='Set NVMf target config')
+ p.add_argument('-r', '--acceptor-poll-rate', help='Polling interval of the acceptor for incoming connections (usec)', type=int)
+ p.add_argument('-s', '--conn-sched', help='(Deprecated). Ignored.')
+ p.add_argument('-i', '--passthru-identify-ctrlr', help="""Passthrough fields like serial number and model number
+ when the controller has a single namespace that is an NVMe bdev""", action='store_true')
+ p.set_defaults(func=nvmf_set_config)
+
+ # Create an NVMe-oF transport (RDMA/TCP/...); every CLI option is forwarded
+ # verbatim to the nvmf_create_transport RPC (unset options arrive as None and
+ # are filtered out by the rpc layer).
+ def nvmf_create_transport(args):
+ rpc.nvmf.nvmf_create_transport(args.client,
+ trtype=args.trtype,
+ tgt_name=args.tgt_name,
+ max_queue_depth=args.max_queue_depth,
+ max_qpairs_per_ctrlr=args.max_qpairs_per_ctrlr,
+ max_io_qpairs_per_ctrlr=args.max_io_qpairs_per_ctrlr,
+ in_capsule_data_size=args.in_capsule_data_size,
+ max_io_size=args.max_io_size,
+ io_unit_size=args.io_unit_size,
+ max_aq_depth=args.max_aq_depth,
+ num_shared_buffers=args.num_shared_buffers,
+ buf_cache_size=args.buf_cache_size,
+ max_srq_depth=args.max_srq_depth,
+ no_srq=args.no_srq,
+ c2h_success=args.c2h_success,
+ dif_insert_or_strip=args.dif_insert_or_strip,
+ sock_priority=args.sock_priority,
+ acceptor_backlog=args.acceptor_backlog,
+ abort_timeout_sec=args.abort_timeout_sec)
+
+ p = subparsers.add_parser('nvmf_create_transport', help='Create NVMf transport')
+ p.add_argument('-t', '--trtype', help='Transport type (ex. RDMA)', type=str, required=True)
+ p.add_argument('-g', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
+ p.add_argument('-q', '--max-queue-depth', help='Max number of outstanding I/O per queue', type=int)
+ # -p is kept for backward compatibility only; new callers should use -m.
+ p.add_argument('-p', '--max-qpairs-per-ctrlr', help="""Max number of SQ and CQ per controller.
+ Deprecated, use max-io-qpairs-per-ctrlr""", type=int)
+ p.add_argument('-m', '--max-io-qpairs-per-ctrlr', help='Max number of IO qpairs per controller', type=int)
+ p.add_argument('-c', '--in-capsule-data-size', help='Max number of in-capsule data size', type=int)
+ p.add_argument('-i', '--max-io-size', help='Max I/O size (bytes)', type=int)
+ p.add_argument('-u', '--io-unit-size', help='I/O unit size (bytes)', type=int)
+ p.add_argument('-a', '--max-aq-depth', help='Max number of admin cmds per AQ', type=int)
+ p.add_argument('-n', '--num-shared-buffers', help='The number of pooled data buffers available to the transport', type=int)
+ p.add_argument('-b', '--buf-cache-size', help='The number of shared buffers to reserve for each poll group', type=int)
+ p.add_argument('-s', '--max-srq-depth', help='Max number of outstanding I/O per SRQ. Relevant only for RDMA transport', type=int)
+ p.add_argument('-r', '--no-srq', action='store_true', help='Disable per-thread shared receive queue. Relevant only for RDMA transport')
+ # Note the inverted flag: passing -o stores False (optimization disabled).
+ p.add_argument('-o', '--c2h-success', action='store_false', help='Disable C2H success optimization. Relevant only for TCP transport')
+ p.add_argument('-f', '--dif-insert-or-strip', action='store_true', help='Enable DIF insert/strip. Relevant only for TCP transport')
+ p.add_argument('-y', '--sock-priority', help='The sock priority of the tcp connection. Relevant only for TCP transport', type=int)
+ p.add_argument('-l', '--acceptor_backlog', help='Pending connections allowed at one time. Relevant only for RDMA transport', type=int)
+ p.add_argument('-x', '--abort-timeout-sec', help='Abort execution timeout value, in seconds', type=int)
+ p.set_defaults(func=nvmf_create_transport)
+
+ # List configured NVMe-oF transports.
+ def nvmf_get_transports(args):
+ print_dict(rpc.nvmf.nvmf_get_transports(args.client, tgt_name=args.tgt_name))
+
+ p = subparsers.add_parser('nvmf_get_transports', aliases=['get_nvmf_transports'],
+ help='Display nvmf transports')
+ p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
+ p.set_defaults(func=nvmf_get_transports)
+
+ # List configured NVMe-oF subsystems.
+ def nvmf_get_subsystems(args):
+ print_dict(rpc.nvmf.nvmf_get_subsystems(args.client, tgt_name=args.tgt_name))
+
+ p = subparsers.add_parser('nvmf_get_subsystems', aliases=['get_nvmf_subsystems'],
+ help='Display nvmf subsystems')
+ p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
+ p.set_defaults(func=nvmf_get_subsystems)
+
+ # Create a subsystem identified by its NQN; serial/model numbers default to
+ # fixed placeholder strings when not supplied.
+ def nvmf_create_subsystem(args):
+ rpc.nvmf.nvmf_create_subsystem(args.client,
+ nqn=args.nqn,
+ tgt_name=args.tgt_name,
+ serial_number=args.serial_number,
+ model_number=args.model_number,
+ allow_any_host=args.allow_any_host,
+ max_namespaces=args.max_namespaces)
+
+ p = subparsers.add_parser('nvmf_create_subsystem', aliases=['nvmf_subsystem_create'],
+ help='Create an NVMe-oF subsystem')
+ p.add_argument('nqn', help='Subsystem NQN (ASCII)')
+ p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
+ p.add_argument("-s", "--serial-number", help="""
+ Format: 'sn' etc
+ Example: 'SPDK00000000000001'""", default='00000000000000000000')
+ p.add_argument("-d", "--model-number", help="""
+ Format: 'mn' etc
+ Example: 'SPDK Controller'""", default='SPDK bdev Controller')
+ p.add_argument("-a", "--allow-any-host", action='store_true', help="Allow any host to connect (don't enforce host NQN whitelist)")
+ # max_namespaces=0 means "no explicit limit" on the RPC side.
+ p.add_argument("-m", "--max-namespaces", help="Maximum number of namespaces allowed",
+ type=int, default=0)
+ p.set_defaults(func=nvmf_create_subsystem)
+
+ # Delete a subsystem by NQN.
+ def nvmf_delete_subsystem(args):
+ rpc.nvmf.nvmf_delete_subsystem(args.client,
+ nqn=args.subsystem_nqn,
+ tgt_name=args.tgt_name)
+
+ p = subparsers.add_parser('nvmf_delete_subsystem', aliases=['delete_nvmf_subsystem'],
+ help='Delete a nvmf subsystem')
+ p.add_argument('subsystem_nqn',
+ help='subsystem nqn to be deleted. Example: nqn.2016-06.io.spdk:cnode1.')
+ p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
+ p.set_defaults(func=nvmf_delete_subsystem)
+
+ # Add a (trtype, traddr[, trsvcid]) listener to a subsystem.
+ def nvmf_subsystem_add_listener(args):
+ rpc.nvmf.nvmf_subsystem_add_listener(args.client,
+ nqn=args.nqn,
+ trtype=args.trtype,
+ traddr=args.traddr,
+ tgt_name=args.tgt_name,
+ adrfam=args.adrfam,
+ trsvcid=args.trsvcid)
+
+ p = subparsers.add_parser('nvmf_subsystem_add_listener', help='Add a listener to an NVMe-oF subsystem')
+ p.add_argument('nqn', help='NVMe-oF subsystem NQN')
+ p.add_argument('-t', '--trtype', help='NVMe-oF transport type: e.g., rdma', required=True)
+ p.add_argument('-a', '--traddr', help='NVMe-oF transport address: e.g., an ip address', required=True)
+ p.add_argument('-p', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
+ p.add_argument('-f', '--adrfam', help='NVMe-oF transport adrfam: e.g., ipv4, ipv6, ib, fc, intra_host')
+ p.add_argument('-s', '--trsvcid', help='NVMe-oF transport service id: e.g., a port number')
+ p.set_defaults(func=nvmf_subsystem_add_listener)
+
+ # Remove a listener; arguments mirror nvmf_subsystem_add_listener.
+ def nvmf_subsystem_remove_listener(args):
+ rpc.nvmf.nvmf_subsystem_remove_listener(args.client,
+ nqn=args.nqn,
+ trtype=args.trtype,
+ traddr=args.traddr,
+ tgt_name=args.tgt_name,
+ adrfam=args.adrfam,
+ trsvcid=args.trsvcid)
+
+ p = subparsers.add_parser('nvmf_subsystem_remove_listener', help='Remove a listener from an NVMe-oF subsystem')
+ p.add_argument('nqn', help='NVMe-oF subsystem NQN')
+ p.add_argument('-t', '--trtype', help='NVMe-oF transport type: e.g., rdma', required=True)
+ p.add_argument('-a', '--traddr', help='NVMe-oF transport address: e.g., an ip address', required=True)
+ p.add_argument('-p', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
+ p.add_argument('-f', '--adrfam', help='NVMe-oF transport adrfam: e.g., ipv4, ipv6, ib, fc, intra_host')
+ p.add_argument('-s', '--trsvcid', help='NVMe-oF transport service id: e.g., a port number')
+ p.set_defaults(func=nvmf_subsystem_remove_listener)
+
+ # Expose a bdev as a namespace of a subsystem; identifiers (nsid/nguid/
+ # eui64/uuid) are optional and auto-assigned by the target when omitted.
+ def nvmf_subsystem_add_ns(args):
+ rpc.nvmf.nvmf_subsystem_add_ns(args.client,
+ nqn=args.nqn,
+ bdev_name=args.bdev_name,
+ tgt_name=args.tgt_name,
+ ptpl_file=args.ptpl_file,
+ nsid=args.nsid,
+ nguid=args.nguid,
+ eui64=args.eui64,
+ uuid=args.uuid)
+
+ p = subparsers.add_parser('nvmf_subsystem_add_ns', help='Add a namespace to an NVMe-oF subsystem')
+ p.add_argument('nqn', help='NVMe-oF subsystem NQN')
+ p.add_argument('bdev_name', help='The name of the bdev that will back this namespace')
+ p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
+ p.add_argument('-p', '--ptpl-file', help='The persistent reservation storage location (optional)', type=str)
+ p.add_argument('-n', '--nsid', help='The requested NSID (optional)', type=int)
+ p.add_argument('-g', '--nguid', help='Namespace globally unique identifier (optional)')
+ p.add_argument('-e', '--eui64', help='Namespace EUI-64 identifier (optional)')
+ p.add_argument('-u', '--uuid', help='Namespace UUID (optional)')
+ p.set_defaults(func=nvmf_subsystem_add_ns)
+
+ # Detach a namespace (by NSID) from a subsystem.
+ def nvmf_subsystem_remove_ns(args):
+ rpc.nvmf.nvmf_subsystem_remove_ns(args.client,
+ nqn=args.nqn,
+ nsid=args.nsid,
+ tgt_name=args.tgt_name)
+
+ p = subparsers.add_parser('nvmf_subsystem_remove_ns', help='Remove a namespace to an NVMe-oF subsystem')
+ p.add_argument('nqn', help='NVMe-oF subsystem NQN')
+ p.add_argument('nsid', help='The requested NSID', type=int)
+ p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
+ p.set_defaults(func=nvmf_subsystem_remove_ns)
+
+ # Whitelist a host NQN on a subsystem.
+ def nvmf_subsystem_add_host(args):
+ rpc.nvmf.nvmf_subsystem_add_host(args.client,
+ nqn=args.nqn,
+ host=args.host,
+ tgt_name=args.tgt_name)
+
+ p = subparsers.add_parser('nvmf_subsystem_add_host', help='Add a host to an NVMe-oF subsystem')
+ p.add_argument('nqn', help='NVMe-oF subsystem NQN')
+ p.add_argument('host', help='Host NQN to allow')
+ p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
+ p.set_defaults(func=nvmf_subsystem_add_host)
+
+ # Remove a host NQN from a subsystem's whitelist.
+ def nvmf_subsystem_remove_host(args):
+ rpc.nvmf.nvmf_subsystem_remove_host(args.client,
+ nqn=args.nqn,
+ host=args.host,
+ tgt_name=args.tgt_name)
+
+ p = subparsers.add_parser('nvmf_subsystem_remove_host', help='Remove a host from an NVMe-oF subsystem')
+ p.add_argument('nqn', help='NVMe-oF subsystem NQN')
+ p.add_argument('host', help='Host NQN to remove')
+ p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
+ p.set_defaults(func=nvmf_subsystem_remove_host)
+
+ # Toggle the subsystem's allow-any-host mode.
+ # NOTE(review): only --disable is forwarded to the RPC; --enable is parsed but
+ # never read here, so the enabled state is presumably implied by the absence of
+ # --disable — confirm against rpc.nvmf.nvmf_subsystem_allow_any_host.
+ def nvmf_subsystem_allow_any_host(args):
+ rpc.nvmf.nvmf_subsystem_allow_any_host(args.client,
+ nqn=args.nqn,
+ disable=args.disable,
+ tgt_name=args.tgt_name)
+
+ p = subparsers.add_parser('nvmf_subsystem_allow_any_host', help='Allow any host to connect to the subsystem')
+ p.add_argument('nqn', help='NVMe-oF subsystem NQN')
+ p.add_argument('-e', '--enable', action='store_true', help='Enable allowing any host')
+ p.add_argument('-d', '--disable', action='store_true', help='Disable allowing any host')
+ p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
+ p.set_defaults(func=nvmf_subsystem_allow_any_host)
+
+ # Dump NVMe-oF target statistics.
+ def nvmf_get_stats(args):
+ print_dict(rpc.nvmf.nvmf_get_stats(args.client, tgt_name=args.tgt_name))
+
+ p = subparsers.add_parser(
+ 'nvmf_get_stats', help='Display current statistics for NVMf subsystem')
+ p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
+ p.set_defaults(func=nvmf_get_stats)
+
+ # pmem
+ def bdev_pmem_create_pool(args):
+ num_blocks = int((args.total_size * 1024 * 1024) / args.block_size)
+ rpc.pmem.bdev_pmem_create_pool(args.client,
+ pmem_file=args.pmem_file,
+ num_blocks=num_blocks,
+ block_size=args.block_size)
+
+ p = subparsers.add_parser('bdev_pmem_create_pool', aliases=['create_pmem_pool'],
+ help='Create pmem pool')
+ p.add_argument('pmem_file', help='Path to pmemblk pool file')
+ p.add_argument('total_size', help='Size of malloc bdev in MB (int > 0)', type=int)
+ p.add_argument('block_size', help='Block size for this pmem pool', type=int)
+ p.set_defaults(func=bdev_pmem_create_pool)
+
+ # Inspect a pmem pool file and verify its consistency.
+ def bdev_pmem_get_pool_info(args):
+ print_dict(rpc.pmem.bdev_pmem_get_pool_info(args.client,
+ pmem_file=args.pmem_file))
+
+ p = subparsers.add_parser('bdev_pmem_get_pool_info', aliases=['pmem_pool_info'],
+ help='Display pmem pool info and check consistency')
+ p.add_argument('pmem_file', help='Path to pmemblk pool file')
+ p.set_defaults(func=bdev_pmem_get_pool_info)
+
+ # Delete a pmem pool file.
+ def bdev_pmem_delete_pool(args):
+ rpc.pmem.bdev_pmem_delete_pool(args.client,
+ pmem_file=args.pmem_file)
+
+ p = subparsers.add_parser('bdev_pmem_delete_pool', aliases=['delete_pmem_pool'],
+ help='Delete pmem pool')
+ p.add_argument('pmem_file', help='Path to pmemblk pool file')
+ p.set_defaults(func=bdev_pmem_delete_pool)
+
+ # subsystem
+ # List SPDK framework subsystems in initialization order.
+ def framework_get_subsystems(args):
+ print_dict(rpc.subsystem.framework_get_subsystems(args.client))
+
+ p = subparsers.add_parser('framework_get_subsystems', aliases=['get_subsystems'],
+ help="""Print subsystems array in initialization order. Each subsystem
+ entry contain (unsorted) array of subsystems it depends on.""")
+ p.set_defaults(func=framework_get_subsystems)
+
+ # Dump one framework subsystem's configuration.
+ def framework_get_config(args):
+ print_dict(rpc.subsystem.framework_get_config(args.client, args.name))
+
+ p = subparsers.add_parser('framework_get_config', aliases=['get_subsystem_config'],
+ help="""Print subsystem configuration""")
+ p.add_argument('name', help='Name of subsystem to query')
+ p.set_defaults(func=framework_get_config)
+
+ # vhost
+ # Tune interrupt coalescing on an existing vhost controller.
+ def vhost_controller_set_coalescing(args):
+ rpc.vhost.vhost_controller_set_coalescing(args.client,
+ ctrlr=args.ctrlr,
+ delay_base_us=args.delay_base_us,
+ iops_threshold=args.iops_threshold)
+
+ p = subparsers.add_parser('vhost_controller_set_coalescing', aliases=['set_vhost_controller_coalescing'],
+ help='Set vhost controller coalescing')
+ p.add_argument('ctrlr', help='controller name')
+ p.add_argument('delay_base_us', help='Base delay time', type=int)
+ p.add_argument('iops_threshold', help='IOPS threshold when coalescing is enabled', type=int)
+ p.set_defaults(func=vhost_controller_set_coalescing)
+
+ # Create an (initially empty) vhost-scsi controller; targets are added later
+ # via vhost_scsi_controller_add_target.
+ def vhost_create_scsi_controller(args):
+ rpc.vhost.vhost_create_scsi_controller(args.client,
+ ctrlr=args.ctrlr,
+ cpumask=args.cpumask)
+
+ p = subparsers.add_parser(
+ 'vhost_create_scsi_controller', aliases=['construct_vhost_scsi_controller'],
+ help='Add new vhost controller')
+ p.add_argument('ctrlr', help='controller name')
+ p.add_argument('--cpumask', help='cpu mask for this controller')
+ p.set_defaults(func=vhost_create_scsi_controller)
+
+ def vhost_scsi_controller_add_target(args):
+ print_json(rpc.vhost.vhost_scsi_controller_add_target(args.client,
+ ctrlr=args.ctrlr,
+ scsi_target_num=args.scsi_target_num,
+ bdev_name=args.bdev_name))
+
+ p = subparsers.add_parser('vhost_scsi_controller_add_target',
+ aliases=['add_vhost_scsi_lun'],
+ help='Add lun to vhost controller')
+ p.add_argument('ctrlr', help='conntroller name where add lun')
+ p.add_argument('scsi_target_num', help='scsi_target_num', type=int)
+ p.add_argument('bdev_name', help='bdev name')
+ p.set_defaults(func=vhost_scsi_controller_add_target)
+
+ # Detach a SCSI target from a vhost-scsi controller.
+ def vhost_scsi_controller_remove_target(args):
+ rpc.vhost.vhost_scsi_controller_remove_target(args.client,
+ ctrlr=args.ctrlr,
+ scsi_target_num=args.scsi_target_num)
+
+ p = subparsers.add_parser('vhost_scsi_controller_remove_target',
+ aliases=['remove_vhost_scsi_target'],
+ help='Remove target from vhost controller')
+ p.add_argument('ctrlr', help='controller name to remove target from')
+ p.add_argument('scsi_target_num', help='scsi_target_num', type=int)
+ p.set_defaults(func=vhost_scsi_controller_remove_target)
+
+ # Create a vhost-blk controller backed by a single bdev.
+ def vhost_create_blk_controller(args):
+ rpc.vhost.vhost_create_blk_controller(args.client,
+ ctrlr=args.ctrlr,
+ dev_name=args.dev_name,
+ cpumask=args.cpumask,
+ readonly=args.readonly,
+ packed_ring=args.packed_ring)
+
+ p = subparsers.add_parser('vhost_create_blk_controller',
+ aliases=['construct_vhost_blk_controller'],
+ help='Add a new vhost block controller')
+ p.add_argument('ctrlr', help='controller name')
+ p.add_argument('dev_name', help='device name')
+ p.add_argument('--cpumask', help='cpu mask for this controller')
+ p.add_argument("-r", "--readonly", action='store_true', help='Set controller as read-only')
+ p.add_argument("-p", "--packed_ring", action='store_true', help='Set controller as packed ring supported')
+ p.set_defaults(func=vhost_create_blk_controller)
+
+ def vhost_create_nvme_controller(args):
+ rpc.vhost.vhost_create_nvme_controller(args.client,
+ ctrlr=args.ctrlr,
+ io_queues=args.io_queues,
+ cpumask=args.cpumask)
+
+ p = subparsers.add_parser('vhost_create_nvme_controller', aliases=['vhost_create_nvme_controller'],
+ help='Add new vhost controller')
+ p.add_argument('ctrlr', help='controller name')
+ p.add_argument('io_queues', help='number of IO queues for the controller', type=int)
+ p.add_argument('--cpumask', help='cpu mask for this controller')
+ p.set_defaults(func=vhost_create_nvme_controller)
+
+ def vhost_nvme_controller_add_ns(args):
+ rpc.vhost.vhost_nvme_controller_add_ns(args.client,
+ ctrlr=args.ctrlr,
+ bdev_name=args.bdev_name)
+
+ p = subparsers.add_parser('vhost_nvme_controller_add_ns', aliases=['add_vhost_nvme_ns'],
+ help='Add a Namespace to vhost controller')
+ p.add_argument('ctrlr', help='conntroller name where add a Namespace')
+ p.add_argument('bdev_name', help='block device name for a new Namespace')
+ p.set_defaults(func=vhost_nvme_controller_add_ns)
+
+ # List all vhost controllers, or a single one when --name is given.
+ def vhost_get_controllers(args):
+ print_dict(rpc.vhost.vhost_get_controllers(args.client, args.name))
+
+ p = subparsers.add_parser('vhost_get_controllers', aliases=['get_vhost_controllers'],
+ help='List all or specific vhost controller(s)')
+ p.add_argument('-n', '--name', help="Name of vhost controller", required=False)
+ p.set_defaults(func=vhost_get_controllers)
+
+ # Tear down a vhost controller by name.
+ def vhost_delete_controller(args):
+ rpc.vhost.vhost_delete_controller(args.client,
+ ctrlr=args.ctrlr)
+
+ p = subparsers.add_parser('vhost_delete_controller', aliases=['remove_vhost_controller'],
+ help='Delete a vhost controller')
+ p.add_argument('ctrlr', help='controller name')
+ p.set_defaults(func=vhost_delete_controller)
+
+ # Attach a virtio controller (pci or vhost-user) and print the bdevs created
+ # for its block devices.
+ def bdev_virtio_attach_controller(args):
+ print_array(rpc.vhost.bdev_virtio_attach_controller(args.client,
+ name=args.name,
+ trtype=args.trtype,
+ traddr=args.traddr,
+ dev_type=args.dev_type,
+ vq_count=args.vq_count,
+ vq_size=args.vq_size))
+
+ p = subparsers.add_parser('bdev_virtio_attach_controller', aliases=['construct_virtio_dev'],
+ help="""Attach virtio controller using provided
+ transport type and device type. This will also create bdevs for any block devices connected to the
+ controller (for example, SCSI devices for a virtio-scsi controller).
+ Result is array of added bdevs.""")
+ p.add_argument('name', help="Use this name as base for new created bdevs")
+ p.add_argument('-t', '--trtype',
+ help='Virtio target transport type: pci or user', required=True)
+ p.add_argument('-a', '--traddr',
+ help='Transport type specific target address: e.g. UNIX domain socket path or BDF', required=True)
+ p.add_argument('-d', '--dev-type',
+ help='Device type: blk or scsi', required=True)
+ p.add_argument('--vq-count', help='Number of virtual queues to be used.', type=int)
+ p.add_argument('--vq-size', help='Size of each queue', type=int)
+ p.set_defaults(func=bdev_virtio_attach_controller)
+
+ # Enumerate attached Virtio-SCSI devices.
+ def bdev_virtio_scsi_get_devices(args):
+ print_dict(rpc.vhost.bdev_virtio_scsi_get_devices(args.client))
+
+ p = subparsers.add_parser('bdev_virtio_scsi_get_devices', aliases=['get_virtio_scsi_devs'],
+ help='List all Virtio-SCSI devices.')
+ p.set_defaults(func=bdev_virtio_scsi_get_devices)
+
+ # Detach a virtio device, removing all bdevs it exposed.
+ def bdev_virtio_detach_controller(args):
+ rpc.vhost.bdev_virtio_detach_controller(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('bdev_virtio_detach_controller', aliases=['remove_virtio_bdev'],
+ help="""Remove a Virtio device
+ This will delete all bdevs exposed by this device""")
+ p.add_argument('name', help='Virtio device name. E.g. VirtioUser0')
+ p.set_defaults(func=bdev_virtio_detach_controller)
+
+ # OCSSD
+ # Create a zoned bdev on an Open Channel NVMe controller; nsid stays None
+ # (RPC default) when not given on the command line.
+ def bdev_ocssd_create(args):
+ nsid = int(args.nsid) if args.nsid is not None else None
+ print_json(rpc.bdev.bdev_ocssd_create(args.client,
+ ctrlr_name=args.ctrlr_name,
+ bdev_name=args.name,
+ nsid=nsid,
+ range=args.range))
+
+ p = subparsers.add_parser('bdev_ocssd_create',
+ help='Creates zoned bdev on specified Open Channel controller')
+ p.add_argument('-c', '--ctrlr_name', help='Name of the OC NVMe controller', required=True)
+ p.add_argument('-b', '--name', help='Name of the bdev to create', required=True)
+ p.add_argument('-n', '--nsid', help='Namespace ID', required=False)
+ p.add_argument('-r', '--range', help='Parallel unit range (in the form of BEGIN-END (inclusive))',
+ required=False)
+ p.set_defaults(func=bdev_ocssd_create)
+
+ # Delete an Open Channel bdev by name.
+ def bdev_ocssd_delete(args):
+ print_json(rpc.bdev.bdev_ocssd_delete(args.client,
+ name=args.name))
+
+ p = subparsers.add_parser('bdev_ocssd_delete',
+ help='Deletes Open Channel bdev')
+ p.add_argument('name', help='Name of the Open Channel bdev')
+ p.set_defaults(func=bdev_ocssd_delete)
+
+ # ioat
+ def ioat_scan_accel_engine(args):
+ pci_whitelist = []
+ if args.pci_whitelist:
+ for w in args.pci_whitelist.strip().split(" "):
+ pci_whitelist.append(w)
+ rpc.ioat.ioat_scan_accel_engine(args.client, pci_whitelist)
+
+ p = subparsers.add_parser('ioat_scan_accel_engine',
+ aliases=['ioat_scan_copy_engine', 'scan_ioat_copy_engine'],
+ help='Set scan and enable IOAT accel engine offload.')
+ p.add_argument('-w', '--pci-whitelist', help="""Whitespace-separated list of PCI addresses in
+ domain:bus:device.function format or domain.bus.device.function format""")
+ p.set_defaults(func=ioat_scan_accel_engine)
+
+ # idxd
+ # Enable the IDXD accel driver with one of its pre-defined configurations.
+ def idxd_scan_accel_engine(args):
+ rpc.idxd.idxd_scan_accel_engine(args.client, config_number=args.config_number)
+
+ p = subparsers.add_parser('idxd_scan_accel_engine',
+ help='Set config and enable idxd accel engine offload.')
+ p.add_argument('-c', '--config-number', help="""Pre-defined configuration number to use. See docs.""", type=int)
+ p.set_defaults(func=idxd_scan_accel_engine)
+
+ # opal
+ # Take Opal ownership of an NVMe controller and activate the locking SP.
+ def bdev_nvme_opal_init(args):
+ rpc.nvme.bdev_nvme_opal_init(args.client,
+ nvme_ctrlr_name=args.nvme_ctrlr_name,
+ password=args.password)
+
+ p = subparsers.add_parser('bdev_nvme_opal_init', help='take ownership and activate')
+ p.add_argument('-b', '--nvme-ctrlr-name', help='nvme ctrlr name')
+ p.add_argument('-p', '--password', help='password for admin')
+ p.set_defaults(func=bdev_nvme_opal_init)
+
+ # Revert an Opal-enabled controller to factory defaults.
+ def bdev_nvme_opal_revert(args):
+ rpc.nvme.bdev_nvme_opal_revert(args.client,
+ nvme_ctrlr_name=args.nvme_ctrlr_name,
+ password=args.password)
+ p = subparsers.add_parser('bdev_nvme_opal_revert', help='Revert to default factory settings')
+ p.add_argument('-b', '--nvme-ctrlr-name', help='nvme ctrlr name')
+ p.add_argument('-p', '--password', help='password')
+ p.set_defaults(func=bdev_nvme_opal_revert)
+
+ # Create an opal bdev over one locking range of an NVMe namespace.
+ def bdev_opal_create(args):
+ print_json(rpc.bdev.bdev_opal_create(args.client,
+ nvme_ctrlr_name=args.nvme_ctrlr_name,
+ nsid=args.nsid,
+ locking_range_id=args.locking_range_id,
+ range_start=args.range_start,
+ range_length=args.range_length,
+ password=args.password))
+
+ p = subparsers.add_parser('bdev_opal_create', help="""Create opal bdev on specified NVMe controller""")
+ p.add_argument('-b', '--nvme-ctrlr-name', help='nvme ctrlr name', required=True)
+ p.add_argument('-n', '--nsid', help='namespace ID (only support nsid=1 for now)', type=int, required=True)
+ p.add_argument('-i', '--locking-range-id', help='locking range id', type=int, required=True)
+ p.add_argument('-s', '--range-start', help='locking range start LBA', type=int, required=True)
+ p.add_argument('-l', '--range-length', help='locking range length (in blocks)', type=int, required=True)
+ p.add_argument('-p', '--password', help='admin password', required=True)
+ p.set_defaults(func=bdev_opal_create)
+
+ # Show the locking-range info behind an opal bdev.
+ def bdev_opal_get_info(args):
+ print_dict(rpc.bdev.bdev_opal_get_info(args.client,
+ bdev_name=args.bdev_name,
+ password=args.password))
+
+ p = subparsers.add_parser('bdev_opal_get_info', help='get opal locking range info for this bdev')
+ p.add_argument('-b', '--bdev-name', help='opal bdev')
+ p.add_argument('-p', '--password', help='password')
+ p.set_defaults(func=bdev_opal_get_info)
+
+ # Delete a virtual opal bdev (requires the admin password).
+ def bdev_opal_delete(args):
+ rpc.bdev.bdev_opal_delete(args.client,
+ bdev_name=args.bdev_name,
+ password=args.password)
+
+ p = subparsers.add_parser('bdev_opal_delete', help="""delete a virtual opal bdev""")
+ p.add_argument('-b', '--bdev-name', help='opal virtual bdev', required=True)
+ p.add_argument('-p', '--password', help='admin password', required=True)
+ p.set_defaults(func=bdev_opal_delete)
+
+ # Register an additional user (with their own password) on an opal bdev.
+ def bdev_opal_new_user(args):
+ rpc.bdev.bdev_opal_new_user(args.client,
+ bdev_name=args.bdev_name,
+ admin_password=args.admin_password,
+ user_id=args.user_id,
+ user_password=args.user_password)
+
+ p = subparsers.add_parser('bdev_opal_new_user', help="""Add a user to opal bdev who can set lock state for this bdev""")
+ p.add_argument('-b', '--bdev-name', help='opal bdev', required=True)
+ p.add_argument('-p', '--admin-password', help='admin password', required=True)
+ p.add_argument('-i', '--user-id', help='ID for new user', type=int, required=True)
+ p.add_argument('-u', '--user-password', help='password set for this user', required=True)
+ p.set_defaults(func=bdev_opal_new_user)
+
+ # Change the lock state of an opal bdev on behalf of a user.
+ def bdev_opal_set_lock_state(args):
+ rpc.bdev.bdev_opal_set_lock_state(args.client,
+ bdev_name=args.bdev_name,
+ user_id=args.user_id,
+ password=args.password,
+ lock_state=args.lock_state)
+
+ p = subparsers.add_parser('bdev_opal_set_lock_state', help="""set lock state for an opal bdev""")
+ p.add_argument('-b', '--bdev-name', help='opal bdev', required=True)
+ p.add_argument('-i', '--user-id', help='ID of the user who want to set lock state, either admin or a user assigned to this bdev',
+ type=int, required=True)
+ p.add_argument('-p', '--password', help='password of this user', required=True)
+ p.add_argument('-l', '--lock-state', help='lock state to set, choose from {readwrite, readonly, rwlock}', required=True)
+ p.set_defaults(func=bdev_opal_set_lock_state)
+
+ # bdev_nvme_send_cmd
+ # Issue a raw NVMe passthrough command (admin or io) to a controller; the
+ # command and payloads travel base64-urlsafe encoded.
+ def bdev_nvme_send_cmd(args):
+ print_dict(rpc.nvme.bdev_nvme_send_cmd(args.client,
+ name=args.nvme_name,
+ cmd_type=args.cmd_type,
+ data_direction=args.data_direction,
+ cmdbuf=args.cmdbuf,
+ data=args.data,
+ metadata=args.metadata,
+ data_len=args.data_length,
+ metadata_len=args.metadata_length,
+ timeout_ms=args.timeout_ms))
+
+ p = subparsers.add_parser('bdev_nvme_send_cmd', aliases=['send_nvme_cmd'],
+ help='NVMe passthrough cmd.')
+ p.add_argument('-n', '--nvme-name', help="""Name of the operating NVMe controller""")
+ p.add_argument('-t', '--cmd-type', help="""Type of nvme cmd. Valid values are: admin, io""")
+ p.add_argument('-r', '--data-direction', help="""Direction of data transfer. Valid values are: c2h, h2c""")
+ p.add_argument('-c', '--cmdbuf', help="""NVMe command encoded by base64 urlsafe""")
+ p.add_argument('-d', '--data', help="""Data transferring to controller from host, encoded by base64 urlsafe""")
+ p.add_argument('-m', '--metadata', help="""Metadata transferring to controller from host, encoded by base64 urlsafe""")
+ p.add_argument('-D', '--data-length', help="""Data length required to transfer from controller to host""", type=int)
+ p.add_argument('-M', '--metadata-length', help="""Metadata length required to transfer from controller to host""", type=int)
+ p.add_argument('-T', '--timeout-ms',
+ help="""Command execution timeout value, in milliseconds, if 0, don't track timeout""", type=int, default=0)
+ p.set_defaults(func=bdev_nvme_send_cmd)
+
+ # Notifications
+ # List the notification types a client can subscribe to.
+ def notify_get_types(args):
+ print_dict(rpc.notify.notify_get_types(args.client))
+
+ p = subparsers.add_parser('notify_get_types', aliases=['get_notification_types'],
+ help='List available notifications that user can subscribe to.')
+ p.set_defaults(func=notify_get_types)
+
+ # Fetch queued notifications, starting at --id, at most --max of them.
+ def notify_get_notifications(args):
+ ret = rpc.notify.notify_get_notifications(args.client,
+ id=args.id,
+ max=args.max)
+ print_dict(ret)
+
+ p = subparsers.add_parser('notify_get_notifications', aliases=['get_notifications'],
+ help='Get notifications')
+ p.add_argument('-i', '--id', help="""First ID to start fetching from""", type=int)
+ p.add_argument('-n', '--max', help="""Maximum number of notifications to return in response""", type=int)
+ p.set_defaults(func=notify_get_notifications)
+
+ # Dump per-thread statistics.
+ def thread_get_stats(args):
+ print_dict(rpc.app.thread_get_stats(args.client))
+
+ p = subparsers.add_parser(
+ 'thread_get_stats', help='Display current statistics of all the threads')
+ p.set_defaults(func=thread_get_stats)
+
+ # Re-pin a thread to the given cpumask.
+ # NOTE(review): the RPC result is assigned to `ret` but never printed or
+ # returned — presumably intentional (fire-and-forget), but worth confirming.
+ def thread_set_cpumask(args):
+ ret = rpc.app.thread_set_cpumask(args.client,
+ id=args.id,
+ cpumask=args.cpumask)
+ p = subparsers.add_parser('thread_set_cpumask',
+ help="""set the cpumask of the thread whose ID matches to the
+ specified value. The thread may be migrated to one of the specified CPUs.""")
+ p.add_argument('-i', '--id', type=int, help='thread ID')
+ p.add_argument('-m', '--cpumask', help='cpumask for this thread')
+ p.set_defaults(func=thread_set_cpumask)
+
+ # Dump the pollers registered on every thread.
+ def thread_get_pollers(args):
+ print_dict(rpc.app.thread_get_pollers(args.client))
+
+ p = subparsers.add_parser(
+ 'thread_get_pollers', help='Display current pollers of all the threads')
+ p.set_defaults(func=thread_get_pollers)
+
+ # Dump the IO channels open on every thread.
+ def thread_get_io_channels(args):
+ print_dict(rpc.app.thread_get_io_channels(args.client))
+
+ p = subparsers.add_parser(
+ 'thread_get_io_channels', help='Display current IO channels of all the threads')
+ p.set_defaults(func=thread_get_io_channels)
+
+ # Ask the app to write DPDK memory stats to a file; prints the RPC reply.
+ def env_dpdk_get_mem_stats(args):
+ print_dict(rpc.env_dpdk.env_dpdk_get_mem_stats(args.client))
+
+ p = subparsers.add_parser(
+ 'env_dpdk_get_mem_stats', help='write the dpdk memory stats to a file.')
+ p.set_defaults(func=env_dpdk_get_mem_stats)
+
+ # blobfs
+ # Probe a bdev for an existing blobfs; prints the boolean RPC reply.
+ def blobfs_detect(args):
+ print(rpc.blobfs.blobfs_detect(args.client,
+ bdev_name=args.bdev_name))
+
+ p = subparsers.add_parser('blobfs_detect', help='Detect whether a blobfs exists on bdev')
+ p.add_argument('bdev_name', help='Blockdev name to detect blobfs. Example: Malloc0.')
+ p.set_defaults(func=blobfs_detect)
+
+ # Format a blobfs onto a bdev, optionally with a custom cluster size.
+ def blobfs_create(args):
+ print(rpc.blobfs.blobfs_create(args.client,
+ bdev_name=args.bdev_name,
+ cluster_sz=args.cluster_sz))
+
+ p = subparsers.add_parser('blobfs_create', help='Build a blobfs on bdev')
+ p.add_argument('bdev_name', help='Blockdev name to build blobfs. Example: Malloc0.')
+ p.add_argument('-c', '--cluster_sz',
+ help="""Size of cluster in bytes (Optional). Must be multiple of 4KB page size. Default and minimal value is 1M.""")
+ p.set_defaults(func=blobfs_create)
+
+ # Mount a blobfs at a host path through FUSE.
+ def blobfs_mount(args):
+ print(rpc.blobfs.blobfs_mount(args.client,
+ bdev_name=args.bdev_name,
+ mountpoint=args.mountpoint))
+
+ p = subparsers.add_parser('blobfs_mount', help='Mount a blobfs on bdev to host path by FUSE')
+ p.add_argument('bdev_name', help='Blockdev name where the blobfs is. Example: Malloc0.')
+ p.add_argument('mountpoint', help='Mountpoint path in host to mount blobfs. Example: /mnt/.')
+ p.set_defaults(func=blobfs_mount)
+
+ # Set the global blobfs cache size (MB).
+ def blobfs_set_cache_size(args):
+ print(rpc.blobfs.blobfs_set_cache_size(args.client,
+ size_in_mb=args.size_in_mb))
+
+ p = subparsers.add_parser('blobfs_set_cache_size', help='Set cache size for blobfs')
+ p.add_argument('size_in_mb', help='Cache size for blobfs in megabytes.', type=int)
+ p.set_defaults(func=blobfs_set_cache_size)
+
+ # sock
+ # Query the options of one socket-layer implementation (e.g. posix).
+ def sock_impl_get_options(args):
+ print_json(rpc.sock.sock_impl_get_options(args.client,
+ impl_name=args.impl))
+
+ p = subparsers.add_parser('sock_impl_get_options', help="""Get options of socket layer implementation""")
+ p.add_argument('-i', '--impl', help='Socket implementation name, e.g. posix', required=True)
+ p.set_defaults(func=sock_impl_get_options)
+
+ # Set socket-layer options; the paired enable/disable flags share one dest,
+ # with a None default meaning "leave unchanged".
+ def sock_impl_set_options(args):
+ rpc.sock.sock_impl_set_options(args.client,
+ impl_name=args.impl,
+ recv_buf_size=args.recv_buf_size,
+ send_buf_size=args.send_buf_size,
+ enable_recv_pipe=args.enable_recv_pipe,
+ enable_zerocopy_send=args.enable_zerocopy_send)
+
+ p = subparsers.add_parser('sock_impl_set_options', help="""Set options of socket layer implementation""")
+ p.add_argument('-i', '--impl', help='Socket implementation name, e.g. posix', required=True)
+ p.add_argument('-r', '--recv-buf-size', help='Size of receive buffer on socket in bytes', type=int)
+ p.add_argument('-s', '--send-buf-size', help='Size of send buffer on socket in bytes', type=int)
+ p.add_argument('--enable-recv-pipe', help='Enable receive pipe',
+ action='store_true', dest='enable_recv_pipe')
+ p.add_argument('--disable-recv-pipe', help='Disable receive pipe',
+ action='store_false', dest='enable_recv_pipe')
+ p.add_argument('--enable-zerocopy-send', help='Enable zerocopy on send',
+ action='store_true', dest='enable_zerocopy_send')
+ p.add_argument('--disable-zerocopy-send', help='Disable zerocopy on send',
+ action='store_false', dest='enable_zerocopy_send')
+ p.set_defaults(func=sock_impl_set_options, enable_recv_pipe=None, enable_zerocopy_send=None)
+
+ # Warn (on stderr) when the user invoked a deprecated alias of a command.
+ def check_called_name(name):
+ if name in deprecated_aliases:
+ print("{} is deprecated, use {} instead.".format(name, deprecated_aliases[name]), file=sys.stderr)
+
+ # Stand-in for the JSON-RPC client in --dry-run mode: prints the request that
+ # would have been sent instead of sending it.
+ class dry_run_client:
+ def call(self, method, params=None):
+ print("Request:\n" + json.dumps({"method": method, "params": params}, indent=2))
+
+ # No-op replacement for the print_* helpers in --dry-run mode.
+ def null_print(arg):
+ pass
+
+ # Invoke the subcommand handler, then emit a deprecation warning if needed.
+ def call_rpc_func(args):
+ args.func(args)
+ check_called_name(args.called_rpc_name)
+
+ # Run a batch of RPC invocations read line-by-line from fd; on the first
+ # JSON-RPC failure, print everything executed so far and exit(1).
+ def execute_script(parser, client, fd):
+ executed_rpc = ""
+ for rpc_call in map(str.rstrip, fd):
+ if not rpc_call.strip():
+ continue
+ executed_rpc = "\n".join([executed_rpc, rpc_call])
+ args = parser.parse_args(shlex.split(rpc_call))
+ args.client = client
+ try:
+ call_rpc_func(args)
+ except JSONRPCException as ex:
+ print("Exception:")
+ print(executed_rpc.strip() + " <<<")
+ print(ex.message)
+ exit(1)
+
+ # Create temporary parser, pull out the plugin parameter, load the module, and then run the real argument parser
+ # (a pre-pass is needed because plugin commands must be registered on
+ # `subparsers` before the main parse_args() runs).
+ plugin_parser = argparse.ArgumentParser(add_help=False)
+ plugin_parser.add_argument('--plugin', dest='rpc_plugin', help='Module name of plugin with additional RPC commands')
+
+ rpc_module = plugin_parser.parse_known_args()[0].rpc_plugin
+ if rpc_module is not None:
+ try:
+ rpc_plugin = importlib.import_module(rpc_module)
+ try:
+ # Plugins register their subcommands via this well-known hook.
+ rpc_plugin.spdk_rpc_plugin_initialize(subparsers)
+ except AttributeError:
+ print("Module %s does not contain 'spdk_rpc_plugin_initialize' function" % rpc_module)
+ except ModuleNotFoundError:
+ print("Module %s not found" % rpc_module)
+
+ args = parser.parse_args()
+
+ if sys.stdin.isatty() and not hasattr(args, 'func'):
+ # No arguments and no data piped through stdin
+ parser.print_help()
+ exit(1)
+ if args.is_server:
+ for input in sys.stdin:
+ cmd = shlex.split(input)
+ try:
+ tmp_args = parser.parse_args(cmd)
+ except SystemExit as ex:
+ print("**STATUS=1", flush=True)
+ continue
+
+ try:
+ tmp_args.client = rpc.client.JSONRPCClient(
+ tmp_args.server_addr, tmp_args.port, tmp_args.timeout,
+ log_level=getattr(logging, tmp_args.verbose.upper()), conn_retries=tmp_args.conn_retries)
+ call_rpc_func(tmp_args)
+ print("**STATUS=0", flush=True)
+ except JSONRPCException as ex:
+ print(ex.message)
+ print("**STATUS=1", flush=True)
+ exit(0)
+ elif args.dry_run:
+ args.client = dry_run_client()
+ print_dict = null_print
+ print_json = null_print
+ print_array = null_print
+ else:
+ args.client = rpc.client.JSONRPCClient(args.server_addr, args.port, args.timeout,
+ log_level=getattr(logging, args.verbose.upper()),
+ conn_retries=args.conn_retries)
+ if hasattr(args, 'func'):
+ try:
+ call_rpc_func(args)
+ except JSONRPCException as ex:
+ print(ex.message)
+ exit(1)
+ else:
+ execute_script(parser, args.client, sys.stdin)
diff --git a/src/spdk/scripts/rpc/__init__.py b/src/spdk/scripts/rpc/__init__.py
new file mode 100644
index 000000000..f764d7ae5
--- /dev/null
+++ b/src/spdk/scripts/rpc/__init__.py
@@ -0,0 +1,201 @@
+import json
+import os
+import sys
+
+from io import IOBase as io
+
+from . import app
+from . import bdev
+from . import blobfs
+from . import env_dpdk
+from . import idxd
+from . import ioat
+from . import iscsi
+from . import log
+from . import lvol
+from . import nbd
+from . import net
+from . import notify
+from . import nvme
+from . import nvmf
+from . import pmem
+from . import subsystem
+from . import trace
+from . import vhost
+from . import vmd
+from . import sock
+from . import client as rpc_client
+from .helpers import deprecated_alias
+
+
+@deprecated_alias('start_subsystem_init')
+def framework_start_init(client):
+ """Start initialization of subsystems"""
+ return client.call('framework_start_init')
+
+
+@deprecated_alias('wait_subsystem_init')
+def framework_wait_init(client):
+ """Block until subsystems have been initialized"""
+ return client.call('framework_wait_init')
+
+
+@deprecated_alias("get_rpc_methods")
+def rpc_get_methods(client, current=None, include_aliases=None):
+ """Get list of supported RPC methods.
+ Args:
+ current: Get list of RPC methods only callable in the current state.
+ include_aliases: Include aliases in the list with RPC methods.
+ """
+ params = {}
+
+ if current:
+ params['current'] = current
+ if include_aliases:
+ params['include_aliases'] = include_aliases
+
+ return client.call('rpc_get_methods', params)
+
+
+@deprecated_alias("get_spdk_version")
+def spdk_get_version(client):
+ """Get SPDK version"""
+ return client.call('spdk_get_version')
+
+
+def _json_dump(config, fd, indent):
+ if indent is None:
+ indent = 2
+ elif indent < 0:
+ indent = None
+ json.dump(config, fd, indent=indent)
+ fd.write('\n')
+
+
+def _json_load(j):
+ if j == sys.stdin or isinstance(j, io):
+ json_conf = json.load(j)
+ elif os.path.exists(j):
+ with open(j, "r") as j:
+ json_conf = json.load(j)
+ else:
+ json_conf = json.loads(j)
+ return json_conf
+
+
+def save_config(client, fd, indent=2):
+ """Write current (live) configuration of SPDK subsystems and targets to the given file descriptor.
+ Args:
+ fd: opened file descriptor where data will be saved
+ indent: Indent level. Value less than 0 means compact mode.
+ Default indent level is 2.
+ """
+ config = {
+ 'subsystems': []
+ }
+
+ for elem in client.call('framework_get_subsystems'):
+ cfg = {
+ 'subsystem': elem['subsystem'],
+ 'config': client.call('framework_get_config', {"name": elem['subsystem']})
+ }
+ config['subsystems'].append(cfg)
+
+ _json_dump(config, fd, indent)
+
+
+def load_config(client, fd, include_aliases=False):
+ """Configure SPDK subsystems and targets using JSON RPC read from stdin.
+ Args:
+ fd: opened file descriptor where data will be taken from
+ """
+ json_config = _json_load(fd)
+
+ # remove subsystems with no config
+ subsystems = json_config['subsystems']
+ for subsystem in list(subsystems):
+ if not subsystem['config']:
+ subsystems.remove(subsystem)
+
+ # check if methods in the config file are known
+ allowed_methods = client.call('rpc_get_methods', {'include_aliases': include_aliases})
+ if not subsystems and 'framework_start_init' in allowed_methods:
+ framework_start_init(client)
+ return
+
+ for subsystem in list(subsystems):
+ config = subsystem['config']
+ for elem in list(config):
+ if 'method' not in elem or elem['method'] not in allowed_methods:
+ raise rpc_client.JSONRPCException("Unknown method was included in the config file")
+
+ while subsystems:
+ allowed_methods = client.call('rpc_get_methods', {'current': True,
+ 'include_aliases': include_aliases})
+ allowed_found = False
+
+ for subsystem in list(subsystems):
+ config = subsystem['config']
+ for elem in list(config):
+ if 'method' not in elem or elem['method'] not in allowed_methods:
+ continue
+
+ client.call(elem['method'], elem['params'])
+ config.remove(elem)
+ allowed_found = True
+
+ if not config:
+ subsystems.remove(subsystem)
+
+ if 'framework_start_init' in allowed_methods:
+ framework_start_init(client)
+ allowed_found = True
+
+ if not allowed_found:
+ break
+
+ if subsystems:
+ print("Some configs were skipped because the RPC state that can call them passed over.")
+
+
+def save_subsystem_config(client, fd, indent=2, name=None):
+ """Write current (live) configuration of SPDK subsystem to the given file descriptor.
+ Args:
+ fd: opened file descriptor where data will be saved
+ indent: Indent level. Value less than 0 means compact mode.
+ Default is indent level 2.
+ """
+ cfg = {
+ 'subsystem': name,
+ 'config': client.call('framework_get_config', {"name": name})
+ }
+
+ _json_dump(cfg, fd, indent)
+
+
+def load_subsystem_config(client, fd):
+ """Configure SPDK subsystem using JSON RPC read from stdin.
+ Args:
+ fd: opened file descriptor where data will be taken from
+ """
+ subsystem = _json_load(fd)
+
+ if not subsystem['config']:
+ return
+
+ allowed_methods = client.call('rpc_get_methods')
+ config = subsystem['config']
+ for elem in list(config):
+ if 'method' not in elem or elem['method'] not in allowed_methods:
+ raise rpc_client.JSONRPCException("Unknown method was included in the config file")
+
+ allowed_methods = client.call('rpc_get_methods', {'current': True})
+ for elem in list(config):
+ if 'method' not in elem or elem['method'] not in allowed_methods:
+ continue
+
+ client.call(elem['method'], elem['params'])
+ config.remove(elem)
+
+ if config:
+ print("Some configs were skipped because they cannot be called in the current RPC state.")
diff --git a/src/spdk/scripts/rpc/app.py b/src/spdk/scripts/rpc/app.py
new file mode 100644
index 000000000..9412de17d
--- /dev/null
+++ b/src/spdk/scripts/rpc/app.py
@@ -0,0 +1,78 @@
+from .helpers import deprecated_alias
+
+
+@deprecated_alias('kill_instance')
+def spdk_kill_instance(client, sig_name):
+ """Send a signal to the SPDK process.
+
+ Args:
+ sig_name: signal to send ("SIGINT", "SIGTERM", "SIGQUIT", "SIGHUP", or "SIGKILL")
+ """
+ params = {'sig_name': sig_name}
+ return client.call('spdk_kill_instance', params)
+
+
+@deprecated_alias('context_switch_monitor')
+def framework_monitor_context_switch(client, enabled=None):
+ """Query or set state of context switch monitoring.
+
+ Args:
+ enabled: True to enable monitoring; False to disable monitoring; None to query (optional)
+
+ Returns:
+ Current context switch monitoring state (after applying enabled flag).
+ """
+ params = {}
+ if enabled is not None:
+ params['enabled'] = enabled
+ return client.call('framework_monitor_context_switch', params)
+
+
+def framework_get_reactors(client):
+ """Query list of all reactors.
+
+ Returns:
+ List of all reactors.
+ """
+ return client.call('framework_get_reactors')
+
+
+def thread_get_stats(client):
+ """Query threads statistics.
+
+ Returns:
+ Current threads statistics.
+ """
+ return client.call('thread_get_stats')
+
+
+def thread_set_cpumask(client, id, cpumask):
+ """Set the cpumask of the thread whose ID matches to the specified value.
+
+ Args:
+ id: thread ID
+ cpumask: cpumask for this thread
+
+ Returns:
+ True or False
+ """
+ params = {'id': id, 'cpumask': cpumask}
+ return client.call('thread_set_cpumask', params)
+
+
+def thread_get_pollers(client):
+ """Query current pollers.
+
+ Returns:
+ Current pollers.
+ """
+ return client.call('thread_get_pollers')
+
+
+def thread_get_io_channels(client):
+ """Query current IO channels.
+
+ Returns:
+ Current IO channels.
+ """
+ return client.call('thread_get_io_channels')
diff --git a/src/spdk/scripts/rpc/bdev.py b/src/spdk/scripts/rpc/bdev.py
new file mode 100644
index 000000000..8c669c0b2
--- /dev/null
+++ b/src/spdk/scripts/rpc/bdev.py
@@ -0,0 +1,1105 @@
+from .helpers import deprecated_alias
+
+
+@deprecated_alias('set_bdev_options')
+def bdev_set_options(client, bdev_io_pool_size=None, bdev_io_cache_size=None, bdev_auto_examine=None):
+ """Set parameters for the bdev subsystem.
+
+ Args:
+ bdev_io_pool_size: number of bdev_io structures in shared buffer pool (optional)
+ bdev_io_cache_size: maximum number of bdev_io structures cached per thread (optional)
+ bdev_auto_examine: if set to false, the bdev layer will not examine every disks automatically (optional)
+ """
+ params = {}
+
+ if bdev_io_pool_size:
+ params['bdev_io_pool_size'] = bdev_io_pool_size
+ if bdev_io_cache_size:
+ params['bdev_io_cache_size'] = bdev_io_cache_size
+ if bdev_auto_examine is not None:
+ params["bdev_auto_examine"] = bdev_auto_examine
+
+ return client.call('bdev_set_options', params)
+
+
+@deprecated_alias('construct_compress_bdev')
+def bdev_compress_create(client, base_bdev_name, pm_path, lb_size):
+ """Construct a compress virtual block device.
+
+ Args:
+ base_bdev_name: name of the underlying base bdev
+ pm_path: path to persistent memory
+ lb_size: logical block size for the compressed vol in bytes. Must be 4K or 512.
+
+ Returns:
+ Name of created virtual block device.
+ """
+ params = {'base_bdev_name': base_bdev_name, 'pm_path': pm_path, 'lb_size': lb_size}
+
+ return client.call('bdev_compress_create', params)
+
+
+@deprecated_alias('delete_compress_bdev')
+def bdev_compress_delete(client, name):
+ """Delete compress virtual block device.
+
+ Args:
+ name: name of compress vbdev to delete
+ """
+ params = {'name': name}
+ return client.call('bdev_compress_delete', params)
+
+
+@deprecated_alias('set_compress_pmd')
+def compress_set_pmd(client, pmd):
+ """Set pmd options for the bdev compress.
+
+ Args:
+ pmd: 0 = auto-select, 1 = QAT, 2 = ISAL
+ """
+ params = {'pmd': pmd}
+
+ return client.call('compress_set_pmd', params)
+
+
+def bdev_compress_get_orphans(client, name=None):
+ """Get a list of comp bdevs that do not have a pmem file (aka orphaned).
+
+ Args:
+ name: comp bdev name to query (optional; if omitted, query all comp bdevs)
+
+ Returns:
+ List of comp bdev names.
+ """
+ params = {}
+ if name:
+ params['name'] = name
+ return client.call('bdev_compress_get_orphans', params)
+
+
+@deprecated_alias('construct_crypto_bdev')
+def bdev_crypto_create(client, base_bdev_name, name, crypto_pmd, key, cipher=None, key2=None):
+ """Construct a crypto virtual block device.
+
+ Args:
+ base_bdev_name: name of the underlying base bdev
+ name: name for the crypto vbdev
+ crypto_pmd: name of the DPDK crypto driver to use
+ key: key
+
+ Returns:
+ Name of created virtual block device.
+ """
+ params = {'base_bdev_name': base_bdev_name, 'name': name, 'crypto_pmd': crypto_pmd, 'key': key}
+ if cipher:
+ params['cipher'] = cipher
+ if key2:
+ params['key2'] = key2
+ return client.call('bdev_crypto_create', params)
+
+
+@deprecated_alias('delete_crypto_bdev')
+def bdev_crypto_delete(client, name):
+ """Delete crypto virtual block device.
+
+ Args:
+ name: name of crypto vbdev to delete
+ """
+ params = {'name': name}
+ return client.call('bdev_crypto_delete', params)
+
+
+@deprecated_alias('construct_ocf_bdev')
+def bdev_ocf_create(client, name, mode, cache_bdev_name, core_bdev_name):
+ """Add an OCF block device
+
+ Args:
+ name: name of constructed OCF bdev
+ mode: OCF cache mode: {'wb', 'wt', 'pt', 'wa', 'wi', 'wo'}
+ cache_bdev_name: name of underlying cache bdev
+ core_bdev_name: name of underlying core bdev
+
+ Returns:
+ Name of created block device
+ """
+ params = {'name': name, 'mode': mode, 'cache_bdev_name': cache_bdev_name, 'core_bdev_name': core_bdev_name}
+
+ return client.call('bdev_ocf_create', params)
+
+
+@deprecated_alias('delete_ocf_bdev')
+def bdev_ocf_delete(client, name):
+ """Delete an OCF device
+
+ Args:
+ name: name of OCF bdev
+
+ """
+ params = {'name': name}
+
+ return client.call('bdev_ocf_delete', params)
+
+
+@deprecated_alias('get_ocf_stats')
+def bdev_ocf_get_stats(client, name):
+ """Get statistics of chosen OCF block device
+
+ Args:
+ name: name of OCF bdev
+
+ Returns:
+ Statistics as json object
+ """
+ params = {'name': name}
+
+ return client.call('bdev_ocf_get_stats', params)
+
+
+@deprecated_alias('get_ocf_stats')
+def bdev_ocf_get_bdevs(client, name=None):
+ """Get list of OCF devices including unregistered ones
+
+ Args:
+ name: name of OCF vbdev or name of cache device or name of core device (optional)
+
+ Returns:
+ Array of OCF devices with their current status
+ """
+ params = None
+ if name:
+ params = {'name': name}
+ return client.call('bdev_ocf_get_bdevs', params)
+
+
+@deprecated_alias('construct_malloc_bdev')
+def bdev_malloc_create(client, num_blocks, block_size, name=None, uuid=None):
+ """Construct a malloc block device.
+
+ Args:
+ num_blocks: size of block device in blocks
+ block_size: block size of device; must be a power of 2 and at least 512
+ name: name of block device (optional)
+ uuid: UUID of block device (optional)
+
+ Returns:
+ Name of created block device.
+ """
+ params = {'num_blocks': num_blocks, 'block_size': block_size}
+ if name:
+ params['name'] = name
+ if uuid:
+ params['uuid'] = uuid
+ return client.call('bdev_malloc_create', params)
+
+
+@deprecated_alias('delete_malloc_bdev')
+def bdev_malloc_delete(client, name):
+ """Delete malloc block device.
+
+ Args:
+ name: name of malloc bdev to delete
+ """
+ params = {'name': name}
+ return client.call('bdev_malloc_delete', params)
+
+
+@deprecated_alias('construct_null_bdev')
+def bdev_null_create(client, num_blocks, block_size, name, uuid=None, md_size=None,
+ dif_type=None, dif_is_head_of_md=None):
+ """Construct a null block device.
+
+ Args:
+ num_blocks: size of block device in blocks
+ block_size: block size of device; data part size must be a power of 2 and at least 512
+ name: name of block device
+ uuid: UUID of block device (optional)
+ md_size: metadata size of device (optional)
+ dif_type: protection information type (optional)
+ dif_is_head_of_md: protection information is in the first 8 bytes of metadata (optional)
+
+ Returns:
+ Name of created block device.
+ """
+ params = {'name': name, 'num_blocks': num_blocks,
+ 'block_size': block_size}
+ if uuid:
+ params['uuid'] = uuid
+ if md_size:
+ params['md_size'] = md_size
+ if dif_type:
+ params['dif_type'] = dif_type
+ if dif_is_head_of_md:
+ params['dif_is_head_of_md'] = dif_is_head_of_md
+ return client.call('bdev_null_create', params)
+
+
+@deprecated_alias('delete_null_bdev')
+def bdev_null_delete(client, name):
+ """Remove null bdev from the system.
+
+ Args:
+ name: name of null bdev to delete
+ """
+ params = {'name': name}
+ return client.call('bdev_null_delete', params)
+
+
+@deprecated_alias('get_raid_bdevs')
+def bdev_raid_get_bdevs(client, category):
+ """Get list of raid bdevs based on category
+
+ Args:
+ category: any one of all or online or configuring or offline
+
+ Returns:
+ List of raid bdev names
+ """
+ params = {'category': category}
+ return client.call('bdev_raid_get_bdevs', params)
+
+
+@deprecated_alias('construct_raid_bdev')
+def bdev_raid_create(client, name, raid_level, base_bdevs, strip_size=None, strip_size_kb=None):
+ """Create raid bdev. Either strip size arg will work but one is required.
+
+ Args:
+ name: user defined raid bdev name
+ strip_size (deprecated): strip size of raid bdev in KB, supported values like 8, 16, 32, 64, 128, 256, etc
+ strip_size_kb: strip size of raid bdev in KB, supported values like 8, 16, 32, 64, 128, 256, etc
+ raid_level: raid level of raid bdev, supported values 0
+ base_bdevs: Space separated names of Nvme bdevs in double quotes, like "Nvme0n1 Nvme1n1 Nvme2n1"
+
+ Returns:
+ None
+ """
+ params = {'name': name, 'raid_level': raid_level, 'base_bdevs': base_bdevs}
+
+ if strip_size:
+ params['strip_size'] = strip_size
+
+ if strip_size_kb:
+ params['strip_size_kb'] = strip_size_kb
+
+ return client.call('bdev_raid_create', params)
+
+
+@deprecated_alias('destroy_raid_bdev')
+def bdev_raid_delete(client, name):
+ """Delete raid bdev
+
+ Args:
+ name: raid bdev name
+
+ Returns:
+ None
+ """
+ params = {'name': name}
+ return client.call('bdev_raid_delete', params)
+
+
+@deprecated_alias('construct_aio_bdev')
+def bdev_aio_create(client, filename, name, block_size=None):
+ """Construct a Linux AIO block device.
+
+ Args:
+ filename: path to device or file (ex: /dev/sda)
+ name: name of block device
+ block_size: block size of device (optional; autodetected if omitted)
+
+ Returns:
+ Name of created block device.
+ """
+ params = {'name': name,
+ 'filename': filename}
+
+ if block_size:
+ params['block_size'] = block_size
+
+ return client.call('bdev_aio_create', params)
+
+
+@deprecated_alias('delete_aio_bdev')
+def bdev_aio_delete(client, name):
+ """Remove aio bdev from the system.
+
+ Args:
+ name: name of aio bdev to delete
+ """
+ params = {'name': name}
+ return client.call('bdev_aio_delete', params)
+
+
+def bdev_uring_create(client, filename, name, block_size=None):
+ """Create a bdev with Linux io_uring backend.
+
+ Args:
+ filename: path to device or file (ex: /dev/nvme0n1)
+ name: name of bdev
+ block_size: block size of device (optional; autodetected if omitted)
+
+ Returns:
+ Name of created bdev.
+ """
+ params = {'name': name,
+ 'filename': filename}
+
+ if block_size:
+ params['block_size'] = block_size
+
+ return client.call('bdev_uring_create', params)
+
+
+def bdev_uring_delete(client, name):
+ """Delete a uring bdev.
+
+ Args:
+ name: name of uring bdev to delete
+ """
+ params = {'name': name}
+ return client.call('bdev_uring_delete', params)
+
+
+@deprecated_alias('set_bdev_nvme_options')
+def bdev_nvme_set_options(client, action_on_timeout=None, timeout_us=None, retry_count=None,
+ arbitration_burst=None, low_priority_weight=None,
+ medium_priority_weight=None, high_priority_weight=None,
+ nvme_adminq_poll_period_us=None, nvme_ioq_poll_period_us=None, io_queue_requests=None,
+ delay_cmd_submit=None):
+ """Set options for the bdev nvme. This is startup command.
+
+ Args:
+ action_on_timeout: action to take on command time out. Valid values are: none, reset, abort (optional)
+ timeout_us: Timeout for each command, in microseconds. If 0, don't track timeouts (optional)
+ retry_count: The number of attempts per I/O when an I/O fails (optional)
+ arbitration_burst: The value is expressed as a power of two (optional)
+ low_priority_weight: The number of commands that may be executed from the low priority queue at one time (optional)
+ medium_priority_weight: The number of commands that may be executed from the medium priority queue at one time (optional)
+ high_priority_weight: The number of commands that may be executed from the high priority queue at one time (optional)
+ nvme_adminq_poll_period_us: How often the admin queue is polled for asynchronous events in microseconds (optional)
+ nvme_ioq_poll_period_us: How often to poll I/O queues for completions in microseconds (optional)
+ io_queue_requests: The number of requests allocated for each NVMe I/O queue. Default: 512 (optional)
+ delay_cmd_submit: Enable delayed NVMe command submission to allow batching of multiple commands (optional)
+ """
+ params = {}
+
+ if action_on_timeout:
+ params['action_on_timeout'] = action_on_timeout
+
+ if timeout_us:
+ params['timeout_us'] = timeout_us
+
+ if retry_count:
+ params['retry_count'] = retry_count
+
+ if arbitration_burst:
+ params['arbitration_burst'] = arbitration_burst
+
+ if low_priority_weight:
+ params['low_priority_weight'] = low_priority_weight
+
+ if medium_priority_weight:
+ params['medium_priority_weight'] = medium_priority_weight
+
+ if high_priority_weight:
+ params['high_priority_weight'] = high_priority_weight
+
+ if nvme_adminq_poll_period_us:
+ params['nvme_adminq_poll_period_us'] = nvme_adminq_poll_period_us
+
+ if nvme_ioq_poll_period_us:
+ params['nvme_ioq_poll_period_us'] = nvme_ioq_poll_period_us
+
+ if io_queue_requests:
+ params['io_queue_requests'] = io_queue_requests
+
+ if delay_cmd_submit is not None:
+ params['delay_cmd_submit'] = delay_cmd_submit
+
+ return client.call('bdev_nvme_set_options', params)
+
+
+@deprecated_alias('set_bdev_nvme_hotplug')
+def bdev_nvme_set_hotplug(client, enable, period_us=None):
+ """Set NVMe hotplug options for the bdev nvme. This is a startup command.
+
+ Args:
+ enable: True to enable hotplug, False to disable.
+ period_us: how often the hotplug is processed for insert and remove events. Set 0 to reset to default. (optional)
+ """
+ params = {'enable': enable}
+
+ if period_us:
+ params['period_us'] = period_us
+
+ return client.call('bdev_nvme_set_hotplug', params)
+
+
+@deprecated_alias('construct_nvme_bdev')
+def bdev_nvme_attach_controller(client, name, trtype, traddr, adrfam=None, trsvcid=None,
+ priority=None, subnqn=None, hostnqn=None, hostaddr=None,
+ hostsvcid=None, prchk_reftag=None, prchk_guard=None):
+ """Construct block device for each NVMe namespace in the attached controller.
+
+ Args:
+ name: bdev name prefix; "n" + namespace ID will be appended to create unique names
+ trtype: transport type ("PCIe", "RDMA")
+ traddr: transport address (PCI BDF or IP address)
+ adrfam: address family ("IPv4", "IPv6", "IB", or "FC") (optional for PCIe)
+ trsvcid: transport service ID (port number for IP-based addresses; optional for PCIe)
+ priority: transport connection priority (Sock priority for TCP-based transports; optional)
+ subnqn: subsystem NQN to connect to (optional)
+ hostnqn: NQN to connect from (optional)
+ hostaddr: host transport address (IP address for IP-based transports, NULL for PCIe or FC; optional)
+ hostsvcid: host transport service ID (port number for IP-based transports, NULL for PCIe or FC; optional)
+ prchk_reftag: Enable checking of PI reference tag for I/O processing (optional)
+ prchk_guard: Enable checking of PI guard for I/O processing (optional)
+
+ Returns:
+ Names of created block devices.
+ """
+ params = {'name': name,
+ 'trtype': trtype,
+ 'traddr': traddr}
+
+ if hostnqn:
+ params['hostnqn'] = hostnqn
+
+ if hostaddr:
+ params['hostaddr'] = hostaddr
+
+ if hostsvcid:
+ params['hostsvcid'] = hostsvcid
+
+ if adrfam:
+ params['adrfam'] = adrfam
+
+ if trsvcid:
+ params['trsvcid'] = trsvcid
+
+ if priority:
+ params['priority'] = priority
+
+ if subnqn:
+ params['subnqn'] = subnqn
+
+ if prchk_reftag:
+ params['prchk_reftag'] = prchk_reftag
+
+ if prchk_guard:
+ params['prchk_guard'] = prchk_guard
+
+ return client.call('bdev_nvme_attach_controller', params)
+
+
+@deprecated_alias('delete_nvme_controller')
+def bdev_nvme_detach_controller(client, name):
+ """Detach NVMe controller and delete any associated bdevs.
+
+ Args:
+ name: controller name
+ """
+
+ params = {'name': name}
+ return client.call('bdev_nvme_detach_controller', params)
+
+
+def bdev_nvme_cuse_register(client, name):
+ """Register CUSE devices on NVMe controller.
+
+ Args:
+ name: Name of the operating NVMe controller
+ """
+ params = {'name': name}
+
+ return client.call('bdev_nvme_cuse_register', params)
+
+
+def bdev_nvme_cuse_unregister(client, name):
+ """Unregister CUSE devices on NVMe controller.
+
+ Args:
+ name: Name of the operating NVMe controller
+ """
+ params = {'name': name}
+
+ return client.call('bdev_nvme_cuse_unregister', params)
+
+
+def bdev_zone_block_create(client, name, base_bdev, zone_capacity, optimal_open_zones):
+ """Creates a virtual zone device on top of existing non-zoned bdev.
+
+ Args:
+ name: Zone device name
+ base_bdev: Base Nvme bdev name
+ zone_capacity: Surfaced zone capacity in blocks
+ optimal_open_zones: Number of zones required to reach optimal write speed (optional, default: 1)
+
+ Returns:
+ Name of created block device.
+ """
+ params = {'name': name,
+ 'base_bdev': base_bdev,
+ 'zone_capacity': zone_capacity,
+ 'optimal_open_zones': optimal_open_zones}
+
+ return client.call('bdev_zone_block_create', params)
+
+
+def bdev_zone_block_delete(client, name):
+ """Remove block zone bdev from the system.
+
+ Args:
+ name: name of block zone bdev to delete
+ """
+ params = {'name': name}
+ return client.call('bdev_zone_block_delete', params)
+
+
+@deprecated_alias('construct_rbd_bdev')
+def bdev_rbd_create(client, pool_name, rbd_name, block_size, name=None, user=None, config=None):
+ """Create a Ceph RBD block device.
+
+ Args:
+ pool_name: Ceph RBD pool name
+ rbd_name: Ceph RBD image name
+ block_size: block size of RBD volume
+ name: name of block device (optional)
+ user: Ceph user name (optional)
+ config: map of config keys to values (optional)
+
+ Returns:
+ Name of created block device.
+ """
+ params = {
+ 'pool_name': pool_name,
+ 'rbd_name': rbd_name,
+ 'block_size': block_size,
+ }
+
+ if name:
+ params['name'] = name
+ if user is not None:
+ params['user_id'] = user
+ if config is not None:
+ params['config'] = config
+
+ return client.call('bdev_rbd_create', params)
+
+
+@deprecated_alias('delete_rbd_bdev')
+def bdev_rbd_delete(client, name):
+ """Remove rbd bdev from the system.
+
+ Args:
+ name: name of rbd bdev to delete
+ """
+ params = {'name': name}
+ return client.call('bdev_rbd_delete', params)
+
+
+def bdev_rbd_resize(client, name, new_size):
+ """Resize rbd bdev in the system.
+
+ Args:
+ name: name of rbd bdev to resize
+ new_size: new bdev size of resize operation. The unit is MiB
+ """
+ params = {
+ 'name': name,
+ 'new_size': new_size,
+ }
+ return client.call('bdev_rbd_resize', params)
+
+
+@deprecated_alias('construct_error_bdev')
+def bdev_error_create(client, base_name):
+ """Construct an error injection block device.
+
+ Args:
+ base_name: base bdev name
+ """
+ params = {'base_name': base_name}
+ return client.call('bdev_error_create', params)
+
+
+def bdev_delay_create(client, base_bdev_name, name, avg_read_latency, p99_read_latency, avg_write_latency, p99_write_latency):
+ """Construct a delay block device.
+
+ Args:
+ base_bdev_name: name of the existing bdev
+ name: name of block device
+ avg_read_latency: complete 99% of read ops with this delay
+ p99_read_latency: complete 1% of read ops with this delay
+ avg_write_latency: complete 99% of write ops with this delay
+ p99_write_latency: complete 1% of write ops with this delay
+
+ Returns:
+ Name of created block device.
+ """
+ params = {
+ 'base_bdev_name': base_bdev_name,
+ 'name': name,
+ 'avg_read_latency': avg_read_latency,
+ 'p99_read_latency': p99_read_latency,
+ 'avg_write_latency': avg_write_latency,
+ 'p99_write_latency': p99_write_latency,
+ }
+ return client.call('bdev_delay_create', params)
+
+
+def bdev_delay_delete(client, name):
+ """Remove delay bdev from the system.
+
+ Args:
+ name: name of delay bdev to delete
+ """
+ params = {'name': name}
+ return client.call('bdev_delay_delete', params)
+
+
+def bdev_delay_update_latency(client, delay_bdev_name, latency_type, latency_us):
+ """Update the latency value for a delay block device
+
+ Args:
+ delay_bdev_name: name of the delay bdev
+ latency_type: 'one of: avg_read, avg_write, p99_read, p99_write. No other values accepted.'
+ latency_us: 'new latency value.'
+
+ Returns:
+ True if successful, or a specific error otherwise.
+ """
+ params = {
+ 'delay_bdev_name': delay_bdev_name,
+ 'latency_type': latency_type,
+ 'latency_us': latency_us,
+ }
+ return client.call('bdev_delay_update_latency', params)
+
+
+@deprecated_alias('delete_error_bdev')
+def bdev_error_delete(client, name):
+ """Remove error bdev from the system.
+
+ Args:
+ name: name of error bdev to delete
+ """
+ params = {'name': name}
+ return client.call('bdev_error_delete', params)
+
+
+@deprecated_alias('construct_iscsi_bdev')
+def bdev_iscsi_create(client, name, url, initiator_iqn):
+ """Construct an iSCSI block device.
+
+ Args:
+ name: name of block device
+ url: iSCSI URL
+ initiator_iqn: IQN name to be used by initiator
+
+ Returns:
+ Name of created block device.
+ """
+ params = {
+ 'name': name,
+ 'url': url,
+ 'initiator_iqn': initiator_iqn,
+ }
+ return client.call('bdev_iscsi_create', params)
+
+
+@deprecated_alias('delete_iscsi_bdev')
+def bdev_iscsi_delete(client, name):
+ """Remove iSCSI bdev from the system.
+
+ Args:
+ name: name of iSCSI bdev to delete
+ """
+ params = {'name': name}
+ return client.call('bdev_iscsi_delete', params)
+
+
+@deprecated_alias('construct_pmem_bdev')
+def bdev_pmem_create(client, pmem_file, name):
+ """Construct a libpmemblk block device.
+
+ Args:
+ pmem_file: path to pmemblk pool file
+ name: name of block device
+
+ Returns:
+ Name of created block device.
+ """
+ params = {
+ 'pmem_file': pmem_file,
+ 'name': name
+ }
+ return client.call('bdev_pmem_create', params)
+
+
+@deprecated_alias('delete_pmem_bdev')
+def bdev_pmem_delete(client, name):
+ """Remove pmem bdev from the system.
+
+ Args:
+ name: name of pmem bdev to delete
+ """
+ params = {'name': name}
+ return client.call('bdev_pmem_delete', params)
+
+
+@deprecated_alias('construct_passthru_bdev')
+def bdev_passthru_create(client, base_bdev_name, name):
+ """Construct a pass-through block device.
+
+ Args:
+ base_bdev_name: name of the existing bdev
+ name: name of block device
+
+ Returns:
+ Name of created block device.
+ """
+ params = {
+ 'base_bdev_name': base_bdev_name,
+ 'name': name,
+ }
+ return client.call('bdev_passthru_create', params)
+
+
+@deprecated_alias('delete_passthru_bdev')
+def bdev_passthru_delete(client, name):
+ """Remove pass through bdev from the system.
+
+ Args:
+ name: name of pass through bdev to delete
+ """
+ params = {'name': name}
+ return client.call('bdev_passthru_delete', params)
+
+
+def bdev_opal_create(client, nvme_ctrlr_name, nsid, locking_range_id, range_start, range_length, password):
+ """Create opal virtual block devices from a base nvme bdev.
+
+ Args:
+ nvme_ctrlr_name: name of the nvme ctrlr
+ nsid: namespace ID of nvme ctrlr
+ locking_range_id: locking range ID corresponding to this virtual bdev
+ range_start: start address of this locking range
+ range_length: length of this locking range
+ password: admin password of base nvme bdev
+
+ Returns:
+ Name of the new created block devices.
+ """
+ params = {
+ 'nvme_ctrlr_name': nvme_ctrlr_name,
+ 'nsid': nsid,
+ 'locking_range_id': locking_range_id,
+ 'range_start': range_start,
+ 'range_length': range_length,
+ 'password': password,
+ }
+
+ return client.call('bdev_opal_create', params)
+
+
+def bdev_opal_get_info(client, bdev_name, password):
+ """Get opal locking range info.
+
+ Args:
+ bdev_name: name of opal vbdev to get info
+ password: admin password
+
+ Returns:
+ Locking range info.
+ """
+ params = {
+ 'bdev_name': bdev_name,
+ 'password': password,
+ }
+
+ return client.call('bdev_opal_get_info', params)
+
+
+def bdev_opal_delete(client, bdev_name, password):
+ """Delete opal virtual bdev from the system.
+
+ Args:
+ bdev_name: name of opal vbdev to delete
+ password: admin password of base nvme bdev
+ """
+ params = {
+ 'bdev_name': bdev_name,
+ 'password': password,
+ }
+
+ return client.call('bdev_opal_delete', params)
+
+
+def bdev_opal_new_user(client, bdev_name, admin_password, user_id, user_password):
+ """Add a user to opal bdev who can set lock state for this bdev.
+
+ Args:
+ bdev_name: name of opal vbdev
+ admin_password: admin password
+ user_id: ID of the user who will be added to this opal bdev
+ user_password: password set for this user
+ """
+ params = {
+ 'bdev_name': bdev_name,
+ 'admin_password': admin_password,
+ 'user_id': user_id,
+ 'user_password': user_password,
+ }
+
+ return client.call('bdev_opal_new_user', params)
+
+
+def bdev_opal_set_lock_state(client, bdev_name, user_id, password, lock_state):
+ """set lock state for an opal bdev.
+
+ Args:
+ bdev_name: name of opal vbdev
+ user_id: ID of the user who will set lock state
+ password: password of the user
+ lock_state: lock state to set
+ """
+ params = {
+ 'bdev_name': bdev_name,
+ 'user_id': user_id,
+ 'password': password,
+ 'lock_state': lock_state,
+ }
+
+ return client.call('bdev_opal_set_lock_state', params)
+
+
+@deprecated_alias('construct_split_vbdev')
+def bdev_split_create(client, base_bdev, split_count, split_size_mb=None):
+ """Create split block devices from a base bdev.
+
+ Args:
+ base_bdev: name of bdev to split
+ split_count: number of split bdevs to create
+ split_size_mb: size of each split volume in MiB (optional)
+
+ Returns:
+ List of created block devices.
+ """
+ params = {
+ 'base_bdev': base_bdev,
+ 'split_count': split_count,
+ }
+ if split_size_mb:
+ params['split_size_mb'] = split_size_mb
+
+ return client.call('bdev_split_create', params)
+
+
+@deprecated_alias('destruct_split_vbdev')
+def bdev_split_delete(client, base_bdev):
+ """Delete split block devices.
+
+ Args:
+ base_bdev: name of previously split bdev
+ """
+ params = {
+ 'base_bdev': base_bdev,
+ }
+
+ return client.call('bdev_split_delete', params)
+
+
+@deprecated_alias('construct_ftl_bdev')
+def bdev_ftl_create(client, name, base_bdev, **kwargs):
+ """Construct FTL bdev
+
+ Args:
+ name: name of the bdev
+ base_bdev: name of the base bdev
+ kwargs: optional parameters
+ """
+ params = {'name': name,
+ 'base_bdev': base_bdev}
+ for key, value in kwargs.items():
+ if value is not None:
+ params[key] = value
+
+ return client.call('bdev_ftl_create', params)
+
+
+@deprecated_alias('delete_ftl_bdev')
+def bdev_ftl_delete(client, name):
+ """Delete FTL bdev
+
+ Args:
+ name: name of the bdev
+ """
+ params = {'name': name}
+
+ return client.call('bdev_ftl_delete', params)
+
+
+def bdev_ocssd_create(client, ctrlr_name, bdev_name, nsid=None, range=None):
+ """Creates Open Channel zoned bdev on specified Open Channel controller
+
+ Args:
+ ctrlr_name: name of the OC NVMe controller
+ bdev_name: name of the bdev to create
+ nsid: namespace ID
+ range: parallel unit range
+ """
+ params = {'ctrlr_name': ctrlr_name,
+ 'bdev_name': bdev_name}
+
+ if nsid is not None:
+ params['nsid'] = nsid
+
+ if range is not None:
+ params['range'] = range
+
+ return client.call('bdev_ocssd_create', params)
+
+
+def bdev_ocssd_delete(client, name):
+ """Deletes Open Channel bdev
+
+ Args:
+ name: name of the bdev
+ """
+ params = {'name': name}
+
+ return client.call('bdev_ocssd_delete', params)
+
+
+@deprecated_alias('get_bdevs')
+def bdev_get_bdevs(client, name=None):
+ """Get information about block devices.
+
+ Args:
+ name: bdev name to query (optional; if omitted, query all bdevs)
+
+ Returns:
+ List of bdev information objects.
+ """
+ params = {}
+ if name:
+ params['name'] = name
+ return client.call('bdev_get_bdevs', params)
+
+
+@deprecated_alias('get_bdevs_iostat')
+def bdev_get_iostat(client, name=None):
+ """Get I/O statistics for block devices.
+
+ Args:
+ name: bdev name to query (optional; if omitted, query all bdevs)
+
+ Returns:
+ I/O statistics for the requested block devices.
+ """
+ params = {}
+ if name:
+ params['name'] = name
+ return client.call('bdev_get_iostat', params)
+
+
+@deprecated_alias('enable_bdev_histogram')
+def bdev_enable_histogram(client, name, enable):
+ """Control whether histogram is enabled for specified bdev.
+
+ Args:
+        name: name of bdev
+ """
+ params = {'name': name, "enable": enable}
+ return client.call('bdev_enable_histogram', params)
+
+
+@deprecated_alias('get_bdev_histogram')
+def bdev_get_histogram(client, name):
+ """Get histogram for specified bdev.
+
+ Args:
+        name: name of bdev
+ """
+ params = {'name': name}
+ return client.call('bdev_get_histogram', params)
+
+
+@deprecated_alias('bdev_inject_error')
+def bdev_error_inject_error(client, name, io_type, error_type, num=1):
+ """Inject an error via an error bdev.
+
+ Args:
+ name: name of error bdev
+ io_type: one of "clear", "read", "write", "unmap", "flush", or "all"
+ error_type: one of "failure" or "pending"
+ num: number of commands to fail
+ """
+ params = {
+ 'name': name,
+ 'io_type': io_type,
+ 'error_type': error_type,
+ 'num': num,
+ }
+
+ return client.call('bdev_error_inject_error', params)
+
+
+@deprecated_alias('set_bdev_qd_sampling_period')
+def bdev_set_qd_sampling_period(client, name, period):
+ """Enable queue depth tracking on a specified bdev.
+
+ Args:
+ name: name of a bdev on which to track queue depth.
+ period: period (in microseconds) at which to update the queue depth reading. If set to 0, polling will be disabled.
+ """
+
+ params = {}
+ params['name'] = name
+ params['period'] = period
+ return client.call('bdev_set_qd_sampling_period', params)
+
+
+@deprecated_alias('set_bdev_qos_limit')
+def bdev_set_qos_limit(
+ client,
+ name,
+ rw_ios_per_sec=None,
+ rw_mbytes_per_sec=None,
+ r_mbytes_per_sec=None,
+ w_mbytes_per_sec=None):
+ """Set QoS rate limit on a block device.
+
+ Args:
+ name: name of block device
+ rw_ios_per_sec: R/W IOs per second limit (>=10000, example: 20000). 0 means unlimited.
+ rw_mbytes_per_sec: R/W megabytes per second limit (>=10, example: 100). 0 means unlimited.
+ r_mbytes_per_sec: Read megabytes per second limit (>=10, example: 100). 0 means unlimited.
+ w_mbytes_per_sec: Write megabytes per second limit (>=10, example: 100). 0 means unlimited.
+ """
+ params = {}
+ params['name'] = name
+ if rw_ios_per_sec is not None:
+ params['rw_ios_per_sec'] = rw_ios_per_sec
+ if rw_mbytes_per_sec is not None:
+ params['rw_mbytes_per_sec'] = rw_mbytes_per_sec
+ if r_mbytes_per_sec is not None:
+ params['r_mbytes_per_sec'] = r_mbytes_per_sec
+ if w_mbytes_per_sec is not None:
+ params['w_mbytes_per_sec'] = w_mbytes_per_sec
+ return client.call('bdev_set_qos_limit', params)
+
+
+@deprecated_alias('apply_firmware')
+def bdev_nvme_apply_firmware(client, bdev_name, filename):
+ """Download and commit firmware to NVMe device.
+
+ Args:
+ bdev_name: name of NVMe block device
+ filename: filename of the firmware to download
+ """
+ params = {
+ 'filename': filename,
+ 'bdev_name': bdev_name,
+ }
+ return client.call('bdev_nvme_apply_firmware', params)
diff --git a/src/spdk/scripts/rpc/blobfs.py b/src/spdk/scripts/rpc/blobfs.py
new file mode 100644
index 000000000..a064afecf
--- /dev/null
+++ b/src/spdk/scripts/rpc/blobfs.py
@@ -0,0 +1,57 @@
+def blobfs_detect(client, bdev_name):
+ """Detect whether a blobfs exists on bdev.
+
+ Args:
+ bdev_name: block device name to detect blobfs
+
+ Returns:
+ True if a blobfs exists on the bdev; False otherwise.
+ """
+ params = {
+ 'bdev_name': bdev_name
+ }
+ return client.call('blobfs_detect', params)
+
+
+def blobfs_create(client, bdev_name, cluster_sz=None):
+ """Build blobfs on bdev.
+
+ Args:
+ bdev_name: block device name to build blobfs
+ cluster_sz: Size of cluster in bytes (Optional). Must be multiple of 4KB page size. Default and minimal value is 1M.
+ """
+ params = {
+ 'bdev_name': bdev_name
+ }
+ if cluster_sz:
+ params['cluster_sz'] = cluster_sz
+ return client.call('blobfs_create', params)
+
+
+def blobfs_mount(client, bdev_name, mountpoint):
+ """Mount blobfs on bdev by FUSE.
+
+ Args:
+ bdev_name: block device name where the blobfs is
+ mountpoint: Mountpoint path in host to mount blobfs
+ """
+ params = {
+ 'bdev_name': bdev_name,
+ 'mountpoint': mountpoint
+ }
+ return client.call('blobfs_mount', params)
+
+
+def blobfs_set_cache_size(client, size_in_mb):
+ """Set cache size for the blobstore filesystem.
+
+ Args:
+ size_in_mb: Cache size in megabytes
+
+ Returns:
+ True if cache size is set successfully; False if failed to set.
+ """
+ params = {
+ 'size_in_mb': size_in_mb
+ }
+ return client.call('blobfs_set_cache_size', params)
diff --git a/src/spdk/scripts/rpc/client.py b/src/spdk/scripts/rpc/client.py
new file mode 100644
index 000000000..52ba8d216
--- /dev/null
+++ b/src/spdk/scripts/rpc/client.py
@@ -0,0 +1,183 @@
+import json
+import socket
+import time
+import os
+import logging
+import copy
+
+
+def print_dict(d):
+ print(json.dumps(d, indent=2))
+
+
+def print_json(s):
+ print(json.dumps(s, indent=2).strip('"'))
+
+
+class JSONRPCException(Exception):
+ def __init__(self, message):
+ self.message = message
+
+
+class JSONRPCClient(object):
+ def __init__(self, addr, port=None, timeout=60.0, **kwargs):
+ self.sock = None
+ ch = logging.StreamHandler()
+ ch.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
+ ch.setLevel(logging.DEBUG)
+ self._logger = logging.getLogger("JSONRPCClient(%s)" % addr)
+ self._logger.addHandler(ch)
+ self.log_set_level(kwargs.get('log_level', logging.ERROR))
+ connect_retries = kwargs.get('conn_retries', 0)
+
+ self.timeout = timeout
+ self._request_id = 0
+ self._recv_buf = ""
+ self._reqs = []
+
+ for i in range(connect_retries):
+ try:
+ self._connect(addr, port)
+ return
+ except Exception as e:
+ # ignore and retry in 200ms
+ time.sleep(0.2)
+
+ # try one last time without try/except
+ self._connect(addr, port)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exception_type, exception_value, traceback):
+ self.close()
+
+ def _connect(self, addr, port):
+ try:
+ if os.path.exists(addr):
+ self._logger.debug("Trying to connect to UNIX socket: %s", addr)
+ self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ self.sock.connect(addr)
+ elif port:
+ if ':' in addr:
+ self._logger.debug("Trying to connect to IPv6 address addr:%s, port:%i", addr, port)
+ for res in socket.getaddrinfo(addr, port, socket.AF_INET6, socket.SOCK_STREAM, socket.SOL_TCP):
+ af, socktype, proto, canonname, sa = res
+ self.sock = socket.socket(af, socktype, proto)
+ self.sock.connect(sa)
+ else:
+                    self._logger.debug("Trying to connect to IPv4 address addr:%s, port:%i", addr, port)
+ self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ self.sock.connect((addr, port))
+ else:
+ raise socket.error("Unix socket '%s' does not exist" % addr)
+ except socket.error as ex:
+ raise JSONRPCException("Error while connecting to %s\n"
+ "Error details: %s" % (addr, ex))
+
+ def get_logger(self):
+ return self._logger
+
+ """Set logging level
+
+ Args:
+ lvl: Log level to set as accepted by logger.setLevel
+ """
+ def log_set_level(self, lvl):
+ self._logger.info("Setting log level to %s", lvl)
+ self._logger.setLevel(lvl)
+ self._logger.info("Log level set to %s", lvl)
+
+ def close(self):
+ if getattr(self, "sock", None):
+ self.sock.shutdown(socket.SHUT_RDWR)
+ self.sock.close()
+ self.sock = None
+
+ def add_request(self, method, params):
+ self._request_id += 1
+ req = {
+ 'jsonrpc': '2.0',
+ 'method': method,
+ 'id': self._request_id
+ }
+
+ if params:
+ req['params'] = copy.deepcopy(params)
+
+ self._logger.debug("append request:\n%s\n", json.dumps(req))
+ self._reqs.append(req)
+ return self._request_id
+
+ def flush(self):
+ self._logger.debug("Flushing buffer")
+ # TODO: We can drop indent parameter
+ reqstr = "\n".join(json.dumps(req, indent=2) for req in self._reqs)
+ self._reqs = []
+ self._logger.info("Requests:\n%s\n", reqstr)
+ self.sock.sendall(reqstr.encode("utf-8"))
+
+ def send(self, method, params=None):
+ id = self.add_request(method, params)
+ self.flush()
+ return id
+
+ def decode_one_response(self):
+ try:
+ self._logger.debug("Trying to decode response '%s'", self._recv_buf)
+ buf = self._recv_buf.lstrip()
+ obj, idx = json.JSONDecoder().raw_decode(buf)
+ self._recv_buf = buf[idx:]
+ return obj
+ except ValueError:
+ self._logger.debug("Partial response")
+ return None
+
+ def recv(self):
+ start_time = time.process_time()
+ response = self.decode_one_response()
+ while not response:
+ try:
+ timeout = self.timeout - (time.process_time() - start_time)
+ self.sock.settimeout(timeout)
+ newdata = self.sock.recv(4096)
+ if not newdata:
+ self.sock.close()
+ self.sock = None
+ raise JSONRPCException("Connection closed with partial response:\n%s\n" % self._recv_buf)
+ self._recv_buf += newdata.decode("utf-8")
+ response = self.decode_one_response()
+ except socket.timeout:
+ break # throw exception after loop to avoid Python freaking out about nested exceptions
+ except ValueError:
+ continue # incomplete response; keep buffering
+
+ if not response:
+ raise JSONRPCException("Timeout while waiting for response:\n%s\n" % self._recv_buf)
+
+ self._logger.info("response:\n%s\n", json.dumps(response, indent=2))
+ return response
+
+ def call(self, method, params={}):
+ self._logger.debug("call('%s')" % method)
+ req_id = self.send(method, params)
+ try:
+ response = self.recv()
+ except JSONRPCException as e:
+ """ Don't expect response to kill """
+ if not self.sock and method == "spdk_kill_instance":
+ self._logger.info("Connection terminated but ignoring since method is '%s'" % method)
+ return {}
+ else:
+ raise e
+
+ if 'error' in response:
+ params["method"] = method
+ params["req_id"] = req_id
+ msg = "\n".join(["request:", "%s" % json.dumps(params, indent=2),
+ "Got JSON-RPC error response",
+ "response:",
+ json.dumps(response['error'], indent=2)])
+ raise JSONRPCException(msg)
+
+ return response['result']
diff --git a/src/spdk/scripts/rpc/env_dpdk.py b/src/spdk/scripts/rpc/env_dpdk.py
new file mode 100644
index 000000000..f2c098e52
--- /dev/null
+++ b/src/spdk/scripts/rpc/env_dpdk.py
@@ -0,0 +1,8 @@
+def env_dpdk_get_mem_stats(client):
+ """Dump the applications memory stats to a file.
+
+ Returns:
+ The path to the file where the stats are written.
+ """
+
+ return client.call('env_dpdk_get_mem_stats')
diff --git a/src/spdk/scripts/rpc/helpers.py b/src/spdk/scripts/rpc/helpers.py
new file mode 100644
index 000000000..d931fcf14
--- /dev/null
+++ b/src/spdk/scripts/rpc/helpers.py
@@ -0,0 +1,16 @@
+import sys
+
+deprecated_aliases = {}
+
+
+def deprecated_alias(old_name):
+ def wrap(f):
+ def old_f(*args, **kwargs):
+ ret = f(*args, **kwargs)
+ print("{} is deprecated, use {} instead.".format(old_name, f.__name__), file=sys.stderr)
+ return ret
+ old_f.__name__ = old_name
+ deprecated_aliases[old_name] = f.__name__
+ setattr(sys.modules[f.__module__], old_name, old_f)
+ return f
+ return wrap
diff --git a/src/spdk/scripts/rpc/idxd.py b/src/spdk/scripts/rpc/idxd.py
new file mode 100644
index 000000000..3e076c68e
--- /dev/null
+++ b/src/spdk/scripts/rpc/idxd.py
@@ -0,0 +1,8 @@
+def idxd_scan_accel_engine(client, config_number):
+ """Scan and enable IDXD accel engine.
+
+ Args:
+ config_number: Pre-defined configuration number, see docs.
+ """
+ params = {'config_number': config_number}
+ return client.call('idxd_scan_accel_engine', params)
diff --git a/src/spdk/scripts/rpc/ioat.py b/src/spdk/scripts/rpc/ioat.py
new file mode 100644
index 000000000..ae43a3c3f
--- /dev/null
+++ b/src/spdk/scripts/rpc/ioat.py
@@ -0,0 +1,17 @@
+from .helpers import deprecated_alias
+
+
+@deprecated_alias('ioat_scan_copy_engine')
+@deprecated_alias('scan_ioat_copy_engine')
+def ioat_scan_accel_engine(client, pci_whitelist):
+ """Scan and enable IOAT accel engine.
+
+ Args:
+ pci_whitelist: Python list of PCI addresses in
+ domain:bus:device.function format or
+ domain.bus.device.function format
+ """
+ params = {}
+ if pci_whitelist:
+ params['pci_whitelist'] = pci_whitelist
+ return client.call('ioat_scan_accel_engine', params)
diff --git a/src/spdk/scripts/rpc/iscsi.py b/src/spdk/scripts/rpc/iscsi.py
new file mode 100644
index 000000000..6d64b6185
--- /dev/null
+++ b/src/spdk/scripts/rpc/iscsi.py
@@ -0,0 +1,558 @@
+from .helpers import deprecated_alias
+
+
+@deprecated_alias('set_iscsi_options')
+def iscsi_set_options(
+ client,
+ auth_file=None,
+ node_base=None,
+ nop_timeout=None,
+ nop_in_interval=None,
+ disable_chap=None,
+ require_chap=None,
+ mutual_chap=None,
+ chap_group=None,
+ max_sessions=None,
+ max_queue_depth=None,
+ max_connections_per_session=None,
+ default_time2wait=None,
+ default_time2retain=None,
+ first_burst_length=None,
+ immediate_data=None,
+ error_recovery_level=None,
+ allow_duplicated_isid=None):
+ """Set iSCSI target options.
+
+ Args:
+ auth_file: Path to CHAP shared secret file (optional)
+ node_base: Prefix of the name of iSCSI target node (optional)
+ nop_timeout: Timeout in seconds to nop-in request to the initiator (optional)
+ nop_in_interval: Time interval in secs between nop-in requests by the target (optional)
+ disable_chap: CHAP for discovery session should be disabled (optional)
+ require_chap: CHAP for discovery session should be required
+ mutual_chap: CHAP for discovery session should be mutual
+ chap_group: Authentication group ID for discovery session
+ max_sessions: Maximum number of sessions in the host
+ max_queue_depth: Maximum number of outstanding I/Os per queue
+ max_connections_per_session: Negotiated parameter, MaxConnections
+ default_time2wait: Negotiated parameter, DefaultTime2Wait
+ default_time2retain: Negotiated parameter, DefaultTime2Retain
+ first_burst_length: Negotiated parameter, FirstBurstLength
+ immediate_data: Negotiated parameter, ImmediateData
+ error_recovery_level: Negotiated parameter, ErrorRecoveryLevel
+ allow_duplicated_isid: Allow duplicated initiator session ID
+
+ Returns:
+ True or False
+ """
+ params = {}
+
+ if auth_file:
+ params['auth_file'] = auth_file
+ if node_base:
+ params['node_base'] = node_base
+ if nop_timeout:
+ params['nop_timeout'] = nop_timeout
+ if nop_in_interval:
+ params['nop_in_interval'] = nop_in_interval
+ if disable_chap:
+ params['disable_chap'] = disable_chap
+ if require_chap:
+ params['require_chap'] = require_chap
+ if mutual_chap:
+ params['mutual_chap'] = mutual_chap
+ if chap_group:
+ params['chap_group'] = chap_group
+ if max_sessions:
+ params['max_sessions'] = max_sessions
+ if max_queue_depth:
+ params['max_queue_depth'] = max_queue_depth
+ if max_connections_per_session:
+ params['max_connections_per_session'] = max_connections_per_session
+ if default_time2wait:
+ params['default_time2wait'] = default_time2wait
+ if default_time2retain:
+ params['default_time2retain'] = default_time2retain
+ if first_burst_length:
+ params['first_burst_length'] = first_burst_length
+ if immediate_data:
+ params['immediate_data'] = immediate_data
+ if error_recovery_level:
+ params['error_recovery_level'] = error_recovery_level
+ if allow_duplicated_isid:
+ params['allow_duplicated_isid'] = allow_duplicated_isid
+
+ return client.call('iscsi_set_options', params)
+
+
+@deprecated_alias('set_iscsi_discovery_auth')
+def iscsi_set_discovery_auth(
+ client,
+ disable_chap=None,
+ require_chap=None,
+ mutual_chap=None,
+ chap_group=None):
+ """Set CHAP authentication for discovery service.
+
+ Args:
+ disable_chap: CHAP for discovery session should be disabled (optional)
+ require_chap: CHAP for discovery session should be required (optional)
+ mutual_chap: CHAP for discovery session should be mutual (optional)
+ chap_group: Authentication group ID for discovery session (optional)
+
+ Returns:
+ True or False
+ """
+ params = {}
+
+ if disable_chap:
+ params['disable_chap'] = disable_chap
+ if require_chap:
+ params['require_chap'] = require_chap
+ if mutual_chap:
+ params['mutual_chap'] = mutual_chap
+ if chap_group:
+ params['chap_group'] = chap_group
+
+ return client.call('iscsi_set_discovery_auth', params)
+
+
+@deprecated_alias('get_iscsi_auth_groups')
+def iscsi_get_auth_groups(client):
+ """Display current authentication group configuration.
+
+ Returns:
+ List of current authentication group configuration.
+ """
+ return client.call('iscsi_get_auth_groups')
+
+
+@deprecated_alias('get_portal_groups')
+def iscsi_get_portal_groups(client):
+ """Display current portal group configuration.
+
+ Returns:
+ List of current portal group configuration.
+ """
+ return client.call('iscsi_get_portal_groups')
+
+
+@deprecated_alias('get_initiator_groups')
+def iscsi_get_initiator_groups(client):
+ """Display current initiator group configuration.
+
+ Returns:
+ List of current initiator group configuration.
+ """
+ return client.call('iscsi_get_initiator_groups')
+
+
+@deprecated_alias('get_target_nodes')
+def iscsi_get_target_nodes(client):
+ """Display target nodes.
+
+ Returns:
+ List of ISCSI target node objects.
+ """
+ return client.call('iscsi_get_target_nodes')
+
+
+@deprecated_alias('construct_target_node')
+def iscsi_create_target_node(
+ client,
+ luns,
+ pg_ig_maps,
+ name,
+ alias_name,
+ queue_depth,
+ chap_group=None,
+ disable_chap=None,
+ require_chap=None,
+ mutual_chap=None,
+ header_digest=None,
+ data_digest=None):
+ """Add a target node.
+
+ Args:
+ luns: List of bdev_name_id_pairs, e.g. [{"bdev_name": "Malloc1", "lun_id": 1}]
+ pg_ig_maps: List of pg_ig_mappings, e.g. [{"pg_tag": pg, "ig_tag": ig}]
+ name: Target node name (ASCII)
+ alias_name: Target node alias name (ASCII)
+ queue_depth: Desired target queue depth
+ chap_group: Authentication group ID for this target node
+ disable_chap: CHAP authentication should be disabled for this target node
+ require_chap: CHAP authentication should be required for this target node
+ mutual_chap: CHAP authentication should be mutual/bidirectional
+ header_digest: Header Digest should be required for this target node
+ data_digest: Data Digest should be required for this target node
+
+ Returns:
+ True or False
+ """
+ params = {
+ 'name': name,
+ 'alias_name': alias_name,
+ 'pg_ig_maps': pg_ig_maps,
+ 'luns': luns,
+ 'queue_depth': queue_depth,
+ }
+
+ if chap_group:
+ params['chap_group'] = chap_group
+ if disable_chap:
+ params['disable_chap'] = disable_chap
+ if require_chap:
+ params['require_chap'] = require_chap
+ if mutual_chap:
+ params['mutual_chap'] = mutual_chap
+ if header_digest:
+ params['header_digest'] = header_digest
+ if data_digest:
+ params['data_digest'] = data_digest
+ return client.call('iscsi_create_target_node', params)
+
+
+@deprecated_alias('target_node_add_lun')
+def iscsi_target_node_add_lun(client, name, bdev_name, lun_id=None):
+ """Add LUN to the target node.
+
+ Args:
+ name: Target node name (ASCII)
+ bdev_name: bdev name
+ lun_id: LUN ID (integer >= 0)
+
+ Returns:
+ True or False
+ """
+ params = {
+ 'name': name,
+ 'bdev_name': bdev_name,
+ }
+ if lun_id:
+ params['lun_id'] = lun_id
+ return client.call('iscsi_target_node_add_lun', params)
+
+
+@deprecated_alias('set_iscsi_target_node_auth')
+def iscsi_target_node_set_auth(
+ client,
+ name,
+ chap_group=None,
+ disable_chap=None,
+ require_chap=None,
+ mutual_chap=None):
+ """Set CHAP authentication for the target node.
+
+ Args:
+ name: Target node name (ASCII)
+ chap_group: Authentication group ID for this target node
+ disable_chap: CHAP authentication should be disabled for this target node
+ require_chap: CHAP authentication should be required for this target node
+ mutual_chap: CHAP authentication should be mutual/bidirectional
+
+ Returns:
+ True or False
+ """
+ params = {
+ 'name': name,
+ }
+
+ if chap_group:
+ params['chap_group'] = chap_group
+ if disable_chap:
+ params['disable_chap'] = disable_chap
+ if require_chap:
+ params['require_chap'] = require_chap
+ if mutual_chap:
+ params['mutual_chap'] = mutual_chap
+ return client.call('iscsi_target_node_set_auth', params)
+
+
+@deprecated_alias('add_iscsi_auth_group')
+def iscsi_create_auth_group(client, tag, secrets=None):
+ """Create authentication group for CHAP authentication.
+
+ Args:
+ tag: Authentication group tag (unique, integer > 0).
+ secrets: Array of secrets objects (optional).
+
+ Returns:
+ True or False
+ """
+ params = {'tag': tag}
+
+ if secrets:
+ params['secrets'] = secrets
+ return client.call('iscsi_create_auth_group', params)
+
+
+@deprecated_alias('delete_iscsi_auth_group')
+def iscsi_delete_auth_group(client, tag):
+ """Delete an authentication group.
+
+ Args:
+ tag: Authentication group tag (unique, integer > 0)
+
+ Returns:
+ True or False
+ """
+ params = {'tag': tag}
+ return client.call('iscsi_delete_auth_group', params)
+
+
+@deprecated_alias('add_secret_to_iscsi_auth_group')
+def iscsi_auth_group_add_secret(client, tag, user, secret, muser=None, msecret=None):
+ """Add a secret to an authentication group.
+
+ Args:
+ tag: Authentication group tag (unique, integer > 0)
+ user: User name for one-way CHAP authentication
+ secret: Secret for one-way CHAP authentication
+ muser: User name for mutual CHAP authentication (optional)
+ msecret: Secret for mutual CHAP authentication (optional)
+
+ Returns:
+ True or False
+ """
+ params = {'tag': tag, 'user': user, 'secret': secret}
+
+ if muser:
+ params['muser'] = muser
+ if msecret:
+ params['msecret'] = msecret
+ return client.call('iscsi_auth_group_add_secret', params)
+
+
+@deprecated_alias('delete_secret_from_iscsi_auth_group')
+def iscsi_auth_group_remove_secret(client, tag, user):
+ """Remove a secret from an authentication group.
+
+ Args:
+ tag: Authentication group tag (unique, integer > 0)
+ user: User name for one-way CHAP authentication
+
+ Returns:
+ True or False
+ """
+ params = {'tag': tag, 'user': user}
+ return client.call('iscsi_auth_group_remove_secret', params)
+
+
+@deprecated_alias('delete_pg_ig_maps')
+def iscsi_target_node_remove_pg_ig_maps(client, pg_ig_maps, name):
+ """Delete PG-IG maps from the target node.
+
+ Args:
+ pg_ig_maps: List of pg_ig_mappings, e.g. [{"pg_tag": pg, "ig_tag": ig}]
+        name: Target node name (ASCII)
+
+ Returns:
+ True or False
+ """
+ params = {
+ 'name': name,
+ 'pg_ig_maps': pg_ig_maps,
+ }
+ return client.call('iscsi_target_node_remove_pg_ig_maps', params)
+
+
+@deprecated_alias('add_pg_ig_maps')
+def iscsi_target_node_add_pg_ig_maps(client, pg_ig_maps, name):
+ """Add PG-IG maps to the target node.
+
+ Args:
+ pg_ig_maps: List of pg_ig_mappings, e.g. [{"pg_tag": pg, "ig_tag": ig}]
+        name: Target node name (ASCII)
+
+ Returns:
+ True or False
+ """
+ params = {
+ 'name': name,
+ 'pg_ig_maps': pg_ig_maps,
+ }
+ return client.call('iscsi_target_node_add_pg_ig_maps', params)
+
+
+@deprecated_alias('add_portal_group')
+def iscsi_create_portal_group(client, portals, tag):
+ """Add a portal group.
+
+ Args:
+ portals: List of portals, e.g. [{'host': ip, 'port': port}]
+        tag: Portal group tag (unique, integer > 0)
+
+ Returns:
+ True or False
+ """
+ params = {'tag': tag, 'portals': portals}
+ return client.call('iscsi_create_portal_group', params)
+
+
+@deprecated_alias('add_initiator_group')
+def iscsi_create_initiator_group(client, tag, initiators, netmasks):
+ """Add an initiator group.
+
+ Args:
+ tag: Initiator group tag (unique, integer > 0)
+ initiators: List of initiator hostnames or IP addresses, e.g. ["127.0.0.1","192.168.200.100"]
+ netmasks: List of initiator netmasks, e.g. ["255.255.0.0","255.248.0.0"]
+
+ Returns:
+ True or False
+ """
+ params = {'tag': tag, 'initiators': initiators, 'netmasks': netmasks}
+ return client.call('iscsi_create_initiator_group', params)
+
+
+@deprecated_alias('add_initiators_to_initiator_group')
+def iscsi_initiator_group_add_initiators(
+ client,
+ tag,
+ initiators=None,
+ netmasks=None):
+ """Add initiators to an existing initiator group.
+
+ Args:
+ tag: Initiator group tag (unique, integer > 0)
+ initiators: List of initiator hostnames or IP addresses, e.g. ["127.0.0.1","192.168.200.100"]
+ netmasks: List of initiator netmasks, e.g. ["255.255.0.0","255.248.0.0"]
+
+ Returns:
+ True or False
+ """
+ params = {'tag': tag}
+
+ if initiators:
+ params['initiators'] = initiators
+ if netmasks:
+ params['netmasks'] = netmasks
+ return client.call('iscsi_initiator_group_add_initiators', params)
+
+
+@deprecated_alias('delete_initiators_from_initiator_group')
+def iscsi_initiator_group_remove_initiators(
+ client, tag, initiators=None, netmasks=None):
+ """Delete initiators from an existing initiator group.
+
+ Args:
+ tag: Initiator group tag (unique, integer > 0)
+ initiators: List of initiator hostnames or IP addresses, e.g. ["127.0.0.1","192.168.200.100"]
+ netmasks: List of initiator netmasks, e.g. ["255.255.0.0","255.248.0.0"]
+
+ Returns:
+ True or False
+ """
+ params = {'tag': tag}
+
+ if initiators:
+ params['initiators'] = initiators
+ if netmasks:
+ params['netmasks'] = netmasks
+ return client.call('iscsi_initiator_group_remove_initiators', params)
+
+
+@deprecated_alias('delete_target_node')
+def iscsi_delete_target_node(client, target_node_name):
+ """Delete a target node.
+
+ Args:
+ target_node_name: Target node name to be deleted. Example: iqn.2016-06.io.spdk:disk1.
+
+ Returns:
+ True or False
+ """
+ params = {'name': target_node_name}
+ return client.call('iscsi_delete_target_node', params)
+
+
+@deprecated_alias('delete_portal_group')
+def iscsi_delete_portal_group(client, tag):
+ """Delete a portal group.
+
+ Args:
+ tag: Portal group tag (unique, integer > 0)
+
+ Returns:
+ True or False
+ """
+ params = {'tag': tag}
+ return client.call('iscsi_delete_portal_group', params)
+
+
+@deprecated_alias('delete_initiator_group')
+def iscsi_delete_initiator_group(client, tag):
+ """Delete an initiator group.
+
+ Args:
+ tag: Initiator group tag (unique, integer > 0)
+
+ Returns:
+ True or False
+ """
+ params = {'tag': tag}
+ return client.call('iscsi_delete_initiator_group', params)
+
+
+def iscsi_portal_group_set_auth(
+ client,
+ tag,
+ chap_group=None,
+ disable_chap=None,
+ require_chap=None,
+ mutual_chap=None):
+ """Set CHAP authentication for discovery sessions specific for the portal group.
+
+ Args:
+ tag: Portal group tag (unique, integer > 0)
+ chap_group: Authentication group ID for this portal group
+ disable_chap: CHAP authentication should be disabled for this portal group
+ require_chap: CHAP authentication should be required for this portal group
+ mutual_chap: CHAP authentication should be mutual/bidirectional
+
+ Returns:
+ True or False
+ """
+ params = {
+ 'tag': tag,
+ }
+
+ if chap_group:
+ params['chap_group'] = chap_group
+ if disable_chap:
+ params['disable_chap'] = disable_chap
+ if require_chap:
+ params['require_chap'] = require_chap
+ if mutual_chap:
+ params['mutual_chap'] = mutual_chap
+ return client.call('iscsi_portal_group_set_auth', params)
+
+
+@deprecated_alias('get_iscsi_connections')
+def iscsi_get_connections(client):
+ """Display iSCSI connections.
+
+ Returns:
+        List of iSCSI connections.
+ """
+ return client.call('iscsi_get_connections')
+
+
+@deprecated_alias('get_iscsi_global_params')
+def iscsi_get_options(client):
+ """Display iSCSI global parameters.
+
+ Returns:
+        List of iSCSI global parameters.
+ """
+ return client.call('iscsi_get_options')
+
+
+@deprecated_alias('get_iscsi_devices')
+def scsi_get_devices(client):
+ """Display SCSI devices.
+
+ Returns:
+        List of SCSI devices.
+ """
+ return client.call('scsi_get_devices')
diff --git a/src/spdk/scripts/rpc/log.py b/src/spdk/scripts/rpc/log.py
new file mode 100644
index 000000000..ee40bf833
--- /dev/null
+++ b/src/spdk/scripts/rpc/log.py
@@ -0,0 +1,75 @@
+from .helpers import deprecated_alias
+
+
+@deprecated_alias('set_log_flag')
+def log_set_flag(client, flag):
+ """Set log flag.
+
+ Args:
+ flag: log flag we want to set. (for example "nvme")
+ """
+ params = {'flag': flag}
+ return client.call('log_set_flag', params)
+
+
+@deprecated_alias('clear_log_flag')
+def log_clear_flag(client, flag):
+ """Clear log flag.
+
+ Args:
+ flag: log flag we want to clear. (for example "nvme")
+ """
+ params = {'flag': flag}
+ return client.call('log_clear_flag', params)
+
+
+@deprecated_alias('get_log_flags')
+def log_get_flags(client):
+ """Get log flags
+
+ Returns:
+ List of log flags
+ """
+ return client.call('log_get_flags')
+
+
+@deprecated_alias('set_log_level')
+def log_set_level(client, level):
+ """Set log level.
+
+ Args:
+ level: log level we want to set. (for example "DEBUG")
+ """
+ params = {'level': level}
+ return client.call('log_set_level', params)
+
+
+@deprecated_alias('get_log_level')
+def log_get_level(client):
+ """Get log level
+
+ Returns:
+ Current log level
+ """
+ return client.call('log_get_level')
+
+
+@deprecated_alias('set_log_print_level')
+def log_set_print_level(client, level):
+ """Set log print level.
+
+ Args:
+ level: log print level we want to set. (for example "DEBUG")
+ """
+ params = {'level': level}
+ return client.call('log_set_print_level', params)
+
+
+@deprecated_alias('get_log_print_level')
+def log_get_print_level(client):
+ """Get log print level
+
+ Returns:
+ Current log print level
+ """
+ return client.call('log_get_print_level')
diff --git a/src/spdk/scripts/rpc/lvol.py b/src/spdk/scripts/rpc/lvol.py
new file mode 100644
index 000000000..1c31f5eda
--- /dev/null
+++ b/src/spdk/scripts/rpc/lvol.py
@@ -0,0 +1,228 @@
from .helpers import deprecated_alias


@deprecated_alias('construct_lvol_store')
def bdev_lvol_create_lvstore(client, bdev_name, lvs_name, cluster_sz=None, clear_method=None):
    """Construct a logical volume store.

    Args:
        bdev_name: bdev on which to construct logical volume store
        lvs_name: name of the logical volume store to create
        cluster_sz: cluster size of the logical volume store in bytes (optional)
        clear_method: Change clear method for data region. Available: none, unmap, write_zeroes (optional)

    Returns:
        UUID of created logical volume store.
    """
    params = {'bdev_name': bdev_name, 'lvs_name': lvs_name}
    if cluster_sz:
        params['cluster_sz'] = cluster_sz
    if clear_method:
        params['clear_method'] = clear_method
    return client.call('bdev_lvol_create_lvstore', params)


@deprecated_alias('rename_lvol_store')
def bdev_lvol_rename_lvstore(client, old_name, new_name):
    """Rename a logical volume store.

    Args:
        old_name: existing logical volume store name
        new_name: new logical volume store name
    """
    params = {
        'old_name': old_name,
        'new_name': new_name
    }
    return client.call('bdev_lvol_rename_lvstore', params)


@deprecated_alias('construct_lvol_bdev')
def bdev_lvol_create(client, lvol_name, size, thin_provision=False, uuid=None, lvs_name=None, clear_method=None):
    """Create a logical volume on a logical volume store.

    Args:
        lvol_name: name of logical volume to create
        size: desired size of logical volume in bytes (will be rounded up to a multiple of cluster size)
        thin_provision: True to enable thin provisioning
        uuid: UUID of logical volume store to create logical volume on (optional)
        lvs_name: name of logical volume store to create logical volume on (optional)
        clear_method: Change clear method for data region. Available: none, unmap, write_zeroes (optional)

    Either uuid or lvs_name must be specified, but not both.

    Returns:
        Name of created logical volume block device.
    """
    if (uuid and lvs_name) or (not uuid and not lvs_name):
        raise ValueError("Either uuid or lvs_name must be specified, but not both")

    params = {'lvol_name': lvol_name, 'size': size}
    if thin_provision:
        params['thin_provision'] = thin_provision
    if uuid:
        params['uuid'] = uuid
    if lvs_name:
        params['lvs_name'] = lvs_name
    if clear_method:
        params['clear_method'] = clear_method
    return client.call('bdev_lvol_create', params)


@deprecated_alias('snapshot_lvol_bdev')
def bdev_lvol_snapshot(client, lvol_name, snapshot_name):
    """Capture a snapshot of the current state of a logical volume.

    Args:
        lvol_name: logical volume to create a snapshot from
        snapshot_name: name for the newly created snapshot

    Returns:
        Name of created logical volume snapshot.
    """
    params = {
        'lvol_name': lvol_name,
        'snapshot_name': snapshot_name
    }
    return client.call('bdev_lvol_snapshot', params)


@deprecated_alias('clone_lvol_bdev')
def bdev_lvol_clone(client, snapshot_name, clone_name):
    """Create a logical volume based on a snapshot.

    Args:
        snapshot_name: snapshot to clone
        clone_name: name of logical volume to create

    Returns:
        Name of created logical volume clone.
    """
    params = {
        'snapshot_name': snapshot_name,
        'clone_name': clone_name
    }
    return client.call('bdev_lvol_clone', params)


@deprecated_alias('rename_lvol_bdev')
def bdev_lvol_rename(client, old_name, new_name):
    """Rename a logical volume.

    Args:
        old_name: existing logical volume name
        new_name: new logical volume name
    """
    params = {
        'old_name': old_name,
        'new_name': new_name
    }
    return client.call('bdev_lvol_rename', params)


@deprecated_alias('resize_lvol_bdev')
def bdev_lvol_resize(client, name, size):
    """Resize a logical volume.

    Args:
        name: name of logical volume to resize
        size: desired size of logical volume in bytes (will be rounded up to a multiple of cluster size)
    """
    params = {
        'name': name,
        'size': size,
    }
    return client.call('bdev_lvol_resize', params)


@deprecated_alias('set_read_only_lvol_bdev')
def bdev_lvol_set_read_only(client, name):
    """Mark logical volume as read only.

    Args:
        name: name of logical volume to set as read only
    """
    params = {
        'name': name,
    }
    return client.call('bdev_lvol_set_read_only', params)


@deprecated_alias('destroy_lvol_bdev')
def bdev_lvol_delete(client, name):
    """Destroy a logical volume.

    Args:
        name: name of logical volume to destroy
    """
    params = {
        'name': name,
    }
    return client.call('bdev_lvol_delete', params)


@deprecated_alias('inflate_lvol_bdev')
def bdev_lvol_inflate(client, name):
    """Inflate a logical volume.

    Args:
        name: name of logical volume to inflate
    """
    params = {
        'name': name,
    }
    return client.call('bdev_lvol_inflate', params)


@deprecated_alias('decouple_parent_lvol_bdev')
def bdev_lvol_decouple_parent(client, name):
    """Decouple parent of a logical volume.

    Args:
        name: name of logical volume to decouple parent
    """
    params = {
        'name': name,
    }
    return client.call('bdev_lvol_decouple_parent', params)


@deprecated_alias('destroy_lvol_store')
def bdev_lvol_delete_lvstore(client, uuid=None, lvs_name=None):
    """Destroy a logical volume store.

    Args:
        uuid: UUID of logical volume store to destroy (optional)
        lvs_name: name of logical volume store to destroy (optional)

    Either uuid or lvs_name must be specified, but not both.
    """
    if (uuid and lvs_name) or (not uuid and not lvs_name):
        raise ValueError("Exactly one of uuid or lvs_name must be specified")

    params = {}
    if uuid:
        params['uuid'] = uuid
    if lvs_name:
        params['lvs_name'] = lvs_name
    return client.call('bdev_lvol_delete_lvstore', params)


@deprecated_alias('get_lvol_stores')
def bdev_lvol_get_lvstores(client, uuid=None, lvs_name=None):
    """List logical volume stores.

    Args:
        uuid: UUID of logical volume store to retrieve information about (optional)
        lvs_name: name of logical volume store to retrieve information about (optional)

    Either uuid or lvs_name may be specified, but not both.
    If both uuid and lvs_name are omitted, information about all logical volume stores is returned.
    """
    # Unlike bdev_lvol_delete_lvstore, omitting both selectors is valid here
    # (it means "list everything"), so only the both-specified case is rejected.
    if (uuid and lvs_name):
        raise ValueError("Exactly one of uuid or lvs_name may be specified")
    params = {}
    if uuid:
        params['uuid'] = uuid
    if lvs_name:
        params['lvs_name'] = lvs_name
    return client.call('bdev_lvol_get_lvstores', params)
diff --git a/src/spdk/scripts/rpc/nbd.py b/src/spdk/scripts/rpc/nbd.py
new file mode 100644
index 000000000..55e57d11e
--- /dev/null
+++ b/src/spdk/scripts/rpc/nbd.py
@@ -0,0 +1,25 @@
from .helpers import deprecated_alias


@deprecated_alias('start_nbd_disk')
def nbd_start_disk(client, bdev_name, nbd_device=None):
    """Export a bdev through a kernel NBD device.

    Args:
        bdev_name: name of the bdev to export
        nbd_device: NBD device path, e.g. /dev/nbd0 (optional; the body already
            treated a falsy value as "not given", so a default of None is
            backward compatible — presumably the target then picks a device,
            TODO confirm against the RPC documentation)

    Returns:
        Result of the nbd_start_disk RPC call.
    """
    params = {
        'bdev_name': bdev_name
    }
    if nbd_device:
        params['nbd_device'] = nbd_device
    return client.call('nbd_start_disk', params)


@deprecated_alias('stop_nbd_disk')
def nbd_stop_disk(client, nbd_device):
    """Stop exporting a bdev through the given NBD device.

    Args:
        nbd_device: NBD device path to stop, e.g. /dev/nbd0
    """
    params = {'nbd_device': nbd_device}
    return client.call('nbd_stop_disk', params)


@deprecated_alias('get_nbd_disks')
def nbd_get_disks(client, nbd_device=None):
    """List active NBD exports.

    Args:
        nbd_device: restrict the listing to this NBD device path (optional)

    Returns:
        Result of the nbd_get_disks RPC call.
    """
    params = {}
    if nbd_device:
        params['nbd_device'] = nbd_device
    return client.call('nbd_get_disks', params)
diff --git a/src/spdk/scripts/rpc/net.py b/src/spdk/scripts/rpc/net.py
new file mode 100644
index 000000000..7c479bba8
--- /dev/null
+++ b/src/spdk/scripts/rpc/net.py
@@ -0,0 +1,35 @@
from .helpers import deprecated_alias


@deprecated_alias('add_ip_address')
def net_interface_add_ip_address(client, ifc_index, ip_addr):
    """Assign an IP address to a network interface.

    Args:
        ifc_index: ifc index of the nic device (int)
        ip_addr: IP address to add
    """
    return client.call('net_interface_add_ip_address',
                       {'ifc_index': ifc_index, 'ip_address': ip_addr})


@deprecated_alias('delete_ip_address')
def net_interface_delete_ip_address(client, ifc_index, ip_addr):
    """Remove an IP address from a network interface.

    Args:
        ifc_index: ifc index of the nic device (int)
        ip_addr: IP address to delete
    """
    return client.call('net_interface_delete_ip_address',
                       {'ifc_index': ifc_index, 'ip_address': ip_addr})


@deprecated_alias('get_interfaces')
def net_get_interfaces(client):
    """Fetch the current network interface list.

    Returns:
        List of current interfaces
    """
    return client.call('net_get_interfaces')
diff --git a/src/spdk/scripts/rpc/notify.py b/src/spdk/scripts/rpc/notify.py
new file mode 100644
index 000000000..4907447c0
--- /dev/null
+++ b/src/spdk/scripts/rpc/notify.py
@@ -0,0 +1,30 @@
from .helpers import deprecated_alias


@deprecated_alias('get_notification_types')
def notify_get_types(client):
    """Get the list of supported notification types.

    Returns:
        Result of the notify_get_types RPC call.
    """
    return client.call("notify_get_types")


@deprecated_alias('get_notifications')
def notify_get_notifications(client,
                             id=None,
                             max=None):
    """Fetch recorded notifications.

    Args:
        id: First ID to start fetching from (optional)
        max: Maximum number of notifications to return in response (optional)

    Return:
        Notifications array
    """
    # 'id' and 'max' shadow builtins; the names are kept because callers may
    # pass them as keyword arguments.
    params = {}
    # Compare against None explicitly: the original truthiness checks silently
    # dropped the legitimate value 0 (e.g. fetching from the very first ID).
    if id is not None:
        params['id'] = id

    if max is not None:
        params['max'] = max

    return client.call("notify_get_notifications", params)
diff --git a/src/spdk/scripts/rpc/nvme.py b/src/spdk/scripts/rpc/nvme.py
new file mode 100644
index 000000000..e9a0ba6bb
--- /dev/null
+++ b/src/spdk/scripts/rpc/nvme.py
@@ -0,0 +1,87 @@
from .helpers import deprecated_alias


@deprecated_alias('send_nvme_cmd')
def bdev_nvme_send_cmd(client, name, cmd_type, data_direction, cmdbuf,
                       data=None, metadata=None,
                       data_len=None, metadata_len=None,
                       timeout_ms=None):
    """Send one NVMe command

    Args:
        name: Name of the operating NVMe controller
        cmd_type: Type of nvme cmd. Valid values are: admin, io
        data_direction: Direction of data transfer. Valid values are: c2h, h2c
        cmdbuf: NVMe command encoded by base64 urlsafe
        data: Data transferring to controller from host, encoded by base64 urlsafe
        metadata: metadata transferring to controller from host, encoded by base64 urlsafe
        data_len: Data length required to transfer from controller to host
        metadata_len: Metadata length required to transfer from controller to host
        timeout_ms: Command execution timeout value, in milliseconds, if 0, don't track timeout

    Returns:
        NVMe completion queue entry, requested data and metadata, all are encoded by base64 urlsafe.
    """
    params = {'name': name,
              'cmd_type': cmd_type,
              'data_direction': data_direction,
              'cmdbuf': cmdbuf}

    # Optional fields are only sent when truthy; note that timeout_ms=0
    # therefore cannot be sent explicitly (it is the server-side default).
    if data:
        params['data'] = data
    if metadata:
        params['metadata'] = metadata
    if data_len:
        params['data_len'] = data_len
    if metadata_len:
        params['metadata_len'] = metadata_len
    if timeout_ms:
        params['timeout_ms'] = timeout_ms

    return client.call('bdev_nvme_send_cmd', params)


@deprecated_alias('get_nvme_controllers')
def bdev_nvme_get_controllers(client, name=None):
    """Get information about NVMe controllers.

    Args:
        name: NVMe controller name to query (optional; if omitted, query all NVMe controllers)

    Returns:
        List of NVMe controller information objects.
    """
    params = {}
    if name:
        params['name'] = name
    return client.call('bdev_nvme_get_controllers', params)


def bdev_nvme_opal_init(client, nvme_ctrlr_name, password):
    """Init nvme opal. Take ownership and activate

    Args:
        nvme_ctrlr_name: name of nvme ctrlr
        password: password to init opal
    """
    params = {
        'nvme_ctrlr_name': nvme_ctrlr_name,
        'password': password,
    }

    return client.call('bdev_nvme_opal_init', params)


def bdev_nvme_opal_revert(client, nvme_ctrlr_name, password):
    """Revert opal to default factory settings. Erase all data.

    Args:
        nvme_ctrlr_name: name of nvme ctrlr
        password: password
    """
    params = {
        'nvme_ctrlr_name': nvme_ctrlr_name,
        'password': password,
    }

    return client.call('bdev_nvme_opal_revert', params)
diff --git a/src/spdk/scripts/rpc/nvmf.py b/src/spdk/scripts/rpc/nvmf.py
new file mode 100644
index 000000000..7b2bc3bb6
--- /dev/null
+++ b/src/spdk/scripts/rpc/nvmf.py
@@ -0,0 +1,483 @@
+from .helpers import deprecated_alias
+
+
@deprecated_alias('set_nvmf_target_max_subsystems')
def nvmf_set_max_subsystems(client,
                            max_subsystems=None):
    """Set the maximum number of subsystems for the NVMe-oF target.

    Args:
        max_subsystems: Maximum number of NVMe-oF subsystems (e.g. 1024)

    Returns:
        True or False
    """
    params = {}

    # NOTE(review): max_subsystems is always sent, even when left at None —
    # the RPC effectively treats it as required; confirm callers pass a value.
    params['max_subsystems'] = max_subsystems
    return client.call('nvmf_set_max_subsystems', params)
+
+
@deprecated_alias('set_nvmf_target_config')
def nvmf_set_config(client,
                    acceptor_poll_rate=None,
                    conn_sched=None,
                    passthru_identify_ctrlr=None):
    """Set NVMe-oF target subsystem configuration.

    Args:
        acceptor_poll_rate: Acceptor poll period in microseconds (optional)
        conn_sched: (Deprecated) Ignored
        passthru_identify_ctrlr: forwarded to the target as
            admin_cmd_passthru.identify_ctrlr when truthy (optional)

    Returns:
        True or False
    """
    params = {}

    if acceptor_poll_rate:
        params['acceptor_poll_rate'] = acceptor_poll_rate
    if conn_sched:
        # conn_sched is accepted for backward compatibility but never sent.
        print("WARNING: conn_sched is deprecated and ignored.")
    if passthru_identify_ctrlr:
        admin_cmd_passthru = {}
        admin_cmd_passthru['identify_ctrlr'] = passthru_identify_ctrlr
        params['admin_cmd_passthru'] = admin_cmd_passthru

    return client.call('nvmf_set_config', params)
+
+
def nvmf_create_target(client,
                       name,
                       max_subsystems=0):
    """Create a new NVMe-oF Target.

    Args:
        name: Must be unique within the application
        max_subsystems: Maximum number of NVMe-oF subsystems (e.g. 1024). default: 0 (Uses SPDK_NVMF_DEFAULT_MAX_SUBSYSTEMS).

    Returns:
        The name of the new target.
    """
    return client.call("nvmf_create_target",
                       {'name': name, 'max_subsystems': max_subsystems})
+
+
def nvmf_delete_target(client,
                       name):
    """Destroy an NVMe-oF Target.

    Args:
        name: The name of the target you wish to destroy

    Returns:
        True on success or False
    """
    return client.call("nvmf_delete_target", {'name': name})
+
+
def nvmf_get_targets(client):
    """List every NVMe-oF target known to this application.

    Returns:
        An array of target names.
    """
    return client.call("nvmf_get_targets")
+
+
def nvmf_create_transport(client,
                          trtype,
                          tgt_name=None,
                          max_queue_depth=None,
                          max_qpairs_per_ctrlr=None,
                          max_io_qpairs_per_ctrlr=None,
                          in_capsule_data_size=None,
                          max_io_size=None,
                          io_unit_size=None,
                          max_aq_depth=None,
                          num_shared_buffers=None,
                          buf_cache_size=None,
                          max_srq_depth=None,
                          no_srq=False,
                          c2h_success=True,
                          dif_insert_or_strip=None,
                          sock_priority=None,
                          acceptor_backlog=None,
                          abort_timeout_sec=None):
    """NVMf Transport Create options.

    Args:
        trtype: Transport type (ex. RDMA)
        tgt_name: name of the parent NVMe-oF target (optional)
        max_queue_depth: Max number of outstanding I/O per queue (optional)
        max_qpairs_per_ctrlr: Max number of SQ and CQ per controller (optional, deprecated, use max_io_qpairs_per_ctrlr)
        max_io_qpairs_per_ctrlr: Max number of IO qpairs per controller (optional)
        in_capsule_data_size: Maximum in-capsule data size in bytes (optional)
        max_io_size: Maximum I/O data size in bytes (optional)
        io_unit_size: I/O unit size in bytes (optional)
        max_aq_depth: Max size admin queue per controller (optional)
        num_shared_buffers: The number of pooled data buffers available to the transport (optional)
        buf_cache_size: The number of shared buffers to reserve for each poll group (optional)
        max_srq_depth: Max number of outstanding I/O per shared receive queue - RDMA specific (optional)
        no_srq: Boolean flag to disable SRQ even for devices that support it - RDMA specific (optional)
        c2h_success: Boolean flag to disable the C2H success optimization - TCP specific (optional)
        dif_insert_or_strip: Boolean flag to enable DIF insert/strip for I/O - TCP specific (optional)
        sock_priority: Socket priority for connections - TCP specific (optional)
        acceptor_backlog: Pending connections allowed at one time - RDMA specific (optional)
        abort_timeout_sec: Abort execution timeout value, in seconds (optional)

    Returns:
        True or False
    """
    params = {'trtype': trtype}

    if tgt_name:
        params['tgt_name'] = tgt_name
    if max_qpairs_per_ctrlr:
        print("WARNING: max_qpairs_per_ctrlr is deprecated, please use max_io_qpairs_per_ctrlr.")
        params['max_qpairs_per_ctrlr'] = max_qpairs_per_ctrlr

    # These options are forwarded only when truthy (matching the original
    # behavior: None, 0 and False are all omitted from the request).
    truthy_options = (('max_queue_depth', max_queue_depth),
                      ('max_io_qpairs_per_ctrlr', max_io_qpairs_per_ctrlr),
                      ('in_capsule_data_size', in_capsule_data_size),
                      ('max_io_size', max_io_size),
                      ('io_unit_size', io_unit_size),
                      ('max_aq_depth', max_aq_depth),
                      ('num_shared_buffers', num_shared_buffers),
                      ('buf_cache_size', buf_cache_size),
                      ('max_srq_depth', max_srq_depth),
                      ('no_srq', no_srq),
                      ('dif_insert_or_strip', dif_insert_or_strip),
                      ('sock_priority', sock_priority),
                      ('abort_timeout_sec', abort_timeout_sec))
    for key, value in truthy_options:
        if value:
            params[key] = value

    # These two accept explicit False/0, so only None means "omit".
    if c2h_success is not None:
        params['c2h_success'] = c2h_success
    if acceptor_backlog is not None:
        params['acceptor_backlog'] = acceptor_backlog

    return client.call('nvmf_create_transport', params)
+
+
@deprecated_alias('get_nvmf_transports')
def nvmf_get_transports(client, tgt_name=None):
    """Get list of NVMe-oF transports.

    Args:
        tgt_name: name of the parent NVMe-oF target (optional).

    Returns:
        List of NVMe-oF transport objects.
    """
    params = {'tgt_name': tgt_name} if tgt_name else {}
    return client.call('nvmf_get_transports', params)
+
+
@deprecated_alias('get_nvmf_subsystems')
def nvmf_get_subsystems(client, tgt_name=None):
    """Get list of NVMe-oF subsystems.

    Args:
        tgt_name: name of the parent NVMe-oF target (optional).

    Returns:
        List of NVMe-oF subsystem objects.
    """
    params = {'tgt_name': tgt_name} if tgt_name else {}
    return client.call('nvmf_get_subsystems', params)
+
+
@deprecated_alias('nvmf_subsystem_create')
def nvmf_create_subsystem(client,
                          nqn,
                          serial_number,
                          tgt_name=None,
                          model_number='SPDK bdev Controller',
                          allow_any_host=False,
                          max_namespaces=0):
    """Construct an NVMe over Fabrics target subsystem.

    Args:
        nqn: Subsystem NQN.
        tgt_name: name of the parent NVMe-oF target (optional).
        serial_number: Serial number of virtual controller (only sent when truthy).
        model_number: Model number of virtual controller.
        allow_any_host: Allow any host (True) or enforce allowed host whitelist (False). Default: False.
        max_namespaces: Maximum number of namespaces that can be attached to the subsystem (optional). Default: 0 (Unlimited).

    Returns:
        True or False
    """
    params = {
        'nqn': nqn,
    }

    # serial_number is a required positional argument but is guarded here, so
    # an empty/None value is silently omitted from the request.
    if serial_number:
        params['serial_number'] = serial_number

    if model_number:
        params['model_number'] = model_number

    if allow_any_host:
        params['allow_any_host'] = True

    if max_namespaces:
        params['max_namespaces'] = max_namespaces

    if tgt_name:
        params['tgt_name'] = tgt_name

    return client.call('nvmf_create_subsystem', params)
+
+
def nvmf_subsystem_add_listener(client, nqn, trtype, traddr, trsvcid, adrfam, tgt_name=None):
    """Add a new listen address to an NVMe-oF subsystem.

    Args:
        nqn: Subsystem NQN.
        trtype: Transport type ("RDMA").
        traddr: Transport address.
        trsvcid: Transport service ID.
        adrfam: Address family ("IPv4", "IPv6", "IB", or "FC").
        tgt_name: name of the parent NVMe-oF target (optional).

    Returns:
        True or False
    """
    address = dict(trtype=trtype, traddr=traddr, trsvcid=trsvcid)
    if adrfam:
        address['adrfam'] = adrfam

    params = dict(nqn=nqn, listen_address=address)
    if tgt_name:
        params['tgt_name'] = tgt_name

    return client.call('nvmf_subsystem_add_listener', params)
+
+
def nvmf_subsystem_remove_listener(
        client,
        nqn,
        trtype,
        traddr,
        trsvcid,
        adrfam,
        tgt_name=None):
    """Remove an existing listen address from an NVMe-oF subsystem.

    Args:
        nqn: Subsystem NQN.
        trtype: Transport type ("RDMA").
        traddr: Transport address.
        trsvcid: Transport service ID.
        adrfam: Address family ("IPv4", "IPv6", "IB", or "FC").
        tgt_name: name of the parent NVMe-oF target (optional).

    Returns:
        True or False
    """
    address = dict(trtype=trtype, traddr=traddr, trsvcid=trsvcid)
    if adrfam:
        address['adrfam'] = adrfam

    params = dict(nqn=nqn, listen_address=address)
    if tgt_name:
        params['tgt_name'] = tgt_name

    return client.call('nvmf_subsystem_remove_listener', params)
+
+
def nvmf_subsystem_add_ns(client, nqn, bdev_name, tgt_name=None, ptpl_file=None, nsid=None, nguid=None, eui64=None, uuid=None):
    """Add a namespace to a subsystem.

    Args:
        nqn: Subsystem NQN.
        bdev_name: Name of bdev to expose as a namespace.
        tgt_name: name of the parent NVMe-oF target (optional).
        ptpl_file: file path used for reservation persist-through-power-loss
            state (optional) — missing from the original docstring.
        nsid: Namespace ID (optional).
        nguid: 16-byte namespace globally unique identifier in hexadecimal (optional).
        eui64: 8-byte namespace EUI-64 in hexadecimal (e.g. "ABCDEF0123456789") (optional).
        uuid: Namespace UUID (optional).

    Returns:
        The namespace ID
    """
    ns = {'bdev_name': bdev_name}

    # Optional namespace attributes are only sent when truthy, matching the
    # original behavior.
    for key, value in (('ptpl_file', ptpl_file),
                       ('nsid', nsid),
                       ('nguid', nguid),
                       ('eui64', eui64),
                       ('uuid', uuid)):
        if value:
            ns[key] = value

    params = {'nqn': nqn,
              'namespace': ns}

    if tgt_name:
        params['tgt_name'] = tgt_name

    return client.call('nvmf_subsystem_add_ns', params)
+
+
def nvmf_subsystem_remove_ns(client, nqn, nsid, tgt_name=None):
    """Remove an existing namespace from a subsystem.

    Args:
        nqn: Subsystem NQN.
        nsid: Namespace ID.
        tgt_name: name of the parent NVMe-oF target (optional).

    Returns:
        True or False
    """
    params = dict(nqn=nqn, nsid=nsid)
    if tgt_name:
        params['tgt_name'] = tgt_name
    return client.call('nvmf_subsystem_remove_ns', params)
+
+
def nvmf_subsystem_add_host(client, nqn, host, tgt_name=None):
    """Add a host NQN to the whitelist of allowed hosts.

    Args:
        nqn: Subsystem NQN.
        host: Host NQN to add to the list of allowed host NQNs
        tgt_name: name of the parent NVMe-oF target (optional).

    Returns:
        True or False
    """
    params = dict(nqn=nqn, host=host)
    if tgt_name:
        params['tgt_name'] = tgt_name
    return client.call('nvmf_subsystem_add_host', params)
+
+
def nvmf_subsystem_remove_host(client, nqn, host, tgt_name=None):
    """Remove a host NQN from the whitelist of allowed hosts.

    Args:
        nqn: Subsystem NQN.
        host: Host NQN to remove from the list of allowed host NQNs
        tgt_name: name of the parent NVMe-oF target (optional).

    Returns:
        True or False
    """
    params = dict(nqn=nqn, host=host)
    if tgt_name:
        params['tgt_name'] = tgt_name
    return client.call('nvmf_subsystem_remove_host', params)
+
+
def nvmf_subsystem_allow_any_host(client, nqn, disable, tgt_name=None):
    """Configure a subsystem to allow any host to connect or to enforce the host NQN whitelist.

    Args:
        nqn: Subsystem NQN.
        disable: when true, enforce the allowed-host whitelist (allow_any_host
            is sent as false); when false, allow any host. The original
            docstring described the inverse meaning.
        tgt_name: name of the parent NVMe-oF target (optional).

    Returns:
        True or False
    """
    # 'not disable' is equivalent to the original 'False if disable else True'.
    params = {'nqn': nqn, 'allow_any_host': not disable}

    if tgt_name:
        params['tgt_name'] = tgt_name

    return client.call('nvmf_subsystem_allow_any_host', params)
+
+
@deprecated_alias('delete_nvmf_subsystem')
def nvmf_delete_subsystem(client, nqn, tgt_name=None):
    """Delete an existing NVMe-oF subsystem.

    Args:
        nqn: Subsystem NQN.
        tgt_name: name of the parent NVMe-oF target (optional).

    Returns:
        True or False
    """
    params = {'nqn': nqn}
    if tgt_name:
        params['tgt_name'] = tgt_name
    return client.call('nvmf_delete_subsystem', params)
+
+
def nvmf_get_stats(client, tgt_name=None):
    """Query NVMf statistics.

    Args:
        tgt_name: name of the parent NVMe-oF target (optional).

    Returns:
        Current NVMf statistics.
    """
    params = {'tgt_name': tgt_name} if tgt_name else {}
    return client.call('nvmf_get_stats', params)
diff --git a/src/spdk/scripts/rpc/pmem.py b/src/spdk/scripts/rpc/pmem.py
new file mode 100644
index 000000000..403674cf1
--- /dev/null
+++ b/src/spdk/scripts/rpc/pmem.py
@@ -0,0 +1,35 @@
from .helpers import deprecated_alias


@deprecated_alias('create_pmem_pool')
def bdev_pmem_create_pool(client, pmem_file, num_blocks, block_size):
    """Create a pmem pool at the given path.

    Args:
        pmem_file: path at which to create the pmem pool
        num_blocks: number of blocks for the created pmem pool file
        block_size: block size for the pmem pool file
    """
    return client.call('bdev_pmem_create_pool',
                       {'pmem_file': pmem_file,
                        'num_blocks': num_blocks,
                        'block_size': block_size})


@deprecated_alias('pmem_pool_info')
def bdev_pmem_get_pool_info(client, pmem_file):
    """Get details about a pmem pool.

    Args:
        pmem_file: path to the pmem pool
    """
    return client.call('bdev_pmem_get_pool_info', {'pmem_file': pmem_file})


@deprecated_alias('delete_pmem_pool')
def bdev_pmem_delete_pool(client, pmem_file):
    """Delete a pmem pool.

    Args:
        pmem_file: path to the pmem pool
    """
    return client.call('bdev_pmem_delete_pool', {'pmem_file': pmem_file})
diff --git a/src/spdk/scripts/rpc/sock.py b/src/spdk/scripts/rpc/sock.py
new file mode 100644
index 000000000..34d7f100d
--- /dev/null
+++ b/src/spdk/scripts/rpc/sock.py
@@ -0,0 +1,41 @@
def sock_impl_get_options(client, impl_name=None):
    """Get parameters for the socket layer implementation.

    Args:
        impl_name: name of socket implementation, e.g. posix
    """
    return client.call('sock_impl_get_options', {'impl_name': impl_name})


def sock_impl_set_options(client,
                          impl_name=None,
                          recv_buf_size=None,
                          send_buf_size=None,
                          enable_recv_pipe=None,
                          enable_zerocopy_send=None):
    """Set parameters for the socket layer implementation.

    Args:
        impl_name: name of socket implementation, e.g. posix
        recv_buf_size: size of socket receive buffer in bytes (optional)
        send_buf_size: size of socket send buffer in bytes (optional)
        enable_recv_pipe: enable or disable receive pipe (optional)
        enable_zerocopy_send: enable or disable zerocopy on send (optional)
    """
    params = {'impl_name': impl_name}

    optional = (('recv_buf_size', recv_buf_size),
                ('send_buf_size', send_buf_size),
                ('enable_recv_pipe', enable_recv_pipe),
                ('enable_zerocopy_send', enable_zerocopy_send))
    for key, value in optional:
        if value is not None:
            params[key] = value

    return client.call('sock_impl_set_options', params)
diff --git a/src/spdk/scripts/rpc/subsystem.py b/src/spdk/scripts/rpc/subsystem.py
new file mode 100644
index 000000000..a52adbf6b
--- /dev/null
+++ b/src/spdk/scripts/rpc/subsystem.py
@@ -0,0 +1,12 @@
from .helpers import deprecated_alias


@deprecated_alias('get_subsystems')
def framework_get_subsystems(client):
    """List the framework subsystems."""
    return client.call('framework_get_subsystems')


@deprecated_alias('get_subsystem_config')
def framework_get_config(client, name):
    """Get the current configuration of the named framework subsystem."""
    return client.call('framework_get_config', {'name': name})
diff --git a/src/spdk/scripts/rpc/trace.py b/src/spdk/scripts/rpc/trace.py
new file mode 100644
index 000000000..60667b5e8
--- /dev/null
+++ b/src/spdk/scripts/rpc/trace.py
@@ -0,0 +1,33 @@
from .helpers import deprecated_alias


@deprecated_alias('enable_tpoint_group')
def trace_enable_tpoint_group(client, name):
    """Enable tracing for a specific tpoint group.

    Args:
        name: trace group name to enable in tpoint_group_mask (for example "bdev").
    """
    return client.call('trace_enable_tpoint_group', {'name': name})


@deprecated_alias('disable_tpoint_group')
def trace_disable_tpoint_group(client, name):
    """Disable tracing for a specific tpoint group.

    Args:
        name: trace group name to disable in tpoint_group_mask (for example "bdev").
    """
    return client.call('trace_disable_tpoint_group', {'name': name})


@deprecated_alias('get_tpoint_group_mask')
def trace_get_tpoint_group_mask(client):
    """Fetch the trace point group mask.

    Returns:
        List of trace point group mask
    """
    return client.call('trace_get_tpoint_group_mask')
diff --git a/src/spdk/scripts/rpc/vhost.py b/src/spdk/scripts/rpc/vhost.py
new file mode 100644
index 000000000..b2e0a846c
--- /dev/null
+++ b/src/spdk/scripts/rpc/vhost.py
@@ -0,0 +1,190 @@
from .helpers import deprecated_alias


@deprecated_alias('set_vhost_controller_coalescing')
def vhost_controller_set_coalescing(client, ctrlr, delay_base_us, iops_threshold):
    """Set coalescing for vhost controller.
    Args:
        ctrlr: controller name
        delay_base_us: base delay time
        iops_threshold: IOPS threshold when coalescing is enabled
    """
    params = {
        'ctrlr': ctrlr,
        'delay_base_us': delay_base_us,
        'iops_threshold': iops_threshold,
    }
    return client.call('vhost_controller_set_coalescing', params)


@deprecated_alias('construct_vhost_scsi_controller')
def vhost_create_scsi_controller(client, ctrlr, cpumask=None):
    """Create a vhost scsi controller.
    Args:
        ctrlr: controller name
        cpumask: cpu mask for this controller (optional; omitted when falsy)
    """
    params = {'ctrlr': ctrlr}

    if cpumask:
        params['cpumask'] = cpumask

    return client.call('vhost_create_scsi_controller', params)


@deprecated_alias('add_vhost_scsi_lun')
def vhost_scsi_controller_add_target(client, ctrlr, scsi_target_num, bdev_name):
    """Add LUN to vhost scsi controller target.
    Args:
        ctrlr: controller name
        scsi_target_num: target number to use
        bdev_name: name of bdev to add to target
    """
    params = {
        'ctrlr': ctrlr,
        'scsi_target_num': scsi_target_num,
        'bdev_name': bdev_name,
    }
    return client.call('vhost_scsi_controller_add_target', params)


@deprecated_alias('remove_vhost_scsi_target')
def vhost_scsi_controller_remove_target(client, ctrlr, scsi_target_num):
    """Remove target from vhost scsi controller.
    Args:
        ctrlr: controller name to remove target from
        scsi_target_num: number of target to remove from controller
    """
    params = {
        'ctrlr': ctrlr,
        'scsi_target_num': scsi_target_num
    }
    return client.call('vhost_scsi_controller_remove_target', params)


@deprecated_alias('construct_vhost_nvme_controller')
def vhost_create_nvme_controller(client, ctrlr, io_queues, cpumask=None):
    """Construct vhost NVMe controller.
    Args:
        ctrlr: controller name
        io_queues: number of IO queues for the controller
        cpumask: cpu mask for this controller (optional; omitted when falsy)
    """
    params = {
        'ctrlr': ctrlr,
        'io_queues': io_queues
    }

    if cpumask:
        params['cpumask'] = cpumask

    return client.call('vhost_create_nvme_controller', params)


@deprecated_alias('add_vhost_nvme_ns')
def vhost_nvme_controller_add_ns(client, ctrlr, bdev_name):
    """Add namespace to vhost nvme controller.
    Args:
        ctrlr: controller name where to add a namespace
        bdev_name: block device name for a new namespace
    """
    params = {
        'ctrlr': ctrlr,
        'bdev_name': bdev_name,
    }

    return client.call('vhost_nvme_controller_add_ns', params)


@deprecated_alias('construct_vhost_blk_controller')
def vhost_create_blk_controller(client, ctrlr, dev_name, cpumask=None, readonly=None, packed_ring=None):
    """Create vhost BLK controller.
    Args:
        ctrlr: controller name
        dev_name: device name to add to controller
        cpumask: cpu mask for this controller (optional)
        readonly: set controller as read-only (optional; omitted when falsy)
        packed_ring: support controller packed_ring (optional; omitted when falsy)
    """
    params = {
        'ctrlr': ctrlr,
        'dev_name': dev_name,
    }
    if cpumask:
        params['cpumask'] = cpumask
    if readonly:
        params['readonly'] = readonly
    if packed_ring:
        params['packed_ring'] = packed_ring
    return client.call('vhost_create_blk_controller', params)


@deprecated_alias('get_vhost_controllers')
def vhost_get_controllers(client, name=None):
    """Get information about configured vhost controllers.

    Args:
        name: controller name to query (optional; if omitted, query all controllers)

    Returns:
        List of vhost controllers.
    """
    params = {}
    if name:
        params['name'] = name
    return client.call('vhost_get_controllers', params)


@deprecated_alias('remove_vhost_controller')
def vhost_delete_controller(client, ctrlr):
    """Delete vhost controller from configuration.
    Args:
        ctrlr: controller name to remove
    """
    params = {'ctrlr': ctrlr}
    return client.call('vhost_delete_controller', params)


@deprecated_alias('construct_virtio_dev')
def bdev_virtio_attach_controller(client, name, trtype, traddr, dev_type, vq_count=None, vq_size=None):
    """Attaches virtio controller using
    provided transport type and device type.
    This will also create bdevs for any block
    devices connected to that controller.
    Args:
        name: name base for new created bdevs
        trtype: virtio target transport type: pci or user
        traddr: transport type specific target address: e.g. UNIX
                domain socket path or BDF
        dev_type: device type: blk or scsi
        vq_count: number of virtual queues to be used (optional; omitted when falsy)
        vq_size: size of each queue (optional; omitted when falsy)
    """
    params = {
        'name': name,
        'trtype': trtype,
        'traddr': traddr,
        'dev_type': dev_type
    }
    if vq_count:
        params['vq_count'] = vq_count
    if vq_size:
        params['vq_size'] = vq_size
    return client.call('bdev_virtio_attach_controller', params)
+
+
@deprecated_alias('remove_virtio_bdev')
def bdev_virtio_detach_controller(client, name):
    """Remove a Virtio device.

    This will delete all bdevs exposed by this device.

    Args:
        name: virtio device name

    Note:
        The original alias string contained a trailing space
        ('remove_virtio_bdev '), so the deprecated name could never be
        invoked as written; the space is removed here.
    """
    params = {'name': name}
    return client.call('bdev_virtio_detach_controller', params)
+
+
@deprecated_alias('get_virtio_scsi_devs')
def bdev_virtio_scsi_get_devices(client):
    """Return the list of attached virtio scsi devices."""
    return client.call('bdev_virtio_scsi_get_devices')
diff --git a/src/spdk/scripts/rpc/vmd.py b/src/spdk/scripts/rpc/vmd.py
new file mode 100644
index 000000000..067271ef1
--- /dev/null
+++ b/src/spdk/scripts/rpc/vmd.py
@@ -0,0 +1,3 @@
+def enable_vmd(client):
+    """Enable VMD enumeration.
+
+    Args:
+        client: JSON-RPC client used to issue the 'enable_vmd' call.
+    """
+    return client.call('enable_vmd')
diff --git a/src/spdk/scripts/rpc_http_proxy.py b/src/spdk/scripts/rpc_http_proxy.py
new file mode 100755
index 000000000..ea9d17b16
--- /dev/null
+++ b/src/spdk/scripts/rpc_http_proxy.py
@@ -0,0 +1,124 @@
+#!/usr/bin/env python3
+
+import argparse
+import base64
+import errno
+import json
+import socket
+import ssl
+import sys
+try:
+ from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
+except ImportError:
+ from http.server import HTTPServer
+ from http.server import BaseHTTPRequestHandler
+
+# Path to the SPDK JSON-RPC Unix domain socket; assigned from CLI args in main().
+rpc_sock = None
+
+parser = argparse.ArgumentParser(description='http(s) proxy for SPDK RPC calls')
+parser.add_argument('host', help='Host name / IP representing proxy server')
+parser.add_argument('port', help='Port number', type=int)
+parser.add_argument('user', help='User name used for authentication')
+parser.add_argument('password', help='Password used for authentication')
+parser.add_argument('-s', dest='sock', help='RPC domain socket path', default='/var/tmp/spdk.sock')
+parser.add_argument('-c', dest='cert', help='SSL certificate')
+
+
+def print_usage_and_exit(status):
+    """Print a usage summary and terminate with the given exit status.
+
+    NOTE(review): argparse already generates usage/help for this script; this
+    helper looks redundant and is not called in the visible code - confirm
+    whether it can be removed.
+    """
+    print('Usage: rpc_http_proxy.py <server IP> <server port> <user name>' +
+          ' <password> <SPDK RPC socket (optional, default: /var/tmp/spdk.sock)>')
+    sys.exit(status)
+
+
+def rpc_call(req):
+ global rpc_sock
+
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ sock.connect(rpc_sock)
+ sock.sendall(req)
+
+ if 'id' not in json.loads(req.decode('ascii')):
+ sock.close()
+ return None
+
+ buf = ''
+ closed = False
+ response = None
+
+ while not closed:
+ newdata = sock.recv(1024)
+ if (newdata == b''):
+ closed = True
+ buf += newdata.decode('ascii')
+ try:
+ response = json.loads(buf)
+ except ValueError:
+ continue # incomplete response; keep buffering
+ break
+
+ sock.close()
+
+ if not response and len(buf) > 0:
+ raise
+
+ return buf
+
+
+class ServerHandler(BaseHTTPRequestHandler):
+    """HTTP handler that checks Basic authentication and relays POSTed
+    JSON-RPC requests to the SPDK socket via rpc_call()."""
+
+    # base64-encoded "user:password" expected in the Authorization header;
+    # assigned from CLI args in main().
+    key = ""
+
+    def do_HEAD(self):
+        # 200 OK header block; also used as the response prefix in do_POST.
+        self.send_response(200)
+        self.send_header('Content-type', 'text/html')
+        self.end_headers()
+
+    def do_AUTHHEAD(self):
+        # 401 Unauthorized, sent when the Authorization header doesn't match.
+        self.send_response(401)
+        self.send_header('WWW-Authenticate', 'text/html')
+        self.send_header('Content-type', 'text/html')
+        self.end_headers()
+
+    def do_INTERNALERROR(self):
+        # 500 Internal Server Error, sent when the RPC relay fails.
+        self.send_response(500)
+        self.send_header('Content-type', 'text/html')
+        self.end_headers()
+
+    def do_POST(self):
+        # Require exact Basic-auth match before touching the request body.
+        if self.headers['Authorization'] != 'Basic ' + self.key:
+            self.do_AUTHHEAD()
+        else:
+            data_string = self.rfile.read(int(self.headers['Content-Length']))
+
+            try:
+                response = rpc_call(data_string)
+                # rpc_call returns None for notifications; nothing to send then.
+                if response is not None:
+                    self.do_HEAD()
+                    self.wfile.write(bytes(response.encode(encoding='ascii')))
+            except ValueError:
+                self.do_INTERNALERROR()
+
+
+def main():
+    """Parse arguments and run the HTTP(S)-to-SPDK-RPC proxy forever."""
+    global rpc_sock
+
+    args = parser.parse_args()
+    rpc_sock = args.sock
+
+    # encoding user name and password
+    key = base64.b64encode((args.user+':'+args.password).encode(encoding='ascii')).decode('ascii')
+
+    try:
+        ServerHandler.key = key
+        httpd = HTTPServer((args.host, args.port), ServerHandler)
+        if args.cert is not None:
+            # NOTE(review): ssl.wrap_socket is deprecated (removed in Python
+            # 3.12); consider ssl.SSLContext().wrap_socket - confirm the
+            # minimum supported Python version before changing.
+            httpd.socket = ssl.wrap_socket(httpd.socket, certfile=args.cert, server_side=True)
+        print('Started RPC http proxy server')
+        httpd.serve_forever()
+    except KeyboardInterrupt:
+        print('Shutting down server')
+        httpd.socket.close()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/src/spdk/scripts/rxe_cfg_small.sh b/src/spdk/scripts/rxe_cfg_small.sh
new file mode 100755
index 000000000..0674efe39
--- /dev/null
+++ b/src/spdk/scripts/rxe_cfg_small.sh
@@ -0,0 +1,265 @@
+#!/usr/bin/env bash
+[[ $(uname -s) == Linux ]] || exit 0
+
+shopt -s extglob nullglob
+
+declare -r rdma_rxe=/sys/module/rdma_rxe
+declare -r rdma_rxe_add=$rdma_rxe/parameters/add
+declare -r rdma_rxe_rm=$rdma_rxe/parameters/remove
+
+declare -r infiniband=/sys/class/infiniband
+declare -r net=/sys/class/net
+
+# uevent <sysfs dir> <key> [fallback]
+# Print the value of <key> from the dir's uevent file, or [fallback] when the
+# key is unset. Body runs in a subshell so sourcing the file stays isolated.
+uevent() (
+    [[ -e $1/uevent ]] || return 0
+
+    source "$1/uevent"
+
+    if [[ -v $2 ]]; then
+        echo "${!2}"
+    elif [[ -n $3 ]]; then
+        echo "$3"
+    fi
+)
+
+# modprobe wrapper that suppresses error messages (-q).
+modprobeq() {
+    modprobe -q "$@"
+}
+
+# Print the first IPv4 address of interface $1 (prefix length stripped),
+# or a single space when no address is assigned (keeps table columns aligned).
+get_ipv4() {
+    local ip
+
+    # Get only the first ip
+    read -r _ _ _ ip _ < <(ip -o -4 addr show dev "$1")
+    if [[ -n $ip ]]; then
+        echo "${ip%/*}"
+    else
+        echo " "
+    fi
+}
+
+# Print the active MTU reported by ibv_devinfo for rxe device $1,
+# or nothing when its uverbs char device does not exist.
+get_rxe_mtu() {
+    local rxe=$1
+    local mtu
+
+    # uverbs device number matches the rxe index (rxe0 -> uverbs0).
+    [[ -c /dev/infiniband/uverbs${rxe/rxe/} ]] || return 0
+
+    [[ $(ibv_devinfo -d "$rxe") =~ active_mtu:(.*\ \(.*\)) ]]
+    echo "${BASH_REMATCH[1]:-(?)}"
+}
+
+# Load the RDMA/rxe kernel modules and create an rxe device on every capable
+# interface. Returns quietly when a module is missing from this kernel.
+start() {
+    local modules module
+
+    modules=(
+        "ib_core"
+        "ib_uverbs"
+        "rdma_ucm"
+        "rdma_rxe"
+    )
+
+    for module in "${modules[@]}"; do
+        # Already loaded? Nothing to check for this one.
+        [[ -e /sys/module/$module ]] && continue
+        # Module not built for this kernel -> bail out without error.
+        if [[ ! -e $(modinfo -F filename "$module") ]]; then
+            return 0
+        fi
+    done 2> /dev/null
+
+    modprobeq -a "${modules[@]}" || return 1
+    add_rxe all
+}
+
+# Remove every rxe device, then unload rdma_rxe; warn when the module
+# refuses to unload (reboot needed to fully clean up).
+stop() {
+    local rxe
+
+    for rxe in "$infiniband/rxe"+([0-9]); do
+        remove_rxe "${rxe##*/}"
+    done
+
+    if ! modprobeq -r rdma_rxe \
+        || [[ -e $rdma_rxe ]]; then
+        printf 'unable to unload drivers, reboot required\n'
+    fi
+}
+
+# Register the column headers of the status table with size_print_fields().
+status_header() {
+    local header=("Name" "Link" "Driver" "Speed" "NMTU" "IPv4_addr" "RDEV" "RMTU")
+
+    size_print_fields "${header[@]}"
+}
+
+# Gather per-interface info (link, driver, speed, MTU, IP, associated rxe
+# device) for every ethernet device and print it as an aligned table.
+status() {
+    if [[ ! -e $rdma_rxe ]]; then
+        printf 'rdma_rxe module not loaded\n' >&2
+    fi
+
+    local dev net_devs
+    local link_map
+
+    # Map sysfs 'carrier' values (0/1) to human-readable strings.
+    link_map[0]=no
+    link_map[1]=yes
+
+    status_header
+
+    local name link driver speed mtu ip rxe rxe_dev active_mtu
+    for dev in "$net/"!(bonding_masters); do
+        # type 1 == ARPHRD_ETHER: only ethernet interfaces are listed.
+        (($(< "$dev/type") == 1)) || continue
+
+        name="" link="" driver=""
+        speed="" mtu="" ip=""
+        rxe_dev="" active_mtu=""
+
+        name=${dev##*/}
+        # Find the rxe device whose parent is this interface, if any.
+        for rxe in "$infiniband/rxe"+([0-9]); do
+            if [[ $(< "$rxe/parent") == "$name" ]]; then
+                rxe_dev=${rxe##*/}
+                active_mtu=$(get_rxe_mtu "$rxe_dev")
+                break
+            fi
+        done
+
+        link=${link_map[$(< "$dev/carrier")]}
+
+        if [[ -e $dev/device/driver ]]; then
+            driver=$(readlink -f "$dev/device/driver")
+            driver=${driver##*/}
+        elif [[ -e /sys/devices/virtual/net/${dev##*/} ]]; then
+            # Try to be smart and get the type of the device instead
+            driver=$(uevent "$dev" "DEVTYPE" "virtual")
+        fi
+
+        # Speed is only meaningful while the link is up.
+        if [[ $link == yes ]]; then
+            speed=$(< "$dev/speed")
+            if ((speed >= 1000)); then
+                speed=$((speed / 1000))GigE
+            elif ((speed > 0)); then
+                speed=${speed}Mb/s
+            else
+                speed=""
+            fi
+        fi
+
+        mtu=$(< "$dev/mtu")
+        ip=$(get_ipv4 "$name")
+
+        size_print_fields \
+            "$name" \
+            "$link" \
+            "$driver" \
+            "$speed" \
+            "$mtu" \
+            "$ip" \
+            "$rxe_dev" \
+            "$active_mtu"
+    done 2> /dev/null
+    print_status
+}
+
+# Buffer one table row and update per-column maximum widths.
+# Globals (shared with print_status): lengths (column widths),
+# lines (row references), lineno (row counter, starts unset -> 0).
+size_print_fields() {
+    local fields=("$@") field
+    local -g lengths lines lineno
+
+    for field in "${!fields[@]}"; do
+        # Empty cells become the "###" placeholder so column math still works;
+        # print_status renders "###" as blank space.
+        if [[ -z ${fields[field]} ]]; then
+            fields[field]="###"
+        fi
+        if [[ -z ${lengths[field]} ]]; then
+            lengths[field]=${#fields[field]}
+        else
+            lengths[field]=$((lengths[field] > ${#fields[field]} ? lengths[field] : ${#fields[field]}))
+        fi
+    done
+
+    # Store the row in a uniquely named global array; lines keeps an
+    # indirect "name[@]" reference for later expansion.
+    eval "local -g _line_$lineno=(\"\${fields[@]}\")"
+    lines+=("_line_${lineno}[@]")
+    ((++lineno))
+}
+
+# Render all rows buffered by size_print_fields(), padding each column to
+# its recorded maximum width. "###" placeholders print as blanks.
+print_status() {
+    local field field_ref fieldidx
+    local pad
+
+    for field_ref in "${lines[@]}"; do
+        printf '  '
+        fieldidx=0
+        # ${!field_ref} expands the indirect "arrayname[@]" reference.
+        for field in "${!field_ref}"; do
+            if [[ -n $field ]]; then
+                pad=$((lengths[fieldidx] - ${#field} + 2))
+            else
+                pad=$((lengths[fieldidx] + 2))
+            fi
+            if [[ -n $field && $field != "###" ]]; then
+                printf '%s' "$field"
+            else
+                printf ' '
+            fi
+            printf '%*s' "$pad" ""
+            ((++fieldidx))
+        done
+        printf '\n'
+    done
+}
+
+# add_rxe [iface|all]
+# Create an rxe device on the given interface, or on every ethernet
+# interface when $1 is "all" or omitted.
+add_rxe() {
+    local dev net_devs
+
+    [[ -e $rdma_rxe/parameters ]] || return 1
+
+    if [[ -z $1 || $1 == all ]]; then
+        net_devs=("$net/"!(bonding_masters))
+    elif [[ -e $net/$1 ]]; then
+        net_devs=("$net/$1")
+    else
+        printf '%s interface does not exist\n' "$1"
+        return 1
+    fi
+
+    for dev in "${net_devs[@]}"; do
+        # type 1 == ARPHRD_ETHER; skip everything else.
+        (($(< "$dev/type") != 1)) && continue
+        echo "${dev##*/}" > "$rdma_rxe_add"
+    done 2> /dev/null
+}
+
+# Remove rxe device $1 (a name or path; only the basename is used).
+remove_rxe() {
+    [[ -e $infiniband/${1##*/} ]] && echo "${1##*/}" > "$rdma_rxe_rm"
+}
+
+# Bring up (IFF_UP) the parent network interface of every rxe device.
+link_up_rxes() {
+    local rxe parent
+
+    for rxe in "$infiniband/rxe"+([0-9]); do
+        # Fix: dropped the stray leading '/' ($(< /"$rxe/parent")) which
+        # produced a double-slash path; $rxe is already absolute.
+        parent=$(< "$rxe/parent")
+        link_up "$parent"
+    done
+}
+
+# Set the IFF_UP flag (0x1) on interface $1 via its sysfs flags file.
+link_up() {
+    [[ -e $net/$1 ]] || return 0
+
+    echo $(($(< "$net/$1/flags") | 0x1)) > "$net/$1/flags"
+}
+
+# CLI dispatch: start | stop | add [dev] | remove <dev> | status [filter...]
+# Default action is "status".
+case "${1:-status}" in
+    start)
+        start
+        ;;
+    stop)
+        stop
+        ;;
+    add)
+        add_rxe "${2:-all}"
+        ;;
+    remove)
+        remove_rxe "$2"
+        ;;
+    status)
+        # Join remaining args with '|' into an ERE alternation used to
+        # filter the status table rows ('.' matches everything when empty).
+        IFS= read -r match < <(
+            IFS="|"
+            printf '%s\n' "${*:2}"
+        )
+        status | grep -E "${match:-.}"
+        ;;
+    *)
+        printf 'Invalid argument (%s)\n' "$1"
+        ;;
+esac
diff --git a/src/spdk/scripts/setup.sh b/src/spdk/scripts/setup.sh
new file mode 100755
index 000000000..ca271e173
--- /dev/null
+++ b/src/spdk/scripts/setup.sh
@@ -0,0 +1,885 @@
+#!/usr/bin/env bash
+# SPDK setup: allocate hugepages and (un)bind PCI devices to userspace drivers.
+
+set -e
+
+os=$(uname -s)
+
+if [[ $os != Linux && $os != FreeBSD ]]; then
+    echo "Not supported platform ($os), aborting"
+    exit 1
+fi
+
+# Repository root (this script lives in <root>/scripts).
+rootdir=$(readlink -f $(dirname $0))/..
+source "$rootdir/scripts/common.sh"
+
+# usage <script path> [error message]
+# Print help (optionally preceded by an error) and exit 0.
+function usage() {
+    if [[ $os == Linux ]]; then
+        options="[config|reset|status|cleanup|help]"
+    else
+        options="[config|reset|help]"
+    fi
+
+    [[ -n $2 ]] && (
+        echo "$2"
+        echo ""
+    )
+    echo "Helper script for allocating hugepages and binding NVMe, I/OAT, VMD and Virtio devices"
+    echo "to a generic VFIO kernel driver. If VFIO is not available on the system, this script"
+    echo "will fall back to UIO. NVMe and Virtio devices with active mountpoints will be ignored."
+    echo "All hugepage operations use default hugepage size on the system (hugepagesz)."
+    echo "Usage: $(basename $1) $options"
+    echo
+    echo "$options - as following:"
+    echo "config            Default mode. Allocate hugepages and bind PCI devices."
+    if [[ $os == Linux ]]; then
+        echo "cleanup            Remove any orphaned files that can be left in the system after SPDK application exit"
+    fi
+    echo "reset             Rebind PCI devices back to their original drivers."
+    echo "                  Also cleanup any leftover spdk files/resources."
+    echo "                  Hugepage memory size will remain unchanged."
+    if [[ $os == Linux ]]; then
+        echo "status            Print status of all SPDK-compatible devices on the system."
+    fi
+    echo "help              Print this help message."
+    echo
+    echo "The following environment variables can be specified."
+    echo "HUGEMEM           Size of hugepage memory to allocate (in MB). 2048 by default."
+    echo "                  For NUMA systems, the hugepages will be evenly distributed"
+    echo "                  between CPU nodes"
+    echo "NRHUGE            Number of hugepages to allocate. This variable overwrites HUGEMEM."
+    echo "HUGENODE          Specific NUMA node to allocate hugepages on. To allocate"
+    echo "                  hugepages on multiple nodes run this script multiple times -"
+    echo "                  once for each node."
+    echo "PCI_WHITELIST"
+    echo "PCI_BLACKLIST     Whitespace separated list of PCI devices (NVMe, I/OAT, VMD, Virtio)."
+    echo "                  Each device must be specified as a full PCI address."
+    echo "                  E.g. PCI_WHITELIST=\"0000:01:00.0 0000:02:00.0\""
+    echo "                  To blacklist all PCI devices use a non-valid address."
+    echo "                  E.g. PCI_WHITELIST=\"none\""
+    echo "                  If PCI_WHITELIST and PCI_BLACKLIST are empty or unset, all PCI devices"
+    echo "                  will be bound."
+    echo "                  Each device in PCI_BLACKLIST will be ignored (driver won't be changed)."
+    echo "                  PCI_BLACKLIST has precedence over PCI_WHITELIST."
+    echo "TARGET_USER       User that will own hugepage mountpoint directory and vfio groups."
+    echo "                  By default the current user will be used."
+    echo "DRIVER_OVERRIDE   Disable automatic vfio-pci/uio_pci_generic selection and forcefully"
+    echo "                  bind devices to the given driver."
+    echo "                  E.g. DRIVER_OVERRIDE=uio_pci_generic or DRIVER_OVERRIDE=/home/public/dpdk/build/kmod/igb_uio.ko"
+    exit 0
+}
+
+# In monolithic kernels the lsmod won't work. So
+# back that with a /sys/modules. We also check
+# /sys/bus/pci/drivers/ as neither lsmod nor /sys/modules might
+# contain needed info (like in Fedora-like OS).
+# Returns: 0 = driver absent, 1 = visible in lsmod, 2 = visible in sysfs.
+function check_for_driver() {
+    if lsmod | grep -q ${1//-/_}; then
+        return 1
+    fi
+
+    # Check both dash and underscore spellings of the module name.
+    if [[ -d /sys/module/${1} || -d \
+        /sys/module/${1//-/_} || -d \
+        /sys/bus/pci/drivers/${1} || -d \
+        /sys/bus/pci/drivers/${1//-/_} ]]; then
+        return 2
+    fi
+    return 0
+}
+
+# pci_dev_echo <bdf> <message...>
+# Print a message prefixed with the device's BDF and vendor/device IDs.
+function pci_dev_echo() {
+    local bdf="$1"
+    local vendor
+    local device
+    vendor="$(cat /sys/bus/pci/devices/$bdf/vendor)"
+    device="$(cat /sys/bus/pci/devices/$bdf/device)"
+    shift
+    echo "$bdf (${vendor#0x} ${device#0x}): $*"
+}
+
+# linux_bind_driver <bdf> <driver>
+# Unbind the device from its current driver (if any) and bind it to <driver>
+# via sysfs new_id/bind. Also chowns the resulting vfio group to TARGET_USER.
+function linux_bind_driver() {
+    bdf="$1"
+    driver_name="$2"
+    old_driver_name="no driver"
+    # "vendor device" pair, e.g. "8086 0953", for the new_id/remove_id files.
+    ven_dev_id=$(lspci -n -s $bdf | cut -d' ' -f3 | sed 's/:/ /')
+
+    if [ -e "/sys/bus/pci/devices/$bdf/driver" ]; then
+        old_driver_name=$(basename $(readlink /sys/bus/pci/devices/$bdf/driver))
+
+        if [ "$driver_name" = "$old_driver_name" ]; then
+            pci_dev_echo "$bdf" "Already using the $old_driver_name driver"
+            return 0
+        fi
+
+        echo "$ven_dev_id" > "/sys/bus/pci/devices/$bdf/driver/remove_id" 2> /dev/null || true
+        echo "$bdf" > "/sys/bus/pci/devices/$bdf/driver/unbind"
+    fi
+
+    pci_dev_echo "$bdf" "$old_driver_name -> $driver_name"
+
+    # new_id may auto-bind; the explicit bind is best-effort for when it doesn't.
+    echo "$ven_dev_id" > "/sys/bus/pci/drivers/$driver_name/new_id" 2> /dev/null || true
+    echo "$bdf" > "/sys/bus/pci/drivers/$driver_name/bind" 2> /dev/null || true
+
+    # Give TARGET_USER access to the device's vfio group node, if one appeared.
+    iommu_group=$(basename $(readlink -f /sys/bus/pci/devices/$bdf/iommu_group))
+    if [ -e "/dev/vfio/$iommu_group" ]; then
+        if [ -n "$TARGET_USER" ]; then
+            chown "$TARGET_USER" "/dev/vfio/$iommu_group"
+        fi
+    fi
+}
+
+# linux_unbind_driver <bdf>
+# Detach the device from its current driver without binding a new one.
+function linux_unbind_driver() {
+    local bdf="$1"
+    local ven_dev_id
+    ven_dev_id=$(lspci -n -s $bdf | cut -d' ' -f3 | sed 's/:/ /')
+    local old_driver_name="no driver"
+
+    if [ -e "/sys/bus/pci/devices/$bdf/driver" ]; then
+        old_driver_name=$(basename $(readlink /sys/bus/pci/devices/$bdf/driver))
+        echo "$ven_dev_id" > "/sys/bus/pci/devices/$bdf/driver/remove_id" 2> /dev/null || true
+        echo "$bdf" > "/sys/bus/pci/devices/$bdf/driver/unbind"
+    fi
+
+    pci_dev_echo "$bdf" "$old_driver_name -> no driver"
+}
+
+# Print the mountpoint of every mounted hugetlbfs filesystem, one per line.
+function linux_hugetlbfs_mounts() {
+    mount | grep ' type hugetlbfs ' | awk '{ print $3 }'
+}
+
+# get_nvme_name_from_bdf <bdf>
+# Print the block-device names (e.g. nvme0n1) backed by the NVMe controller
+# at the given PCI address, one per line.
+function get_nvme_name_from_bdf() {
+    local blknames=()
+
+    # grep returns non-zero when no nvme block devices exist; tolerate that.
+    set +e
+    nvme_devs=$(lsblk -d --output NAME | grep "^nvme")
+    set -e
+    for dev in $nvme_devs; do
+        # Sysfs layout differs between kernels; try both link depths.
+        link_name=$(readlink /sys/block/$dev/device/device) || true
+        if [ -z "$link_name" ]; then
+            link_name=$(readlink /sys/block/$dev/device)
+        fi
+        link_bdf=$(basename "$link_name")
+        if [ "$link_bdf" = "$1" ]; then
+            blknames+=($dev)
+        fi
+    done
+
+    printf '%s\n' "${blknames[@]}"
+}
+
+# get_virtio_names_from_bdf <bdf> <out array name>
+# Store in the named array the block devices belonging to the virtio
+# controller at the given PCI address.
+function get_virtio_names_from_bdf() {
+    blk_devs=$(lsblk --nodeps --output NAME)
+    virtio_names=()
+
+    for dev in $blk_devs; do
+        if readlink "/sys/block/$dev" | grep -q "$1"; then
+            virtio_names+=("$dev")
+        fi
+    done
+
+    # Assign the result to the caller-named array via eval.
+    eval "$2=( " "${virtio_names[@]}" " )"
+}
+
+# Select a userspace PCI driver (DRIVER_OVERRIDE > vfio-pci > uio_pci_generic
+# > bundled igb_uio), load it, and bind all whitelisted NVMe, I/OAT, IDXD,
+# Virtio and VMD devices to it. Devices with mounted filesystems are skipped.
+function configure_linux_pci() {
+    local driver_path=""
+    driver_name=""
+    if [[ -n "${DRIVER_OVERRIDE}" ]]; then
+        driver_path="$DRIVER_OVERRIDE"
+        driver_name="${DRIVER_OVERRIDE##*/}"
+        # modprobe and the sysfs don't use the .ko suffix.
+        driver_name=${driver_name%.ko}
+        # path = name -> there is no path
+        if [[ "$driver_path" = "$driver_name" ]]; then
+            driver_path=""
+        fi
+        # igb_uio is a common driver to override with and it depends on uio.
+        if [[ "$driver_name" = "igb_uio" ]]; then
+            modprobe uio
+        fi
+    elif [[ -n "$(ls /sys/kernel/iommu_groups)" || (-e \
+        /sys/module/vfio/parameters/enable_unsafe_noiommu_mode && \
+        "$(cat /sys/module/vfio/parameters/enable_unsafe_noiommu_mode)" == "Y") ]]; then
+        # IOMMU (or no-IOMMU mode) available -> prefer vfio-pci.
+        driver_name=vfio-pci
+    elif modinfo uio_pci_generic > /dev/null 2>&1; then
+        driver_name=uio_pci_generic
+    elif [[ -r "$rootdir/dpdk/build/kmod/igb_uio.ko" ]]; then
+        driver_path="$rootdir/dpdk/build/kmod/igb_uio.ko"
+        driver_name="igb_uio"
+        modprobe uio
+        echo "WARNING: uio_pci_generic not detected - using $driver_name"
+    else
+        echo "No valid drivers found [vfio-pci, uio_pci_generic, igb_uio]. Please either enable the vfio-pci or uio_pci_generic"
+        echo "kernel modules, or have SPDK build the igb_uio driver by running ./configure --with-igb-uio-driver and recompiling."
+        return 1
+    fi
+
+    # modprobe assumes the directory of the module. If the user passes in a path, we should use insmod
+    if [[ -n "$driver_path" ]]; then
+        insmod $driver_path || true
+    else
+        modprobe $driver_name
+    fi
+
+    # NVMe (class code 0x010802)
+    for bdf in ${pci_bus_cache["0x010802"]}; do
+        blknames=()
+        if ! pci_can_use $bdf; then
+            pci_dev_echo "$bdf" "Skipping un-whitelisted NVMe controller at $bdf"
+            continue
+        fi
+
+        mount=false
+        for blkname in $(get_nvme_name_from_bdf $bdf); do
+            mountpoints=$(lsblk /dev/$blkname --output MOUNTPOINT -n | wc -w)
+            if [ "$mountpoints" != "0" ]; then
+                mount=true
+                blknames+=($blkname)
+            fi
+        done
+
+        if ! $mount; then
+            linux_bind_driver "$bdf" "$driver_name"
+        else
+            for name in "${blknames[@]}"; do
+                pci_dev_echo "$bdf" "Active mountpoints on /dev/$name, so not binding PCI dev"
+            done
+        fi
+    done
+
+    # IOAT
+    TMP=$(mktemp)
+    #collect all the device_id info of ioat devices.
+    grep "PCI_DEVICE_ID_INTEL_IOAT" $rootdir/include/spdk/pci_ids.h \
+        | awk -F"x" '{print $2}' > $TMP
+
+    while IFS= read -r dev_id; do
+        for bdf in ${pci_bus_cache["0x8086:0x$dev_id"]}; do
+            if ! pci_can_use $bdf; then
+                pci_dev_echo "$bdf" "Skipping un-whitelisted I/OAT device"
+                continue
+            fi
+
+            linux_bind_driver "$bdf" "$driver_name"
+        done
+    done < $TMP
+    rm $TMP
+
+    # IDXD
+    TMP=$(mktemp)
+    #collect all the device_id info of idxd devices.
+    grep "PCI_DEVICE_ID_INTEL_IDXD" $rootdir/include/spdk/pci_ids.h \
+        | awk -F"x" '{print $2}' > $TMP
+
+    while IFS= read -r dev_id; do
+        for bdf in ${pci_bus_cache["0x8086:0x$dev_id"]}; do
+            if ! pci_can_use $bdf; then
+                pci_dev_echo "$bdf" "Skipping un-whitelisted IDXD device"
+                continue
+            fi
+
+            linux_bind_driver "$bdf" "$driver_name"
+        done
+    done < $TMP
+    rm $TMP
+
+    # virtio
+    TMP=$(mktemp)
+    #collect all the device_id info of virtio devices.
+    grep "PCI_DEVICE_ID_VIRTIO" $rootdir/include/spdk/pci_ids.h \
+        | awk -F"x" '{print $2}' > $TMP
+
+    while IFS= read -r dev_id; do
+        for bdf in ${pci_bus_cache["0x1af4:0x$dev_id"]}; do
+            if ! pci_can_use $bdf; then
+                pci_dev_echo "$bdf" "Skipping un-whitelisted Virtio device at $bdf"
+                continue
+            fi
+            blknames=()
+            get_virtio_names_from_bdf "$bdf" blknames
+            for blkname in "${blknames[@]}"; do
+                if [ "$(lsblk /dev/$blkname --output MOUNTPOINT -n | wc -w)" != "0" ]; then
+                    pci_dev_echo "$bdf" "Active mountpoints on /dev/$blkname, so not binding"
+                    # continue 2: skip this whole PCI device, not just the blkdev.
+                    continue 2
+                fi
+            done
+
+            linux_bind_driver "$bdf" "$driver_name"
+        done
+    done < $TMP
+    rm $TMP
+
+    # VMD
+    TMP=$(mktemp)
+    #collect all the device_id info of vmd devices.
+    grep "PCI_DEVICE_ID_INTEL_VMD" $rootdir/include/spdk/pci_ids.h \
+        | awk -F"x" '{print $2}' > $TMP
+
+    while IFS= read -r dev_id; do
+        for bdf in ${pci_bus_cache["0x8086:0x$dev_id"]}; do
+            # VMD is opt-in: it is only bound when explicitly whitelisted.
+            if [[ -z "$PCI_WHITELIST" ]] || ! pci_can_use $bdf; then
+                echo "Skipping un-whitelisted VMD device at $bdf"
+                continue
+            fi
+
+            linux_bind_driver "$bdf" "$driver_name"
+            echo " VMD generic kdrv: " "$bdf" "$driver_name"
+        done
+    done < $TMP
+    rm $TMP
+
+    echo "1" > "/sys/bus/pci/rescan"
+}
+
+# Remove leftover SPDK runtime files (dpdk run dirs, /dev/shm traces) that
+# are not currently open by any process.
+function cleanup_linux() {
+    shopt -s extglob nullglob
+    dirs_to_clean=""
+    dirs_to_clean="$(echo {/var/run,/tmp}/dpdk/spdk{,_pid}+([0-9])) "
+    if [[ -d $XDG_RUNTIME_DIR && $XDG_RUNTIME_DIR != *" "* ]]; then
+        # 'assert_not_empty' is a deliberately nonexistent path: it makes
+        # readlink -e fail (-> empty result via || true) when the glob
+        # expanded to nothing.
+        dirs_to_clean+="$(readlink -e assert_not_empty $XDG_RUNTIME_DIR/dpdk/spdk{,_pid}+([0-9]) || true) "
+    fi
+
+    files_to_clean=""
+    for dir in $dirs_to_clean; do
+        files_to_clean+="$(echo $dir/*) "
+    done
+    shopt -u extglob nullglob
+
+    files_to_clean+="$(ls -1 /dev/shm/* \
+        | grep -E '(spdk_tgt|iscsi|vhost|nvmf|rocksdb|bdevio|bdevperf|vhost_fuzz|nvme_fuzz)_trace|spdk_iscsi_conns' || true) "
+    files_to_clean="$(readlink -e assert_not_empty $files_to_clean || true)"
+    if [[ -z "$files_to_clean" ]]; then
+        echo "Clean"
+        return 0
+    fi
+
+    # Build the set of files currently open by any process (via /proc/*/fd).
+    shopt -s extglob
+    for fd_dir in $(echo /proc/+([0-9])); do
+        opened_files+="$(readlink -e assert_not_empty $fd_dir/fd/* || true)"
+    done
+    shopt -u extglob
+
+    if [[ -z "$opened_files" ]]; then
+        echo "Can't get list of opened files!"
+        exit 1
+    fi
+
+    echo 'Cleaning'
+    for f in $files_to_clean; do
+        if ! echo "$opened_files" | grep -E -q "^$f\$"; then
+            echo "Removing:    $f"
+            rm $f
+        else
+            echo "Still open: $f"
+        fi
+    done
+
+    for dir in $dirs_to_clean; do
+        if ! echo "$opened_files" | grep -E -q "^$dir\$"; then
+            echo "Removing:    $dir"
+            rmdir $dir
+        else
+            echo "Still open: $dir"
+        fi
+    done
+    echo "Clean"
+
+    unset dirs_to_clean files_to_clean opened_files
+}
+
+# Full Linux configuration: bind PCI devices, mount hugetlbfs, allocate
+# NRHUGE hugepages (optionally on HUGENODE), and fix up permissions/limits
+# for TARGET_USER when running under vfio-pci.
+function configure_linux() {
+    configure_linux_pci
+    hugetlbfs_mounts=$(linux_hugetlbfs_mounts)
+
+    if [ -z "$hugetlbfs_mounts" ]; then
+        hugetlbfs_mounts=/mnt/huge
+        echo "Mounting hugetlbfs at $hugetlbfs_mounts"
+        mkdir -p "$hugetlbfs_mounts"
+        mount -t hugetlbfs nodev "$hugetlbfs_mounts"
+    fi
+
+    # Allocate system-wide or on a specific NUMA node, depending on HUGENODE.
+    if [ -z "$HUGENODE" ]; then
+        hugepages_target="/proc/sys/vm/nr_hugepages"
+    else
+        hugepages_target="/sys/devices/system/node/node${HUGENODE}/hugepages/hugepages-${HUGEPGSZ}kB/nr_hugepages"
+    fi
+
+    echo "$NRHUGE" > "$hugepages_target"
+    allocated_hugepages=$(cat $hugepages_target)
+    # The kernel allocates best-effort; fail loudly when it came up short.
+    if [ "$allocated_hugepages" -lt "$NRHUGE" ]; then
+        echo ""
+        echo "## ERROR: requested $NRHUGE hugepages but only $allocated_hugepages could be allocated."
+        echo "## Memory might be heavily fragmented. Please try flushing the system cache, or reboot the machine."
+        exit 1
+    fi
+
+    if [ "$driver_name" = "vfio-pci" ]; then
+        if [ -n "$TARGET_USER" ]; then
+            for mount in $hugetlbfs_mounts; do
+                chown "$TARGET_USER" "$mount"
+                chmod g+w "$mount"
+            done
+
+            # Warn when the user's memlock limit would cap DPDK/VFIO memory.
+            MEMLOCK_AMNT=$(su "$TARGET_USER" -c "ulimit -l")
+            if [[ $MEMLOCK_AMNT != "unlimited" ]]; then
+                MEMLOCK_MB=$((MEMLOCK_AMNT / 1024))
+                cat <<- MEMLOCK
+				"$TARGET_USER" user memlock limit: $MEMLOCK_MB MB
+
+				This is the maximum amount of memory you will be
+				able to use with DPDK and VFIO if run as user "$TARGET_USER".
+				To change this, please adjust limits.conf memlock limit for user "$TARGET_USER".
+				MEMLOCK
+                if ((MEMLOCK_AMNT < 65536)); then
+                    echo ""
+                    echo "## WARNING: memlock limit is less than 64MB"
+                    echo -n "## DPDK with VFIO may not be able to initialize "
+                    echo "if run as user \"$TARGET_USER\"."
+                fi
+            fi
+        fi
+    fi
+
+    if [ ! -f /dev/cpu/0/msr ]; then
+        # Some distros build msr as a module. Make sure it's loaded to ensure
+        # DPDK can easily figure out the TSC rate rather than relying on 100ms
+        # sleeps.
+        modprobe msr || true
+    fi
+}
+
+# Rebind every SPDK-compatible device back to its native kernel driver
+# (nvme, ioatdma, idxd, virtio-pci, vmd), or just unbind it when the native
+# driver is not present on this system.
+function reset_linux_pci() {
+    # NVMe
+    set +e
+    check_for_driver nvme
+    driver_loaded=$?
+    set -e
+    for bdf in ${pci_bus_cache["0x010802"]}; do
+        if ! pci_can_use $bdf; then
+            pci_dev_echo "$bdf" "Skipping un-whitelisted NVMe controller $blkname"
+            continue
+        fi
+        # check_for_driver returns non-zero when the driver exists.
+        if [ $driver_loaded -ne 0 ]; then
+            linux_bind_driver "$bdf" nvme
+        else
+            linux_unbind_driver "$bdf"
+        fi
+    done
+
+    # IOAT
+    TMP=$(mktemp)
+    #collect all the device_id info of ioat devices.
+    grep "PCI_DEVICE_ID_INTEL_IOAT" $rootdir/include/spdk/pci_ids.h \
+        | awk -F"x" '{print $2}' > $TMP
+
+    set +e
+    check_for_driver ioatdma
+    driver_loaded=$?
+    set -e
+    while IFS= read -r dev_id; do
+        for bdf in ${pci_bus_cache["0x8086:0x$dev_id"]}; do
+            if ! pci_can_use $bdf; then
+                pci_dev_echo "$bdf" "Skipping un-whitelisted I/OAT device"
+                continue
+            fi
+            if [ $driver_loaded -ne 0 ]; then
+                linux_bind_driver "$bdf" ioatdma
+            else
+                linux_unbind_driver "$bdf"
+            fi
+        done
+    done < $TMP
+    rm $TMP
+
+    # IDXD
+    TMP=$(mktemp)
+    #collect all the device_id info of idxd devices.
+    grep "PCI_DEVICE_ID_INTEL_IDXD" $rootdir/include/spdk/pci_ids.h \
+        | awk -F"x" '{print $2}' > $TMP
+    set +e
+    check_for_driver idxd
+    driver_loaded=$?
+    set -e
+    while IFS= read -r dev_id; do
+        for bdf in ${pci_bus_cache["0x8086:0x$dev_id"]}; do
+            if ! pci_can_use $bdf; then
+                pci_dev_echo "$bdf" "Skipping un-whitelisted IDXD device"
+                continue
+            fi
+            if [ $driver_loaded -ne 0 ]; then
+                linux_bind_driver "$bdf" idxd
+            else
+                linux_unbind_driver "$bdf"
+            fi
+        done
+    done < $TMP
+    rm $TMP
+
+    # virtio
+    TMP=$(mktemp)
+    #collect all the device_id info of virtio devices.
+    grep "PCI_DEVICE_ID_VIRTIO" $rootdir/include/spdk/pci_ids.h \
+        | awk -F"x" '{print $2}' > $TMP
+
+    # TODO: check if virtio-pci is loaded first and just unbind if it is not loaded
+    # Requires some more investigation - for example, some kernels do not seem to have
+    # virtio-pci but just virtio_scsi instead. Also need to make sure we get the
+    # underscore vs. dash right in the virtio_scsi name.
+    modprobe virtio-pci || true
+    while IFS= read -r dev_id; do
+        for bdf in ${pci_bus_cache["0x1af4:0x$dev_id"]}; do
+            if ! pci_can_use $bdf; then
+                pci_dev_echo "$bdf" "Skipping un-whitelisted Virtio device at"
+                continue
+            fi
+            linux_bind_driver "$bdf" virtio-pci
+        done
+    done < $TMP
+    rm $TMP
+
+    # VMD
+    TMP=$(mktemp)
+    #collect all the device_id info of vmd devices.
+    grep "PCI_DEVICE_ID_INTEL_VMD" $rootdir/include/spdk/pci_ids.h \
+        | awk -F"x" '{print $2}' > $TMP
+
+    set +e
+    check_for_driver vmd
+    driver_loaded=$?
+    set -e
+    while IFS= read -r dev_id; do
+        for bdf in ${pci_bus_cache["0x8086:0x$dev_id"]}; do
+            if ! pci_can_use $bdf; then
+                echo "Skipping un-whitelisted VMD device at $bdf"
+                continue
+            fi
+            if [ $driver_loaded -ne 0 ]; then
+                linux_bind_driver "$bdf" vmd
+            else
+                linux_unbind_driver "$bdf"
+            fi
+        done
+    done < $TMP
+    rm $TMP
+
+    echo "1" > "/sys/bus/pci/rescan"
+}
+
+# Rebind devices to kernel drivers and delete leftover SPDK hugepage maps.
+function reset_linux() {
+    reset_linux_pci
+    for mount in $(linux_hugetlbfs_mounts); do
+        rm -f "$mount"/spdk*map_*
+    done
+    rm -f /run/.spdk*
+}
+
+# Print hugepage usage (per NUMA node when available) and tables of all
+# NVMe, I/OAT, IDXD, Virtio and VMD devices with their current drivers.
+function status_linux() {
+    echo "Hugepages"
+    printf "%-6s %10s %8s / %6s\n" "node" "hugesize" "free" "total"
+
+    numa_nodes=0
+    shopt -s nullglob
+    for path in /sys/devices/system/node/node*/hugepages/hugepages-*/; do
+        numa_nodes=$((numa_nodes + 1))
+        free_pages=$(cat $path/free_hugepages)
+        all_pages=$(cat $path/nr_hugepages)
+
+        # Extract node name and hugepage size from the sysfs path.
+        [[ $path =~ (node[0-9]+)/hugepages/hugepages-([0-9]+kB) ]]
+
+        node=${BASH_REMATCH[1]}
+        huge_size=${BASH_REMATCH[2]}
+
+        printf "%-6s %10s %8s / %6s\n" $node $huge_size $free_pages $all_pages
+    done
+    shopt -u nullglob
+
+    # fall back to system-wide hugepages
+    if [ "$numa_nodes" = "0" ]; then
+        free_pages=$(grep HugePages_Free /proc/meminfo | awk '{ print $2 }')
+        all_pages=$(grep HugePages_Total /proc/meminfo | awk '{ print $2 }')
+        node="-"
+        huge_size="$HUGEPGSZ"
+
+        printf "%-6s %10s %8s / %6s\n" $node $huge_size $free_pages $all_pages
+    fi
+
+    echo ""
+    echo "NVMe devices"
+
+    echo -e "BDF\t\tVendor\tDevice\tNUMA\tDriver\t\tDevice name"
+    for bdf in ${pci_bus_cache["0x010802"]}; do
+        driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent | awk -F"=" '{print $2}')
+        if [ "$numa_nodes" = "0" ]; then
+            node="-"
+        else
+            node=$(cat /sys/bus/pci/devices/$bdf/numa_node)
+            if ((node == -1)); then
+                node=unknown
+            fi
+        fi
+        device=$(cat /sys/bus/pci/devices/$bdf/device)
+        vendor=$(cat /sys/bus/pci/devices/$bdf/vendor)
+        if [ "$driver" = "nvme" ] && [ -d /sys/bus/pci/devices/$bdf/nvme ]; then
+            name="\t"$(ls /sys/bus/pci/devices/$bdf/nvme)
+        else
+            name="-"
+        fi
+        echo -e "$bdf\t${vendor#0x}\t${device#0x}\t$node\t${driver:--}\t\t$name"
+    done
+
+    echo ""
+    echo "I/OAT Engine"
+
+    #collect all the device_id info of ioat devices.
+    TMP=$(grep "PCI_DEVICE_ID_INTEL_IOAT" $rootdir/include/spdk/pci_ids.h \
+        | awk -F"x" '{print $2}')
+    echo -e "BDF\t\tVendor\tDevice\tNUMA\tDriver"
+    for dev_id in $TMP; do
+        for bdf in ${pci_bus_cache["0x8086:0x$dev_id"]}; do
+            driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent | awk -F"=" '{print $2}')
+            if [ "$numa_nodes" = "0" ]; then
+                node="-"
+            else
+                node=$(cat /sys/bus/pci/devices/$bdf/numa_node)
+                if ((node == -1)); then
+                    node=unknown
+                fi
+            fi
+            device=$(cat /sys/bus/pci/devices/$bdf/device)
+            vendor=$(cat /sys/bus/pci/devices/$bdf/vendor)
+            echo -e "$bdf\t${vendor#0x}\t${device#0x}\t$node\t${driver:--}"
+        done
+    done
+
+    echo ""
+    echo "IDXD Engine"
+
+    #collect all the device_id info of idxd devices.
+    TMP=$(grep "PCI_DEVICE_ID_INTEL_IDXD" $rootdir/include/spdk/pci_ids.h \
+        | awk -F"x" '{print $2}')
+    echo -e "BDF\t\tVendor\tDevice\tNUMA\tDriver"
+    for dev_id in $TMP; do
+        for bdf in ${pci_bus_cache["0x8086:0x$dev_id"]}; do
+            driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent | awk -F"=" '{print $2}')
+            if [ "$numa_nodes" = "0" ]; then
+                node="-"
+            else
+                # NOTE(review): unlike the other tables, -1 is not mapped to
+                # "unknown" here - confirm whether that is intentional.
+                node=$(cat /sys/bus/pci/devices/$bdf/numa_node)
+            fi
+            device=$(cat /sys/bus/pci/devices/$bdf/device)
+            vendor=$(cat /sys/bus/pci/devices/$bdf/vendor)
+            echo -e "$bdf\t${vendor#0x}\t${device#0x}\t$node\t${driver:--}"
+        done
+    done
+
+    echo ""
+    echo "virtio"
+
+    #collect all the device_id info of virtio devices.
+    TMP=$(grep "PCI_DEVICE_ID_VIRTIO" $rootdir/include/spdk/pci_ids.h \
+        | awk -F"x" '{print $2}')
+    echo -e "BDF\t\tVendor\tDevice\tNUMA\tDriver\t\tDevice name"
+    for dev_id in $TMP; do
+        for bdf in ${pci_bus_cache["0x1af4:0x$dev_id"]}; do
+            driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent | awk -F"=" '{print $2}')
+            if [ "$numa_nodes" = "0" ]; then
+                node="-"
+            else
+                node=$(cat /sys/bus/pci/devices/$bdf/numa_node)
+                if ((node == -1)); then
+                    node=unknown
+                fi
+            fi
+            device=$(cat /sys/bus/pci/devices/$bdf/device)
+            vendor=$(cat /sys/bus/pci/devices/$bdf/vendor)
+            blknames=()
+            get_virtio_names_from_bdf "$bdf" blknames
+            echo -e "$bdf\t${vendor#0x}\t${device#0x}\t$node\t\t${driver:--}\t\t" "${blknames[@]}"
+        done
+    done
+
+    echo ""
+    echo "VMD"
+
+    #collect all the device_id info of vmd devices.
+    TMP=$(grep "PCI_DEVICE_ID_INTEL_VMD" $rootdir/include/spdk/pci_ids.h \
+        | awk -F"x" '{print $2}')
+    echo -e "BDF\t\tNuma Node\tDriver Name"
+    for dev_id in $TMP; do
+        for bdf in ${pci_bus_cache["0x8086:0x$dev_id"]}; do
+            driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent | awk -F"=" '{print $2}')
+            node=$(cat /sys/bus/pci/devices/$bdf/numa_node)
+            if ((node == -1)); then
+                node=unknown
+            fi
+            echo -e "$bdf\t$node\t\t$driver"
+        done
+    done
+}
+
+# FreeBSD status report: contigmem state plus device/driver tables for
+# NVMe, I/OAT, IDXD and VMD devices.
+function status_freebsd() {
+    local id pci
+    local ioat idxd vmd
+
+    # Print one table row per device matching the given vendor:device ids.
+    status_print() (
+        local dev driver
+
+        echo -e "BDF\t\tVendor\tDevice\tDriver"
+
+        for id; do
+            for pci in ${pci_bus_cache["$id"]}; do
+                driver=$(pciconf -l "pci$pci")
+                driver=${driver%@*}
+                printf '%s\t%s\t%s\t%s\n' \
+                    "$pci" \
+                    "${pci_ids_vendor["$pci"]}" \
+                    "${pci_ids_device["$pci"]}" \
+                    "$driver"
+            done
+        done
+    )
+
+    devs=PCI_DEVICE_ID_INTEL_IOAT
+    devs+="|PCI_DEVICE_ID_INTEL_IDXD"
+    devs+="|PCI_DEVICE_ID_INTEL_VMD"
+
+    # Sort the matching pci_ids.h entries into per-family id lists.
+    local dev_type dev_id
+    while read -r _ dev_type dev_id; do
+        case "$dev_type" in
+            *IOAT*) ioat+=("0x8086:$dev_id") ;;
+            *IDXD*) idxd+=("0x8086:$dev_id") ;;
+            *VMD*) vmd+=("0x8086:$dev_id") ;;
+        esac
+    done < <(grep -E "$devs" "$rootdir/include/spdk/pci_ids.h")
+
+    local contigmem=present
+    if ! kldstat -q -m contigmem; then
+        contigmem="not present"
+    fi
+
+    cat <<- BSD_INFO
+	Contigmem ($contigmem)
+	Buffer Size: $(kenv hw.contigmem.buffer_size)
+	Num Buffers: $(kenv hw.contigmem.num_buffers)
+
+	NVMe devices
+	$(status_print 0x010802)
+
+	I/IOAT DMA
+	$(status_print "${ioat[@]}")
+
+	IDXD DMA
+	$(status_print "${idxd[@]}")
+
+	VMD
+	$(status_print "${vmd[@]}")
+	BSD_INFO
+}
+
+# Collect the BDFs of all SPDK-compatible devices and hand them to the
+# nic_uio kernel module via the hw.nic_uio.bdfs kenv variable.
+function configure_freebsd_pci() {
+    local devs ids id
+    local BDFS
+
+    devs=PCI_DEVICE_ID_INTEL_IOAT
+    devs+="|PCI_DEVICE_ID_INTEL_IDXD"
+    devs+="|PCI_DEVICE_ID_INTEL_VMD"
+
+    ids=($(grep -E "$devs" "$rootdir/include/spdk/pci_ids.h" | awk '{print $3}'))
+
+    # NVMe controllers (class code 0x010802).
+    if [[ -n ${pci_bus_cache["0x010802"]} ]]; then
+        BDFS+=(${pci_bus_cache["0x010802"]})
+    fi
+
+    for id in "${ids[@]}"; do
+        [[ -n ${pci_bus_cache["0x8086:$id"]} ]] || continue
+        BDFS+=(${pci_bus_cache["0x8086:$id"]})
+    done
+
+    # Drop the domain part from all the addresses
+    BDFS=("${BDFS[@]#*:}")
+
+    # nic_uio expects a comma-separated BDF list; reload it with the new set.
+    local IFS=","
+    kldunload nic_uio.ko || true
+    kenv hw.nic_uio.bdfs="${BDFS[*]}"
+    kldload nic_uio.ko
+}
+
+# FreeBSD configuration: bind PCI devices and (re)load contigmem with
+# HUGEMEM worth of 256MB buffers.
+function configure_freebsd() {
+    configure_freebsd_pci
+    # If contigmem is already loaded but the HUGEMEM specified doesn't match the
+    # previous value, unload contigmem so that we can reload with the new value.
+    if kldstat -q -m contigmem; then
+        if [ $(kenv hw.contigmem.num_buffers) -ne "$((HUGEMEM / 256))" ]; then
+            kldunload contigmem.ko
+        fi
+    fi
+    if ! kldstat -q -m contigmem; then
+        kenv hw.contigmem.num_buffers=$((HUGEMEM / 256))
+        kenv hw.contigmem.buffer_size=$((256 * 1024 * 1024))
+        kldload contigmem.ko
+    fi
+}
+
+# Unload both SPDK-related FreeBSD kernel modules; tolerate absence.
+function reset_freebsd() {
+    kldunload contigmem.ko || true
+    kldunload nic_uio.ko || true
+}
+
+# Populate the pci_bus_cache arrays (helper from scripts/common.sh;
+# CMD=reset presumably selects the caching mode - confirm in common.sh).
+CMD=reset cache_pci_bus
+
+mode=$1
+
+if [ -z "$mode" ]; then
+    mode="config"
+fi
+
+# Environment defaults.
+: ${HUGEMEM:=2048}
+: ${PCI_WHITELIST:=""}
+: ${PCI_BLACKLIST:=""}
+
+# Legacy variable kept for backward compatibility.
+if [ -n "$NVME_WHITELIST" ]; then
+    PCI_WHITELIST="$PCI_WHITELIST $NVME_WHITELIST"
+fi
+
+if [ -n "$SKIP_PCI" ]; then
+    PCI_WHITELIST="none"
+fi
+
+# Default TARGET_USER: the invoking (sudo) user, else the login name.
+if [ -z "$TARGET_USER" ]; then
+    TARGET_USER="$SUDO_USER"
+    if [ -z "$TARGET_USER" ]; then
+        TARGET_USER=$(logname 2> /dev/null) || true
+    fi
+fi
+
+if [[ $os == Linux ]]; then
+    # Derive hugepage size (kB) from /proc/meminfo and compute NRHUGE
+    # from HUGEMEM unless NRHUGE was given explicitly.
+    HUGEPGSZ=$(($(grep Hugepagesize /proc/meminfo | cut -d : -f 2 | tr -dc '0-9')))
+    HUGEPGSZ_MB=$((HUGEPGSZ / 1024))
+    : ${NRHUGE=$(((HUGEMEM + HUGEPGSZ_MB - 1) / HUGEPGSZ_MB))}
+
+    if [ "$mode" == "config" ]; then
+        configure_linux
+    elif [ "$mode" == "cleanup" ]; then
+        cleanup_linux
+    elif [ "$mode" == "reset" ]; then
+        reset_linux
+    elif [ "$mode" == "status" ]; then
+        status_linux
+    elif [ "$mode" == "help" ]; then
+        usage $0
+    else
+        usage $0 "Invalid argument '$mode'"
+    fi
+else
+    if [ "$mode" == "config" ]; then
+        configure_freebsd
+    elif [ "$mode" == "reset" ]; then
+        reset_freebsd
+    elif [ "$mode" == "cleanup" ]; then
+        echo "setup.sh cleanup function not yet supported on $os"
+    elif [ "$mode" == "status" ]; then
+        status_freebsd
+    elif [ "$mode" == "help" ]; then
+        usage $0
+    else
+        usage $0 "Invalid argument '$mode'"
+    fi
+fi
diff --git a/src/spdk/scripts/spdkcli.py b/src/spdk/scripts/spdkcli.py
new file mode 100755
index 000000000..3d7c63baa
--- /dev/null
+++ b/src/spdk/scripts/spdkcli.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python3
+import sys
+import argparse
+from configshell_fb import ConfigShell, shell, ExecutionError
+from pyparsing import (alphanums, Optional, Suppress, Word, Regex,
+ removeQuotes, dblQuotedString, OneOrMore)
+from rpc.client import JSONRPCException, JSONRPCClient
+from spdkcli import UIRoot
+
+
+def add_quotes_to_shell(spdk_shell):
+ command = shell.locatedExpr(Word(alphanums + '_'))('command')
+ value = dblQuotedString.addParseAction(removeQuotes)
+ value_word = Word(alphanums + r';,=_\+/.<>()~@:-%[]')
+ keyword = Word(alphanums + r'_\-')
+ kparam = shell.locatedExpr(keyword + Suppress('=') +
+ Optional(value | value_word, default=''))('kparams*')
+ pparam = shell.locatedExpr(value | value_word)('pparams*')
+ parameters = OneOrMore(kparam | pparam)
+ bookmark = Regex(r'@([A-Za-z0-9:_.]|-)+')
+ pathstd = Regex(r'([A-Za-z0-9:_.\[\]]|-)*' + '/' + r'([A-Za-z0-9:_.\[\]/]|-)*') \
+ | '..' | '.'
+ path = shell.locatedExpr(bookmark | pathstd | '*')('path')
+ spdk_shell._parser = Optional(path) + Optional(command) + Optional(parameters)
+
+
+def main():
+ """
+ Start SPDK CLI
+ :return:
+ """
+ spdk_shell = ConfigShell("~/.scripts")
+ spdk_shell.interactive = True
+ add_quotes_to_shell(spdk_shell)
+
+ parser = argparse.ArgumentParser(description="SPDK command line interface")
+ parser.add_argument('-s', dest='server_addr',
+ help='RPC domain socket path or IP address', default='/var/tmp/spdk.sock')
+ parser.add_argument('-p', dest='port',
+ help='RPC port number (if server_addr is IP address)',
+ default=None, type=int)
+ parser.add_argument("-v", dest="verbose", help="Print request/response JSON for configuration calls",
+ default=False, action="store_true")
+ parser.add_argument("commands", metavar="command", type=str, nargs="*", default="",
+ help="commands to execute by SPDKCli as one-line command")
+ args = parser.parse_args()
+
+ try:
+ client = JSONRPCClient(args.server_addr, port=args.port)
+ except JSONRPCException as e:
+ spdk_shell.log.error("%s. SPDK not running?" % e)
+ sys.exit(1)
+
+ with client:
+ root_node = UIRoot(client, spdk_shell)
+ root_node.verbose = args.verbose
+ try:
+ root_node.refresh()
+ except BaseException:
+ pass
+
+ if args.commands:
+ try:
+ spdk_shell.interactive = False
+ spdk_shell.run_cmdline(" ".join(args.commands))
+ except Exception as e:
+ sys.stderr.write("%s\n" % e)
+ sys.exit(1)
+ sys.exit(0)
+
+ spdk_shell.con.display("SPDK CLI v0.1")
+ spdk_shell.con.display("")
+
+ while not spdk_shell._exit:
+ try:
+ spdk_shell.run_interactive()
+ except (JSONRPCException, ExecutionError) as e:
+ spdk_shell.log.error("%s" % e)
+ except BrokenPipeError as e:
+ spdk_shell.log.error("Lost connection with SPDK: %s" % e)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/src/spdk/scripts/spdkcli/__init__.py b/src/spdk/scripts/spdkcli/__init__.py
new file mode 100644
index 000000000..571d49a8f
--- /dev/null
+++ b/src/spdk/scripts/spdkcli/__init__.py
@@ -0,0 +1 @@
+from .ui_root import UIRoot
diff --git a/src/spdk/scripts/spdkcli/ui_node.py b/src/spdk/scripts/spdkcli/ui_node.py
new file mode 100644
index 000000000..c681c0660
--- /dev/null
+++ b/src/spdk/scripts/spdkcli/ui_node.py
@@ -0,0 +1,861 @@
+from configshell_fb import ConfigNode, ExecutionError
+from uuid import UUID
+from rpc.client import JSONRPCException
+import json
+
+
+def convert_bytes_to_human(size):
+ if not size:
+ return ""
+ for x in ["bytes", "K", "M", "G", "T"]:
+ if size < 1024.0:
+ return "%3.1f%s" % (size, x)
+ size /= 1024.0
+
+
+class UINode(ConfigNode):
+ def __init__(self, name, parent=None, shell=None):
+ ConfigNode.__init__(self, name, parent, shell)
+
+ def refresh(self):
+ for child in self.children:
+ child.refresh()
+
+ def refresh_node(self):
+ self.refresh()
+
+ def ui_command_refresh(self):
+ self.refresh()
+
+ def ui_command_ll(self, path=None, depth=None):
+ """
+ Alias for ls.
+ """
+ self.ui_command_ls(path, depth)
+
+ def execute_command(self, command, pparams=[], kparams={}):
+ try:
+ result = ConfigNode.execute_command(self, command,
+ pparams, kparams)
+ except Exception as e:
+ raise e
+ else:
+ self.shell.log.debug("Command %s succeeded." % command)
+ return result
+ finally:
+ if self.shell.interactive and\
+ command in ["create", "delete", "delete_all", "add_initiator",
+ "allow_any_host", "bdev_split_create", "add_lun",
+ "iscsi_target_node_add_pg_ig_maps", "remove_target", "add_secret",
+ "bdev_split_delete", "bdev_pmem_delete_pool",
+ "bdev_pmem_create_pool", "delete_secret_all",
+ "delete_initiator", "set_auth", "delete_secret",
+ "iscsi_target_node_remove_pg_ig_maps", "load_config",
+ "load_subsystem_config"]:
+ self.get_root().refresh()
+ self.refresh_node()
+
+
+class UIBdevs(UINode):
+ def __init__(self, parent):
+ UINode.__init__(self, "bdevs", parent)
+ self.refresh()
+
+ def refresh(self):
+ self._children = set([])
+ UIMallocBdev(self)
+ UIAIOBdev(self)
+ UILvolBdev(self)
+ UINvmeBdev(self)
+ UINullBdev(self)
+ UIErrorBdev(self)
+ UISplitBdev(self)
+ UIPmemBdev(self)
+ UIRbdBdev(self)
+ UIiSCSIBdev(self)
+ UIVirtioBlkBdev(self)
+ UIVirtioScsiBdev(self)
+ UIRaidBdev(self)
+
+
+class UILvolStores(UINode):
+ def __init__(self, parent):
+ UINode.__init__(self, "lvol_stores", parent)
+ self.refresh()
+
+ def refresh(self):
+ self._children = set([])
+ for lvs in self.get_root().bdev_lvol_get_lvstores():
+ UILvsObj(lvs, self)
+
+ def delete(self, name, uuid):
+ if name is None and uuid is None:
+ self.shell.log.error("Please specify one of the identifiers: "
+ "lvol store name or UUID")
+ self.get_root().bdev_lvol_delete_lvstore(lvs_name=name, uuid=uuid)
+
+ def ui_command_create(self, name, bdev_name, cluster_size=None):
+ """
+ Creates logical volume store on target bdev.
+
+ Arguments:
+ name - Friendly name to use alongside with UUID identifier.
+ bdev_name - On which bdev to create the lvol store.
+ cluster_size - Cluster size to use when creating lvol store, in bytes. Default: 4194304.
+ """
+
+ cluster_size = self.ui_eval_param(cluster_size, "number", None)
+ self.get_root().bdev_lvol_create_lvstore(lvs_name=name, bdev_name=bdev_name, cluster_sz=cluster_size)
+
+ def ui_command_delete(self, name=None, uuid=None):
+ """
+ Deletes logical volume store from configuration.
+ This will also delete all logical volume bdevs created on this lvol store!
+
+ Arguments:
+ name - Friendly name of the logical volume store to be deleted.
+ uuid - UUID number of the logical volume store to be deleted.
+ """
+ self.delete(name, uuid)
+
+ def ui_command_delete_all(self):
+ rpc_messages = ""
+ for lvs in self._children:
+ try:
+ self.delete(None, lvs.lvs.uuid)
+ except JSONRPCException as e:
+ rpc_messages += e.message
+ if rpc_messages:
+ raise JSONRPCException(rpc_messages)
+
+ def summary(self):
+ return "Lvol stores: %s" % len(self.children), None
+
+
+class UIBdev(UINode):
+ def __init__(self, name, parent):
+ UINode.__init__(self, name, parent)
+ self.refresh()
+
+ def refresh(self):
+ self._children = set([])
+ for bdev in self.get_root().bdev_get_bdevs(self.name):
+ UIBdevObj(bdev, self)
+
+ def ui_command_get_bdev_iostat(self, name=None):
+ ret = self.get_root().bdev_get_iostat(name=name)
+ self.shell.log.info(json.dumps(ret, indent=2))
+
+ def ui_command_delete_all(self):
+ """Delete all bdevs from this tree node."""
+ rpc_messages = ""
+ for bdev in self._children:
+ try:
+ self.delete(bdev.name)
+ except JSONRPCException as e:
+ rpc_messages += e.message
+ if rpc_messages:
+ raise JSONRPCException(rpc_messages)
+
+ def summary(self):
+ return "Bdevs: %d" % len(self.children), None
+
+
+class UIMallocBdev(UIBdev):
+ def __init__(self, parent):
+ UIBdev.__init__(self, "malloc", parent)
+
+ def delete(self, name):
+ self.get_root().bdev_malloc_delete(name=name)
+
+ def ui_command_create(self, size, block_size, name=None, uuid=None):
+ """
+ Construct a Malloc bdev.
+
+ Arguments:
+ size - Size in megabytes.
+ block_size - Integer, block size to use when constructing bdev.
+ name - Optional argument. Custom name to use for bdev. If not provided
+ then name will be "MallocX" where X is next available ID.
+ uuid - Optional parameter. Custom UUID to use. If empty then random
+ will be generated.
+ """
+
+ size = self.ui_eval_param(size, "number", None)
+ block_size = self.ui_eval_param(block_size, "number", None)
+ ret_name = self.get_root().create_malloc_bdev(num_blocks=size * 1024 * 1024 // block_size,
+ block_size=block_size,
+ name=name, uuid=uuid)
+ self.shell.log.info(ret_name)
+
+ def ui_command_delete(self, name):
+ """
+ Deletes malloc bdev from configuration.
+
+ Arguments:
+ name - Is a unique identifier of the malloc bdev to be deleted - UUID number or name alias.
+ """
+ self.delete(name)
+
+
+class UIAIOBdev(UIBdev):
+ def __init__(self, parent):
+ UIBdev.__init__(self, "aio", parent)
+
+ def delete(self, name):
+ self.get_root().bdev_aio_delete(name=name)
+
+ def ui_command_create(self, name, filename, block_size):
+ """
+ Construct an AIO bdev.
+ Backend file must exist before trying to create an AIO bdev.
+
+ Arguments:
+ name - Optional argument. Custom name to use for bdev. If not provided
+ then name will be "MallocX" where X is next available ID.
+ filename - Path to AIO backend.
+ block_size - Integer, block size to use when constructing bdev.
+ """
+
+ block_size = self.ui_eval_param(block_size, "number", None)
+ ret_name = self.get_root().bdev_aio_create(name=name,
+ block_size=int(block_size),
+ filename=filename)
+ self.shell.log.info(ret_name)
+
+ def ui_command_delete(self, name):
+ """
+ Deletes aio bdev from configuration.
+
+ Arguments:
+ name - Is a unique identifier of the aio bdev to be deleted - UUID number or name alias.
+ """
+ self.delete(name)
+
+
+class UILvolBdev(UIBdev):
+ def __init__(self, parent):
+ UIBdev.__init__(self, "logical_volume", parent)
+
+ def delete(self, name):
+ self.get_root().bdev_lvol_delete(name=name)
+
+ def ui_command_create(self, name, size, lvs, thin_provision=None):
+ """
+ Construct a Logical Volume bdev.
+
+ Arguments:
+ name - Friendly name to use for creating logical volume bdev.
+ size - Size in megabytes.
+ lvs - Identifier of logical volume store on which the bdev should be
+ created. Can be either a friendly name or UUID.
+ thin_provision - Whether the bdev should be thick or thin provisioned.
+ Default is False, and created bdevs are thick-provisioned.
+ """
+ uuid = None
+ lvs_name = None
+ try:
+ UUID(lvs)
+ uuid = lvs
+ except ValueError:
+ lvs_name = lvs
+
+ size = self.ui_eval_param(size, "number", None)
+ size *= (1024 * 1024)
+ thin_provision = self.ui_eval_param(thin_provision, "bool", False)
+
+ ret_uuid = self.get_root().create_lvol_bdev(lvol_name=name, size=size,
+ lvs_name=lvs_name, uuid=uuid,
+ thin_provision=thin_provision)
+ self.shell.log.info(ret_uuid)
+
+ def ui_command_delete(self, name):
+ """
+ Deletes lvol bdev from configuration.
+
+ Arguments:
+ name - Is a unique identifier of the lvol bdev to be deleted - UUID number or name alias.
+ """
+ self.delete(name)
+
+
+class UINvmeBdev(UIBdev):
+ def __init__(self, parent):
+ UIBdev.__init__(self, "nvme", parent)
+
+ def delete(self, name):
+ self.get_root().bdev_nvme_detach_controller(name=name)
+
+ def ui_command_create(self, name, trtype, traddr,
+ adrfam=None, trsvcid=None, subnqn=None):
+ if "rdma" in trtype and None in [adrfam, trsvcid, subnqn]:
+ self.shell.log.error("Using RDMA transport type."
+ "Please provide arguments for adrfam, trsvcid and subnqn.")
+ ret_name = self.get_root().create_nvme_bdev(name=name, trtype=trtype,
+ traddr=traddr, adrfam=adrfam,
+ trsvcid=trsvcid, subnqn=subnqn)
+ self.shell.log.info(ret_name)
+
+ def ui_command_delete_all(self):
+ rpc_messages = ""
+ ctrlrs = [x.name for x in self._children]
+ ctrlrs = [x.rsplit("n", 1)[0] for x in ctrlrs]
+ ctrlrs = set(ctrlrs)
+ for ctrlr in ctrlrs:
+ try:
+ self.delete(ctrlr)
+ except JSONRPCException as e:
+ rpc_messages += e.messages
+ if rpc_messages:
+ raise JSONRPCException(rpc_messages)
+
+ def ui_command_delete(self, name):
+ """
+ Deletes NVMe controller from configuration.
+
+ Arguments:
+ name - Is a unique identifier of the NVMe controller to be deleted.
+ """
+ self.delete(name)
+
+
+class UINullBdev(UIBdev):
+ def __init__(self, parent):
+ UIBdev.__init__(self, "null", parent)
+
+ def delete(self, name):
+ self.get_root().bdev_null_delete(name=name)
+
+ def ui_command_create(self, name, size, block_size, uuid=None):
+ """
+ Construct a Null bdev.
+
+ Arguments:
+ name - Name to use for bdev.
+ size - Size in megabytes.
+ block_size - Integer, block size to use when constructing bdev.
+ uuid - Optional parameter. Custom UUID to use. If empty then random
+ will be generated.
+ """
+
+ size = self.ui_eval_param(size, "number", None)
+ block_size = self.ui_eval_param(block_size, "number", None)
+ num_blocks = size * 1024 * 1024 // block_size
+ ret_name = self.get_root().bdev_null_create(num_blocks=num_blocks,
+ block_size=block_size,
+ name=name, uuid=uuid)
+ self.shell.log.info(ret_name)
+
+ def ui_command_delete(self, name):
+ """
+ Deletes null bdev from configuration.
+
+ Arguments:
+ name - Is a unique identifier of the null bdev to be deleted - UUID number or name alias.
+ """
+ self.delete(name)
+
+
+class UIErrorBdev(UIBdev):
+ def __init__(self, parent):
+ UIBdev.__init__(self, "error", parent)
+
+ def delete(self, name):
+ self.get_root().bdev_error_delete(name=name)
+
+ def ui_command_create(self, base_name):
+ """
+ Construct a error injection bdev.
+
+ Arguments:
+ base_name - base bdev name on top of which error bdev will be created.
+ """
+
+ self.get_root().create_error_bdev(base_name=base_name)
+
+ def ui_command_delete(self, name):
+ """
+ Deletes error bdev from configuration.
+
+ Arguments:
+ name - Is a unique identifier of the error bdev to be deleted - UUID number or name alias.
+ """
+ self.delete(name)
+
+
+class UISplitBdev(UIBdev):
+ def __init__(self, parent):
+ UIBdev.__init__(self, "split_disk", parent)
+
+ def delete(self, name):
+ pass
+
+ def ui_command_bdev_split_create(self, base_bdev, split_count, split_size_mb=None):
+ """
+ Create split block devices from a base bdev.
+
+ Arguments:
+ base_bdev - Name of bdev to split
+ split_count - Number of split bdevs to create
+ split_size_mb- Size of each split volume in MiB (optional)
+ """
+
+ split_count = self.ui_eval_param(split_count, "number", None)
+ split_size_mb = self.ui_eval_param(split_size_mb, "number", None)
+
+ ret_name = self.get_root().bdev_split_create(base_bdev=base_bdev,
+ split_count=split_count,
+ split_size_mb=split_size_mb)
+ self.shell.log.info(ret_name)
+
+ def ui_command_bdev_split_delete(self, base_bdev):
+ """Delete split block devices associated with base bdev.
+
+ Args:
+ base_bdev: name of previously split bdev
+ """
+
+ self.get_root().bdev_split_delete(base_bdev=base_bdev)
+
+
+class UIPmemBdev(UIBdev):
+ def __init__(self, parent):
+ UIBdev.__init__(self, "pmemblk", parent)
+
+ def delete(self, name):
+ self.get_root().bdev_pmem_delete(name=name)
+
+ def ui_command_bdev_pmem_create_pool(self, pmem_file, total_size, block_size):
+ total_size = self.ui_eval_param(total_size, "number", None)
+ block_size = self.ui_eval_param(block_size, "number", None)
+ num_blocks = int((total_size * 1024 * 1024) / block_size)
+
+ self.get_root().bdev_pmem_create_pool(pmem_file=pmem_file,
+ num_blocks=num_blocks,
+ block_size=block_size)
+
+ def ui_command_bdev_pmem_delete_pool(self, pmem_file):
+ self.get_root().bdev_pmem_delete_pool(pmem_file=pmem_file)
+
+ def ui_command_bdev_pmem_get_pool_info(self, pmem_file):
+ ret = self.get_root().bdev_pmem_get_pool_info(pmem_file=pmem_file)
+ self.shell.log.info(json.dumps(ret, indent=2))
+
+ def ui_command_create(self, pmem_file, name):
+ ret_name = self.get_root().bdev_pmem_create(pmem_file=pmem_file,
+ name=name)
+ self.shell.log.info(ret_name)
+
+ def ui_command_delete(self, name):
+ """
+ Deletes pmem bdev from configuration.
+
+ Arguments:
+ name - Is a unique identifier of the pmem bdev to be deleted - UUID number or name alias.
+ """
+ self.delete(name)
+
+
+class UIRbdBdev(UIBdev):
+ def __init__(self, parent):
+ UIBdev.__init__(self, "rbd", parent)
+
+ def delete(self, name):
+ self.get_root().bdev_rbd_delete(name=name)
+
+ def ui_command_create(self, pool_name, rbd_name, block_size, name=None):
+ block_size = self.ui_eval_param(block_size, "number", None)
+
+ ret_name = self.get_root().create_rbd_bdev(pool_name=pool_name,
+ rbd_name=rbd_name,
+ block_size=block_size,
+ name=name)
+ self.shell.log.info(ret_name)
+
+ def ui_command_delete(self, name):
+ """
+ Deletes rbd bdev from configuration.
+
+ Arguments:
+ name - Is a unique identifier of the rbd bdev to be deleted - UUID number or name alias.
+ """
+ self.delete(name)
+
+
+class UIiSCSIBdev(UIBdev):
+ def __init__(self, parent):
+ UIBdev.__init__(self, "iscsi", parent)
+
+ def delete(self, name):
+ self.get_root().bdev_iscsi_delete(name=name)
+
+ def ui_command_create(self, name, url, initiator_iqn):
+ """
+ Create iSCSI bdev in configuration by connecting to remote
+ iSCSI target.
+
+ Arguments:
+ name - name to be used as an ID for created iSCSI bdev.
+ url - iscsi url pointing to LUN on remote iSCSI target.
+ Example: iscsi://127.0.0.1:3260/iqn.2018-06.org.spdk/0.
+ initiator_iqn - IQN to use for initiating connection with the target.
+ """
+ ret_name = self.get_root().create_iscsi_bdev(name=name,
+ url=url,
+ initiator_iqn=initiator_iqn)
+ self.shell.log.info(ret_name)
+
+ def ui_command_delete(self, name):
+ """
+ Deletes iSCSI bdev from configuration.
+
+ Arguments:
+ name - name of the iscsi bdev to be deleted.
+ """
+ self.delete(name)
+
+
+class UIVirtioBlkBdev(UIBdev):
+ def __init__(self, parent):
+ UIBdev.__init__(self, "virtioblk_disk", parent)
+
+ def ui_command_create(self, name, trtype, traddr,
+ vq_count=None, vq_size=None):
+
+ vq_count = self.ui_eval_param(vq_count, "number", None)
+ vq_size = self.ui_eval_param(vq_size, "number", None)
+
+ ret = self.get_root().create_virtio_dev(name=name,
+ trtype=trtype,
+ traddr=traddr,
+ dev_type="blk",
+ vq_count=vq_count,
+ vq_size=vq_size)
+
+ self.shell.log.info(ret)
+
+ def ui_command_delete(self, name):
+ """
+ Deletes virtio scsi bdev from configuration.
+
+ Arguments:
+ name - Is a unique identifier of the virtio scsi bdev to be deleted - UUID number or name alias.
+ """
+ self.get_root().bdev_virtio_detach_controller(name=name)
+
+
+class UIVirtioScsiBdev(UIBdev):
+ def __init__(self, parent):
+ UIBdev.__init__(self, "virtioscsi_disk", parent)
+
+ def refresh(self):
+ self._children = set([])
+ for bdev in self.get_root().bdev_virtio_scsi_get_devices():
+ UIVirtioScsiBdevObj(bdev, self)
+
+ def ui_command_create(self, name, trtype, traddr,
+ vq_count=None, vq_size=None):
+
+ vq_count = self.ui_eval_param(vq_count, "number", None)
+ vq_size = self.ui_eval_param(vq_size, "number", None)
+
+ ret = self.get_root().create_virtio_dev(name=name,
+ trtype=trtype,
+ traddr=traddr,
+ dev_type="scsi",
+ vq_count=vq_count,
+ vq_size=vq_size)
+
+ self.shell.log.info(ret)
+
+ def ui_command_delete(self, name):
+ self.get_root().bdev_virtio_detach_controller(name=name)
+
+
+class UIBdevObj(UINode):
+ def __init__(self, bdev, parent):
+ self.bdev = bdev
+ # Using bdev name also for lvol bdevs, which results in displying
+ # UUID instead of alias. This is because alias naming convention
+ # (lvol_store_name/lvol_bdev_name) conflicts with configshell paths
+ # ("/" as separator).
+ # Solution: show lvol alias in "summary field" for now.
+ # TODO: Possible next steps:
+ # - Either change default separator in tree for smth else
+ # - or add a UI command which would be able to autocomplete
+ # "cd" command based on objects alias and match is to the
+ # "main" bdev name.
+ UINode.__init__(self, self.bdev.name, parent)
+
+ def ui_command_show_details(self):
+ self.shell.log.info(json.dumps(vars(self.bdev), indent=2))
+
+ def summary(self):
+ size = convert_bytes_to_human(self.bdev.block_size * self.bdev.num_blocks)
+ size = "=".join(["Size", size])
+
+ in_use = "Not claimed"
+ if bool(self.bdev.claimed):
+ in_use = "Claimed"
+
+ alias = None
+ if self.bdev.aliases:
+ alias = self.bdev.aliases[0]
+
+ info = ", ".join([_f for _f in [alias, size, in_use] if _f])
+ return info, True
+
+
+class UIVirtioScsiBdevObj(UIBdevObj):
+ def __init__(self, bdev, parent):
+ UIBdevObj.__init__(self, bdev, parent)
+ self.refresh()
+
+ def refresh(self):
+ self._children = set([])
+ for bdev in self.get_root().bdev_get_bdevs("virtio_scsi_disk"):
+ if self.bdev.name in bdev.name:
+ UIBdevObj(bdev, self)
+
+ def summary(self):
+ if "socket" in list(self.bdev.virtio.keys()):
+ info = self.bdev.virtio["socket"]
+ if "pci_address" in list(self.bdev.virtio.keys()):
+ info = self.bdev.virtio["pci_address"]
+ return info, True
+
+
+class UILvsObj(UINode):
+ def __init__(self, lvs, parent):
+ UINode.__init__(self, lvs.name, parent)
+ self.lvs = lvs
+
+ def ui_command_show_details(self):
+ self.shell.log.info(json.dumps(vars(self.lvs), indent=2))
+
+ def summary(self):
+ size = convert_bytes_to_human(self.lvs.total_data_clusters * self.lvs.cluster_size)
+ free = convert_bytes_to_human(self.lvs.free_clusters * self.lvs.cluster_size)
+ if not free:
+ free = "0"
+ size = "=".join(["Size", size])
+ free = "=".join(["Free", free])
+ info = ", ".join([str(size), str(free)])
+ return info, True
+
+
+class UIVhosts(UINode):
+ def __init__(self, parent):
+ UINode.__init__(self, "vhost", parent)
+ self.refresh()
+
+ def refresh(self):
+ self._children = set([])
+ self.get_root().list_vhost_ctrls()
+ UIVhostBlk(self)
+ UIVhostScsi(self)
+
+
+class UIVhost(UINode):
+ def __init__(self, name, parent):
+ UINode.__init__(self, name, parent)
+ self.refresh()
+
+ def ui_command_delete(self, name):
+ """
+ Delete a Vhost controller from configuration.
+
+ Arguments:
+ name - Controller name.
+ """
+ self.get_root().vhost_delete_controller(ctrlr=name)
+
+
+class UIVhostBlk(UIVhost):
+ def __init__(self, parent):
+ UIVhost.__init__(self, "block", parent)
+ self.refresh()
+
+ def refresh(self):
+ self._children = set([])
+ for ctrlr in self.get_root().vhost_get_controllers(ctrlr_type=self.name):
+ UIVhostBlkCtrlObj(ctrlr, self)
+
+ def ui_command_create(self, name, bdev, cpumask=None, readonly=False):
+ """
+ Create a Vhost BLK controller.
+
+ Arguments:
+ name - Controller name.
+ bdev - Which bdev to attach to the controller.
+ cpumask - Optional. Integer to specify mask of CPUs to use.
+ Default: 1.
+ readonly - Whether controller should be read only or not.
+ Default: False.
+ """
+ self.get_root().vhost_create_blk_controller(ctrlr=name,
+ dev_name=bdev,
+ cpumask=cpumask,
+ readonly=bool(readonly))
+
+
+class UIVhostScsi(UIVhost):
+ def __init__(self, parent):
+ UIVhost.__init__(self, "scsi", parent)
+ self.refresh()
+
+ def refresh(self):
+ self._children = set([])
+ for ctrlr in self.get_root().vhost_get_controllers(ctrlr_type=self.name):
+ UIVhostScsiCtrlObj(ctrlr, self)
+
+ def ui_command_create(self, name, cpumask=None):
+ """
+ Create a Vhost SCSI controller.
+
+ Arguments:
+ name - Controller name.
+ cpumask - Optional. Integer to specify mask of CPUs to use.
+ Default: 1.
+ """
+ self.get_root().vhost_create_scsi_controller(ctrlr=name,
+ cpumask=cpumask)
+
+
+class UIVhostCtrl(UINode):
+ # Base class for SCSI and BLK controllers, do not instantiate
+ def __init__(self, ctrlr, parent):
+ self.ctrlr = ctrlr
+ UINode.__init__(self, self.ctrlr.ctrlr, parent)
+ self.refresh()
+
+ def ui_command_show_details(self):
+ self.shell.log.info(json.dumps(vars(self.ctrlr), indent=2))
+
+ def ui_command_set_coalescing(self, delay_base_us, iops_threshold):
+ delay_base_us = self.ui_eval_param(delay_base_us, "number", None)
+ iops_threshold = self.ui_eval_param(iops_threshold, "number", None)
+
+ self.get_root().vhost_controller_set_coalescing(ctrlr=self.ctrlr.ctrlr,
+ delay_base_us=delay_base_us,
+ iops_threshold=iops_threshold)
+
+
+class UIVhostScsiCtrlObj(UIVhostCtrl):
+ def refresh(self):
+ self._children = set([])
+ for lun in self.ctrlr.backend_specific["scsi"]:
+ UIVhostTargetObj(lun, self)
+
+ def ui_command_remove_target(self, target_num):
+ """
+ Remove target node from SCSI controller.
+
+ Arguments:
+ target_num - Integer identifier of target node to delete.
+ """
+ self.get_root().vhost_scsi_controller_remove_target(ctrlr=self.ctrlr.ctrlr,
+ scsi_target_num=int(target_num))
+ for ctrlr in self.get_root().vhost_get_controllers(ctrlr_type="scsi"):
+ if ctrlr.ctrlr == self.ctrlr.ctrlr:
+ self.ctrlr = ctrlr
+
+ def ui_command_add_lun(self, target_num, bdev_name):
+ """
+ Add LUN to SCSI target node.
+ Currently only one LUN (which is LUN ID 0) per target is supported.
+ Adding LUN to not existing target node will create that node.
+
+ Arguments:
+ target_num - Integer identifier of target node to modify.
+ bdev - Which bdev to add as LUN.
+ """
+ self.get_root().vhost_scsi_controller_add_target(ctrlr=self.ctrlr.ctrlr,
+ scsi_target_num=int(target_num),
+ bdev_name=bdev_name)
+ for ctrlr in self.get_root().vhost_get_controllers(ctrlr_type="scsi"):
+ if ctrlr.ctrlr == self.ctrlr.ctrlr:
+ self.ctrlr = ctrlr
+
+ def summary(self):
+ info = self.ctrlr.socket
+ return info, True
+
+
+class UIVhostBlkCtrlObj(UIVhostCtrl):
+ def refresh(self):
+ self._children = set([])
+ UIVhostLunDevObj(self.ctrlr.backend_specific["block"]["bdev"], self)
+
+ def summary(self):
+ ro = None
+ if self.ctrlr.backend_specific["block"]["readonly"]:
+ ro = "Readonly"
+ info = ", ".join([_f for _f in [self.ctrlr.socket, ro] if _f])
+ return info, True
+
+
+class UIVhostTargetObj(UINode):
+ def __init__(self, target, parent):
+ self.target = target
+ # Next line: configshell does not allow paths with spaces.
+ UINode.__init__(self, target["target_name"].replace(" ", "_"), parent)
+ self.refresh()
+
+ def refresh(self):
+ self._children = set([])
+ for target in self.target["luns"]:
+ UIVhostLunDevObj(target["bdev_name"], self)
+
+ def ui_command_show_details(self):
+ self.shell.log.info(json.dumps(self.target, indent=2))
+
+ def summary(self):
+ luns = "LUNs: %s" % len(self.target["luns"])
+ id = "TargetID: %s" % self.target["scsi_dev_num"]
+ info = ",".join([luns, id])
+ return info, True
+
+
+class UIVhostLunDevObj(UINode):
+ def __init__(self, name, parent):
+ UINode.__init__(self, name, parent)
+
+
+class UIRaidBdev(UIBdev):
+ def __init__(self, parent):
+ UIBdev.__init__(self, "raid_volume", parent)
+
+ def delete(self, name):
+ self.get_root().bdev_raid_delete(name=name)
+
+ def ui_command_create(self, name, raid_level, base_bdevs, strip_size_kb):
+ """
+ Creates a raid bdev of the provided base_bdevs
+
+ Arguments:
+ name - raid bdev name
+ raid_level - raid level, supported values 0
+ base_bdevs - base bdevs name, whitespace separated list in quotes
+ strip_size_kb - strip size of raid bdev in KB, supported values like 8, 16, 32, 64, 128, 256, etc
+ """
+ base_bdevs_array = []
+ for u in base_bdevs.strip().split(" "):
+ base_bdevs_array.append(u)
+
+ strip_size_kb = self.ui_eval_param(strip_size_kb, "number", None)
+
+ ret_name = self.get_root().bdev_raid_create(name=name,
+ raid_level=raid_level,
+ base_bdevs=base_bdevs_array,
+ strip_size_kb=strip_size_kb)
+ self.shell.log.info(ret_name)
+
+ def ui_command_delete(self, name):
+ """
+ Deletes this raid bdev object
+
+ Arguments:
+ name - raid bdev name
+ """
+ self.delete(name)
diff --git a/src/spdk/scripts/spdkcli/ui_node_iscsi.py b/src/spdk/scripts/spdkcli/ui_node_iscsi.py
new file mode 100644
index 000000000..938cb7ab4
--- /dev/null
+++ b/src/spdk/scripts/spdkcli/ui_node_iscsi.py
@@ -0,0 +1,639 @@
+from configshell_fb import ExecutionError
+from rpc.client import JSONRPCException
+from .ui_node import UINode
+
+
+class UIISCSI(UINode):
+    """Top-level "iscsi" menu node grouping all iSCSI sub-menus."""
+
+    def __init__(self, parent):
+        UINode.__init__(self, "iscsi", parent)
+        self.refresh()
+
+    def refresh(self):
+        # Recreate all child menus from current target state.
+        self._children = set([])
+        UIISCSIDevices(self)
+        UIPortalGroups(self)
+        UIInitiatorGroups(self)
+        UIISCSIConnections(self)
+        UIISCSIAuthGroups(self)
+        UIISCSIGlobalParams(self)
+
+
+class UIISCSIGlobalParams(UINode):
+    """Node exposing the target's iSCSI global options as read-only children."""
+
+    def __init__(self, parent):
+        UINode.__init__(self, "global_params", parent)
+        self.refresh()
+
+    def refresh(self):
+        # One child per (param, value) pair reported by iscsi_get_options.
+        self._children = set([])
+        iscsi_global_params = self.get_root().iscsi_get_options()
+        if not iscsi_global_params:
+            return
+        for param, val in iscsi_global_params.items():
+            UIISCSIGlobalParam("%s: %s" % (param, val), self)
+
+    def ui_command_set_auth(self, g=None, d=None, r=None, m=None):
+        """Set CHAP authentication for discovery service.
+
+        Optional arguments:
+        g = chap_group: Authentication group ID for discovery session
+        d = disable_chap: CHAP for discovery session should be disabled
+        r = require_chap: CHAP for discovery session should be required
+        m = mutual_chap: CHAP for discovery session should be mutual
+        """
+        chap_group = self.ui_eval_param(g, "number", None)
+        disable_chap = self.ui_eval_param(d, "bool", None)
+        require_chap = self.ui_eval_param(r, "bool", None)
+        mutual_chap = self.ui_eval_param(m, "bool", None)
+        self.get_root().iscsi_set_discovery_auth(
+            chap_group=chap_group, disable_chap=disable_chap,
+            require_chap=require_chap, mutual_chap=mutual_chap)
+
+
+class UIISCSIGlobalParam(UINode):
+    """Leaf node whose name is a single "param: value" string; display-only."""
+
+    def __init__(self, param, parent):
+        UINode.__init__(self, param, parent)
+
+
+class UIISCSIDevices(UINode):
+    """Menu node listing iSCSI target nodes paired with their SCSI devices."""
+
+    def __init__(self, parent):
+        UINode.__init__(self, "target_nodes", parent)
+        self.scsi_devices = list()
+        self.refresh()
+
+    def refresh(self):
+        # A child is created only for target nodes that have a matching
+        # SCSI device (matched by device_name == node['name']).
+        self._children = set([])
+        self.target_nodes = list(self.get_root().iscsi_get_target_nodes())
+        self.scsi_devices = list(self.get_root().scsi_get_devices())
+        for device in self.scsi_devices:
+            for node in self.target_nodes:
+                if hasattr(device, "device_name") and node['name'] \
+                        == device.device_name:
+                    UIISCSIDevice(device, node, self)
+
+    def delete(self, name):
+        # name=None is forwarded as-is; the RPC treats it as "delete all".
+        self.get_root().iscsi_delete_target_node(target_node_name=name)
+
+    def ui_command_create(self, name, alias_name, bdev_name_id_pairs,
+                          pg_ig_mappings, queue_depth, g=None, d=None, r=None,
+                          m=None, h=None, t=None):
+        """Create target node
+
+        Positional args:
+           name: Target node name (ASCII)
+           alias_name: Target node alias name (ASCII)
+           bdev_name_id_pairs: List of bdev_name_id_pairs
+           pg_ig_mappings: List of pg_ig_mappings
+           queue_depth: Desired target queue depth
+        Optional args:
+           g = chap_group: Authentication group ID for this target node
+           d = disable_chap: CHAP authentication should be disabled for this target node
+           r = require_chap: CHAP authentication should be required for this target node
+           m = mutual_chap: CHAP authentication should be mutual/bidirectional
+           h = header_digest: Header Digest should be required for this target node
+           t = data_digest: Data Digest should be required for this target node
+        """
+        luns = []
+        # NOTE(review): these prints go straight to stdout rather than
+        # self.shell.log — presumably intentional echo of user input.
+        print("bdev_name_id_pairs: %s" % bdev_name_id_pairs)
+        print("pg_ig_mappings: %s" % pg_ig_mappings)
+        # Each pair has the form "bdev_name:lun_id".
+        for u in bdev_name_id_pairs.strip().split(" "):
+            bdev_name, lun_id = u.split(":")
+            luns.append({"bdev_name": bdev_name, "lun_id": int(lun_id)})
+        pg_ig_maps = []
+        # Each mapping has the form "pg_tag:ig_tag".
+        for u in pg_ig_mappings.strip().split(" "):
+            pg, ig = u.split(":")
+            pg_ig_maps.append({"pg_tag": int(pg), "ig_tag": int(ig)})
+        queue_depth = self.ui_eval_param(queue_depth, "number", None)
+        chap_group = self.ui_eval_param(g, "number", None)
+        disable_chap = self.ui_eval_param(d, "bool", None)
+        require_chap = self.ui_eval_param(r, "bool", None)
+        mutual_chap = self.ui_eval_param(m, "bool", None)
+        header_digest = self.ui_eval_param(h, "bool", None)
+        data_digest = self.ui_eval_param(t, "bool", None)
+        self.get_root().iscsi_create_target_node(
+            name=name, alias_name=alias_name, luns=luns,
+            pg_ig_maps=pg_ig_maps, queue_depth=queue_depth,
+            chap_group=chap_group, disable_chap=disable_chap,
+            require_chap=require_chap, mutual_chap=mutual_chap,
+            header_digest=header_digest, data_digest=data_digest)
+
+    def ui_command_delete(self, name=None):
+        """Delete a target node. If name is not specified delete all target nodes.
+
+        Arguments:
+           name - Target node name.
+        """
+        self.delete(name)
+
+    def ui_command_delete_all(self):
+        """Delete all target nodes"""
+        # Collect RPC errors and re-raise once, so one failure does not
+        # stop deletion of the remaining nodes.
+        rpc_messages = ""
+        for device in self.scsi_devices:
+            try:
+                self.delete(device.device_name)
+            except JSONRPCException as e:
+                rpc_messages += e.message
+        if rpc_messages:
+            raise JSONRPCException(rpc_messages)
+
+    def ui_command_add_lun(self, name, bdev_name, lun_id=None):
+        """Add lun to the target node.
+
+        Required args:
+           name: Target node name (ASCII)
+           bdev_name: bdev name
+        Positional args:
+           lun_id: LUN ID (integer >= 0)
+        """
+        if lun_id:
+            lun_id = self.ui_eval_param(lun_id, "number", None)
+        self.get_root().iscsi_target_node_add_lun(
+            name=name, bdev_name=bdev_name, lun_id=lun_id)
+
+    def summary(self):
+        # Count only the device/node pairs that refresh() would display.
+        count = 0
+        for device in self.scsi_devices:
+            for node in self.target_nodes:
+                if hasattr(device, "device_name") and node['name'] \
+                        == device.device_name:
+                    count = count + 1
+        return "Target nodes: %d" % count, None
+
+
+class UIISCSIDevice(UINode):
+    """Node for one iSCSI target node / SCSI device pair."""
+
+    def __init__(self, device, target, parent):
+        UINode.__init__(self, device.device_name, parent)
+        self.device = device
+        self.target = target
+        self.refresh()
+
+    def ui_command_set_auth(self, g=None, d=None, r=None, m=None):
+        """Set CHAP authentication for the target node.
+
+        Optionals args:
+           g = chap_group: Authentication group ID for this target node
+           d = disable_chap: CHAP authentication should be disabled for this target node
+           r = require_chap: CHAP authentication should be required for this target node
+           m = mutual_chap: CHAP authentication should be mutual/bidirectional
+        """
+        chap_group = self.ui_eval_param(g, "number", None)
+        disable_chap = self.ui_eval_param(d, "bool", None)
+        require_chap = self.ui_eval_param(r, "bool", None)
+        mutual_chap = self.ui_eval_param(m, "bool", None)
+        self.get_root().iscsi_target_node_set_auth(
+            name=self.device.device_name, chap_group=chap_group,
+            disable_chap=disable_chap,
+            require_chap=require_chap, mutual_chap=mutual_chap)
+
+    def ui_command_iscsi_target_node_add_pg_ig_maps(self, pg_ig_mappings):
+        """Add PG-IG maps to the target node.
+
+        Args:
+           pg_ig_maps: List of pg_ig_mappings, e.g. pg_tag:ig_tag pg_tag2:ig_tag2
+        """
+        pg_ig_maps = []
+        # Parse "pg_tag:ig_tag" pairs into the dict form the RPC expects.
+        for u in pg_ig_mappings.strip().split(" "):
+            pg, ig = u.split(":")
+            pg_ig_maps.append({"pg_tag": int(pg), "ig_tag": int(ig)})
+        self.get_root().iscsi_target_node_add_pg_ig_maps(
+            pg_ig_maps=pg_ig_maps, name=self.device.device_name)
+
+    def ui_command_iscsi_target_node_remove_pg_ig_maps(self, pg_ig_mappings):
+        """Remove PG-IG maps from the target node.
+
+        Args:
+           pg_ig_maps: List of pg_ig_mappings, e.g. pg_tag:ig_tag pg_tag2:ig_tag2
+        """
+        pg_ig_maps = []
+        for u in pg_ig_mappings.strip().split(" "):
+            pg, ig = u.split(":")
+            pg_ig_maps.append({"pg_tag": int(pg), "ig_tag": int(ig)})
+        self.get_root().iscsi_target_node_remove_pg_ig_maps(
+            pg_ig_maps=pg_ig_maps, name=self.device.device_name)
+
+    def refresh(self):
+        # Children: LUN list, PG-IG map list, and a CHAP auth summary node.
+        self._children = set([])
+        UIISCSILuns(self.target['luns'], self)
+        UIISCSIPgIgMaps(self.target['pg_ig_maps'], self)
+        auths = {"disable_chap": self.target["disable_chap"],
+                 "require_chap": self.target["require_chap"],
+                 "mutual_chap": self.target["mutual_chap"],
+                 "chap_group": self.target["chap_group"],
+                 "data_digest": self.target["data_digest"]}
+        UIISCSIAuth(auths, self)
+
+    def summary(self):
+        return "Id: %s, QueueDepth: %s" % (self.device.id,
+                                           self.target['queue_depth']), None
+
+
+class UIISCSIAuth(UINode):
+    """Display-only node summarizing a target node's CHAP settings."""
+
+    def __init__(self, auths, parent):
+        UINode.__init__(self, "auths", parent)
+        self.auths = auths
+        self.refresh()
+
+    def summary(self):
+        # Note: 'data_digest' is stored in self.auths but intentionally
+        # not included in this summary line.
+        return "disable_chap: %s, require_chap: %s, mutual_chap: %s, chap_group: %s" % (
+            self.auths['disable_chap'], self.auths['require_chap'],
+            self.auths['mutual_chap'], self.auths['chap_group']), None
+
+
+class UIISCSILuns(UINode):
+ def __init__(self, luns, parent):
+ UINode.__init__(self, "luns", parent)
+ self.luns = luns
+ self.refresh()
+
+ def refresh(self):
+ self._children = set([])
+ for lun in self.luns:
+ UIISCSILun(lun, self)
+
+ def summary(self):
+ return "Luns: %d" % len(self.luns), None
+
+
+class UIISCSILun(UINode):
+    """Leaf node for one LUN; named by its lun_id, summarized by its bdev."""
+
+    def __init__(self, lun, parent):
+        UINode.__init__(self, "lun %s" % lun['lun_id'], parent)
+        self.lun = lun
+        self.refresh()
+
+    def summary(self):
+        return "%s" % self.lun['bdev_name'], None
+
+
+class UIISCSIPgIgMaps(UINode):
+    """Container node listing the portal-group/initiator-group mappings."""
+
+    def __init__(self, pg_ig_maps, parent):
+        UINode.__init__(self, "pg_ig_maps", parent)
+        self.pg_ig_maps = pg_ig_maps
+        self.refresh()
+
+    def refresh(self):
+        self._children = set([])
+        for pg_ig in self.pg_ig_maps:
+            UIISCSIPgIg(pg_ig, self)
+
+    def summary(self):
+        return "Pg_ig_maps: %d" % len(self.pg_ig_maps), None
+
+
+class UIISCSIPgIg(UINode):
+    """Leaf node for one PG-IG mapping; display-only."""
+
+    def __init__(self, pg_ig, parent):
+        UINode.__init__(self, "portal_group%s - initiator_group%s" %
+                        (pg_ig['pg_tag'], pg_ig['ig_tag']), parent)
+        self.pg_ig = pg_ig
+        self.refresh()
+
+
+class UIPortalGroups(UINode):
+ def __init__(self, parent):
+ UINode.__init__(self, "portal_groups", parent)
+ self.refresh()
+
+ def delete(self, tag):
+ self.get_root().iscsi_delete_portal_group(tag=tag)
+
+ def ui_command_create(self, tag, portal_list):
+ """Add a portal group.
+
+ Args:
+ portals: List of portals e.g. ip:port ip2:port2
+ tag: Portal group tag (unique, integer > 0)
+ """
+ portals = []
+ for portal in portal_list.strip().split(" "):
+ host = portal
+ cpumask = None
+ if "@" in portal:
+ host, cpumask = portal.split("@")
+ if ":" not in host:
+ raise ExecutionError("Incorrect format of portal group. Port is missing."
+ "Use 'help create' to see the command syntax.")
+ host, port = host.rsplit(":", -1)
+ portals.append({'host': host, 'port': port})
+ if cpumask:
+ print("WARNING: Specifying a CPU mask for portal groups is no longer supported. Ignoring.")
+ tag = self.ui_eval_param(tag, "number", None)
+ self.get_root().construct_portal_group(tag=tag, portals=portals)
+
+ def ui_command_delete(self, tag):
+ """Delete a portal group with given tag (unique, integer > 0))"""
+ tag = self.ui_eval_param(tag, "number", None)
+ self.delete(tag)
+
+ def ui_command_delete_all(self):
+ """Delete all portal groups"""
+ rpc_messages = ""
+ for pg in self.pgs:
+ try:
+ self.delete(pg.tag)
+ except JSONRPCException as e:
+ rpc_messages += e.message
+ if rpc_messages:
+ raise JSONRPCException(rpc_messages)
+
+ def refresh(self):
+ self._children = set([])
+ self.pgs = list(self.get_root().iscsi_get_portal_groups())
+ for pg in self.pgs:
+ try:
+ UIPortalGroup(pg, self)
+ except JSONRPCException as e:
+ self.shell.log.error(e.message)
+
+ def summary(self):
+ return "Portal groups: %d" % len(self.pgs), None
+
+
+class UIPortalGroup(UINode):
+    """Node for one portal group; children are its portals."""
+
+    def __init__(self, pg, parent):
+        UINode.__init__(self, "portal_group%s" % pg.tag, parent)
+        self.pg = pg
+        self.refresh()
+
+    def refresh(self):
+        self._children = set([])
+        for portal in self.pg.portals:
+            UIPortal(portal['host'], portal['port'], self)
+
+    def summary(self):
+        return "Portals: %d" % len(self.pg.portals), None
+
+
+class UIPortal(UINode):
+    """Leaf node for a single portal (host/port pair); display-only."""
+
+    def __init__(self, host, port, parent):
+        UINode.__init__(self, "host=%s, port=%s" % (
+            host, port), parent)
+        self.refresh()
+
+
+class UIInitiatorGroups(UINode):
+    """Menu node for managing iSCSI initiator groups."""
+
+    def __init__(self, parent):
+        UINode.__init__(self, "initiator_groups", parent)
+        self.refresh()
+
+    def delete(self, tag):
+        # Delete the initiator group identified by tag via RPC.
+        self.get_root().iscsi_delete_initiator_group(tag=tag)
+
+    def ui_command_create(self, tag, initiator_list, netmask_list):
+        """Add an initiator group.
+
+        Args:
+           tag: Initiator group tag (unique, integer > 0)
+           initiators: List of initiator hostnames or IP addresses
+                       separated with whitespaces, e.g. 127.0.0.1 192.168.200.100
+           netmasks: List of initiator netmasks separated with whitespaces,
+                     e.g. 255.255.0.0 255.248.0.0
+        """
+        tag = self.ui_eval_param(tag, "number", None)
+        self.get_root().construct_initiator_group(
+            tag=tag, initiators=initiator_list.split(" "),
+            netmasks=netmask_list.split(" "))
+
+    def ui_command_delete(self, tag):
+        """Delete an initiator group.
+
+        Args:
+           tag: Initiator group tag (unique, integer > 0)
+        """
+        tag = self.ui_eval_param(tag, "number", None)
+        self.delete(tag)
+
+    def ui_command_delete_all(self):
+        """Delete all initiator groups"""
+        # Collect RPC errors and re-raise once at the end.
+        rpc_messages = ""
+        for ig in self.igs:
+            try:
+                self.delete(ig.tag)
+            except JSONRPCException as e:
+                rpc_messages += e.message
+        if rpc_messages:
+            raise JSONRPCException(rpc_messages)
+
+    def ui_command_add_initiator(self, tag, initiators, netmasks):
+        """Add initiators to an existing initiator group.
+
+        Args:
+           tag: Initiator group tag (unique, integer > 0)
+           initiators: List of initiator hostnames or IP addresses,
+                       e.g. 127.0.0.1 192.168.200.100
+           netmasks: List of initiator netmasks,
+                     e.g. 255.255.0.0 255.248.0.0
+        """
+        tag = self.ui_eval_param(tag, "number", None)
+        self.get_root().iscsi_initiator_group_add_initiators(
+            tag=tag, initiators=initiators.split(" "),
+            netmasks=netmasks.split(" "))
+
+    def ui_command_delete_initiator(self, tag, initiators=None, netmasks=None):
+        """Delete initiators from an existing initiator group.
+
+        Args:
+           tag: Initiator group tag (unique, integer > 0)
+           initiators: List of initiator hostnames or IP addresses, e.g. 127.0.0.1 192.168.200.100
+           netmasks: List of initiator netmasks, e.g. 255.255.0.0 255.248.0.0
+        """
+        tag = self.ui_eval_param(tag, "number", None)
+        # Both list arguments are optional; split only when provided.
+        if initiators:
+            initiators = initiators.split(" ")
+        if netmasks:
+            netmasks = netmasks.split(" ")
+        self.get_root().iscsi_initiator_group_remove_initiators(
+            tag=tag, initiators=initiators,
+            netmasks=netmasks)
+
+    def refresh(self):
+        self._children = set([])
+        self.igs = list(self.get_root().iscsi_get_initiator_groups())
+        for ig in self.igs:
+            UIInitiatorGroup(ig, self)
+
+    def summary(self):
+        return "Initiator groups: %d" % len(self.igs), None
+
+
+class UIInitiatorGroup(UINode):
+    """Node for one initiator group; children pair initiators with netmasks."""
+
+    def __init__(self, ig, parent):
+        UINode.__init__(self, "initiator_group%s" % ig.tag, parent)
+        self.ig = ig
+        self.refresh()
+
+    def refresh(self):
+        self._children = set([])
+        # Initiators and netmasks are parallel lists; zip pairs them up.
+        for initiator, netmask in zip(self.ig.initiators, self.ig.netmasks):
+            UIInitiator(initiator, netmask, self)
+
+    def summary(self):
+        return "Initiators: %d" % len(self.ig.initiators), None
+
+
+class UIInitiator(UINode):
+    """Leaf node for one initiator/netmask pair; display-only."""
+
+    def __init__(self, initiator, netmask, parent):
+        UINode.__init__(self, "hostname=%s, netmask=%s" % (initiator, netmask), parent)
+        self.refresh()
+
+
+class UIISCSIConnections(UINode):
+    """Menu node listing active iSCSI connections reported by the target."""
+
+    def __init__(self, parent):
+        UINode.__init__(self, "iscsi_connections", parent)
+        self.refresh()
+
+    def refresh(self):
+        self._children = set([])
+        self.iscsicons = list(self.get_root().iscsi_get_connections())
+        for ic in self.iscsicons:
+            UIISCSIConnection(ic, self)
+
+    def summary(self):
+        return "Connections: %d" % len(self.iscsicons), None
+
+
+class UIISCSIConnection(UINode):
+ def __init__(self, ic, parent):
+ UINode.__init__(self, "%s" % ic['id'], parent)
+ self.ic = ic
+ self.refresh()
+
+ def refresh(self):
+ self._children = set([])
+ for key, val in self.ic.items():
+ if key == "id":
+ continue
+ UIISCSIConnectionDetails("%s: %s" % (key, val), self)
+
+
+class UIISCSIConnectionDetails(UINode):
+    """Leaf node carrying one "key: value" line of connection info."""
+
+    def __init__(self, info, parent):
+        UINode.__init__(self, "%s" % info, parent)
+        self.refresh()
+
+
+class UIISCSIAuthGroups(UINode):
+    """Menu node for managing CHAP authentication groups and their secrets."""
+
+    def __init__(self, parent):
+        UINode.__init__(self, "auth_groups", parent)
+        self.refresh()
+
+    def refresh(self):
+        self._children = set([])
+        self.iscsi_auth_groups = list(self.get_root().iscsi_get_auth_groups())
+        # Defensive: treat a missing result the same as an empty list.
+        if self.iscsi_auth_groups is None:
+            self.iscsi_auth_groups = []
+        for ag in self.iscsi_auth_groups:
+            UIISCSIAuthGroup(ag, self)
+
+    def delete(self, tag):
+        # Delete the auth group identified by tag via RPC.
+        self.get_root().iscsi_delete_auth_group(tag=tag)
+
+    def delete_secret(self, tag, user):
+        # Remove one user's secret from the given auth group via RPC.
+        self.get_root().iscsi_auth_group_remove_secret(
+            tag=tag, user=user)
+
+    def ui_command_create(self, tag, secrets=None):
+        """Add authentication group for CHAP authentication.
+
+        Args:
+           tag: Authentication group tag (unique, integer > 0).
+        Optional args:
+           secrets: Array of secrets objects separated by comma sign,
+                    e.g. user:test secret:test muser:mutual_test msecret:mutual_test
+        """
+        tag = self.ui_eval_param(tag, "number", None)
+        if secrets:
+            # Comma separates secret objects; spaces separate key:value
+            # pairs within one object.
+            secrets = [dict(u.split(":") for u in a.split(" "))
+                       for a in secrets.split(",")]
+        self.get_root().iscsi_create_auth_group(tag=tag, secrets=secrets)
+
+    def ui_command_delete(self, tag):
+        """Delete an authentication group.
+
+        Args:
+           tag: Authentication group tag (unique, integer > 0)
+        """
+        tag = self.ui_eval_param(tag, "number", None)
+        self.delete(tag)
+
+    def ui_command_delete_all(self):
+        """Delete all authentication groups."""
+        # Collect RPC errors and re-raise once at the end.
+        rpc_messages = ""
+        for iscsi_auth_group in self.iscsi_auth_groups:
+            try:
+                self.delete(iscsi_auth_group['tag'])
+            except JSONRPCException as e:
+                rpc_messages += e.message
+        if rpc_messages:
+            raise JSONRPCException(rpc_messages)
+
+    def ui_command_add_secret(self, tag, user, secret,
+                              muser=None, msecret=None):
+        """Add a secret to an authentication group.
+
+        Args:
+           tag: Authentication group tag (unique, integer > 0)
+           user: User name for one-way CHAP authentication
+           secret: Secret for one-way CHAP authentication
+        Optional args:
+           muser: User name for mutual CHAP authentication
+           msecret: Secret for mutual CHAP authentication
+        """
+        tag = self.ui_eval_param(tag, "number", None)
+        self.get_root().iscsi_auth_group_add_secret(
+            tag=tag, user=user, secret=secret,
+            muser=muser, msecret=msecret)
+
+    def ui_command_delete_secret(self, tag, user):
+        """Delete a secret from an authentication group.
+
+        Args:
+           tag: Authentication group tag (unique, integer > 0)
+           user: User name for one-way CHAP authentication
+        """
+        tag = self.ui_eval_param(tag, "number", None)
+        self.delete_secret(tag, user)
+
+    def ui_command_delete_secret_all(self, tag):
+        """Delete all secrets from an authentication group.
+
+        Args:
+           tag: Authentication group tag (unique, integer > 0)
+        """
+        rpc_messages = ""
+        tag = self.ui_eval_param(tag, "number", None)
+        for ag in self.iscsi_auth_groups:
+            if ag['tag'] == tag:
+                for secret in ag['secrets']:
+                    try:
+                        self.delete_secret(tag, secret['user'])
+                    except JSONRPCException as e:
+                        rpc_messages += e.message
+        if rpc_messages:
+            raise JSONRPCException(rpc_messages)
+
+    def summary(self):
+        return "Groups: %s" % len(self.iscsi_auth_groups), None
+
+
+class UIISCSIAuthGroup(UINode):
+    """Node for one auth group; children are its secret entries."""
+
+    def __init__(self, ag, parent):
+        UINode.__init__(self, "group" + str(ag['tag']), parent)
+        self.ag = ag
+        self.refresh()
+
+    def refresh(self):
+        self._children = set([])
+        for secret in self.ag['secrets']:
+            UISCSIAuthSecret(secret, self)
+
+    def summary(self):
+        return "Secrets: %s" % len(self.ag['secrets']), None
+
+
+class UISCSIAuthSecret(UINode):
+ def __init__(self, secret, parent):
+ info_list = ["%s=%s" % (key, val)
+ for key, val in secret.items()]
+ info_list.sort(reverse=True)
+ info = ", ".join(info_list)
+ UINode.__init__(self, info, parent)
+ self.secret = secret
+ self.refresh()
diff --git a/src/spdk/scripts/spdkcli/ui_node_nvmf.py b/src/spdk/scripts/spdkcli/ui_node_nvmf.py
new file mode 100644
index 000000000..1b25298d1
--- /dev/null
+++ b/src/spdk/scripts/spdkcli/ui_node_nvmf.py
@@ -0,0 +1,363 @@
+from rpc.client import JSONRPCException
+from .ui_node import UINode
+
+
+class UINVMf(UINode):
+    """Top-level "nvmf" menu node grouping subsystem and transport menus."""
+
+    def __init__(self, parent):
+        UINode.__init__(self, "nvmf", parent)
+        self.refresh()
+
+    def refresh(self):
+        self._children = set([])
+        UINVMfSubsystems(self)
+        UINVMfTransports(self)
+
+
+class UINVMfTransports(UINode):
+    """Menu node for creating and listing NVMe-oF transports."""
+
+    def __init__(self, parent):
+        UINode.__init__(self, "transport", parent)
+        self.refresh()
+
+    def refresh(self):
+        self._children = set([])
+        for transport in self.get_root().nvmf_get_transports():
+            UINVMfTransport(transport, self)
+
+    def ui_command_create(self, trtype, max_queue_depth=None, max_io_qpairs_per_ctrlr=None,
+                          in_capsule_data_size=None, max_io_size=None, io_unit_size=None, max_aq_depth=None):
+        """Create a transport with given parameters
+
+        Arguments:
+            trtype - Example: 'RDMA'.
+            max_queue_depth - Optional parameter. Integer, max value 65535.
+            max_io_qpairs_per_ctrlr - Optional parameter. 16 bit Integer, max value 65535.
+            in_capsule_data_size - Optional parameter. 32 bit Integer, max value 4294967295
+            max_io_size - Optional parameter. 32 bit integer, max value 4294967295
+            io_unit_size - Optional parameter. 32 bit integer, max value 4294967295
+            max_aq_depth - Optional parameter. 32 bit integer, max value 4294967295
+        """
+        # All numeric options arrive as strings from the shell; convert
+        # them (None stays None).
+        max_queue_depth = self.ui_eval_param(max_queue_depth, "number", None)
+        max_io_qpairs_per_ctrlr = self.ui_eval_param(max_io_qpairs_per_ctrlr, "number", None)
+        in_capsule_data_size = self.ui_eval_param(in_capsule_data_size, "number", None)
+        max_io_size = self.ui_eval_param(max_io_size, "number", None)
+        io_unit_size = self.ui_eval_param(io_unit_size, "number", None)
+        max_aq_depth = self.ui_eval_param(max_aq_depth, "number", None)
+
+        self.get_root().create_nvmf_transport(trtype=trtype,
+                                              max_queue_depth=max_queue_depth,
+                                              max_io_qpairs_per_ctrlr=max_io_qpairs_per_ctrlr,
+                                              in_capsule_data_size=in_capsule_data_size,
+                                              max_io_size=max_io_size,
+                                              io_unit_size=io_unit_size,
+                                              max_aq_depth=max_aq_depth)
+
+    def summary(self):
+        return "Transports: %s" % len(self.children), None
+
+
+class UINVMfTransport(UINode):
+    """Leaf node for one transport, named by its transport type."""
+
+    def __init__(self, transport, parent):
+        UINode.__init__(self, transport.trtype, parent)
+        self.transport = transport
+
+
+class UINVMfSubsystems(UINode):
+    """Menu node for creating, listing and deleting NVMe-oF subsystems."""
+
+    def __init__(self, parent):
+        UINode.__init__(self, "subsystem", parent)
+        self.refresh()
+
+    def refresh(self):
+        self._children = set([])
+        for subsystem in self.get_root().nvmf_get_subsystems():
+            UINVMfSubsystem(subsystem, self)
+
+    def delete(self, subsystem_nqn):
+        # Delete the subsystem identified by NQN via RPC.
+        self.get_root().nvmf_delete_subsystem(nqn=subsystem_nqn)
+
+    def ui_command_create(self, nqn, serial_number=None,
+                          max_namespaces=None, allow_any_host="false"):
+        """Create subsystem with given parameters.
+
+        Arguments:
+            nqn - Target nqn(ASCII).
+            serial_number - Example: 'SPDK00000000000001'.
+            max_namespaces - Optional parameter. Maximum number of namespaces allowed to added during
+                             active connection
+            allow_any_host - Optional parameter. Allow any host to connect (don't enforce host NQN
+                             whitelist)
+        """
+        allow_any_host = self.ui_eval_param(allow_any_host, "bool", False)
+        max_namespaces = self.ui_eval_param(max_namespaces, "number", 0)
+        self.get_root().create_nvmf_subsystem(nqn=nqn, serial_number=serial_number,
+                                              allow_any_host=allow_any_host,
+                                              max_namespaces=max_namespaces)
+
+    def ui_command_delete(self, subsystem_nqn):
+        """Delete subsystem with given nqn.
+
+        Arguments:
+            nqn_subsystem - Name of subsystem to delete
+        """
+        self.delete(subsystem_nqn)
+
+    def ui_command_delete_all(self):
+        """Delete all subsystems"""
+        # Collect RPC errors and re-raise once at the end.
+        rpc_messages = ""
+        for child in self._children:
+            try:
+                self.delete(child.subsystem.nqn)
+            except JSONRPCException as e:
+                rpc_messages += e.message
+        if rpc_messages:
+            raise JSONRPCException(rpc_messages)
+
+    def summary(self):
+        return "Subsystems: %s" % len(self.children), None
+
+
+class UINVMfSubsystem(UINode):
+ def __init__(self, subsystem, parent):
+ UINode.__init__(self, subsystem.nqn, parent)
+ self.subsystem = subsystem
+ self.refresh()
+
+ def refresh(self):
+ self._children = set([])
+ UINVMfSubsystemListeners(self.subsystem.listen_addresses, self)
+ UINVMfSubsystemHosts(self.subsystem.hosts, self)
+ if hasattr(self.subsystem, 'namespaces'):
+ UINVMfSubsystemNamespaces(self.subsystem.namespaces, self)
+
+ def refresh_node(self):
+ for subsystem in self.get_root().nvmf_get_subsystems():
+ if subsystem.nqn == self.subsystem.nqn:
+ self.subsystem = subsystem
+ self.refresh()
+
+ def ui_command_show_details(self):
+ self.shell.log.info(json.dumps(vars(self.lvs), indent=2))
+
+ def ui_command_allow_any_host(self, disable="false"):
+ """Disable or or enable allow_any_host flag.
+
+ Arguments:
+ disable - Optional parameter. If false then enable, if true disable
+ """
+ disable = self.ui_eval_param(disable, "bool", None)
+ self.get_root().nvmf_subsystem_allow_any_host(
+ nqn=self.subsystem.nqn, disable=disable)
+
+ def summary(self):
+ sn = None
+ if hasattr(self.subsystem, 'serial_number'):
+ sn = "sn=%s" % self.subsystem.serial_number
+ st = None
+ if hasattr(self.subsystem, 'subtype'):
+ st = "st=%s" % self.subsystem.subtype
+ allow_any_host = None
+ if self.subsystem.allow_any_host:
+ allow_any_host = "Allow any host"
+ info = ", ".join(filter(None, [sn, st, allow_any_host]))
+ return info, None
+
+
+class UINVMfSubsystemListeners(UINode):
+    """Menu node for a subsystem's listen addresses."""
+
+    def __init__(self, listen_addresses, parent):
+        UINode.__init__(self, "listen_addresses", parent)
+        self.listen_addresses = listen_addresses
+        self.refresh()
+
+    def refresh(self):
+        self._children = set([])
+        for address in self.listen_addresses:
+            UINVMfSubsystemListener(address, self)
+
+    def refresh_node(self):
+        # Re-fetch the parent subsystem's listener list and rebuild.
+        for subsystem in self.get_root().nvmf_get_subsystems():
+            if subsystem.nqn == self.parent.subsystem.nqn:
+                self.listen_addresses = subsystem.listen_addresses
+                self.refresh()
+
+    def delete(self, trtype, traddr, trsvcid, adrfam=None):
+        # Remove one listener from the parent subsystem via RPC.
+        self.get_root().nvmf_subsystem_remove_listener(
+            nqn=self.parent.subsystem.nqn, trtype=trtype,
+            traddr=traddr, trsvcid=trsvcid, adrfam=adrfam)
+
+    def ui_command_create(self, trtype, traddr, trsvcid, adrfam):
+        """Create address listener for subsystem.
+
+        Arguments:
+            trtype - NVMe-oF transport type: e.g., rdma.
+            traddr - NVMe-oF transport address: e.g., an ip address.
+            trsvcid - NVMe-oF transport service id: e.g., a port number.
+            adrfam - NVMe-oF transport adrfam: e.g., ipv4, ipv6, ib, fc.
+        """
+        self.get_root().nvmf_subsystem_add_listener(
+            nqn=self.parent.subsystem.nqn, trtype=trtype, traddr=traddr,
+            trsvcid=trsvcid, adrfam=adrfam)
+
+    def ui_command_delete(self, trtype, traddr, trsvcid, adrfam=None):
+        """Remove address listener for subsystem.
+
+        Arguments:
+            trtype - Transport type (RDMA)
+            traddr - NVMe-oF transport address: e.g., an ip address.
+            trsvcid - NVMe-oF transport service id: e.g., a port number.
+            adrfam - Optional argument. Address family ("IPv4", "IPv6", "IB" or "FC").
+        """
+        self.delete(trtype, traddr, trsvcid, adrfam)
+
+    def ui_command_delete_all(self):
+        """Remove all address listeners from subsystem."""
+        # Collect RPC errors and re-raise once at the end.
+        rpc_messages = ""
+        for la in self.listen_addresses:
+            try:
+                self.delete(la['trtype'], la['traddr'], la['trsvcid'], la['adrfam'])
+            except JSONRPCException as e:
+                rpc_messages += e.message
+        if rpc_messages:
+            raise JSONRPCException(rpc_messages)
+
+    def summary(self):
+        return "Addresses: %s" % len(self.listen_addresses), None
+
+
+class UINVMfSubsystemListener(UINode):
+    """Leaf node for one listener, named "traddr:trsvcid"."""
+
+    def __init__(self, address, parent):
+        UINode.__init__(self, "%s:%s" % (address['traddr'], address['trsvcid']),
+                        parent)
+        self.address = address
+
+    def summary(self):
+        return "%s" % self.address['trtype'], True
+
+
+class UINVMfSubsystemHosts(UINode):
+    """Menu node for a subsystem's allowed-host NQN list."""
+
+    def __init__(self, hosts, parent):
+        UINode.__init__(self, "hosts", parent)
+        self.hosts = hosts
+        self.refresh()
+
+    def refresh(self):
+        self._children = set([])
+        for host in self.hosts:
+            UINVMfSubsystemHost(host, self)
+
+    def refresh_node(self):
+        # Re-fetch the parent subsystem's host list and rebuild.
+        for subsystem in self.get_root().nvmf_get_subsystems():
+            if subsystem.nqn == self.parent.subsystem.nqn:
+                self.hosts = subsystem.hosts
+                self.refresh()
+
+    def delete(self, host):
+        # Remove one host NQN from the parent subsystem via RPC.
+        self.get_root().nvmf_subsystem_remove_host(
+            nqn=self.parent.subsystem.nqn, host=host)
+
+    def ui_command_create(self, host):
+        """Add a host NQN to the whitelist of allowed hosts.
+
+        Args:
+            host: Host NQN to add to the list of allowed host NQNs
+        """
+        self.get_root().nvmf_subsystem_add_host(
+            nqn=self.parent.subsystem.nqn, host=host)
+
+    def ui_command_delete(self, host):
+        """Delete host from subsystem.
+
+        Arguments:
+           host - NQN of host to remove.
+        """
+        self.delete(host)
+
+    def ui_command_delete_all(self):
+        """Delete all hosts from subsystem."""
+        # Collect RPC errors and re-raise once at the end.
+        rpc_messages = ""
+        for host in self.hosts:
+            try:
+                self.delete(host['nqn'])
+            except JSONRPCException as e:
+                rpc_messages += e.message
+        if rpc_messages:
+            raise JSONRPCException(rpc_messages)
+
+    def summary(self):
+        return "Hosts: %s" % len(self.hosts), None
+
+
+class UINVMfSubsystemHost(UINode):
+    """Leaf node for one allowed host NQN; display-only."""
+
+    def __init__(self, host, parent):
+        UINode.__init__(self, "%s" % host['nqn'], parent)
+        self.host = host
+
+
+class UINVMfSubsystemNamespaces(UINode):
+    """Menu node for a subsystem's namespaces."""
+
+    def __init__(self, namespaces, parent):
+        UINode.__init__(self, "namespaces", parent)
+        self.namespaces = namespaces
+        self.refresh()
+
+    def refresh(self):
+        self._children = set([])
+        for namespace in self.namespaces:
+            UINVMfSubsystemNamespace(namespace, self)
+
+    def refresh_node(self):
+        # Re-fetch the parent subsystem's namespace list and rebuild.
+        for subsystem in self.get_root().nvmf_get_subsystems():
+            if subsystem.nqn == self.parent.subsystem.nqn:
+                self.namespaces = subsystem.namespaces
+                self.refresh()
+
+    def delete(self, nsid):
+        # Remove one namespace from the parent subsystem via RPC.
+        self.get_root().nvmf_subsystem_remove_ns(
+            nqn=self.parent.subsystem.nqn, nsid=nsid)
+
+    def ui_command_create(self, bdev_name, nsid=None,
+                          nguid=None, eui64=None, uuid=None):
+        """Add a namespace to a subsystem.
+
+        Args:
+            bdev_name: Name of bdev to expose as a namespace.
+        Optional args:
+            nsid: Namespace ID.
+            nguid: 16-byte namespace globally unique identifier in hexadecimal.
+            eui64: 8-byte namespace EUI-64 in hexadecimal (e.g. "ABCDEF0123456789").
+            uuid: Namespace UUID.
+        """
+        nsid = self.ui_eval_param(nsid, "number", None)
+        self.get_root().nvmf_subsystem_add_ns(
+            nqn=self.parent.subsystem.nqn, bdev_name=bdev_name,
+            nsid=nsid, nguid=nguid, eui64=eui64, uuid=uuid)
+
+    def ui_command_delete(self, nsid):
+        """Delete namespace from subsystem.
+
+        Arguments:
+            nsid - Id of namespace to remove.
+        """
+        nsid = self.ui_eval_param(nsid, "number", None)
+        self.delete(nsid)
+
+    def ui_command_delete_all(self):
+        """Delete all namespaces from subsystem."""
+        # Collect RPC errors and re-raise once at the end.
+        rpc_messages = ""
+        for namespace in self.namespaces:
+            try:
+                self.delete(namespace['nsid'])
+            except JSONRPCException as e:
+                rpc_messages += e.message
+        if rpc_messages:
+            raise JSONRPCException(rpc_messages)
+
+    def summary(self):
+        return "Namespaces: %s" % len(self.namespaces), None
+
+
+class UINVMfSubsystemNamespace(UINode):
+    """Leaf node for one namespace, named by its backing bdev."""
+
+    def __init__(self, namespace, parent):
+        UINode.__init__(self, namespace['bdev_name'], parent)
+        self.namespace = namespace
+
+    def summary(self):
+        return ", ".join([self.namespace['name'], str(self.namespace['nsid'])]), None
diff --git a/src/spdk/scripts/spdkcli/ui_root.py b/src/spdk/scripts/spdkcli/ui_root.py
new file mode 100644
index 000000000..cec8eb5f9
--- /dev/null
+++ b/src/spdk/scripts/spdkcli/ui_root.py
@@ -0,0 +1,560 @@
+from .ui_node import UINode, UIBdevs, UILvolStores, UIVhosts
+from .ui_node_nvmf import UINVMf
+from .ui_node_iscsi import UIISCSI
+import rpc.client
+import rpc
+from functools import wraps
+
+
+class UIRoot(UINode):
+ """
+ Root node for CLI menu tree structure. Refreshes running config on startup.
+ """
+ def __init__(self, client, shell):
+ UINode.__init__(self, "/", shell=shell)
+ self.current_bdevs = []
+ self.current_lvol_stores = []
+ self.current_vhost_ctrls = []
+ self.current_nvmf_transports = []
+ self.current_nvmf_subsystems = []
+ self.set_rpc_target(client)
+ self.verbose = False
+ self.is_init = self.check_init()
+ self.methods = []
+
+ def refresh(self):
+ self.methods = self.rpc_get_methods(current=True)
+ if self.is_init is False:
+ methods = "\n".join(self.methods)
+ self.shell.log.warning("SPDK Application is not yet initialized.\n"
+ "Please initialize subsystems with framework_start_init command.\n"
+ "List of available commands in current state:\n"
+ "%s" % methods)
+ else:
+ # Pass because we'd like to build main tree structure for "ls"
+ # even if state is uninitialized
+ pass
+
+ self._children = set([])
+ UIBdevs(self)
+ UILvolStores(self)
+ if self.has_subsystem("vhost"):
+ UIVhosts(self)
+ if self.has_subsystem("nvmf"):
+ UINVMf(self)
+ if self.has_subsystem("iscsi"):
+ UIISCSI(self)
+
+ def set_rpc_target(self, client):
+ self.client = client
+
+ def print_array(self, a):
+ return " ".join(a)
+
+ def verbose(f):
+ # For any configuration calls (create, delete, construct, etc.)
+ # Check if verbose option is to be used and set appropriately.
+ # Do not use for "get_*" methods so that output is not
+ # flooded.
+ def w(self, **kwargs):
+ self.client.log_set_level("INFO" if self.verbose else "ERROR")
+ r = f(self, **kwargs)
+ self.client.log_set_level("ERROR")
+ return r
+ return w
+
+ def is_method_available(f):
+ # Check if method f is available for given spdk target
+ def w(self, **kwargs):
+ if f.__name__ in self.methods:
+ r = f(self, **kwargs)
+ return r
+ # If given method is not available return empty list
+ # similar to real get_* like rpc
+ return []
+ return w
+
+ def ui_command_framework_start_init(self):
+ if rpc.framework_start_init(self.client):
+ self.is_init = True
+ self.refresh()
+
+ def ui_command_load_config(self, filename):
+ with open(filename, "r") as fd:
+ rpc.load_config(self.client, fd)
+
+ def ui_command_load_subsystem_config(self, filename):
+ with open(filename, "r") as fd:
+ rpc.load_subsystem_config(self.client, fd)
+
+ def ui_command_save_config(self, filename, indent=2):
+ with open(filename, "w") as fd:
+ rpc.save_config(self.client, fd, indent)
+
+ def ui_command_save_subsystem_config(self, filename, subsystem, indent=2):
+ with open(filename, "w") as fd:
+ rpc.save_subsystem_config(self.client, fd, indent, subsystem)
+
+ def rpc_get_methods(self, current=False):
+ return rpc.rpc_get_methods(self.client, current=current)
+
+ def check_init(self):
+ return "framework_start_init" not in self.rpc_get_methods(current=True)
+
+ def bdev_get_bdevs(self, bdev_type):
+ if self.is_init:
+ self.current_bdevs = rpc.bdev.bdev_get_bdevs(self.client)
+ # Following replace needs to be done in order for some of the bdev
+ # listings to work: logical volumes, split disk.
+ # For example logical volumes: listing in menu is "Logical_Volume"
+ # (cannot have space), but the product name in SPDK is "Logical Volume"
+ bdev_type = bdev_type.replace("_", " ")
+ for bdev in [x for x in self.current_bdevs if bdev_type in x["product_name"].lower()]:
+ test = Bdev(bdev)
+ yield test
+
+ def bdev_get_iostat(self, **kwargs):
+ return rpc.bdev.bdev_get_iostat(self.client, **kwargs)
+
+ @verbose
+ def bdev_split_create(self, **kwargs):
+ response = rpc.bdev.bdev_split_create(self.client, **kwargs)
+ return self.print_array(response)
+
+ @verbose
+ def bdev_split_delete(self, **kwargs):
+ rpc.bdev.bdev_split_delete(self.client, **kwargs)
+
+ @verbose
+ def create_malloc_bdev(self, **kwargs):
+ response = rpc.bdev.bdev_malloc_create(self.client, **kwargs)
+ return response
+
+ @verbose
+ def bdev_malloc_delete(self, **kwargs):
+ rpc.bdev.bdev_malloc_delete(self.client, **kwargs)
+
+ @verbose
+ def create_iscsi_bdev(self, **kwargs):
+ response = rpc.bdev.bdev_iscsi_create(self.client, **kwargs)
+ return response
+
+ @verbose
+ def bdev_iscsi_delete(self, **kwargs):
+ rpc.bdev.bdev_iscsi_delete(self.client, **kwargs)
+
+ @verbose
+ def bdev_aio_create(self, **kwargs):
+ response = rpc.bdev.bdev_aio_create(self.client, **kwargs)
+ return response
+
+ @verbose
+ def bdev_aio_delete(self, **kwargs):
+ rpc.bdev.bdev_aio_delete(self.client, **kwargs)
+
+ @verbose
+ def create_lvol_bdev(self, **kwargs):
+ response = rpc.lvol.bdev_lvol_create(self.client, **kwargs)
+ return response
+
+ @verbose
+ def bdev_lvol_delete(self, **kwargs):
+ response = rpc.lvol.bdev_lvol_delete(self.client, **kwargs)
+ return response
+
+ @verbose
+ def create_nvme_bdev(self, **kwargs):
+ response = rpc.bdev.bdev_nvme_attach_controller(self.client, **kwargs)
+ return response
+
+ @verbose
+ def bdev_nvme_detach_controller(self, **kwargs):
+ rpc.bdev.bdev_nvme_detach_controller(self.client, **kwargs)
+
+ @verbose
+ def bdev_null_create(self, **kwargs):
+ response = rpc.bdev.bdev_null_create(self.client, **kwargs)
+ return response
+
+ @verbose
+ def bdev_null_delete(self, **kwargs):
+ rpc.bdev.bdev_null_delete(self.client, **kwargs)
+
+ @verbose
+ def create_error_bdev(self, **kwargs):
+ response = rpc.bdev.bdev_error_create(self.client, **kwargs)
+
+ @verbose
+ def bdev_error_delete(self, **kwargs):
+ rpc.bdev.bdev_error_delete(self.client, **kwargs)
+
+ @verbose
+ @is_method_available
+ def bdev_lvol_get_lvstores(self):
+ if self.is_init:
+ self.current_lvol_stores = rpc.lvol.bdev_lvol_get_lvstores(self.client)
+ for lvs in self.current_lvol_stores:
+ yield LvolStore(lvs)
+
+ @verbose
+ def bdev_lvol_create_lvstore(self, **kwargs):
+ response = rpc.lvol.bdev_lvol_create_lvstore(self.client, **kwargs)
+ return response
+
+ @verbose
+ def bdev_lvol_delete_lvstore(self, **kwargs):
+ rpc.lvol.bdev_lvol_delete_lvstore(self.client, **kwargs)
+
+ @verbose
+ def bdev_pmem_create_pool(self, **kwargs):
+ response = rpc.pmem.bdev_pmem_create_pool(self.client, **kwargs)
+ return response
+
+ @verbose
+ def bdev_pmem_delete_pool(self, **kwargs):
+ rpc.pmem.bdev_pmem_delete_pool(self.client, **kwargs)
+
+ @verbose
+ def bdev_pmem_get_pool_info(self, **kwargs):
+ response = rpc.pmem.bdev_pmem_get_pool_info(self.client, **kwargs)
+ return response
+
+ @verbose
+ def bdev_pmem_create(self, **kwargs):
+ response = rpc.bdev.bdev_pmem_create(self.client, **kwargs)
+ return response
+
+ @verbose
+ def bdev_pmem_delete(self, **kwargs):
+ response = rpc.bdev.bdev_pmem_delete(self.client, **kwargs)
+ return response
+
+ @verbose
+ def create_rbd_bdev(self, **kwargs):
+ response = rpc.bdev.bdev_rbd_create(self.client, **kwargs)
+ return response
+
+ @verbose
+ def bdev_rbd_delete(self, **kwargs):
+ response = rpc.bdev.bdev_rbd_delete(self.client, **kwargs)
+ return response
+
+ @verbose
+ def create_virtio_dev(self, **kwargs):
+ response = rpc.vhost.bdev_virtio_attach_controller(self.client, **kwargs)
+ return self.print_array(response)
+
+ @verbose
+ def bdev_virtio_detach_controller(self, **kwargs):
+ response = rpc.vhost.bdev_virtio_detach_controller(self.client, **kwargs)
+ return response
+
+ @verbose
+ def bdev_raid_create(self, **kwargs):
+ rpc.bdev.bdev_raid_create(self.client, **kwargs)
+
+ @verbose
+ def bdev_raid_delete(self, **kwargs):
+ rpc.bdev.bdev_raid_delete(self.client, **kwargs)
+
+ @verbose
+ @is_method_available
+ def bdev_virtio_scsi_get_devices(self):
+ if self.is_init:
+ for bdev in rpc.vhost.bdev_virtio_scsi_get_devices(self.client):
+ test = Bdev(bdev)
+ yield test
+
+ def list_vhost_ctrls(self):
+ if self.is_init:
+ self.current_vhost_ctrls = rpc.vhost.vhost_get_controllers(self.client)
+
+ @verbose
+ @is_method_available
+ def vhost_get_controllers(self, ctrlr_type):
+ if self.is_init:
+ self.list_vhost_ctrls()
+ for ctrlr in [x for x in self.current_vhost_ctrls if ctrlr_type in list(x["backend_specific"].keys())]:
+ yield VhostCtrlr(ctrlr)
+
+ @verbose
+ def vhost_delete_controller(self, **kwargs):
+ rpc.vhost.vhost_delete_controller(self.client, **kwargs)
+
+ @verbose
+ def vhost_create_scsi_controller(self, **kwargs):
+ rpc.vhost.vhost_create_scsi_controller(self.client, **kwargs)
+
+ @verbose
+ def vhost_create_blk_controller(self, **kwargs):
+ rpc.vhost.vhost_create_blk_controller(self.client, **kwargs)
+
+ @verbose
+ def vhost_scsi_controller_remove_target(self, **kwargs):
+ rpc.vhost.vhost_scsi_controller_remove_target(self.client, **kwargs)
+
+ @verbose
+ def vhost_scsi_controller_add_target(self, **kwargs):
+ rpc.vhost.vhost_scsi_controller_add_target(self.client, **kwargs)
+
+ def vhost_controller_set_coalescing(self, **kwargs):
+ rpc.vhost.vhost_controller_set_coalescing(self.client, **kwargs)
+
+ @verbose
+ def create_nvmf_transport(self, **kwargs):
+ rpc.nvmf.nvmf_create_transport(self.client, **kwargs)
+
+ def list_nvmf_transports(self):
+ if self.is_init:
+ self.current_nvmf_transports = rpc.nvmf.nvmf_get_transports(self.client)
+
+ @verbose
+ @is_method_available
+ def nvmf_get_transports(self):
+ if self.is_init:
+ self.list_nvmf_transports()
+ for transport in self.current_nvmf_transports:
+ yield NvmfTransport(transport)
+
+ def list_nvmf_subsystems(self):
+ if self.is_init:
+ self.current_nvmf_subsystems = rpc.nvmf.nvmf_get_subsystems(self.client)
+
+ @verbose
+ @is_method_available
+ def nvmf_get_subsystems(self):
+ if self.is_init:
+ self.list_nvmf_subsystems()
+ for subsystem in self.current_nvmf_subsystems:
+ yield NvmfSubsystem(subsystem)
+
+ @verbose
+ def create_nvmf_subsystem(self, **kwargs):
+ rpc.nvmf.nvmf_create_subsystem(self.client, **kwargs)
+
+ @verbose
+ def nvmf_delete_subsystem(self, **kwargs):
+ rpc.nvmf.nvmf_delete_subsystem(self.client, **kwargs)
+
+ @verbose
+ def nvmf_subsystem_add_listener(self, **kwargs):
+ rpc.nvmf.nvmf_subsystem_add_listener(self.client, **kwargs)
+
+ @verbose
+ def nvmf_subsystem_remove_listener(self, **kwargs):
+ rpc.nvmf.nvmf_subsystem_remove_listener(self.client, **kwargs)
+
+ @verbose
+ def nvmf_subsystem_add_host(self, **kwargs):
+ rpc.nvmf.nvmf_subsystem_add_host(self.client, **kwargs)
+
+ @verbose
+ def nvmf_subsystem_remove_host(self, **kwargs):
+ rpc.nvmf.nvmf_subsystem_remove_host(self.client, **kwargs)
+
+ @verbose
+ def nvmf_subsystem_allow_any_host(self, **kwargs):
+ rpc.nvmf.nvmf_subsystem_allow_any_host(self.client, **kwargs)
+
+ @verbose
+ def nvmf_subsystem_add_ns(self, **kwargs):
+ rpc.nvmf.nvmf_subsystem_add_ns(self.client, **kwargs)
+
+ @verbose
+ def nvmf_subsystem_remove_ns(self, **kwargs):
+ rpc.nvmf.nvmf_subsystem_remove_ns(self.client, **kwargs)
+
+ @verbose
+ def nvmf_subsystem_allow_any_host(self, **kwargs):
+ rpc.nvmf.nvmf_subsystem_allow_any_host(self.client, **kwargs)
+
+ @verbose
+ @is_method_available
+ def scsi_get_devices(self):
+ if self.is_init:
+ for device in rpc.iscsi.scsi_get_devices(self.client):
+ yield ScsiObj(device)
+
+ @verbose
+ @is_method_available
+ def iscsi_get_target_nodes(self):
+ if self.is_init:
+ for tg in rpc.iscsi.iscsi_get_target_nodes(self.client):
+ yield tg
+
+ @verbose
+ def iscsi_create_target_node(self, **kwargs):
+ rpc.iscsi.iscsi_create_target_node(self.client, **kwargs)
+
+ @verbose
+ def iscsi_delete_target_node(self, **kwargs):
+ rpc.iscsi.iscsi_delete_target_node(self.client, **kwargs)
+
+ @verbose
+ @is_method_available
+ def iscsi_get_portal_groups(self):
+ if self.is_init:
+ for pg in rpc.iscsi.iscsi_get_portal_groups(self.client):
+ yield ScsiObj(pg)
+
+ @verbose
+ @is_method_available
+ def iscsi_get_initiator_groups(self):
+ if self.is_init:
+ for ig in rpc.iscsi.iscsi_get_initiator_groups(self.client):
+ yield ScsiObj(ig)
+
+ @verbose
+ def construct_portal_group(self, **kwargs):
+ rpc.iscsi.iscsi_create_portal_group(self.client, **kwargs)
+
+ @verbose
+ def iscsi_delete_portal_group(self, **kwargs):
+ rpc.iscsi.iscsi_delete_portal_group(self.client, **kwargs)
+
+ @verbose
+ def construct_initiator_group(self, **kwargs):
+ rpc.iscsi.iscsi_create_initiator_group(self.client, **kwargs)
+
+ @verbose
+ def iscsi_delete_initiator_group(self, **kwargs):
+ rpc.iscsi.iscsi_delete_initiator_group(self.client, **kwargs)
+
+ @verbose
+ @is_method_available
+ def iscsi_get_connections(self, **kwargs):
+ if self.is_init:
+ for ic in rpc.iscsi.iscsi_get_connections(self.client, **kwargs):
+ yield ic
+
+ @verbose
+ def iscsi_initiator_group_add_initiators(self, **kwargs):
+ rpc.iscsi.iscsi_initiator_group_add_initiators(self.client, **kwargs)
+
+ @verbose
+ def iscsi_initiator_group_remove_initiators(self, **kwargs):
+ rpc.iscsi.iscsi_initiator_group_remove_initiators(self.client, **kwargs)
+
+ @verbose
+ def iscsi_target_node_add_pg_ig_maps(self, **kwargs):
+ rpc.iscsi.iscsi_target_node_add_pg_ig_maps(self.client, **kwargs)
+
+ @verbose
+ def iscsi_target_node_remove_pg_ig_maps(self, **kwargs):
+ rpc.iscsi.iscsi_target_node_remove_pg_ig_maps(self.client, **kwargs)
+
+ @verbose
+ def iscsi_auth_group_add_secret(self, **kwargs):
+ rpc.iscsi.iscsi_auth_group_add_secret(self.client, **kwargs)
+
+ @verbose
+ def iscsi_auth_group_remove_secret(self, **kwargs):
+ rpc.iscsi.iscsi_auth_group_remove_secret(self.client, **kwargs)
+
+ @verbose
+ @is_method_available
+ def iscsi_get_auth_groups(self, **kwargs):
+ return rpc.iscsi.iscsi_get_auth_groups(self.client, **kwargs)
+
+ @verbose
+ def iscsi_create_auth_group(self, **kwargs):
+ rpc.iscsi.iscsi_create_auth_group(self.client, **kwargs)
+
+ @verbose
+ def iscsi_delete_auth_group(self, **kwargs):
+ rpc.iscsi.iscsi_delete_auth_group(self.client, **kwargs)
+
+ @verbose
+ def iscsi_target_node_set_auth(self, **kwargs):
+ rpc.iscsi.iscsi_target_node_set_auth(self.client, **kwargs)
+
+ @verbose
+ def iscsi_target_node_add_lun(self, **kwargs):
+ rpc.iscsi.iscsi_target_node_add_lun(self.client, **kwargs)
+
+ @verbose
+ def iscsi_set_discovery_auth(self, **kwargs):
+ rpc.iscsi.iscsi_set_discovery_auth(self.client, **kwargs)
+
+ @verbose
+ @is_method_available
+ def iscsi_get_options(self, **kwargs):
+ return rpc.iscsi.iscsi_get_options(self.client, **kwargs)
+
+ def has_subsystem(self, subsystem):
+ for system in rpc.subsystem.framework_get_subsystems(self.client):
+ if subsystem.lower() == system["subsystem"].lower():
+ return True
+ return False
+
+
+class Bdev(object):
+ def __init__(self, bdev_info):
+ """
+ All class attributes are set based on what information is received
+ from bdev_get_bdevs RPC call.
+ # TODO: Document in docstring parameters which describe bdevs.
+ # TODO: Possible improvement: JSON schema might be used here in future
+ """
+ for i in list(bdev_info.keys()):
+ setattr(self, i, bdev_info[i])
+
+
+class LvolStore(object):
+ def __init__(self, lvs_info):
+ """
+ All class attributes are set based on what information is received
+ from bdev_get_bdevs RPC call.
+ # TODO: Document in docstring parameters which describe bdevs.
+ # TODO: Possible improvement: JSON schema might be used here in future
+ """
+ for i in list(lvs_info.keys()):
+ setattr(self, i, lvs_info[i])
+
+
+class VhostCtrlr(object):
+ def __init__(self, ctrlr_info):
+ """
+ All class attributes are set based on what information is received
+ from vhost_get_controllers RPC call.
+ # TODO: Document in docstring parameters which describe bdevs.
+ # TODO: Possible improvement: JSON schema might be used here in future
+ """
+ for i in list(ctrlr_info.keys()):
+ setattr(self, i, ctrlr_info[i])
+
+
+class NvmfTransport(object):
+ def __init__(self, transport_info):
+ """
+ All class attributes are set based on what information is received
+ from get_nvmf_transport RPC call.
+ # TODO: Document in docstring parameters which describe bdevs.
+ # TODO: Possible improvement: JSON schema might be used here in future
+ """
+ for i in transport_info.keys():
+ setattr(self, i, transport_info[i])
+
+
+class NvmfSubsystem(object):
+ def __init__(self, subsystem_info):
+ """
+ All class attributes are set based on what information is received
+ from get_nvmf_subsystem RPC call.
+ # TODO: Document in docstring parameters which describe bdevs.
+ # TODO: Possible improvement: JSON schema might be used here in future
+ """
+ for i in subsystem_info.keys():
+ setattr(self, i, subsystem_info[i])
+
+
+class ScsiObj(object):
+ def __init__(self, device_info):
+ """
+ All class attributes are set based on what information is received
+ from iscsi related RPC calls.
+ # TODO: Document in docstring parameters which describe bdevs.
+ # TODO: Possible improvement: JSON schema might be used here in future
+ """
+ for i in device_info.keys():
+ setattr(self, i, device_info[i])
diff --git a/src/spdk/scripts/vagrant/README.md b/src/spdk/scripts/vagrant/README.md
new file mode 100644
index 000000000..323a26d2d
--- /dev/null
+++ b/src/spdk/scripts/vagrant/README.md
@@ -0,0 +1,237 @@
+# SPDK Vagrant and VirtualBox
+
+The following guide explains how to use the scripts in the `spdk/scripts/vagrant`. Mac, Windows, and Linux platforms are supported.
+
+1. Install and configure [Git](https://git-scm.com/) on your platform.
+2. Install [VirtualBox 5.1](https://www.virtualbox.org/wiki/Downloads) or newer
+3. Install* [VirtualBox Extension Pack](https://www.virtualbox.org/wiki/Downloads)
+4. Install and configure [Vagrant 1.9.4](https://www.vagrantup.com) or newer
+
+* Note: The extension pack has different licensing than main VirtualBox, please
+ review them carefully as the evaluation license is for personal use only.
+
+## Mac OSX Setup (High Sierra)
+
+Quick start instructions for OSX:
+
+1. Install Homebrew
+2. Install Virtual Box Cask
+3. Install Virtual Box Extension Pack*
+4. Install Vagrant Cask
+
+* Note: The extension pack has different licensing than main VirtualBox, please
+ review them carefully as the evaluation license is for personal use only.
+
+```
+ /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
+ brew doctor
+ brew update
+ brew cask install virtualbox
+ brew cask install virtualbox-extension-pack
+ brew cask install vagrant
+```
+
+## Windows 10 Setup
+
+1. Windows platforms should install some form of git.
+2. Install [VirtualBox 5.1](https://www.virtualbox.org/wiki/Downloads) or newer
+3. Install* [VirtualBox Extension Pack](https://www.virtualbox.org/wiki/Downloads)
+4. Install and configure [Vagrant 1.9.4](https://www.vagrantup.com) or newer
+
+* Note: The extension pack has different licensing than main VirtualBox, please
+ review them carefully as the evaluation license is for personal use only.
+
+- Note: VirtualBox requires virtualization to be enabled in the BIOS.
+- Note: You should disable Hyper-V in Windows RS 3 laptop. Search `windows features` un-check Hyper-V, restart laptop
+
+## Linux Setup
+
+Following the generic instructions should be sufficient for most Linux distributions. For more thorough instructions on installing VirtualBox on your distribution of choice, please see the following [guide](https://www.virtualbox.org/wiki/Linux_Downloads).
+
+ Examples on Fedora26/Fedora27/Fedora28
+
+1. yum check-update
+2. yum update -y
+3. yum install qt*
+4. yum install libsdl*
+5. rpm -ivh VirtualBox-5.2-5.2.16_123759_fedora26-1.x86_64.rpm (select the right version in https://www.virtualbox.org/wiki/Linux_Downloads)
+6. VBoxManage extpack install Oracle_VM_VirtualBox_Extension_Pack-5.2.16.vbox-extpack(install the same pack* as your installed version of VirtualBox)
+7. rpm -ivh vagrant_2.1.2_x86_64.rpm
+
+* Note: The extension pack has different licensing than main VirtualBox, please
+ review them carefully as the evaluation license is for personal use only.
+
+## Configure Vagrant
+
+If you are behind a corporate firewall, configure the following proxy settings.
+
+1. Set the http_proxy and https_proxy
+2. Install the proxyconf plugin
+
+```
+ $ export http_proxy=....
+ $ export https_proxy=....
+ $ vagrant plugin install vagrant-proxyconf
+```
+
+## Download SPDK from GitHub
+
+Use git to clone a new spdk repository. GerritHub can also be used. See the instructions at [spdk.io](http://www.spdk.io/development/#gerrithub) to setup your GerritHub account. Note that this spdk repository will be rsync'd into your VM, so you can use this repository to continue development within the VM.
+
+## Create a Virtual Box
+
+Use the `spdk/scripts/vagrant/create_vbox.sh` script to create a VM of your choice. Supported VM platforms are:
+
+- centos7
+- ubuntu16
+- ubuntu18
+- fedora26
+- fedora27
+- fedora28
+- freebsd11
+
+```
+$ spdk/scripts/vagrant/create_vbox.sh -h
+ Usage: create_vbox.sh [-n <num-cpus>] [-s <ram-size>] [-x <http-proxy>] [-hvrld] <distro>
+
+ distro = <centos7 | ubuntu16 | ubuntu18 | fedora26 | fedora27 | fedora28 | freebsd11>
+
+ -s <ram-size> in kb default: 4096
+ -n <num-cpus> 1 to 4 default: 4
+ -x <http-proxy> default: ""
+ -p <provider> libvirt or virtualbox
+ --vhost-host-dir=<path> directory path with vhost test dependencies
+ (test VM qcow image, fio binary, ssh keys)
+ --vhost-vm-dir=<path> directory where to put vhost dependencies in VM
+ -r dry-run
+ -l use a local copy of spdk, don't try to rsync from the host.
+ -d deploy a test vm by provisioning all prerequisites for spdk autotest
+ -h help
+ -v verbose
+
+ Examples:
+
+ ./scripts/vagrant/create_vbox.sh -x http://user:password@host:port fedora27
+ ./scripts/vagrant/create_vbox.sh -s 2048 -n 2 ubuntu16
+ ./scripts/vagrant/create_vbox.sh -rv freebsd
+ ./scripts/vagrant/create_vbox.sh fedora26
+```
+
+It is recommended that you call the `create_vbox.sh` script from outside of the spdk repository. Call this script from a parent directory. This will allow the creation of multiple VMs in separate <distro> directories, all using the same spdk repository. For example:
+
+```
+ $ spdk/scripts/vagrant/create_vbox.sh -s 2048 -n 2 fedora26
+```
+
+This script will:
+
+1. create a subdirectory named <distro> in your $PWD
+2. copy the needed files from `spdk/scripts/vagrant/` into the <distro> directory
+3. create a working virtual box in the <distro> directory
+4. rsync the `~/.gitconfig` file to `/home/vagrant/` in the newly provisioned virtual box
+5. rsync a copy of the source `spdk` repository to `/home/vagrant/spdk_repo/spdk` (optional)
+6. rsync a copy of the `~/vagrant_tools` directory to `/home/vagrant/tools` (optional)
+7. execute vm_setup.sh on the guest to install all spdk dependencies (optional)
+
+This arrangement allows the provisioning of multiple, different VMs within that same directory hierarchy using the same spdk repository. Following the creation of the vm you'll need to ssh into your virtual box and finish the VM initialization.
+
+```
+ $ cd <distro>
+ $ vagrant ssh
+```
+
+## Finish VM Initialization
+
+A copy of the `spdk` repository you cloned will exist in the `spdk_repo` directory of the `/home/vagrant` user account. After using `vagrant ssh` to enter your VM you must complete the initialization of your VM by running the `scripts/vagrant/update.sh` script. For example:
+
+```
+ $ script -c 'sudo spdk_repo/spdk/scripts/vagrant/update.sh' update.log
+```
+
+The `update.sh` script completes initialization of the VM by automating the following steps.
+
+1. Runs yum/apt-get update (Linux)
+2. Runs the scripts/pkgdep.sh script
+3. Installs the FreeBSD source in /usr/src (FreeBSD only)
+
+This only needs to be done once. This is also not necessary for Fedora VMs provisioned with the -d flag. The `vm_setup` script performs these operations instead.
+
+## Post VM Initialization
+
+Following VM initialization you must:
+
+1. Verify you have an emulated NVMe device
+2. Compile your spdk source tree
+3. Run the hello_world example to validate the environment is set up correctly
+
+### Verify you have an emulated NVMe device
+
+```
+ $ lspci | grep "Non-Volatile"
+ 00:0e.0 Non-Volatile memory controller: InnoTek Systemberatung GmbH Device 4e56
+```
+
+### Compile SPDK
+
+```
+ $ cd spdk_repo/spdk
+ $ git submodule update --init
+ $ ./configure --enable-debug
+ $ make
+```
+
+### Run the hello_world example script
+
+```
+ $ sudo scripts/setup.sh
+ $ sudo ./build/examples/hello_bdev
+```
+
+### Running autorun.sh with vagrant
+
+After running vm_setup.sh the `run-autorun.sh` can be used to run `spdk/autorun.sh` on a Fedora vagrant machine. Note that the `spdk/scripts/vagrant/autorun-spdk.conf` should be copied to `~/autorun-spdk.conf` before starting your tests.
+
+```
+ $ cp spdk/scripts/vagrant/autorun-spdk.conf ~/
+ $ spdk/scripts/vagrant/run-autorun.sh -h
+ Usage: scripts/vagrant/run-autorun.sh -d <path_to_spdk_tree> [-h] | [-q] | [-n]
+ -d : Specify a path to an SPDK source tree
+ -q : No output to screen
+ -n : Noop - dry-run
+ -h : This help
+
+ Examples:
+ run-spdk-autotest.sh -d . -q
+ run-spdk-autotest.sh -d /home/vagrant/spdk_repo/spdk
+```
+
+## FreeBSD Appendix
+
+---
+**NOTE:** As of this writing the FreeBSD Virtualbox instance does not correctly support the vagrant-proxyconf feature.
+---
+
+The following steps are done by the `update.sh` script. It is recommended that you capture the output of `update.sh` with a typescript. E.g.:
+
+```
+ $ script update.log sudo spdk_repo/spdk/scripts/vagrant/update.sh
+```
+
+1. Updates the pkg catalog
+2. Installs the needed FreeBSD packages on the system by calling pkgdep.sh
+3. Installs the FreeBSD source in /usr/src
+
+```
+ $ sudo pkg upgrade -f
+ $ sudo spdk_repo/spdk/scripts/pkgdep.sh --all
+ $ sudo git clone --depth 10 -b releases/11.1.0 https://github.com/freebsd/freebsd.git /usr/src
+```
+
+To build spdk on FreeBSD use `gmake MAKE=gmake`. E.g.:
+
+```
+ $ cd spdk_repo/spdk
+ $ git submodule update --init
+ $ ./configure --enable-debug
+ $ gmake MAKE=gmake
+```
diff --git a/src/spdk/scripts/vagrant/Vagrantfile b/src/spdk/scripts/vagrant/Vagrantfile
new file mode 100644
index 000000000..ccf6d632e
--- /dev/null
+++ b/src/spdk/scripts/vagrant/Vagrantfile
@@ -0,0 +1,291 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+require 'open3'
+def checkboxtype(distro)
+ localboxes, stderr, status = Open3.capture3("vagrant box list")
+ if localboxes.include? "spdk/"+distro
+ return "spdk/"+distro
+ else
+ case distro
+ when "centos7"
+ return "centos/7"
+ when "centos8"
+ return "centos/8"
+ when "ubuntu1604"
+ return "peru/ubuntu-16.04-server-amd64"
+ when "ubuntu1804"
+ return "peru/ubuntu-18.04-server-amd64"
+ when "fedora30"
+ return "generic/fedora30"
+ when "fedora31"
+ return "generic/fedora31"
+ when "fedora32"
+ return "generic/fedora32"
+ when "arch"
+ return "generic/arch"
+ when "freebsd11"
+ return "generic/freebsd11"
+ when "freebsd12"
+ return "generic/freebsd12"
+ when "clearlinux"
+ return "AntonioMeireles/ClearLinux"
+ else
+ "Invalid argument #{distro}"
+ abort("Invalid argument!")
+ end
+ end
+end
+
+Vagrant.configure(2) do |config|
+
+ # Pick the right distro and bootstrap, default is fedora30
+ distro = ( ENV['SPDK_VAGRANT_DISTRO'] || "fedora30")
+ provider = (ENV['SPDK_VAGRANT_PROVIDER'] || "virtualbox")
+
+ # Get all variables for creating vm
+ vmcpu=(ENV['SPDK_VAGRANT_VMCPU'] || 2)
+ vmram=(ENV['SPDK_VAGRANT_VMRAM'] || 4096)
+ spdk_dir=(ENV['SPDK_DIR'] || "none")
+ vmemulator=(ENV['SPDK_QEMU_EMULATOR'] || "")
+ emulated_nvme_types=(ENV['NVME_DISKS_TYPE'] || "nvme").split(',')
+ nvme_namespaces=(ENV['NVME_DISKS_NAMESPACES'] || "").split(',')
+ nvme_file=(ENV['NVME_FILE'] || "").split(',')
+ nvme_cmbs=(ENV['NVME_CMB'] || "").split(',')
+ vagrantfile_dir=(ENV['VAGRANTFILE_DIR'] || "none")
+
+ # generic/freebsd boxes do not work properly with vagrant-proxyconf and
+ # have issues installing rsync and sshfs for syncing files. NFS is
+ # pre-installed, so use it.
+ # generic/fedora boxes on the other hand have problems running NFS
+ # service so use sshfs+rsync combo instead.
+ plugins_sync_backend = {type: :sshfs}
+ # Remove --copy-links from default rsync cmdline since we do want to sync
+ # actual symlinks as well. Also, since copy is made between host and its
+ # local VM we don't need to worry about saturating the local link so skip
+ # the compression to speed up the whole transfer.
+ files_sync_backend = {type: "rsync", rsync__auto: false, rsync__args: ["--archive", "--verbose", "--delete"]}
+ if (distro.include? "freebsd") || (distro.include? "clearlinux")
+ plugins_sync_backend = {type: :nfs, nfs_udp: false}
+ files_sync_backend = {type: :nfs, nfs_udp: false, mount_options: ['ro']}
+ end
+ config.vm.box = checkboxtype(distro)
+ config.vm.box_check_update = false
+ config.vm.synced_folder '.', '/vagrant', disabled: true
+
+ # Copy in the .gitconfig if it exists
+ if File.file?(File.expand_path("~/.gitconfig"))
+ config.vm.provision "file", source: "~/.gitconfig", destination: ".gitconfig"
+ end
+
+ # Copy the tsocks configuration file for use when installing some spdk test pool dependencies
+ if File.file?("/etc/tsocks.conf")
+ $tsocks_copy = <<-SCRIPT
+ sudo -s
+ mv -f tsocks.conf /etc/tsocks.conf
+ chown root /etc/tsocks.conf
+ chmod 644 /etc/tsocks.conf
+ SCRIPT
+ config.vm.provision "file", source: "/etc/tsocks.conf", destination: "tsocks.conf"
+ config.vm.provision "shell", inline: $tsocks_copy
+ end
+
+ # vagrant-cachier caches apt/yum etc to speed subsequent
+ # vagrant up
+ # to enable, run
+ # vagrant plugin install vagrant-cachier
+ #
+ if Vagrant.has_plugin?("vagrant-cachier")
+ config.cache.scope = :box
+ config.cache.synced_folder_opts = plugins_sync_backend
+ end
+
+ # use http proxy if available
+ if ENV['http_proxy']
+ if Vagrant.has_plugin?("vagrant-proxyconf")
+ config.proxy.http = ENV['http_proxy']
+ config.proxy.https = ENV['https_proxy']
+ config.proxy.no_proxy = "localhost,127.0.0.1"
+ end
+
+ # Proxyconf does not seem to support FreeBSD boxes or at least it's
+ # docs do not mention that. Set up proxy configuration manually.
+ if distro.include?("freebsd")
+ $freebsd_proxy = <<-SCRIPT
+ sudo -s
+ echo "export http_proxy=#{ENV['http_proxy']}" >> /etc/profile
+ echo "export https_proxy=#{ENV['http_proxy']}" >> /etc/profile
+ echo "pkg_env: {http_proxy: #{ENV['http_proxy']}}" > /usr/local/etc/pkg.conf
+ chown root:wheel /usr/local/etc/pkg.conf
+ chmod 644 /usr/local/etc/pkg.conf
+ SCRIPT
+ config.vm.provision "shell", inline: $freebsd_proxy
+ end
+ end
+
+ # freebsd and clearlinux boxes in order to have spdk sources synced from
+ # host properly will use NFS with "ro" option enabled to prevent changes
+ # on host filesystem.
+ # To make sources usable in the guest VM we need to unmount them and use
+ # local copy.
+ if distro.include? "freebsd"
+ $freebsd_spdk_repo = <<-SCRIPT
+ sudo -s
+ cp -R /home/vagrant/spdk_repo/spdk /tmp/spdk
+ umount /home/vagrant/spdk_repo/spdk && rm -rf /home/vagrant/spdk_repo/spdk
+ mv /tmp/spdk /home/vagrant/spdk_repo/spdk
+ chown -R vagrant:vagrant /home/vagrant/spdk_repo/spdk
+ SCRIPT
+ config.vm.provision "shell", inline: $freebsd_spdk_repo
+ elsif distro.include? "clearlinux"
+ $clearlinux_spdk_repo = <<-SCRIPT
+ sudo -s
+ cp -R /home/vagrant/spdk_repo/spdk /tmp/spdk
+ umount /home/vagrant/spdk_repo/spdk && rm -rf /home/vagrant/spdk_repo/spdk
+ mv /tmp/spdk /home/vagrant/spdk_repo/spdk
+ chown -R clear:clear /home/vagrant/spdk_repo/spdk
+ SCRIPT
+ config.vm.provision "shell", inline: $clearlinux_spdk_repo
+ end
+
+ config.ssh.forward_agent = true
+ config.ssh.forward_x11 = true
+ if ENV['VAGRANT_PASSWORD_AUTH'] == "1"
+ config.ssh.username = "vagrant"
+ config.ssh.password = "vagrant"
+ end
+
+ config.vm.provider "virtualbox" do |vb|
+ vb.customize ["modifyvm", :id, "--ioapic", "on"]
+ vb.memory = "#{vmram}"
+ vb.cpus = "#{vmcpu}"
+
+ nvme_disk=(ENV['NVME_FILE'] || "nvme_disk.img")
+ unless File.exist? (nvme_disk)
+ vb.customize ["createhd", "--filename", nvme_disk, "--variant", "Fixed", "--size", "1024"]
+ vb.customize ["storagectl", :id, "--name", "nvme", "--add", "pcie", "--controller", "NVMe", "--portcount", "1", "--bootable", "off"]
+ vb.customize ["storageattach", :id, "--storagectl", "nvme", "--type", "hdd", "--medium", nvme_disk, "--port", "0"]
+ end
+
+ #support for the SSE4.x instruction is required in some versions of VB.
+ vb.customize ["setextradata", :id, "VBoxInternal/CPUM/SSE4.1", "1"]
+ vb.customize ["setextradata", :id, "VBoxInternal/CPUM/SSE4.2", "1"]
+ end
+
+  # This setup was tested on Fedora 27
+  # The libvirt configuration needs a modern Qemu (tested on 2.10) & vagrant-libvirt version 0.0.39+
+  # There are a few limitations with SELinux - a file added outside libvirt must have a proper SE ACL policy, or use setenforce 0
+ config.vm.provider "libvirt" do |libvirt, override|
+ libvirt.random_hostname = "1"
+ libvirt.disk_bus = "virtio"
+
+ # generic/freebsd boxes need to be explicitly run with SCSI bus,
+ # otherwise boot process fails on mounting the disk
+ if (distro.include?("freebsd"))
+ libvirt.disk_bus = "scsi"
+ end
+
+ # Run generic/arch boxes explicitly with IDE bus,
+ # otherwise boot process fails on mounting the disk
+ if (distro.include?("arch"))
+ libvirt.disk_bus = "ide"
+ end
+
+ if not vmemulator.empty?
+ libvirt.emulator_path = "#{vmemulator}"
+ libvirt.machine_type = "pc"
+ end
+
+ # we put nvme_disk inside default pool to eliminate libvirt/SELinux Permissions Problems
+ # and to be able to run vagrant from user $HOME directory
+
+ # Loop to create all emulated disks set
+ emulated_nvme_types.each_with_index { |disk, index|
+ if ENV['NVME_FILE']
+ nvme_disk_id="#{disk}" + "-#{index}"
+ nvme_disk="#{nvme_file["#{index}".to_i]}"
+ else
+ nvme_disk="/var/lib/libvirt/images/nvme_disk.img"
+ end
+
+ unless File.exist? (nvme_disk)
+ puts "If run with libvirt provider please execute create_nvme_img.sh"
+ end
+
+ if disk == "nvme"
+ libvirt.qemuargs :value => "-drive"
+ libvirt.qemuargs :value => "format=raw,file=#{nvme_disk},if=none,id=#{nvme_disk_id}"
+ libvirt.qemuargs :value => "-device"
+ nvme_drive = "nvme,drive=#{nvme_disk_id},serial=1234#{index}"
+ if !nvme_namespaces["#{index}".to_i].nil? && nvme_namespaces["#{index}".to_i] != 1
+ nvme_drive << ",namespaces=#{nvme_namespaces["#{index}".to_i]}"
+ end
+ if !nvme_cmbs["#{index}".to_i].nil? && nvme_cmbs["#{index}".to_i] == "true"
+ # Fix the size of the buffer to 128M
+ nvme_drive << ",cmb_size_mb=128"
+ end
+ libvirt.qemuargs :value => nvme_drive
+ elsif disk == "ocssd"
+ libvirt.qemuargs :value => "-drive"
+ libvirt.qemuargs :value => "format=raw,file=#{nvme_disk},if=none,id=#{nvme_disk_id}"
+ libvirt.qemuargs :value => "-device"
+ # create ocssd drive with special parameters
+ # lba_index=4 it is LBA namespace format, 4 means that block size is 4K and have 64B metadata
+        # lnum_lun, lnum_pln, lpgs_per_blk, lsecs_per_pg, lblks_per_pln - these are parameters describing the device geometry
+ # we need to multiply these parameters by ourselves to have backend file minimal size:
+ # in our case: 4K * 8 * 2 * 1536 * 2 * 45 = 8640 MB
+ libvirt.qemuargs :value => "nvme,drive=#{nvme_disk_id},serial=deadbeef,oacs=0,namespaces=1,lver=2,lba_index=4,mdts=10,lnum_lun=8,lnum_pln=2,lpgs_per_blk=1536,lsecs_per_pg=2,lblks_per_pln=45,metadata=#{nvme_disk}_ocssd_md,nsdatafile=#{nvme_disk}_ocssd_blknvme.ns,laer_thread_sleep=3000,stride=4"
+ end
+ }
+
+ libvirt.driver = "kvm"
+ libvirt.graphics_type = "vnc"
+ libvirt.memory = "#{vmram}"
+ libvirt.cpus = "#{vmcpu}"
+ libvirt.video_type = "cirrus"
+
+ if ENV['VAGRANT_HUGE_MEM'] == "1"
+ libvirt.memorybacking :hugepages
+ end
+
+ # Optional field if we want use other storage pools than default
+ # libvirt.storage_pool_name = "vm"
+ end
+
+ # rsync the spdk directory if provision hasn't happened yet
+ # Warning: rsync does not work with freebsd boxes, so this step is disabled
+ if ENV['COPY_SPDK_DIR'] == "1" && spdk_dir != "none"
+ config.vm.synced_folder "#{spdk_dir}", "/home/vagrant/spdk_repo/spdk", files_sync_backend
+ end
+
+ # rsync artifacts from build
+ if ENV['COPY_SPDK_ARTIFACTS'] == "1"
+ config.vm.synced_folder "#{vagrantfile_dir}/output", "/home/vagrant/spdk_repo/output", plugins_sync_backend
+ end
+
+ # provision the vm with all of the necessary spdk dependencies for running the autorun.sh tests
+ if ENV['DEPLOY_TEST_VM'] == "1" && spdk_dir != "none"
+ config.vm.provision "shell" do |setup|
+ setup.path = "#{spdk_dir}/test/common/config/vm_setup.sh"
+ setup.privileged = false
+ setup.args = ["-u", "-i"]
+ end
+ end
+
+ # Clear CFLAGS in clear linux
+ if distro == "clearlinux"
+ $clearcflags = <<-SCRIPT
+ echo "export CFLAGS=" >> /etc/profile.d/clearcflags.sh
+ echo "export CFFLAGS=" >> /etc/profile.d/clearcflags.sh
+ echo "export CXXFLAGS=" >> /etc/profile.d/clearcflags.sh
+ echo "export FFLAGS=" >> /etc/profile.d/clearcflags.sh
+ echo "export THEANO_FLAGS=" >> /etc/profile.d/clearcflags.sh
+ SCRIPT
+ config.vm.provision "shell", inline: $clearcflags, run: "always"
+ end
+
+  # Copy in the user's tools if they exist
+ if File.directory?(File.expand_path("~/vagrant_tools"))
+ config.vm.synced_folder "~/vagrant_tools", "/home/vagrant/tools", files_sync_backend
+ end
+end
diff --git a/src/spdk/scripts/vagrant/Vagrantfile_openstack_vm b/src/spdk/scripts/vagrant/Vagrantfile_openstack_vm
new file mode 100644
index 000000000..c66405a2c
--- /dev/null
+++ b/src/spdk/scripts/vagrant/Vagrantfile_openstack_vm
@@ -0,0 +1,82 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+Vagrant.configure(2) do |config|
+
+ # See: https://app.vagrantup.com/bento/boxes/ubuntu-18.04
+ config.vm.box = "bento/ubuntu-18.04"
+ config.vm.box_check_update = false
+
+ config.vm.provider :virtualbox do |vb|
+ vb.customize ["modifyvm", :id, "--hwvirtex", "off"]
+ end
+ if Vagrant.has_plugin?("vagrant-cachier")
+ config.cache.scope = :box
+ end
+
+  # use http proxy if available
+ if ENV['http_proxy'] && Vagrant.has_plugin?("vagrant-proxyconf")
+ config.proxy.http = ENV['http_proxy']
+ config.proxy.https = ENV['http_proxy']
+ config.proxy.no_proxy = "localhost,127.0.0.1,10.0.2.15"
+ end
+
+ vmcpu=(ENV['SPDK_VAGRANT_VMCPU'] || 10)
+ vmram=(ENV['SPDK_VAGRANT_VMRAM'] || 8192)
+ spdk_dir=(ENV['SPDK_DIR'] || "none")
+
+ config.ssh.forward_agent = true
+ config.ssh.forward_x11 = true
+
+ # Change root passwd and allow root SSH
+ config.vm.provision "shell", inline: 'echo -e "root\nroot" | sudo passwd root'
+ config.vm.provision "shell", inline: 'sudo sh -c "echo \"PermitRootLogin yes\" >> /etc/ssh/sshd_config"'
+ config.vm.provision "shell", inline: 'useradd -m -p sys_sgci -s /bin/bash sys_sgci'
+ config.vm.provision "shell", inline: 'usermod -aG sudo sys_sgci'
+
+ # Install needed deps
+ $apt_script = <<-SCRIPT
+ sudo apt -y update
+ sudo DEBIAN_FRONTEND=noninteractive apt -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" upgrade
+ SCRIPT
+ config.vm.provision "shell", inline: $apt_script
+
+  # TODO: The next 2 lines break any future ssh communication via "vagrant ssh"
+  # It'd be good to check NIC names in ifconfig and then sed them in /etc/network/interfaces to eth0, eth1, and so on
+ config.vm.provision "shell", inline: 'sudo sh -c "echo \"auto eth0\" >> /etc/network/interfaces"'
+ config.vm.provision "shell", inline: 'sudo sh -c "echo \"iface eth0 inet dhcp\" >> /etc/network/interfaces"'
+
+ # This is to avoid annoying "Start job is running for wait for network to be configured" 2 minute timeout
+ # in case of not-so-perfect NIC and virtual network configuration for the VM
+ config.vm.provision "shell", inline: 'systemctl disable systemd-networkd-wait-online.service'
+ config.vm.provision "shell", inline: 'systemctl mask systemd-networkd-wait-online.service'
+
+ config.vm.provider "virtualbox" do |vb|
+ vb.customize ["modifyvm", :id, "--ioapic", "on"]
+ vb.memory = "#{vmram}"
+ vb.cpus = "#{vmcpu}"
+
+ #support for the SSE4.x instruction is required in some versions of VB.
+ vb.customize ["setextradata", :id, "VBoxInternal/CPUM/SSE4.1", "1"]
+ vb.customize ["setextradata", :id, "VBoxInternal/CPUM/SSE4.2", "1"]
+ end
+
+ if spdk_dir != "none"
+ config.vm.synced_folder "#{spdk_dir}", "/home/vagrant/spdk", type: "rsync",
+ rsync__exclude: ["ubuntu18"]
+ end
+
+ # Install needed drivers and tools
+ config.vm.provision "shell", inline: 'sudo apt-get install -y libibverbs* librdmacm* libnuma-dev libaio-dev libcunit1-dev ibverbs-utils rdma-core'
+
+  # Copy in the user's tools if they exist
+ if File.directory?(File.expand_path("~/vagrant_tools"))
+ config.vm.synced_folder "~/vagrant_tools", "/home/vagrant/tools", type: "rsync", rsync__auto: false
+ end
+
+ config.vm.provision "shell", inline: 'sudo useradd -s /bin/bash -d /opt/stack -m stack | echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack'
+ config.vm.provision "shell", inline: 'sudo su - stack bash -c "git clone -b stable/stein https://git.openstack.org/openstack-dev/devstack /opt/stack/devstack"'
+ config.vm.provision "file", source: "#{spdk_dir}/scripts/vagrant/local.conf", destination: "/home/vagrant/local.conf"
+ config.vm.provision "shell", inline: 'sudo su - stack bash -c "cp /home/vagrant/local.conf /opt/stack/devstack"'
+ config.vm.provision "shell", inline: 'sudo su - stack bash -c "cd /opt/stack/devstack; ./stack.sh"'
+end
diff --git a/src/spdk/scripts/vagrant/Vagrantfile_vhost_vm b/src/spdk/scripts/vagrant/Vagrantfile_vhost_vm
new file mode 100644
index 000000000..4daeb105f
--- /dev/null
+++ b/src/spdk/scripts/vagrant/Vagrantfile_vhost_vm
@@ -0,0 +1,138 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+Vagrant.configure(2) do |config|
+
+  # Pick the right distro and bootstrap, default is ubuntu16
+ distro = ( ENV['SPDK_VAGRANT_DISTRO'] || "ubuntu16")
+ case distro
+ when "ubuntu16"
+ # See: https://app.vagrantup.com/puppetlabs/boxes/ubuntu-16.04-64-nocm
+ config.vm.box = "puppetlabs/ubuntu-16.04-64-nocm"
+ config.vm.box_version = "1.0.0"
+ when "ubuntu18"
+ # See: https://app.vagrantup.com/bento/boxes/ubuntu-18.04
+ config.vm.box = "bento/ubuntu-18.04"
+ config.vm.box_version = "201808.24.0"
+ when "fedora31"
+ # See: https://app.vagrantup.com/generic/boxes/fedora31
+ config.vm.box = "generic/fedora31"
+ config.vm.box_version = "2.0.6"
+ else
+ "Invalid argument #{distro}"
+ abort("Invalid argument!")
+ end
+ config.vm.box_check_update = false
+
+ # vagrant-cachier caches apt/yum etc to speed subsequent
+ # vagrant up
+ # to enable, run
+ # vagrant plugin install vagrant-cachier
+ #
+ if Vagrant.has_plugin?("vagrant-cachier")
+ config.cache.scope = :box
+ end
+
+  # use http proxy if available
+ if ENV['http_proxy'] && Vagrant.has_plugin?("vagrant-proxyconf")
+ config.proxy.http = ENV['http_proxy']
+ config.proxy.https = ENV['https_proxy']
+ config.proxy.no_proxy = "localhost,127.0.0.1"
+ end
+
+ vmcpu=(ENV['SPDK_VAGRANT_VMCPU'] || 2)
+ vmram=(ENV['SPDK_VAGRANT_VMRAM'] || 4096)
+ ssh_key_dir=(ENV['SPDK_VAGRANT_SSH_KEY'])
+ spdk_dir=(ENV['SPDK_DIR'] || "none")
+ install_deps=(ENV['INSTALL_DEPS'] || "false")
+
+ config.ssh.forward_agent = true
+ config.ssh.forward_x11 = true
+
+ # Change root passwd and allow root SSH
+ config.vm.provision "shell", inline: 'echo -e "root\nroot" | sudo passwd root'
+ config.vm.provision "shell", inline: 'sudo sh -c "echo \"PermitRootLogin yes\" >> /etc/ssh/sshd_config"'
+
+ # Use previously generated SSH keys for setting up a key pair
+ $ssh_key_gen_script = <<-SCRIPT
+ sudo mkdir -p /root/.ssh
+ cat /vagrant/ssh_keys/spdk_vhost_id_rsa.pub > /root/.ssh/authorized_keys
+ sudo chmod 644 /root/.ssh/authorized_keys
+ SCRIPT
+ config.vm.provision "shell", inline: $ssh_key_gen_script
+
+ # Install needed deps
+ $apt_script = <<-SCRIPT
+ sudo apt -y update
+ sudo DEBIAN_FRONTEND=noninteractive apt -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" upgrade
+ sudo apt -y install -y fio sg3-utils bc
+ SCRIPT
+
+ $dnf_script = <<-SCRIPT
+ sudo dnf -y update
+ sudo dnf -y install fio sg3_utils bc
+ SCRIPT
+
+ $install_script = case distro
+ when "ubuntu16" then $apt_script
+ when "ubuntu18" then $apt_script
+ when "fedora31" then $dnf_script
+ else abort("#{distro} distribution is not supported yet")
+ end
+
+ config.vm.provision "shell", inline: $install_script
+
+ # Modify GRUB options
+ # console=ttyS0 earlyprintk=ttyS0 - reroute output to serial dev, so that QEMU can write output to file
+ # scsi_mod.use_blk_mq=1 - for multiqueue use
+  # net.ifnames=0 biosdevname=0 - do not rename NICs on boot. That way we ensure that the added NIC is always eth0.
+ # Reason for these options is that NIC can have different udev name during provisioning with Vagrant
+ # and then some other name while running SPDK tests which use Qemu without any hypervisor like vbox or libvirt
+ # so no corresponding configuration for this NIC name will be present in /etc.
+ config.vm.provision "shell", inline: 'sudo sed -ir s#GRUB_CMDLINE_LINUX=#GRUB_CMDLINE_LINUX=\"console=ttyS0\ earlyprintk=ttyS0\ scsi_mod.use_blk_mq=1\ net.ifnames=0\ biosdevname=0\"#g /etc/default/grub'
+ config.vm.provision "shell", inline: 'sudo sed -ir s#\"\"#\ #g /etc/default/grub'
+
+ update_grub_command = case distro
+ when "ubuntu16" then 'sudo update-grub'
+ when "ubuntu18" then 'sudo update-grub'
+ when "fedora31" then 'sudo grub2-mkconfig -o /boot/grub2/grub.cfg ; sudo grub2-mkconfig -o /boot/efi/EFI/fedora/grub.cfg'
+ else abort("#{distro} distribution is not supported yet")
+ end
+ config.vm.provision "shell", inline: update_grub_command
+
+ if distro.include? "ubuntu"
+    # TODO: The next 2 lines break any future ssh communication via "vagrant ssh"
+    # It'd be good to check NIC names in ifconfig and then sed them in /etc/network/interfaces to eth0, eth1, and so on
+ config.vm.provision "shell", inline: 'sudo sh -c "echo \"auto eth0\" >> /etc/network/interfaces"'
+ config.vm.provision "shell", inline: 'sudo sh -c "echo \"iface eth0 inet dhcp\" >> /etc/network/interfaces"'
+ end
+
+ if distro.include? "ubuntu18"
+ # This is to avoid annoying "Start job is running for wait for network to be configured" 2 minute timeout
+ # in case of not-so-perfect NIC and virtual network configuration for the VM
+ config.vm.provision "shell", inline: 'systemctl disable systemd-networkd-wait-online.service'
+ config.vm.provision "shell", inline: 'systemctl mask systemd-networkd-wait-online.service'
+ end
+
+ config.vm.provider "virtualbox" do |vb|
+ vb.customize ["modifyvm", :id, "--ioapic", "on"]
+ vb.memory = "#{vmram}"
+ vb.cpus = "#{vmcpu}"
+
+ #support for the SSE4.x instruction is required in some versions of VB.
+ vb.customize ["setextradata", :id, "VBoxInternal/CPUM/SSE4.1", "1"]
+ vb.customize ["setextradata", :id, "VBoxInternal/CPUM/SSE4.2", "1"]
+ end
+
+ if spdk_dir != "none"
+ config.vm.synced_folder "#{spdk_dir}", "/home/vagrant/spdk_repo/spdk", type: "rsync", rsync__auto: false
+ if install_deps.include? "true"
+ config.vm.provision "shell", inline: 'sudo /home/vagrant/spdk_repo/spdk/scripts/pkgdep.sh --all'
+ end
+ end
+
+  # Copy in the user's tools if they exist
+ if File.directory?(File.expand_path("~/vagrant_tools"))
+ config.vm.synced_folder "~/vagrant_tools", "/home/vagrant/tools", type: "rsync", rsync__auto: false
+ end
+end
diff --git a/src/spdk/scripts/vagrant/autorun-spdk.conf b/src/spdk/scripts/vagrant/autorun-spdk.conf
new file mode 100644
index 000000000..067522c6f
--- /dev/null
+++ b/src/spdk/scripts/vagrant/autorun-spdk.conf
@@ -0,0 +1,31 @@
+# assign a value of 1 to all of the pertinent tests
+SPDK_RUN_VALGRIND=1
+SPDK_RUN_FUNCTIONAL_TEST=1
+SPDK_TEST_UNITTEST=1
+SPDK_TEST_AUTOBUILD=1
+SPDK_TEST_ISAL=1
+SPDK_TEST_ISCSI=0
+SPDK_TEST_ISCSI_INITIATOR=0
+SPDK_TEST_NVME=0
+SPDK_TEST_NVME_CLI=0
+SPDK_TEST_NVMF=1
+SPDK_TEST_RBD=0
+SPDK_TEST_CRYPTO=0
+SPDK_TEST_OCF=0
+# requires some extra configuration. see TEST_ENV_SETUP_README
+SPDK_TEST_VHOST=0
+SPDK_TEST_VHOST_INIT=0
+SPDK_TEST_BLOCKDEV=1
+SPDK_TEST_URING=0
+# doesn't work on vm
+SPDK_TEST_IOAT=0
+SPDK_TEST_BLOBFS=0
+SPDK_TEST_PMDK=0
+SPDK_TEST_LVOL=0
+SPDK_TEST_REDUCE=0
+SPDK_RUN_ASAN=1
+SPDK_RUN_UBSAN=1
+# Reduce the size of the hugepages
+HUGEMEM=1024
+# Set up the DEPENDENCY_DIR
+DEPENDENCY_DIR=/home/vagrant
diff --git a/src/spdk/scripts/vagrant/create_nvme_img.sh b/src/spdk/scripts/vagrant/create_nvme_img.sh
new file mode 100755
index 000000000..db758bd87
--- /dev/null
+++ b/src/spdk/scripts/vagrant/create_nvme_img.sh
@@ -0,0 +1,77 @@
+#!/usr/bin/env bash
+SYSTEM=$(uname -s)
+size="1024M"
+nvme_disk="/var/lib/libvirt/images/nvme_disk.img"
+type="nvme"
+
+function usage() {
+ echo "Usage: ${0##*/} [-s <disk_size>] [-n <backing file name>]"
+ echo "-s <disk_size> with postfix e.g. 2G default: 1024M"
+ echo " for OCSSD default: 9G"
+ echo "-n <backing file name> backing file path with name"
+ echo " default: /var/lib/libvirt/images/nvme_disk.img"
+ echo "-t <type> default: nvme available: ocssd"
+}
+
+while getopts "s:n:t:h-:" opt; do
+ case "${opt}" in
+ -)
+ echo " Invalid argument: $OPTARG"
+ usage
+ exit 1
+ ;;
+ s)
+ size=$OPTARG
+ ;;
+ n)
+ nvme_disk=$OPTARG
+ ;;
+ t)
+ type=$OPTARG
+ ;;
+ h)
+ usage
+ exit 0
+ ;;
+ *)
+ echo " Invalid argument: $OPTARG"
+ usage
+ exit 1
+ ;;
+ esac
+done
+
+if [ ! "${SYSTEM}" = "FreeBSD" ]; then
+ WHICH_OS=$(lsb_release -i | awk '{print $3}')
+ case $type in
+ "nvme")
+ qemu-img create -f raw $nvme_disk $size
+ ;;
+ "ocssd")
+ if [ $size == "1024M" ]; then
+ size="9G"
+ fi
+ fallocate -l $size $nvme_disk
+ touch ${nvme_disk}_ocssd_md
+ ;;
+ *)
+ echo "We support only nvme and ocssd disks types"
+ exit 1
+ ;;
+ esac
+ #Change SE Policy on Fedora
+ if [ $WHICH_OS == "Fedora" ]; then
+ sudo chcon -t svirt_image_t $nvme_disk
+ fi
+
+ chmod 777 $nvme_disk
+ if [ $WHICH_OS == "Fedora" ]; then
+ chown qemu:qemu $nvme_disk
+ elif [ $WHICH_OS == "Ubuntu" ]; then
+ chown libvirt-qemu:kvm $nvme_disk
+ else
+ # That's just a wild guess for now
+ # TODO: needs improvement for other distros
+ chown libvirt-qemu:kvm $nvme_disk
+ fi
+fi
diff --git a/src/spdk/scripts/vagrant/create_openstack_vm.sh b/src/spdk/scripts/vagrant/create_openstack_vm.sh
new file mode 100755
index 000000000..a747b2808
--- /dev/null
+++ b/src/spdk/scripts/vagrant/create_openstack_vm.sh
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+
+set -e
+
+testdir=$(readlink -f $(dirname $0))
+SPDK_DIR=$(readlink -f $testdir/../..)
+VAGRANT_TARGET="$PWD"
+VAGRANT_DISTRO="ubuntu18"
+
+export SPDK_DIR
+export SPDK_VAGRANT_VMRAM=8192
+export SPDK_VAGRANT_VMCPU=10
+
+mkdir -vp "${VAGRANT_TARGET}/${VAGRANT_DISTRO}"
+cp "${testdir}/Vagrantfile_openstack_vm" "${VAGRANT_TARGET}/${VAGRANT_DISTRO}/Vagrantfile"
+
+pushd "${VAGRANT_TARGET}/${VAGRANT_DISTRO}"
+if [ -n "${http_proxy}" ]; then
+ export http_proxy
+fi
+
+VBoxManage setproperty machinefolder "${VAGRANT_TARGET}/${VAGRANT_DISTRO}"
+vagrant up
+vagrant halt
+VBoxManage setproperty machinefolder default
+
+# Convert Vbox .vmdk image to qcow2
+vmdk_img=$(find ${VAGRANT_TARGET}/${VAGRANT_DISTRO} -name "ubuntu-18.04-amd64-disk001.vmdk")
+qemu-img convert -f vmdk -O qcow2 ${vmdk_img} ${VAGRANT_TARGET}/${VAGRANT_DISTRO}/openstack_vm_image.qcow2
+
+echo ""
+echo " SUCCESS!"
+echo ""
diff --git a/src/spdk/scripts/vagrant/create_vbox.sh b/src/spdk/scripts/vagrant/create_vbox.sh
new file mode 100755
index 000000000..71fd46d12
--- /dev/null
+++ b/src/spdk/scripts/vagrant/create_vbox.sh
@@ -0,0 +1,334 @@
+#!/usr/bin/env bash
+
+# create_vbox.sh
+#
+# Creates a virtual box with vagrant in the $PWD.
+#
+# This script creates a subdirectory called $PWD/<distro> and copies the Vagrantfile
+# into that directory before running 'vagrant up'
+
+set -e
+
+VAGRANT_TARGET="$PWD"
+
+DIR="$(cd "$(dirname $0)" && pwd)"
+SPDK_DIR="$(cd "${DIR}/../../" && pwd)"
+
+# The command line help
+display_help() {
+ echo
+ echo " Usage: ${0##*/} [-b nvme-backing-file] [-n <num-cpus>] [-s <ram-size>] [-x <http-proxy>] [-hvrldcu] <distro>"
+ echo
+ echo " distro = <centos7 | centos8| ubuntu1604 | ubuntu1804 | fedora30 |"
+ echo " fedora31 | fedora32 | freebsd11 | freebsd12 | arch | clearlinux>"
+ echo
+ echo " -s <ram-size> in kb Default: ${SPDK_VAGRANT_VMRAM}"
+ echo " -n <num-cpus> 1 to 4 Default: ${SPDK_VAGRANT_VMCPU}"
+ echo " -x <http-proxy> Default: \"${SPDK_VAGRANT_HTTP_PROXY}\""
+ echo " -p <provider> \"libvirt\" or \"virtualbox\". Default: ${SPDK_VAGRANT_PROVIDER}"
+ echo " -b <nvme-backing-file> Emulated NVMe options."
+ echo " If no -b option is specified then this option defaults to emulating single"
+ echo " NVMe with 1 namespace and assumes path: /var/lib/libvirt/images/nvme_disk.img"
+ echo " -b option can be used multiple times for attaching multiple files to the VM"
+ echo " Parameters for -b option: <path>,<type>,<namespaces>,<cmb>"
+ echo " Available types: nvme, ocssd."
+ echo " -c Create all above disk, default 0"
+ echo " -H Use hugepages for allocating VM memory. Only for libvirt provider. Default: false."
+ echo " -u Use password authentication to the VM instead of SSH keys."
+ echo " -l Use a local copy of spdk, don't try to rsync from the host."
+ echo " -a Copy spdk/autorun.sh artifacts from VM to host system."
+ echo " -d Deploy a test vm by provisioning all prerequisites for spdk autotest"
+ echo " --qemu-emulator=<path> Path to custom QEMU binary. Only works with libvirt provider"
+ echo " --vagrantfiles-dir=<path> Destination directory to put Vagrantfile into."
+ echo " --package-box Install all dependencies for SPDK and create a local vagrant box version."
+ echo " -r dry-run"
+ echo " -h help"
+ echo " -v verbose"
+ echo
+ echo " Examples:"
+ echo
+ echo " $0 -x http://user:password@host:port fedora30"
+ echo " $0 -s 2048 -n 2 ubuntu16"
+ echo " $0 -rv freebsd"
+ echo " $0 fedora30"
+ echo " $0 -b /var/lib/libvirt/images/nvme1.img,nvme,1 fedora30"
+ echo " $0 -b /var/lib/libvirt/images/ocssd.img,ocssd fedora30"
+ echo " $0 -b /var/lib/libvirt/images/nvme5.img,nvme,5 -b /var/lib/libvirt/images/ocssd.img,ocssd fedora30"
+ echo
+}
+
+# Set up vagrant proxy. Assumes git-bash on Windows
+# https://stackoverflow.com/questions/19872591/how-to-use-vagrant-in-a-proxy-environment
+SPDK_VAGRANT_HTTP_PROXY=""
+
+VERBOSE=0
+HELP=0
+COPY_SPDK_DIR=1
+COPY_SPDK_ARTIFACTS=0
+DRY_RUN=0
+DEPLOY_TEST_VM=0
+SPDK_VAGRANT_DISTRO="distro"
+SPDK_VAGRANT_VMCPU=4
+SPDK_VAGRANT_VMRAM=4096
+SPDK_VAGRANT_PROVIDER="virtualbox"
+SPDK_QEMU_EMULATOR=""
+OPTIND=1
+NVME_DISKS_TYPE=""
+NVME_DISKS_NAMESPACES=""
+NVME_FILE=""
+NVME_AUTO_CREATE=0
+VAGRANTFILE_DIR=""
+VAGRANT_PASSWORD_AUTH=0
+VAGRANT_PACKAGE_BOX=0
+VAGRANT_HUGE_MEM=0
+
+while getopts ":b:n:s:x:p:u:vcraldHh-:" opt; do
+ case "${opt}" in
+ -)
+ case "${OPTARG}" in
+ package-box) VAGRANT_PACKAGE_BOX=1 ;;
+ qemu-emulator=*) SPDK_QEMU_EMULATOR="${OPTARG#*=}" ;;
+ vagrantfiles-dir=*) VAGRANTFILE_DIR="${OPTARG#*=}" ;;
+ *) echo "Invalid argument '$OPTARG'" ;;
+ esac
+ ;;
+ x)
+ http_proxy=$OPTARG
+ https_proxy=$http_proxy
+ SPDK_VAGRANT_HTTP_PROXY="${http_proxy}"
+ ;;
+ n)
+ SPDK_VAGRANT_VMCPU=$OPTARG
+ ;;
+ s)
+ SPDK_VAGRANT_VMRAM=$OPTARG
+ ;;
+ p)
+ SPDK_VAGRANT_PROVIDER=$OPTARG
+ ;;
+ v)
+ VERBOSE=1
+ ;;
+ c)
+ NVME_AUTO_CREATE=1
+ ;;
+ r)
+ DRY_RUN=1
+ ;;
+ h)
+ display_help >&2
+ exit 0
+ ;;
+ a)
+ COPY_SPDK_ARTIFACTS=1
+ ;;
+ l)
+ COPY_SPDK_DIR=0
+ ;;
+ d)
+ DEPLOY_TEST_VM=1
+ ;;
+ b)
+ NVME_FILE+="${OPTARG#*=} "
+ ;;
+ u)
+ VAGRANT_PASSWORD_AUTH=1
+ ;;
+ H)
+ VAGRANT_HUGE_MEM=1
+ ;;
+ *)
+ echo " Invalid argument: -$OPTARG" >&2
+ echo " Try: \"$0 -h\"" >&2
+ exit 1
+ ;;
+ esac
+done
+
+shift "$((OPTIND - 1))" # Discard the options and sentinel --
+
+SPDK_VAGRANT_DISTRO="$*"
+
+case "${SPDK_VAGRANT_DISTRO}" in
+ centos7)
+ export SPDK_VAGRANT_DISTRO
+ ;;
+ centos8)
+ export SPDK_VAGRANT_DISTRO
+ ;;
+ ubuntu1604)
+ export SPDK_VAGRANT_DISTRO
+ ;;
+ ubuntu1804)
+ export SPDK_VAGRANT_DISTRO
+ ;;
+ fedora30)
+ export SPDK_VAGRANT_DISTRO
+ ;;
+ fedora31)
+ export SPDK_VAGRANT_DISTRO
+ ;;
+ fedora32)
+ export SPDK_VAGRANT_DISTRO
+ ;;
+ freebsd11)
+ export SPDK_VAGRANT_DISTRO
+ ;;
+ freebsd12)
+ export SPDK_VAGRANT_DISTRO
+ ;;
+ arch)
+ export SPDK_VAGRANT_DISTRO
+ ;;
+ clearlinux)
+ export SPDK_VAGRANT_DISTRO
+ ;;
+ *)
+ echo " Invalid argument \"${SPDK_VAGRANT_DISTRO}\""
+ echo " Try: \"$0 -h\"" >&2
+ exit 1
+ ;;
+esac
+
+if ! echo "$SPDK_VAGRANT_DISTRO" | grep -q fedora && [ $DEPLOY_TEST_VM -eq 1 ]; then
+ echo "Warning: Test machine deployment is only available on fedora distros. Disabling it for this build"
+ DEPLOY_TEST_VM=0
+fi
+if [ -z "$NVME_FILE" ]; then
+ TMP="/var/lib/libvirt/images/nvme_disk.img"
+ NVME_DISKS_TYPE="nvme"
+else
+ TMP=""
+ for args in $NVME_FILE; do
+ while IFS=, read -r path type namespace cmb; do
+ TMP+="$path,"
+ if [ -z "$type" ]; then
+ type="nvme"
+ fi
+ if [[ -n $cmb ]]; then
+ NVME_CMB=${NVME_CMB:+$NVME_CMB,}true
+ fi
+ NVME_DISKS_TYPE+="$type,"
+ if [ -z "$namespace" ] && [ -n "$SPDK_QEMU_EMULATOR" ]; then
+ namespace="1"
+ fi
+ NVME_DISKS_NAMESPACES+="$namespace,"
+ if [ ${NVME_AUTO_CREATE} = 1 ]; then
+ $SPDK_DIR/scripts/vagrant/create_nvme_img.sh -t $type -n $path
+ fi
+ done <<< $args
+ done
+fi
+NVME_FILE=$TMP
+
+if [ ${VERBOSE} = 1 ]; then
+ echo
+ echo DIR=${DIR}
+ echo SPDK_DIR=${SPDK_DIR}
+ echo VAGRANT_TARGET=${VAGRANT_TARGET}
+ echo HELP=$HELP
+ echo DRY_RUN=$DRY_RUN
+ echo NVME_FILE=$NVME_FILE
+ echo NVME_DISKS_TYPE=$NVME_DISKS_TYPE
+ echo NVME_AUTO_CREATE=$NVME_AUTO_CREATE
+ echo NVME_DISKS_NAMESPACES=$NVME_DISKS_NAMESPACES
+ echo NVME_CMB=$NVME_CMB
+ echo SPDK_VAGRANT_DISTRO=$SPDK_VAGRANT_DISTRO
+ echo SPDK_VAGRANT_VMCPU=$SPDK_VAGRANT_VMCPU
+ echo SPDK_VAGRANT_VMRAM=$SPDK_VAGRANT_VMRAM
+ echo SPDK_VAGRANT_PROVIDER=$SPDK_VAGRANT_PROVIDER
+ echo SPDK_VAGRANT_HTTP_PROXY=$SPDK_VAGRANT_HTTP_PROXY
+ echo SPDK_QEMU_EMULATOR=$SPDK_QEMU_EMULATOR
+ echo VAGRANT_PACKAGE_BOX=$VAGRANT_PACKAGE_BOX
+ echo
+fi
+
+export SPDK_VAGRANT_HTTP_PROXY
+export SPDK_VAGRANT_VMCPU
+export SPDK_VAGRANT_VMRAM
+export SPDK_DIR
+export COPY_SPDK_DIR
+export COPY_SPDK_ARTIFACTS
+export DEPLOY_TEST_VM
+export NVME_CMB
+export NVME_DISKS_TYPE
+export NVME_DISKS_NAMESPACES
+export NVME_FILE
+export VAGRANT_PASSWORD_AUTH
+export VAGRANT_HUGE_MEM
+
+if [ -n "$SPDK_VAGRANT_PROVIDER" ]; then
+ provider="--provider=${SPDK_VAGRANT_PROVIDER}"
+fi
+
+if [ -n "$SPDK_VAGRANT_PROVIDER" ]; then
+ export SPDK_VAGRANT_PROVIDER
+fi
+
+if [ -n "$SPDK_QEMU_EMULATOR" ] && [ "$SPDK_VAGRANT_PROVIDER" == "libvirt" ]; then
+ export SPDK_QEMU_EMULATOR
+fi
+
+if [ ${DRY_RUN} = 1 ]; then
+	echo "Environment Variables"
+ printenv SPDK_VAGRANT_DISTRO
+ printenv SPDK_VAGRANT_VMRAM
+ printenv SPDK_VAGRANT_VMCPU
+ printenv SPDK_VAGRANT_PROVIDER
+ printenv SPDK_VAGRANT_HTTP_PROXY
+ printenv SPDK_QEMU_EMULATOR
+ printenv NVME_DISKS_TYPE
+ printenv NVME_AUTO_CREATE
+ printenv NVME_DISKS_NAMESPACES
+ printenv NVME_FILE
+ printenv SPDK_DIR
+ printenv VAGRANT_HUGE_MEM
+fi
+if [ -z "$VAGRANTFILE_DIR" ]; then
+ VAGRANTFILE_DIR="${VAGRANT_TARGET}/${SPDK_VAGRANT_DISTRO}-${SPDK_VAGRANT_PROVIDER}"
+ export VAGRANTFILE_DIR
+fi
+
+if [ -d "${VAGRANTFILE_DIR}" ]; then
+ echo "Error: ${VAGRANTFILE_DIR} already exists!"
+ exit 1
+fi
+
+if [ ${DRY_RUN} != 1 ]; then
+ mkdir -vp "${VAGRANTFILE_DIR}"
+ cp ${DIR}/Vagrantfile ${VAGRANTFILE_DIR}
+ pushd "${VAGRANTFILE_DIR}"
+ if [ -n "${http_proxy}" ]; then
+ export http_proxy
+ export https_proxy
+ if vagrant plugin list | grep -q vagrant-proxyconf; then
+ echo "vagrant-proxyconf already installed... skipping"
+ else
+ vagrant plugin install vagrant-proxyconf
+ fi
+ if echo "$SPDK_VAGRANT_DISTRO" | grep -q freebsd; then
+ cat > ~/vagrant_pkg.conf << EOF
+pkg_env: {
+http_proxy: ${http_proxy}
+}
+EOF
+ fi
+ fi
+ mkdir -p "${VAGRANTFILE_DIR}/output"
+ vagrant up $provider
+ if [ ${VAGRANT_PACKAGE_BOX} == 1 ]; then
+ vagrant ssh -c 'sudo spdk_repo/spdk/scripts/vagrant/update.sh'
+ vagrant halt
+ vagrant package --output spdk_${SPDK_VAGRANT_DISTRO}.box
+ vagrant box add spdk/${SPDK_VAGRANT_DISTRO} spdk_${SPDK_VAGRANT_DISTRO}.box \
+ && rm spdk_${SPDK_VAGRANT_DISTRO}.box
+ vagrant destroy
+ fi
+ echo ""
+ echo " SUCCESS!"
+ echo ""
+ echo " cd to ${VAGRANTFILE_DIR} and type \"vagrant ssh\" to use."
+ echo " Use vagrant \"suspend\" and vagrant \"resume\" to stop and start."
+ echo " Use vagrant \"destroy\" followed by \"rm -rf ${VAGRANTFILE_DIR}\" to destroy all trace of vm."
+ echo ""
+fi
diff --git a/src/spdk/scripts/vagrant/create_vhost_vm.sh b/src/spdk/scripts/vagrant/create_vhost_vm.sh
new file mode 100755
index 000000000..5ad416c3b
--- /dev/null
+++ b/src/spdk/scripts/vagrant/create_vhost_vm.sh
@@ -0,0 +1,132 @@
+#!/usr/bin/env bash
+
+# create_vhost_vm.sh
+#
+# Creates a virtual machine image used as a dependency for running vhost tests
+
+set -e
+
+VAGRANT_TARGET="$PWD"
+
+DIR="$(cd "$(dirname $0)" && pwd)"
+SPDK_DIR="$(cd "${DIR}/../../" && pwd)"
+USE_SSH_DIR=""
+MOVE_TO_DEFAULT_DIR=false
+INSTALL_DEPS=false
+
+# The command line help
+display_help() {
+ echo
+ echo " Usage: ${0##*/} <distro>"
+ echo
+ echo " distro = <ubuntu16 | ubuntu18 | fedora31> "
+ echo
+ echo " --use-ssh-dir=<dir path> Use existing spdk_vhost_id_rsa keys from specified directory"
+ echo " for setting up SSH key pair on VM"
+ echo " --install-deps Install SPDK build dependencies on VM. Needed by some of the"
+ echo " vhost and vhost initiator tests. Default: false."
+ echo " --move-to-default-dir Move generated files to default directories used by vhost test scripts."
+ echo " Default: false."
+ echo " --http-proxy Default: \"${SPDK_VAGRANT_HTTP_PROXY}\""
+ echo " -h help"
+ echo
+ echo " Examples:"
+ echo
+}
+
+while getopts ":h-:" opt; do
+ case "${opt}" in
+ -)
+ case "${OPTARG}" in
+ use-ssh-dir=*) USE_SSH_DIR="${OPTARG#*=}" ;;
+ move-to-default-dir) MOVE_TO_DEFAULT_DIR=true ;;
+ install-deps) INSTALL_DEPS=true ;;
+ http-proxy=*)
+ http_proxy=$OPTARG
+ https_proxy=$http_proxy
+ SPDK_VAGRANT_HTTP_PROXY="${http_proxy}"
+ ;;
+ *)
+ echo " Invalid argument -$OPTARG" >&2
+ echo " Try \"$0 -h\"" >&2
+ exit 1
+ ;;
+ esac
+ ;;
+ h)
+ display_help >&2
+ exit 0
+ ;;
+ *)
+ echo " Invalid argument: -$OPTARG" >&2
+ echo " Try: \"$0 -h\"" >&2
+ exit 1
+ ;;
+ esac
+done
+export SPDK_DIR
+export SPDK_VAGRANT_HTTP_PROXY
+export INSTALL_DEPS
+
+shift "$((OPTIND - 1))" # Discard the options and sentinel --
+
+SPDK_VAGRANT_DISTRO="$*"
+
+case "${SPDK_VAGRANT_DISTRO}" in
+ ubuntu16)
+ export SPDK_VAGRANT_DISTRO
+ ;;
+ ubuntu18)
+ export SPDK_VAGRANT_DISTRO
+ ;;
+ fedora31)
+ export SPDK_VAGRANT_DISTRO
+ ;;
+ *)
+ echo " Invalid argument \"${SPDK_VAGRANT_DISTRO}\""
+ echo " Try: \"$0 -h\"" >&2
+ exit 1
+ ;;
+esac
+
+mkdir -vp "${VAGRANT_TARGET}/${SPDK_VAGRANT_DISTRO}"
+cp ${DIR}/Vagrantfile_vhost_vm ${VAGRANT_TARGET}/${SPDK_VAGRANT_DISTRO}/Vagrantfile
+
+# Copy or generate SSH keys to the VM
+mkdir -vp "${VAGRANT_TARGET}/${SPDK_VAGRANT_DISTRO}/ssh_keys"
+
+if [[ -n $USE_SSH_DIR ]]; then
+ cp ${USE_SSH_DIR}/spdk_vhost_id_rsa* "${VAGRANT_TARGET}/${SPDK_VAGRANT_DISTRO}/ssh_keys"
+else
+ ssh-keygen -f "${VAGRANT_TARGET}/${SPDK_VAGRANT_DISTRO}/ssh_keys/spdk_vhost_id_rsa" -N "" -q
+fi
+export SPDK_VAGRANT_SSH_KEY="${VAGRANT_TARGET}/${SPDK_VAGRANT_DISTRO}/ssh_keys/spdk_vhost_id_rsa"
+
+pushd "${VAGRANT_TARGET}/${SPDK_VAGRANT_DISTRO}"
+if [ -n "${http_proxy}" ]; then
+ export http_proxy
+ export https_proxy
+ if vagrant plugin list | grep -q vagrant-proxyconf; then
+ echo "vagrant-proxyconf already installed... skipping"
+ else
+ vagrant plugin install vagrant-proxyconf
+ fi
+fi
+VBoxManage setproperty machinefolder "${VAGRANT_TARGET}/${SPDK_VAGRANT_DISTRO}"
+vagrant up
+vagrant halt
+VBoxManage setproperty machinefolder default
+
+# Convert Vbox .vmdk image to qcow2
+vmdk_img=$(find ${VAGRANT_TARGET}/${SPDK_VAGRANT_DISTRO} -name "*.vmdk")
+qemu-img convert -f vmdk -O qcow2 ${vmdk_img} ${VAGRANT_TARGET}/${SPDK_VAGRANT_DISTRO}/vhost_vm_image.qcow2
+
+if $MOVE_TO_DEFAULT_DIR; then
+ sudo mkdir -p /home/sys_sgsw
+ sudo mv -f ${VAGRANT_TARGET}/${SPDK_VAGRANT_DISTRO}/vhost_vm_image.qcow2 /home/sys_sgsw/vhost_vm_image.qcow2
+ sudo mv -f ${VAGRANT_TARGET}/${SPDK_VAGRANT_DISTRO}/ssh_keys/spdk_vhost_id_rsa* ~/.ssh/
+fi
+
+echo ""
+echo " SUCCESS!"
+echo ""
diff --git a/src/spdk/scripts/vagrant/local.conf b/src/spdk/scripts/vagrant/local.conf
new file mode 100644
index 000000000..f392d4bbb
--- /dev/null
+++ b/src/spdk/scripts/vagrant/local.conf
@@ -0,0 +1,51 @@
+# DevStack local.conf for standing up an OpenStack test environment backed by
+# the SPDK Cinder driver (NVMe-oF over RDMA). [[local|localrc]] feeds stack.sh
+# variables; the later [[...|...]] meta-sections are merged by DevStack into
+# the named configuration files.
+[[local|localrc]]
+# NOTE(review): throwaway credentials for a disposable test VM - do not reuse.
+ADMIN_PASSWORD=secret
+DATABASE_PASSWORD=secret
+RABBIT_PASSWORD=secret
+SERVICE_PASSWORD=secret
+HOST_IP=10.0.2.15
+USE_PYTHON3=True
+
+# These options define expected driver capabilities
+TEMPEST_VOLUME_DRIVER=spdk
+TEMPEST_VOLUME_VENDOR="Intel"
+TEMPEST_STORAGE_PROTOCOL=nvmet_rdma
+
+# Disable security groups entirely
+Q_USE_SECGROUP=False
+LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver
+CINDER_SECURE_DELETE=False
+public_interface=eth0
+
+# Stretched Tempest timeouts for slow, virtualized CI hosts.
+[[test-config|$TEMPEST_CONFIG]]
+[compute]
+build_timeout = 1500
+fixed_network_name = net0
+
+[volume]
+build_timeout = 1500
+
+[validation]
+ssh_timeout = 396
+
+# Cinder overrides: make the SPDK NVMe-oF backend the default volume type.
+[[post-config|$CINDER_CONF]]
+[DEFAULT]
+default_volume_type = SPDK
+enabled_backends = spdk
+target_helper = spdk-nvmeof
+debug = True
+
+# Backend section: where Cinder reaches the SPDK RPC server and the NVMe-oF
+# target advertised to instances.
+[spdk]
+spdk_rpc_ip = localhost
+spdk_rpc_port = 3333
+spdk_rpc_username = secret
+spdk_rpc_password = secret
+target_ip_address = 10.0.2.15
+target_port = 4260
+target_protocol = nvmet_rdma
+target_helper = spdk-nvmeof
+target_prefix = nqn.2014-08.org.spdk
+volume_driver = cinder.volume.drivers.spdk.SPDKDriver
+volume_backend_name = SPDK
+spdk_max_queue_depth = 64
+image_volume_cache_enabled = True
diff --git a/src/spdk/scripts/vagrant/run-autorun.sh b/src/spdk/scripts/vagrant/run-autorun.sh
new file mode 100755
index 000000000..28cefe474
--- /dev/null
+++ b/src/spdk/scripts/vagrant/run-autorun.sh
@@ -0,0 +1,245 @@
+#!/bin/bash
+
+#
+# BSD LICENSE
+#
+# Copyright (c) 2018 by NetApp, Inc.
+# All Rights Reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+# Tool overrides: honor MAKE/GIT/READLINK from the caller's environment and
+# fall back to the system binaries otherwise.
+if [ -z "${MAKE}" ]; then
+	export MAKE=make
+fi
+
+if [ -z "${GIT}" ]; then
+	export GIT=git
+fi
+
+if [ -z "${READLINK}" ]; then
+	export READLINK=readlink
+fi
+
+# Absolute directory containing this script.
+AUTOTEST_DRIVER_PATH=$($READLINK -f ${BASH_SOURCE%/*})
+# Root under which per-branch build directories are created (the CWD).
+SPDK_AUTOTEST_LOCAL_PATH=$PWD
+TIMESTAMP=$(date +"%Y%m%d%H%M%S")
+# Unique, timestamped name for this run's build directory.
+BUILD_NAME="build-${TIMESTAMP}"
+
+# The command line help
+# Prints usage and examples to stdout; callers redirect to stderr as needed.
+# NOTE(review): the examples mention "run-spdk-autotest.sh" while this file is
+# run-autorun.sh - possibly invoked via a renamed copy; confirm before fixing.
+display_help() {
+	echo
+	echo "Usage: $0 -d <path_to_spdk_tree> [-h] | [-q] | [-n]"
+	echo " -d : Specify a path to an SPDK source tree"
+	echo " -q : No output to screen"
+	echo " -n : Noop - dry-run"
+	echo " -h : This help"
+	echo
+	echo "Examples:"
+	echo " run-spdk-autotest.sh -d . -q"
+	echo " run-spdk-autotest.sh -d /home/vagrant/spdk_repo/spdk"
+	echo
+}
+
+set -e
+
+NOOP=0   # 1 = dry-run: print derived paths and exit without building
+METHOD=0 # 1 = a source tree was supplied via -d
+V=1      # 1 = tee output to screen, 0 = log file only
+OPTIND=1 # Reset in case getopts has been used previously in the shell.
+while getopts "d:qhn" opt; do
+	case "$opt" in
+		d)
+			SPDK_SOURCE_PATH=$($READLINK -f "$OPTARG")
+			echo "Using SPDK source at ${SPDK_SOURCE_PATH}"
+			METHOD=1
+			;;
+		q)
+			V=0
+			;;
+		n)
+			NOOP=1
+			;;
+		h)
+			display_help >&2
+			exit 0
+			;;
+		*)
+			# Diagnostics belong on stderr (previously the first two lines
+			# went to stdout).
+			echo "Invalid option" >&2
+			echo "" >&2
+			display_help >&2
+			exit 1
+			;;
+	esac
+done
+
+# -d is mandatory: without a source tree there is nothing to test.
+if [ -z "${SPDK_SOURCE_PATH}" ]; then
+	echo "Error: Must specify a source path " >&2
+	display_help >&2
+	exit 1
+fi
+
+# The following code verifies the input parameters and sets up the following variables:
+#
+# SPDK_AUTOTEST_LOCAL_PATH
+# GIT_REPO_PATH
+# GIT_BRANCH
+#
+
+case "$METHOD" in
+	1)
+		# -d was given: validate that the path exists and is a git checkout.
+		if [ ! -d "${SPDK_SOURCE_PATH}" ]; then
+			echo "${SPDK_SOURCE_PATH} does not exist!"
+			exit 1
+		fi
+		if [ ! -d "${SPDK_SOURCE_PATH}/.git" ]; then
+			echo "${SPDK_SOURCE_PATH} is not a git repository"
+			exit 1
+		fi
+
+		# Last path component of the resolved source tree (basename via awk).
+		GIT_REPO_SRC_DIR=$($READLINK -f "${SPDK_SOURCE_PATH}" | tr -t '/' ' ' | awk '{print $NF}')
+
+		# Later steps rsync the tree and then cd into "<dest>/spdk", so the
+		# checkout directory must literally be named "spdk".
+		if [ ! "${GIT_REPO_SRC_DIR}" = "spdk" ]; then
+			echo "The ${SPDK_SOURCE_PATH} git repository is not named \"spdk\""
+			exit 1
+		fi
+
+		pushd "${SPDK_SOURCE_PATH}"
+		GIT_REPO_SRC=$(git rev-parse --show-toplevel)
+		GIT_BRANCH=$(git rev-parse --abbrev-ref HEAD)
+		popd
+
+		# Never build inside the source tree itself; step up one level.
+		if [ "${SPDK_AUTOTEST_LOCAL_PATH}" = "${SPDK_SOURCE_PATH}" ]; then
+			SPDK_AUTOTEST_LOCAL_PATH=$($READLINK -f ${SPDK_AUTOTEST_LOCAL_PATH}/..)
+			echo "Set SPDK_AUTOTEST_LOCAL_PATH to ${SPDK_AUTOTEST_LOCAL_PATH}"
+		fi
+
+		# Refuse to clobber an existing git checkout at the branch directory.
+		if [ -d "${SPDK_AUTOTEST_LOCAL_PATH}/${GIT_BRANCH}" ]; then
+			if [ -d "${SPDK_AUTOTEST_LOCAL_PATH}/${GIT_BRANCH}/.git" ]; then
+				echo "${SPDK_AUTOTEST_LOCAL_PATH}/${GIT_BRANCH} is a git repository!"
+				exit 1
+			fi
+		fi
+
+		# Per-run working copy: <local>/<branch>/build-<timestamp>.
+		GIT_REPO_PATH="${SPDK_AUTOTEST_LOCAL_PATH}/${GIT_BRANCH}/${BUILD_NAME}"
+		;;
+	*)
+		echo "Internal Error: Must specify a source path or branch name"
+		display_help
+		exit 1
+		;;
+esac
+
+# Derived locations for this run (AUTOTEST_RESULTS mirrors GIT_REPO_PATH).
+AUTOTEST_RESULTS="${SPDK_AUTOTEST_LOCAL_PATH}/${GIT_BRANCH}/${BUILD_NAME}"
+AUTOTEST_OUTPUT_PATH="${GIT_REPO_PATH}/output"
+rootdir="${GIT_REPO_PATH}/spdk"
+BUILD_LOG_FILE="${AUTOTEST_OUTPUT_PATH}/build.log"
+
+# Dry-run report: show the paths that would be used, then continue (the
+# actual build below is gated on NOOP as well).
+if [[ ${NOOP} -eq 1 ]]; then
+	echo "AUTOTEST_DRIVER_PATH $AUTOTEST_DRIVER_PATH"
+	#echo "SPDK_AUTOTEST_LOCAL_PATH $SPDK_AUTOTEST_LOCAL_PATH"
+	echo "AUTOTEST_OUTPUT_PATH $AUTOTEST_OUTPUT_PATH"
+	#echo "rootdir $rootdir"
+	echo "BUILD_LOG_FILE $BUILD_LOG_FILE"
+	#echo "GIT_BRANCH $GIT_BRANCH"
+	#echo "BUILD_NAME $BUILD_NAME"
+	echo "GIT_REPO_PATH $GIT_REPO_PATH"
+	echo "AUTOTEST_RESULTS $AUTOTEST_RESULTS"
+fi
+
+#
+# I'd like to keep these files under source control
+#
+# Conf file selection: ~/autorun-spdk.conf takes precedence over the copy
+# shipped next to this script.
+if [[ -e "${AUTOTEST_DRIVER_PATH}/autorun-spdk.conf" ]]; then
+	conf="${AUTOTEST_DRIVER_PATH}/autorun-spdk.conf"
+fi
+if [[ -e ~/autorun-spdk.conf ]]; then
+	conf=~/autorun-spdk.conf
+fi
+
+if [[ -z $conf ]]; then
+	echo Conf file not found.
+	exit 1
+fi
+
+mkdir -pv --mode=775 "${AUTOTEST_OUTPUT_PATH}"
+# 'latest' symlink in the current directory points at the newest build dir.
+rm -f latest
+ln -sv ${GIT_REPO_PATH} latest
+
+if [[ ${NOOP} -eq 0 ]]; then
+	echo V=$V
+	if [[ $V -eq 0 ]]; then
+		# Quiet mode: save screen fds on 3/4, send everything to the log.
+		echo Quieting output
+		exec 3>&1 4>&2 > "${BUILD_LOG_FILE}" 2>&1
+	else
+		echo Teeing to ${BUILD_LOG_FILE}
+		exec > >(tee -a "${BUILD_LOG_FILE}") 2>&1
+	fi
+
+	case "$METHOD" in
+		1)
+			# Copy the source tree into the build dir and scrub build output.
+			echo "rsync git repository from ${GIT_REPO_SRC} to ${GIT_REPO_PATH}"
+			rsync -av "${GIT_REPO_SRC}" "${GIT_REPO_PATH}"
+			pushd "${GIT_REPO_PATH}/spdk"
+			sudo "${MAKE}" clean -j $(nproc)
+			sudo "${GIT}" clean -d -f
+			popd
+			;;
+		*)
+			echo "Internal Error: Must specify a source path or branch name"
+			display_help
+			exit 1
+			;;
+	esac
+
+	# Under 'set -e', any failing stage below exits and fires this trap.
+	trap "echo ERROR; exit" INT TERM EXIT
+
+	pushd "${AUTOTEST_OUTPUT_PATH}"
+	export output_dir="${AUTOTEST_OUTPUT_PATH}"
+
+	# Runs agent scripts
+	"${rootdir}/autobuild.sh" "$conf"
+	sudo -E "${rootdir}/autotest.sh" "$conf"
+	"${rootdir}/autopackage.sh" "$conf"
+	sudo -E "${rootdir}/autorun_post.py" -d "${AUTOTEST_OUTPUT_PATH}" -r "${rootdir}"
+
+	# All stages passed - clear the trap, otherwise the EXIT trap would print
+	# "ERROR" even on a fully successful run.
+	trap - INT TERM EXIT
+
+	echo "All Tests Passed" > "${GIT_REPO_PATH}/passed"
+
+	# Redirect back to screen
+	if [[ $V -eq 0 ]]; then
+		echo Redirect to screen
+		exec 1>&3 2>&4 > >(tee -a "${BUILD_LOG_FILE}") 2>&1
+	fi
+
+	popd
+
+fi
+
+echo "all tests passed"
+
+echo Output directory: ${GIT_REPO_PATH}
+echo Build log: "${BUILD_LOG_FILE}"
diff --git a/src/spdk/scripts/vagrant/update.sh b/src/spdk/scripts/vagrant/update.sh
new file mode 100755
index 000000000..87bb21c9d
--- /dev/null
+++ b/src/spdk/scripts/vagrant/update.sh
@@ -0,0 +1,98 @@
+#!/usr/bin/env bash
+
+# Provisioning-time update script run inside the Vagrant VM; requires root.
+if [ ! "$USER" = "root" ]; then
+	echo
+	echo Error: must be run as root!
+	echo
+	exit 1
+fi
+
+set -e
+
+# DIR = this script's directory; SPDK_DIR = repository root (two levels up).
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+SPDK_DIR="$(cd "${DIR}/../../" && pwd)"
+echo "SPDK_DIR = $SPDK_DIR"
+
+# Bug fix for vagrant rsync problem
+# (the synced tree can end up root-owned; hand it back to the vagrant user).
+if [ -d /home/vagrant/spdk_repo ]; then
+	echo "Fixing permissions on /home/vagrant/spdk_repo"
+	chown vagrant /home/vagrant/spdk_repo
+	chgrp vagrant /home/vagrant/spdk_repo
+fi
+
+# Setup for run-autorun.sh
+# Install a default autorun-spdk.conf for the vagrant user if none exists yet.
+if [ ! -f /home/vagrant/autorun-spdk.conf ]; then
+	echo "Copying scripts/vagrant/autorun-spdk.conf to /home/vagrant"
+	cp ${SPDK_DIR}/scripts/vagrant/autorun-spdk.conf /home/vagrant
+	chown vagrant /home/vagrant/autorun-spdk.conf
+	chgrp vagrant /home/vagrant/autorun-spdk.conf
+fi
+
+SYSTEM=$(uname -s)
+
+if [ "$SYSTEM" = "FreeBSD" ]; then
+	# Do initial setup for the system
+	pkg upgrade -f
+	${SPDK_DIR}/scripts/pkgdep.sh --all
+	if [ -d /usr/src/.git ]; then
+		echo
+		echo "/usr/src/ is a git repository"
+		echo "consider \"cd /usr/src/; git pull\" to update"
+		echo
+	else
+		git clone --depth 10 -b release/11.1.0 https://github.com/freebsd/freebsd.git /usr/src
+	fi
+else
+
+	# Make sure that we get the hugepages we need on provision boot
+	# Note: The package install should take care of this at the end
+	#       But sometimes after all the work of provisioning, we can't
+	#       get the requested number of hugepages without rebooting.
+	#       So do it here just in case
+	sysctl -w vm.nr_hugepages=1024
+	HUGEPAGES=$(sysctl -n vm.nr_hugepages)
+	if [ "$HUGEPAGES" != 1024 ]; then
+		echo "Warning: Unable to get 1024 hugepages, only got $HUGEPAGES"
+		echo "Warning: Adjusting HUGEMEM in /home/vagrant/autorun-spdk.conf"
+		sed "s/HUGEMEM=.*$/HUGEMEM=${HUGEPAGES}/g" /home/vagrant/autorun-spdk.conf > /home/vagrant/foo.conf
+		mv -f /home/vagrant/foo.conf /home/vagrant/autorun-spdk.conf
+	fi
+
+	# Figure out what system we are running on
+	if [ -f /etc/lsb-release ]; then
+		. /etc/lsb-release
+	elif [ -f /etc/redhat-release ]; then
+		yum update -y
+		yum install -y redhat-lsb
+		DISTRIB_ID=$(lsb_release -si)
+		DISTRIB_RELEASE=$(lsb_release -sr)
+		DISTRIB_CODENAME=$(lsb_release -sc)
+		DISTRIB_DESCRIPTION=$(lsb_release -sd)
+	fi
+
+	# Do initial setup for the system
+	if [ "$DISTRIB_ID" == "Ubuntu" ]; then
+		set -xv
+		export DEBIAN_PRIORITY=critical
+		export DEBIAN_FRONTEND=noninteractive
+		export DEBCONF_NONINTERACTIVE_SEEN=true
+		# Standard update + upgrade dance
+		apt-get update --assume-yes --no-install-suggests --no-install-recommends -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold"
+		apt-get upgrade --assume-yes --no-install-suggests --no-install-recommends -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold"
+		${SPDK_DIR}/scripts/pkgdep.sh --all
+		apt-get clean
+	elif [ "$DISTRIB_ID" == "CentOS" ]; then
+		# Standard update + upgrade dance
+		# 'yum check-update' exits 100 when updates are available, which would
+		# abort the script under 'set -e' before 'yum update' ever ran;
+		# tolerate exactly that exit code.
+		yum check-update || [ $? -eq 100 ]
+		yum update -y
+		${SPDK_DIR}/scripts/pkgdep.sh --all
+		yum clean all
+	elif [ "$DISTRIB_ID" == "Fedora" ]; then
+		yum check-update || [ $? -eq 100 ]
+		yum update -y
+		"$SPDK_DIR"/scripts/pkgdep.sh --all
+		sudo -u vagrant "$SPDK_DIR"/test/common/config/vm_setup.sh -i
+		yum clean all
+	fi
+	# Don't leave provisioning commands in root's shell history.
+	cat /dev/null > ~/.bash_history && history -c
+fi