Diffstat (limited to 'src/spdk/scripts/rpc')
-rw-r--r--  src/spdk/scripts/rpc/__init__.py    157
-rw-r--r--  src/spdk/scripts/rpc/app.py          23
-rw-r--r--  src/spdk/scripts/rpc/bdev.py        531
-rw-r--r--  src/spdk/scripts/rpc/client.py      100
-rw-r--r--  src/spdk/scripts/rpc/ioat.py         12
-rw-r--r--  src/spdk/scripts/rpc/iscsi.py       502
-rw-r--r--  src/spdk/scripts/rpc/log.py          65
-rw-r--r--  src/spdk/scripts/rpc/lvol.py        195
-rw-r--r--  src/spdk/scripts/rpc/nbd.py          18
-rw-r--r--  src/spdk/scripts/rpc/net.py          29
-rw-r--r--  src/spdk/scripts/rpc/nvme.py         54
-rw-r--r--  src/spdk/scripts/rpc/nvmf.py        352
-rw-r--r--  src/spdk/scripts/rpc/pmem.py         29
-rw-r--r--  src/spdk/scripts/rpc/subsystem.py     7
-rw-r--r--  src/spdk/scripts/rpc/vhost.py       248
15 files changed, 2322 insertions, 0 deletions
diff --git a/src/spdk/scripts/rpc/__init__.py b/src/spdk/scripts/rpc/__init__.py
new file mode 100644
index 00000000..9a4dbb58
--- /dev/null
+++ b/src/spdk/scripts/rpc/__init__.py
@@ -0,0 +1,157 @@
+import json
+import sys
+
+from . import app
+from . import bdev
+from . import ioat
+from . import iscsi
+from . import log
+from . import lvol
+from . import nbd
+from . import net
+from . import nvme
+from . import nvmf
+from . import pmem
+from . import subsystem
+from . import vhost
+from . import client as rpc_client
+
+
+def start_subsystem_init(client):
+ """Start initialization of subsystems"""
+ return client.call('start_subsystem_init')
+
+
+def get_rpc_methods(client, current=None):
+ """Get list of supported RPC methods.
+ Args:
+ current: Get list of RPC methods only callable in the current state.
+ """
+ params = {}
+
+ if current:
+ params['current'] = current
+
+ return client.call('get_rpc_methods', params)
+
+
+def _json_dump(config, fd, indent):
+ if indent is None:
+ indent = 2
+ elif indent < 0:
+ indent = None
+ json.dump(config, fd, indent=indent)
+ fd.write('\n')
+
+
+def save_config(client, fd, indent=2):
+ """Write current (live) configuration of SPDK subsystems and targets to stdout.
+ Args:
+ fd: opened file descriptor where data will be saved
+ indent: Indent level. A value less than 0 means compact mode.
+ Default indent level is 2.
+ """
+ config = {
+ 'subsystems': []
+ }
+
+ for elem in client.call('get_subsystems'):
+ cfg = {
+ 'subsystem': elem['subsystem'],
+ 'config': client.call('get_subsystem_config', {"name": elem['subsystem']})
+ }
+ config['subsystems'].append(cfg)
+
+ _json_dump(config, fd, indent)
+
+
+def load_config(client, fd):
+ """Configure SPDK subsystems and targets using JSON RPC read from stdin.
+ Args:
+ fd: opened file descriptor where data will be taken from
+ """
+ json_config = json.load(fd)
+
+ # remove subsystems with no config
+ subsystems = json_config['subsystems']
+ for subsystem in list(subsystems):
+ if not subsystem['config']:
+ subsystems.remove(subsystem)
+
+ # check if methods in the config file are known
+ allowed_methods = client.call('get_rpc_methods')
+ for subsystem in list(subsystems):
+ config = subsystem['config']
+ for elem in list(config):
+ if 'method' not in elem or elem['method'] not in allowed_methods:
+ raise rpc_client.JSONRPCException("Unknown method was included in the config file")
+
+ while subsystems:
+ allowed_methods = client.call('get_rpc_methods', {'current': True})
+ allowed_found = False
+
+ for subsystem in list(subsystems):
+ config = subsystem['config']
+ for elem in list(config):
+ if 'method' not in elem or elem['method'] not in allowed_methods:
+ continue
+
+ client.call(elem['method'], elem['params'])
+ config.remove(elem)
+ allowed_found = True
+
+ if not config:
+ subsystems.remove(subsystem)
+
+ if 'start_subsystem_init' in allowed_methods:
+ client.call('start_subsystem_init')
+ allowed_found = True
+
+ if not allowed_found:
+ break
+
+ if subsystems:
+ print("Some configs were skipped because the RPC state that can call them passed over.")
+
+
+def save_subsystem_config(client, fd, indent=2, name=None):
+ """Write current (live) configuration of SPDK subsystem to stdout.
+ Args:
+ fd: opened file descriptor where data will be saved
+ indent: Indent level. A value less than 0 means compact mode.
+ Default indent level is 2.
+ """
+ cfg = {
+ 'subsystem': name,
+ 'config': client.call('get_subsystem_config', {"name": name})
+ }
+
+ _json_dump(cfg, fd, indent)
+
+
+def load_subsystem_config(client, fd):
+ """Configure SPDK subsystem using JSON RPC read from stdin.
+ Args:
+ fd: opened file descriptor where data will be taken from
+ """
+ subsystem = json.load(fd)
+
+ if not subsystem['config']:
+ return
+
+ allowed_methods = client.call('get_rpc_methods')
+ config = subsystem['config']
+ for elem in list(config):
+ if 'method' not in elem or elem['method'] not in allowed_methods:
+ raise rpc_client.JSONRPCException("Unknown method was included in the config file")
+
+ allowed_methods = client.call('get_rpc_methods', {'current': True})
+ for elem in list(config):
+ if 'method' not in elem or elem['method'] not in allowed_methods:
+ continue
+
+ client.call(elem['method'], elem['params'])
+ config.remove(elem)
+
+ if config:
+ print("Some configs were skipped because they cannot be called in the current RPC state.")
diff --git a/src/spdk/scripts/rpc/app.py b/src/spdk/scripts/rpc/app.py
new file mode 100644
index 00000000..c9b088f8
--- /dev/null
+++ b/src/spdk/scripts/rpc/app.py
@@ -0,0 +1,23 @@
+def kill_instance(client, sig_name):
+ """Send a signal to the SPDK process.
+
+ Args:
+ sig_name: signal to send ("SIGINT", "SIGTERM", "SIGQUIT", "SIGHUP", or "SIGKILL")
+ """
+ params = {'sig_name': sig_name}
+ return client.call('kill_instance', params)
+
+
+def context_switch_monitor(client, enabled=None):
+ """Query or set state of context switch monitoring.
+
+ Args:
+ enabled: True to enable monitoring; False to disable monitoring; None to query (optional)
+
+ Returns:
+ Current context switch monitoring state (after applying enabled flag).
+ """
+ params = {}
+ if enabled is not None:
+ params['enabled'] = enabled
+ return client.call('context_switch_monitor', params)
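A minimal sketch of context_switch_monitor under the same client setup as above; the first call queries the current state, the second disables monitoring:

    from rpc import app
    from rpc.client import JSONRPCClient

    client = JSONRPCClient('/var/tmp/spdk.sock')
    print(app.context_switch_monitor(client))                  # query current state
    print(app.context_switch_monitor(client, enabled=False))   # disable monitoring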
diff --git a/src/spdk/scripts/rpc/bdev.py b/src/spdk/scripts/rpc/bdev.py
new file mode 100644
index 00000000..6c7d0ecd
--- /dev/null
+++ b/src/spdk/scripts/rpc/bdev.py
@@ -0,0 +1,531 @@
+def set_bdev_options(client, bdev_io_pool_size=None, bdev_io_cache_size=None):
+ """Set parameters for the bdev subsystem.
+
+ Args:
+ bdev_io_pool_size: number of bdev_io structures in shared buffer pool (optional)
+ bdev_io_cache_size: maximum number of bdev_io structures cached per thread (optional)
+ """
+ params = {}
+
+ if bdev_io_pool_size:
+ params['bdev_io_pool_size'] = bdev_io_pool_size
+ if bdev_io_cache_size:
+ params['bdev_io_cache_size'] = bdev_io_cache_size
+
+ return client.call('set_bdev_options', params)
+
+
+def construct_crypto_bdev(client, base_bdev_name, name, crypto_pmd, key):
+ """Construct a crypto virtual block device.
+
+ Args:
+ base_bdev_name: name of the underlying base bdev
+ name: name for the crypto vbdev
+ crypto_pmd: name of the DPDK crypto driver to use
+ key: key
+
+ Returns:
+ Name of created virtual block device.
+ """
+ params = {'base_bdev_name': base_bdev_name, 'name': name, 'crypto_pmd': crypto_pmd, 'key': key}
+
+ return client.call('construct_crypto_bdev', params)
+
+
+def delete_crypto_bdev(client, name):
+ """Delete crypto virtual block device.
+
+ Args:
+ name: name of crypto vbdev to delete
+ """
+ params = {'name': name}
+ return client.call('delete_crypto_bdev', params)
+
+
+def construct_malloc_bdev(client, num_blocks, block_size, name=None, uuid=None):
+ """Construct a malloc block device.
+
+ Args:
+ num_blocks: size of block device in blocks
+ block_size: block size of device; must be a power of 2 and at least 512
+ name: name of block device (optional)
+ uuid: UUID of block device (optional)
+
+ Returns:
+ Name of created block device.
+ """
+ params = {'num_blocks': num_blocks, 'block_size': block_size}
+ if name:
+ params['name'] = name
+ if uuid:
+ params['uuid'] = uuid
+ return client.call('construct_malloc_bdev', params)
+
+
+def delete_malloc_bdev(client, name):
+ """Delete malloc block device.
+
+ Args:
+ name: name of malloc bdev to delete
+ """
+ params = {'name': name}
+ return client.call('delete_malloc_bdev', params)
+
+
+def construct_null_bdev(client, num_blocks, block_size, name, uuid=None):
+ """Construct a null block device.
+
+ Args:
+ num_blocks: size of block device in blocks
+ block_size: block size of device; must be a power of 2 and at least 512
+ name: name of block device
+ uuid: UUID of block device (optional)
+
+ Returns:
+ Name of created block device.
+ """
+ params = {'name': name, 'num_blocks': num_blocks,
+ 'block_size': block_size}
+ if uuid:
+ params['uuid'] = uuid
+ return client.call('construct_null_bdev', params)
+
+
+def delete_null_bdev(client, name):
+ """Remove null bdev from the system.
+
+ Args:
+ name: name of null bdev to delete
+ """
+ params = {'name': name}
+ return client.call('delete_null_bdev', params)
+
+
+def get_raid_bdevs(client, category):
+ """Get list of raid bdevs based on category
+
+ Args:
+ category: one of "all", "online", "configuring", or "offline"
+
+ Returns:
+ List of raid bdev names
+ """
+ params = {'category': category}
+ return client.call('get_raid_bdevs', params)
+
+
+def construct_raid_bdev(client, name, strip_size, raid_level, base_bdevs):
+ """Construct pooled device
+
+ Args:
+ name: user defined raid bdev name
+ strip_size: strip size of raid bdev in KB; supported values are powers of two, e.g. 8, 16, 32, 64, 128, 256, 512, 1024
+ raid_level: raid level of raid bdev; only level 0 is currently supported
+ base_bdevs: space-separated names of NVMe bdevs in double quotes, e.g. "Nvme0n1 Nvme1n1 Nvme2n1"
+
+ Returns:
+ None
+ """
+ params = {'name': name, 'strip_size': strip_size, 'raid_level': raid_level, 'base_bdevs': base_bdevs}
+
+ return client.call('construct_raid_bdev', params)
+
+
+def destroy_raid_bdev(client, name):
+ """Destroy pooled device
+
+ Args:
+ name: raid bdev name
+
+ Returns:
+ None
+ """
+ params = {'name': name}
+ return client.call('destroy_raid_bdev', params)
+
+
+def construct_aio_bdev(client, filename, name, block_size=None):
+ """Construct a Linux AIO block device.
+
+ Args:
+ filename: path to device or file (ex: /dev/sda)
+ name: name of block device
+ block_size: block size of device (optional; autodetected if omitted)
+
+ Returns:
+ Name of created block device.
+ """
+ params = {'name': name,
+ 'filename': filename}
+
+ if block_size:
+ params['block_size'] = block_size
+
+ return client.call('construct_aio_bdev', params)
+
+
+def delete_aio_bdev(client, name):
+ """Remove aio bdev from the system.
+
+ Args:
+ name: name of aio bdev to delete
+ """
+ params = {'name': name}
+ return client.call('delete_aio_bdev', params)
+
+
+def set_bdev_nvme_options(client, action_on_timeout=None, timeout_us=None, retry_count=None, nvme_adminq_poll_period_us=None):
+ """Set options for the bdev nvme. This is startup command.
+
+ Args:
+ action_on_timeout: action to take on command time out. Valid values are: none, reset, abort (optional)
+ timeout_us: Timeout for each command, in microseconds. If 0, don't track timeouts (optional)
+ retry_count: The number of attempts per I/O when an I/O fails (optional)
+ nvme_adminq_poll_period_us: how often the admin queue is polled for asynchronous events, in microseconds (optional)
+ """
+ params = {}
+
+ if action_on_timeout:
+ params['action_on_timeout'] = action_on_timeout
+
+ if timeout_us:
+ params['timeout_us'] = timeout_us
+
+ if retry_count:
+ params['retry_count'] = retry_count
+
+ if nvme_adminq_poll_period_us:
+ params['nvme_adminq_poll_period_us'] = nvme_adminq_poll_period_us
+
+ return client.call('set_bdev_nvme_options', params)
+
+
+def set_bdev_nvme_hotplug(client, enable, period_us=None):
+ """Set options for the bdev nvme. This is startup command.
+
+ Args:
+ enable: True to enable hotplug, False to disable.
+ period_us: how often the hotplug is processed for insert and remove events. Set 0 to reset to default. (optional)
+ """
+ params = {'enable': enable}
+
+ if period_us:
+ params['period_us'] = period_us
+
+ return client.call('set_bdev_nvme_hotplug', params)
+
+
+def construct_nvme_bdev(client, name, trtype, traddr, adrfam=None, trsvcid=None, subnqn=None):
+ """Construct NVMe namespace block device.
+
+ Args:
+ name: bdev name prefix; "n" + namespace ID will be appended to create unique names
+ trtype: transport type ("PCIe", "RDMA")
+ traddr: transport address (PCI BDF or IP address)
+ adrfam: address family ("IPv4", "IPv6", "IB", or "FC") (optional for PCIe)
+ trsvcid: transport service ID (port number for IP-based addresses; optional for PCIe)
+ subnqn: subsystem NQN to connect to (optional)
+
+ Returns:
+ Name of created block device.
+ """
+ params = {'name': name,
+ 'trtype': trtype,
+ 'traddr': traddr}
+
+ if adrfam:
+ params['adrfam'] = adrfam
+
+ if trsvcid:
+ params['trsvcid'] = trsvcid
+
+ if subnqn:
+ params['subnqn'] = subnqn
+
+ return client.call('construct_nvme_bdev', params)
+
+
+def delete_nvme_controller(client, name):
+ """Remove NVMe controller from the system.
+
+ Args:
+ name: controller name
+ """
+
+ params = {'name': name}
+ return client.call('delete_nvme_controller', params)
+
+
+def construct_rbd_bdev(client, pool_name, rbd_name, block_size, name=None):
+ """Construct a Ceph RBD block device.
+
+ Args:
+ pool_name: Ceph RBD pool name
+ rbd_name: Ceph RBD image name
+ block_size: block size of RBD volume
+ name: name of block device (optional)
+
+ Returns:
+ Name of created block device.
+ """
+ params = {
+ 'pool_name': pool_name,
+ 'rbd_name': rbd_name,
+ 'block_size': block_size,
+ }
+
+ if name:
+ params['name'] = name
+
+ return client.call('construct_rbd_bdev', params)
+
+
+def delete_rbd_bdev(client, name):
+ """Remove rbd bdev from the system.
+
+ Args:
+ name: name of rbd bdev to delete
+ """
+ params = {'name': name}
+ return client.call('delete_rbd_bdev', params)
+
+
+def construct_error_bdev(client, base_name):
+ """Construct an error injection block device.
+
+ Args:
+ base_name: base bdev name
+ """
+ params = {'base_name': base_name}
+ return client.call('construct_error_bdev', params)
+
+
+def delete_error_bdev(client, name):
+ """Remove error bdev from the system.
+
+ Args:
+ name: name of error bdev to delete
+ """
+ params = {'name': name}
+ return client.call('delete_error_bdev', params)
+
+
+def construct_iscsi_bdev(client, name, url, initiator_iqn):
+ """Construct a iSCSI block device.
+
+ Args:
+ name: name of block device
+ url: iSCSI URL
+ initiator_iqn: IQN name to be used by initiator
+
+ Returns:
+ Name of created block device.
+ """
+ params = {
+ 'name': name,
+ 'url': url,
+ 'initiator_iqn': initiator_iqn,
+ }
+ return client.call('construct_iscsi_bdev', params)
+
+
+def delete_iscsi_bdev(client, name):
+ """Remove iSCSI bdev from the system.
+
+ Args:
+ name: name of iSCSI bdev to delete
+ """
+ params = {'name': name}
+ return client.call('delete_iscsi_bdev', params)
+
+
+def construct_pmem_bdev(client, pmem_file, name):
+ """Construct a libpmemblk block device.
+
+ Args:
+ pmem_file: path to pmemblk pool file
+ name: name of block device
+
+ Returns:
+ Name of created block device.
+ """
+ params = {
+ 'pmem_file': pmem_file,
+ 'name': name
+ }
+ return client.call('construct_pmem_bdev', params)
+
+
+def delete_pmem_bdev(client, name):
+ """Remove pmem bdev from the system.
+
+ Args:
+ name: name of pmem bdev to delete
+ """
+ params = {'name': name}
+ return client.call('delete_pmem_bdev', params)
+
+
+def construct_passthru_bdev(client, base_bdev_name, passthru_bdev_name):
+ """Construct a pass-through block device.
+
+ Args:
+ base_bdev_name: name of the existing bdev
+ passthru_bdev_name: name of block device
+
+ Returns:
+ Name of created block device.
+ """
+ params = {
+ 'base_bdev_name': base_bdev_name,
+ 'passthru_bdev_name': passthru_bdev_name,
+ }
+ return client.call('construct_passthru_bdev', params)
+
+
+def delete_passthru_bdev(client, name):
+ """Remove pass through bdev from the system.
+
+ Args:
+ name: name of pass through bdev to delete
+ """
+ params = {'name': name}
+ return client.call('delete_passthru_bdev', params)
+
+
+def construct_split_vbdev(client, base_bdev, split_count, split_size_mb=None):
+ """Construct split block devices from a base bdev.
+
+ Args:
+ base_bdev: name of bdev to split
+ split_count: number of split bdevs to create
+ split_size_mb: size of each split volume in MiB (optional)
+
+ Returns:
+ List of created block devices.
+ """
+ params = {
+ 'base_bdev': base_bdev,
+ 'split_count': split_count,
+ }
+ if split_size_mb:
+ params['split_size_mb'] = split_size_mb
+
+ return client.call('construct_split_vbdev', params)
+
+
+def destruct_split_vbdev(client, base_bdev):
+ """Destroy split block devices.
+
+ Args:
+ base_bdev: name of previously split bdev
+ """
+ params = {
+ 'base_bdev': base_bdev,
+ }
+
+ return client.call('destruct_split_vbdev', params)
+
+
+def get_bdevs(client, name=None):
+ """Get information about block devices.
+
+ Args:
+ name: bdev name to query (optional; if omitted, query all bdevs)
+
+ Returns:
+ List of bdev information objects.
+ """
+ params = {}
+ if name:
+ params['name'] = name
+ return client.call('get_bdevs', params)
+
+
+def get_bdevs_iostat(client, name=None):
+ """Get I/O statistics for block devices.
+
+ Args:
+ name: bdev name to query (optional; if omitted, query all bdevs)
+
+ Returns:
+ I/O statistics for the requested block devices.
+ """
+ params = {}
+ if name:
+ params['name'] = name
+ return client.call('get_bdevs_iostat', params)
+
+
+def delete_bdev(client, bdev_name):
+ """Remove a bdev from the system.
+
+ Args:
+ bdev_name: name of bdev to delete
+ """
+ params = {'name': bdev_name}
+ return client.call('delete_bdev', params)
+
+
+def bdev_inject_error(client, name, io_type, error_type, num=1):
+ """Inject an error via an error bdev.
+
+ Args:
+ name: name of error bdev
+ io_type: one of "clear", "read", "write", "unmap", "flush", or "all"
+ error_type: one of "failure" or "pending"
+ num: number of commands to fail
+ """
+ params = {
+ 'name': name,
+ 'io_type': io_type,
+ 'error_type': error_type,
+ 'num': num,
+ }
+
+ return client.call('bdev_inject_error', params)
+
+
+def set_bdev_qd_sampling_period(client, name, period):
+ """Enable queue depth tracking on a specified bdev.
+
+ Args:
+ name: name of a bdev on which to track queue depth.
+ period: period (in microseconds) at which to update the queue depth reading. If set to 0, polling will be disabled.
+ """
+
+ params = {}
+ params['name'] = name
+ params['period'] = period
+ return client.call('set_bdev_qd_sampling_period', params)
+
+
+def set_bdev_qos_limit(client, name, rw_ios_per_sec=None, rw_mbytes_per_sec=None):
+ """Set QoS rate limit on a block device.
+
+ Args:
+ name: name of block device
+ rw_ios_per_sec: R/W IOs per second limit (>=10000, example: 20000). 0 means unlimited.
+ rw_mbytes_per_sec: R/W megabytes per second limit (>=10, example: 100). 0 means unlimited.
+ """
+ params = {}
+ params['name'] = name
+ if rw_ios_per_sec is not None:
+ params['rw_ios_per_sec'] = rw_ios_per_sec
+ if rw_mbytes_per_sec is not None:
+ params['rw_mbytes_per_sec'] = rw_mbytes_per_sec
+ return client.call('set_bdev_qos_limit', params)
+
+
+def apply_firmware(client, bdev_name, filename):
+ """Download and commit firmware to NVMe device.
+
+ Args:
+ bdev_name: name of NVMe block device
+ filename: filename of the firmware to download
+ """
+ params = {
+ 'filename': filename,
+ 'bdev_name': bdev_name,
+ }
+ return client.call('apply_nvme_firmware', params)
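Sketch of a round trip through the bdev helpers, assuming the same socket as above; the bdev name and size (131072 blocks x 512 bytes = 64 MiB) are illustrative:

    from rpc import bdev
    from rpc.client import JSONRPCClient

    client = JSONRPCClient('/var/tmp/spdk.sock')
    name = bdev.construct_malloc_bdev(client, num_blocks=131072, block_size=512, name='Malloc0')
    print(bdev.get_bdevs(client, name=name))         # inspect the new bdev
    print(bdev.get_bdevs_iostat(client, name=name))  # per-bdev I/O statistics
    bdev.delete_malloc_bdev(client, name)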
diff --git a/src/spdk/scripts/rpc/client.py b/src/spdk/scripts/rpc/client.py
new file mode 100644
index 00000000..6a71ab51
--- /dev/null
+++ b/src/spdk/scripts/rpc/client.py
@@ -0,0 +1,100 @@
+import json
+import socket
+import time
+
+
+def print_dict(d):
+ print(json.dumps(d, indent=2))
+
+
+class JSONRPCException(Exception):
+ def __init__(self, message):
+ self.message = message
+
+
+class JSONRPCClient(object):
+ def __init__(self, addr, port=None, verbose=False, timeout=60.0):
+ self.verbose = verbose
+ self.timeout = timeout
+ try:
+ if addr.startswith('/'):
+ self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ self.sock.connect(addr)
+ elif ':' in addr:
+ for res in socket.getaddrinfo(addr, port, socket.AF_INET6, socket.SOCK_STREAM, socket.SOL_TCP):
+ af, socktype, proto, canonname, sa = res
+ self.sock = socket.socket(af, socktype, proto)
+ self.sock.connect(sa)
+ else:
+ self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ self.sock.connect((addr, port))
+ except socket.error as ex:
+ raise JSONRPCException("Error while connecting to %s\n"
+ "Error details: %s" % (addr, ex))
+
+ def __del__(self):
+ self.sock.close()
+
+ def call(self, method, params={}, verbose=False):
+ req = {}
+ req['jsonrpc'] = '2.0'
+ req['method'] = method
+ req['id'] = 1
+ if (params):
+ req['params'] = params
+ reqstr = json.dumps(req)
+
+ verbose = verbose or self.verbose
+
+ if verbose:
+ print("request:")
+ print(json.dumps(req, indent=2))
+
+ self.sock.sendall(reqstr.encode("utf-8"))
+ buf = ''
+ closed = False
+ response = {}
+ start_time = time.monotonic()
+
+ while not closed:
+ try:
+ timeout = self.timeout - (time.monotonic() - start_time)
+ if timeout <= 0.0:
+ break
+
+ self.sock.settimeout(timeout)
+ newdata = self.sock.recv(4096)
+ if (newdata == b''):
+ closed = True
+
+ buf += newdata.decode("utf-8")
+ response = json.loads(buf)
+ except socket.timeout:
+ break
+ except ValueError:
+ continue # incomplete response; keep buffering
+ break
+
+ if not response:
+ if method == "kill_instance":
+ return {}
+ if closed:
+ msg = "Connection closed with partial response:"
+ else:
+ msg = "Timeout while waiting for response:"
+ msg = "\n".join([msg, buf])
+ raise JSONRPCException(msg)
+
+ if verbose:
+ print("response:")
+ print(json.dumps(response, indent=2))
+
+ if 'error' in response:
+ msg = "\n".join(["Got JSON-RPC error response",
+ "request:",
+ json.dumps(req, indent=2),
+ "response:",
+ json.dumps(response['error'], indent=2)])
+ raise JSONRPCException(msg)
+
+ return response['result']
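The wrappers in the other modules all reduce to JSONRPCClient.call(); a raw call looks like the sketch below (socket path assumed as above, bdev name illustrative):

    from rpc.client import JSONRPCClient, print_dict

    client = JSONRPCClient('/var/tmp/spdk.sock')
    print_dict(client.call('get_rpc_methods'))                  # no parameters
    print_dict(client.call('get_bdevs', {'name': 'Malloc0'}))   # with parameters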
diff --git a/src/spdk/scripts/rpc/ioat.py b/src/spdk/scripts/rpc/ioat.py
new file mode 100644
index 00000000..958e18bb
--- /dev/null
+++ b/src/spdk/scripts/rpc/ioat.py
@@ -0,0 +1,12 @@
+def scan_ioat_copy_engine(client, pci_whitelist):
+ """Scan and enable IOAT copy engine.
+
+ Args:
+ pci_whitelist: Python list of PCI addresses in
+ domain:bus:device.function format or
+ domain.bus.device.function format
+ """
+ params = {}
+ if pci_whitelist:
+ params['pci_whitelist'] = pci_whitelist
+ return client.call('scan_ioat_copy_engine', params)
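Sketch restricting the IOAT copy engine scan to a single device; the PCI address is illustrative:

    from rpc import ioat
    from rpc.client import JSONRPCClient

    client = JSONRPCClient('/var/tmp/spdk.sock')
    ioat.scan_ioat_copy_engine(client, pci_whitelist=['0000:00:04.0'])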
diff --git a/src/spdk/scripts/rpc/iscsi.py b/src/spdk/scripts/rpc/iscsi.py
new file mode 100644
index 00000000..a824ad20
--- /dev/null
+++ b/src/spdk/scripts/rpc/iscsi.py
@@ -0,0 +1,502 @@
+
+
+def set_iscsi_options(
+ client,
+ auth_file=None,
+ node_base=None,
+ nop_timeout=None,
+ nop_in_interval=None,
+ disable_chap=None,
+ require_chap=None,
+ mutual_chap=None,
+ chap_group=None,
+ max_sessions=None,
+ max_queue_depth=None,
+ max_connections_per_session=None,
+ default_time2wait=None,
+ default_time2retain=None,
+ first_burst_length=None,
+ immediate_data=None,
+ error_recovery_level=None,
+ allow_duplicated_isid=None,
+ min_connections_per_core=None):
+ """Set iSCSI target options.
+
+ Args:
+ auth_file: Path to CHAP shared secret file (optional)
+ node_base: Prefix of the name of iSCSI target node (optional)
+ nop_timeout: Timeout in seconds for nop-in requests to the initiator (optional)
+ nop_in_interval: Time interval in secs between nop-in requests by the target (optional)
+ disable_chap: CHAP for discovery session should be disabled (optional)
+ require_chap: CHAP for discovery session should be required
+ mutual_chap: CHAP for discovery session should be mutual
+ chap_group: Authentication group ID for discovery session
+ max_sessions: Maximum number of sessions in the host
+ max_queue_depth: Maximum number of outstanding I/Os per queue
+ max_connections_per_session: Negotiated parameter, MaxConnections
+ default_time2wait: Negotiated parameter, DefaultTime2Wait
+ default_time2retain: Negotiated parameter, DefaultTime2Retain
+ first_burst_length: Negotiated parameter, FirstBurstLength
+ immediate_data: Negotiated parameter, ImmediateData
+ error_recovery_level: Negotiated parameter, ErrorRecoveryLevel
+ allow_duplicated_isid: Allow duplicated initiator session ID
+ min_connections_per_core: Allocation unit of connections per core
+
+ Returns:
+ True or False
+ """
+ params = {}
+
+ if auth_file:
+ params['auth_file'] = auth_file
+ if node_base:
+ params['node_base'] = node_base
+ if nop_timeout:
+ params['nop_timeout'] = nop_timeout
+ if nop_in_interval:
+ params['nop_in_interval'] = nop_in_interval
+ if disable_chap:
+ params['disable_chap'] = disable_chap
+ if require_chap:
+ params['require_chap'] = require_chap
+ if mutual_chap:
+ params['mutual_chap'] = mutual_chap
+ if chap_group:
+ params['chap_group'] = chap_group
+ if max_sessions:
+ params['max_sessions'] = max_sessions
+ if max_queue_depth:
+ params['max_queue_depth'] = max_queue_depth
+ if max_connections_per_session:
+ params['max_connections_per_session'] = max_connections_per_session
+ if default_time2wait:
+ params['default_time2wait'] = default_time2wait
+ if default_time2retain:
+ params['default_time2retain'] = default_time2retain
+ if first_burst_length:
+ params['first_burst_length'] = first_burst_length
+ if immediate_data:
+ params['immediate_data'] = immediate_data
+ if error_recovery_level:
+ params['error_recovery_level'] = error_recovery_level
+ if allow_duplicated_isid:
+ params['allow_duplicated_isid'] = allow_duplicated_isid
+ if min_connections_per_core:
+ params['min_connections_per_core'] = min_connections_per_core
+
+ return client.call('set_iscsi_options', params)
+
+
+def set_iscsi_discovery_auth(
+ client,
+ disable_chap=None,
+ require_chap=None,
+ mutual_chap=None,
+ chap_group=None):
+ """Set CHAP authentication for discovery service.
+
+ Args:
+ disable_chap: CHAP for discovery session should be disabled (optional)
+ require_chap: CHAP for discovery session should be required (optional)
+ mutual_chap: CHAP for discovery session should be mutual (optional)
+ chap_group: Authentication group ID for discovery session (optional)
+
+ Returns:
+ True or False
+ """
+ params = {}
+
+ if disable_chap:
+ params['disable_chap'] = disable_chap
+ if require_chap:
+ params['require_chap'] = require_chap
+ if mutual_chap:
+ params['mutual_chap'] = mutual_chap
+ if chap_group:
+ params['chap_group'] = chap_group
+
+ return client.call('set_iscsi_discovery_auth', params)
+
+
+def get_iscsi_auth_groups(client):
+ """Display current authentication group configuration.
+
+ Returns:
+ List of current authentication group configuration.
+ """
+ return client.call('get_iscsi_auth_groups')
+
+
+def get_portal_groups(client):
+ """Display current portal group configuration.
+
+ Returns:
+ List of current portal group configuration.
+ """
+ return client.call('get_portal_groups')
+
+
+def get_initiator_groups(client):
+ """Display current initiator group configuration.
+
+ Returns:
+ List of current initiator group configuration.
+ """
+ return client.call('get_initiator_groups')
+
+
+def get_target_nodes(client):
+ """Display target nodes.
+
+ Returns:
+ List of iSCSI target node objects.
+ """
+ return client.call('get_target_nodes')
+
+
+def construct_target_node(
+ client,
+ luns,
+ pg_ig_maps,
+ name,
+ alias_name,
+ queue_depth,
+ chap_group=None,
+ disable_chap=None,
+ require_chap=None,
+ mutual_chap=None,
+ header_digest=None,
+ data_digest=None):
+ """Add a target node.
+
+ Args:
+ luns: List of bdev_name_id_pairs, e.g. [{"bdev_name": "Malloc1", "lun_id": 1}]
+ pg_ig_maps: List of pg_ig_mappings, e.g. [{"pg_tag": pg, "ig_tag": ig}]
+ name: Target node name (ASCII)
+ alias_name: Target node alias name (ASCII)
+ queue_depth: Desired target queue depth
+ chap_group: Authentication group ID for this target node
+ disable_chap: CHAP authentication should be disabled for this target node
+ require_chap: CHAP authentication should be required for this target node
+ mutual_chap: CHAP authentication should be mutual/bidirectional
+ header_digest: Header Digest should be required for this target node
+ data_digest: Data Digest should be required for this target node
+
+ Returns:
+ True or False
+ """
+ params = {
+ 'name': name,
+ 'alias_name': alias_name,
+ 'pg_ig_maps': pg_ig_maps,
+ 'luns': luns,
+ 'queue_depth': queue_depth,
+ }
+
+ if chap_group:
+ params['chap_group'] = chap_group
+ if disable_chap:
+ params['disable_chap'] = disable_chap
+ if require_chap:
+ params['require_chap'] = require_chap
+ if mutual_chap:
+ params['mutual_chap'] = mutual_chap
+ if header_digest:
+ params['header_digest'] = header_digest
+ if data_digest:
+ params['data_digest'] = data_digest
+ return client.call('construct_target_node', params)
+
+
+def target_node_add_lun(client, name, bdev_name, lun_id=None):
+ """Add LUN to the target node.
+
+ Args:
+ name: Target node name (ASCII)
+ bdev_name: bdev name
+ lun_id: LUN ID (integer >= 0)
+
+ Returns:
+ True or False
+ """
+ params = {
+ 'name': name,
+ 'bdev_name': bdev_name,
+ }
+ if lun_id:
+ params['lun_id'] = lun_id
+ return client.call('target_node_add_lun', params)
+
+
+def set_iscsi_target_node_auth(
+ client,
+ name,
+ chap_group=None,
+ disable_chap=None,
+ require_chap=None,
+ mutual_chap=None):
+ """Set CHAP authentication for the target node.
+
+ Args:
+ name: Target node name (ASCII)
+ chap_group: Authentication group ID for this target node
+ disable_chap: CHAP authentication should be disabled for this target node
+ require_chap: CHAP authentication should be required for this target node
+ mutual_chap: CHAP authentication should be mutual/bidirectional
+
+ Returns:
+ True or False
+ """
+ params = {
+ 'name': name,
+ }
+
+ if chap_group:
+ params['chap_group'] = chap_group
+ if disable_chap:
+ params['disable_chap'] = disable_chap
+ if require_chap:
+ params['require_chap'] = require_chap
+ if mutual_chap:
+ params['mutual_chap'] = mutual_chap
+ return client.call('set_iscsi_target_node_auth', params)
+
+
+def add_iscsi_auth_group(client, tag, secrets=None):
+ """Add authentication group for CHAP authentication.
+
+ Args:
+ tag: Authentication group tag (unique, integer > 0).
+ secrets: Array of secrets objects (optional).
+
+ Returns:
+ True or False
+ """
+ params = {'tag': tag}
+
+ if secrets:
+ params['secrets'] = secrets
+ return client.call('add_iscsi_auth_group', params)
+
+
+def delete_iscsi_auth_group(client, tag):
+ """Delete an authentication group.
+
+ Args:
+ tag: Authentication group tag (unique, integer > 0)
+
+ Returns:
+ True or False
+ """
+ params = {'tag': tag}
+ return client.call('delete_iscsi_auth_group', params)
+
+
+def add_secret_to_iscsi_auth_group(client, tag, user, secret, muser=None, msecret=None):
+ """Add a secret to an authentication group.
+
+ Args:
+ tag: Authentication group tag (unique, integer > 0)
+ user: User name for one-way CHAP authentication
+ secret: Secret for one-way CHAP authentication
+ muser: User name for mutual CHAP authentication (optional)
+ msecret: Secret for mutual CHAP authentication (optional)
+
+ Returns:
+ True or False
+ """
+ params = {'tag': tag, 'user': user, 'secret': secret}
+
+ if muser:
+ params['muser'] = muser
+ if msecret:
+ params['msecret'] = msecret
+ return client.call('add_secret_to_iscsi_auth_group', params)
+
+
+def delete_secret_from_iscsi_auth_group(client, tag, user):
+ """Delete a secret from an authentication group.
+
+ Args:
+ tag: Authentication group tag (unique, integer > 0)
+ user: User name for one-way CHAP authentication
+
+ Returns:
+ True or False
+ """
+ params = {'tag': tag, 'user': user}
+ return client.call('delete_secret_from_iscsi_auth_group', params)
+
+
+def delete_pg_ig_maps(client, pg_ig_maps, name):
+ """Delete PG-IG maps from the target node.
+
+ Args:
+ pg_ig_maps: List of pg_ig_mappings, e.g. [{"pg_tag": pg, "ig_tag": ig}]
+ name: Target node name (ASCII)
+
+ Returns:
+ True or False
+ """
+ params = {
+ 'name': name,
+ 'pg_ig_maps': pg_ig_maps,
+ }
+ return client.call('delete_pg_ig_maps', params)
+
+
+def add_pg_ig_maps(client, pg_ig_maps, name):
+ """Add PG-IG maps to the target node.
+
+ Args:
+ pg_ig_maps: List of pg_ig_mappings, e.g. [{"pg_tag": pg, "ig_tag": ig}]
+ name: Target node name (ASCII)
+
+ Returns:
+ True or False
+ """
+ params = {
+ 'name': name,
+ 'pg_ig_maps': pg_ig_maps,
+ }
+ return client.call('add_pg_ig_maps', params)
+
+
+def add_portal_group(client, portals, tag):
+ """Add a portal group.
+
+ Args:
+ portals: List of portals, e.g. [{'host': ip, 'port': port}] or [{'host': ip, 'port': port, 'cpumask': cpumask}]
+ tag: Portal group tag (unique, integer > 0)
+
+ Returns:
+ True or False
+ """
+ params = {'tag': tag, 'portals': portals}
+ return client.call('add_portal_group', params)
+
+
+def add_initiator_group(client, tag, initiators, netmasks):
+ """Add an initiator group.
+
+ Args:
+ tag: Initiator group tag (unique, integer > 0)
+ initiators: List of initiator hostnames or IP addresses, e.g. ["127.0.0.1","192.168.200.100"]
+ netmasks: List of initiator netmasks, e.g. ["255.255.0.0","255.248.0.0"]
+
+ Returns:
+ True or False
+ """
+ params = {'tag': tag, 'initiators': initiators, 'netmasks': netmasks}
+ return client.call('add_initiator_group', params)
+
+
+def add_initiators_to_initiator_group(
+ client,
+ tag,
+ initiators=None,
+ netmasks=None):
+ """Add initiators to an existing initiator group.
+
+ Args:
+ tag: Initiator group tag (unique, integer > 0)
+ initiators: List of initiator hostnames or IP addresses, e.g. ["127.0.0.1","192.168.200.100"]
+ netmasks: List of initiator netmasks, e.g. ["255.255.0.0","255.248.0.0"]
+
+ Returns:
+ True or False
+ """
+ params = {'tag': tag}
+
+ if initiators:
+ params['initiators'] = initiators
+ if netmasks:
+ params['netmasks'] = netmasks
+ return client.call('add_initiators_to_initiator_group', params)
+
+
+def delete_initiators_from_initiator_group(
+ client, tag, initiators=None, netmasks=None):
+ """Delete initiators from an existing initiator group.
+
+ Args:
+ tag: Initiator group tag (unique, integer > 0)
+ initiators: List of initiator hostnames or IP addresses, e.g. ["127.0.0.1","192.168.200.100"]
+ netmasks: List of initiator netmasks, e.g. ["255.255.0.0","255.248.0.0"]
+
+ Returns:
+ True or False
+ """
+ params = {'tag': tag}
+
+ if initiators:
+ params['initiators'] = initiators
+ if netmasks:
+ params['netmasks'] = netmasks
+ return client.call('delete_initiators_from_initiator_group', params)
+
+
+def delete_target_node(client, target_node_name):
+ """Delete a target node.
+
+ Args:
+ target_node_name: Target node name to be deleted. Example: iqn.2016-06.io.spdk:disk1.
+
+ Returns:
+ True or False
+ """
+ params = {'name': target_node_name}
+ return client.call('delete_target_node', params)
+
+
+def delete_portal_group(client, tag):
+ """Delete a portal group.
+
+ Args:
+ tag: Portal group tag (unique, integer > 0)
+
+ Returns:
+ True or False
+ """
+ params = {'tag': tag}
+ return client.call('delete_portal_group', params)
+
+
+def delete_initiator_group(client, tag):
+ """Delete an initiator group.
+
+ Args:
+ tag: Initiator group tag (unique, integer > 0)
+
+ Returns:
+ True or False
+ """
+ params = {'tag': tag}
+ return client.call('delete_initiator_group', params)
+
+
+def get_iscsi_connections(client):
+ """Display iSCSI connections.
+
+ Returns:
+ List of iSCSI connections.
+ """
+ return client.call('get_iscsi_connections')
+
+
+def get_iscsi_global_params(client):
+ """Display iSCSI global parameters.
+
+ Returns:
+ List of iSCSI global parameters.
+ """
+ return client.call('get_iscsi_global_params')
+
+
+def get_scsi_devices(client):
+ """Display SCSI devices.
+
+ Returns:
+ List of SCSI devices.
+ """
+ return client.call('get_scsi_devices')
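Sketch wiring an existing bdev into an iSCSI target node; tags, addresses, and names are illustrative, and the portal and initiator groups must exist before the target node references them:

    from rpc import iscsi
    from rpc.client import JSONRPCClient

    client = JSONRPCClient('/var/tmp/spdk.sock')
    iscsi.add_portal_group(client, portals=[{'host': '127.0.0.1', 'port': '3260'}], tag=1)
    iscsi.add_initiator_group(client, tag=2, initiators=['ANY'], netmasks=['127.0.0.1/32'])
    iscsi.construct_target_node(client,
                                luns=[{'bdev_name': 'Malloc0', 'lun_id': 0}],
                                pg_ig_maps=[{'pg_tag': 1, 'ig_tag': 2}],
                                name='iqn.2016-06.io.spdk:disk1',
                                alias_name='disk1',
                                queue_depth=64)
    print(iscsi.get_target_nodes(client))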
diff --git a/src/spdk/scripts/rpc/log.py b/src/spdk/scripts/rpc/log.py
new file mode 100644
index 00000000..a152b3b8
--- /dev/null
+++ b/src/spdk/scripts/rpc/log.py
@@ -0,0 +1,65 @@
+def set_trace_flag(client, flag):
+ """Set trace flag.
+
+ Args:
+ flag: trace mask we want to set. (for example "nvme")
+ """
+ params = {'flag': flag}
+ return client.call('set_trace_flag', params)
+
+
+def clear_trace_flag(client, flag):
+ """Clear trace flag.
+
+ Args:
+ flag: trace mask we want to clear. (for example "nvme")
+ """
+ params = {'flag': flag}
+ return client.call('clear_trace_flag', params)
+
+
+def get_trace_flags(client):
+ """Get trace flags
+
+ Returns:
+ List of trace flags
+ """
+ return client.call('get_trace_flags')
+
+
+def set_log_level(client, level):
+ """Set log level.
+
+ Args:
+ level: log level we want to set. (for example "DEBUG")
+ """
+ params = {'level': level}
+ return client.call('set_log_level', params)
+
+
+def get_log_level(client):
+ """Get log level
+
+ Returns:
+ Current log level
+ """
+ return client.call('get_log_level')
+
+
+def set_log_print_level(client, level):
+ """Set log print level.
+
+ Args:
+ level: log print level we want to set. (for example "DEBUG")
+ """
+ params = {'level': level}
+ return client.call('set_log_print_level', params)
+
+
+def get_log_print_level(client):
+ """Get log print level
+
+ Returns:
+ Current log print level
+ """
+ return client.call('get_log_print_level')
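Sketch of the trace and log helpers; the flag and level values come from the docstrings above, and trace flags generally only take effect on debug builds of SPDK:

    from rpc import log
    from rpc.client import JSONRPCClient

    client = JSONRPCClient('/var/tmp/spdk.sock')
    log.set_trace_flag(client, 'nvme')
    log.set_log_print_level(client, 'DEBUG')
    print(log.get_trace_flags(client))
    print(log.get_log_print_level(client))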
diff --git a/src/spdk/scripts/rpc/lvol.py b/src/spdk/scripts/rpc/lvol.py
new file mode 100644
index 00000000..e7e05a3b
--- /dev/null
+++ b/src/spdk/scripts/rpc/lvol.py
@@ -0,0 +1,195 @@
+def construct_lvol_store(client, bdev_name, lvs_name, cluster_sz=None):
+ """Construct a logical volume store.
+
+ Args:
+ bdev_name: bdev on which to construct logical volume store
+ lvs_name: name of the logical volume store to create
+ cluster_sz: cluster size of the logical volume store in bytes (optional)
+
+ Returns:
+ UUID of created logical volume store.
+ """
+ params = {'bdev_name': bdev_name, 'lvs_name': lvs_name}
+ if cluster_sz:
+ params['cluster_sz'] = cluster_sz
+ return client.call('construct_lvol_store', params)
+
+
+def rename_lvol_store(client, old_name, new_name):
+ """Rename a logical volume store.
+
+ Args:
+ old_name: existing logical volume store name
+ new_name: new logical volume store name
+ """
+ params = {
+ 'old_name': old_name,
+ 'new_name': new_name
+ }
+ return client.call('rename_lvol_store', params)
+
+
+def construct_lvol_bdev(client, lvol_name, size, thin_provision=False, uuid=None, lvs_name=None):
+ """Create a logical volume on a logical volume store.
+
+ Args:
+ lvol_name: name of logical volume to create
+ size: desired size of logical volume in bytes (will be rounded up to a multiple of cluster size)
+ thin_provision: True to enable thin provisioning
+ uuid: UUID of logical volume store to create logical volume on (optional)
+ lvs_name: name of logical volume store to create logical volume on (optional)
+
+ Either uuid or lvs_name must be specified, but not both.
+
+ Returns:
+ Name of created logical volume block device.
+ """
+ if (uuid and lvs_name) or (not uuid and not lvs_name):
+ raise ValueError("Either uuid or lvs_name must be specified, but not both")
+
+ params = {'lvol_name': lvol_name, 'size': size}
+ if thin_provision:
+ params['thin_provision'] = thin_provision
+ if uuid:
+ params['uuid'] = uuid
+ if lvs_name:
+ params['lvs_name'] = lvs_name
+ return client.call('construct_lvol_bdev', params)
+
+
+def snapshot_lvol_bdev(client, lvol_name, snapshot_name):
+ """Capture a snapshot of the current state of a logical volume.
+
+ Args:
+ lvol_name: logical volume to create a snapshot from
+ snapshot_name: name for the newly created snapshot
+
+ Returns:
+ Name of created logical volume snapshot.
+ """
+ params = {
+ 'lvol_name': lvol_name,
+ 'snapshot_name': snapshot_name
+ }
+ return client.call('snapshot_lvol_bdev', params)
+
+
+def clone_lvol_bdev(client, snapshot_name, clone_name):
+ """Create a logical volume based on a snapshot.
+
+ Args:
+ snapshot_name: snapshot to clone
+ clone_name: name of logical volume to create
+
+ Returns:
+ Name of created logical volume clone.
+ """
+ params = {
+ 'snapshot_name': snapshot_name,
+ 'clone_name': clone_name
+ }
+ return client.call('clone_lvol_bdev', params)
+
+
+def rename_lvol_bdev(client, old_name, new_name):
+ """Rename a logical volume.
+
+ Args:
+ old_name: existing logical volume name
+ new_name: new logical volume name
+ """
+ params = {
+ 'old_name': old_name,
+ 'new_name': new_name
+ }
+ return client.call('rename_lvol_bdev', params)
+
+
+def resize_lvol_bdev(client, name, size):
+ """Resize a logical volume.
+
+ Args:
+ name: name of logical volume to resize
+ size: desired size of logical volume in bytes (will be rounded up to a multiple of cluster size)
+ """
+ params = {
+ 'name': name,
+ 'size': size,
+ }
+ return client.call('resize_lvol_bdev', params)
+
+
+def destroy_lvol_bdev(client, name):
+ """Destroy a logical volume.
+
+ Args:
+ name: name of logical volume to destroy
+ """
+ params = {
+ 'name': name,
+ }
+ return client.call('destroy_lvol_bdev', params)
+
+
+def inflate_lvol_bdev(client, name):
+ """Inflate a logical volume.
+
+ Args:
+ name: name of logical volume to inflate
+ """
+ params = {
+ 'name': name,
+ }
+ return client.call('inflate_lvol_bdev', params)
+
+
+def decouple_parent_lvol_bdev(client, name):
+ """Decouple parent of a logical volume.
+
+ Args:
+ name: name of logical volume whose parent is to be decoupled
+ """
+ params = {
+ 'name': name,
+ }
+ return client.call('decouple_parent_lvol_bdev', params)
+
+
+def destroy_lvol_store(client, uuid=None, lvs_name=None):
+ """Destroy a logical volume store.
+
+ Args:
+ uuid: UUID of logical volume store to destroy (optional)
+ lvs_name: name of logical volume store to destroy (optional)
+
+ Either uuid or lvs_name must be specified, but not both.
+ """
+ if (uuid and lvs_name) or (not uuid and not lvs_name):
+ raise ValueError("Exactly one of uuid or lvs_name must be specified")
+
+ params = {}
+ if uuid:
+ params['uuid'] = uuid
+ if lvs_name:
+ params['lvs_name'] = lvs_name
+ return client.call('destroy_lvol_store', params)
+
+
+def get_lvol_stores(client, uuid=None, lvs_name=None):
+ """List logical volume stores.
+
+ Args:
+ uuid: UUID of logical volume store to retrieve information about (optional)
+ lvs_name: name of logical volume store to retrieve information about (optional)
+
+ Either uuid or lvs_name may be specified, but not both.
+ If both uuid and lvs_name are omitted, information about all logical volume stores is returned.
+ """
+ if (uuid and lvs_name):
+ raise ValueError("Exactly one of uuid or lvs_name may be specified")
+ params = {}
+ if uuid:
+ params['uuid'] = uuid
+ if lvs_name:
+ params['lvs_name'] = lvs_name
+ return client.call('get_lvol_stores', params)
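Sketch layering a thin-provisioned logical volume on top of an existing bdev; the names and the 16 MiB size are illustrative:

    from rpc import lvol
    from rpc.client import JSONRPCClient

    client = JSONRPCClient('/var/tmp/spdk.sock')
    lvs_uuid = lvol.construct_lvol_store(client, bdev_name='Malloc0', lvs_name='lvs0')
    lvol_bdev = lvol.construct_lvol_bdev(client, lvol_name='lvol0', size=16 * 1024 * 1024,
                                         thin_provision=True, lvs_name='lvs0')
    print(lvol.get_lvol_stores(client, lvs_name='lvs0'))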
diff --git a/src/spdk/scripts/rpc/nbd.py b/src/spdk/scripts/rpc/nbd.py
new file mode 100644
index 00000000..70cba167
--- /dev/null
+++ b/src/spdk/scripts/rpc/nbd.py
@@ -0,0 +1,18 @@
+def start_nbd_disk(client, bdev_name, nbd_device):
+ params = {
+ 'bdev_name': bdev_name,
+ 'nbd_device': nbd_device
+ }
+ return client.call('start_nbd_disk', params)
+
+
+def stop_nbd_disk(client, nbd_device):
+ params = {'nbd_device': nbd_device}
+ return client.call('stop_nbd_disk', params)
+
+
+def get_nbd_disks(client, nbd_device=None):
+ params = {}
+ if nbd_device:
+ params['nbd_device'] = nbd_device
+ return client.call('get_nbd_disks', params)
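Sketch exposing a bdev as a kernel block device; assumes the nbd kernel module is loaded, and the bdev and device names are illustrative:

    from rpc import nbd
    from rpc.client import JSONRPCClient

    client = JSONRPCClient('/var/tmp/spdk.sock')
    nbd.start_nbd_disk(client, bdev_name='Malloc0', nbd_device='/dev/nbd0')
    print(nbd.get_nbd_disks(client))
    nbd.stop_nbd_disk(client, nbd_device='/dev/nbd0')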
diff --git a/src/spdk/scripts/rpc/net.py b/src/spdk/scripts/rpc/net.py
new file mode 100644
index 00000000..e1ba7aa8
--- /dev/null
+++ b/src/spdk/scripts/rpc/net.py
@@ -0,0 +1,29 @@
+def add_ip_address(client, ifc_index, ip_addr):
+ """Add IP address.
+
+ Args:
+ ifc_index: ifc index of the nic device (int)
+ ip_addr: IP address to be added
+ """
+ params = {'ifc_index': ifc_index, 'ip_address': ip_addr}
+ return client.call('add_ip_address', params)
+
+
+def delete_ip_address(client, ifc_index, ip_addr):
+ """Delete IP address.
+
+ Args:
+ ifc_index: ifc index of the nic device (int)
+ ip_addr: IP address to be deleted
+ """
+ params = {'ifc_index': ifc_index, 'ip_address': ip_addr}
+ return client.call('delete_ip_address', params)
+
+
+def get_interfaces(client):
+ """Display current interface list
+
+ Returns:
+ List of current interfaces
+ """
+ return client.call('get_interfaces')
diff --git a/src/spdk/scripts/rpc/nvme.py b/src/spdk/scripts/rpc/nvme.py
new file mode 100644
index 00000000..a889474b
--- /dev/null
+++ b/src/spdk/scripts/rpc/nvme.py
@@ -0,0 +1,54 @@
+
+
+def send_nvme_cmd(client, name, cmd_type, data_direction, cmdbuf,
+ data=None, metadata=None,
+ data_len=None, metadata_len=None,
+ timeout_ms=None):
+ """Send one NVMe command
+
+ Args:
+ name: Name of the operating NVMe controller
+ cmd_type: Type of nvme cmd. Valid values are: admin, io
+ data_direction: Direction of data transfer. Valid values are: c2h, h2c
+ cmdbuf: NVMe command encoded by base64 urlsafe
+ data: Data transferring to controller from host, encoded by base64 urlsafe
+ metadata: metadata transferring to controller from host, encoded by base64 urlsafe
+ data_len: Data length required to transfer from controller to host
+ metadata_len: Metadata length required to transfer from controller to host
+ timeout_ms: Command execution timeout value in milliseconds; if 0, don't track the timeout
+
+ Returns:
+ NVMe completion queue entry, requested data and metadata, all are encoded by base64 urlsafe.
+ """
+ params = {'name': name,
+ 'cmd_type': cmd_type,
+ 'data_direction': data_direction,
+ 'cmdbuf': cmdbuf}
+
+ if data:
+ params['data'] = data
+ if metadata:
+ params['metadata'] = metadata
+ if data_len:
+ params['data_len'] = data_len
+ if metadata_len:
+ params['metadata_len'] = metadata_len
+ if timeout_ms:
+ params['timeout_ms'] = timeout_ms
+
+ return client.call('send_nvme_cmd', params)
+
+
+def get_nvme_controllers(client, name=None):
+ """Get information about NVMe controllers.
+
+ Args:
+ name: NVMe controller name to query (optional; if omitted, query all NVMe controllers)
+
+ Returns:
+ List of NVMe controller information objects.
+ """
+ params = {}
+ if name:
+ params['name'] = name
+ return client.call('get_nvme_controllers', params)
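Sketch querying controllers that were attached with construct_nvme_bdev; the controller name is illustrative:

    from rpc import nvme
    from rpc.client import JSONRPCClient

    client = JSONRPCClient('/var/tmp/spdk.sock')
    print(nvme.get_nvme_controllers(client))                # all controllers
    print(nvme.get_nvme_controllers(client, name='Nvme0'))  # a single controller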
diff --git a/src/spdk/scripts/rpc/nvmf.py b/src/spdk/scripts/rpc/nvmf.py
new file mode 100644
index 00000000..d805ebca
--- /dev/null
+++ b/src/spdk/scripts/rpc/nvmf.py
@@ -0,0 +1,352 @@
+
+
+def set_nvmf_target_options(client,
+ max_queue_depth=None,
+ max_qpairs_per_ctrlr=None,
+ in_capsule_data_size=None,
+ max_io_size=None,
+ max_subsystems=None,
+ io_unit_size=None):
+ """Set NVMe-oF target options.
+
+ Args:
+ max_queue_depth: Max number of outstanding I/O per queue (optional)
+ max_qpairs_per_ctrlr: Max number of SQ and CQ per controller (optional)
+ in_capsule_data_size: Maximum in-capsule data size in bytes (optional)
+ max_io_size: Maximum I/O data size in bytes (optional)
+ max_subsystems: Maximum number of NVMe-oF subsystems (optional)
+ io_unit_size: I/O unit size in bytes (optional)
+
+ Returns:
+ True or False
+ """
+ params = {}
+
+ if max_queue_depth:
+ params['max_queue_depth'] = max_queue_depth
+ if max_qpairs_per_ctrlr:
+ params['max_qpairs_per_ctrlr'] = max_qpairs_per_ctrlr
+ if in_capsule_data_size:
+ params['in_capsule_data_size'] = in_capsule_data_size
+ if max_io_size:
+ params['max_io_size'] = max_io_size
+ if max_subsystems:
+ params['max_subsystems'] = max_subsystems
+ if io_unit_size:
+ params['io_unit_size'] = io_unit_size
+ return client.call('set_nvmf_target_options', params)
+
+
+def set_nvmf_target_config(client,
+ acceptor_poll_rate=None,
+ conn_sched=None):
+ """Set NVMe-oF target subsystem configuration.
+
+ Args:
+ acceptor_poll_rate: Acceptor poll period in microseconds (optional)
+ conn_sched: Scheduling of incoming connections (optional)
+
+ Returns:
+ True or False
+ """
+ params = {}
+
+ if acceptor_poll_rate:
+ params['acceptor_poll_rate'] = acceptor_poll_rate
+ if conn_sched:
+ params['conn_sched'] = conn_sched
+ return client.call('set_nvmf_target_config', params)
+
+
+def nvmf_create_transport(client,
+ trtype,
+ max_queue_depth=None,
+ max_qpairs_per_ctrlr=None,
+ in_capsule_data_size=None,
+ max_io_size=None,
+ io_unit_size=None,
+ max_aq_depth=None):
+ """NVMf Transport Create options.
+
+ Args:
+ trtype: Transport type (ex. RDMA)
+ max_queue_depth: Max number of outstanding I/O per queue (optional)
+ max_qpairs_per_ctrlr: Max number of SQ and CQ per controller (optional)
+ in_capsule_data_size: Maximum in-capsule data size in bytes (optional)
+ max_io_size: Maximum I/O data size in bytes (optional)
+ io_unit_size: I/O unit size in bytes (optional)
+ max_aq_depth: Max admin queue depth per controller (optional)
+
+ Returns:
+ True or False
+ """
+ params = {}
+
+ params['trtype'] = trtype
+ if max_queue_depth:
+ params['max_queue_depth'] = max_queue_depth
+ if max_qpairs_per_ctrlr:
+ params['max_qpairs_per_ctrlr'] = max_qpairs_per_ctrlr
+ if in_capsule_data_size:
+ params['in_capsule_data_size'] = in_capsule_data_size
+ if max_io_size:
+ params['max_io_size'] = max_io_size
+ if io_unit_size:
+ params['io_unit_size'] = io_unit_size
+ if max_aq_depth:
+ params['max_aq_depth'] = max_aq_depth
+ return client.call('nvmf_create_transport', params)
+
+
+def get_nvmf_subsystems(client):
+ """Get list of NVMe-oF subsystems.
+
+ Returns:
+ List of NVMe-oF subsystem objects.
+ """
+ return client.call('get_nvmf_subsystems')
+
+
+def construct_nvmf_subsystem(client,
+ nqn,
+ serial_number,
+ listen_addresses=None,
+ hosts=None,
+ allow_any_host=False,
+ namespaces=None,
+ max_namespaces=0):
+ """Construct an NVMe over Fabrics target subsystem.
+
+ Args:
+ nqn: Subsystem NQN.
+ serial_number: Serial number of virtual controller.
+ listen_addresses: Array of listen_address objects (optional).
+ hosts: Array of strings containing allowed host NQNs (optional). Default: No hosts allowed.
+ allow_any_host: Allow any host (True) or enforce allowed host whitelist (False). Default: False.
+ namespaces: Array of namespace objects (optional). Default: No namespaces.
+ max_namespaces: Maximum number of namespaces that can be attached to the subsystem (optional). Default: 0 (Unlimited).
+
+ Returns:
+ True or False
+ """
+ params = {
+ 'nqn': nqn,
+ 'serial_number': serial_number,
+ }
+
+ if max_namespaces:
+ params['max_namespaces'] = max_namespaces
+
+ if listen_addresses:
+ params['listen_addresses'] = listen_addresses
+
+ if hosts:
+ params['hosts'] = hosts
+
+ if allow_any_host:
+ params['allow_any_host'] = True
+
+ if namespaces:
+ params['namespaces'] = namespaces
+
+ return client.call('construct_nvmf_subsystem', params)
+
+
+def nvmf_subsystem_create(client,
+ nqn,
+ serial_number,
+ allow_any_host=False,
+ max_namespaces=0):
+ """Construct an NVMe over Fabrics target subsystem.
+
+ Args:
+ nqn: Subsystem NQN.
+ serial_number: Serial number of virtual controller.
+ allow_any_host: Allow any host (True) or enforce allowed host whitelist (False). Default: False.
+ max_namespaces: Maximum number of namespaces that can be attached to the subsystem (optional). Default: 0 (Unlimited).
+
+ Returns:
+ True or False
+ """
+ params = {
+ 'nqn': nqn,
+ }
+
+ if serial_number:
+ params['serial_number'] = serial_number
+
+ if allow_any_host:
+ params['allow_any_host'] = True
+
+ if max_namespaces:
+ params['max_namespaces'] = max_namespaces
+
+ return client.call('nvmf_subsystem_create', params)
+
+
+def nvmf_subsystem_add_listener(client, nqn, trtype, traddr, trsvcid, adrfam):
+ """Add a new listen address to an NVMe-oF subsystem.
+
+ Args:
+ nqn: Subsystem NQN.
+ trtype: Transport type ("RDMA").
+ traddr: Transport address.
+ trsvcid: Transport service ID.
+ adrfam: Address family ("IPv4", "IPv6", "IB", or "FC").
+
+ Returns:
+ True or False
+ """
+ listen_address = {'trtype': trtype,
+ 'traddr': traddr,
+ 'trsvcid': trsvcid}
+
+ if adrfam:
+ listen_address['adrfam'] = adrfam
+
+ params = {'nqn': nqn,
+ 'listen_address': listen_address}
+
+ return client.call('nvmf_subsystem_add_listener', params)
+
+
+def nvmf_subsystem_remove_listener(
+ client,
+ nqn,
+ trtype,
+ traddr,
+ trsvcid,
+ adrfam):
+ """Remove existing listen address from an NVMe-oF subsystem.
+
+ Args:
+ nqn: Subsystem NQN.
+ trtype: Transport type ("RDMA").
+ traddr: Transport address.
+ trsvcid: Transport service ID.
+ adrfam: Address family ("IPv4", "IPv6", "IB", or "FC").
+
+ Returns:
+ True or False
+ """
+ listen_address = {'trtype': trtype,
+ 'traddr': traddr,
+ 'trsvcid': trsvcid}
+
+ if adrfam:
+ listen_address['adrfam'] = adrfam
+
+ params = {'nqn': nqn,
+ 'listen_address': listen_address}
+
+ return client.call('nvmf_subsystem_remove_listener', params)
+
+
+def nvmf_subsystem_add_ns(client, nqn, bdev_name, nsid=None, nguid=None, eui64=None, uuid=None):
+ """Add a namespace to a subsystem.
+
+ Args:
+ nqn: Subsystem NQN.
+ bdev_name: Name of bdev to expose as a namespace.
+ nsid: Namespace ID (optional).
+ nguid: 16-byte namespace globally unique identifier in hexadecimal (optional).
+ eui64: 8-byte namespace EUI-64 in hexadecimal (e.g. "ABCDEF0123456789") (optional).
+ uuid: Namespace UUID (optional).
+
+ Returns:
+ The namespace ID
+ """
+ ns = {'bdev_name': bdev_name}
+
+ if nsid:
+ ns['nsid'] = nsid
+
+ if nguid:
+ ns['nguid'] = nguid
+
+ if eui64:
+ ns['eui64'] = eui64
+
+ if uuid:
+ ns['uuid'] = uuid
+
+ params = {'nqn': nqn,
+ 'namespace': ns}
+
+ return client.call('nvmf_subsystem_add_ns', params)
+
+
+def nvmf_subsystem_remove_ns(client, nqn, nsid):
+ """Remove a existing namespace from a subsystem.
+
+ Args:
+ nqn: Subsystem NQN.
+ nsid: Namespace ID.
+
+ Returns:
+ True or False
+ """
+ params = {'nqn': nqn,
+ 'nsid': nsid}
+
+ return client.call('nvmf_subsystem_remove_ns', params)
+
+
+def nvmf_subsystem_add_host(client, nqn, host):
+ """Add a host NQN to the whitelist of allowed hosts.
+
+ Args:
+ nqn: Subsystem NQN.
+ host: Host NQN to add to the list of allowed host NQNs
+
+ Returns:
+ True or False
+ """
+ params = {'nqn': nqn,
+ 'host': host}
+
+ return client.call('nvmf_subsystem_add_host', params)
+
+
+def nvmf_subsystem_remove_host(client, nqn, host):
+ """Remove a host NQN from the whitelist of allowed hosts.
+
+ Args:
+ nqn: Subsystem NQN.
+ host: Host NQN to remove from the list of allowed host NQNs
+
+ Returns:
+ True or False
+ """
+ params = {'nqn': nqn,
+ 'host': host}
+
+ return client.call('nvmf_subsystem_remove_host', params)
+
+
+def nvmf_subsystem_allow_any_host(client, nqn, disable):
+ """Configure a subsystem to allow any host to connect or to enforce the host NQN whitelist.
+
+ Args:
+ nqn: Subsystem NQN.
+ disable: True to enforce the allowed host whitelist; False to allow any host.
+
+ Returns:
+ True or False
+ """
+ params = {'nqn': nqn, 'allow_any_host': False if disable else True}
+
+ return client.call('nvmf_subsystem_allow_any_host', params)
+
+
+def delete_nvmf_subsystem(client, nqn):
+ """Delete an existing NVMe-oF subsystem.
+
+ Args:
+ nqn: Subsystem NQN.
+
+ Returns:
+ True or False
+ """
+ params = {'nqn': nqn}
+ return client.call('delete_nvmf_subsystem', params)
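Sketch of a minimal NVMe-oF target setup over RDMA; the NQN, serial number, bdev name, and listen address are illustrative:

    from rpc import nvmf
    from rpc.client import JSONRPCClient

    client = JSONRPCClient('/var/tmp/spdk.sock')
    nqn = 'nqn.2016-06.io.spdk:cnode1'
    nvmf.nvmf_create_transport(client, trtype='RDMA')
    nvmf.nvmf_subsystem_create(client, nqn=nqn, serial_number='SPDK001', allow_any_host=True)
    nvmf.nvmf_subsystem_add_ns(client, nqn=nqn, bdev_name='Malloc0')
    nvmf.nvmf_subsystem_add_listener(client, nqn=nqn, trtype='RDMA',
                                     traddr='192.168.0.10', trsvcid='4420', adrfam='IPv4')
    print(nvmf.get_nvmf_subsystems(client))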
diff --git a/src/spdk/scripts/rpc/pmem.py b/src/spdk/scripts/rpc/pmem.py
new file mode 100644
index 00000000..4ab38ff3
--- /dev/null
+++ b/src/spdk/scripts/rpc/pmem.py
@@ -0,0 +1,29 @@
+def create_pmem_pool(client, pmem_file, num_blocks, block_size):
+ """Create pmem pool at specified path.
+ Args:
+ pmem_file: path at which to create pmem pool
+ num_blocks: number of blocks for created pmem pool file
+ block_size: block size for pmem pool file
+ """
+ params = {'pmem_file': pmem_file,
+ 'num_blocks': num_blocks,
+ 'block_size': block_size}
+ return client.call('create_pmem_pool', params)
+
+
+def pmem_pool_info(client, pmem_file):
+ """Get details about pmem pool.
+ Args:
+ pmem_file: path to pmem pool
+ """
+ params = {'pmem_file': pmem_file}
+ return client.call('pmem_pool_info', params)
+
+
+def delete_pmem_pool(client, pmem_file):
+ """Delete pmem pool.
+ Args:
+ pmem_file: path to pmem pool
+ """
+ params = {'pmem_file': pmem_file}
+ return client.call('delete_pmem_pool', params)
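Sketch creating a pmem pool and exposing it through the pmem bdev helper from bdev.py; the pool path and geometry (131072 blocks x 512 bytes = 64 MiB) are illustrative:

    from rpc import bdev, pmem
    from rpc.client import JSONRPCClient

    client = JSONRPCClient('/var/tmp/spdk.sock')
    pmem.create_pmem_pool(client, pmem_file='/tmp/pool0', num_blocks=131072, block_size=512)
    print(pmem.pmem_pool_info(client, pmem_file='/tmp/pool0'))
    bdev.construct_pmem_bdev(client, pmem_file='/tmp/pool0', name='Pmem0')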
diff --git a/src/spdk/scripts/rpc/subsystem.py b/src/spdk/scripts/rpc/subsystem.py
new file mode 100644
index 00000000..c8e662bc
--- /dev/null
+++ b/src/spdk/scripts/rpc/subsystem.py
@@ -0,0 +1,7 @@
+def get_subsystems(client):
+ return client.call('get_subsystems')
+
+
+def get_subsystem_config(client, name):
+ params = {'name': name}
+ return client.call('get_subsystem_config', params)
diff --git a/src/spdk/scripts/rpc/vhost.py b/src/spdk/scripts/rpc/vhost.py
new file mode 100644
index 00000000..bc97455a
--- /dev/null
+++ b/src/spdk/scripts/rpc/vhost.py
@@ -0,0 +1,248 @@
+def set_vhost_controller_coalescing(client, ctrlr, delay_base_us, iops_threshold):
+ """Set coalescing for vhost controller.
+ Args:
+ ctrlr: controller name
+ delay_base_us: base delay time
+ iops_threshold: IOPS threshold when coalescing is enabled
+ """
+ params = {
+ 'ctrlr': ctrlr,
+ 'delay_base_us': delay_base_us,
+ 'iops_threshold': iops_threshold,
+ }
+ return client.call('set_vhost_controller_coalescing', params)
+
+
+def construct_vhost_scsi_controller(client, ctrlr, cpumask=None):
+ """Construct a vhost scsi controller.
+ Args:
+ ctrlr: controller name
+ cpumask: cpu mask for this controller
+ """
+ params = {'ctrlr': ctrlr}
+
+ if cpumask:
+ params['cpumask'] = cpumask
+
+ return client.call('construct_vhost_scsi_controller', params)
+
+
+def add_vhost_scsi_lun(client, ctrlr, scsi_target_num, bdev_name):
+ """Add LUN to vhost scsi controller target.
+ Args:
+ ctrlr: controller name
+ scsi_target_num: target number to use
+ bdev_name: name of bdev to add to target
+ """
+ params = {
+ 'ctrlr': ctrlr,
+ 'scsi_target_num': scsi_target_num,
+ 'bdev_name': bdev_name,
+ }
+ return client.call('add_vhost_scsi_lun', params)
+
+
+def remove_vhost_scsi_target(client, ctrlr, scsi_target_num):
+ """Remove target from vhost scsi controller.
+ Args:
+ ctrlr: controller name to remove target from
+ scsi_target_num: number of target to remove from controller
+ """
+ params = {
+ 'ctrlr': ctrlr,
+ 'scsi_target_num': scsi_target_num
+ }
+ return client.call('remove_vhost_scsi_target', params)
+
+
+def construct_vhost_nvme_controller(client, ctrlr, io_queues, cpumask=None):
+ """Construct vhost NVMe controller.
+ Args:
+ ctrlr: controller name
+ io_queues: number of IO queues for the controller
+ cpumask: cpu mask for this controller
+ """
+ params = {
+ 'ctrlr': ctrlr,
+ 'io_queues': io_queues
+ }
+
+ if cpumask:
+ params['cpumask'] = cpumask
+
+ return client.call('construct_vhost_nvme_controller', params)
+
+
+def add_vhost_nvme_ns(client, ctrlr, bdev_name):
+ """Add namespace to vhost nvme controller.
+ Args:
+ ctrlr: controller name where to add a namespace
+ bdev_name: block device name for a new namespace
+ """
+ params = {
+ 'ctrlr': ctrlr,
+ 'bdev_name': bdev_name,
+ }
+
+ return client.call('add_vhost_nvme_ns', params)
+
+
+def construct_vhost_blk_controller(client, ctrlr, dev_name, cpumask=None, readonly=None):
+ """Construct vhost BLK controller.
+ Args:
+ ctrlr: controller name
+ dev_name: device name to add to controller
+ cpumask: cpu mask for this controller
+ readonly: set controller as read-only
+ """
+ params = {
+ 'ctrlr': ctrlr,
+ 'dev_name': dev_name,
+ }
+ if cpumask:
+ params['cpumask'] = cpumask
+ if readonly:
+ params['readonly'] = readonly
+ return client.call('construct_vhost_blk_controller', params)
+
+
+def get_vhost_controllers(client, name=None):
+ """Get information about configured vhost controllers.
+
+ Args:
+ name: controller name to query (optional; if omitted, query all controllers)
+
+ Returns:
+ List of vhost controllers.
+ """
+ params = {}
+ if name:
+ params['name'] = name
+ return client.call('get_vhost_controllers', params)
+
+
+def remove_vhost_controller(client, ctrlr):
+ """Remove vhost controller from configuration.
+ Args:
+ ctrlr: controller name to remove
+ """
+ params = {'ctrlr': ctrlr}
+ return client.call('remove_vhost_controller', params)
+
+
+def construct_virtio_dev(client, name, trtype, traddr, dev_type, vq_count=None, vq_size=None):
+ """Construct new virtio device using provided
+ transport type and device type.
+ Args:
+ name: name base for new created bdevs
+ trtype: virtio target transport type: pci or user
+ traddr: transport type specific target address: e.g. UNIX
+ domain socket path or BDF
+ dev_type: device type: blk or scsi
+ vq_count: number of virtual queues to be used
+ vq_size: size of each queue
+ """
+ params = {
+ 'name': name,
+ 'trtype': trtype,
+ 'traddr': traddr,
+ 'dev_type': dev_type
+ }
+ if vq_count:
+ params['vq_count'] = vq_count
+ if vq_size:
+ params['vq_size'] = vq_size
+ return client.call('construct_virtio_dev', params)
+
+
+def construct_virtio_user_scsi_bdev(client, path, name, vq_count=None, vq_size=None):
+ """Connect to virtio user scsi device.
+ Args:
+ path: path to Virtio SCSI socket
+ name: use this name as base instead of 'VirtioScsiN'
+ vq_count: number of virtual queues to be used
+ vq_size: size of each queue
+ """
+ params = {
+ 'path': path,
+ 'name': name,
+ }
+ if vq_count:
+ params['vq_count'] = vq_count
+ if vq_size:
+ params['vq_size'] = vq_size
+ return client.call('construct_virtio_user_scsi_bdev', params)
+
+
+def construct_virtio_pci_scsi_bdev(client, pci_address, name):
+ """Create a Virtio SCSI device from a virtio-pci device.
+ Args:
+ pci_address: PCI address in domain:bus:device.function format or
+ domain.bus.device.function format
+ name: Name for the virtio device. It will be inherited by all created
+ bdevs, which are named in the following format:
+ <name>t<target_id>
+ """
+ params = {
+ 'pci_address': pci_address,
+ 'name': name,
+ }
+ return client.call('construct_virtio_pci_scsi_bdev', params)
+
+
+def remove_virtio_scsi_bdev(client, name):
+ """Remove a Virtio-SCSI device
+ This will delete all bdevs exposed by this device.
+ Args:
+ name: virtio device name
+ """
+ params = {'name': name}
+ return client.call('remove_virtio_scsi_bdev', params)
+
+
+def remove_virtio_bdev(client, name):
+ """Remove a Virtio device
+ This will delete all bdevs exposed by this device.
+ Args:
+ name: virtio device name
+ """
+ params = {'name': name}
+ return client.call('remove_virtio_bdev', params)
+
+
+def get_virtio_scsi_devs(client):
+ """Get list of virtio scsi devices."""
+ return client.call('get_virtio_scsi_devs')
+
+
+def construct_virtio_user_blk_bdev(client, path, name, vq_count=None, vq_size=None):
+ """Connect to virtio user BLK device.
+ Args:
+ path: path to Virtio BLK socket
+ name: use this name as base instead of 'VirtioScsiN'
+ vq_count: number of virtual queues to be used
+ vq_size: size of each queue
+ """
+ params = {
+ 'path': path,
+ 'name': name,
+ }
+ if vq_count:
+ params['vq_count'] = vq_count
+ if vq_size:
+ params['vq_size'] = vq_size
+ return client.call('construct_virtio_user_blk_bdev', params)
+
+
+def construct_virtio_pci_blk_bdev(client, pci_address, name):
+ """Create a Virtio Blk device from a virtio-pci device.
+ Args:
+ pci_address: PCI address in domain:bus:device.function format or
+ domain.bus.device.function format
+ name: name for the blk device
+ """
+ params = {
+ 'pci_address': pci_address,
+ 'name': name,
+ }
+ return client.call('construct_virtio_pci_blk_bdev', params)
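Sketch of a vhost-scsi controller backed by an existing bdev; the controller socket name, CPU mask, and bdev name are illustrative:

    from rpc import vhost
    from rpc.client import JSONRPCClient

    client = JSONRPCClient('/var/tmp/spdk.sock')
    vhost.construct_vhost_scsi_controller(client, ctrlr='vhost.0', cpumask='0x1')
    vhost.add_vhost_scsi_lun(client, ctrlr='vhost.0', scsi_target_num=0, bdev_name='Malloc0')
    print(vhost.get_vhost_controllers(client))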