summaryrefslogtreecommitdiffstats
path: root/src/spdk/scripts/rpc
diff options
context:
space:
mode:
Diffstat (limited to '')
-rwxr-xr-xsrc/spdk/scripts/rpc.py2507
-rw-r--r--src/spdk/scripts/rpc/__init__.py201
-rw-r--r--src/spdk/scripts/rpc/app.py78
-rw-r--r--src/spdk/scripts/rpc/bdev.py1105
-rw-r--r--src/spdk/scripts/rpc/blobfs.py57
-rw-r--r--src/spdk/scripts/rpc/client.py183
-rw-r--r--src/spdk/scripts/rpc/env_dpdk.py8
-rw-r--r--src/spdk/scripts/rpc/helpers.py16
-rw-r--r--src/spdk/scripts/rpc/idxd.py8
-rw-r--r--src/spdk/scripts/rpc/ioat.py17
-rw-r--r--src/spdk/scripts/rpc/iscsi.py558
-rw-r--r--src/spdk/scripts/rpc/log.py75
-rw-r--r--src/spdk/scripts/rpc/lvol.py228
-rw-r--r--src/spdk/scripts/rpc/nbd.py25
-rw-r--r--src/spdk/scripts/rpc/net.py35
-rw-r--r--src/spdk/scripts/rpc/notify.py30
-rw-r--r--src/spdk/scripts/rpc/nvme.py87
-rw-r--r--src/spdk/scripts/rpc/nvmf.py483
-rw-r--r--src/spdk/scripts/rpc/pmem.py35
-rw-r--r--src/spdk/scripts/rpc/sock.py41
-rw-r--r--src/spdk/scripts/rpc/subsystem.py12
-rw-r--r--src/spdk/scripts/rpc/trace.py33
-rw-r--r--src/spdk/scripts/rpc/vhost.py190
-rw-r--r--src/spdk/scripts/rpc/vmd.py3
-rwxr-xr-xsrc/spdk/scripts/rpc_http_proxy.py124
25 files changed, 6139 insertions, 0 deletions
diff --git a/src/spdk/scripts/rpc.py b/src/spdk/scripts/rpc.py
new file mode 100755
index 000000000..140bfefd9
--- /dev/null
+++ b/src/spdk/scripts/rpc.py
@@ -0,0 +1,2507 @@
+#!/usr/bin/env python3
+
+from rpc.client import print_dict, print_json, JSONRPCException
+from rpc.helpers import deprecated_aliases
+
+import logging
+import argparse
+import importlib
+import rpc
+import sys
+import shlex
+import json
+
+try:
+ from shlex import quote
+except ImportError:
+ from pipes import quote
+
+
+# Print the elements of list `a` on one line, space-separated, with each
+# element shell-quoted via shlex/pipes `quote` so the output can be pasted
+# back into a shell safely.
+def print_array(a):
+    print(" ".join((quote(v) for v in a)))
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description='SPDK RPC command line interface', usage='%(prog)s [options]')
+ parser.add_argument('-s', dest='server_addr',
+ help='RPC domain socket path or IP address', default='/var/tmp/spdk.sock')
+ parser.add_argument('-p', dest='port',
+ help='RPC port number (if server_addr is IP address)',
+ default=5260, type=int)
+ parser.add_argument('-t', dest='timeout',
+ help='Timeout as a floating point number expressed in seconds waiting for response. Default: 60.0',
+ default=60.0, type=float)
+ parser.add_argument('-r', dest='conn_retries',
+ help='Retry connecting to the RPC server N times with 0.2s interval. Default: 0',
+ default=0, type=int)
+ parser.add_argument('-v', dest='verbose', action='store_const', const="INFO",
+ help='Set verbose mode to INFO', default="ERROR")
+ parser.add_argument('--verbose', dest='verbose', choices=['DEBUG', 'INFO', 'ERROR'],
+ help="""Set verbose level. """)
+ parser.add_argument('--dry_run', dest='dry_run', action='store_true', help="Display request and exit")
+ parser.set_defaults(dry_run=False)
+ parser.add_argument('--server', dest='is_server', action='store_true',
+ help="Start listening on stdin, parse each line as a regular rpc.py execution and create \
+ a separate connection for each command. Each command's output ends with either \
+ **STATUS=0 if the command succeeded or **STATUS=1 if it failed. --server is meant \
+ to be used in conjunction with bash coproc, where stdin and stdout are connected to \
+ pipes and can be used as a faster way to send RPC commands. If enabled, rpc.py \
+ must be executed without any other parameters.")
+ parser.set_defaults(is_server=False)
+ parser.add_argument('--plugin', dest='rpc_plugin', help='Module name of plugin with additional RPC commands')
+ subparsers = parser.add_subparsers(help='RPC methods', dest='called_rpc_name', metavar='')
+
+ def framework_start_init(args):
+ rpc.framework_start_init(args.client)
+
+ p = subparsers.add_parser('framework_start_init', aliases=['start_subsystem_init'],
+ help='Start initialization of subsystems')
+ p.set_defaults(func=framework_start_init)
+
+ def framework_wait_init(args):
+ rpc.framework_wait_init(args.client)
+
+ p = subparsers.add_parser('framework_wait_init', aliases=['wait_subsystem_init'],
+ help='Block until subsystems have been initialized')
+ p.set_defaults(func=framework_wait_init)
+
+ def rpc_get_methods(args):
+ print_dict(rpc.rpc_get_methods(args.client,
+ current=args.current,
+ include_aliases=args.include_aliases))
+
+ p = subparsers.add_parser('rpc_get_methods', aliases=['get_rpc_methods'],
+ help='Get list of supported RPC methods')
+ p.add_argument('-c', '--current', help='Get list of RPC methods only callable in the current state.', action='store_true')
+ p.add_argument('-i', '--include-aliases', help='include RPC aliases', action='store_true')
+ p.set_defaults(func=rpc_get_methods)
+
+ def spdk_get_version(args):
+ print_json(rpc.spdk_get_version(args.client))
+
+ p = subparsers.add_parser('spdk_get_version', aliases=['get_spdk_version'],
+ help='Get SPDK version')
+ p.set_defaults(func=spdk_get_version)
+
+ def save_config(args):
+ rpc.save_config(args.client,
+ sys.stdout,
+ indent=args.indent)
+
+ p = subparsers.add_parser('save_config', help="""Write current (live) configuration of SPDK subsystems and targets to stdout.
+ """)
+ p.add_argument('-i', '--indent', help="""Indent level. Value less than 0 mean compact mode. Default indent level is 2.
+ """, type=int, default=2)
+ p.set_defaults(func=save_config)
+
+ def load_config(args):
+ rpc.load_config(args.client, args.json_conf,
+ include_aliases=args.include_aliases)
+
+ p = subparsers.add_parser('load_config', help="""Configure SPDK subsystems and targets using JSON RPC.""")
+ p.add_argument('-i', '--include-aliases', help='include RPC aliases', action='store_true')
+ p.add_argument('-j', '--json_conf', help='Valid JSON configuration', default=sys.stdin)
+ p.set_defaults(func=load_config)
+
+ def save_subsystem_config(args):
+ rpc.save_subsystem_config(args.client,
+ sys.stdout,
+ indent=args.indent,
+ name=args.name)
+
+ p = subparsers.add_parser('save_subsystem_config', help="""Write current (live) configuration of SPDK subsystem to stdout.
+ """)
+ p.add_argument('-i', '--indent', help="""Indent level. Value less than 0 mean compact mode. Default indent level is 2.
+ """, type=int, default=2)
+ p.add_argument('-n', '--name', help='Name of subsystem', required=True)
+ p.set_defaults(func=save_subsystem_config)
+
+ def load_subsystem_config(args):
+ rpc.load_subsystem_config(args.client,
+ args.json_conf)
+
+ p = subparsers.add_parser('load_subsystem_config', help="""Configure SPDK subsystem using JSON RPC.""")
+ p.add_argument('-j', '--json_conf', help='Valid JSON configuration', default=sys.stdin)
+ p.set_defaults(func=load_subsystem_config)
+
+ # app
+ def spdk_kill_instance(args):
+ rpc.app.spdk_kill_instance(args.client,
+ sig_name=args.sig_name)
+
+ p = subparsers.add_parser('spdk_kill_instance', aliases=['kill_instance'],
+ help='Send signal to instance')
+ p.add_argument('sig_name', help='signal will be sent to server.')
+ p.set_defaults(func=spdk_kill_instance)
+
+ def framework_monitor_context_switch(args):
+ enabled = None
+ if args.enable:
+ enabled = True
+ if args.disable:
+ enabled = False
+ print_dict(rpc.app.framework_monitor_context_switch(args.client,
+ enabled=enabled))
+
+ p = subparsers.add_parser('framework_monitor_context_switch', aliases=['context_switch_monitor'],
+ help='Control whether the context switch monitor is enabled')
+ p.add_argument('-e', '--enable', action='store_true', help='Enable context switch monitoring')
+ p.add_argument('-d', '--disable', action='store_true', help='Disable context switch monitoring')
+ p.set_defaults(func=framework_monitor_context_switch)
+
+ def framework_get_reactors(args):
+ print_dict(rpc.app.framework_get_reactors(args.client))
+
+ p = subparsers.add_parser(
+ 'framework_get_reactors', help='Display list of all reactors')
+ p.set_defaults(func=framework_get_reactors)
+
+ # bdev
+ def bdev_set_options(args):
+ rpc.bdev.bdev_set_options(args.client,
+ bdev_io_pool_size=args.bdev_io_pool_size,
+ bdev_io_cache_size=args.bdev_io_cache_size,
+ bdev_auto_examine=args.bdev_auto_examine)
+
+ p = subparsers.add_parser('bdev_set_options', aliases=['set_bdev_options'],
+ help="""Set options of bdev subsystem""")
+ p.add_argument('-p', '--bdev-io-pool-size', help='Number of bdev_io structures in shared buffer pool', type=int)
+ p.add_argument('-c', '--bdev-io-cache-size', help='Maximum number of bdev_io structures cached per thread', type=int)
+ group = p.add_mutually_exclusive_group()
+ group.add_argument('-e', '--enable-auto-examine', dest='bdev_auto_examine', help='Allow to auto examine', action='store_true')
+ group.add_argument('-d', '--disable-auto-examine', dest='bdev_auto_examine', help='Not allow to auto examine', action='store_false')
+ p.set_defaults(bdev_auto_examine=True)
+ p.set_defaults(func=bdev_set_options)
+
+ def bdev_compress_create(args):
+ print_json(rpc.bdev.bdev_compress_create(args.client,
+ base_bdev_name=args.base_bdev_name,
+ pm_path=args.pm_path,
+ lb_size=args.lb_size))
+
+ p = subparsers.add_parser('bdev_compress_create', aliases=['construct_compress_bdev'],
+ help='Add a compress vbdev')
+ p.add_argument('-b', '--base_bdev_name', help="Name of the base bdev")
+ p.add_argument('-p', '--pm_path', help="Path to persistent memory")
+ p.add_argument('-l', '--lb_size', help="Compressed vol logical block size (optional, if used must be 512 or 4096)", type=int, default=0)
+ p.set_defaults(func=bdev_compress_create)
+
+ def bdev_compress_delete(args):
+ rpc.bdev.bdev_compress_delete(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('bdev_compress_delete', aliases=['delete_compress_bdev'],
+ help='Delete a compress disk')
+ p.add_argument('name', help='compress bdev name')
+ p.set_defaults(func=bdev_compress_delete)
+
+ def compress_set_pmd(args):
+ rpc.bdev.compress_set_pmd(args.client,
+ pmd=args.pmd)
+ p = subparsers.add_parser('compress_set_pmd', aliases=['set_compress_pmd'],
+ help='Set pmd option for a compress disk')
+ p.add_argument('-p', '--pmd', type=int, help='0 = auto-select, 1= QAT only, 2 = ISAL only')
+ p.set_defaults(func=compress_set_pmd)
+
+ def bdev_compress_get_orphans(args):
+ print_dict(rpc.bdev.bdev_compress_get_orphans(args.client,
+ name=args.name))
+ p = subparsers.add_parser(
+ 'bdev_compress_get_orphans', help='Display list of orphaned compress bdevs.')
+ p.add_argument('-b', '--name', help="Name of a comp bdev. Example: COMP_Nvme0n1", required=False)
+ p.set_defaults(func=bdev_compress_get_orphans)
+
+ def bdev_crypto_create(args):
+ print_json(rpc.bdev.bdev_crypto_create(args.client,
+ base_bdev_name=args.base_bdev_name,
+ name=args.name,
+ crypto_pmd=args.crypto_pmd,
+ key=args.key,
+ cipher=args.cipher,
+ key2=args.key2))
+ p = subparsers.add_parser('bdev_crypto_create', aliases=['construct_crypto_bdev'],
+ help='Add a crypto vbdev')
+ p.add_argument('base_bdev_name', help="Name of the base bdev")
+ p.add_argument('name', help="Name of the crypto vbdev")
+ p.add_argument('crypto_pmd', help="Name of the crypto device driver")
+ p.add_argument('key', help="Key")
+ p.add_argument('-c', '--cipher', help="cipher to use, AES_CBC or AES_XTS (QAT only)", default="AES_CBC")
+ p.add_argument('-k2', '--key2', help="2nd key for cipher AET_XTS", default=None)
+ p.set_defaults(func=bdev_crypto_create)
+
+ def bdev_crypto_delete(args):
+ rpc.bdev.bdev_crypto_delete(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('bdev_crypto_delete', aliases=['delete_crypto_bdev'],
+ help='Delete a crypto disk')
+ p.add_argument('name', help='crypto bdev name')
+ p.set_defaults(func=bdev_crypto_delete)
+
+ def bdev_ocf_create(args):
+ print_json(rpc.bdev.bdev_ocf_create(args.client,
+ name=args.name,
+ mode=args.mode,
+ cache_bdev_name=args.cache_bdev_name,
+ core_bdev_name=args.core_bdev_name))
+ p = subparsers.add_parser('bdev_ocf_create', aliases=['construct_ocf_bdev'],
+ help='Add an OCF block device')
+ p.add_argument('name', help='Name of resulting OCF bdev')
+ p.add_argument('mode', help='OCF cache mode', choices=['wb', 'wt', 'pt', 'wa', 'wi', 'wo'])
+ p.add_argument('cache_bdev_name', help='Name of underlying cache bdev')
+ p.add_argument('core_bdev_name', help='Name of unerlying core bdev')
+ p.set_defaults(func=bdev_ocf_create)
+
+ def bdev_ocf_delete(args):
+ rpc.bdev.bdev_ocf_delete(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('bdev_ocf_delete', aliases=['delete_ocf_bdev'],
+ help='Delete an OCF block device')
+ p.add_argument('name', help='Name of OCF bdev')
+ p.set_defaults(func=bdev_ocf_delete)
+
+ def bdev_ocf_get_stats(args):
+ print_dict(rpc.bdev.bdev_ocf_get_stats(args.client,
+ name=args.name))
+ p = subparsers.add_parser('bdev_ocf_get_stats', aliases=['get_ocf_stats'],
+ help='Get statistics of chosen OCF block device')
+ p.add_argument('name', help='Name of OCF bdev')
+ p.set_defaults(func=bdev_ocf_get_stats)
+
+ def bdev_ocf_get_bdevs(args):
+ print_dict(rpc.bdev.bdev_ocf_get_bdevs(args.client,
+ name=args.name))
+ p = subparsers.add_parser('bdev_ocf_get_bdevs', aliases=['get_ocf_bdevs'],
+ help='Get list of OCF devices including unregistered ones')
+ p.add_argument('name', nargs='?', default=None, help='name of OCF vbdev or name of cache device or name of core device (optional)')
+ p.set_defaults(func=bdev_ocf_get_bdevs)
+
+ def bdev_malloc_create(args):
+ num_blocks = (args.total_size * 1024 * 1024) // args.block_size
+ print_json(rpc.bdev.bdev_malloc_create(args.client,
+ num_blocks=int(num_blocks),
+ block_size=args.block_size,
+ name=args.name,
+ uuid=args.uuid))
+ p = subparsers.add_parser('bdev_malloc_create', aliases=['construct_malloc_bdev'],
+ help='Create a bdev with malloc backend')
+ p.add_argument('-b', '--name', help="Name of the bdev")
+ p.add_argument('-u', '--uuid', help="UUID of the bdev")
+ p.add_argument(
+ 'total_size', help='Size of malloc bdev in MB (float > 0)', type=float)
+ p.add_argument('block_size', help='Block size for this bdev', type=int)
+ p.set_defaults(func=bdev_malloc_create)
+
+ def bdev_malloc_delete(args):
+ rpc.bdev.bdev_malloc_delete(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('bdev_malloc_delete', aliases=['delete_malloc_bdev'],
+ help='Delete a malloc disk')
+ p.add_argument('name', help='malloc bdev name')
+ p.set_defaults(func=bdev_malloc_delete)
+
+ def bdev_null_create(args):
+ num_blocks = (args.total_size * 1024 * 1024) // args.block_size
+ print_json(rpc.bdev.bdev_null_create(args.client,
+ num_blocks=num_blocks,
+ block_size=args.block_size,
+ name=args.name,
+ uuid=args.uuid,
+ md_size=args.md_size,
+ dif_type=args.dif_type,
+ dif_is_head_of_md=args.dif_is_head_of_md))
+
+ p = subparsers.add_parser('bdev_null_create', aliases=['construct_null_bdev'],
+ help='Add a bdev with null backend')
+ p.add_argument('name', help='Block device name')
+ p.add_argument('-u', '--uuid', help='UUID of the bdev')
+ p.add_argument(
+ 'total_size', help='Size of null bdev in MB (int > 0)', type=int)
+ p.add_argument('block_size', help='Block size for this bdev', type=int)
+ p.add_argument('-m', '--md-size', type=int,
+ help='Metadata size for this bdev. Default 0')
+ p.add_argument('-t', '--dif-type', type=int, choices=[0, 1, 2, 3],
+ help='Protection information type. Default: 0 - no protection')
+ p.add_argument('-d', '--dif-is-head-of-md', action='store_true',
+ help='Protection information is in the first 8 bytes of metadata. Default: in the last 8 bytes')
+ p.set_defaults(func=bdev_null_create)
+
+ def bdev_null_delete(args):
+ rpc.bdev.bdev_null_delete(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('bdev_null_delete', aliases=['delete_null_bdev'],
+ help='Delete a null bdev')
+ p.add_argument('name', help='null bdev name')
+ p.set_defaults(func=bdev_null_delete)
+
+ def bdev_aio_create(args):
+ print_json(rpc.bdev.bdev_aio_create(args.client,
+ filename=args.filename,
+ name=args.name,
+ block_size=args.block_size))
+
+ p = subparsers.add_parser('bdev_aio_create', aliases=['construct_aio_bdev'],
+ help='Add a bdev with aio backend')
+ p.add_argument('filename', help='Path to device or file (ex: /dev/sda)')
+ p.add_argument('name', help='Block device name')
+ p.add_argument('block_size', help='Block size for this bdev', type=int, nargs='?', default=0)
+ p.set_defaults(func=bdev_aio_create)
+
+ def bdev_aio_delete(args):
+ rpc.bdev.bdev_aio_delete(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('bdev_aio_delete', aliases=['delete_aio_bdev'],
+ help='Delete an aio disk')
+ p.add_argument('name', help='aio bdev name')
+ p.set_defaults(func=bdev_aio_delete)
+
+ def bdev_uring_create(args):
+ print_json(rpc.bdev.bdev_uring_create(args.client,
+ filename=args.filename,
+ name=args.name,
+ block_size=args.block_size))
+
+ p = subparsers.add_parser('bdev_uring_create', help='Create a bdev with io_uring backend')
+ p.add_argument('filename', help='Path to device or file (ex: /dev/nvme0n1)')
+ p.add_argument('name', help='bdev name')
+ p.add_argument('block_size', help='Block size for this bdev', type=int, nargs='?', default=0)
+ p.set_defaults(func=bdev_uring_create)
+
+ def bdev_uring_delete(args):
+ rpc.bdev.bdev_uring_delete(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('bdev_uring_delete', help='Delete a uring bdev')
+ p.add_argument('name', help='uring bdev name')
+ p.set_defaults(func=bdev_uring_delete)
+
+ def bdev_nvme_set_options(args):
+ rpc.bdev.bdev_nvme_set_options(args.client,
+ action_on_timeout=args.action_on_timeout,
+ timeout_us=args.timeout_us,
+ retry_count=args.retry_count,
+ arbitration_burst=args.arbitration_burst,
+ low_priority_weight=args.low_priority_weight,
+ medium_priority_weight=args.medium_priority_weight,
+ high_priority_weight=args.high_priority_weight,
+ nvme_adminq_poll_period_us=args.nvme_adminq_poll_period_us,
+ nvme_ioq_poll_period_us=args.nvme_ioq_poll_period_us,
+ io_queue_requests=args.io_queue_requests,
+ delay_cmd_submit=args.delay_cmd_submit)
+
+ p = subparsers.add_parser('bdev_nvme_set_options', aliases=['set_bdev_nvme_options'],
+ help='Set options for the bdev nvme type. This is startup command.')
+ p.add_argument('-a', '--action-on-timeout',
+ help="Action to take on command time out. Valid valies are: none, reset, abort")
+ p.add_argument('-t', '--timeout-us',
+ help="Timeout for each command, in microseconds. If 0, don't track timeouts.", type=int)
+ p.add_argument('-n', '--retry-count',
+ help='the number of attempts per I/O when an I/O fails', type=int)
+ p.add_argument('--arbitration-burst',
+ help='the value is expressed as a power of two', type=int)
+ p.add_argument('--low-priority-weight',
+ help='the maximum number of commands that the controller may launch at one time from a low priority queue', type=int)
+ p.add_argument('--medium-priority-weight',
+ help='the maximum number of commands that the controller may launch at one time from a medium priority queue', type=int)
+ p.add_argument('--high-priority-weight',
+ help='the maximum number of commands that the controller may launch at one time from a high priority queue', type=int)
+ p.add_argument('-p', '--nvme-adminq-poll-period-us',
+ help='How often the admin queue is polled for asynchronous events', type=int)
+ p.add_argument('-i', '--nvme-ioq-poll-period-us',
+ help='How often to poll I/O queues for completions', type=int)
+ p.add_argument('-s', '--io-queue-requests',
+ help='The number of requests allocated for each NVMe I/O queue. Default: 512', type=int)
+ p.add_argument('-d', '--disable-delay-cmd-submit',
+ help='Disable delaying NVMe command submission, i.e. no batching of multiple commands',
+ action='store_false', dest='delay_cmd_submit', default=True)
+ p.set_defaults(func=bdev_nvme_set_options)
+
+ def bdev_nvme_set_hotplug(args):
+ rpc.bdev.bdev_nvme_set_hotplug(args.client, enable=args.enable, period_us=args.period_us)
+
+ p = subparsers.add_parser('bdev_nvme_set_hotplug', aliases=['set_bdev_nvme_hotplug'],
+ help='Set hotplug options for bdev nvme type.')
+ p.add_argument('-d', '--disable', dest='enable', default=False, action='store_false', help="Disable hotplug (default)")
+ p.add_argument('-e', '--enable', dest='enable', action='store_true', help="Enable hotplug")
+ p.add_argument('-r', '--period-us',
+ help='How often the hotplug is processed for insert and remove events', type=int)
+ p.set_defaults(func=bdev_nvme_set_hotplug)
+
+ def bdev_nvme_attach_controller(args):
+ print_array(rpc.bdev.bdev_nvme_attach_controller(args.client,
+ name=args.name,
+ trtype=args.trtype,
+ traddr=args.traddr,
+ adrfam=args.adrfam,
+ trsvcid=args.trsvcid,
+ priority=args.priority,
+ subnqn=args.subnqn,
+ hostnqn=args.hostnqn,
+ hostaddr=args.hostaddr,
+ hostsvcid=args.hostsvcid,
+ prchk_reftag=args.prchk_reftag,
+ prchk_guard=args.prchk_guard))
+
+ p = subparsers.add_parser('bdev_nvme_attach_controller', aliases=['construct_nvme_bdev'],
+ help='Add bdevs with nvme backend')
+ p.add_argument('-b', '--name', help="Name of the NVMe controller, prefix for each bdev name", required=True)
+ p.add_argument('-t', '--trtype',
+ help='NVMe-oF target trtype: e.g., rdma, pcie', required=True)
+ p.add_argument('-a', '--traddr',
+ help='NVMe-oF target address: e.g., an ip address or BDF', required=True)
+ p.add_argument('-f', '--adrfam',
+ help='NVMe-oF target adrfam: e.g., ipv4, ipv6, ib, fc, intra_host')
+ p.add_argument('-s', '--trsvcid',
+ help='NVMe-oF target trsvcid: e.g., a port number')
+ p.add_argument('-p', '--priority',
+ help='NVMe-oF connection priority: e.g., a priority number')
+ p.add_argument('-n', '--subnqn', help='NVMe-oF target subnqn')
+ p.add_argument('-q', '--hostnqn', help='NVMe-oF host subnqn')
+ p.add_argument('-i', '--hostaddr',
+ help='NVMe-oF host address: e.g., an ip address')
+ p.add_argument('-c', '--hostsvcid',
+ help='NVMe-oF host svcid: e.g., a port number')
+ p.add_argument('-r', '--prchk-reftag',
+ help='Enable checking of PI reference tag for I/O processing.', action='store_true')
+ p.add_argument('-g', '--prchk-guard',
+ help='Enable checking of PI guard for I/O processing.', action='store_true')
+ p.set_defaults(func=bdev_nvme_attach_controller)
+
+ def bdev_nvme_get_controllers(args):
+ print_dict(rpc.nvme.bdev_nvme_get_controllers(args.client,
+ name=args.name))
+
+ p = subparsers.add_parser(
+ 'bdev_nvme_get_controllers', aliases=['get_nvme_controllers'],
+ help='Display current NVMe controllers list or required NVMe controller')
+ p.add_argument('-n', '--name', help="Name of the NVMe controller. Example: Nvme0", required=False)
+ p.set_defaults(func=bdev_nvme_get_controllers)
+
+ def bdev_nvme_detach_controller(args):
+ rpc.bdev.bdev_nvme_detach_controller(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('bdev_nvme_detach_controller', aliases=['delete_nvme_controller'],
+ help='Detach an NVMe controller and delete any associated bdevs')
+ p.add_argument('name', help="Name of the controller")
+ p.set_defaults(func=bdev_nvme_detach_controller)
+
+ def bdev_nvme_cuse_register(args):
+ rpc.bdev.bdev_nvme_cuse_register(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('bdev_nvme_cuse_register',
+ help='Register CUSE devices on NVMe controller')
+ p.add_argument('-n', '--name',
+ help='Name of the NVMe controller. Example: Nvme0', required=True)
+ p.set_defaults(func=bdev_nvme_cuse_register)
+
+ def bdev_nvme_cuse_unregister(args):
+ rpc.bdev.bdev_nvme_cuse_unregister(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('bdev_nvme_cuse_unregister',
+ help='Unregister CUSE devices on NVMe controller')
+ p.add_argument('-n', '--name',
+ help='Name of the NVMe controller. Example: Nvme0', required=True)
+ p.set_defaults(func=bdev_nvme_cuse_unregister)
+
+ def bdev_zone_block_create(args):
+ print_json(rpc.bdev.bdev_zone_block_create(args.client,
+ name=args.name,
+ base_bdev=args.base_bdev,
+ zone_capacity=args.zone_capacity,
+ optimal_open_zones=args.optimal_open_zones))
+
+ p = subparsers.add_parser('bdev_zone_block_create',
+ help='Create virtual zone namespace device with block device backend')
+ p.add_argument('-b', '--name', help="Name of the zone device", required=True)
+ p.add_argument('-n', '--base-bdev', help='Name of underlying, non-zoned bdev', required=True)
+ p.add_argument('-z', '--zone-capacity', help='Surfaced zone capacity in blocks', type=int, required=True)
+ p.add_argument('-o', '--optimal-open-zones', help='Number of zones required to reach optimal write speed', type=int, required=True)
+ p.set_defaults(func=bdev_zone_block_create)
+
+ def bdev_zone_block_delete(args):
+ rpc.bdev.bdev_zone_block_delete(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('bdev_zone_block_delete', help='Delete a virtual zone namespace device')
+ p.add_argument('name', help='Virtual zone bdev name')
+ p.set_defaults(func=bdev_zone_block_delete)
+
+ def bdev_rbd_create(args):
+ config = None
+ if args.config:
+ config = {}
+ for entry in args.config:
+ parts = entry.split('=', 1)
+ if len(parts) != 2:
+ raise Exception('--config %s not in key=value form' % entry)
+ config[parts[0]] = parts[1]
+ print_json(rpc.bdev.bdev_rbd_create(args.client,
+ name=args.name,
+ user=args.user,
+ config=config,
+ pool_name=args.pool_name,
+ rbd_name=args.rbd_name,
+ block_size=args.block_size))
+
+ p = subparsers.add_parser('bdev_rbd_create', aliases=['construct_rbd_bdev'],
+ help='Add a bdev with ceph rbd backend')
+ p.add_argument('-b', '--name', help="Name of the bdev", required=False)
+ p.add_argument('--user', help="Ceph user name (i.e. admin, not client.admin)", required=False)
+ p.add_argument('--config', action='append', metavar='key=value',
+ help="adds a key=value configuration option for rados_conf_set (default: rely on config file)")
+ p.add_argument('pool_name', help='rbd pool name')
+ p.add_argument('rbd_name', help='rbd image name')
+ p.add_argument('block_size', help='rbd block size', type=int)
+ p.set_defaults(func=bdev_rbd_create)
+
+ def bdev_rbd_delete(args):
+ rpc.bdev.bdev_rbd_delete(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('bdev_rbd_delete', aliases=['delete_rbd_bdev'],
+ help='Delete a rbd bdev')
+ p.add_argument('name', help='rbd bdev name')
+ p.set_defaults(func=bdev_rbd_delete)
+
+ def bdev_rbd_resize(args):
+ print_json(rpc.bdev.bdev_rbd_resize(args.client,
+ name=args.name,
+ new_size=int(args.new_size)))
+ rpc.bdev.bdev_rbd_resize(args.client,
+ name=args.name,
+ new_size=int(args.new_size))
+
+ p = subparsers.add_parser('bdev_rbd_resize',
+ help='Resize a rbd bdev')
+ p.add_argument('name', help='rbd bdev name')
+ p.add_argument('new_size', help='new bdev size for resize operation. The unit is MiB')
+ p.set_defaults(func=bdev_rbd_resize)
+
+ def bdev_delay_create(args):
+ print_json(rpc.bdev.bdev_delay_create(args.client,
+ base_bdev_name=args.base_bdev_name,
+ name=args.name,
+ avg_read_latency=args.avg_read_latency,
+ p99_read_latency=args.nine_nine_read_latency,
+ avg_write_latency=args.avg_write_latency,
+ p99_write_latency=args.nine_nine_write_latency))
+
+ p = subparsers.add_parser('bdev_delay_create',
+ help='Add a delay bdev on existing bdev')
+ p.add_argument('-b', '--base-bdev-name', help="Name of the existing bdev", required=True)
+ p.add_argument('-d', '--name', help="Name of the delay bdev", required=True)
+ p.add_argument('-r', '--avg-read-latency',
+ help="Average latency to apply before completing read ops (in microseconds)", required=True, type=int)
+ p.add_argument('-t', '--nine-nine-read-latency',
+ help="latency to apply to 1 in 100 read ops (in microseconds)", required=True, type=int)
+ p.add_argument('-w', '--avg-write-latency',
+ help="Average latency to apply before completing write ops (in microseconds)", required=True, type=int)
+ p.add_argument('-n', '--nine-nine-write-latency',
+ help="latency to apply to 1 in 100 write ops (in microseconds)", required=True, type=int)
+ p.set_defaults(func=bdev_delay_create)
+
+ def bdev_delay_delete(args):
+ rpc.bdev.bdev_delay_delete(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('bdev_delay_delete', help='Delete a delay bdev')
+ p.add_argument('name', help='delay bdev name')
+ p.set_defaults(func=bdev_delay_delete)
+
+ def bdev_delay_update_latency(args):
+ print_json(rpc.bdev.bdev_delay_update_latency(args.client,
+ delay_bdev_name=args.delay_bdev_name,
+ latency_type=args.latency_type,
+ latency_us=args.latency_us))
+ p = subparsers.add_parser('bdev_delay_update_latency',
+ help='Update one of the latency values for a given delay bdev')
+ p.add_argument('delay_bdev_name', help='The name of the given delay bdev')
+ p.add_argument('latency_type', help='one of: avg_read, avg_write, p99_read, p99_write. No other values accepted.')
+ p.add_argument('latency_us', help='new latency value in microseconds.', type=int)
+ p.set_defaults(func=bdev_delay_update_latency)
+
+ def bdev_error_create(args):
+ print_json(rpc.bdev.bdev_error_create(args.client,
+ base_name=args.base_name))
+
+ p = subparsers.add_parser('bdev_error_create', aliases=['construct_error_bdev'],
+ help='Add bdev with error injection backend')
+ p.add_argument('base_name', help='base bdev name')
+ p.set_defaults(func=bdev_error_create)
+
+ def bdev_error_delete(args):
+ rpc.bdev.bdev_error_delete(args.client,
+ name=args.name)
+
+ p = subparsers.add_parser('bdev_error_delete', aliases=['delete_error_bdev'],
+ help='Delete an error bdev')
+ p.add_argument('name', help='error bdev name')
+ p.set_defaults(func=bdev_error_delete)
+
+ def bdev_iscsi_create(args):
+ print_json(rpc.bdev.bdev_iscsi_create(args.client,
+ name=args.name,
+ url=args.url,
+ initiator_iqn=args.initiator_iqn))
+
+ p = subparsers.add_parser('bdev_iscsi_create', aliases=['construct_iscsi_bdev'],
+ help='Add bdev with iSCSI initiator backend')
+ p.add_argument('-b', '--name', help="Name of the bdev", required=True)
+ p.add_argument('-i', '--initiator-iqn', help="Initiator IQN", required=True)
+ p.add_argument('--url', help="iSCSI Lun URL", required=True)
+ p.set_defaults(func=bdev_iscsi_create)
+
    def bdev_iscsi_delete(args):
        # Remove the iSCSI-backed bdev identified by name.
        rpc.bdev.bdev_iscsi_delete(args.client,
                                   name=args.name)

    p = subparsers.add_parser('bdev_iscsi_delete', aliases=['delete_iscsi_bdev'],
                              help='Delete an iSCSI bdev')
    p.add_argument('name', help='iSCSI bdev name')
    p.set_defaults(func=bdev_iscsi_delete)

    def bdev_pmem_create(args):
        # Create a bdev backed by a pmemblk pool file and print the new bdev name as JSON.
        print_json(rpc.bdev.bdev_pmem_create(args.client,
                                             pmem_file=args.pmem_file,
                                             name=args.name))

    p = subparsers.add_parser('bdev_pmem_create', aliases=['construct_pmem_bdev'],
                              help='Add a bdev with pmem backend')
    p.add_argument('pmem_file', help='Path to pmemblk pool file')
    p.add_argument('-n', '--name', help='Block device name', required=True)
    p.set_defaults(func=bdev_pmem_create)

    def bdev_pmem_delete(args):
        # Remove a pmem bdev by name.
        rpc.bdev.bdev_pmem_delete(args.client,
                                  name=args.name)

    p = subparsers.add_parser('bdev_pmem_delete', aliases=['delete_pmem_bdev'],
                              help='Delete a pmem bdev')
    p.add_argument('name', help='pmem bdev name')
    p.set_defaults(func=bdev_pmem_delete)

    def bdev_passthru_create(args):
        # Layer a pass-through bdev on top of an existing bdev; print the new name as JSON.
        print_json(rpc.bdev.bdev_passthru_create(args.client,
                                                 base_bdev_name=args.base_bdev_name,
                                                 name=args.name))

    p = subparsers.add_parser('bdev_passthru_create', aliases=['construct_passthru_bdev'],
                              help='Add a pass through bdev on existing bdev')
    p.add_argument('-b', '--base-bdev-name', help="Name of the existing bdev", required=True)
    p.add_argument('-p', '--name', help="Name of the pass through bdev", required=True)
    p.set_defaults(func=bdev_passthru_create)

    def bdev_passthru_delete(args):
        # Remove a pass-through bdev by name.
        rpc.bdev.bdev_passthru_delete(args.client,
                                      name=args.name)

    p = subparsers.add_parser('bdev_passthru_delete', aliases=['delete_passthru_bdev'],
                              help='Delete a pass through bdev')
    p.add_argument('name', help='pass through bdev name')
    p.set_defaults(func=bdev_passthru_delete)

    def bdev_get_bdevs(args):
        # List all bdevs, or just the one selected with -b/--name (name may be None).
        print_dict(rpc.bdev.bdev_get_bdevs(args.client,
                                           name=args.name))

    p = subparsers.add_parser('bdev_get_bdevs', aliases=['get_bdevs'],
                              help='Display current blockdev list or required blockdev')
    p.add_argument('-b', '--name', help="Name of the Blockdev. Example: Nvme0n1", required=False)
    p.set_defaults(func=bdev_get_bdevs)

    def bdev_get_iostat(args):
        # Show I/O statistics for all bdevs, or just the one selected with -b/--name.
        print_dict(rpc.bdev.bdev_get_iostat(args.client,
                                            name=args.name))

    p = subparsers.add_parser('bdev_get_iostat', aliases=['get_bdevs_iostat'],
                              help='Display current I/O statistics of all the blockdevs or required blockdev.')
    p.add_argument('-b', '--name', help="Name of the Blockdev. Example: Nvme0n1", required=False)
    p.set_defaults(func=bdev_get_iostat)
+
    def bdev_enable_histogram(args):
        # Toggle latency histogram collection for one bdev.
        rpc.bdev.bdev_enable_histogram(args.client, name=args.name, enable=args.enable)

    p = subparsers.add_parser('bdev_enable_histogram', aliases=['enable_bdev_histogram'],
                              help='Enable or disable histogram for specified bdev')
    # NOTE: 'enable' defaults to True, so '-e' only states the default explicitly;
    # '-d' is what actually flips the flag off.
    p.add_argument('-e', '--enable', default=True, dest='enable', action='store_true', help='Enable histograms on specified device')
    p.add_argument('-d', '--disable', dest='enable', action='store_false', help='Disable histograms on specified device')
    p.add_argument('name', help='bdev name')
    p.set_defaults(func=bdev_enable_histogram)

    def bdev_get_histogram(args):
        # Fetch and print the collected histogram for one bdev.
        print_dict(rpc.bdev.bdev_get_histogram(args.client, name=args.name))

    p = subparsers.add_parser('bdev_get_histogram', aliases=['get_bdev_histogram'],
                              help='Get histogram for specified bdev')
    p.add_argument('name', help='bdev name')
    p.set_defaults(func=bdev_get_histogram)

    def bdev_set_qd_sampling_period(args):
        # Set the queue-depth polling period (microseconds); 0 disables polling.
        rpc.bdev.bdev_set_qd_sampling_period(args.client,
                                             name=args.name,
                                             period=args.period)

    p = subparsers.add_parser('bdev_set_qd_sampling_period', aliases=['set_bdev_qd_sampling_period'],
                              help="Enable or disable tracking of a bdev's queue depth.")
    p.add_argument('name', help='Blockdev name. Example: Malloc0')
    p.add_argument('period', help='Period with which to poll the block device queue depth in microseconds.'
                   ' If set to 0, polling will be disabled.',
                   type=int)
    p.set_defaults(func=bdev_set_qd_sampling_period)

    def bdev_set_qos_limit(args):
        # Apply rate limits to a bdev; limits left unset are passed as None.
        rpc.bdev.bdev_set_qos_limit(args.client,
                                    name=args.name,
                                    rw_ios_per_sec=args.rw_ios_per_sec,
                                    rw_mbytes_per_sec=args.rw_mbytes_per_sec,
                                    r_mbytes_per_sec=args.r_mbytes_per_sec,
                                    w_mbytes_per_sec=args.w_mbytes_per_sec)

    p = subparsers.add_parser('bdev_set_qos_limit', aliases=['set_bdev_qos_limit'],
                              help='Set QoS rate limit on a blockdev')
    p.add_argument('name', help='Blockdev name to set QoS. Example: Malloc0')
    p.add_argument('--rw_ios_per_sec',
                   help='R/W IOs per second limit (>=10000, example: 20000). 0 means unlimited.',
                   type=int, required=False)
    p.add_argument('--rw_mbytes_per_sec',
                   help="R/W megabytes per second limit (>=10, example: 100). 0 means unlimited.",
                   type=int, required=False)
    p.add_argument('--r_mbytes_per_sec',
                   help="Read megabytes per second limit (>=10, example: 100). 0 means unlimited.",
                   type=int, required=False)
    p.add_argument('--w_mbytes_per_sec',
                   help="Write megabytes per second limit (>=10, example: 100). 0 means unlimited.",
                   type=int, required=False)
    p.set_defaults(func=bdev_set_qos_limit)

    def bdev_error_inject_error(args):
        # Ask the error-injection bdev to fail the next 'num' commands of 'io_type'.
        rpc.bdev.bdev_error_inject_error(args.client,
                                         name=args.name,
                                         io_type=args.io_type,
                                         error_type=args.error_type,
                                         num=args.num)

    p = subparsers.add_parser('bdev_error_inject_error', aliases=['bdev_inject_error'],
                              help='bdev inject error')
    p.add_argument('name', help="""the name of the error injection bdev""")
    p.add_argument('io_type', help="""io_type: 'clear' 'read' 'write' 'unmap' 'flush' 'all'""")
    p.add_argument('error_type', help="""error_type: 'failure' 'pending'""")
    p.add_argument(
        '-n', '--num', help='the number of commands you want to fail', type=int, default=1)
    p.set_defaults(func=bdev_error_inject_error)

    def bdev_nvme_apply_firmware(args):
        # Download a firmware image to an NVMe bdev and print the RPC result.
        print_dict(rpc.bdev.bdev_nvme_apply_firmware(args.client,
                                                     bdev_name=args.bdev_name,
                                                     filename=args.filename))

    p = subparsers.add_parser('bdev_nvme_apply_firmware', aliases=['apply_firmware'],
                              help='Download and commit firmware to NVMe device')
    p.add_argument('filename', help='filename of the firmware to download')
    p.add_argument('bdev_name', help='name of the NVMe device')
    p.set_defaults(func=bdev_nvme_apply_firmware)
+
    # iSCSI
    def iscsi_set_options(args):
        # Forward every global iSCSI option from the CLI to the RPC; options the
        # user did not supply arrive as argparse defaults (None / False).
        rpc.iscsi.iscsi_set_options(
            args.client,
            auth_file=args.auth_file,
            node_base=args.node_base,
            nop_timeout=args.nop_timeout,
            nop_in_interval=args.nop_in_interval,
            disable_chap=args.disable_chap,
            require_chap=args.require_chap,
            mutual_chap=args.mutual_chap,
            chap_group=args.chap_group,
            max_sessions=args.max_sessions,
            max_queue_depth=args.max_queue_depth,
            max_connections_per_session=args.max_connections_per_session,
            default_time2wait=args.default_time2wait,
            default_time2retain=args.default_time2retain,
            first_burst_length=args.first_burst_length,
            immediate_data=args.immediate_data,
            error_recovery_level=args.error_recovery_level,
            allow_duplicated_isid=args.allow_duplicated_isid)

    p = subparsers.add_parser('iscsi_set_options', aliases=['set_iscsi_options'],
                              help="""Set options of iSCSI subsystem""")
    p.add_argument('-f', '--auth-file', help='Path to CHAP shared secret file')
    p.add_argument('-b', '--node-base', help='Prefix of the name of iSCSI target node')
    p.add_argument('-o', '--nop-timeout', help='Timeout in seconds to nop-in request to the initiator', type=int)
    p.add_argument('-n', '--nop-in-interval', help='Time interval in secs between nop-in requests by the target', type=int)
    p.add_argument('-d', '--disable-chap', help="""CHAP for discovery session should be disabled.
    *** Mutually exclusive with --require-chap""", action='store_true')
    p.add_argument('-r', '--require-chap', help="""CHAP for discovery session should be required.
    *** Mutually exclusive with --disable-chap""", action='store_true')
    p.add_argument('-m', '--mutual-chap', help='CHAP for discovery session should be mutual', action='store_true')
    p.add_argument('-g', '--chap-group', help="""Authentication group ID for discovery session.
    *** Authentication group must be precreated ***""", type=int)
    p.add_argument('-a', '--max-sessions', help='Maximum number of sessions in the host.', type=int)
    p.add_argument('-q', '--max-queue-depth', help='Max number of outstanding I/Os per queue.', type=int)
    p.add_argument('-c', '--max-connections-per-session', help='Negotiated parameter, MaxConnections.', type=int)
    p.add_argument('-w', '--default-time2wait', help='Negotiated parameter, DefaultTime2Wait.', type=int)
    p.add_argument('-v', '--default-time2retain', help='Negotiated parameter, DefaultTime2Retain.', type=int)
    p.add_argument('-s', '--first-burst-length', help='Negotiated parameter, FirstBurstLength.', type=int)
    p.add_argument('-i', '--immediate-data', help='Negotiated parameter, ImmediateData.', action='store_true')
    p.add_argument('-l', '--error-recovery-level', help='Negotiated parameter, ErrorRecoveryLevel', type=int)
    p.add_argument('-p', '--allow-duplicated-isid', help='Allow duplicated initiator session ID.', action='store_true')
    p.set_defaults(func=iscsi_set_options)

    def iscsi_set_discovery_auth(args):
        # Configure CHAP authentication for the iSCSI discovery session.
        rpc.iscsi.iscsi_set_discovery_auth(
            args.client,
            disable_chap=args.disable_chap,
            require_chap=args.require_chap,
            mutual_chap=args.mutual_chap,
            chap_group=args.chap_group)

    p = subparsers.add_parser('iscsi_set_discovery_auth', aliases=['set_iscsi_discovery_auth'],
                              help="""Set CHAP authentication for discovery session.""")
    p.add_argument('-d', '--disable-chap', help="""CHAP for discovery session should be disabled.
    *** Mutually exclusive with --require-chap""", action='store_true')
    p.add_argument('-r', '--require-chap', help="""CHAP for discovery session should be required.
    *** Mutually exclusive with --disable-chap""", action='store_true')
    p.add_argument('-m', '--mutual-chap', help='CHAP for discovery session should be mutual', action='store_true')
    p.add_argument('-g', '--chap-group', help="""Authentication group ID for discovery session.
    *** Authentication group must be precreated ***""", type=int)
    p.set_defaults(func=iscsi_set_discovery_auth)

    def iscsi_create_auth_group(args):
        # Parse the comma-separated secrets string: each entry is a space-separated
        # list of "key:value" tokens, converted into one dict per secret.
        # None means create the group without any secrets.
        secrets = None
        if args.secrets:
            secrets = [dict(u.split(":") for u in a.split(" ")) for a in args.secrets.split(",")]

        rpc.iscsi.iscsi_create_auth_group(args.client, tag=args.tag, secrets=secrets)

    p = subparsers.add_parser('iscsi_create_auth_group', aliases=['add_iscsi_auth_group'],
                              help='Create authentication group for CHAP authentication.')
    p.add_argument('tag', help='Authentication group tag (unique, integer > 0).', type=int)
    p.add_argument('-c', '--secrets', help="""Comma-separated list of CHAP secrets
<user:user_name secret:chap_secret muser:mutual_user_name msecret:mutual_chap_secret> enclosed in quotes.
Format: 'user:u1 secret:s1 muser:mu1 msecret:ms1,user:u2 secret:s2 muser:mu2 msecret:ms2'""", required=False)
    p.set_defaults(func=iscsi_create_auth_group)

    def iscsi_delete_auth_group(args):
        # Delete the authentication group identified by its tag.
        rpc.iscsi.iscsi_delete_auth_group(args.client, tag=args.tag)

    p = subparsers.add_parser('iscsi_delete_auth_group', aliases=['delete_iscsi_auth_group'],
                              help='Delete an authentication group.')
    p.add_argument('tag', help='Authentication group tag', type=int)
    p.set_defaults(func=iscsi_delete_auth_group)

    def iscsi_auth_group_add_secret(args):
        # Add one CHAP secret (optionally with mutual-CHAP credentials) to a group.
        rpc.iscsi.iscsi_auth_group_add_secret(
            args.client,
            tag=args.tag,
            user=args.user,
            secret=args.secret,
            muser=args.muser,
            msecret=args.msecret)

    p = subparsers.add_parser('iscsi_auth_group_add_secret', aliases=['add_secret_to_iscsi_auth_group'],
                              help='Add a secret to an authentication group.')
    p.add_argument('tag', help='Authentication group tag', type=int)
    p.add_argument('-u', '--user', help='User name for one-way CHAP authentication', required=True)
    p.add_argument('-s', '--secret', help='Secret for one-way CHAP authentication', required=True)
    p.add_argument('-m', '--muser', help='User name for mutual CHAP authentication')
    p.add_argument('-r', '--msecret', help='Secret for mutual CHAP authentication')
    p.set_defaults(func=iscsi_auth_group_add_secret)

    def iscsi_auth_group_remove_secret(args):
        # Remove the secret keyed by user name from an authentication group.
        rpc.iscsi.iscsi_auth_group_remove_secret(args.client, tag=args.tag, user=args.user)

    p = subparsers.add_parser('iscsi_auth_group_remove_secret', aliases=['delete_secret_from_iscsi_auth_group'],
                              help='Remove a secret from an authentication group.')
    p.add_argument('tag', help='Authentication group tag', type=int)
    p.add_argument('-u', '--user', help='User name for one-way CHAP authentication', required=True)
    p.set_defaults(func=iscsi_auth_group_remove_secret)

    def iscsi_get_auth_groups(args):
        # Print the current authentication group configuration.
        print_dict(rpc.iscsi.iscsi_get_auth_groups(args.client))

    p = subparsers.add_parser('iscsi_get_auth_groups', aliases=['get_iscsi_auth_groups'],
                              help='Display current authentication group configuration')
    p.set_defaults(func=iscsi_get_auth_groups)

    def iscsi_get_portal_groups(args):
        # Print the current portal group configuration.
        print_dict(rpc.iscsi.iscsi_get_portal_groups(args.client))

    p = subparsers.add_parser(
        'iscsi_get_portal_groups', aliases=['get_portal_groups'],
        help='Display current portal group configuration')
    p.set_defaults(func=iscsi_get_portal_groups)

    def iscsi_get_initiator_groups(args):
        # Print the current initiator group configuration.
        print_dict(rpc.iscsi.iscsi_get_initiator_groups(args.client))

    p = subparsers.add_parser('iscsi_get_initiator_groups',
                              aliases=['get_initiator_groups'],
                              help='Display current initiator group configuration')
    p.set_defaults(func=iscsi_get_initiator_groups)

    def iscsi_get_target_nodes(args):
        # Print the configured iSCSI target nodes.
        print_dict(rpc.iscsi.iscsi_get_target_nodes(args.client))

    p = subparsers.add_parser('iscsi_get_target_nodes', aliases=['get_target_nodes'],
                              help='Display target nodes')
    p.set_defaults(func=iscsi_get_target_nodes)
+
+ def iscsi_create_target_node(args):
+ luns = []
+ for u in args.bdev_name_id_pairs.strip().split(" "):
+ bdev_name, lun_id = u.split(":")
+ luns.append({"bdev_name": bdev_name, "lun_id": int(lun_id)})
+
+ pg_ig_maps = []
+ for u in args.pg_ig_mappings.strip().split(" "):
+ pg, ig = u.split(":")
+ pg_ig_maps.append({"pg_tag": int(pg), "ig_tag": int(ig)})
+
+ rpc.iscsi.iscsi_create_target_node(
+ args.client,
+ luns=luns,
+ pg_ig_maps=pg_ig_maps,
+ name=args.name,
+ alias_name=args.alias_name,
+ queue_depth=args.queue_depth,
+ chap_group=args.chap_group,
+ disable_chap=args.disable_chap,
+ require_chap=args.require_chap,
+ mutual_chap=args.mutual_chap,
+ header_digest=args.header_digest,
+ data_digest=args.data_digest)
+
+ p = subparsers.add_parser('iscsi_create_target_node', aliases=['construct_target_node'],
+ help='Add a target node')
+ p.add_argument('name', help='Target node name (ASCII)')
+ p.add_argument('alias_name', help='Target node alias name (ASCII)')
+ p.add_argument('bdev_name_id_pairs', help="""Whitespace-separated list of <bdev name:LUN ID> pairs enclosed
+ in quotes. Format: 'bdev_name0:id0 bdev_name1:id1' etc
+ Example: 'Malloc0:0 Malloc1:1 Malloc5:2'
+ *** The bdevs must pre-exist ***
+ *** LUN0 (id = 0) is required ***
+ *** bdevs names cannot contain space or colon characters ***""")
+ p.add_argument('pg_ig_mappings', help="""List of (Portal_Group_Tag:Initiator_Group_Tag) mappings
+ Whitespace separated, quoted, mapping defined with colon
+ separated list of "tags" (int > 0)
+ Example: '1:1 2:2 2:1'
+ *** The Portal/Initiator Groups must be precreated ***""")
+ p.add_argument('queue_depth', help='Desired target queue depth', type=int)
+ p.add_argument('-g', '--chap-group', help="""Authentication group ID for this target node.
+ *** Authentication group must be precreated ***""", type=int, default=0)
+ p.add_argument('-d', '--disable-chap', help="""CHAP authentication should be disabled for this target node.
+ *** Mutually exclusive with --require-chap ***""", action='store_true')
+ p.add_argument('-r', '--require-chap', help="""CHAP authentication should be required for this target node.
+ *** Mutually exclusive with --disable-chap ***""", action='store_true')
+ p.add_argument(
+ '-m', '--mutual-chap', help='CHAP authentication should be mutual/bidirectional.', action='store_true')
+ p.add_argument('-H', '--header-digest',
+ help='Header Digest should be required for this target node.', action='store_true')
+ p.add_argument('-D', '--data-digest',
+ help='Data Digest should be required for this target node.', action='store_true')
+ p.set_defaults(func=iscsi_create_target_node)
+
    def iscsi_target_node_add_lun(args):
        # Attach a bdev as a LUN on an existing target node; lun_id may be None
        # (flag omitted) and is then chosen by the target.
        rpc.iscsi.iscsi_target_node_add_lun(
            args.client,
            name=args.name,
            bdev_name=args.bdev_name,
            lun_id=args.lun_id)

    p = subparsers.add_parser('iscsi_target_node_add_lun', aliases=['target_node_add_lun'],
                              help='Add LUN to the target node')
    p.add_argument('name', help='Target node name (ASCII)')
    p.add_argument('bdev_name', help="""bdev name enclosed in quotes.
    *** bdev name cannot contain space or colon characters ***""")
    p.add_argument('-i', dest='lun_id', help="""LUN ID (integer >= 0)
    *** If LUN ID is omitted or -1, the lowest free one is assigned ***""", type=int, required=False)
    p.set_defaults(func=iscsi_target_node_add_lun)

    def iscsi_target_node_set_auth(args):
        # Configure CHAP authentication for a single target node.
        rpc.iscsi.iscsi_target_node_set_auth(
            args.client,
            name=args.name,
            chap_group=args.chap_group,
            disable_chap=args.disable_chap,
            require_chap=args.require_chap,
            mutual_chap=args.mutual_chap)

    p = subparsers.add_parser('iscsi_target_node_set_auth', aliases=['set_iscsi_target_node_auth'],
                              help='Set CHAP authentication for the target node')
    p.add_argument('name', help='Target node name (ASCII)')
    p.add_argument('-g', '--chap-group', help="""Authentication group ID for this target node.
    *** Authentication group must be precreated ***""", type=int, default=0)
    p.add_argument('-d', '--disable-chap', help="""CHAP authentication should be disabled for this target node.
    *** Mutually exclusive with --require-chap ***""", action='store_true')
    p.add_argument('-r', '--require-chap', help="""CHAP authentication should be required for this target node.
    *** Mutually exclusive with --disable-chap ***""", action='store_true')
    p.add_argument('-m', '--mutual-chap', help='CHAP authentication should be mutual/bidirectional.',
                   action='store_true')
    p.set_defaults(func=iscsi_target_node_set_auth)
+
+ def iscsi_target_node_add_pg_ig_maps(args):
+ pg_ig_maps = []
+ for u in args.pg_ig_mappings.strip().split(" "):
+ pg, ig = u.split(":")
+ pg_ig_maps.append({"pg_tag": int(pg), "ig_tag": int(ig)})
+ rpc.iscsi.iscsi_target_node_add_pg_ig_maps(
+ args.client,
+ pg_ig_maps=pg_ig_maps,
+ name=args.name)
+
+ p = subparsers.add_parser('iscsi_target_node_add_pg_ig_maps',
+ aliases=['add_pg_ig_maps'],
+ help='Add PG-IG maps to the target node')
+ p.add_argument('name', help='Target node name (ASCII)')
+ p.add_argument('pg_ig_mappings', help="""List of (Portal_Group_Tag:Initiator_Group_Tag) mappings
+ Whitespace separated, quoted, mapping defined with colon
+ separated list of "tags" (int > 0)
+ Example: '1:1 2:2 2:1'
+ *** The Portal/Initiator Groups must be precreated ***""")
+ p.set_defaults(func=iscsi_target_node_add_pg_ig_maps)
+
+ def iscsi_target_node_remove_pg_ig_maps(args):
+ pg_ig_maps = []
+ for u in args.pg_ig_mappings.strip().split(" "):
+ pg, ig = u.split(":")
+ pg_ig_maps.append({"pg_tag": int(pg), "ig_tag": int(ig)})
+ rpc.iscsi.iscsi_target_node_remove_pg_ig_maps(
+ args.client, pg_ig_maps=pg_ig_maps, name=args.name)
+
+ p = subparsers.add_parser('iscsi_target_node_remove_pg_ig_maps',
+ aliases=['delete_pg_ig_maps'],
+ help='Delete PG-IG maps from the target node')
+ p.add_argument('name', help='Target node name (ASCII)')
+ p.add_argument('pg_ig_mappings', help="""List of (Portal_Group_Tag:Initiator_Group_Tag) mappings
+ Whitespace separated, quoted, mapping defined with colon
+ separated list of "tags" (int > 0)
+ Example: '1:1 2:2 2:1'
+ *** The Portal/Initiator Groups must be precreated ***""")
+ p.set_defaults(func=iscsi_target_node_remove_pg_ig_maps)
+
+ def iscsi_create_portal_group(args):
+ portals = []
+ for p in args.portal_list.strip().split(' '):
+ ip, separator, port_cpumask = p.rpartition(':')
+ split_port_cpumask = port_cpumask.split('@')
+ if len(split_port_cpumask) == 1:
+ port = port_cpumask
+ portals.append({'host': ip, 'port': port})
+ else:
+ port = split_port_cpumask[0]
+ cpumask = split_port_cpumask[1]
+ portals.append({'host': ip, 'port': port})
+ print("WARNING: Specifying a portal group with a CPU mask is no longer supported. Ignoring it.")
+ rpc.iscsi.iscsi_create_portal_group(
+ args.client,
+ portals=portals,
+ tag=args.tag)
+
+ p = subparsers.add_parser('iscsi_create_portal_group', aliases=['add_portal_group'],
+ help='Add a portal group')
+ p.add_argument(
+ 'tag', help='Portal group tag (unique, integer > 0)', type=int)
+ p.add_argument('portal_list', help="""List of portals in host:port format, separated by whitespace
+ Example: '192.168.100.100:3260 192.168.100.100:3261 192.168.100.100:3262""")
+ p.set_defaults(func=iscsi_create_portal_group)
+
+ def iscsi_create_initiator_group(args):
+ initiators = []
+ netmasks = []
+ for i in args.initiator_list.strip().split(' '):
+ initiators.append(i)
+ for n in args.netmask_list.strip().split(' '):
+ netmasks.append(n)
+ rpc.iscsi.iscsi_create_initiator_group(
+ args.client,
+ tag=args.tag,
+ initiators=initiators,
+ netmasks=netmasks)
+
+ p = subparsers.add_parser('iscsi_create_initiator_group', aliases=['add_initiator_group'],
+ help='Add an initiator group')
+ p.add_argument(
+ 'tag', help='Initiator group tag (unique, integer > 0)', type=int)
+ p.add_argument('initiator_list', help="""Whitespace-separated list of initiator hostnames or IP addresses,
+ enclosed in quotes. Example: 'ANY' or '127.0.0.1 192.168.200.100'""")
+ p.add_argument('netmask_list', help="""Whitespace-separated list of initiator netmasks enclosed in quotes.
+ Example: '255.255.0.0 255.248.0.0' etc""")
+ p.set_defaults(func=iscsi_create_initiator_group)
+
+ def iscsi_initiator_group_add_initiators(args):
+ initiators = None
+ netmasks = None
+ if args.initiator_list:
+ initiators = []
+ for i in args.initiator_list.strip().split(' '):
+ initiators.append(i)
+ if args.netmask_list:
+ netmasks = []
+ for n in args.netmask_list.strip().split(' '):
+ netmasks.append(n)
+ rpc.iscsi.iscsi_initiator_group_add_initiators(
+ args.client,
+ tag=args.tag,
+ initiators=initiators,
+ netmasks=netmasks)
+
+ p = subparsers.add_parser('iscsi_initiator_group_add_initiators',
+ aliases=['add_initiators_to_initiator_group'],
+ help='Add initiators to an existing initiator group')
+ p.add_argument(
+ 'tag', help='Initiator group tag (unique, integer > 0)', type=int)
+ p.add_argument('-n', dest='initiator_list', help="""Whitespace-separated list of initiator hostnames or IP addresses,
+ enclosed in quotes. This parameter can be omitted. Example: 'ANY' or '127.0.0.1 192.168.200.100'""", required=False)
+ p.add_argument('-m', dest='netmask_list', help="""Whitespace-separated list of initiator netmasks enclosed in quotes.
+ This parameter can be omitted. Example: '255.255.0.0 255.248.0.0' etc""", required=False)
+ p.set_defaults(func=iscsi_initiator_group_add_initiators)
+
+ def iscsi_initiator_group_remove_initiators(args):
+ initiators = None
+ netmasks = None
+ if args.initiator_list:
+ initiators = []
+ for i in args.initiator_list.strip().split(' '):
+ initiators.append(i)
+ if args.netmask_list:
+ netmasks = []
+ for n in args.netmask_list.strip().split(' '):
+ netmasks.append(n)
+ rpc.iscsi.iscsi_initiator_group_remove_initiators(
+ args.client,
+ tag=args.tag,
+ initiators=initiators,
+ netmasks=netmasks)
+
+ p = subparsers.add_parser('iscsi_initiator_group_remove_initiators',
+ aliases=['delete_initiators_from_initiator_group'],
+ help='Delete initiators from an existing initiator group')
+ p.add_argument(
+ 'tag', help='Initiator group tag (unique, integer > 0)', type=int)
+ p.add_argument('-n', dest='initiator_list', help="""Whitespace-separated list of initiator hostnames or IP addresses,
+ enclosed in quotes. This parameter can be omitted. Example: 'ANY' or '127.0.0.1 192.168.200.100'""", required=False)
+ p.add_argument('-m', dest='netmask_list', help="""Whitespace-separated list of initiator netmasks enclosed in quotes.
+ This parameter can be omitted. Example: '255.255.0.0 255.248.0.0' etc""", required=False)
+ p.set_defaults(func=iscsi_initiator_group_remove_initiators)
+
    def iscsi_delete_target_node(args):
        # Delete a target node by its full IQN name.
        rpc.iscsi.iscsi_delete_target_node(
            args.client, target_node_name=args.target_node_name)

    p = subparsers.add_parser('iscsi_delete_target_node', aliases=['delete_target_node'],
                              help='Delete a target node')
    p.add_argument('target_node_name',
                   help='Target node name to be deleted. Example: iqn.2016-06.io.spdk:disk1.')
    p.set_defaults(func=iscsi_delete_target_node)

    def iscsi_delete_portal_group(args):
        # Delete a portal group by tag.
        rpc.iscsi.iscsi_delete_portal_group(args.client, tag=args.tag)

    p = subparsers.add_parser('iscsi_delete_portal_group',
                              aliases=['delete_portal_group'],
                              help='Delete a portal group')
    p.add_argument(
        'tag', help='Portal group tag (unique, integer > 0)', type=int)
    p.set_defaults(func=iscsi_delete_portal_group)

    def iscsi_delete_initiator_group(args):
        # Delete an initiator group by tag.
        rpc.iscsi.iscsi_delete_initiator_group(args.client, tag=args.tag)

    p = subparsers.add_parser('iscsi_delete_initiator_group',
                              aliases=['delete_initiator_group'],
                              help='Delete an initiator group')
    p.add_argument(
        'tag', help='Initiator group tag (unique, integer > 0)', type=int)
    p.set_defaults(func=iscsi_delete_initiator_group)

    def iscsi_portal_group_set_auth(args):
        # Configure CHAP authentication for discovery sessions of one portal group.
        rpc.iscsi.iscsi_portal_group_set_auth(
            args.client,
            tag=args.tag,
            chap_group=args.chap_group,
            disable_chap=args.disable_chap,
            require_chap=args.require_chap,
            mutual_chap=args.mutual_chap)

    p = subparsers.add_parser('iscsi_portal_group_set_auth',
                              help='Set CHAP authentication for discovery sessions specific for the portal group')
    p.add_argument('tag', help='Portal group tag (unique, integer > 0)', type=int)
    p.add_argument('-g', '--chap-group', help="""Authentication group ID for this portal group.
    *** Authentication group must be precreated ***""", type=int, default=0)
    p.add_argument('-d', '--disable-chap', help="""CHAP authentication should be disabled for this portal group.
    *** Mutually exclusive with --require-chap ***""", action='store_true')
    p.add_argument('-r', '--require-chap', help="""CHAP authentication should be required for this portal group.
    *** Mutually exclusive with --disable-chap ***""", action='store_true')
    p.add_argument('-m', '--mutual-chap', help='CHAP authentication should be mutual/bidirectional.',
                   action='store_true')
    p.set_defaults(func=iscsi_portal_group_set_auth)

    def iscsi_get_connections(args):
        # Print the active iSCSI connections.
        print_dict(rpc.iscsi.iscsi_get_connections(args.client))

    p = subparsers.add_parser('iscsi_get_connections', aliases=['get_iscsi_connections'],
                              help='Display iSCSI connections')
    p.set_defaults(func=iscsi_get_connections)

    def iscsi_get_options(args):
        # Print the current global iSCSI parameters.
        print_dict(rpc.iscsi.iscsi_get_options(args.client))

    p = subparsers.add_parser('iscsi_get_options', aliases=['get_iscsi_global_params'],
                              help='Display iSCSI global parameters')
    p.set_defaults(func=iscsi_get_options)

    def scsi_get_devices(args):
        # Print the SCSI devices known to the target.
        print_dict(rpc.iscsi.scsi_get_devices(args.client))

    p = subparsers.add_parser('scsi_get_devices', aliases=['get_scsi_devices'],
                              help='Display SCSI devices')
    p.set_defaults(func=scsi_get_devices)
+
    # trace
    def trace_enable_tpoint_group(args):
        # Enable tracing for one tracepoint group (or "all").
        rpc.trace.trace_enable_tpoint_group(args.client, name=args.name)

    p = subparsers.add_parser('trace_enable_tpoint_group', aliases=['enable_tpoint_group'],
                              help='enable trace on a specific tpoint group')
    p.add_argument(
        'name', help="""trace group name we want to enable in tpoint_group_mask.
        (for example "bdev" for bdev trace group, "all" for all trace groups).""")
    p.set_defaults(func=trace_enable_tpoint_group)

    def trace_disable_tpoint_group(args):
        # Disable tracing for one tracepoint group (or "all").
        rpc.trace.trace_disable_tpoint_group(args.client, name=args.name)

    p = subparsers.add_parser('trace_disable_tpoint_group', aliases=['disable_tpoint_group'],
                              help='disable trace on a specific tpoint group')
    p.add_argument(
        'name', help="""trace group name we want to disable in tpoint_group_mask.
        (for example "bdev" for bdev trace group, "all" for all trace groups).""")
    p.set_defaults(func=trace_disable_tpoint_group)

    def trace_get_tpoint_group_mask(args):
        # Print the current tracepoint group mask.
        print_dict(rpc.trace.trace_get_tpoint_group_mask(args.client))

    p = subparsers.add_parser('trace_get_tpoint_group_mask', aliases=['get_tpoint_group_mask'],
                              help='get trace point group mask')
    p.set_defaults(func=trace_get_tpoint_group_mask)

    # log
    def log_set_flag(args):
        # Set one log flag (e.g. "nvme").
        rpc.log.log_set_flag(args.client, flag=args.flag)

    p = subparsers.add_parser('log_set_flag', help='set log flag', aliases=['set_log_flag'])
    p.add_argument(
        'flag', help='log flag we want to set. (for example "nvme").')
    p.set_defaults(func=log_set_flag)

    def log_clear_flag(args):
        # Clear one log flag.
        rpc.log.log_clear_flag(args.client, flag=args.flag)

    p = subparsers.add_parser('log_clear_flag', help='clear log flag', aliases=['clear_log_flag'])
    p.add_argument(
        'flag', help='log flag we want to clear. (for example "nvme").')
    p.set_defaults(func=log_clear_flag)

    def log_get_flags(args):
        # Print all log flags and their state.
        print_dict(rpc.log.log_get_flags(args.client))

    p = subparsers.add_parser('log_get_flags', help='get log flags', aliases=['get_log_flags'])
    p.set_defaults(func=log_get_flags)

    def log_set_level(args):
        # Set the log level (e.g. "DEBUG").
        rpc.log.log_set_level(args.client, level=args.level)

    p = subparsers.add_parser('log_set_level', aliases=['set_log_level'],
                              help='set log level')
    p.add_argument('level', help='log level we want to set. (for example "DEBUG").')
    p.set_defaults(func=log_set_level)

    def log_get_level(args):
        # Print the current log level.
        print_dict(rpc.log.log_get_level(args.client))

    p = subparsers.add_parser('log_get_level', aliases=['get_log_level'],
                              help='get log level')
    p.set_defaults(func=log_get_level)

    def log_set_print_level(args):
        # Set the log print (console) level.
        rpc.log.log_set_print_level(args.client, level=args.level)

    p = subparsers.add_parser('log_set_print_level', aliases=['set_log_print_level'],
                              help='set log print level')
    p.add_argument('level', help='log print level we want to set. (for example "DEBUG").')
    p.set_defaults(func=log_set_print_level)

    def log_get_print_level(args):
        # Print the current log print (console) level.
        print_dict(rpc.log.log_get_print_level(args.client))

    p = subparsers.add_parser('log_get_print_level', aliases=['get_log_print_level'],
                              help='get log print level')
    p.set_defaults(func=log_get_print_level)
+
    # lvol
    def bdev_lvol_create_lvstore(args):
        # Create a logical volume store on a base bdev; prints the new lvstore UUID.
        print_json(rpc.lvol.bdev_lvol_create_lvstore(args.client,
                                                     bdev_name=args.bdev_name,
                                                     lvs_name=args.lvs_name,
                                                     cluster_sz=args.cluster_sz,
                                                     clear_method=args.clear_method))

    p = subparsers.add_parser('bdev_lvol_create_lvstore', aliases=['construct_lvol_store'],
                              help='Add logical volume store on base bdev')
    p.add_argument('bdev_name', help='base bdev name')
    p.add_argument('lvs_name', help='name for lvol store')
    p.add_argument('-c', '--cluster-sz', help='size of cluster (in bytes)', type=int, required=False)
    p.add_argument('--clear-method', help="""Change clear method for data region.
    Available: none, unmap, write_zeroes""", required=False)
    p.set_defaults(func=bdev_lvol_create_lvstore)

    def bdev_lvol_rename_lvstore(args):
        # Rename an existing logical volume store.
        rpc.lvol.bdev_lvol_rename_lvstore(args.client,
                                          old_name=args.old_name,
                                          new_name=args.new_name)

    p = subparsers.add_parser('bdev_lvol_rename_lvstore', aliases=['rename_lvol_store'],
                              help='Change logical volume store name')
    p.add_argument('old_name', help='old name')
    p.add_argument('new_name', help='new name')
    p.set_defaults(func=bdev_lvol_rename_lvstore)

    def bdev_lvol_create(args):
        # Create an lvol bdev; the CLI size is in MiB, the RPC takes bytes.
        print_json(rpc.lvol.bdev_lvol_create(args.client,
                                             lvol_name=args.lvol_name,
                                             size=args.size * 1024 * 1024,
                                             thin_provision=args.thin_provision,
                                             clear_method=args.clear_method,
                                             uuid=args.uuid,
                                             lvs_name=args.lvs_name))

    p = subparsers.add_parser('bdev_lvol_create', aliases=['construct_lvol_bdev'],
                              help='Add a bdev with an logical volume backend')
    p.add_argument('-u', '--uuid', help='lvol store UUID', required=False)
    p.add_argument('-l', '--lvs-name', help='lvol store name', required=False)
    p.add_argument('-t', '--thin-provision', action='store_true', help='create lvol bdev as thin provisioned')
    p.add_argument('-c', '--clear-method', help="""Change default data clusters clear method.
    Available: none, unmap, write_zeroes""", required=False)
    p.add_argument('lvol_name', help='name for this lvol')
    p.add_argument('size', help='size in MiB for this bdev', type=int)
    p.set_defaults(func=bdev_lvol_create)

    def bdev_lvol_snapshot(args):
        # Create a named snapshot of an lvol bdev; prints the snapshot's name/UUID.
        print_json(rpc.lvol.bdev_lvol_snapshot(args.client,
                                               lvol_name=args.lvol_name,
                                               snapshot_name=args.snapshot_name))

    p = subparsers.add_parser('bdev_lvol_snapshot', aliases=['snapshot_lvol_bdev'],
                              help='Create a snapshot of an lvol bdev')
    p.add_argument('lvol_name', help='lvol bdev name')
    p.add_argument('snapshot_name', help='lvol snapshot name')
    p.set_defaults(func=bdev_lvol_snapshot)

    def bdev_lvol_clone(args):
        # Create a clone from an lvol snapshot; prints the clone's name/UUID.
        print_json(rpc.lvol.bdev_lvol_clone(args.client,
                                            snapshot_name=args.snapshot_name,
                                            clone_name=args.clone_name))

    p = subparsers.add_parser('bdev_lvol_clone', aliases=['clone_lvol_bdev'],
                              help='Create a clone of an lvol snapshot')
    p.add_argument('snapshot_name', help='lvol snapshot name')
    p.add_argument('clone_name', help='lvol clone name')
    p.set_defaults(func=bdev_lvol_clone)

    def bdev_lvol_rename(args):
        # Rename an lvol bdev.
        rpc.lvol.bdev_lvol_rename(args.client,
                                  old_name=args.old_name,
                                  new_name=args.new_name)

    p = subparsers.add_parser('bdev_lvol_rename', aliases=['rename_lvol_bdev'],
                              help='Change lvol bdev name')
    p.add_argument('old_name', help='lvol bdev name')
    p.add_argument('new_name', help='new lvol name')
    p.set_defaults(func=bdev_lvol_rename)

    def bdev_lvol_inflate(args):
        # Convert a thin-provisioned lvol into a thick-provisioned one.
        rpc.lvol.bdev_lvol_inflate(args.client,
                                   name=args.name)

    p = subparsers.add_parser('bdev_lvol_inflate', aliases=['inflate_lvol_bdev'],
                              help='Make thin provisioned lvol a thick provisioned lvol')
    p.add_argument('name', help='lvol bdev name')
    p.set_defaults(func=bdev_lvol_inflate)

    def bdev_lvol_decouple_parent(args):
        # Decouple an lvol from its parent.
        rpc.lvol.bdev_lvol_decouple_parent(args.client,
                                           name=args.name)

    p = subparsers.add_parser('bdev_lvol_decouple_parent', aliases=['decouple_parent_lvol_bdev'],
                              help='Decouple parent of lvol')
    p.add_argument('name', help='lvol bdev name')
    p.set_defaults(func=bdev_lvol_decouple_parent)

    def bdev_lvol_resize(args):
        # Resize an lvol bdev; the CLI size is in MiB, the RPC takes bytes.
        rpc.lvol.bdev_lvol_resize(args.client,
                                  name=args.name,
                                  size=args.size * 1024 * 1024)

    p = subparsers.add_parser('bdev_lvol_resize', aliases=['resize_lvol_bdev'],
                              help='Resize existing lvol bdev')
    p.add_argument('name', help='lvol bdev name')
    p.add_argument('size', help='new size in MiB for this bdev', type=int)
    p.set_defaults(func=bdev_lvol_resize)

    def bdev_lvol_set_read_only(args):
        # Mark an lvol bdev read-only.
        rpc.lvol.bdev_lvol_set_read_only(args.client,
                                         name=args.name)

    p = subparsers.add_parser('bdev_lvol_set_read_only', aliases=['set_read_only_lvol_bdev'],
                              help='Mark lvol bdev as read only')
    p.add_argument('name', help='lvol bdev name')
    p.set_defaults(func=bdev_lvol_set_read_only)

    def bdev_lvol_delete(args):
        # Destroy an lvol bdev.
        rpc.lvol.bdev_lvol_delete(args.client,
                                  name=args.name)

    p = subparsers.add_parser('bdev_lvol_delete', aliases=['destroy_lvol_bdev'],
                              help='Destroy a logical volume')
    p.add_argument('name', help='lvol bdev name')
    p.set_defaults(func=bdev_lvol_delete)
+
+ def bdev_lvol_delete_lvstore(args):
+ rpc.lvol.bdev_lvol_delete_lvstore(args.client,
+ uuid=args.uuid,
+ lvs_name=args.lvs_name)
+
+ p = subparsers.add_parser('bdev_lvol_delete_lvstore', aliases=['destroy_lvol_store'],
+ help='Destroy an logical volume store')
+ p.add_argument('-u', '--uuid', help='lvol store UUID', required=False)
+ p.add_argument('-l', '--lvs-name', help='lvol store name', required=False)
+ p.set_defaults(func=bdev_lvol_delete_lvstore)
+
+ def bdev_lvol_get_lvstores(args):
+ print_dict(rpc.lvol.bdev_lvol_get_lvstores(args.client,
+ uuid=args.uuid,
+ lvs_name=args.lvs_name))
+
+ p = subparsers.add_parser('bdev_lvol_get_lvstores', aliases=['get_lvol_stores'],
+ help='Display current logical volume store list')
+ p.add_argument('-u', '--uuid', help='lvol store UUID', required=False)
+ p.add_argument('-l', '--lvs-name', help='lvol store name', required=False)
+ p.set_defaults(func=bdev_lvol_get_lvstores)
+
# -- raid ------------------------------------------------------------------

def bdev_raid_get_bdevs(args):
    """List raid bdev names matching the requested category."""
    print_array(rpc.bdev.bdev_raid_get_bdevs(args.client,
                                             category=args.category))

p = subparsers.add_parser('bdev_raid_get_bdevs', aliases=['get_raid_bdevs'],
                          help="""This is used to list all the raid bdev names based on the input category
requested. Category should be one of 'all', 'online', 'configuring' or 'offline'. 'all' means all the raid bdevs whether
they are online or configuring or offline. 'online' is the raid bdev which is registered with bdev layer. 'configuring'
is the raid bdev which does not have full configuration discovered yet. 'offline' is the raid bdev which is not registered
with bdev as of now and it has encountered any error or user has requested to offline the raid bdev""")
p.add_argument('category', help='all or online or configuring or offline')
p.set_defaults(func=bdev_raid_get_bdevs)

def bdev_raid_create(args):
    """Create a raid bdev from a whitespace-separated list of base bdevs."""
    # Equivalent to the historical append loop: split the quoted list once.
    base_bdevs = args.base_bdevs.strip().split(" ")
    rpc.bdev.bdev_raid_create(args.client,
                              name=args.name,
                              strip_size=args.strip_size,
                              strip_size_kb=args.strip_size_kb,
                              raid_level=args.raid_level,
                              base_bdevs=base_bdevs)

p = subparsers.add_parser('bdev_raid_create', aliases=['construct_raid_bdev'],
                          help='Create new raid bdev')
p.add_argument('-n', '--name', help='raid bdev name', required=True)
p.add_argument('-s', '--strip-size', help='strip size in KB (deprecated)', type=int)
p.add_argument('-z', '--strip-size_kb', help='strip size in KB', type=int)
p.add_argument('-r', '--raid-level', help='raid level, only raid level 0 is supported', required=True)
p.add_argument('-b', '--base-bdevs', help='base bdevs name, whitespace separated list in quotes', required=True)
p.set_defaults(func=bdev_raid_create)

def bdev_raid_delete(args):
    """Delete an existing raid bdev."""
    rpc.bdev.bdev_raid_delete(args.client, name=args.name)

p = subparsers.add_parser('bdev_raid_delete', aliases=['destroy_raid_bdev'],
                          help='Delete existing raid bdev')
p.add_argument('name', help='raid bdev name')
p.set_defaults(func=bdev_raid_delete)
+
# -- split -----------------------------------------------------------------

def bdev_split_create(args):
    """Split a base bdev into split_count vbdevs; prints the created names."""
    print_array(rpc.bdev.bdev_split_create(args.client,
                                           base_bdev=args.base_bdev,
                                           split_count=args.split_count,
                                           split_size_mb=args.split_size_mb))

p = subparsers.add_parser('bdev_split_create', aliases=['construct_split_vbdev'],
                          help="""Add given disk name to split config. If bdev with base_name
name exist the split bdevs will be created right away, if not split bdevs will be created when base bdev became
available (during examination process).""")
p.add_argument('base_bdev', help='base bdev name')
p.add_argument('-s', '--split-size-mb', help='size in MiB for each bdev', type=int, default=0)
p.add_argument('split_count', help="""Optional - number of split bdevs to create. Total size * split_count must not
exceed the base bdev size.""", type=int)
p.set_defaults(func=bdev_split_create)

def bdev_split_delete(args):
    """Remove the split configuration together with all bdevs created from it."""
    rpc.bdev.bdev_split_delete(args.client, base_bdev=args.base_bdev)

p = subparsers.add_parser('bdev_split_delete', aliases=['destruct_split_vbdev'],
                          help="""Delete split config with all created splits.""")
p.add_argument('base_bdev', help='base bdev name')
p.set_defaults(func=bdev_split_delete)
+
# -- ftl -------------------------------------------------------------------
ftl_valid_limits = ('crit', 'high', 'low', 'start')

def bdev_ftl_create(args):
    """Create an FTL bdev on top of a zoned base bdev."""
    def parse_limits(limits, arg_dict, key_suffix=''):
        # "key:value,key:value" -> arg_dict['limit_<key><suffix>'] = int(value)
        for limit in limits.split(','):
            key, value = limit.split(':', 1)
            if key not in ftl_valid_limits:
                raise ValueError('Limit {} is not supported'.format(key))
            arg_dict['limit_' + key + key_suffix] = int(value)

    arg_limits = {}
    if args.limit_threshold:
        parse_limits(args.limit_threshold, arg_limits, '_threshold')
    if args.limit:
        parse_limits(args.limit, arg_limits)

    print_dict(rpc.bdev.bdev_ftl_create(args.client,
                                        name=args.name,
                                        base_bdev=args.base_bdev,
                                        uuid=args.uuid,
                                        cache=args.cache,
                                        allow_open_bands=args.allow_open_bands,
                                        overprovisioning=args.overprovisioning,
                                        l2p_path=args.l2p_path,
                                        use_append=args.use_append,
                                        **arg_limits))

p = subparsers.add_parser('bdev_ftl_create', aliases=['construct_ftl_bdev'], help='Add FTL bdev')
p.add_argument('-b', '--name', help="Name of the bdev", required=True)
p.add_argument('-d', '--base_bdev', help='Name of zoned bdev used as underlying device',
               required=True)
p.add_argument('-u', '--uuid', help='UUID of restored bdev (not applicable when creating new '
                                    'instance): e.g. b286d19a-0059-4709-abcd-9f7732b1567d (optional)')
p.add_argument('-c', '--cache', help='Name of the bdev to be used as a write buffer cache (optional)')
p.add_argument('-o', '--allow_open_bands', help='Restoring after dirty shutdown without cache will'
                                                ' result in partial data recovery, instead of error', action='store_true')
p.add_argument('--overprovisioning', help='Percentage of device used for relocation, not exposed'
                                          ' to user (optional)', type=int)
p.add_argument('--l2p_path', help='Path to persistent memory file or device to store l2p onto, '
                                  'by default l2p is kept in DRAM and is volatile (optional)')
p.add_argument('--use_append', help='Use appends instead of writes', action='store_true')

limits = p.add_argument_group('Defrag limits', 'Configures defrag limits and thresholds for'
                              ' levels ' + str(ftl_valid_limits)[1:-1])
limits.add_argument('--limit', help='Percentage of allowed user versus internal writes at given'
                    ' levels, e.g. crit:0,high:20,low:80')
limits.add_argument('--limit-threshold', help='Number of free bands triggering a given level of'
                    ' write limiting e.g. crit:1,high:2,low:3,start:4')
p.set_defaults(func=bdev_ftl_create)

def bdev_ftl_delete(args):
    """Delete an FTL bdev by name."""
    print_dict(rpc.bdev.bdev_ftl_delete(args.client, name=args.name))

p = subparsers.add_parser('bdev_ftl_delete', aliases=['delete_ftl_bdev'],
                          help='Delete FTL bdev')
p.add_argument('-b', '--name', help="Name of the bdev", required=True)
p.set_defaults(func=bdev_ftl_delete)
+
# -- vmd / nbd -------------------------------------------------------------

def enable_vmd(args):
    """Enable VMD enumeration on the target."""
    print_dict(rpc.vmd.enable_vmd(args.client))

p = subparsers.add_parser('enable_vmd', help='Enable VMD enumeration')
p.set_defaults(func=enable_vmd)

def nbd_start_disk(args):
    """Export a bdev through the kernel nbd driver; prints the device path."""
    print(rpc.nbd.nbd_start_disk(args.client,
                                 bdev_name=args.bdev_name,
                                 nbd_device=args.nbd_device))

p = subparsers.add_parser('nbd_start_disk', aliases=['start_nbd_disk'],
                          help='Export a bdev as an nbd disk')
p.add_argument('bdev_name', help='Blockdev name to be exported. Example: Malloc0.')
p.add_argument('nbd_device', help='Nbd device name to be assigned. Example: /dev/nbd0.', nargs='?')
p.set_defaults(func=nbd_start_disk)

def nbd_stop_disk(args):
    """Stop exporting a bdev through nbd."""
    rpc.nbd.nbd_stop_disk(args.client, nbd_device=args.nbd_device)

p = subparsers.add_parser('nbd_stop_disk', aliases=['stop_nbd_disk'],
                          help='Stop an nbd disk')
p.add_argument('nbd_device', help='Nbd device name to be stopped. Example: /dev/nbd0.')
p.set_defaults(func=nbd_stop_disk)

def nbd_get_disks(args):
    """List nbd exports, optionally filtered to a single device."""
    print_dict(rpc.nbd.nbd_get_disks(args.client, nbd_device=args.nbd_device))

p = subparsers.add_parser('nbd_get_disks', aliases=['get_nbd_disks'],
                          help='Display full or specified nbd device list')
p.add_argument('-n', '--nbd-device', help="Path of the nbd device. Example: /dev/nbd0", required=False)
p.set_defaults(func=nbd_get_disks)
+
# -- net -------------------------------------------------------------------

def net_interface_add_ip_address(args):
    """Add an IP address to the interface with the given index."""
    rpc.net.net_interface_add_ip_address(args.client, ifc_index=args.ifc_index, ip_addr=args.ip_addr)

p = subparsers.add_parser('net_interface_add_ip_address', aliases=['add_ip_address'],
                          help='Add IP address')
p.add_argument('ifc_index', help='ifc index of the nic device.', type=int)
p.add_argument('ip_addr', help='ip address will be added.')
p.set_defaults(func=net_interface_add_ip_address)

def net_interface_delete_ip_address(args):
    """Remove an IP address from the interface with the given index."""
    rpc.net.net_interface_delete_ip_address(args.client, ifc_index=args.ifc_index, ip_addr=args.ip_addr)

p = subparsers.add_parser('net_interface_delete_ip_address', aliases=['delete_ip_address'],
                          help='Delete IP address')
p.add_argument('ifc_index', help='ifc index of the nic device.', type=int)
p.add_argument('ip_addr', help='ip address will be deleted.')
p.set_defaults(func=net_interface_delete_ip_address)

def net_get_interfaces(args):
    """Print the target's network interface list."""
    print_dict(rpc.net.net_get_interfaces(args.client))

p = subparsers.add_parser(
    'net_get_interfaces', aliases=['get_interfaces'], help='Display current interface list')
p.set_defaults(func=net_get_interfaces)
+
# -- NVMe-oF target configuration ------------------------------------------

def nvmf_set_max_subsystems(args):
    """Cap the number of subsystems the NVMf target will allow."""
    rpc.nvmf.nvmf_set_max_subsystems(args.client, max_subsystems=args.max_subsystems)

p = subparsers.add_parser('nvmf_set_max_subsystems', aliases=['set_nvmf_target_max_subsystems'],
                          help='Set the maximum number of NVMf target subsystems')
p.add_argument('-x', '--max-subsystems', help='Max number of NVMf subsystems', type=int, required=True)
p.set_defaults(func=nvmf_set_max_subsystems)

def nvmf_set_config(args):
    """Forward global NVMf target configuration options to the RPC."""
    rpc.nvmf.nvmf_set_config(args.client,
                             acceptor_poll_rate=args.acceptor_poll_rate,
                             conn_sched=args.conn_sched,
                             passthru_identify_ctrlr=args.passthru_identify_ctrlr)

p = subparsers.add_parser('nvmf_set_config', aliases=['set_nvmf_target_config'],
                          help='Set NVMf target config')
p.add_argument('-r', '--acceptor-poll-rate', help='Polling interval of the acceptor for incoming connections (usec)', type=int)
p.add_argument('-s', '--conn-sched', help='(Deprecated). Ignored.')
p.add_argument('-i', '--passthru-identify-ctrlr', help="""Passthrough fields like serial number and model number
when the controller has a single namespace that is an NVMe bdev""", action='store_true')
p.set_defaults(func=nvmf_set_config)
+
def nvmf_create_transport(args):
    """Create an NVMf transport; unset (None) options fall back to target defaults."""
    opts = dict(trtype=args.trtype,
                tgt_name=args.tgt_name,
                max_queue_depth=args.max_queue_depth,
                max_qpairs_per_ctrlr=args.max_qpairs_per_ctrlr,
                max_io_qpairs_per_ctrlr=args.max_io_qpairs_per_ctrlr,
                in_capsule_data_size=args.in_capsule_data_size,
                max_io_size=args.max_io_size,
                io_unit_size=args.io_unit_size,
                max_aq_depth=args.max_aq_depth,
                num_shared_buffers=args.num_shared_buffers,
                buf_cache_size=args.buf_cache_size,
                max_srq_depth=args.max_srq_depth,
                no_srq=args.no_srq,
                c2h_success=args.c2h_success,
                dif_insert_or_strip=args.dif_insert_or_strip,
                sock_priority=args.sock_priority,
                acceptor_backlog=args.acceptor_backlog,
                abort_timeout_sec=args.abort_timeout_sec)
    rpc.nvmf.nvmf_create_transport(args.client, **opts)

p = subparsers.add_parser('nvmf_create_transport', help='Create NVMf transport')
p.add_argument('-t', '--trtype', help='Transport type (ex. RDMA)', type=str, required=True)
p.add_argument('-g', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.add_argument('-q', '--max-queue-depth', help='Max number of outstanding I/O per queue', type=int)
p.add_argument('-p', '--max-qpairs-per-ctrlr', help="""Max number of SQ and CQ per controller.
Deprecated, use max-io-qpairs-per-ctrlr""", type=int)
p.add_argument('-m', '--max-io-qpairs-per-ctrlr', help='Max number of IO qpairs per controller', type=int)
p.add_argument('-c', '--in-capsule-data-size', help='Max number of in-capsule data size', type=int)
p.add_argument('-i', '--max-io-size', help='Max I/O size (bytes)', type=int)
p.add_argument('-u', '--io-unit-size', help='I/O unit size (bytes)', type=int)
p.add_argument('-a', '--max-aq-depth', help='Max number of admin cmds per AQ', type=int)
p.add_argument('-n', '--num-shared-buffers', help='The number of pooled data buffers available to the transport', type=int)
p.add_argument('-b', '--buf-cache-size', help='The number of shared buffers to reserve for each poll group', type=int)
p.add_argument('-s', '--max-srq-depth', help='Max number of outstanding I/O per SRQ. Relevant only for RDMA transport', type=int)
p.add_argument('-r', '--no-srq', action='store_true', help='Disable per-thread shared receive queue. Relevant only for RDMA transport')
p.add_argument('-o', '--c2h-success', action='store_false', help='Disable C2H success optimization. Relevant only for TCP transport')
p.add_argument('-f', '--dif-insert-or-strip', action='store_true', help='Enable DIF insert/strip. Relevant only for TCP transport')
p.add_argument('-y', '--sock-priority', help='The sock priority of the tcp connection. Relevant only for TCP transport', type=int)
p.add_argument('-l', '--acceptor_backlog', help='Pending connections allowed at one time. Relevant only for RDMA transport', type=int)
p.add_argument('-x', '--abort-timeout-sec', help='Abort execution timeout value, in seconds', type=int)
p.set_defaults(func=nvmf_create_transport)
+
def nvmf_get_transports(args):
    """Print the transports configured on the target."""
    print_dict(rpc.nvmf.nvmf_get_transports(args.client, tgt_name=args.tgt_name))

p = subparsers.add_parser('nvmf_get_transports', aliases=['get_nvmf_transports'],
                          help='Display nvmf transports')
p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.set_defaults(func=nvmf_get_transports)

def nvmf_get_subsystems(args):
    """Print the subsystems configured on the target."""
    print_dict(rpc.nvmf.nvmf_get_subsystems(args.client, tgt_name=args.tgt_name))

p = subparsers.add_parser('nvmf_get_subsystems', aliases=['get_nvmf_subsystems'],
                          help='Display nvmf subsystems')
p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.set_defaults(func=nvmf_get_subsystems)

def nvmf_create_subsystem(args):
    """Create an NVMe-oF subsystem with the given NQN."""
    rpc.nvmf.nvmf_create_subsystem(args.client,
                                   nqn=args.nqn,
                                   tgt_name=args.tgt_name,
                                   serial_number=args.serial_number,
                                   model_number=args.model_number,
                                   allow_any_host=args.allow_any_host,
                                   max_namespaces=args.max_namespaces)

p = subparsers.add_parser('nvmf_create_subsystem', aliases=['nvmf_subsystem_create'],
                          help='Create an NVMe-oF subsystem')
p.add_argument('nqn', help='Subsystem NQN (ASCII)')
p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.add_argument("-s", "--serial-number", help="""
Format: 'sn' etc
Example: 'SPDK00000000000001'""", default='00000000000000000000')
p.add_argument("-d", "--model-number", help="""
Format: 'mn' etc
Example: 'SPDK Controller'""", default='SPDK bdev Controller')
p.add_argument("-a", "--allow-any-host", action='store_true', help="Allow any host to connect (don't enforce host NQN whitelist)")
p.add_argument("-m", "--max-namespaces", help="Maximum number of namespaces allowed",
               type=int, default=0)
p.set_defaults(func=nvmf_create_subsystem)

def nvmf_delete_subsystem(args):
    """Delete the subsystem identified by its NQN."""
    rpc.nvmf.nvmf_delete_subsystem(args.client,
                                   nqn=args.subsystem_nqn,
                                   tgt_name=args.tgt_name)

p = subparsers.add_parser('nvmf_delete_subsystem', aliases=['delete_nvmf_subsystem'],
                          help='Delete a nvmf subsystem')
p.add_argument('subsystem_nqn',
               help='subsystem nqn to be deleted. Example: nqn.2016-06.io.spdk:cnode1.')
p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.set_defaults(func=nvmf_delete_subsystem)
+
def nvmf_subsystem_add_listener(args):
    """Attach a transport listener (trtype/traddr/trsvcid) to a subsystem."""
    rpc.nvmf.nvmf_subsystem_add_listener(args.client,
                                         nqn=args.nqn,
                                         trtype=args.trtype,
                                         traddr=args.traddr,
                                         tgt_name=args.tgt_name,
                                         adrfam=args.adrfam,
                                         trsvcid=args.trsvcid)

p = subparsers.add_parser('nvmf_subsystem_add_listener', help='Add a listener to an NVMe-oF subsystem')
p.add_argument('nqn', help='NVMe-oF subsystem NQN')
p.add_argument('-t', '--trtype', help='NVMe-oF transport type: e.g., rdma', required=True)
p.add_argument('-a', '--traddr', help='NVMe-oF transport address: e.g., an ip address', required=True)
p.add_argument('-p', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.add_argument('-f', '--adrfam', help='NVMe-oF transport adrfam: e.g., ipv4, ipv6, ib, fc, intra_host')
p.add_argument('-s', '--trsvcid', help='NVMe-oF transport service id: e.g., a port number')
p.set_defaults(func=nvmf_subsystem_add_listener)

def nvmf_subsystem_remove_listener(args):
    """Detach a transport listener from a subsystem; mirrors add_listener."""
    rpc.nvmf.nvmf_subsystem_remove_listener(args.client,
                                            nqn=args.nqn,
                                            trtype=args.trtype,
                                            traddr=args.traddr,
                                            tgt_name=args.tgt_name,
                                            adrfam=args.adrfam,
                                            trsvcid=args.trsvcid)

p = subparsers.add_parser('nvmf_subsystem_remove_listener', help='Remove a listener from an NVMe-oF subsystem')
p.add_argument('nqn', help='NVMe-oF subsystem NQN')
p.add_argument('-t', '--trtype', help='NVMe-oF transport type: e.g., rdma', required=True)
p.add_argument('-a', '--traddr', help='NVMe-oF transport address: e.g., an ip address', required=True)
p.add_argument('-p', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.add_argument('-f', '--adrfam', help='NVMe-oF transport adrfam: e.g., ipv4, ipv6, ib, fc, intra_host')
p.add_argument('-s', '--trsvcid', help='NVMe-oF transport service id: e.g., a port number')
p.set_defaults(func=nvmf_subsystem_remove_listener)
+
def nvmf_subsystem_add_ns(args):
    """Add a bdev-backed namespace to a subsystem."""
    rpc.nvmf.nvmf_subsystem_add_ns(args.client,
                                   nqn=args.nqn,
                                   bdev_name=args.bdev_name,
                                   tgt_name=args.tgt_name,
                                   ptpl_file=args.ptpl_file,
                                   nsid=args.nsid,
                                   nguid=args.nguid,
                                   eui64=args.eui64,
                                   uuid=args.uuid)

p = subparsers.add_parser('nvmf_subsystem_add_ns', help='Add a namespace to an NVMe-oF subsystem')
p.add_argument('nqn', help='NVMe-oF subsystem NQN')
p.add_argument('bdev_name', help='The name of the bdev that will back this namespace')
p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.add_argument('-p', '--ptpl-file', help='The persistent reservation storage location (optional)', type=str)
p.add_argument('-n', '--nsid', help='The requested NSID (optional)', type=int)
p.add_argument('-g', '--nguid', help='Namespace globally unique identifier (optional)')
p.add_argument('-e', '--eui64', help='Namespace EUI-64 identifier (optional)')
p.add_argument('-u', '--uuid', help='Namespace UUID (optional)')
p.set_defaults(func=nvmf_subsystem_add_ns)

def nvmf_subsystem_remove_ns(args):
    """Remove the namespace with the given NSID from a subsystem."""
    rpc.nvmf.nvmf_subsystem_remove_ns(args.client,
                                      nqn=args.nqn,
                                      nsid=args.nsid,
                                      tgt_name=args.tgt_name)

p = subparsers.add_parser('nvmf_subsystem_remove_ns', help='Remove a namespace to an NVMe-oF subsystem')
p.add_argument('nqn', help='NVMe-oF subsystem NQN')
p.add_argument('nsid', help='The requested NSID', type=int)
p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.set_defaults(func=nvmf_subsystem_remove_ns)
+
def nvmf_subsystem_add_host(args):
    """Whitelist a host NQN on a subsystem."""
    rpc.nvmf.nvmf_subsystem_add_host(args.client,
                                     nqn=args.nqn,
                                     host=args.host,
                                     tgt_name=args.tgt_name)

p = subparsers.add_parser('nvmf_subsystem_add_host', help='Add a host to an NVMe-oF subsystem')
p.add_argument('nqn', help='NVMe-oF subsystem NQN')
p.add_argument('host', help='Host NQN to allow')
p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.set_defaults(func=nvmf_subsystem_add_host)

def nvmf_subsystem_remove_host(args):
    """Remove a host NQN from a subsystem's whitelist."""
    rpc.nvmf.nvmf_subsystem_remove_host(args.client,
                                        nqn=args.nqn,
                                        host=args.host,
                                        tgt_name=args.tgt_name)

p = subparsers.add_parser('nvmf_subsystem_remove_host', help='Remove a host from an NVMe-oF subsystem')
p.add_argument('nqn', help='NVMe-oF subsystem NQN')
p.add_argument('host', help='Host NQN to remove')
p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.set_defaults(func=nvmf_subsystem_remove_host)

def nvmf_subsystem_allow_any_host(args):
    """Toggle the allow-any-host flag of a subsystem."""
    # NOTE(review): -e/--enable is parsed but never forwarded; only
    # args.disable reaches the RPC — confirm this is intentional.
    rpc.nvmf.nvmf_subsystem_allow_any_host(args.client,
                                           nqn=args.nqn,
                                           disable=args.disable,
                                           tgt_name=args.tgt_name)

p = subparsers.add_parser('nvmf_subsystem_allow_any_host', help='Allow any host to connect to the subsystem')
p.add_argument('nqn', help='NVMe-oF subsystem NQN')
p.add_argument('-e', '--enable', action='store_true', help='Enable allowing any host')
p.add_argument('-d', '--disable', action='store_true', help='Disable allowing any host')
p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.set_defaults(func=nvmf_subsystem_allow_any_host)

def nvmf_get_stats(args):
    """Print NVMf target statistics."""
    print_dict(rpc.nvmf.nvmf_get_stats(args.client, tgt_name=args.tgt_name))

p = subparsers.add_parser(
    'nvmf_get_stats', help='Display current statistics for NVMf subsystem')
p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.set_defaults(func=nvmf_get_stats)
+
# -- pmem ------------------------------------------------------------------

def bdev_pmem_create_pool(args):
    """Create a pmemblk pool; total_size (MiB) is converted into a block count."""
    num_blocks = int((args.total_size * 1024 * 1024) / args.block_size)
    rpc.pmem.bdev_pmem_create_pool(args.client,
                                   pmem_file=args.pmem_file,
                                   num_blocks=num_blocks,
                                   block_size=args.block_size)

p = subparsers.add_parser('bdev_pmem_create_pool', aliases=['create_pmem_pool'],
                          help='Create pmem pool')
p.add_argument('pmem_file', help='Path to pmemblk pool file')
p.add_argument('total_size', help='Size of malloc bdev in MB (int > 0)', type=int)
p.add_argument('block_size', help='Block size for this pmem pool', type=int)
p.set_defaults(func=bdev_pmem_create_pool)

def bdev_pmem_get_pool_info(args):
    """Print pool metadata and consistency information."""
    print_dict(rpc.pmem.bdev_pmem_get_pool_info(args.client, pmem_file=args.pmem_file))

p = subparsers.add_parser('bdev_pmem_get_pool_info', aliases=['pmem_pool_info'],
                          help='Display pmem pool info and check consistency')
p.add_argument('pmem_file', help='Path to pmemblk pool file')
p.set_defaults(func=bdev_pmem_get_pool_info)

def bdev_pmem_delete_pool(args):
    """Delete a pmem pool file."""
    rpc.pmem.bdev_pmem_delete_pool(args.client, pmem_file=args.pmem_file)

p = subparsers.add_parser('bdev_pmem_delete_pool', aliases=['delete_pmem_pool'],
                          help='Delete pmem pool')
p.add_argument('pmem_file', help='Path to pmemblk pool file')
p.set_defaults(func=bdev_pmem_delete_pool)
+
# -- framework subsystems --------------------------------------------------

def framework_get_subsystems(args):
    """Print the SPDK subsystem list in initialization order."""
    print_dict(rpc.subsystem.framework_get_subsystems(args.client))

p = subparsers.add_parser('framework_get_subsystems', aliases=['get_subsystems'],
                          help="""Print subsystems array in initialization order. Each subsystem
entry contain (unsorted) array of subsystems it depends on.""")
p.set_defaults(func=framework_get_subsystems)

def framework_get_config(args):
    """Print the configuration of one named subsystem."""
    # Note: the subsystem name is passed positionally to the RPC helper.
    print_dict(rpc.subsystem.framework_get_config(args.client, args.name))

p = subparsers.add_parser('framework_get_config', aliases=['get_subsystem_config'],
                          help="""Print subsystem configuration""")
p.add_argument('name', help='Name of subsystem to query')
p.set_defaults(func=framework_get_config)
+
# -- vhost -----------------------------------------------------------------

def vhost_controller_set_coalescing(args):
    """Configure interrupt coalescing for a vhost controller."""
    rpc.vhost.vhost_controller_set_coalescing(args.client,
                                              ctrlr=args.ctrlr,
                                              delay_base_us=args.delay_base_us,
                                              iops_threshold=args.iops_threshold)

p = subparsers.add_parser('vhost_controller_set_coalescing', aliases=['set_vhost_controller_coalescing'],
                          help='Set vhost controller coalescing')
p.add_argument('ctrlr', help='controller name')
p.add_argument('delay_base_us', help='Base delay time', type=int)
p.add_argument('iops_threshold', help='IOPS threshold when coalescing is enabled', type=int)
p.set_defaults(func=vhost_controller_set_coalescing)

def vhost_create_scsi_controller(args):
    """Create a new vhost-scsi controller."""
    rpc.vhost.vhost_create_scsi_controller(args.client,
                                           ctrlr=args.ctrlr,
                                           cpumask=args.cpumask)

p = subparsers.add_parser(
    'vhost_create_scsi_controller', aliases=['construct_vhost_scsi_controller'],
    help='Add new vhost controller')
p.add_argument('ctrlr', help='controller name')
p.add_argument('--cpumask', help='cpu mask for this controller')
p.set_defaults(func=vhost_create_scsi_controller)

def vhost_scsi_controller_add_target(args):
    """Attach a bdev as a SCSI target/LUN on a vhost-scsi controller."""
    print_json(rpc.vhost.vhost_scsi_controller_add_target(args.client,
                                                          ctrlr=args.ctrlr,
                                                          scsi_target_num=args.scsi_target_num,
                                                          bdev_name=args.bdev_name))

p = subparsers.add_parser('vhost_scsi_controller_add_target',
                          aliases=['add_vhost_scsi_lun'],
                          help='Add lun to vhost controller')
p.add_argument('ctrlr', help='conntroller name where add lun')
p.add_argument('scsi_target_num', help='scsi_target_num', type=int)
p.add_argument('bdev_name', help='bdev name')
p.set_defaults(func=vhost_scsi_controller_add_target)

def vhost_scsi_controller_remove_target(args):
    """Remove a SCSI target from a vhost-scsi controller."""
    rpc.vhost.vhost_scsi_controller_remove_target(args.client,
                                                  ctrlr=args.ctrlr,
                                                  scsi_target_num=args.scsi_target_num)

p = subparsers.add_parser('vhost_scsi_controller_remove_target',
                          aliases=['remove_vhost_scsi_target'],
                          help='Remove target from vhost controller')
p.add_argument('ctrlr', help='controller name to remove target from')
p.add_argument('scsi_target_num', help='scsi_target_num', type=int)
p.set_defaults(func=vhost_scsi_controller_remove_target)

def vhost_create_blk_controller(args):
    """Create a new vhost-blk controller backed by the given bdev."""
    rpc.vhost.vhost_create_blk_controller(args.client,
                                          ctrlr=args.ctrlr,
                                          dev_name=args.dev_name,
                                          cpumask=args.cpumask,
                                          readonly=args.readonly,
                                          packed_ring=args.packed_ring)

p = subparsers.add_parser('vhost_create_blk_controller',
                          aliases=['construct_vhost_blk_controller'],
                          help='Add a new vhost block controller')
p.add_argument('ctrlr', help='controller name')
p.add_argument('dev_name', help='device name')
p.add_argument('--cpumask', help='cpu mask for this controller')
p.add_argument("-r", "--readonly", action='store_true', help='Set controller as read-only')
p.add_argument("-p", "--packed_ring", action='store_true', help='Set controller as packed ring supported')
p.set_defaults(func=vhost_create_blk_controller)
+
def vhost_create_nvme_controller(args):
    """Create a new vhost-nvme controller with the requested IO queue count."""
    rpc.vhost.vhost_create_nvme_controller(args.client,
                                           ctrlr=args.ctrlr,
                                           io_queues=args.io_queues,
                                           cpumask=args.cpumask)

# BUG FIX: the alias previously duplicated the canonical command name
# ('vhost_create_nvme_controller'), which registered the real name as its own
# deprecated alias. Use the legacy RPC name as the alias instead, matching the
# construct_* alias pattern used by the other vhost commands above.
p = subparsers.add_parser('vhost_create_nvme_controller', aliases=['construct_vhost_nvme_controller'],
                          help='Add new vhost controller')
p.add_argument('ctrlr', help='controller name')
p.add_argument('io_queues', help='number of IO queues for the controller', type=int)
p.add_argument('--cpumask', help='cpu mask for this controller')
p.set_defaults(func=vhost_create_nvme_controller)
+
def vhost_nvme_controller_add_ns(args):
    """Attach a bdev as a namespace on a vhost-nvme controller."""
    rpc.vhost.vhost_nvme_controller_add_ns(args.client,
                                           ctrlr=args.ctrlr,
                                           bdev_name=args.bdev_name)

p = subparsers.add_parser('vhost_nvme_controller_add_ns', aliases=['add_vhost_nvme_ns'],
                          help='Add a Namespace to vhost controller')
p.add_argument('ctrlr', help='conntroller name where add a Namespace')
p.add_argument('bdev_name', help='block device name for a new Namespace')
p.set_defaults(func=vhost_nvme_controller_add_ns)

def vhost_get_controllers(args):
    """Print all vhost controllers, or just the one named with -n."""
    # The (possibly None) name filter is passed positionally to the RPC helper.
    print_dict(rpc.vhost.vhost_get_controllers(args.client, args.name))

p = subparsers.add_parser('vhost_get_controllers', aliases=['get_vhost_controllers'],
                          help='List all or specific vhost controller(s)')
p.add_argument('-n', '--name', help="Name of vhost controller", required=False)
p.set_defaults(func=vhost_get_controllers)

def vhost_delete_controller(args):
    """Delete a vhost controller by name."""
    rpc.vhost.vhost_delete_controller(args.client, ctrlr=args.ctrlr)

p = subparsers.add_parser('vhost_delete_controller', aliases=['remove_vhost_controller'],
                          help='Delete a vhost controller')
p.add_argument('ctrlr', help='controller name')
p.set_defaults(func=vhost_delete_controller)
+
def bdev_virtio_attach_controller(args):
    """Attach a virtio controller and print the bdevs created from it."""
    print_array(rpc.vhost.bdev_virtio_attach_controller(args.client,
                                                        name=args.name,
                                                        trtype=args.trtype,
                                                        traddr=args.traddr,
                                                        dev_type=args.dev_type,
                                                        vq_count=args.vq_count,
                                                        vq_size=args.vq_size))

p = subparsers.add_parser('bdev_virtio_attach_controller', aliases=['construct_virtio_dev'],
                          help="""Attach virtio controller using provided
transport type and device type. This will also create bdevs for any block devices connected to the
controller (for example, SCSI devices for a virtio-scsi controller).
Result is array of added bdevs.""")
p.add_argument('name', help="Use this name as base for new created bdevs")
p.add_argument('-t', '--trtype',
               help='Virtio target transport type: pci or user', required=True)
p.add_argument('-a', '--traddr',
               help='Transport type specific target address: e.g. UNIX domain socket path or BDF', required=True)
p.add_argument('-d', '--dev-type',
               help='Device type: blk or scsi', required=True)
p.add_argument('--vq-count', help='Number of virtual queues to be used.', type=int)
p.add_argument('--vq-size', help='Size of each queue', type=int)
p.set_defaults(func=bdev_virtio_attach_controller)

def bdev_virtio_scsi_get_devices(args):
    """Print every attached virtio-scsi device."""
    print_dict(rpc.vhost.bdev_virtio_scsi_get_devices(args.client))

p = subparsers.add_parser('bdev_virtio_scsi_get_devices', aliases=['get_virtio_scsi_devs'],
                          help='List all Virtio-SCSI devices.')
p.set_defaults(func=bdev_virtio_scsi_get_devices)

def bdev_virtio_detach_controller(args):
    """Detach a virtio device, deleting the bdevs it exposed."""
    rpc.vhost.bdev_virtio_detach_controller(args.client, name=args.name)

p = subparsers.add_parser('bdev_virtio_detach_controller', aliases=['remove_virtio_bdev'],
                          help="""Remove a Virtio device
This will delete all bdevs exposed by this device""")
p.add_argument('name', help='Virtio device name. E.g. VirtioUser0')
p.set_defaults(func=bdev_virtio_detach_controller)
+
+ # OCSSD
+ def bdev_ocssd_create(args):
+ nsid = int(args.nsid) if args.nsid is not None else None
+ print_json(rpc.bdev.bdev_ocssd_create(args.client,
+ ctrlr_name=args.ctrlr_name,
+ bdev_name=args.name,
+ nsid=nsid,
+ range=args.range))
+
+ p = subparsers.add_parser('bdev_ocssd_create',
+ help='Creates zoned bdev on specified Open Channel controller')
+ p.add_argument('-c', '--ctrlr_name', help='Name of the OC NVMe controller', required=True)
+ p.add_argument('-b', '--name', help='Name of the bdev to create', required=True)
+ p.add_argument('-n', '--nsid', help='Namespace ID', required=False)
+ p.add_argument('-r', '--range', help='Parallel unit range (in the form of BEGIN-END (inclusive))',
+ required=False)
+ p.set_defaults(func=bdev_ocssd_create)
+
+ def bdev_ocssd_delete(args):
+ print_json(rpc.bdev.bdev_ocssd_delete(args.client,
+ name=args.name))
+
+ p = subparsers.add_parser('bdev_ocssd_delete',
+ help='Deletes Open Channel bdev')
+ p.add_argument('name', help='Name of the Open Channel bdev')
+ p.set_defaults(func=bdev_ocssd_delete)
+
+ # ioat
+ def ioat_scan_accel_engine(args):
+ pci_whitelist = []
+ if args.pci_whitelist:
+ for w in args.pci_whitelist.strip().split(" "):
+ pci_whitelist.append(w)
+ rpc.ioat.ioat_scan_accel_engine(args.client, pci_whitelist)
+
+ p = subparsers.add_parser('ioat_scan_accel_engine',
+ aliases=['ioat_scan_copy_engine', 'scan_ioat_copy_engine'],
+ help='Set scan and enable IOAT accel engine offload.')
+ p.add_argument('-w', '--pci-whitelist', help="""Whitespace-separated list of PCI addresses in
+ domain:bus:device.function format or domain.bus.device.function format""")
+ p.set_defaults(func=ioat_scan_accel_engine)
+
+ # idxd
+ def idxd_scan_accel_engine(args):
+ rpc.idxd.idxd_scan_accel_engine(args.client, config_number=args.config_number)
+
+ p = subparsers.add_parser('idxd_scan_accel_engine',
+ help='Set config and enable idxd accel engine offload.')
+ p.add_argument('-c', '--config-number', help="""Pre-defined configuration number to use. See docs.""", type=int)
+ p.set_defaults(func=idxd_scan_accel_engine)
+
+ # opal
+ def bdev_nvme_opal_init(args):
+ rpc.nvme.bdev_nvme_opal_init(args.client,
+ nvme_ctrlr_name=args.nvme_ctrlr_name,
+ password=args.password)
+
+ p = subparsers.add_parser('bdev_nvme_opal_init', help='take ownership and activate')
+ p.add_argument('-b', '--nvme-ctrlr-name', help='nvme ctrlr name')
+ p.add_argument('-p', '--password', help='password for admin')
+ p.set_defaults(func=bdev_nvme_opal_init)
+
+ def bdev_nvme_opal_revert(args):
+ rpc.nvme.bdev_nvme_opal_revert(args.client,
+ nvme_ctrlr_name=args.nvme_ctrlr_name,
+ password=args.password)
+ p = subparsers.add_parser('bdev_nvme_opal_revert', help='Revert to default factory settings')
+ p.add_argument('-b', '--nvme-ctrlr-name', help='nvme ctrlr name')
+ p.add_argument('-p', '--password', help='password')
+ p.set_defaults(func=bdev_nvme_opal_revert)
+
+ def bdev_opal_create(args):
+ print_json(rpc.bdev.bdev_opal_create(args.client,
+ nvme_ctrlr_name=args.nvme_ctrlr_name,
+ nsid=args.nsid,
+ locking_range_id=args.locking_range_id,
+ range_start=args.range_start,
+ range_length=args.range_length,
+ password=args.password))
+
+ p = subparsers.add_parser('bdev_opal_create', help="""Create opal bdev on specified NVMe controller""")
+ p.add_argument('-b', '--nvme-ctrlr-name', help='nvme ctrlr name', required=True)
+ p.add_argument('-n', '--nsid', help='namespace ID (only support nsid=1 for now)', type=int, required=True)
+ p.add_argument('-i', '--locking-range-id', help='locking range id', type=int, required=True)
+ p.add_argument('-s', '--range-start', help='locking range start LBA', type=int, required=True)
+ p.add_argument('-l', '--range-length', help='locking range length (in blocks)', type=int, required=True)
+ p.add_argument('-p', '--password', help='admin password', required=True)
+ p.set_defaults(func=bdev_opal_create)
+
+ def bdev_opal_get_info(args):
+ print_dict(rpc.bdev.bdev_opal_get_info(args.client,
+ bdev_name=args.bdev_name,
+ password=args.password))
+
+ p = subparsers.add_parser('bdev_opal_get_info', help='get opal locking range info for this bdev')
+ p.add_argument('-b', '--bdev-name', help='opal bdev')
+ p.add_argument('-p', '--password', help='password')
+ p.set_defaults(func=bdev_opal_get_info)
+
+ def bdev_opal_delete(args):
+ rpc.bdev.bdev_opal_delete(args.client,
+ bdev_name=args.bdev_name,
+ password=args.password)
+
+ p = subparsers.add_parser('bdev_opal_delete', help="""delete a virtual opal bdev""")
+ p.add_argument('-b', '--bdev-name', help='opal virtual bdev', required=True)
+ p.add_argument('-p', '--password', help='admin password', required=True)
+ p.set_defaults(func=bdev_opal_delete)
+
+ def bdev_opal_new_user(args):
+ rpc.bdev.bdev_opal_new_user(args.client,
+ bdev_name=args.bdev_name,
+ admin_password=args.admin_password,
+ user_id=args.user_id,
+ user_password=args.user_password)
+
+ p = subparsers.add_parser('bdev_opal_new_user', help="""Add a user to opal bdev who can set lock state for this bdev""")
+ p.add_argument('-b', '--bdev-name', help='opal bdev', required=True)
+ p.add_argument('-p', '--admin-password', help='admin password', required=True)
+ p.add_argument('-i', '--user-id', help='ID for new user', type=int, required=True)
+ p.add_argument('-u', '--user-password', help='password set for this user', required=True)
+ p.set_defaults(func=bdev_opal_new_user)
+
+ def bdev_opal_set_lock_state(args):
+ rpc.bdev.bdev_opal_set_lock_state(args.client,
+ bdev_name=args.bdev_name,
+ user_id=args.user_id,
+ password=args.password,
+ lock_state=args.lock_state)
+
+ p = subparsers.add_parser('bdev_opal_set_lock_state', help="""set lock state for an opal bdev""")
+ p.add_argument('-b', '--bdev-name', help='opal bdev', required=True)
+ p.add_argument('-i', '--user-id', help='ID of the user who want to set lock state, either admin or a user assigned to this bdev',
+ type=int, required=True)
+ p.add_argument('-p', '--password', help='password of this user', required=True)
+ p.add_argument('-l', '--lock-state', help='lock state to set, choose from {readwrite, readonly, rwlock}', required=True)
+ p.set_defaults(func=bdev_opal_set_lock_state)
+
+ # bdev_nvme_send_cmd
+ def bdev_nvme_send_cmd(args):
+ print_dict(rpc.nvme.bdev_nvme_send_cmd(args.client,
+ name=args.nvme_name,
+ cmd_type=args.cmd_type,
+ data_direction=args.data_direction,
+ cmdbuf=args.cmdbuf,
+ data=args.data,
+ metadata=args.metadata,
+ data_len=args.data_length,
+ metadata_len=args.metadata_length,
+ timeout_ms=args.timeout_ms))
+
+ p = subparsers.add_parser('bdev_nvme_send_cmd', aliases=['send_nvme_cmd'],
+ help='NVMe passthrough cmd.')
+ p.add_argument('-n', '--nvme-name', help="""Name of the operating NVMe controller""")
+ p.add_argument('-t', '--cmd-type', help="""Type of nvme cmd. Valid values are: admin, io""")
+ p.add_argument('-r', '--data-direction', help="""Direction of data transfer. Valid values are: c2h, h2c""")
+ p.add_argument('-c', '--cmdbuf', help="""NVMe command encoded by base64 urlsafe""")
+ p.add_argument('-d', '--data', help="""Data transferring to controller from host, encoded by base64 urlsafe""")
+ p.add_argument('-m', '--metadata', help="""Metadata transferring to controller from host, encoded by base64 urlsafe""")
+ p.add_argument('-D', '--data-length', help="""Data length required to transfer from controller to host""", type=int)
+ p.add_argument('-M', '--metadata-length', help="""Metadata length required to transfer from controller to host""", type=int)
+ p.add_argument('-T', '--timeout-ms',
+ help="""Command execution timeout value, in milliseconds, if 0, don't track timeout""", type=int, default=0)
+ p.set_defaults(func=bdev_nvme_send_cmd)
+
+ # Notifications
+ def notify_get_types(args):
+ print_dict(rpc.notify.notify_get_types(args.client))
+
+ p = subparsers.add_parser('notify_get_types', aliases=['get_notification_types'],
+ help='List available notifications that user can subscribe to.')
+ p.set_defaults(func=notify_get_types)
+
+ def notify_get_notifications(args):
+ ret = rpc.notify.notify_get_notifications(args.client,
+ id=args.id,
+ max=args.max)
+ print_dict(ret)
+
+ p = subparsers.add_parser('notify_get_notifications', aliases=['get_notifications'],
+ help='Get notifications')
+ p.add_argument('-i', '--id', help="""First ID to start fetching from""", type=int)
+ p.add_argument('-n', '--max', help="""Maximum number of notifications to return in response""", type=int)
+ p.set_defaults(func=notify_get_notifications)
+
+ def thread_get_stats(args):
+ print_dict(rpc.app.thread_get_stats(args.client))
+
+ p = subparsers.add_parser(
+ 'thread_get_stats', help='Display current statistics of all the threads')
+ p.set_defaults(func=thread_get_stats)
+
+ def thread_set_cpumask(args):
+ ret = rpc.app.thread_set_cpumask(args.client,
+ id=args.id,
+ cpumask=args.cpumask)
+ p = subparsers.add_parser('thread_set_cpumask',
+ help="""set the cpumask of the thread whose ID matches to the
+ specified value. The thread may be migrated to one of the specified CPUs.""")
+ p.add_argument('-i', '--id', type=int, help='thread ID')
+ p.add_argument('-m', '--cpumask', help='cpumask for this thread')
+ p.set_defaults(func=thread_set_cpumask)
+
+ def thread_get_pollers(args):
+ print_dict(rpc.app.thread_get_pollers(args.client))
+
+ p = subparsers.add_parser(
+ 'thread_get_pollers', help='Display current pollers of all the threads')
+ p.set_defaults(func=thread_get_pollers)
+
+ def thread_get_io_channels(args):
+ print_dict(rpc.app.thread_get_io_channels(args.client))
+
+ p = subparsers.add_parser(
+ 'thread_get_io_channels', help='Display current IO channels of all the threads')
+ p.set_defaults(func=thread_get_io_channels)
+
+ def env_dpdk_get_mem_stats(args):
+ print_dict(rpc.env_dpdk.env_dpdk_get_mem_stats(args.client))
+
+ p = subparsers.add_parser(
+ 'env_dpdk_get_mem_stats', help='write the dpdk memory stats to a file.')
+ p.set_defaults(func=env_dpdk_get_mem_stats)
+
+ # blobfs
+ def blobfs_detect(args):
+ print(rpc.blobfs.blobfs_detect(args.client,
+ bdev_name=args.bdev_name))
+
+ p = subparsers.add_parser('blobfs_detect', help='Detect whether a blobfs exists on bdev')
+ p.add_argument('bdev_name', help='Blockdev name to detect blobfs. Example: Malloc0.')
+ p.set_defaults(func=blobfs_detect)
+
+ def blobfs_create(args):
+ print(rpc.blobfs.blobfs_create(args.client,
+ bdev_name=args.bdev_name,
+ cluster_sz=args.cluster_sz))
+
+ p = subparsers.add_parser('blobfs_create', help='Build a blobfs on bdev')
+ p.add_argument('bdev_name', help='Blockdev name to build blobfs. Example: Malloc0.')
+ p.add_argument('-c', '--cluster_sz',
+ help="""Size of cluster in bytes (Optional). Must be multiple of 4KB page size. Default and minimal value is 1M.""")
+ p.set_defaults(func=blobfs_create)
+
+ def blobfs_mount(args):
+ print(rpc.blobfs.blobfs_mount(args.client,
+ bdev_name=args.bdev_name,
+ mountpoint=args.mountpoint))
+
+ p = subparsers.add_parser('blobfs_mount', help='Mount a blobfs on bdev to host path by FUSE')
+ p.add_argument('bdev_name', help='Blockdev name where the blobfs is. Example: Malloc0.')
+ p.add_argument('mountpoint', help='Mountpoint path in host to mount blobfs. Example: /mnt/.')
+ p.set_defaults(func=blobfs_mount)
+
+ def blobfs_set_cache_size(args):
+ print(rpc.blobfs.blobfs_set_cache_size(args.client,
+ size_in_mb=args.size_in_mb))
+
+ p = subparsers.add_parser('blobfs_set_cache_size', help='Set cache size for blobfs')
+ p.add_argument('size_in_mb', help='Cache size for blobfs in megabytes.', type=int)
+ p.set_defaults(func=blobfs_set_cache_size)
+
+ # sock
+ def sock_impl_get_options(args):
+ print_json(rpc.sock.sock_impl_get_options(args.client,
+ impl_name=args.impl))
+
+ p = subparsers.add_parser('sock_impl_get_options', help="""Get options of socket layer implementation""")
+ p.add_argument('-i', '--impl', help='Socket implementation name, e.g. posix', required=True)
+ p.set_defaults(func=sock_impl_get_options)
+
+ def sock_impl_set_options(args):
+ rpc.sock.sock_impl_set_options(args.client,
+ impl_name=args.impl,
+ recv_buf_size=args.recv_buf_size,
+ send_buf_size=args.send_buf_size,
+ enable_recv_pipe=args.enable_recv_pipe,
+ enable_zerocopy_send=args.enable_zerocopy_send)
+
+ p = subparsers.add_parser('sock_impl_set_options', help="""Set options of socket layer implementation""")
+ p.add_argument('-i', '--impl', help='Socket implementation name, e.g. posix', required=True)
+ p.add_argument('-r', '--recv-buf-size', help='Size of receive buffer on socket in bytes', type=int)
+ p.add_argument('-s', '--send-buf-size', help='Size of send buffer on socket in bytes', type=int)
+ p.add_argument('--enable-recv-pipe', help='Enable receive pipe',
+ action='store_true', dest='enable_recv_pipe')
+ p.add_argument('--disable-recv-pipe', help='Disable receive pipe',
+ action='store_false', dest='enable_recv_pipe')
+ p.add_argument('--enable-zerocopy-send', help='Enable zerocopy on send',
+ action='store_true', dest='enable_zerocopy_send')
+ p.add_argument('--disable-zerocopy-send', help='Disable zerocopy on send',
+ action='store_false', dest='enable_zerocopy_send')
+ p.set_defaults(func=sock_impl_set_options, enable_recv_pipe=None, enable_zerocopy_send=None)
+
+ def check_called_name(name):
+ if name in deprecated_aliases:
+ print("{} is deprecated, use {} instead.".format(name, deprecated_aliases[name]), file=sys.stderr)
+
+ class dry_run_client:
+ def call(self, method, params=None):
+ print("Request:\n" + json.dumps({"method": method, "params": params}, indent=2))
+
+ def null_print(arg):
+ pass
+
+ def call_rpc_func(args):
+ args.func(args)
+ check_called_name(args.called_rpc_name)
+
+ def execute_script(parser, client, fd):
+ executed_rpc = ""
+ for rpc_call in map(str.rstrip, fd):
+ if not rpc_call.strip():
+ continue
+ executed_rpc = "\n".join([executed_rpc, rpc_call])
+ args = parser.parse_args(shlex.split(rpc_call))
+ args.client = client
+ try:
+ call_rpc_func(args)
+ except JSONRPCException as ex:
+ print("Exception:")
+ print(executed_rpc.strip() + " <<<")
+ print(ex.message)
+ exit(1)
+
+ # Create temporary parser, pull out the plugin parameter, load the module, and then run the real argument parser
+ plugin_parser = argparse.ArgumentParser(add_help=False)
+ plugin_parser.add_argument('--plugin', dest='rpc_plugin', help='Module name of plugin with additional RPC commands')
+
+ rpc_module = plugin_parser.parse_known_args()[0].rpc_plugin
+ if rpc_module is not None:
+ try:
+ rpc_plugin = importlib.import_module(rpc_module)
+ try:
+ rpc_plugin.spdk_rpc_plugin_initialize(subparsers)
+ except AttributeError:
+ print("Module %s does not contain 'spdk_rpc_plugin_initialize' function" % rpc_module)
+ except ModuleNotFoundError:
+ print("Module %s not found" % rpc_module)
+
+ args = parser.parse_args()
+
+ if sys.stdin.isatty() and not hasattr(args, 'func'):
+ # No arguments and no data piped through stdin
+ parser.print_help()
+ exit(1)
+ if args.is_server:
+ for input in sys.stdin:
+ cmd = shlex.split(input)
+ try:
+ tmp_args = parser.parse_args(cmd)
+ except SystemExit as ex:
+ print("**STATUS=1", flush=True)
+ continue
+
+ try:
+ tmp_args.client = rpc.client.JSONRPCClient(
+ tmp_args.server_addr, tmp_args.port, tmp_args.timeout,
+ log_level=getattr(logging, tmp_args.verbose.upper()), conn_retries=tmp_args.conn_retries)
+ call_rpc_func(tmp_args)
+ print("**STATUS=0", flush=True)
+ except JSONRPCException as ex:
+ print(ex.message)
+ print("**STATUS=1", flush=True)
+ exit(0)
+ elif args.dry_run:
+ args.client = dry_run_client()
+ print_dict = null_print
+ print_json = null_print
+ print_array = null_print
+ else:
+ args.client = rpc.client.JSONRPCClient(args.server_addr, args.port, args.timeout,
+ log_level=getattr(logging, args.verbose.upper()),
+ conn_retries=args.conn_retries)
+ if hasattr(args, 'func'):
+ try:
+ call_rpc_func(args)
+ except JSONRPCException as ex:
+ print(ex.message)
+ exit(1)
+ else:
+ execute_script(parser, args.client, sys.stdin)
diff --git a/src/spdk/scripts/rpc/__init__.py b/src/spdk/scripts/rpc/__init__.py
new file mode 100644
index 000000000..f764d7ae5
--- /dev/null
+++ b/src/spdk/scripts/rpc/__init__.py
@@ -0,0 +1,201 @@
+import json
+import os
+import sys
+
+from io import IOBase as io
+
+from . import app
+from . import bdev
+from . import blobfs
+from . import env_dpdk
+from . import idxd
+from . import ioat
+from . import iscsi
+from . import log
+from . import lvol
+from . import nbd
+from . import net
+from . import notify
+from . import nvme
+from . import nvmf
+from . import pmem
+from . import subsystem
+from . import trace
+from . import vhost
+from . import vmd
+from . import sock
+from . import client as rpc_client
+from .helpers import deprecated_alias
+
+
+@deprecated_alias('start_subsystem_init')
+def framework_start_init(client):
+ """Start initialization of subsystems"""
+ return client.call('framework_start_init')
+
+
+@deprecated_alias('wait_subsystem_init')
+def framework_wait_init(client):
+ """Block until subsystems have been initialized"""
+ return client.call('framework_wait_init')
+
+
+@deprecated_alias("get_rpc_methods")
+def rpc_get_methods(client, current=None, include_aliases=None):
+ """Get list of supported RPC methods.
+ Args:
+ current: Get list of RPC methods only callable in the current state.
+ include_aliases: Include aliases in the list with RPC methods.
+ """
+ params = {}
+
+ if current:
+ params['current'] = current
+ if include_aliases:
+ params['include_aliases'] = include_aliases
+
+ return client.call('rpc_get_methods', params)
+
+
+@deprecated_alias("get_spdk_version")
+def spdk_get_version(client):
+ """Get SPDK version"""
+ return client.call('spdk_get_version')
+
+
+def _json_dump(config, fd, indent):
+ if indent is None:
+ indent = 2
+ elif indent < 0:
+ indent = None
+ json.dump(config, fd, indent=indent)
+ fd.write('\n')
+
+
+def _json_load(j):
+ if j == sys.stdin or isinstance(j, io):
+ json_conf = json.load(j)
+ elif os.path.exists(j):
+ with open(j, "r") as j:
+ json_conf = json.load(j)
+ else:
+ json_conf = json.loads(j)
+ return json_conf
+
+
+def save_config(client, fd, indent=2):
+ """Write current (live) configuration of SPDK subsystems and targets to stdout.
+ Args:
+ fd: opened file descriptor where data will be saved
+ indent: Indent level. Value less than 0 mean compact mode.
+ Default indent level is 2.
+ """
+ config = {
+ 'subsystems': []
+ }
+
+ for elem in client.call('framework_get_subsystems'):
+ cfg = {
+ 'subsystem': elem['subsystem'],
+ 'config': client.call('framework_get_config', {"name": elem['subsystem']})
+ }
+ config['subsystems'].append(cfg)
+
+ _json_dump(config, fd, indent)
+
+
+def load_config(client, fd, include_aliases=False):
+ """Configure SPDK subsystems and targets using JSON RPC read from stdin.
+ Args:
+ fd: opened file descriptor where data will be taken from
+ """
+ json_config = _json_load(fd)
+
+ # remove subsystems with no config
+ subsystems = json_config['subsystems']
+ for subsystem in list(subsystems):
+ if not subsystem['config']:
+ subsystems.remove(subsystem)
+
+ # check if methods in the config file are known
+ allowed_methods = client.call('rpc_get_methods', {'include_aliases': include_aliases})
+ if not subsystems and 'framework_start_init' in allowed_methods:
+ framework_start_init(client)
+ return
+
+ for subsystem in list(subsystems):
+ config = subsystem['config']
+ for elem in list(config):
+ if 'method' not in elem or elem['method'] not in allowed_methods:
+ raise rpc_client.JSONRPCException("Unknown method was included in the config file")
+
+ while subsystems:
+ allowed_methods = client.call('rpc_get_methods', {'current': True,
+ 'include_aliases': include_aliases})
+ allowed_found = False
+
+ for subsystem in list(subsystems):
+ config = subsystem['config']
+ for elem in list(config):
+ if 'method' not in elem or elem['method'] not in allowed_methods:
+ continue
+
+ client.call(elem['method'], elem['params'])
+ config.remove(elem)
+ allowed_found = True
+
+ if not config:
+ subsystems.remove(subsystem)
+
+ if 'framework_start_init' in allowed_methods:
+ framework_start_init(client)
+ allowed_found = True
+
+ if not allowed_found:
+ break
+
+ if subsystems:
+ print("Some configs were skipped because the RPC state that can call them passed over.")
+
+
+def save_subsystem_config(client, fd, indent=2, name=None):
+ """Write current (live) configuration of SPDK subsystem to stdout.
+ Args:
+ fd: opened file descriptor where data will be saved
+ indent: Indent level. Value less than 0 mean compact mode.
+ Default is indent level 2.
+ """
+ cfg = {
+ 'subsystem': name,
+ 'config': client.call('framework_get_config', {"name": name})
+ }
+
+ _json_dump(cfg, fd, indent)
+
+
+def load_subsystem_config(client, fd):
+ """Configure SPDK subsystem using JSON RPC read from stdin.
+ Args:
+ fd: opened file descriptor where data will be taken from
+ """
+ subsystem = _json_load(fd)
+
+ if not subsystem['config']:
+ return
+
+ allowed_methods = client.call('rpc_get_methods')
+ config = subsystem['config']
+ for elem in list(config):
+ if 'method' not in elem or elem['method'] not in allowed_methods:
+ raise rpc_client.JSONRPCException("Unknown method was included in the config file")
+
+ allowed_methods = client.call('rpc_get_methods', {'current': True})
+ for elem in list(config):
+ if 'method' not in elem or elem['method'] not in allowed_methods:
+ continue
+
+ client.call(elem['method'], elem['params'])
+ config.remove(elem)
+
+ if config:
+ print("Some configs were skipped because they cannot be called in the current RPC state.")
diff --git a/src/spdk/scripts/rpc/app.py b/src/spdk/scripts/rpc/app.py
new file mode 100644
index 000000000..9412de17d
--- /dev/null
+++ b/src/spdk/scripts/rpc/app.py
@@ -0,0 +1,78 @@
+from .helpers import deprecated_alias
+
+
+@deprecated_alias('kill_instance')
+def spdk_kill_instance(client, sig_name):
+ """Send a signal to the SPDK process.
+
+ Args:
+ sig_name: signal to send ("SIGINT", "SIGTERM", "SIGQUIT", "SIGHUP", or "SIGKILL")
+ """
+ params = {'sig_name': sig_name}
+ return client.call('spdk_kill_instance', params)
+
+
+@deprecated_alias('context_switch_monitor')
+def framework_monitor_context_switch(client, enabled=None):
+ """Query or set state of context switch monitoring.
+
+ Args:
+ enabled: True to enable monitoring; False to disable monitoring; None to query (optional)
+
+ Returns:
+ Current context switch monitoring state (after applying enabled flag).
+ """
+ params = {}
+ if enabled is not None:
+ params['enabled'] = enabled
+ return client.call('framework_monitor_context_switch', params)
+
+
+def framework_get_reactors(client):
+ """Query list of all reactors.
+
+ Returns:
+ List of all reactors.
+ """
+ return client.call('framework_get_reactors')
+
+
+def thread_get_stats(client):
+ """Query threads statistics.
+
+ Returns:
+ Current threads statistics.
+ """
+ return client.call('thread_get_stats')
+
+
+def thread_set_cpumask(client, id, cpumask):
+ """Set the cpumask of the thread whose ID matches to the specified value.
+
+ Args:
+ id: thread ID
+ cpumask: cpumask for this thread
+
+ Returns:
+ True or False
+ """
+ params = {'id': id, 'cpumask': cpumask}
+ return client.call('thread_set_cpumask', params)
+
+
+def thread_get_pollers(client):
+ """Query current pollers.
+
+ Returns:
+ Current pollers.
+ """
+ return client.call('thread_get_pollers')
+
+
+def thread_get_io_channels(client):
+ """Query current IO channels.
+
+ Returns:
+ Current IO channels.
+ """
+ return client.call('thread_get_io_channels')
diff --git a/src/spdk/scripts/rpc/bdev.py b/src/spdk/scripts/rpc/bdev.py
new file mode 100644
index 000000000..8c669c0b2
--- /dev/null
+++ b/src/spdk/scripts/rpc/bdev.py
@@ -0,0 +1,1105 @@
+from .helpers import deprecated_alias
+
+
+@deprecated_alias('set_bdev_options')
+def bdev_set_options(client, bdev_io_pool_size=None, bdev_io_cache_size=None, bdev_auto_examine=None):
+ """Set parameters for the bdev subsystem.
+
+ Args:
+ bdev_io_pool_size: number of bdev_io structures in shared buffer pool (optional)
+ bdev_io_cache_size: maximum number of bdev_io structures cached per thread (optional)
+ bdev_auto_examine: if set to false, the bdev layer will not examine every disks automatically (optional)
+ """
+ params = {}
+
+ if bdev_io_pool_size:
+ params['bdev_io_pool_size'] = bdev_io_pool_size
+ if bdev_io_cache_size:
+ params['bdev_io_cache_size'] = bdev_io_cache_size
+ if bdev_auto_examine is not None:
+ params["bdev_auto_examine"] = bdev_auto_examine
+
+ return client.call('bdev_set_options', params)
+
+
+@deprecated_alias('construct_compress_bdev')
+def bdev_compress_create(client, base_bdev_name, pm_path, lb_size):
+ """Construct a compress virtual block device.
+
+ Args:
+ base_bdev_name: name of the underlying base bdev
+ pm_path: path to persistent memory
+ lb_size: logical block size for the compressed vol in bytes. Must be 4K or 512.
+
+ Returns:
+ Name of created virtual block device.
+ """
+ params = {'base_bdev_name': base_bdev_name, 'pm_path': pm_path, 'lb_size': lb_size}
+
+ return client.call('bdev_compress_create', params)
+
+
+@deprecated_alias('delete_compress_bdev')
+def bdev_compress_delete(client, name):
+ """Delete compress virtual block device.
+
+ Args:
+ name: name of compress vbdev to delete
+ """
+ params = {'name': name}
+ return client.call('bdev_compress_delete', params)
+
+
+@deprecated_alias('set_compress_pmd')
+def compress_set_pmd(client, pmd):
+ """Set pmd options for the bdev compress.
+
+ Args:
+ pmd: 0 = auto-select, 1 = QAT, 2 = ISAL
+ """
+ params = {'pmd': pmd}
+
+ return client.call('compress_set_pmd', params)
+
+
+def bdev_compress_get_orphans(client, name=None):
+ """Get a list of comp bdevs that do not have a pmem file (aka orphaned).
+
+ Args:
+ name: comp bdev name to query (optional; if omitted, query all comp bdevs)
+
+ Returns:
+ List of comp bdev names.
+ """
+ params = {}
+ if name:
+ params['name'] = name
+ return client.call('bdev_compress_get_orphans', params)
+
+
+@deprecated_alias('construct_crypto_bdev')
+def bdev_crypto_create(client, base_bdev_name, name, crypto_pmd, key, cipher=None, key2=None):
+ """Construct a crypto virtual block device.
+
+ Args:
+ base_bdev_name: name of the underlying base bdev
+ name: name for the crypto vbdev
+        crypto_pmd: name of the DPDK crypto driver to use
+ key: key
+
+ Returns:
+ Name of created virtual block device.
+ """
+ params = {'base_bdev_name': base_bdev_name, 'name': name, 'crypto_pmd': crypto_pmd, 'key': key}
+ if cipher:
+ params['cipher'] = cipher
+ if key2:
+ params['key2'] = key2
+ return client.call('bdev_crypto_create', params)
+
+
+@deprecated_alias('delete_crypto_bdev')
+def bdev_crypto_delete(client, name):
+ """Delete crypto virtual block device.
+
+ Args:
+ name: name of crypto vbdev to delete
+ """
+ params = {'name': name}
+ return client.call('bdev_crypto_delete', params)
+
+
+@deprecated_alias('construct_ocf_bdev')
+def bdev_ocf_create(client, name, mode, cache_bdev_name, core_bdev_name):
+ """Add an OCF block device
+
+ Args:
+ name: name of constructed OCF bdev
+ mode: OCF cache mode: {'wb', 'wt', 'pt', 'wa', 'wi', 'wo'}
+ cache_bdev_name: name of underlying cache bdev
+ core_bdev_name: name of underlying core bdev
+
+ Returns:
+ Name of created block device
+ """
+ params = {'name': name, 'mode': mode, 'cache_bdev_name': cache_bdev_name, 'core_bdev_name': core_bdev_name}
+
+ return client.call('bdev_ocf_create', params)
+
+
+@deprecated_alias('delete_ocf_bdev')
+def bdev_ocf_delete(client, name):
+ """Delete an OCF device
+
+ Args:
+ name: name of OCF bdev
+
+ """
+ params = {'name': name}
+
+ return client.call('bdev_ocf_delete', params)
+
+
+@deprecated_alias('get_ocf_stats')
+def bdev_ocf_get_stats(client, name):
+ """Get statistics of chosen OCF block device
+
+ Args:
+ name: name of OCF bdev
+
+ Returns:
+ Statistics as json object
+ """
+ params = {'name': name}
+
+ return client.call('bdev_ocf_get_stats', params)
+
+
+@deprecated_alias('get_ocf_bdevs')
+def bdev_ocf_get_bdevs(client, name=None):
+ """Get list of OCF devices including unregistered ones
+
+ Args:
+ name: name of OCF vbdev or name of cache device or name of core device (optional)
+
+ Returns:
+ Array of OCF devices with their current status
+ """
+ params = None
+ if name:
+ params = {'name': name}
+ return client.call('bdev_ocf_get_bdevs', params)
+
+
+@deprecated_alias('construct_malloc_bdev')
+def bdev_malloc_create(client, num_blocks, block_size, name=None, uuid=None):
+ """Construct a malloc block device.
+
+ Args:
+ num_blocks: size of block device in blocks
+ block_size: block size of device; must be a power of 2 and at least 512
+ name: name of block device (optional)
+ uuid: UUID of block device (optional)
+
+ Returns:
+ Name of created block device.
+ """
+ params = {'num_blocks': num_blocks, 'block_size': block_size}
+ if name:
+ params['name'] = name
+ if uuid:
+ params['uuid'] = uuid
+ return client.call('bdev_malloc_create', params)
+
+
+@deprecated_alias('delete_malloc_bdev')
+def bdev_malloc_delete(client, name):
+ """Delete malloc block device.
+
+ Args:
+        name: name of malloc bdev to delete
+ """
+ params = {'name': name}
+ return client.call('bdev_malloc_delete', params)
+
+
+@deprecated_alias('construct_null_bdev')
+def bdev_null_create(client, num_blocks, block_size, name, uuid=None, md_size=None,
+ dif_type=None, dif_is_head_of_md=None):
+ """Construct a null block device.
+
+ Args:
+ num_blocks: size of block device in blocks
+ block_size: block size of device; data part size must be a power of 2 and at least 512
+ name: name of block device
+ uuid: UUID of block device (optional)
+ md_size: metadata size of device (optional)
+ dif_type: protection information type (optional)
+ dif_is_head_of_md: protection information is in the first 8 bytes of metadata (optional)
+
+ Returns:
+ Name of created block device.
+ """
+ params = {'name': name, 'num_blocks': num_blocks,
+ 'block_size': block_size}
+ if uuid:
+ params['uuid'] = uuid
+ if md_size:
+ params['md_size'] = md_size
+ if dif_type:
+ params['dif_type'] = dif_type
+ if dif_is_head_of_md:
+ params['dif_is_head_of_md'] = dif_is_head_of_md
+ return client.call('bdev_null_create', params)
+
+
+@deprecated_alias('delete_null_bdev')
+def bdev_null_delete(client, name):
+ """Remove null bdev from the system.
+
+ Args:
+ name: name of null bdev to delete
+ """
+ params = {'name': name}
+ return client.call('bdev_null_delete', params)
+
+
+@deprecated_alias('get_raid_bdevs')
+def bdev_raid_get_bdevs(client, category):
+ """Get list of raid bdevs based on category
+
+ Args:
+ category: any one of all or online or configuring or offline
+
+ Returns:
+ List of raid bdev names
+ """
+ params = {'category': category}
+ return client.call('bdev_raid_get_bdevs', params)
+
+
+@deprecated_alias('construct_raid_bdev')
+def bdev_raid_create(client, name, raid_level, base_bdevs, strip_size=None, strip_size_kb=None):
+    """Create raid bdev. Either strip size arg will work but one is required.
+
+    Args:
+        name: user defined raid bdev name
+        strip_size (deprecated): strip size of raid bdev in KB, supported values like 8, 16, 32, 64, 128, 256, etc
+        strip_size_kb: strip size of raid bdev in KB, supported values like 8, 16, 32, 64, 128, 256, etc
+        raid_level: raid level of raid bdev, supported values 0
+        base_bdevs: Space separated names of Nvme bdevs in double quotes, like "Nvme0n1 Nvme1n1 Nvme2n1"
+
+    Returns:
+        None
+    """
+    params = {'name': name, 'raid_level': raid_level, 'base_bdevs': base_bdevs}
+
+    # If both strip arguments are supplied, both are forwarded; the server
+    # decides precedence between them.
+    if strip_size:
+        params['strip_size'] = strip_size
+
+    if strip_size_kb:
+        params['strip_size_kb'] = strip_size_kb
+
+    return client.call('bdev_raid_create', params)
+
+
+@deprecated_alias('destroy_raid_bdev')
+def bdev_raid_delete(client, name):
+    """Delete raid bdev
+
+    Args:
+        name: raid bdev name
+
+    Returns:
+        None
+    """
+    # Thin wrapper around the bdev_raid_delete RPC.
+    params = {'name': name}
+    return client.call('bdev_raid_delete', params)
+
+
+@deprecated_alias('construct_aio_bdev')
+def bdev_aio_create(client, filename, name, block_size=None):
+    """Construct a Linux AIO block device.
+
+    Args:
+        filename: path to device or file (ex: /dev/sda)
+        name: name of block device
+        block_size: block size of device (optional; autodetected if omitted)
+
+    Returns:
+        Name of created block device.
+    """
+    params = {'name': name,
+              'filename': filename}
+
+    # Omit block_size when falsy so the server autodetects it.
+    if block_size:
+        params['block_size'] = block_size
+
+    return client.call('bdev_aio_create', params)
+
+
+@deprecated_alias('delete_aio_bdev')
+def bdev_aio_delete(client, name):
+    """Remove aio bdev from the system.
+
+    Args:
+        name: name of aio bdev to delete
+    """
+    params = {'name': name}
+    return client.call('bdev_aio_delete', params)
+
+
+def bdev_uring_create(client, filename, name, block_size=None):
+    """Create a bdev with Linux io_uring backend.
+
+    Args:
+        filename: path to device or file (ex: /dev/nvme0n1)
+        name: name of bdev
+        block_size: block size of device (optional; autodetected if omitted)
+
+    Returns:
+        Name of created bdev.
+    """
+    params = {'name': name,
+              'filename': filename}
+
+    # Omit block_size when falsy so the server autodetects it.
+    if block_size:
+        params['block_size'] = block_size
+
+    return client.call('bdev_uring_create', params)
+
+
+def bdev_uring_delete(client, name):
+    """Delete a uring bdev.
+
+    Args:
+        name: name of uring bdev to delete
+    """
+    # Thin wrapper around the bdev_uring_delete RPC.
+    params = {'name': name}
+    return client.call('bdev_uring_delete', params)
+
+
+@deprecated_alias('set_bdev_nvme_options')
+def bdev_nvme_set_options(client, action_on_timeout=None, timeout_us=None, retry_count=None,
+                          arbitration_burst=None, low_priority_weight=None,
+                          medium_priority_weight=None, high_priority_weight=None,
+                          nvme_adminq_poll_period_us=None, nvme_ioq_poll_period_us=None, io_queue_requests=None,
+                          delay_cmd_submit=None):
+    """Set options for the bdev nvme. This is startup command.
+
+    Args:
+        action_on_timeout: action to take on command time out. Valid values are: none, reset, abort (optional)
+        timeout_us: Timeout for each command, in microseconds. If 0, don't track timeouts (optional)
+        retry_count: The number of attempts per I/O when an I/O fails (optional)
+        arbitration_burst: The value is expressed as a power of two (optional)
+        low_priority_weight: The number of commands that may be executed from the low priority queue at one time (optional)
+        medium_priority_weight: The number of commands that may be executed from the medium priority queue at one time (optional)
+        high_priority_weight: The number of commands that may be executed from the high priority queue at one time (optional)
+        nvme_adminq_poll_period_us: How often the admin queue is polled for asynchronous events in microseconds (optional)
+        nvme_ioq_poll_period_us: How often to poll I/O queues for completions in microseconds (optional)
+        io_queue_requests: The number of requests allocated for each NVMe I/O queue. Default: 512 (optional)
+        delay_cmd_submit: Enable delayed NVMe command submission to allow batching of multiple commands (optional)
+    """
+    # NOTE(review): except for delay_cmd_submit, these use truthiness checks,
+    # so an explicit 0 (documented as meaningful for timeout_us) is never
+    # forwarded to the server — confirm whether 'is not None' was intended.
+    params = {}
+
+    if action_on_timeout:
+        params['action_on_timeout'] = action_on_timeout
+
+    if timeout_us:
+        params['timeout_us'] = timeout_us
+
+    if retry_count:
+        params['retry_count'] = retry_count
+
+    if arbitration_burst:
+        params['arbitration_burst'] = arbitration_burst
+
+    if low_priority_weight:
+        params['low_priority_weight'] = low_priority_weight
+
+    if medium_priority_weight:
+        params['medium_priority_weight'] = medium_priority_weight
+
+    if high_priority_weight:
+        params['high_priority_weight'] = high_priority_weight
+
+    if nvme_adminq_poll_period_us:
+        params['nvme_adminq_poll_period_us'] = nvme_adminq_poll_period_us
+
+    if nvme_ioq_poll_period_us:
+        params['nvme_ioq_poll_period_us'] = nvme_ioq_poll_period_us
+
+    if io_queue_requests:
+        params['io_queue_requests'] = io_queue_requests
+
+    # delay_cmd_submit is a boolean, so False must still be sent explicitly.
+    if delay_cmd_submit is not None:
+        params['delay_cmd_submit'] = delay_cmd_submit
+
+    return client.call('bdev_nvme_set_options', params)
+
+
+@deprecated_alias('set_bdev_nvme_hotplug')
+def bdev_nvme_set_hotplug(client, enable, period_us=None):
+    """Enable or disable hotplug monitoring for the bdev nvme. This is startup command.
+
+    Args:
+        enable: True to enable hotplug, False to disable.
+        period_us: how often the hotplug is processed for insert and remove events. Set 0 to reset to default. (optional)
+    """
+    params = {'enable': enable}
+
+    # NOTE(review): period_us=0 is documented as "reset to default" but is
+    # falsy, so it is never sent — confirm whether 'is not None' was intended.
+    if period_us:
+        params['period_us'] = period_us
+
+    return client.call('bdev_nvme_set_hotplug', params)
+
+
+@deprecated_alias('construct_nvme_bdev')
+def bdev_nvme_attach_controller(client, name, trtype, traddr, adrfam=None, trsvcid=None,
+                                priority=None, subnqn=None, hostnqn=None, hostaddr=None,
+                                hostsvcid=None, prchk_reftag=None, prchk_guard=None):
+    """Construct block device for each NVMe namespace in the attached controller.
+
+    Args:
+        name: bdev name prefix; "n" + namespace ID will be appended to create unique names
+        trtype: transport type ("PCIe", "RDMA")
+        traddr: transport address (PCI BDF or IP address)
+        adrfam: address family ("IPv4", "IPv6", "IB", or "FC") (optional for PCIe)
+        trsvcid: transport service ID (port number for IP-based addresses; optional for PCIe)
+        priority: transport connection priority (Sock priority for TCP-based transports; optional)
+        subnqn: subsystem NQN to connect to (optional)
+        hostnqn: NQN to connect from (optional)
+        hostaddr: host transport address (IP address for IP-based transports, NULL for PCIe or FC; optional)
+        hostsvcid: host transport service ID (port number for IP-based transports, NULL for PCIe or FC; optional)
+        prchk_reftag: Enable checking of PI reference tag for I/O processing (optional)
+        prchk_guard: Enable checking of PI guard for I/O processing (optional)
+
+    Returns:
+        Names of created block devices.
+    """
+    # Mandatory identification first; every optional field is added only when
+    # truthy so the server applies its own defaults otherwise.
+    params = {'name': name,
+              'trtype': trtype,
+              'traddr': traddr}
+
+    if hostnqn:
+        params['hostnqn'] = hostnqn
+
+    if hostaddr:
+        params['hostaddr'] = hostaddr
+
+    if hostsvcid:
+        params['hostsvcid'] = hostsvcid
+
+    if adrfam:
+        params['adrfam'] = adrfam
+
+    if trsvcid:
+        params['trsvcid'] = trsvcid
+
+    if priority:
+        params['priority'] = priority
+
+    if subnqn:
+        params['subnqn'] = subnqn
+
+    if prchk_reftag:
+        params['prchk_reftag'] = prchk_reftag
+
+    if prchk_guard:
+        params['prchk_guard'] = prchk_guard
+
+    return client.call('bdev_nvme_attach_controller', params)
+
+
+@deprecated_alias('delete_nvme_controller')
+def bdev_nvme_detach_controller(client, name):
+    """Detach NVMe controller and delete any associated bdevs.
+
+    Args:
+        name: controller name
+    """
+
+    # Thin wrapper around the bdev_nvme_detach_controller RPC.
+    params = {'name': name}
+    return client.call('bdev_nvme_detach_controller', params)
+
+
+def bdev_nvme_cuse_register(client, name):
+    """Register CUSE devices on NVMe controller.
+
+    Args:
+        name: Name of the operating NVMe controller
+    """
+    # Thin wrapper around the bdev_nvme_cuse_register RPC.
+    params = {'name': name}
+
+    return client.call('bdev_nvme_cuse_register', params)
+
+
+def bdev_nvme_cuse_unregister(client, name):
+    """Unregister CUSE devices on NVMe controller.
+
+    Args:
+        name: Name of the operating NVMe controller
+    """
+    # Thin wrapper around the bdev_nvme_cuse_unregister RPC.
+    params = {'name': name}
+
+    return client.call('bdev_nvme_cuse_unregister', params)
+
+
+def bdev_zone_block_create(client, name, base_bdev, zone_capacity, optimal_open_zones):
+    """Creates a virtual zone device on top of existing non-zoned bdev.
+
+    Args:
+        name: Zone device name
+        base_bdev: Base Nvme bdev name
+        zone_capacity: Surfaced zone capacity in blocks
+        optimal_open_zones: Number of zones required to reach optimal write speed (optional, default: 1)
+
+    Returns:
+        Name of created block device.
+    """
+    # NOTE(review): the docstring calls optimal_open_zones optional, but this
+    # wrapper requires it and always sends it — confirm which is intended.
+    params = {'name': name,
+              'base_bdev': base_bdev,
+              'zone_capacity': zone_capacity,
+              'optimal_open_zones': optimal_open_zones}
+
+    return client.call('bdev_zone_block_create', params)
+
+
+def bdev_zone_block_delete(client, name):
+    """Remove block zone bdev from the system.
+
+    Args:
+        name: name of block zone bdev to delete
+    """
+    # Thin wrapper around the bdev_zone_block_delete RPC.
+    params = {'name': name}
+    return client.call('bdev_zone_block_delete', params)
+
+
+@deprecated_alias('construct_rbd_bdev')
+def bdev_rbd_create(client, pool_name, rbd_name, block_size, name=None, user=None, config=None):
+    """Create a Ceph RBD block device.
+
+    Args:
+        pool_name: Ceph RBD pool name
+        rbd_name: Ceph RBD image name
+        block_size: block size of RBD volume
+        name: name of block device (optional)
+        user: Ceph user name (optional)
+        config: map of config keys to values (optional)
+
+    Returns:
+        Name of created block device.
+    """
+    params = {
+        'pool_name': pool_name,
+        'rbd_name': rbd_name,
+        'block_size': block_size,
+    }
+
+    if name:
+        params['name'] = name
+    # user/config use 'is not None' so that empty-but-present values (e.g. an
+    # empty config dict) are still forwarded; note the RPC key is 'user_id'.
+    if user is not None:
+        params['user_id'] = user
+    if config is not None:
+        params['config'] = config
+
+    return client.call('bdev_rbd_create', params)
+
+
+@deprecated_alias('delete_rbd_bdev')
+def bdev_rbd_delete(client, name):
+    """Remove rbd bdev from the system.
+
+    Args:
+        name: name of rbd bdev to delete
+    """
+    # Thin wrapper around the bdev_rbd_delete RPC.
+    params = {'name': name}
+    return client.call('bdev_rbd_delete', params)
+
+
+def bdev_rbd_resize(client, name, new_size):
+    """Resize rbd bdev in the system.
+
+    Args:
+        name: name of rbd bdev to resize
+        new_size: new bdev size of resize operation. The unit is MiB
+    """
+    params = {
+        'name': name,
+        'new_size': new_size,
+    }
+    return client.call('bdev_rbd_resize', params)
+
+
+@deprecated_alias('construct_error_bdev')
+def bdev_error_create(client, base_name):
+    """Construct an error injection block device.
+
+    Args:
+        base_name: base bdev name
+    """
+    # The error bdev wraps 'base_name'; errors are injected later via
+    # bdev_error_inject_error.
+    params = {'base_name': base_name}
+    return client.call('bdev_error_create', params)
+
+
+def bdev_delay_create(client, base_bdev_name, name, avg_read_latency, p99_read_latency, avg_write_latency, p99_write_latency):
+    """Construct a delay block device.
+
+    Args:
+        base_bdev_name: name of the existing bdev
+        name: name of block device
+        avg_read_latency: complete 99% of read ops with this delay
+        p99_read_latency: complete 1% of read ops with this delay
+        avg_write_latency: complete 99% of write ops with this delay
+        p99_write_latency: complete 1% of write ops with this delay
+
+    Returns:
+        Name of created block device.
+    """
+    # All four latencies are required; they are forwarded verbatim.
+    params = {
+        'base_bdev_name': base_bdev_name,
+        'name': name,
+        'avg_read_latency': avg_read_latency,
+        'p99_read_latency': p99_read_latency,
+        'avg_write_latency': avg_write_latency,
+        'p99_write_latency': p99_write_latency,
+    }
+    return client.call('bdev_delay_create', params)
+
+
+def bdev_delay_delete(client, name):
+    """Remove delay bdev from the system.
+
+    Args:
+        name: name of delay bdev to delete
+    """
+    # Thin wrapper around the bdev_delay_delete RPC.
+    params = {'name': name}
+    return client.call('bdev_delay_delete', params)
+
+
+def bdev_delay_update_latency(client, delay_bdev_name, latency_type, latency_us):
+    """Update the latency value for a delay block device
+
+    Args:
+        delay_bdev_name: name of the delay bdev
+        latency_type: one of: avg_read, avg_write, p99_read, p99_write. No other values accepted.
+        latency_us: new latency value in microseconds.
+
+    Returns:
+        True if successful, or a specific error otherwise.
+    """
+    params = {
+        'delay_bdev_name': delay_bdev_name,
+        'latency_type': latency_type,
+        'latency_us': latency_us,
+    }
+    return client.call('bdev_delay_update_latency', params)
+
+
+@deprecated_alias('delete_error_bdev')
+def bdev_error_delete(client, name):
+    """Remove error bdev from the system.
+
+    Args:
+        name: name of error bdev to delete
+    """
+    params = {'name': name}
+    return client.call('bdev_error_delete', params)
+
+
+@deprecated_alias('construct_iscsi_bdev')
+def bdev_iscsi_create(client, name, url, initiator_iqn):
+    """Construct an iSCSI block device.
+
+    Args:
+        name: name of block device
+        url: iSCSI URL
+        initiator_iqn: IQN name to be used by initiator
+
+    Returns:
+        Name of created block device.
+    """
+    # All three parameters are required and forwarded verbatim.
+    params = {
+        'name': name,
+        'url': url,
+        'initiator_iqn': initiator_iqn,
+    }
+    return client.call('bdev_iscsi_create', params)
+
+
+@deprecated_alias('delete_iscsi_bdev')
+def bdev_iscsi_delete(client, name):
+    """Remove iSCSI bdev from the system.
+
+    Args:
+        name: name of iSCSI bdev to delete
+    """
+    params = {'name': name}
+    return client.call('bdev_iscsi_delete', params)
+
+
+@deprecated_alias('construct_pmem_bdev')
+def bdev_pmem_create(client, pmem_file, name):
+    """Construct a libpmemblk block device.
+
+    Args:
+        pmem_file: path to pmemblk pool file
+        name: name of block device
+
+    Returns:
+        Name of created block device.
+    """
+    # Both parameters are required and forwarded verbatim.
+    params = {
+        'pmem_file': pmem_file,
+        'name': name
+    }
+    return client.call('bdev_pmem_create', params)
+
+
+@deprecated_alias('delete_pmem_bdev')
+def bdev_pmem_delete(client, name):
+    """Remove pmem bdev from the system.
+
+    Args:
+        name: name of pmem bdev to delete
+    """
+    # Thin wrapper around the bdev_pmem_delete RPC.
+    params = {'name': name}
+    return client.call('bdev_pmem_delete', params)
+
+
+@deprecated_alias('construct_passthru_bdev')
+def bdev_passthru_create(client, base_bdev_name, name):
+    """Construct a pass-through block device.
+
+    Args:
+        base_bdev_name: name of the existing bdev
+        name: name of block device
+
+    Returns:
+        Name of created block device.
+    """
+    # Both parameters are required and forwarded verbatim.
+    params = {
+        'base_bdev_name': base_bdev_name,
+        'name': name,
+    }
+    return client.call('bdev_passthru_create', params)
+
+
+@deprecated_alias('delete_passthru_bdev')
+def bdev_passthru_delete(client, name):
+    """Remove pass through bdev from the system.
+
+    Args:
+        name: name of pass through bdev to delete
+    """
+    # Thin wrapper around the bdev_passthru_delete RPC.
+    params = {'name': name}
+    return client.call('bdev_passthru_delete', params)
+
+
+def bdev_opal_create(client, nvme_ctrlr_name, nsid, locking_range_id, range_start, range_length, password):
+    """Create opal virtual block devices from a base nvme bdev.
+
+    Args:
+        nvme_ctrlr_name: name of the nvme ctrlr
+        nsid: namespace ID of nvme ctrlr
+        locking_range_id: locking range ID corresponding to this virtual bdev
+        range_start: start address of this locking range
+        range_length: length of this locking range
+        password: admin password of base nvme bdev
+
+    Returns:
+        Name of the new created block devices.
+    """
+    # All parameters are required; forwarded verbatim to the server.
+    params = {
+        'nvme_ctrlr_name': nvme_ctrlr_name,
+        'nsid': nsid,
+        'locking_range_id': locking_range_id,
+        'range_start': range_start,
+        'range_length': range_length,
+        'password': password,
+    }
+
+    return client.call('bdev_opal_create', params)
+
+
+def bdev_opal_get_info(client, bdev_name, password):
+    """Get opal locking range info.
+
+    Args:
+        bdev_name: name of opal vbdev to get info
+        password: admin password
+
+    Returns:
+        Locking range info.
+    """
+    params = {
+        'bdev_name': bdev_name,
+        'password': password,
+    }
+
+    return client.call('bdev_opal_get_info', params)
+
+
+def bdev_opal_delete(client, bdev_name, password):
+    """Delete opal virtual bdev from the system.
+
+    Args:
+        bdev_name: name of opal vbdev to delete
+        password: admin password of base nvme bdev
+    """
+    params = {
+        'bdev_name': bdev_name,
+        'password': password,
+    }
+
+    return client.call('bdev_opal_delete', params)
+
+
+def bdev_opal_new_user(client, bdev_name, admin_password, user_id, user_password):
+    """Add a user to opal bdev who can set lock state for this bdev.
+
+    Args:
+        bdev_name: name of opal vbdev
+        admin_password: admin password
+        user_id: ID of the user who will be added to this opal bdev
+        user_password: password set for this user
+    """
+    params = {
+        'bdev_name': bdev_name,
+        'admin_password': admin_password,
+        'user_id': user_id,
+        'user_password': user_password,
+    }
+
+    return client.call('bdev_opal_new_user', params)
+
+
+def bdev_opal_set_lock_state(client, bdev_name, user_id, password, lock_state):
+    """Set lock state for an opal bdev.
+
+    Args:
+        bdev_name: name of opal vbdev
+        user_id: ID of the user who will set lock state
+        password: password of the user
+        lock_state: lock state to set
+    """
+    params = {
+        'bdev_name': bdev_name,
+        'user_id': user_id,
+        'password': password,
+        'lock_state': lock_state,
+    }
+
+    return client.call('bdev_opal_set_lock_state', params)
+
+
+@deprecated_alias('construct_split_vbdev')
+def bdev_split_create(client, base_bdev, split_count, split_size_mb=None):
+    """Create split block devices from a base bdev.
+
+    Args:
+        base_bdev: name of bdev to split
+        split_count: number of split bdevs to create
+        split_size_mb: size of each split volume in MiB (optional)
+
+    Returns:
+        List of created block devices.
+    """
+    params = {
+        'base_bdev': base_bdev,
+        'split_count': split_count,
+    }
+    # When split_size_mb is omitted/falsy the server sizes the splits itself.
+    if split_size_mb:
+        params['split_size_mb'] = split_size_mb
+
+    return client.call('bdev_split_create', params)
+
+
+@deprecated_alias('destruct_split_vbdev')
+def bdev_split_delete(client, base_bdev):
+    """Delete split block devices.
+
+    Args:
+        base_bdev: name of previously split bdev
+    """
+    params = {
+        'base_bdev': base_bdev,
+    }
+
+    return client.call('bdev_split_delete', params)
+
+
+@deprecated_alias('construct_ftl_bdev')
+def bdev_ftl_create(client, name, base_bdev, **kwargs):
+    """Construct FTL bdev
+
+    Args:
+        name: name of the bdev
+        base_bdev: name of the base bdev
+        kwargs: optional parameters, forwarded as-is; keys with a None value
+            are dropped so server-side defaults apply
+    """
+    params = {'name': name,
+              'base_bdev': base_bdev}
+    for key, value in kwargs.items():
+        if value is not None:
+            params[key] = value
+
+    return client.call('bdev_ftl_create', params)
+
+
+@deprecated_alias('delete_ftl_bdev')
+def bdev_ftl_delete(client, name):
+    """Delete FTL bdev
+
+    Args:
+        name: name of the bdev
+    """
+    # Thin wrapper around the bdev_ftl_delete RPC.
+    params = {'name': name}
+
+    return client.call('bdev_ftl_delete', params)
+
+
+def bdev_ocssd_create(client, ctrlr_name, bdev_name, nsid=None, range=None):
+    """Creates Open Channel zoned bdev on specified Open Channel controller
+
+    Args:
+        ctrlr_name: name of the OC NVMe controller
+        bdev_name: name of the bdev to create
+        nsid: namespace ID (optional)
+        range: parallel unit range (optional; note the parameter shadows the
+            builtin 'range' — kept for RPC interface compatibility)
+    """
+    params = {'ctrlr_name': ctrlr_name,
+              'bdev_name': bdev_name}
+
+    if nsid is not None:
+        params['nsid'] = nsid
+
+    if range is not None:
+        params['range'] = range
+
+    return client.call('bdev_ocssd_create', params)
+
+
+def bdev_ocssd_delete(client, name):
+    """Deletes Open Channel bdev
+
+    Args:
+        name: name of the bdev
+    """
+    # Thin wrapper around the bdev_ocssd_delete RPC.
+    params = {'name': name}
+
+    return client.call('bdev_ocssd_delete', params)
+
+
+@deprecated_alias('get_bdevs')
+def bdev_get_bdevs(client, name=None):
+    """Get information about block devices.
+
+    Args:
+        name: bdev name to query (optional; if omitted, query all bdevs)
+    Returns:
+        List of bdev information objects.
+    """
+    # An empty params dict means "all bdevs" on the server side.
+    params = {}
+    if name:
+        params['name'] = name
+    return client.call('bdev_get_bdevs', params)
+
+
+@deprecated_alias('get_bdevs_iostat')
+def bdev_get_iostat(client, name=None):
+    """Get I/O statistics for block devices.
+
+    Args:
+        name: bdev name to query (optional; if omitted, query all bdevs)
+    Returns:
+        I/O statistics for the requested block devices.
+    """
+    # An empty params dict means "all bdevs" on the server side.
+    params = {}
+    if name:
+        params['name'] = name
+    return client.call('bdev_get_iostat', params)
+
+
+@deprecated_alias('enable_bdev_histogram')
+def bdev_enable_histogram(client, name, enable):
+    """Control whether histogram is enabled for specified bdev.
+
+    Args:
+        name: name of bdev
+        enable: True to enable histogram collection, False to disable it
+    """
+    params = {'name': name, "enable": enable}
+    return client.call('bdev_enable_histogram', params)
+
+
+@deprecated_alias('get_bdev_histogram')
+def bdev_get_histogram(client, name):
+    """Get histogram for specified bdev.
+
+    Args:
+        name: name of bdev
+    """
+    params = {'name': name}
+    return client.call('bdev_get_histogram', params)
+
+
+@deprecated_alias('bdev_inject_error')
+def bdev_error_inject_error(client, name, io_type, error_type, num=1):
+    """Inject an error via an error bdev.
+
+    Args:
+        name: name of error bdev
+        io_type: one of "clear", "read", "write", "unmap", "flush", or "all"
+        error_type: one of "failure" or "pending"
+        num: number of commands to fail (default 1)
+    """
+    params = {
+        'name': name,
+        'io_type': io_type,
+        'error_type': error_type,
+        'num': num,
+    }
+
+    return client.call('bdev_error_inject_error', params)
+
+
+@deprecated_alias('set_bdev_qd_sampling_period')
+def bdev_set_qd_sampling_period(client, name, period):
+    """Enable queue depth tracking on a specified bdev.
+
+    Args:
+        name: name of a bdev on which to track queue depth.
+        period: period (in microseconds) at which to update the queue depth reading. If set to 0, polling will be disabled.
+    """
+
+    # Both fields are mandatory; period=0 is meaningful (disables polling)
+    # and is always forwarded, unlike the truthiness-gated setters elsewhere.
+    params = {}
+    params['name'] = name
+    params['period'] = period
+    return client.call('bdev_set_qd_sampling_period', params)
+
+
+@deprecated_alias('set_bdev_qos_limit')
+def bdev_set_qos_limit(
+        client,
+        name,
+        rw_ios_per_sec=None,
+        rw_mbytes_per_sec=None,
+        r_mbytes_per_sec=None,
+        w_mbytes_per_sec=None):
+    """Set QoS rate limit on a block device.
+
+    Args:
+        name: name of block device
+        rw_ios_per_sec: R/W IOs per second limit (>=10000, example: 20000). 0 means unlimited.
+        rw_mbytes_per_sec: R/W megabytes per second limit (>=10, example: 100). 0 means unlimited.
+        r_mbytes_per_sec: Read megabytes per second limit (>=10, example: 100). 0 means unlimited.
+        w_mbytes_per_sec: Write megabytes per second limit (>=10, example: 100). 0 means unlimited.
+    """
+    # 'is not None' checks (not truthiness) so an explicit 0 — documented as
+    # "unlimited" — is still sent to the server.
+    params = {}
+    params['name'] = name
+    if rw_ios_per_sec is not None:
+        params['rw_ios_per_sec'] = rw_ios_per_sec
+    if rw_mbytes_per_sec is not None:
+        params['rw_mbytes_per_sec'] = rw_mbytes_per_sec
+    if r_mbytes_per_sec is not None:
+        params['r_mbytes_per_sec'] = r_mbytes_per_sec
+    if w_mbytes_per_sec is not None:
+        params['w_mbytes_per_sec'] = w_mbytes_per_sec
+    return client.call('bdev_set_qos_limit', params)
+
+
+@deprecated_alias('apply_firmware')
+def bdev_nvme_apply_firmware(client, bdev_name, filename):
+    """Download and commit firmware to NVMe device.
+
+    Args:
+        bdev_name: name of NVMe block device
+        filename: filename of the firmware to download
+    """
+    params = {
+        'filename': filename,
+        'bdev_name': bdev_name,
+    }
+    return client.call('bdev_nvme_apply_firmware', params)
diff --git a/src/spdk/scripts/rpc/blobfs.py b/src/spdk/scripts/rpc/blobfs.py
new file mode 100644
index 000000000..a064afecf
--- /dev/null
+++ b/src/spdk/scripts/rpc/blobfs.py
@@ -0,0 +1,57 @@
+def blobfs_detect(client, bdev_name):
+    """Detect whether a blobfs exists on bdev.
+
+    Args:
+        bdev_name: block device name to detect blobfs
+
+    Returns:
+        True if a blobfs exists on the bdev; False otherwise.
+    """
+    params = {
+        'bdev_name': bdev_name
+    }
+    return client.call('blobfs_detect', params)
+
+
+def blobfs_create(client, bdev_name, cluster_sz=None):
+    """Build blobfs on bdev.
+
+    Args:
+        bdev_name: block device name to build blobfs
+        cluster_sz: Size of cluster in bytes (Optional). Must be multiple of 4KB page size. Default and minimal value is 1M.
+    """
+    params = {
+        'bdev_name': bdev_name
+    }
+    # Omit cluster_sz when falsy so the server default (1M) applies.
+    if cluster_sz:
+        params['cluster_sz'] = cluster_sz
+    return client.call('blobfs_create', params)
+
+
+def blobfs_mount(client, bdev_name, mountpoint):
+    """Mount blobfs on bdev by FUSE.
+
+    Args:
+        bdev_name: block device name where the blobfs is
+        mountpoint: Mountpoint path in host to mount blobfs
+    """
+    params = {
+        'bdev_name': bdev_name,
+        'mountpoint': mountpoint
+    }
+    return client.call('blobfs_mount', params)
+
+
+def blobfs_set_cache_size(client, size_in_mb):
+    """Set cache size for the blobstore filesystem.
+
+    Args:
+        size_in_mb: Cache size in megabytes
+
+    Returns:
+        True if cache size is set successfully; False if failed to set.
+    """
+    params = {
+        'size_in_mb': size_in_mb
+    }
+    return client.call('blobfs_set_cache_size', params)
diff --git a/src/spdk/scripts/rpc/client.py b/src/spdk/scripts/rpc/client.py
new file mode 100644
index 000000000..52ba8d216
--- /dev/null
+++ b/src/spdk/scripts/rpc/client.py
@@ -0,0 +1,183 @@
+import json
+import socket
+import time
+import os
+import logging
+import copy
+
+
+def print_dict(d):
+    """Pretty-print a JSON-serializable object with 2-space indentation."""
+    print(json.dumps(d, indent=2))
+
+
+def print_json(s):
+    """Pretty-print a value as JSON; surrounding quotes of a bare string
+    result are stripped so plain strings print unquoted."""
+    print(json.dumps(s, indent=2).strip('"'))
+
+
+class JSONRPCException(Exception):
+    """Raised on transport failures and JSON-RPC error responses.
+
+    NOTE(review): __init__ does not call super().__init__(message), so the
+    message is only available via .message, not via str(exc)/exc.args —
+    confirm callers rely on .message only.
+    """
+    def __init__(self, message):
+        self.message = message
+
+
+class JSONRPCClient(object):
+    """Minimal JSON-RPC 2.0 client for an SPDK application socket.
+
+    Connects over a Unix domain socket when 'addr' is an existing filesystem
+    path, otherwise over TCP (IPv4/IPv6) when 'port' is given.
+    """
+
+    def __init__(self, addr, port=None, timeout=60.0, **kwargs):
+        """Create the client and connect, retrying on failure.
+
+        Args:
+            addr: Unix socket path, or IP address for TCP
+            port: TCP port (unused for Unix sockets)
+            timeout: per-call receive timeout in seconds
+            kwargs: recognizes 'log_level' and 'conn_retries'
+        """
+        self.sock = None
+        ch = logging.StreamHandler()
+        ch.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
+        ch.setLevel(logging.DEBUG)
+        self._logger = logging.getLogger("JSONRPCClient(%s)" % addr)
+        self._logger.addHandler(ch)
+        self.log_set_level(kwargs.get('log_level', logging.ERROR))
+        connect_retries = kwargs.get('conn_retries', 0)
+
+        self.timeout = timeout
+        self._request_id = 0
+        self._recv_buf = ""
+        self._reqs = []
+
+        for i in range(connect_retries):
+            try:
+                self._connect(addr, port)
+                return
+            except Exception as e:
+                # ignore and retry in 200ms
+                time.sleep(0.2)
+
+        # try one last time without try/except
+        self._connect(addr, port)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exception_type, exception_value, traceback):
+        self.close()
+
+    def _connect(self, addr, port):
+        """Open the socket; raises JSONRPCException on any socket error."""
+        try:
+            # An existing filesystem path takes precedence over TCP.
+            if os.path.exists(addr):
+                self._logger.debug("Trying to connect to UNIX socket: %s", addr)
+                self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+                self.sock.connect(addr)
+            elif port:
+                if ':' in addr:
+                    self._logger.debug("Trying to connect to IPv6 address addr:%s, port:%i", addr, port)
+                    # NOTE(review): this connects to every getaddrinfo() result
+                    # in turn, keeping only the last socket — "first successful"
+                    # looks intended; confirm.
+                    for res in socket.getaddrinfo(addr, port, socket.AF_INET6, socket.SOCK_STREAM, socket.SOL_TCP):
+                        af, socktype, proto, canonname, sa = res
+                        self.sock = socket.socket(af, socktype, proto)
+                        self.sock.connect(sa)
+                else:
+                    self._logger.debug("Trying to connect to IPv4 address addr:%s, port:%i'", addr, port)
+                    self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+                    self.sock.connect((addr, port))
+            else:
+                raise socket.error("Unix socket '%s' does not exist" % addr)
+        except socket.error as ex:
+            raise JSONRPCException("Error while connecting to %s\n"
+                                   "Error details: %s" % (addr, ex))
+
+    def get_logger(self):
+        return self._logger
+
+    """Set logging level
+
+    Args:
+        lvl: Log level to set as accepted by logger.setLevel
+    """
+    def log_set_level(self, lvl):
+        self._logger.info("Setting log level to %s", lvl)
+        self._logger.setLevel(lvl)
+        self._logger.info("Log level set to %s", lvl)
+
+    def close(self):
+        # Idempotent: safe to call when never connected or already closed.
+        if getattr(self, "sock", None):
+            self.sock.shutdown(socket.SHUT_RDWR)
+            self.sock.close()
+            self.sock = None
+
+    def add_request(self, method, params):
+        """Queue a request locally; nothing is sent until flush().
+
+        Returns the request id assigned to this request.
+        """
+        self._request_id += 1
+        req = {
+            'jsonrpc': '2.0',
+            'method': method,
+            'id': self._request_id
+        }
+
+        # Deep-copy so later caller mutations don't alter the queued request.
+        if params:
+            req['params'] = copy.deepcopy(params)
+
+        self._logger.debug("append request:\n%s\n", json.dumps(req))
+        self._reqs.append(req)
+        return self._request_id
+
+    def flush(self):
+        """Serialize and send all queued requests, then clear the queue."""
+        self._logger.debug("Flushing buffer")
+        # TODO: We can drop indent parameter
+        reqstr = "\n".join(json.dumps(req, indent=2) for req in self._reqs)
+        self._reqs = []
+        self._logger.info("Requests:\n%s\n", reqstr)
+        self.sock.sendall(reqstr.encode("utf-8"))
+
+    def send(self, method, params=None):
+        """Queue one request and flush immediately; returns its request id."""
+        id = self.add_request(method, params)
+        self.flush()
+        return id
+
+    def decode_one_response(self):
+        """Pop one complete JSON object off the receive buffer.
+
+        Returns the decoded object, or None when only a partial object has
+        arrived so far (raw_decode raises ValueError on incomplete JSON).
+        """
+        try:
+            self._logger.debug("Trying to decode response '%s'", self._recv_buf)
+            buf = self._recv_buf.lstrip()
+            obj, idx = json.JSONDecoder().raw_decode(buf)
+            self._recv_buf = buf[idx:]
+            return obj
+        except ValueError:
+            self._logger.debug("Partial response")
+            return None
+
+    def recv(self):
+        """Block until one full JSON response arrives or the timeout expires.
+
+        NOTE(review): time.process_time() measures CPU time and does not
+        advance while blocked in sock.recv(), so the deadline below may never
+        expire; time.monotonic() looks like the intended clock — confirm.
+        """
+        start_time = time.process_time()
+        response = self.decode_one_response()
+        while not response:
+            try:
+                timeout = self.timeout - (time.process_time() - start_time)
+                self.sock.settimeout(timeout)
+                newdata = self.sock.recv(4096)
+                if not newdata:
+                    self.sock.close()
+                    self.sock = None
+                    raise JSONRPCException("Connection closed with partial response:\n%s\n" % self._recv_buf)
+                self._recv_buf += newdata.decode("utf-8")
+                response = self.decode_one_response()
+            except socket.timeout:
+                break  # throw exception after loop to avoid Python freaking out about nested exceptions
+            except ValueError:
+                continue  # incomplete response; keep buffering
+
+        if not response:
+            raise JSONRPCException("Timeout while waiting for response:\n%s\n" % self._recv_buf)
+
+        self._logger.info("response:\n%s\n", json.dumps(response, indent=2))
+        return response
+
+    def call(self, method, params={}):
+        """Send one request and return its 'result'; raise on JSON-RPC error.
+
+        NOTE(review): 'params={}' is a shared mutable default, and the error
+        path below writes 'method'/'req_id' into it — all callers that omit
+        params share that dict; confirm this is acceptable.
+        """
+        self._logger.debug("call('%s')" % method)
+        req_id = self.send(method, params)
+        try:
+            response = self.recv()
+        except JSONRPCException as e:
+            """ Don't expect response to kill """
+            # spdk_kill_instance legitimately tears the connection down before
+            # replying, so a dropped connection is not an error for it.
+            if not self.sock and method == "spdk_kill_instance":
+                self._logger.info("Connection terminated but ignoring since method is '%s'" % method)
+                return {}
+            else:
+                raise e
+
+        if 'error' in response:
+            params["method"] = method
+            params["req_id"] = req_id
+            msg = "\n".join(["request:", "%s" % json.dumps(params, indent=2),
+                             "Got JSON-RPC error response",
+                             "response:",
+                             json.dumps(response['error'], indent=2)])
+            raise JSONRPCException(msg)
+
+        return response['result']
diff --git a/src/spdk/scripts/rpc/env_dpdk.py b/src/spdk/scripts/rpc/env_dpdk.py
new file mode 100644
index 000000000..f2c098e52
--- /dev/null
+++ b/src/spdk/scripts/rpc/env_dpdk.py
@@ -0,0 +1,8 @@
+def env_dpdk_get_mem_stats(client):
+    """Dump the applications memory stats to a file.
+
+    Returns:
+        The path to the file where the stats are written.
+    """
+
+    # No parameters: the server chooses the output file location.
+    return client.call('env_dpdk_get_mem_stats')
diff --git a/src/spdk/scripts/rpc/helpers.py b/src/spdk/scripts/rpc/helpers.py
new file mode 100644
index 000000000..d931fcf14
--- /dev/null
+++ b/src/spdk/scripts/rpc/helpers.py
@@ -0,0 +1,16 @@
+import sys
+
+# Maps each registered old (deprecated) name to its current function name.
+deprecated_aliases = {}
+
+
+def deprecated_alias(old_name):
+    """Decorator: expose the wrapped function under 'old_name' as well.
+
+    The alias calls through to the real function, then prints a deprecation
+    warning on stderr.  The alias is installed into the function's defining
+    module and recorded in 'deprecated_aliases'; the original function is
+    returned unchanged, so the decorator can be stacked for several aliases.
+    """
+    def wrap(f):
+        def old_f(*args, **kwargs):
+            ret = f(*args, **kwargs)
+            print("{} is deprecated, use {} instead.".format(old_name, f.__name__), file=sys.stderr)
+            return ret
+        old_f.__name__ = old_name
+        deprecated_aliases[old_name] = f.__name__
+        setattr(sys.modules[f.__module__], old_name, old_f)
+        return f
+    return wrap
diff --git a/src/spdk/scripts/rpc/idxd.py b/src/spdk/scripts/rpc/idxd.py
new file mode 100644
index 000000000..3e076c68e
--- /dev/null
+++ b/src/spdk/scripts/rpc/idxd.py
@@ -0,0 +1,8 @@
+def idxd_scan_accel_engine(client, config_number):
+    """Scan and enable IDXD accel engine.
+
+    Args:
+        config_number: Pre-defined configuration number, see docs.
+    """
+    params = {'config_number': config_number}
+    return client.call('idxd_scan_accel_engine', params)
diff --git a/src/spdk/scripts/rpc/ioat.py b/src/spdk/scripts/rpc/ioat.py
new file mode 100644
index 000000000..ae43a3c3f
--- /dev/null
+++ b/src/spdk/scripts/rpc/ioat.py
@@ -0,0 +1,17 @@
+from .helpers import deprecated_alias
+
+
+@deprecated_alias('ioat_scan_copy_engine')
+@deprecated_alias('scan_ioat_copy_engine')
+def ioat_scan_accel_engine(client, pci_whitelist):
+    """Scan and enable IOAT accel engine.
+
+    Args:
+        pci_whitelist: Python list of PCI addresses in
+                       domain:bus:device.function format or
+                       domain.bus.device.function format;
+                       omitted from the request when empty/falsy
+    """
+    params = {}
+    if pci_whitelist:
+        params['pci_whitelist'] = pci_whitelist
+    return client.call('ioat_scan_accel_engine', params)
diff --git a/src/spdk/scripts/rpc/iscsi.py b/src/spdk/scripts/rpc/iscsi.py
new file mode 100644
index 000000000..6d64b6185
--- /dev/null
+++ b/src/spdk/scripts/rpc/iscsi.py
@@ -0,0 +1,558 @@
+from .helpers import deprecated_alias
+
+
+@deprecated_alias('set_iscsi_options')
def iscsi_set_options(
        client,
        auth_file=None,
        node_base=None,
        nop_timeout=None,
        nop_in_interval=None,
        disable_chap=None,
        require_chap=None,
        mutual_chap=None,
        chap_group=None,
        max_sessions=None,
        max_queue_depth=None,
        max_connections_per_session=None,
        default_time2wait=None,
        default_time2retain=None,
        first_burst_length=None,
        immediate_data=None,
        error_recovery_level=None,
        allow_duplicated_isid=None):
    """Set global iSCSI target options.

    Args:
        auth_file: path to CHAP shared secret file (optional)
        node_base: prefix for iSCSI target node names (optional)
        nop_timeout: seconds before a nop-in request times out (optional)
        nop_in_interval: seconds between nop-in requests (optional)
        disable_chap: disable CHAP for discovery sessions (optional)
        require_chap: require CHAP for discovery sessions (optional)
        mutual_chap: require mutual CHAP for discovery sessions (optional)
        chap_group: authentication group ID for discovery sessions (optional)
        max_sessions: maximum sessions on this host (optional)
        max_queue_depth: maximum outstanding I/Os per queue (optional)
        max_connections_per_session: negotiated MaxConnections (optional)
        default_time2wait: negotiated DefaultTime2Wait (optional)
        default_time2retain: negotiated DefaultTime2Retain (optional)
        first_burst_length: negotiated FirstBurstLength (optional)
        immediate_data: negotiated ImmediateData (optional)
        error_recovery_level: negotiated ErrorRecoveryLevel (optional)
        allow_duplicated_isid: allow duplicated initiator session IDs (optional)

    Returns:
        True or False
    """
    candidates = {
        'auth_file': auth_file,
        'node_base': node_base,
        'nop_timeout': nop_timeout,
        'nop_in_interval': nop_in_interval,
        'disable_chap': disable_chap,
        'require_chap': require_chap,
        'mutual_chap': mutual_chap,
        'chap_group': chap_group,
        'max_sessions': max_sessions,
        'max_queue_depth': max_queue_depth,
        'max_connections_per_session': max_connections_per_session,
        'default_time2wait': default_time2wait,
        'default_time2retain': default_time2retain,
        'first_burst_length': first_burst_length,
        'immediate_data': immediate_data,
        'error_recovery_level': error_recovery_level,
        'allow_duplicated_isid': allow_duplicated_isid,
    }
    # NOTE(review): falsy values (0, False) are intentionally dropped,
    # matching the original per-argument truthiness checks.
    params = {key: value for key, value in candidates.items() if value}
    return client.call('iscsi_set_options', params)
+
+
+@deprecated_alias('set_iscsi_discovery_auth')
def iscsi_set_discovery_auth(
        client,
        disable_chap=None,
        require_chap=None,
        mutual_chap=None,
        chap_group=None):
    """Configure CHAP authentication for the discovery service.

    Args:
        disable_chap: disable CHAP for discovery sessions (optional)
        require_chap: require CHAP for discovery sessions (optional)
        mutual_chap: require mutual CHAP for discovery sessions (optional)
        chap_group: authentication group ID for discovery sessions (optional)

    Returns:
        True or False
    """
    auth = {
        'disable_chap': disable_chap,
        'require_chap': require_chap,
        'mutual_chap': mutual_chap,
        'chap_group': chap_group,
    }
    params = {key: value for key, value in auth.items() if value}
    return client.call('iscsi_set_discovery_auth', params)
+
+
+@deprecated_alias('get_iscsi_auth_groups')
def iscsi_get_auth_groups(client):
    """Show the currently configured authentication groups.

    Returns:
        List of authentication group configurations.
    """
    return client.call('iscsi_get_auth_groups')
+
+
+@deprecated_alias('get_portal_groups')
def iscsi_get_portal_groups(client):
    """Show the currently configured portal groups.

    Returns:
        List of portal group configurations.
    """
    return client.call('iscsi_get_portal_groups')
+
+
+@deprecated_alias('get_initiator_groups')
def iscsi_get_initiator_groups(client):
    """Show the currently configured initiator groups.

    Returns:
        List of initiator group configurations.
    """
    return client.call('iscsi_get_initiator_groups')
+
+
+@deprecated_alias('get_target_nodes')
def iscsi_get_target_nodes(client):
    """Show the configured iSCSI target nodes.

    Returns:
        List of iSCSI target node objects.
    """
    return client.call('iscsi_get_target_nodes')
+
+
+@deprecated_alias('construct_target_node')
def iscsi_create_target_node(
        client,
        luns,
        pg_ig_maps,
        name,
        alias_name,
        queue_depth,
        chap_group=None,
        disable_chap=None,
        require_chap=None,
        mutual_chap=None,
        header_digest=None,
        data_digest=None):
    """Create an iSCSI target node.

    Args:
        luns: list of bdev/LUN pairs, e.g. [{"bdev_name": "Malloc1", "lun_id": 1}]
        pg_ig_maps: list of PG-IG mappings, e.g. [{"pg_tag": pg, "ig_tag": ig}]
        name: target node name (ASCII)
        alias_name: target node alias (ASCII)
        queue_depth: desired queue depth for the node
        chap_group: authentication group ID (optional)
        disable_chap: disable CHAP for this node (optional)
        require_chap: require CHAP for this node (optional)
        mutual_chap: require mutual CHAP for this node (optional)
        header_digest: require HeaderDigest (optional)
        data_digest: require DataDigest (optional)

    Returns:
        True or False
    """
    params = {
        'name': name,
        'alias_name': alias_name,
        'pg_ig_maps': pg_ig_maps,
        'luns': luns,
        'queue_depth': queue_depth,
    }
    optional = {
        'chap_group': chap_group,
        'disable_chap': disable_chap,
        'require_chap': require_chap,
        'mutual_chap': mutual_chap,
        'header_digest': header_digest,
        'data_digest': data_digest,
    }
    # Only truthy optional values are forwarded (same as the original).
    params.update({key: value for key, value in optional.items() if value})
    return client.call('iscsi_create_target_node', params)
+
+
+@deprecated_alias('target_node_add_lun')
def iscsi_target_node_add_lun(client, name, bdev_name, lun_id=None):
    """Attach a LUN backed by *bdev_name* to an existing target node.

    Args:
        name: target node name (ASCII)
        bdev_name: name of the backing bdev
        lun_id: LUN ID (integer >= 0)

    Returns:
        True or False
    """
    params = {'name': name, 'bdev_name': bdev_name}
    # NOTE(review): lun_id=0 is dropped by this truthiness test just like in
    # the original; confirm the target then assigns the ID itself.
    if lun_id:
        params['lun_id'] = lun_id
    return client.call('iscsi_target_node_add_lun', params)
+
+
+@deprecated_alias('set_iscsi_target_node_auth')
def iscsi_target_node_set_auth(
        client,
        name,
        chap_group=None,
        disable_chap=None,
        require_chap=None,
        mutual_chap=None):
    """Configure CHAP authentication for a target node.

    Args:
        name: target node name (ASCII)
        chap_group: authentication group ID (optional)
        disable_chap: disable CHAP for this node (optional)
        require_chap: require CHAP for this node (optional)
        mutual_chap: require mutual CHAP for this node (optional)

    Returns:
        True or False
    """
    params = {'name': name}
    auth = {
        'chap_group': chap_group,
        'disable_chap': disable_chap,
        'require_chap': require_chap,
        'mutual_chap': mutual_chap,
    }
    params.update({key: value for key, value in auth.items() if value})
    return client.call('iscsi_target_node_set_auth', params)
+
+
+@deprecated_alias('add_iscsi_auth_group')
def iscsi_create_auth_group(client, tag, secrets=None):
    """Create a CHAP authentication group.

    Args:
        tag: authentication group tag (unique, integer > 0)
        secrets: list of secret objects (optional)

    Returns:
        True or False
    """
    params = {'tag': tag}
    if secrets:
        params['secrets'] = secrets
    return client.call('iscsi_create_auth_group', params)
+
+
+@deprecated_alias('delete_iscsi_auth_group')
def iscsi_delete_auth_group(client, tag):
    """Delete the authentication group identified by *tag*.

    Args:
        tag: authentication group tag (unique, integer > 0)

    Returns:
        True or False
    """
    return client.call('iscsi_delete_auth_group', {'tag': tag})
+
+
+@deprecated_alias('add_secret_to_iscsi_auth_group')
def iscsi_auth_group_add_secret(client, tag, user, secret, muser=None, msecret=None):
    """Add a CHAP secret to an authentication group.

    Args:
        tag: authentication group tag (unique, integer > 0)
        user: user name for one-way CHAP
        secret: secret for one-way CHAP
        muser: user name for mutual CHAP (optional)
        msecret: secret for mutual CHAP (optional)

    Returns:
        True or False
    """
    params = {'tag': tag, 'user': user, 'secret': secret}
    params.update({key: value
                   for key, value in (('muser', muser), ('msecret', msecret))
                   if value})
    return client.call('iscsi_auth_group_add_secret', params)
+
+
+@deprecated_alias('delete_secret_from_iscsi_auth_group')
def iscsi_auth_group_remove_secret(client, tag, user):
    """Remove the secret belonging to *user* from an authentication group.

    Args:
        tag: authentication group tag (unique, integer > 0)
        user: user name for one-way CHAP

    Returns:
        True or False
    """
    return client.call('iscsi_auth_group_remove_secret', {'tag': tag, 'user': user})
+
+
+@deprecated_alias('delete_pg_ig_maps')
def iscsi_target_node_remove_pg_ig_maps(client, pg_ig_maps, name):
    """Remove PG-IG mappings from a target node.

    Args:
        pg_ig_maps: list of mappings, e.g. [{"pg_tag": pg, "ig_tag": ig}]
        name: target node name (ASCII)

    Returns:
        True or False
    """
    return client.call('iscsi_target_node_remove_pg_ig_maps',
                       {'name': name, 'pg_ig_maps': pg_ig_maps})
+
+
+@deprecated_alias('add_pg_ig_maps')
def iscsi_target_node_add_pg_ig_maps(client, pg_ig_maps, name):
    """Add PG-IG mappings to a target node.

    Args:
        pg_ig_maps: list of mappings, e.g. [{"pg_tag": pg, "ig_tag": ig}]
        name: target node name (ASCII)

    Returns:
        True or False
    """
    return client.call('iscsi_target_node_add_pg_ig_maps',
                       {'name': name, 'pg_ig_maps': pg_ig_maps})
+
+
+@deprecated_alias('add_portal_group')
def iscsi_create_portal_group(client, portals, tag):
    """Create a portal group.

    Args:
        portals: list of portals, e.g. [{'host': ip, 'port': port}]
        tag: portal group tag (unique, integer > 0)

    Returns:
        True or False
    """
    return client.call('iscsi_create_portal_group', {'tag': tag, 'portals': portals})
+
+
+@deprecated_alias('add_initiator_group')
def iscsi_create_initiator_group(client, tag, initiators, netmasks):
    """Create an initiator group.

    Args:
        tag: initiator group tag (unique, integer > 0)
        initiators: list of initiator hostnames or IPs
        netmasks: list of initiator netmasks

    Returns:
        True or False
    """
    return client.call('iscsi_create_initiator_group',
                       {'tag': tag, 'initiators': initiators, 'netmasks': netmasks})
+
+
+@deprecated_alias('add_initiators_to_initiator_group')
def iscsi_initiator_group_add_initiators(
        client,
        tag,
        initiators=None,
        netmasks=None):
    """Add initiators to an existing initiator group.

    Args:
        tag: initiator group tag (unique, integer > 0)
        initiators: list of initiator hostnames or IPs (optional)
        netmasks: list of initiator netmasks (optional)

    Returns:
        True or False
    """
    params = {'tag': tag}
    params.update({key: value
                   for key, value in (('initiators', initiators), ('netmasks', netmasks))
                   if value})
    return client.call('iscsi_initiator_group_add_initiators', params)
+
+
+@deprecated_alias('delete_initiators_from_initiator_group')
def iscsi_initiator_group_remove_initiators(
        client, tag, initiators=None, netmasks=None):
    """Remove initiators from an existing initiator group.

    Args:
        tag: initiator group tag (unique, integer > 0)
        initiators: list of initiator hostnames or IPs (optional)
        netmasks: list of initiator netmasks (optional)

    Returns:
        True or False
    """
    params = {'tag': tag}
    params.update({key: value
                   for key, value in (('initiators', initiators), ('netmasks', netmasks))
                   if value})
    return client.call('iscsi_initiator_group_remove_initiators', params)
+
+
+@deprecated_alias('delete_target_node')
def iscsi_delete_target_node(client, target_node_name):
    """Delete a target node.

    Args:
        target_node_name: name of the node to delete,
            e.g. iqn.2016-06.io.spdk:disk1

    Returns:
        True or False
    """
    return client.call('iscsi_delete_target_node', {'name': target_node_name})
+
+
+@deprecated_alias('delete_portal_group')
def iscsi_delete_portal_group(client, tag):
    """Delete the portal group identified by *tag*.

    Args:
        tag: portal group tag (unique, integer > 0)

    Returns:
        True or False
    """
    return client.call('iscsi_delete_portal_group', {'tag': tag})
+
+
+@deprecated_alias('delete_initiator_group')
def iscsi_delete_initiator_group(client, tag):
    """Delete the initiator group identified by *tag*.

    Args:
        tag: initiator group tag (unique, integer > 0)

    Returns:
        True or False
    """
    return client.call('iscsi_delete_initiator_group', {'tag': tag})
+
+
def iscsi_portal_group_set_auth(
        client,
        tag,
        chap_group=None,
        disable_chap=None,
        require_chap=None,
        mutual_chap=None):
    """Configure CHAP authentication for discovery sessions of one portal group.

    Args:
        tag: portal group tag (unique, integer > 0)
        chap_group: authentication group ID (optional)
        disable_chap: disable CHAP for this portal group (optional)
        require_chap: require CHAP for this portal group (optional)
        mutual_chap: require mutual CHAP (optional)

    Returns:
        True or False
    """
    params = {'tag': tag}
    auth = {
        'chap_group': chap_group,
        'disable_chap': disable_chap,
        'require_chap': require_chap,
        'mutual_chap': mutual_chap,
    }
    params.update({key: value for key, value in auth.items() if value})
    return client.call('iscsi_portal_group_set_auth', params)
+
+
+@deprecated_alias('get_iscsi_connections')
def iscsi_get_connections(client):
    """Show active iSCSI connections.

    Returns:
        List of iSCSI connections.
    """
    return client.call('iscsi_get_connections')
+
+
+@deprecated_alias('get_iscsi_global_params')
def iscsi_get_options(client):
    """Show the global iSCSI target parameters.

    Returns:
        List of iSCSI global parameters.
    """
    return client.call('iscsi_get_options')
+
+
+@deprecated_alias('get_iscsi_devices')
def scsi_get_devices(client):
    """Show the SCSI devices known to the target.

    Returns:
        List of SCSI devices.
    """
    return client.call('scsi_get_devices')
diff --git a/src/spdk/scripts/rpc/log.py b/src/spdk/scripts/rpc/log.py
new file mode 100644
index 000000000..ee40bf833
--- /dev/null
+++ b/src/spdk/scripts/rpc/log.py
@@ -0,0 +1,75 @@
+from .helpers import deprecated_alias
+
+
+@deprecated_alias('set_log_flag')
def log_set_flag(client, flag):
    """Enable the given log flag (for example "nvme").

    Args:
        flag: log flag to enable
    """
    return client.call('log_set_flag', {'flag': flag})
+
+
+@deprecated_alias('clear_log_flag')
def log_clear_flag(client, flag):
    """Disable the given log flag (for example "nvme").

    Args:
        flag: log flag to clear
    """
    return client.call('log_clear_flag', {'flag': flag})
+
+
+@deprecated_alias('get_log_flags')
def log_get_flags(client):
    """Return the list of log flags and their state."""
    return client.call('log_get_flags')
+
+
+@deprecated_alias('set_log_level')
def log_set_level(client, level):
    """Set the log level (for example "DEBUG").

    Args:
        level: log level to set
    """
    return client.call('log_set_level', {'level': level})
+
+
+@deprecated_alias('get_log_level')
def log_get_level(client):
    """Return the current log level."""
    return client.call('log_get_level')
+
+
+@deprecated_alias('set_log_print_level')
def log_set_print_level(client, level):
    """Set the log print level (for example "DEBUG").

    Args:
        level: log print level to set
    """
    return client.call('log_set_print_level', {'level': level})
+
+
+@deprecated_alias('get_log_print_level')
def log_get_print_level(client):
    """Return the current log print level."""
    return client.call('log_get_print_level')
diff --git a/src/spdk/scripts/rpc/lvol.py b/src/spdk/scripts/rpc/lvol.py
new file mode 100644
index 000000000..1c31f5eda
--- /dev/null
+++ b/src/spdk/scripts/rpc/lvol.py
@@ -0,0 +1,228 @@
+from .helpers import deprecated_alias
+
+
+@deprecated_alias('construct_lvol_store')
def bdev_lvol_create_lvstore(client, bdev_name, lvs_name, cluster_sz=None, clear_method=None):
    """Create a logical volume store on top of a bdev.

    Args:
        bdev_name: bdev to build the lvstore on
        lvs_name: name of the new lvstore
        cluster_sz: cluster size in bytes (optional)
        clear_method: data-region clear method; one of
            none, unmap, write_zeroes (optional)

    Returns:
        UUID of the created logical volume store.
    """
    params = {'bdev_name': bdev_name, 'lvs_name': lvs_name}
    params.update({key: value
                   for key, value in (('cluster_sz', cluster_sz),
                                      ('clear_method', clear_method))
                   if value})
    return client.call('bdev_lvol_create_lvstore', params)
+
+
+@deprecated_alias('rename_lvol_store')
def bdev_lvol_rename_lvstore(client, old_name, new_name):
    """Rename a logical volume store.

    Args:
        old_name: current lvstore name
        new_name: new lvstore name
    """
    return client.call('bdev_lvol_rename_lvstore',
                       {'old_name': old_name, 'new_name': new_name})
+
+
+@deprecated_alias('construct_lvol_bdev')
def bdev_lvol_create(client, lvol_name, size, thin_provision=False, uuid=None, lvs_name=None, clear_method=None):
    """Create a logical volume on a logical volume store.

    Args:
        lvol_name: name of logical volume to create
        size: desired size of logical volume in bytes (will be rounded up to a multiple of cluster size)
        thin_provision: True to enable thin provisioning
        uuid: UUID of logical volume store to create logical volume on (optional)
        lvs_name: name of logical volume store to create logical volume on (optional)
        clear_method: clear method override for the lvol; one of
            none, unmap, write_zeroes (optional)

    Either uuid or lvs_name must be specified, but not both.

    Returns:
        Name of created logical volume block device.

    Raises:
        ValueError: if neither or both of uuid/lvs_name are given.
    """
    if (uuid and lvs_name) or (not uuid and not lvs_name):
        raise ValueError("Either uuid or lvs_name must be specified, but not both")

    params = {'lvol_name': lvol_name, 'size': size}
    if thin_provision:
        params['thin_provision'] = thin_provision
    if uuid:
        params['uuid'] = uuid
    if lvs_name:
        params['lvs_name'] = lvs_name
    if clear_method:
        params['clear_method'] = clear_method
    return client.call('bdev_lvol_create', params)
+
+
+@deprecated_alias('snapshot_lvol_bdev')
def bdev_lvol_snapshot(client, lvol_name, snapshot_name):
    """Snapshot the current state of a logical volume.

    Args:
        lvol_name: logical volume to snapshot
        snapshot_name: name of the new snapshot

    Returns:
        Name of the created snapshot.
    """
    return client.call('bdev_lvol_snapshot',
                       {'lvol_name': lvol_name, 'snapshot_name': snapshot_name})
+
+
+@deprecated_alias('clone_lvol_bdev')
def bdev_lvol_clone(client, snapshot_name, clone_name):
    """Clone a logical volume from a snapshot.

    Args:
        snapshot_name: snapshot to clone from
        clone_name: name of the new logical volume

    Returns:
        Name of the created clone.
    """
    return client.call('bdev_lvol_clone',
                       {'snapshot_name': snapshot_name, 'clone_name': clone_name})
+
+
+@deprecated_alias('rename_lvol_bdev')
def bdev_lvol_rename(client, old_name, new_name):
    """Rename a logical volume.

    Args:
        old_name: current logical volume name
        new_name: new logical volume name
    """
    return client.call('bdev_lvol_rename',
                       {'old_name': old_name, 'new_name': new_name})
+
+
+@deprecated_alias('resize_lvol_bdev')
def bdev_lvol_resize(client, name, size):
    """Resize a logical volume.

    Args:
        name: logical volume to resize
        size: new size in bytes (rounded up to a multiple of cluster size)
    """
    return client.call('bdev_lvol_resize', {'name': name, 'size': size})
+
+
+@deprecated_alias('set_read_only_lvol_bdev')
def bdev_lvol_set_read_only(client, name):
    """Mark a logical volume read-only.

    Args:
        name: logical volume to mark read-only
    """
    return client.call('bdev_lvol_set_read_only', {'name': name})
+
+
+@deprecated_alias('destroy_lvol_bdev')
def bdev_lvol_delete(client, name):
    """Destroy a logical volume.

    Args:
        name: logical volume to destroy
    """
    return client.call('bdev_lvol_delete', {'name': name})
+
+
+@deprecated_alias('inflate_lvol_bdev')
def bdev_lvol_inflate(client, name):
    """Inflate a logical volume (allocate all of its clusters).

    Args:
        name: logical volume to inflate
    """
    return client.call('bdev_lvol_inflate', {'name': name})
+
+
+@deprecated_alias('decouple_parent_lvol_bdev')
def bdev_lvol_decouple_parent(client, name):
    """Decouple a logical volume from its parent.

    Args:
        name: logical volume to decouple
    """
    return client.call('bdev_lvol_decouple_parent', {'name': name})
+
+
+@deprecated_alias('destroy_lvol_store')
def bdev_lvol_delete_lvstore(client, uuid=None, lvs_name=None):
    """Destroy a logical volume store.

    Exactly one of *uuid* or *lvs_name* must be given.

    Args:
        uuid: UUID of the lvstore to destroy (optional)
        lvs_name: name of the lvstore to destroy (optional)

    Raises:
        ValueError: if neither or both identifiers are given.
    """
    if (uuid and lvs_name) or (not uuid and not lvs_name):
        raise ValueError("Exactly one of uuid or lvs_name must be specified")

    params = {key: value
              for key, value in (('uuid', uuid), ('lvs_name', lvs_name))
              if value}
    return client.call('bdev_lvol_delete_lvstore', params)
+
+
+@deprecated_alias('get_lvol_stores')
def bdev_lvol_get_lvstores(client, uuid=None, lvs_name=None):
    """List logical volume stores.

    Args:
        uuid: UUID of logical volume store to retrieve information about (optional)
        lvs_name: name of logical volume store to retrieve information about (optional)

    Either uuid or lvs_name may be specified, but not both.
    If both uuid and lvs_name are omitted, information about all logical volume stores is returned.

    Raises:
        ValueError: if both uuid and lvs_name are given.
    """
    if (uuid and lvs_name):
        # Fix: the old message claimed "exactly one" was required, but
        # supplying neither is valid (lists every lvstore); only "both" is an error.
        raise ValueError("Only one of uuid or lvs_name may be specified")
    params = {}
    if uuid:
        params['uuid'] = uuid
    if lvs_name:
        params['lvs_name'] = lvs_name
    return client.call('bdev_lvol_get_lvstores', params)
diff --git a/src/spdk/scripts/rpc/nbd.py b/src/spdk/scripts/rpc/nbd.py
new file mode 100644
index 000000000..55e57d11e
--- /dev/null
+++ b/src/spdk/scripts/rpc/nbd.py
@@ -0,0 +1,25 @@
+from .helpers import deprecated_alias
+
+
+@deprecated_alias('start_nbd_disk')
def nbd_start_disk(client, bdev_name, nbd_device=None):
    """Export a bdev over an NBD device.

    Args:
        bdev_name: name of the bdev to export
        nbd_device: /dev/nbdX path to bind (optional; the body already
            tolerated a falsy value, so the default makes that explicit —
            presumably the target then picks a device itself, TODO confirm
            against the nbd_start_disk RPC docs)

    Returns:
        The RPC result (the NBD device path on success).
    """
    params = {
        'bdev_name': bdev_name
    }
    if nbd_device:
        params['nbd_device'] = nbd_device
    return client.call('nbd_start_disk', params)
+
+
+@deprecated_alias('stop_nbd_disk')
def nbd_stop_disk(client, nbd_device):
    """Stop exporting the given NBD device."""
    return client.call('nbd_stop_disk', {'nbd_device': nbd_device})
+
+
+@deprecated_alias('get_nbd_disks')
def nbd_get_disks(client, nbd_device=None):
    """List exported NBD disks, optionally filtered to one device."""
    params = {'nbd_device': nbd_device} if nbd_device else {}
    return client.call('nbd_get_disks', params)
diff --git a/src/spdk/scripts/rpc/net.py b/src/spdk/scripts/rpc/net.py
new file mode 100644
index 000000000..7c479bba8
--- /dev/null
+++ b/src/spdk/scripts/rpc/net.py
@@ -0,0 +1,35 @@
+from .helpers import deprecated_alias
+
+
+@deprecated_alias('add_ip_address')
def net_interface_add_ip_address(client, ifc_index, ip_addr):
    """Add an IP address to a network interface.

    Args:
        ifc_index: interface index of the NIC (int)
        ip_addr: IP address to add
    """
    return client.call('net_interface_add_ip_address',
                       {'ifc_index': ifc_index, 'ip_address': ip_addr})
+
+
+@deprecated_alias('delete_ip_address')
def net_interface_delete_ip_address(client, ifc_index, ip_addr):
    """Remove an IP address from a network interface.

    Args:
        ifc_index: interface index of the NIC (int)
        ip_addr: IP address to remove
    """
    return client.call('net_interface_delete_ip_address',
                       {'ifc_index': ifc_index, 'ip_address': ip_addr})
+
+
+@deprecated_alias('get_interfaces')
def net_get_interfaces(client):
    """Return the current list of network interfaces."""
    return client.call('net_get_interfaces')
diff --git a/src/spdk/scripts/rpc/notify.py b/src/spdk/scripts/rpc/notify.py
new file mode 100644
index 000000000..4907447c0
--- /dev/null
+++ b/src/spdk/scripts/rpc/notify.py
@@ -0,0 +1,30 @@
+from .helpers import deprecated_alias
+
+
+@deprecated_alias('get_notification_types')
def notify_get_types(client):
    """Return the notification types supported by the target."""
    return client.call("notify_get_types")
+
+
+@deprecated_alias('get_notifications')
def notify_get_notifications(client,
                             id=None,
                             max=None):
    """Fetch notifications from the target.

    Args:
        id: first notification ID to fetch from (optional)
        max: maximum number of notifications to return (optional)

    Return:
        Notifications array
    """
    # NOTE: `id` and `max` shadow builtins but are kept — they are part of
    # the public keyword interface of this RPC wrapper.
    params = {key: value
              for key, value in (('id', id), ('max', max))
              if value}
    return client.call("notify_get_notifications", params)
diff --git a/src/spdk/scripts/rpc/nvme.py b/src/spdk/scripts/rpc/nvme.py
new file mode 100644
index 000000000..e9a0ba6bb
--- /dev/null
+++ b/src/spdk/scripts/rpc/nvme.py
@@ -0,0 +1,87 @@
+from .helpers import deprecated_alias
+
+
+@deprecated_alias('send_nvme_cmd')
def bdev_nvme_send_cmd(client, name, cmd_type, data_direction, cmdbuf,
                       data=None, metadata=None,
                       data_len=None, metadata_len=None,
                       timeout_ms=None):
    """Send a raw NVMe command to a controller.

    Args:
        name: name of the NVMe controller to operate on
        cmd_type: command type; one of: admin, io
        data_direction: transfer direction; one of: c2h, h2c
        cmdbuf: NVMe command, base64-urlsafe encoded
        data: host-to-controller data, base64-urlsafe encoded (optional)
        metadata: host-to-controller metadata, base64-urlsafe encoded (optional)
        data_len: length of data expected from the controller (optional)
        metadata_len: length of metadata expected from the controller (optional)
        timeout_ms: command timeout in milliseconds; 0 disables tracking (optional)

    Returns:
        NVMe completion queue entry plus any requested data/metadata,
        base64-urlsafe encoded.
    """
    params = {'name': name,
              'cmd_type': cmd_type,
              'data_direction': data_direction,
              'cmdbuf': cmdbuf}
    optional = {
        'data': data,
        'metadata': metadata,
        'data_len': data_len,
        'metadata_len': metadata_len,
        'timeout_ms': timeout_ms,
    }
    params.update({key: value for key, value in optional.items() if value})
    return client.call('bdev_nvme_send_cmd', params)
+
+
+@deprecated_alias('get_nvme_controllers')
def bdev_nvme_get_controllers(client, name=None):
    """Query NVMe controllers.

    Args:
        name: controller to query; omit to query all controllers (optional)

    Returns:
        List of NVMe controller information objects.
    """
    params = {'name': name} if name else {}
    return client.call('bdev_nvme_get_controllers', params)
+
+
def bdev_nvme_opal_init(client, nvme_ctrlr_name, password):
    """Initialize Opal on an NVMe controller (take ownership and activate).

    Args:
        nvme_ctrlr_name: name of the NVMe controller
        password: password used to initialize Opal
    """
    return client.call('bdev_nvme_opal_init',
                       {'nvme_ctrlr_name': nvme_ctrlr_name, 'password': password})
+
+
def bdev_nvme_opal_revert(client, nvme_ctrlr_name, password):
    """Revert Opal to factory settings, erasing all data.

    Args:
        nvme_ctrlr_name: name of the NVMe controller
        password: Opal password
    """
    return client.call('bdev_nvme_opal_revert',
                       {'nvme_ctrlr_name': nvme_ctrlr_name, 'password': password})
diff --git a/src/spdk/scripts/rpc/nvmf.py b/src/spdk/scripts/rpc/nvmf.py
new file mode 100644
index 000000000..7b2bc3bb6
--- /dev/null
+++ b/src/spdk/scripts/rpc/nvmf.py
@@ -0,0 +1,483 @@
+from .helpers import deprecated_alias
+
+
@deprecated_alias('set_nvmf_target_max_subsystems')
def nvmf_set_max_subsystems(client,
                            max_subsystems=None):
    """Set the maximum number of NVMe-oF subsystems.

    Args:
        max_subsystems: Maximum number of NVMe-oF subsystems (e.g. 1024)

    Returns:
        True or False
    """
    params = {}

    # Only forward the option when it was actually supplied; the original
    # code always set it, sending an explicit JSON null when the caller
    # left max_subsystems at its default of None.
    if max_subsystems is not None:
        params['max_subsystems'] = max_subsystems
    return client.call('nvmf_set_max_subsystems', params)
+
+
@deprecated_alias('set_nvmf_target_config')
def nvmf_set_config(client,
                    acceptor_poll_rate=None,
                    conn_sched=None,
                    passthru_identify_ctrlr=None):
    """Set NVMe-oF target subsystem configuration.

    Args:
        acceptor_poll_rate: acceptor poll period in microseconds (optional)
        conn_sched: (deprecated) ignored
        passthru_identify_ctrlr: pass Identify-Controller commands through (optional)

    Returns:
        True or False
    """
    params = {}
    if acceptor_poll_rate:
        params['acceptor_poll_rate'] = acceptor_poll_rate
    if conn_sched:
        # Option kept for backward compatibility only.
        print("WARNING: conn_sched is deprecated and ignored.")
    if passthru_identify_ctrlr:
        params['admin_cmd_passthru'] = {'identify_ctrlr': passthru_identify_ctrlr}
    return client.call('nvmf_set_config', params)
+
+
def nvmf_create_target(client,
                       name,
                       max_subsystems=0):
    """Create a new NVMe-oF target.

    Args:
        name: target name; must be unique within the application
        max_subsystems: maximum subsystem count; 0 selects SPDK_NVMF_DEFAULT_MAX_SUBSYSTEMS

    Returns:
        The name of the new target.
    """
    params = {'name': name, 'max_subsystems': max_subsystems}
    return client.call("nvmf_create_target", params)
+
+
def nvmf_delete_target(client,
                       name):
    """Destroy an NVMe-oF target.

    Args:
        name: name of the target to destroy

    Returns:
        True on success or False
    """
    return client.call("nvmf_delete_target", {'name': name})
+
+
def nvmf_get_targets(client):
    """List the names of every NVMe-oF target in this application.

    Returns:
        An array of target names.
    """
    return client.call("nvmf_get_targets")
+
+
def nvmf_create_transport(client,
                          trtype,
                          tgt_name=None,
                          max_queue_depth=None,
                          max_qpairs_per_ctrlr=None,
                          max_io_qpairs_per_ctrlr=None,
                          in_capsule_data_size=None,
                          max_io_size=None,
                          io_unit_size=None,
                          max_aq_depth=None,
                          num_shared_buffers=None,
                          buf_cache_size=None,
                          max_srq_depth=None,
                          no_srq=False,
                          c2h_success=True,
                          dif_insert_or_strip=None,
                          sock_priority=None,
                          acceptor_backlog=None,
                          abort_timeout_sec=None):
    """Create an NVMe-oF transport.

    Args:
        trtype: Transport type (ex. RDMA)
        max_queue_depth: Max number of outstanding I/O per queue (optional)
        max_qpairs_per_ctrlr: Max number of SQ and CQ per controller (optional, deprecated, use max_io_qpairs_per_ctrlr)
        max_io_qpairs_per_ctrlr: Max number of IO qpairs per controller (optional)
        in_capsule_data_size: Maximum in-capsule data size in bytes (optional)
        max_io_size: Maximum I/O data size in bytes (optional)
        io_unit_size: I/O unit size in bytes (optional)
        max_aq_depth: Max admin queue size per controller (optional)
        num_shared_buffers: Number of pooled data buffers available to the transport (optional)
        buf_cache_size: Number of shared buffers reserved for each poll group (optional)
        max_srq_depth: Max outstanding I/O per shared receive queue - RDMA specific (optional)
        no_srq: Disable SRQ even for devices that support it - RDMA specific (optional)
        c2h_success: Disable the C2H success optimization when False - TCP specific (optional)
        dif_insert_or_strip: Enable DIF insert/strip for I/O - TCP specific (optional)
        sock_priority: Socket priority - TCP specific (optional)
        acceptor_backlog: Pending connections allowed at one time - RDMA specific (optional)
        abort_timeout_sec: Abort execution timeout value, in seconds (optional)

    Returns:
        True or False
    """
    params = {'trtype': trtype}

    if max_qpairs_per_ctrlr:
        # Deprecated spelling; still forwarded for backward compatibility.
        print("WARNING: max_qpairs_per_ctrlr is deprecated, please use max_io_qpairs_per_ctrlr.")
        params['max_qpairs_per_ctrlr'] = max_qpairs_per_ctrlr

    # Options forwarded only when set to a truthy value.
    truthy_opts = {
        'tgt_name': tgt_name,
        'max_queue_depth': max_queue_depth,
        'max_io_qpairs_per_ctrlr': max_io_qpairs_per_ctrlr,
        'in_capsule_data_size': in_capsule_data_size,
        'max_io_size': max_io_size,
        'io_unit_size': io_unit_size,
        'max_aq_depth': max_aq_depth,
        'num_shared_buffers': num_shared_buffers,
        'buf_cache_size': buf_cache_size,
        'max_srq_depth': max_srq_depth,
        'no_srq': no_srq,
        'dif_insert_or_strip': dif_insert_or_strip,
        'sock_priority': sock_priority,
        'abort_timeout_sec': abort_timeout_sec,
    }
    params.update({key: value for key, value in truthy_opts.items() if value})

    # For these two, False / 0 are meaningful values that must still be sent.
    if c2h_success is not None:
        params['c2h_success'] = c2h_success
    if acceptor_backlog is not None:
        params['acceptor_backlog'] = acceptor_backlog

    return client.call('nvmf_create_transport', params)
+
+
@deprecated_alias('get_nvmf_transports')
def nvmf_get_transports(client, tgt_name=None):
    """Get list of NVMe-oF transports.

    Args:
        tgt_name: name of the parent NVMe-oF target (optional).

    Returns:
        List of NVMe-oF transport objects.
    """
    params = {'tgt_name': tgt_name} if tgt_name else {}
    return client.call('nvmf_get_transports', params)
+
+
@deprecated_alias('get_nvmf_subsystems')
def nvmf_get_subsystems(client, tgt_name=None):
    """Get list of NVMe-oF subsystems.

    Args:
        tgt_name: name of the parent NVMe-oF target (optional).

    Returns:
        List of NVMe-oF subsystem objects.
    """
    params = {'tgt_name': tgt_name} if tgt_name else {}
    return client.call('nvmf_get_subsystems', params)
+
+
@deprecated_alias('nvmf_subsystem_create')
def nvmf_create_subsystem(client,
                          nqn,
                          serial_number,
                          tgt_name=None,
                          model_number='SPDK bdev Controller',
                          allow_any_host=False,
                          max_namespaces=0):
    """Construct an NVMe over Fabrics target subsystem.

    Args:
        nqn: Subsystem NQN.
        serial_number: Serial number of virtual controller.
        tgt_name: name of the parent NVMe-oF target (optional).
        model_number: Model number of virtual controller.
        allow_any_host: Allow any host (True) or enforce the allowed host list (False). Default: False.
        max_namespaces: Maximum number of attachable namespaces (optional). Default: 0 (unlimited).

    Returns:
        True or False
    """
    params = {'nqn': nqn}

    # Optional string/int options, forwarded only when truthy.
    for key, value in (('serial_number', serial_number),
                       ('model_number', model_number),
                       ('max_namespaces', max_namespaces),
                       ('tgt_name', tgt_name)):
        if value:
            params[key] = value

    if allow_any_host:
        params['allow_any_host'] = True

    return client.call('nvmf_create_subsystem', params)
+
+
def nvmf_subsystem_add_listener(client, nqn, trtype, traddr, trsvcid, adrfam, tgt_name=None):
    """Add a new listen address to an NVMe-oF subsystem.

    Args:
        nqn: Subsystem NQN.
        trtype: Transport type ("RDMA").
        traddr: Transport address.
        trsvcid: Transport service ID.
        adrfam: Address family ("IPv4", "IPv6", "IB", or "FC").
        tgt_name: name of the parent NVMe-oF target (optional).

    Returns:
        True or False
    """
    listen_address = dict(trtype=trtype, traddr=traddr, trsvcid=trsvcid)
    if adrfam:
        listen_address['adrfam'] = adrfam

    params = dict(nqn=nqn, listen_address=listen_address)
    if tgt_name:
        params['tgt_name'] = tgt_name

    return client.call('nvmf_subsystem_add_listener', params)
+
+
def nvmf_subsystem_remove_listener(
        client,
        nqn,
        trtype,
        traddr,
        trsvcid,
        adrfam,
        tgt_name=None):
    """Remove an existing listen address from an NVMe-oF subsystem.

    Args:
        nqn: Subsystem NQN.
        trtype: Transport type ("RDMA").
        traddr: Transport address.
        trsvcid: Transport service ID.
        adrfam: Address family ("IPv4", "IPv6", "IB", or "FC").
        tgt_name: name of the parent NVMe-oF target (optional).

    Returns:
        True or False
    """
    listen_address = dict(trtype=trtype, traddr=traddr, trsvcid=trsvcid)
    if adrfam:
        listen_address['adrfam'] = adrfam

    params = dict(nqn=nqn, listen_address=listen_address)
    if tgt_name:
        params['tgt_name'] = tgt_name

    return client.call('nvmf_subsystem_remove_listener', params)
+
+
def nvmf_subsystem_add_ns(client, nqn, bdev_name, tgt_name=None, ptpl_file=None, nsid=None, nguid=None, eui64=None, uuid=None):
    """Add a namespace to a subsystem.

    Args:
        nqn: Subsystem NQN.
        bdev_name: Name of bdev to expose as a namespace.
        tgt_name: name of the parent NVMe-oF target (optional).
        ptpl_file: persist-through-power-loss file path (optional).
        nsid: Namespace ID (optional).
        nguid: 16-byte namespace globally unique identifier in hexadecimal (optional).
        eui64: 8-byte namespace EUI-64 in hexadecimal (e.g. "ABCDEF0123456789") (optional).
        uuid: Namespace UUID (optional).

    Returns:
        The namespace ID
    """
    ns = {'bdev_name': bdev_name}
    for key, value in (('ptpl_file', ptpl_file),
                       ('nsid', nsid),
                       ('nguid', nguid),
                       ('eui64', eui64),
                       ('uuid', uuid)):
        if value:
            ns[key] = value

    params = {'nqn': nqn, 'namespace': ns}
    if tgt_name:
        params['tgt_name'] = tgt_name

    return client.call('nvmf_subsystem_add_ns', params)
+
+
def nvmf_subsystem_remove_ns(client, nqn, nsid, tgt_name=None):
    """Remove an existing namespace from a subsystem.

    Args:
        nqn: Subsystem NQN.
        nsid: Namespace ID.
        tgt_name: name of the parent NVMe-oF target (optional).

    Returns:
        True or False
    """
    params = dict(nqn=nqn, nsid=nsid)
    if tgt_name:
        params['tgt_name'] = tgt_name
    return client.call('nvmf_subsystem_remove_ns', params)
+
+
def nvmf_subsystem_add_host(client, nqn, host, tgt_name=None):
    """Add a host NQN to the list of allowed hosts.

    Args:
        nqn: Subsystem NQN.
        host: Host NQN to add to the list of allowed host NQNs
        tgt_name: name of the parent NVMe-oF target (optional).

    Returns:
        True or False
    """
    params = dict(nqn=nqn, host=host)
    if tgt_name:
        params['tgt_name'] = tgt_name
    return client.call('nvmf_subsystem_add_host', params)
+
+
def nvmf_subsystem_remove_host(client, nqn, host, tgt_name=None):
    """Remove a host NQN from the list of allowed hosts.

    Args:
        nqn: Subsystem NQN.
        host: Host NQN to remove from the list of allowed host NQNs
        tgt_name: name of the parent NVMe-oF target (optional).

    Returns:
        True or False
    """
    params = dict(nqn=nqn, host=host)
    if tgt_name:
        params['tgt_name'] = tgt_name
    return client.call('nvmf_subsystem_remove_host', params)
+
+
def nvmf_subsystem_allow_any_host(client, nqn, disable, tgt_name=None):
    """Allow any host to connect, or enforce the allowed host NQN list.

    Args:
        nqn: Subsystem NQN.
        disable: when truthy, enforce the allowed host list (allow_any_host=False).
        tgt_name: name of the parent NVMe-oF target (optional).

    Returns:
        True or False
    """
    params = {'nqn': nqn, 'allow_any_host': not disable}
    if tgt_name:
        params['tgt_name'] = tgt_name
    return client.call('nvmf_subsystem_allow_any_host', params)
+
+
@deprecated_alias('delete_nvmf_subsystem')
def nvmf_delete_subsystem(client, nqn, tgt_name=None):
    """Delete an existing NVMe-oF subsystem.

    Args:
        nqn: Subsystem NQN.
        tgt_name: name of the parent NVMe-oF target (optional).

    Returns:
        True or False
    """
    params = {'nqn': nqn}
    if tgt_name:
        params['tgt_name'] = tgt_name
    return client.call('nvmf_delete_subsystem', params)
+
+
def nvmf_get_stats(client, tgt_name=None):
    """Query NVMe-oF statistics.

    Args:
        tgt_name: name of the parent NVMe-oF target (optional).

    Returns:
        Current NVMe-oF statistics.
    """
    params = {'tgt_name': tgt_name} if tgt_name else {}
    return client.call('nvmf_get_stats', params)
diff --git a/src/spdk/scripts/rpc/pmem.py b/src/spdk/scripts/rpc/pmem.py
new file mode 100644
index 000000000..403674cf1
--- /dev/null
+++ b/src/spdk/scripts/rpc/pmem.py
@@ -0,0 +1,35 @@
+from .helpers import deprecated_alias
+
+
@deprecated_alias('create_pmem_pool')
def bdev_pmem_create_pool(client, pmem_file, num_blocks, block_size):
    """Create a pmem pool file at the given path.

    Args:
        pmem_file: path at which to create the pmem pool
        num_blocks: number of blocks in the pool file
        block_size: block size of the pool file, in bytes
    """
    return client.call('bdev_pmem_create_pool',
                       {'pmem_file': pmem_file,
                        'num_blocks': num_blocks,
                        'block_size': block_size})
+
+
@deprecated_alias('pmem_pool_info')
def bdev_pmem_get_pool_info(client, pmem_file):
    """Get details about a pmem pool.

    Args:
        pmem_file: path to the pmem pool
    """
    return client.call('bdev_pmem_get_pool_info', {'pmem_file': pmem_file})
+
+
@deprecated_alias('delete_pmem_pool')
def bdev_pmem_delete_pool(client, pmem_file):
    """Delete a pmem pool.

    Args:
        pmem_file: path to the pmem pool
    """
    return client.call('bdev_pmem_delete_pool', {'pmem_file': pmem_file})
diff --git a/src/spdk/scripts/rpc/sock.py b/src/spdk/scripts/rpc/sock.py
new file mode 100644
index 000000000..34d7f100d
--- /dev/null
+++ b/src/spdk/scripts/rpc/sock.py
@@ -0,0 +1,41 @@
def sock_impl_get_options(client, impl_name=None):
    """Get parameters for the socket layer implementation.

    Args:
        impl_name: name of socket implementation, e.g. posix
    """
    # impl_name is always forwarded, even when left at None.
    return client.call('sock_impl_get_options', {'impl_name': impl_name})
+
+
def sock_impl_set_options(client,
                          impl_name=None,
                          recv_buf_size=None,
                          send_buf_size=None,
                          enable_recv_pipe=None,
                          enable_zerocopy_send=None):
    """Set parameters for the socket layer implementation.

    Args:
        impl_name: name of socket implementation, e.g. posix
        recv_buf_size: size of socket receive buffer in bytes (optional)
        send_buf_size: size of socket send buffer in bytes (optional)
        enable_recv_pipe: enable or disable receive pipe (optional)
        enable_zerocopy_send: enable or disable zerocopy on send (optional)
    """
    params = {'impl_name': impl_name}
    # False and 0 are valid settings, so filter on "is not None" rather
    # than truthiness.
    for key, value in (('recv_buf_size', recv_buf_size),
                       ('send_buf_size', send_buf_size),
                       ('enable_recv_pipe', enable_recv_pipe),
                       ('enable_zerocopy_send', enable_zerocopy_send)):
        if value is not None:
            params[key] = value
    return client.call('sock_impl_set_options', params)
diff --git a/src/spdk/scripts/rpc/subsystem.py b/src/spdk/scripts/rpc/subsystem.py
new file mode 100644
index 000000000..a52adbf6b
--- /dev/null
+++ b/src/spdk/scripts/rpc/subsystem.py
@@ -0,0 +1,12 @@
+from .helpers import deprecated_alias
+
+
@deprecated_alias('get_subsystems')
def framework_get_subsystems(client):
    """Return the list of SPDK framework subsystems (RPC: framework_get_subsystems)."""
    return client.call('framework_get_subsystems')
+
+
@deprecated_alias('get_subsystem_config')
def framework_get_config(client, name):
    """Return the current configuration of the named framework subsystem.

    Args:
        name: subsystem name to query.
    """
    params = {'name': name}
    return client.call('framework_get_config', params)
diff --git a/src/spdk/scripts/rpc/trace.py b/src/spdk/scripts/rpc/trace.py
new file mode 100644
index 000000000..60667b5e8
--- /dev/null
+++ b/src/spdk/scripts/rpc/trace.py
@@ -0,0 +1,33 @@
+from .helpers import deprecated_alias
+
+
@deprecated_alias('enable_tpoint_group')
def trace_enable_tpoint_group(client, name):
    """Enable tracing for a tracepoint group.

    Args:
        name: trace group to turn on in tpoint_group_mask (for example "bdev").
    """
    return client.call('trace_enable_tpoint_group', {'name': name})
+
+
@deprecated_alias('disable_tpoint_group')
def trace_disable_tpoint_group(client, name):
    """Disable tracing for a tracepoint group.

    Args:
        name: trace group to turn off in tpoint_group_mask (for example "bdev").
    """
    return client.call('trace_disable_tpoint_group', {'name': name})
+
+
@deprecated_alias('get_tpoint_group_mask')
def trace_get_tpoint_group_mask(client):
    """Get trace point group mask

    Returns:
        List of trace point group mask
    """
    return client.call('trace_get_tpoint_group_mask')
diff --git a/src/spdk/scripts/rpc/vhost.py b/src/spdk/scripts/rpc/vhost.py
new file mode 100644
index 000000000..b2e0a846c
--- /dev/null
+++ b/src/spdk/scripts/rpc/vhost.py
@@ -0,0 +1,190 @@
+from .helpers import deprecated_alias
+
+
@deprecated_alias('set_vhost_controller_coalescing')
def vhost_controller_set_coalescing(client, ctrlr, delay_base_us, iops_threshold):
    """Configure coalescing for a vhost controller.

    Args:
        ctrlr: controller name
        delay_base_us: base delay time
        iops_threshold: IOPS threshold when coalescing is enabled
    """
    params = dict(ctrlr=ctrlr,
                  delay_base_us=delay_base_us,
                  iops_threshold=iops_threshold)
    return client.call('vhost_controller_set_coalescing', params)
+
+
@deprecated_alias('construct_vhost_scsi_controller')
def vhost_create_scsi_controller(client, ctrlr, cpumask=None):
    """Create a vhost SCSI controller.

    Args:
        ctrlr: controller name
        cpumask: cpu mask for this controller (optional)
    """
    params = {'ctrlr': ctrlr}
    if cpumask:
        params['cpumask'] = cpumask
    return client.call('vhost_create_scsi_controller', params)
+
+
@deprecated_alias('add_vhost_scsi_lun')
def vhost_scsi_controller_add_target(client, ctrlr, scsi_target_num, bdev_name):
    """Attach a bdev as a LUN on a vhost SCSI controller target.

    Args:
        ctrlr: controller name
        scsi_target_num: target number to use
        bdev_name: name of bdev to add to target
    """
    params = dict(ctrlr=ctrlr,
                  scsi_target_num=scsi_target_num,
                  bdev_name=bdev_name)
    return client.call('vhost_scsi_controller_add_target', params)
+
+
@deprecated_alias('remove_vhost_scsi_target')
def vhost_scsi_controller_remove_target(client, ctrlr, scsi_target_num):
    """Remove a target from a vhost SCSI controller.

    Args:
        ctrlr: controller name to remove the target from
        scsi_target_num: number of the target to remove
    """
    params = dict(ctrlr=ctrlr, scsi_target_num=scsi_target_num)
    return client.call('vhost_scsi_controller_remove_target', params)
+
+
@deprecated_alias('construct_vhost_nvme_controller')
def vhost_create_nvme_controller(client, ctrlr, io_queues, cpumask=None):
    """Construct a vhost NVMe controller.

    Args:
        ctrlr: controller name
        io_queues: number of IO queues for the controller
        cpumask: cpu mask for this controller (optional)
    """
    params = {'ctrlr': ctrlr, 'io_queues': io_queues}
    if cpumask:
        params['cpumask'] = cpumask
    return client.call('vhost_create_nvme_controller', params)
+
+
@deprecated_alias('add_vhost_nvme_ns')
def vhost_nvme_controller_add_ns(client, ctrlr, bdev_name):
    """Add a namespace to a vhost NVMe controller.

    Args:
        ctrlr: controller that receives the new namespace
        bdev_name: block device name backing the new namespace
    """
    params = dict(ctrlr=ctrlr, bdev_name=bdev_name)
    return client.call('vhost_nvme_controller_add_ns', params)
+
+
@deprecated_alias('construct_vhost_blk_controller')
def vhost_create_blk_controller(client, ctrlr, dev_name, cpumask=None, readonly=None, packed_ring=None):
    """Create a vhost block controller.

    Args:
        ctrlr: controller name
        dev_name: device name to add to the controller
        cpumask: cpu mask for this controller (optional)
        readonly: set the controller as read-only (optional)
        packed_ring: enable packed ring support (optional)
    """
    params = {'ctrlr': ctrlr, 'dev_name': dev_name}
    for key, value in (('cpumask', cpumask),
                       ('readonly', readonly),
                       ('packed_ring', packed_ring)):
        if value:
            params[key] = value
    return client.call('vhost_create_blk_controller', params)
+
+
@deprecated_alias('get_vhost_controllers')
def vhost_get_controllers(client, name=None):
    """Get information about configured vhost controllers.

    Args:
        name: controller name to query (optional; default: all controllers)

    Returns:
        List of vhost controllers.
    """
    params = {'name': name} if name else {}
    return client.call('vhost_get_controllers', params)
+
+
@deprecated_alias('remove_vhost_controller')
def vhost_delete_controller(client, ctrlr):
    """Delete a vhost controller from the configuration.

    Args:
        ctrlr: controller name to remove
    """
    return client.call('vhost_delete_controller', {'ctrlr': ctrlr})
+
+
@deprecated_alias('construct_virtio_dev')
def bdev_virtio_attach_controller(client, name, trtype, traddr, dev_type, vq_count=None, vq_size=None):
    """Attach a virtio controller over the given transport.

    Bdevs are also created for any block devices connected to that
    controller.

    Args:
        name: name base for the newly created bdevs
        trtype: virtio target transport type: pci or user
        traddr: transport-type-specific target address, e.g. a UNIX
            domain socket path or BDF
        dev_type: device type: blk or scsi
        vq_count: number of virtual queues to be used (optional)
        vq_size: size of each queue (optional)
    """
    params = dict(name=name, trtype=trtype, traddr=traddr, dev_type=dev_type)
    if vq_count:
        params['vq_count'] = vq_count
    if vq_size:
        params['vq_size'] = vq_size
    return client.call('bdev_virtio_attach_controller', params)
+
+
@deprecated_alias('remove_virtio_bdev')
def bdev_virtio_detach_controller(client, name):
    """Remove a Virtio device.

    This will delete all bdevs exposed by this device.

    Args:
        name: virtio device name
    """
    # Fix: the deprecated alias was registered as 'remove_virtio_bdev '
    # (trailing space), which made the old name impossible to invoke.
    params = {'name': name}
    return client.call('bdev_virtio_detach_controller', params)
+
+
@deprecated_alias('get_virtio_scsi_devs')
def bdev_virtio_scsi_get_devices(client):
    """Get list of virtio scsi devices."""
    return client.call('bdev_virtio_scsi_get_devices')
diff --git a/src/spdk/scripts/rpc/vmd.py b/src/spdk/scripts/rpc/vmd.py
new file mode 100644
index 000000000..067271ef1
--- /dev/null
+++ b/src/spdk/scripts/rpc/vmd.py
@@ -0,0 +1,3 @@
def enable_vmd(client):
    """Turn on VMD device enumeration (RPC: enable_vmd)."""
    return client.call('enable_vmd')
diff --git a/src/spdk/scripts/rpc_http_proxy.py b/src/spdk/scripts/rpc_http_proxy.py
new file mode 100755
index 000000000..ea9d17b16
--- /dev/null
+++ b/src/spdk/scripts/rpc_http_proxy.py
@@ -0,0 +1,124 @@
+#!/usr/bin/env python3
+
+import argparse
+import base64
+import errno
+import json
+import socket
+import ssl
+import sys
+try:
+ from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
+except ImportError:
+ from http.server import HTTPServer
+ from http.server import BaseHTTPRequestHandler
+
# Path of the SPDK JSON-RPC domain socket; filled in from CLI args by main().
rpc_sock = None

# Positional args: host/port to serve on plus HTTP Basic auth credentials.
parser = argparse.ArgumentParser(description='http(s) proxy for SPDK RPC calls')
parser.add_argument('host', help='Host name / IP representing proxy server')
parser.add_argument('port', help='Port number', type=int)
parser.add_argument('user', help='User name used for authentication')
parser.add_argument('password', help='Password used for authentication')
parser.add_argument('-s', dest='sock', help='RPC domain socket path', default='/var/tmp/spdk.sock')
# When a certificate is supplied, main() wraps the listening socket in TLS.
parser.add_argument('-c', dest='cert', help='SSL certificate')
+
+
def print_usage_and_exit(status):
    """Print command-line usage and terminate with the given exit status."""
    usage = ('Usage: rpc_http_proxy.py <server IP> <server port> <user name>'
             ' <password> <SPDK RPC socket (optional, default: /var/tmp/spdk.sock)>')
    print(usage)
    sys.exit(status)
+
+
def rpc_call(req):
    """Forward a raw JSON-RPC request to the SPDK socket and return the reply.

    Args:
        req: complete JSON-RPC request as ASCII-encoded bytes.

    Returns:
        The raw JSON response text, or None for notifications (requests
        without an 'id' field, which receive no reply).

    Raises:
        ValueError: the peer closed the connection before a complete JSON
            response was received.
    """
    global rpc_sock

    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.connect(rpc_sock)
    sock.sendall(req)

    # Notifications (no 'id') never get a response; nothing to read.
    if 'id' not in json.loads(req.decode('ascii')):
        sock.close()
        return None

    buf = ''
    closed = False
    response = None

    while not closed:
        newdata = sock.recv(1024)
        if (newdata == b''):
            closed = True
        buf += newdata.decode('ascii')
        try:
            response = json.loads(buf)
        except ValueError:
            continue  # incomplete response; keep buffering
        break

    sock.close()

    if not response and len(buf) > 0:
        # Fix: this was a bare 'raise' with no active exception, which
        # produces "RuntimeError: No active exception to re-raise" instead
        # of the ValueError that do_POST catches to return HTTP 500.
        raise ValueError('Invalid or incomplete JSON-RPC response: %s' % buf)

    return buf
+
+
class ServerHandler(BaseHTTPRequestHandler):
    """HTTP handler that proxies POSTed JSON-RPC request bodies to SPDK."""

    # Base64-encoded "user:password" expected in the Authorization header;
    # assigned by main() before the server starts.
    key = ""

    def do_HEAD(self):
        """Send a bare 200 header (also used as the success header for POST)."""
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()

    def do_AUTHHEAD(self):
        """Send a 401 response requesting authentication."""
        self.send_response(401)
        self.send_header('WWW-Authenticate', 'text/html')
        self.send_header('Content-type', 'text/html')
        self.end_headers()

    def do_INTERNALERROR(self):
        """Send a 500 response."""
        self.send_response(500)
        self.send_header('Content-type', 'text/html')
        self.end_headers()

    def do_POST(self):
        """Check HTTP Basic auth, then forward the request body to rpc_call()."""
        # NOTE(review): plain string comparison of credentials is not
        # constant-time; presumably acceptable for a local proxy — confirm.
        if self.headers['Authorization'] != 'Basic ' + self.key:
            self.do_AUTHHEAD()
        else:
            data_string = self.rfile.read(int(self.headers['Content-Length']))

            try:
                # rpc_call returns None for notifications (no 'id'):
                # nothing is written back in that case.
                response = rpc_call(data_string)
                if response is not None:
                    self.do_HEAD()
                    self.wfile.write(bytes(response.encode(encoding='ascii')))
            except ValueError:
                self.do_INTERNALERROR()
+
+
def main():
    """Parse arguments and run the HTTP(S)-to-SPDK-RPC proxy until interrupted."""
    global rpc_sock

    args = parser.parse_args()
    rpc_sock = args.sock

    # Pre-compute the expected HTTP Basic auth token ("user:password", base64).
    key = base64.b64encode((args.user + ':' + args.password).encode(encoding='ascii')).decode('ascii')

    try:
        ServerHandler.key = key
        httpd = HTTPServer((args.host, args.port), ServerHandler)
        if args.cert is not None:
            # ssl.wrap_socket() was deprecated in Python 3.7 and removed in
            # 3.12; use an explicit server-side SSLContext instead.
            context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
            context.load_cert_chain(certfile=args.cert)
            httpd.socket = context.wrap_socket(httpd.socket, server_side=True)
        print('Started RPC http proxy server')
        httpd.serve_forever()
    except KeyboardInterrupt:
        print('Shutting down server')
        httpd.socket.close()
+
+
+if __name__ == '__main__':
+ main()