author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-21 11:54:28 +0000
---|---|---
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-21 11:54:28 +0000
commit | e6918187568dbd01842d8d1d2c808ce16a894239 (patch) |
tree | 64f88b554b444a49f656b6c656111a145cbbaa28 /src/spdk/etc |
parent | Initial commit. (diff) |
download | ceph-upstream/18.2.2.tar.xz, ceph-upstream/18.2.2.zip |
Adding upstream version 18.2.2. (tag: upstream/18.2.2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to '')
-rw-r--r-- | src/spdk/etc/spdk/iscsi.conf.in | 230
-rw-r--r-- | src/spdk/etc/spdk/nvmf.conf.in | 295
-rw-r--r-- | src/spdk/etc/spdk/vhost.conf.in | 187
3 files changed, 712 insertions(+), 0 deletions(-)
diff --git a/src/spdk/etc/spdk/iscsi.conf.in b/src/spdk/etc/spdk/iscsi.conf.in new file mode 100644 index 000000000..1a7864473 --- /dev/null +++ b/src/spdk/etc/spdk/iscsi.conf.in @@ -0,0 +1,230 @@ +# iSCSI target configuration file +# +# Please write all parameters using ASCII. +# The parameter must be quoted if it includes whitespace. +# +# Configuration syntax: +# Leading whitespace is ignored. +# Lines starting with '#' are comments. +# Lines ending with '\' are concatenated with the next line. +# Bracketed ([]) names define sections + +[Global] + # Shared Memory Group ID. SPDK applications with the same ID will share memory. + # Default: <the process PID> + #SharedMemoryID 0 + + # Disable PCI access. PCI is enabled by default. Setting this + # option will hide any PCI device from all SPDK modules, making + # SPDK act as if they don't exist. + #NoPci Yes + + # Tracepoint group mask for spdk trace buffers + # Default: 0x0 (all tracepoint groups disabled) + # Set to 0xFFFF to enable all tracepoint groups. + #TpointGroupMask 0x0 + +# Users may activate entries in this section to override default values for +# global parameters in the block device (bdev) subsystem. +[Bdev] + # Number of spdk_bdev_io structures allocated in the global bdev subsystem pool. + #BdevIoPoolSize 65536 + + # Maximum number of spdk_bdev_io structures to cache per thread. + #BdevIoCacheSize 256 + +[iSCSI] + # node name (not include optional part) + # Users can optionally change this to fit their environment. + NodeBase "iqn.2016-06.io.spdk" + + AuthFile /usr/local/etc/spdk/auth.conf + + # Socket I/O timeout sec. (0 is infinite) + Timeout 30 + + # authentication information for discovery session + # Options: + # None, Auto, CHAP and Mutual. Note that Mutual infers CHAP. + DiscoveryAuthMethod Auto + + #MaxSessions 128 + #MaxConnectionsPerSession 2 + + # iSCSI initial parameters negotiate with initiators + # NOTE: incorrect values might crash + DefaultTime2Wait 2 + DefaultTime2Retain 60 + + # Maximum amount in bytes of unsolicited data the iSCSI + # initiator may send to the target during the execution of + # a single SCSI command. + FirstBurstLength 8192 + + ImmediateData Yes + ErrorRecoveryLevel 0 + +# Users must change the PortalGroup section(s) to match the IP addresses +# for their environment. +# PortalGroup sections define which network portals the iSCSI target +# will use to listen for incoming connections. These are also used to +# determine which targets are accessible over each portal group. +# Up to 1024 portal directives are allowed. These define the network +# portals of the portal group. The user must specify a IP address +# for each network portal, and may optionally specify a port. +# If the port is omitted, 3260 will be used. +# Syntax: +# Portal <Name> <IP address>[:<port>] +[PortalGroup1] + Portal DA1 192.168.2.21:3260 + Portal DA2 192.168.2.22:3260 + +# Users must change the InitiatorGroup section(s) to match the IP +# addresses and initiator configuration in their environment. +# Netmask can be used to specify a single IP address or a range of IP addresses +# Netmask 192.168.1.20 <== single IP address +# Netmask 192.168.1.0/24 <== IP range 192.168.1.* +[InitiatorGroup1] + InitiatorName ANY + Netmask 192.168.2.0/24 + +# NVMe configuration options +[Nvme] + # NVMe Device Whitelist + # Users may specify which NVMe devices to claim by their transport id. + # See spdk_nvme_transport_id_parse() in spdk/nvme.h for the correct format. 
+ # The second argument is the assigned name, which can be referenced from + # other sections in the configuration file. For NVMe devices, a namespace + # is automatically appended to each name in the format <YourName>nY, where + # Y is the NSID (starts at 1). + TransportID "trtype:PCIe traddr:0000:00:00.0" Nvme0 + TransportID "trtype:PCIe traddr:0000:01:00.0" Nvme1 + + # The number of attempts per I/O when an I/O fails. Do not include + # this key to get the default behavior. + RetryCount 4 + # Timeout for each command, in microseconds. If 0, don't track timeouts. + TimeoutUsec 0 + # Action to take on command time out. Only valid when Timeout is greater + # than 0. This may be 'Reset' to reset the controller, 'Abort' to abort + # the command, or 'None' to just print a message but do nothing. + # Admin command timeouts will always result in a reset. + ActionOnTimeout None + # Set how often the admin queue is polled for asynchronous events. + # Units in microseconds. + AdminPollRate 100000 + # Set how often I/O queues are polled from completions. + # Units in microseconds. + IOPollRate 0 + + # Disable handling of hotplug (runtime insert and remove) events, + # users can set to Yes if want to enable it. + # Default: No + HotplugEnable No + + # Set how often the hotplug is processed for insert and remove events. + # Units in microseconds. + HotplugPollRate 0 + +# Users may change this section to create a different number or size of +# malloc LUNs. +# If the system has hardware DMA engine, it can use an IOAT +# (i.e. Crystal Beach DMA) channel to do the copy instead of memcpy +# by specifying "Enable Yes" in [Ioat] section. +# Offload is disabled by default even it is available. +[Malloc] + # Number of Malloc targets + NumberOfLuns 5 + # Malloc targets are 128M + LunSizeInMB 128 + # Block size. Default is 512 bytes. + BlockSize 4096 + +# Users can use offload by specifying "Enable Yes" in this section +# if it is available. +# Users may use the whitelist to initialize specified devices, IDS +# uses BUS:DEVICE.FUNCTION to identify each Ioat channel. +[Ioat] + Enable No + Whitelist 00:04.0 + Whitelist 00:04.1 + +# Users must change this section to match the /dev/sdX devices to be +# exported as iSCSI LUNs. The devices are accessed using Linux AIO. +# The format is: +# AIO <file name> <bdev name> [<block size>] +# The file name is the backing device +# The bdev name can be referenced from elsewhere in the configuration file. +# Block size may be omitted to automatically detect the block size of a disk. +[AIO] + AIO /dev/sdb AIO0 + AIO /dev/sdc AIO1 + AIO /tmp/myfile AIO2 4096 + +# PMDK libpmemblk-based block device +[Pmem] + # Syntax: + # Blk <pmemblk pool file name> <bdev name> + Blk /path/to/pmem-pool Pmem0 + +# The Split virtual block device slices block devices into multiple smaller bdevs. +[Split] + # Syntax: + # Split <bdev> <count> [<size_in_megabytes>] + + # Split Malloc1 into two equally-sized portions, Malloc1p0 and Malloc1p1 + Split Malloc1 2 + + # Split Malloc2 into eight 1-megabyte portions, Malloc2p0 ... Malloc2p7, + # leaving the rest of the device inaccessible + Split Malloc2 8 1 + +# The RAID virtual block device based on pre-configured block device. +[RAID1] + # Unique name of this RAID device. + Name Raid0 + # RAID level, only raid level 0 is supported. + RaidLevel 0 + # Strip size in KB. + StripSize 64 + # Number of pre-configured bdevs. + NumDevices 2 + # Pre-configured bdevs name with Nvme. + #Devices Nvme0n1 Nvme1n1 + # Pre-configured bdevs name with Malloc. 
+ Devices Malloc3 Malloc4 + # Pre-configured bdevs name with AIO. + #Devices AIO0 AIO1 + +# Users should change the TargetNode section(s) below to match the +# desired iSCSI target node configuration. +# TargetName, Mapping, LUN0 are minimum required +[TargetNode1] + TargetName disk1 + TargetAlias "Data Disk1" + Mapping PortalGroup1 InitiatorGroup1 + AuthMethod Auto + AuthGroup AuthGroup1 + # Enable header and data digest + # UseDigest Header Data + UseDigest Auto + # Use the first malloc target + LUN0 Malloc0 + # Using the first AIO target + LUN1 AIO0 + # Using the second storage target + LUN2 AIO1 + # Using the third storage target + LUN3 AIO2 + QueueDepth 128 + +[TargetNode2] + TargetName disk2 + TargetAlias "Data Disk2" + Mapping PortalGroup1 InitiatorGroup1 + AuthMethod Auto + AuthGroup AuthGroup1 + UseDigest Auto + LUN0 Nvme0n1 + LUN1 Raid0 + QueueDepth 32 diff --git a/src/spdk/etc/spdk/nvmf.conf.in b/src/spdk/etc/spdk/nvmf.conf.in new file mode 100644 index 000000000..5799f65cf --- /dev/null +++ b/src/spdk/etc/spdk/nvmf.conf.in @@ -0,0 +1,295 @@ +# NVMf Target Configuration File +# +# Please write all parameters using ASCII. +# The parameter must be quoted if it includes whitespace. +# +# Configuration syntax: +# Leading whitespace is ignored. +# Lines starting with '#' are comments. +# Lines ending with '\' are concatenated with the next line. +# Bracketed ([]) names define sections + +[Global] + # Tracepoint group mask for spdk trace buffers + # Default: 0x0 (all tracepoint groups disabled) + # Set to 0xFFFF to enable all tracepoint groups. + #TpointGroupMask 0x0 + + # PciBlacklist and PciWhitelist cannot be used at the same time + #PciBlacklist 0000:01:00.0 + #PciBlacklist 0000:02:00.0 + #PciWhitelist 0000:03:00.0 + #PciWhitelist 0000:04:00.0 + +# Users may activate entries in this section to override default values for +# global parameters in the block device (bdev) subsystem. +[Bdev] + # Number of spdk_bdev_io structures allocated in the global bdev subsystem pool. + #BdevIoPoolSize 65536 + + # Maximum number of spdk_bdev_io structures to cache per thread. + #BdevIoCacheSize 256 + +# Users may change this section to create a different number or size of +# malloc LUNs. +# This will generate 8 LUNs with a malloc-allocated backend. +# Each LUN will be size 64MB and these will be named +# Malloc0 through Malloc7. Not all LUNs defined here are necessarily +# used below. +[Malloc] + NumberOfLuns 8 + LunSizeInMB 64 + +# Users must change this section to match the /dev/sdX devices to be +# exported as iSCSI LUNs. The devices are accessed using Linux AIO. +# The format is: +# AIO <file name> <bdev name> +# The file name is the backing device +# The bdev name can be referenced from elsewhere in the configuration file. +# Block size may be omitted to automatically detect the block size of a disk. +[AIO] + AIO /dev/sdb AIO0 + AIO /dev/sdc AIO1 + AIO /tmp/myfile AIO2 4096 + +# PMDK libpmemblk-based block device +[Pmem] + # Syntax: + # Blk <pmemblk pool file name> <bdev name> + Blk /path/to/pmem-pool Pmem0 + +# Define NVMf protocol global options +[Nvmf] + # Set how often the acceptor polls for incoming connections. The acceptor is also + # responsible for polling existing connections that have gone idle. 0 means continuously + # poll. Units in microseconds. + AcceptorPollRate 10000 + + # Set how the connection is scheduled among multiple threads, current supported string value are + # "RoundRobin", "Host", "Transport". + # RoundRobin: Schedule the connection with roundrobin manner. 
+ # Host: Schedule the connection according to host IP. + # Transport: Schedule the connection according to the transport characteristics. + # For example, for TCP transport, we can schedule the connection according to socket NAPI_ID info. + # The connection which has the same socket NAPI_ID info will be grouped in the same polling group. + ConnectionScheduler RoundRobin + +# One valid transport type must be set in each [Transport]. +# The first is the case of RDMA transport and the second is the case of TCP transport. +[Transport] + # Set RDMA transport type. + Type RDMA + + # Set the maximum number of outstanding I/O per queue. + #MaxQueueDepth 128 + + # Set the maximum number of submission and completion queues per session. + # Setting this to '8', for example, allows for 8 submission and 8 completion queues + # per session. + #MaxQueuesPerSession 4 + + # Set the maximum in-capsule data size. Must be a multiple of 16. + # 0 is a valid choice. + #InCapsuleDataSize 4096 + + # Set the maximum I/O size. Must be a multiple of 4096. + #MaxIOSize 131072 + + # Set the I/O unit size, and this value should not be larger than MaxIOSize + #IOUnitSize 131072 + + # Set the maximum number of IO for admin queue + #MaxAQDepth 32 + + # Set the number of pooled data buffers available to the transport + # It is used to provide the read/write data buffers for the qpairs on this transport. + #NumSharedBuffers 512 + + # Set the number of shared buffers to be cached per poll group + #BufCacheSize 32 + + # Set the maximum number outstanding I/O per shared receive queue. Relevant only for RDMA transport + #MaxSRQDepth 4096 + +[Transport] + # Set TCP transport type. + Type TCP + + # Set the maximum number of outstanding I/O per queue. + #MaxQueueDepth 128 + + # Set the maximum number of submission and completion queues per session. + # Setting this to '8', for example, allows for 8 submission and 8 completion queues + # per session. + #MaxQueuesPerSession 4 + + # Set the maximum in-capsule data size. Must be a multiple of 16. + # 0 is a valid choice. + #InCapsuleDataSize 4096 + + # Set the maximum I/O size. Must be a multiple of 4096. + #MaxIOSize 131072 + + # Set the I/O unit size, and this value should not be larger than MaxIOSize + #IOUnitSize 131072 + + # Set the maximum number of IO for admin queue + #MaxAQDepth 32 + + # Set the number of pooled data buffers available to the transport + # It is used to provide the read/write data buffers for the qpairs on this transport. + #NumSharedBuffers 512 + + # Set the number of shared buffers to be cached per poll group + #BufCacheSize 32 + + # Set whether to use the C2H Success optimization, only used for TCP transport. + # C2HSuccess true + + # Set whether to use different priority for socket, only used for TCP transport. + # SockPriority 0 + +# Define FC transport +#[Transport] + # Set FC transport type. + #Type FC + + # Set the maximum number of submission and completion queues per session. + # Setting this to '8', for example, allows for 8 submission and 8 completion queues + # per session. + #MaxQueuesPerSession 5 + + # Set the maximum number of outstanding I/O per queue. + #MaxQueueDepth 128 + + # Set the maximum I/O size. Must be a multiple of 4096. + #MaxIOSize 65536 + +[Nvme] + # NVMe Device Whitelist + # Users may specify which NVMe devices to claim by their transport id. + # See spdk_nvme_transport_id_parse() in spdk/nvme.h for the correct format. 
+ # The second argument is the assigned name, which can be referenced from + # other sections in the configuration file. For NVMe devices, a namespace + # is automatically appended to each name in the format <YourName>nY, where + # Y is the NSID (starts at 1). + TransportID "trtype:PCIe traddr:0000:00:00.0" Nvme0 + TransportID "trtype:PCIe traddr:0000:01:00.0" Nvme1 + TransportID "trtype:PCIe traddr:0000:02:00.0" Nvme2 + TransportID "trtype:PCIe traddr:0000:03:00.0" Nvme3 + TransportID "trtype:RDMA adrfam:IPv4 traddr:192.168.100.8 trsvcid:4420 hostaddr:192.168.100.9 subnqn:nqn.2016-06.io.spdk:cnode1" Nvme4 + TransportID "trtype:TCP adrfam:IPv4 traddr:192.168.100.3 trsvcid:4420 hostaddr:192.168.100.4 subnqn:nqn.2016-06.io.spdk:cnode2" Nvme5 + + # The number of attempts per I/O when an I/O fails. Do not include + # this key to get the default behavior. + RetryCount 4 + # Timeout for each command, in microseconds. If 0, don't track timeouts. + TimeoutUsec 0 + # Action to take on command time out. Only valid when Timeout is greater + # than 0. This may be 'Reset' to reset the controller, 'Abort' to abort + # the command, or 'None' to just print a message but do nothing. + # Admin command timeouts will always result in a reset. + ActionOnTimeout None + # Set how often the admin queue is polled for asynchronous events. + # Units in microseconds. + AdminPollRate 100000 + # Set how often I/O queues are polled from completions. + # Units in microseconds. + IOPollRate 0 + + # Disable handling of hotplug (runtime insert and remove) events, + # users can set to Yes if want to enable it. + # Default: No + HotplugEnable No + + # Enable/disable delayed NVMe command submission. + # Default: True. + DelayCmdSubmit True + +# The Split virtual block device slices block devices into multiple smaller bdevs. +[Split] + # Syntax: + # Split <bdev> <count> [<size_in_megabytes>] + + # Split Malloc2 into two equally-sized portions, Malloc2p0 and Malloc2p1 + Split Malloc2 2 + + # Split Malloc3 into eight 1-megabyte portions, Malloc3p0 ... Malloc3p7, + # leaving the rest of the device inaccessible + Split Malloc3 8 1 + +# The RAID virtual block device based on pre-configured block device. +[RAID1] + # Unique name of this RAID device. + Name Raid0 + # RAID level, only raid level 0 is supported. + RaidLevel 0 + # Strip size in KB. + StripSize 64 + # Number of pre-configured bdevs. + NumDevices 2 + # Pre-configured bdevs name with Nvme. + Devices Nvme2n1 Nvme3n1 + # Pre-configured bdevs name with Malloc. + #Devices Malloc0 Malloc1 + # Pre-configured bdevs name with AIO. + #Devices AIO0 AIO1 + +# Define an NVMf Subsystem. +# - NQN is required and must be unique. +# - Between 1 and 255 Listen directives are allowed. This defines +# the addresses on which new connections may be accepted. The format +# is Listen <type> <address> where type can be RDMA, TCP or FC. +# - Between 0 and 255 Host directives are allowed. This defines the +# NQNs of allowed hosts. If no Host directive is specified, all hosts +# are allowed to connect. +# - Between 0 and 255 Namespace directives are allowed. These define the +# namespaces accessible from this subsystem. +# The user must specify MaxNamespaces to allow for adding namespaces +# during active connection. By default it is 0 +# The user must specify a bdev name for each namespace, and may optionally +# specify a namespace ID. If nsid is omitted, the namespace will be +# assigned the next available NSID. The NSID must be unique within the +# subsystem. 
An optional namespace UUID may also be specified. +# Syntax: +# Namespace <bdev_name> [<nsid> [<uuid>]] + +# Namespaces backed by physical NVMe devices +[Subsystem1] + NQN nqn.2016-06.io.spdk:cnode1 + Listen TCP 15.15.15.2:4420 + AllowAnyHost No + Host nqn.2016-06.io.spdk:init + SN SPDK00000000000001 + MN SPDK_Controller1 + MaxNamespaces 20 + Namespace Nvme0n1 1 + Namespace Nvme1n1 2 + Namespace Raid0 + +# Multiple subsystems are allowed. +# Namespaces backed by non-NVMe devices +[Subsystem2] + NQN nqn.2016-06.io.spdk:cnode2 + Listen RDMA 192.168.2.21:4420 + AllowAnyHost No + Host nqn.2016-06.io.spdk:init + SN SPDK00000000000002 + MN SPDK_Controller2 + Namespace Malloc0 + Namespace Malloc1 + Namespace AIO0 + Namespace AIO1 + +# Subsystem with FC listen address directive +# - Listen option allows subsystem access on specific FC ports identified +# by WWNN-WWPN. Each subsystem allows 0 - 255 listen directives. +# If no listen directive is provided, subsystem can be accessed on all +# avialable FC links permitted by FC zoning rules. +# +# [Subsystem3] + #NQN nqn.2016-06.io.spdk:cnode3 + #Listen FC "nn-0x20000090fac7ca5c:pn-0x10000090fac7ca5c" + #AllowAnyHost Yes + #SN SPDK00000000000003 + #Namespace Malloc4 diff --git a/src/spdk/etc/spdk/vhost.conf.in b/src/spdk/etc/spdk/vhost.conf.in new file mode 100644 index 000000000..2b18a3dfb --- /dev/null +++ b/src/spdk/etc/spdk/vhost.conf.in @@ -0,0 +1,187 @@ +# SPDK vhost configuration file +# +# Please write all parameters using ASCII. +# The parameter must be quoted if it includes whitespace. + +# Configuration syntax: +# Leading whitespace is ignored. +# Lines starting with '#' are comments. +# Lines ending with '\' are concatenated with the next line. +# Bracketed ([]) names define sections + +[Global] + # Instance ID for multi-process support + # Default: 0 + #InstanceID 0 + + # Disable PCI access. PCI is enabled by default. Setting this + # option will hide any PCI device from all SPDK modules, making + # SPDK act as if they don't exist. + #NoPci Yes + + # Tracepoint group mask for spdk trace buffers + # Default: 0x0 (all tracepoint groups disabled) + # Set to 0xFFFF to enable all tracepoint groups. + #TpointGroupMask 0x0 + +# Users may activate entries in this section to override default values for +# global parameters in the block device (bdev) subsystem. +[Bdev] + # Number of spdk_bdev_io structures allocated in the global bdev subsystem pool. + #BdevIoPoolSize 65536 + + # Maximum number of spdk_bdev_io structures to cache per thread. + #BdevIoCacheSize 256 + +# Users may not want to use offload even it is available. +# Users can use offload by specifying "Enable Yes" in this section +# if it is available. +# Users may use the whitelist to initialize specified devices, IDS +# uses BUS:DEVICE.FUNCTION to identify each Ioat channel. +[Ioat] + Enable No + #Whitelist 00:04.0 + #Whitelist 00:04.1 + +# Users must change this section to match the /dev/sdX devices to be +# exported as vhost scsi drives. The devices are accessed using Linux AIO. +[AIO] + #AIO /dev/sdb AIO0 + #AIO /dev/sdc AIO1 + +# PMDK libpmemblk-based block device +[Pmem] + # Syntax: + # Blk <pmemblk pool file name> <bdev name> + Blk /path/to/pmem-pool Pmem0 + +# Users may change this section to create a different number or size of +# malloc LUNs. +# If the system has hardware DMA engine, it can use an IOAT +# (i.e. Crystal Beach DMA) channel to do the copy instead of memcpy +# by specifying "Enable Yes" in [Ioat] section. +# Offload is disabled by default even it is available. 
+[Malloc] + # Number of Malloc targets + NumberOfLuns 3 + # Malloc targets are 128M + LunSizeInMB 128 + # Block size. Default is 512 bytes. + BlockSize 4096 + +# NVMe configuration options +[Nvme] + # NVMe Device Whitelist + # Users may specify which NVMe devices to claim by their transport id. + # See spdk_nvme_transport_id_parse() in spdk/nvme.h for the correct format. + # The second argument is the assigned name, which can be referenced from + # other sections in the configuration file. For NVMe devices, a namespace + # is automatically appended to each name in the format <YourName>nY, where + # Y is the NSID (starts at 1). + TransportID "trtype:PCIe traddr:0000:00:00.0" Nvme0 + TransportID "trtype:PCIe traddr:0000:01:00.0" Nvme1 + + # The number of attempts per I/O when an I/O fails. Do not include + # this key to get the default behavior. + RetryCount 4 + # Timeout for each command, in microseconds. If 0, don't track timeouts. + TimeoutUsec 0 + # Action to take on command time out. Only valid when Timeout is greater + # than 0. This may be 'Reset' to reset the controller, 'Abort' to abort + # the command, or 'None' to just print a message but do nothing. + # Admin command timeouts will always result in a reset. + ActionOnTimeout None + # Set how often the admin queue is polled for asynchronous events. + # Units in microseconds. + AdminPollRate 100000 + # Set how often I/O queues are polled from completions. + # Units in microseconds. + IOPollRate 0 + +# The Split virtual block device slices block devices into multiple smaller bdevs. +[Split] + # Syntax: + # Split <bdev> <count> [<size_in_megabytes>] + # + # Split Nvme1n1 into two equally-sized portions, Nvme1n1p0 and Nvme1n1p1 + #Split Nvme1n1 2 + + # Split Malloc2 into eight 1-megabyte portions, Malloc2p0 ... Malloc2p7, + # leaving the rest of the device inaccessible + #Split Malloc2 8 1 + +# The RAID virtual block device based on pre-configured block device. +[RAID1] + # Unique name of this RAID device. + Name Raid0 + # RAID level, only raid level 0 is supported. + RaidLevel 0 + # Strip size in KB. + StripSize 64 + # Number of pre-configured bdevs. + NumDevices 2 + # Pre-configured bdevs name with Nvme. + #Devices Nvme0n1 Nvme1n1 + # Pre-configured bdevs name with Malloc. + Devices Malloc1 Malloc2 + # Pre-configured bdevs name with AIO. + #Devices AIO0 AIO1 + +# Vhost scsi controller configuration +# Users should change the VhostScsi section(s) below to match the desired +# vhost configuration. +# Name is minimum required +[VhostScsi0] + # Define name for controller + Name vhost.0 + # Assign devices from backend + # Use the first malloc device + Target 0 Malloc0 + # Use the first AIO device + #Target 1 AIO0 + # Use the frist Nvme device + #Target 2 Nvme0n1 + # Use the third partition from second Nvme device + #Target 3 Nvme1n1p2 + + # Start the poller for this vhost controller on one of the cores in + # this cpumask. By default, it not specified, will use any core in the + # SPDK process. + #Cpumask 0x1 + +#[VhostScsi1] +# Name vhost.1 +# Target 0 AIO1 +# Cpumask 0x1 + +#[VhostBlk0] + # Define name for controller + #Name vhost.2 + # Use first partition from the second Malloc device + #Dev Malloc2p0 + # Put controller in read-only mode + #ReadOnly no + # Start the poller for this vhost controller on one of the cores in + # this cpumask. By default, it not specified, will use any core in the + # SPDK process. 
+ #Cpumask 0x1
+
+#[VhostBlk1]
+ # Define name for controller
+ #Name vhost.2
+ # Use the device named Raid0
+ #Dev Raid0
+
+#[VhostNvme0]
+ # Define name for controller
+ #Name vhost.0
+ #NumberOfQueues 2
+ # Use the first partition from the first NVMe device
+ #Namespace Nvme0n1p0
+ # Use the second partition from the first NVMe device
+ #Namespace Nvme0n1p1
+
+ # Start the poller for this vhost controller on one of the cores in
+ # this cpumask. By default, if not specified, any core in the
+ # SPDK process will be used.
+ #Cpumask 0x1
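All three templates use the same legacy INI-style syntax spelled out in their headers: leading whitespace is ignored, `#` starts a comment, a line ending with `\` is concatenated with the next one, bracketed names open sections, and parameters containing whitespace must be quoted. As a rough illustration of that syntax only — a sketch following those stated rules, not SPDK's actual configuration parser — the Python snippet below reads a file written in this format, groups directives by section, and prints the `TransportID` entries from `[Nvme]`; the config path it opens is a hypothetical example.

```python
#!/usr/bin/env python3
"""Minimal reader for the INI-style syntax described in the SPDK config headers.

Illustrative sketch only: it implements just the rules stated in the templates
(leading whitespace ignored, '#' comments, '\' line continuation, bracketed
section names, quoted parameters), not SPDK's own parser.
"""
import shlex
from collections import defaultdict


def parse_spdk_conf(path):
    """Return {section: [(key, [args...]), ...]} parsed from a config file."""
    sections = defaultdict(list)
    current = None
    pending = ""  # holds a partial line whose previous line ended with '\'

    with open(path) as f:
        for raw in f:
            line = pending + raw.strip()
            pending = ""
            if line.endswith("\\"):
                # Lines ending with '\' are concatenated with the next line.
                pending = line[:-1]
                continue
            if not line or line.startswith("#"):
                continue  # blank line or comment
            if line.startswith("[") and line.endswith("]"):
                current = line[1:-1]  # e.g. "[Nvme]" -> "Nvme"
                continue
            # shlex keeps quoted parameters (which may contain whitespace) intact.
            tokens = shlex.split(line)
            if current is not None and tokens:
                sections[current].append((tokens[0], tokens[1:]))
    return sections


if __name__ == "__main__":
    # Hypothetical path: point this at a config generated from one of the templates.
    cfg = parse_spdk_conf("/usr/local/etc/spdk/iscsi.conf")
    for key, args in cfg.get("Nvme", []):
        if key == "TransportID":
            # args[-1] is the assigned bdev name (e.g. "Nvme0"); the rest is
            # the transport ID string, already dequoted by shlex.
            print(args[-1], "->", " ".join(args[:-1]))
```

Pointed at a config rendered from the iscsi.conf.in template above, the lookup at the bottom would be expected to print each assigned bdev name (Nvme0, Nvme1) alongside its PCIe transport ID string.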