path: root/src/spdk/test/bdev
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 18:24:20 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 18:24:20 +0000
commit     483eb2f56657e8e7f419ab1a4fab8dce9ade8609
tree       e5d88d25d870d5dedacb6bbdbe2a966086a0a5cf /src/spdk/test/bdev
parent     Initial commit.
Adding upstream version 14.2.21.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/test/bdev')
-rw-r--r--  src/spdk/test/bdev/Makefile                       44
-rw-r--r--  src/spdk/test/bdev/bdev.conf.in                   44
-rw-r--r--  src/spdk/test/bdev/bdevio/.gitignore               1
-rw-r--r--  src/spdk/test/bdev/bdevio/Makefile                61
-rw-r--r--  src/spdk/test/bdev/bdevio/bdevio.c               973
-rwxr-xr-x  src/spdk/test/bdev/bdevjson/json_config.sh        27
-rwxr-xr-x  src/spdk/test/bdev/bdevjson/rbd_json_config.sh    26
-rw-r--r--  src/spdk/test/bdev/bdevperf/.gitignore             1
-rw-r--r--  src/spdk/test/bdev/bdevperf/Makefile              61
-rw-r--r--  src/spdk/test/bdev/bdevperf/bdevperf.c           1035
-rwxr-xr-x  src/spdk/test/bdev/blockdev.sh                   171
-rw-r--r--  src/spdk/test/bdev/nbd_common.sh                  95
-rwxr-xr-x  src/spdk/test/bdev/nbdjson/json_config.sh         28
13 files changed, 2567 insertions, 0 deletions
diff --git a/src/spdk/test/bdev/Makefile b/src/spdk/test/bdev/Makefile
new file mode 100644
index 00000000..cb15bd49
--- /dev/null
+++ b/src/spdk/test/bdev/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = bdevio bdevperf
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/bdev/bdev.conf.in b/src/spdk/test/bdev/bdev.conf.in
new file mode 100644
index 00000000..0439ab5e
--- /dev/null
+++ b/src/spdk/test/bdev/bdev.conf.in
@@ -0,0 +1,44 @@
+[Passthru]
+ # PT <bdev name> <vbdev name>
+ PT Malloc3 TestPT
+
+[Malloc]
+ NumberOfLuns 7
+ LunSizeInMB 32
+
+[Split]
+ # Split Malloc1 into two auto-sized halves
+ Split Malloc1 2
+
+ # Split Malloc2 into eight 4-megabyte pieces,
+ # leaving the rest of the device inaccessible
+ Split Malloc2 8 4
+
+[AIO]
+ AIO /dev/ram0 AIO0
+ AIO /tmp/aiofile AIO1 2048
+
+[QoS]
+ # The QoS section defines limits on performance
+ # metrics such as IOPS and bandwidth
+ #
+ # Format: Limit_IOPS Bdev_Name IOPS_Limit_Value
+ #
+ # IOPS limit must be 10000 or greater and be multiple
+ # of 10000
+ #
+ # Assign 20000 IOPS for the Malloc0 block device
+ Limit_IOPS Malloc0 20000
+ #
+ # Bandwidth limit must be 10 (MB) or greater and be
+ # multiple of 10
+ # Assign 100 (MB) bandwidth for the Malloc3 block
+ # device
+ Limit_BPS Malloc3 100
+
+[RAID0]
+ Name raid0
+ StripSize 64
+ NumDevices 2
+ RaidLevel 0
+ Devices Malloc4 Malloc5
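
The [QoS] comments above constrain the limits: an IOPS limit must be at least 10000 and a multiple of 10000, and a bandwidth limit must be at least 10 MB and a multiple of 10. As an illustration only (this helper is made up and is not part of the files added by this commit), the same rules expressed in C:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative check of the [QoS] constraints quoted above; not SPDK code. */
    static bool
    qos_conf_limits_valid(uint64_t iops_limit, uint64_t bw_limit_mb)
    {
            /* "IOPS limit must be 10000 or greater and be multiple of 10000" */
            if (iops_limit < 10000 || iops_limit % 10000 != 0) {
                    return false;
            }
            /* "Bandwidth limit must be 10 (MB) or greater and be multiple of 10" */
            if (bw_limit_mb < 10 || bw_limit_mb % 10 != 0) {
                    return false;
            }
            return true;
    }

    int main(void)
    {
            /* Matches the sample config: Limit_IOPS Malloc0 20000, Limit_BPS Malloc3 100 */
            return qos_conf_limits_valid(20000, 100) ? 0 : 1;
    }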
diff --git a/src/spdk/test/bdev/bdevio/.gitignore b/src/spdk/test/bdev/bdevio/.gitignore
new file mode 100644
index 00000000..1bb55429
--- /dev/null
+++ b/src/spdk/test/bdev/bdevio/.gitignore
@@ -0,0 +1 @@
+bdevio
diff --git a/src/spdk/test/bdev/bdevio/Makefile b/src/spdk/test/bdev/bdevio/Makefile
new file mode 100644
index 00000000..d973846f
--- /dev/null
+++ b/src/spdk/test/bdev/bdevio/Makefile
@@ -0,0 +1,61 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.modules.mk
+
+APP = bdevio
+
+C_SRCS := bdevio.c
+
+SPDK_LIB_LIST = event_bdev event_copy
+SPDK_LIB_LIST += bdev copy event trace log conf thread util rpc jsonrpc json
+
+LIBS += $(BLOCKDEV_MODULES_LINKER_ARGS) \
+ $(COPY_MODULES_LINKER_ARGS) \
+ $(SOCK_MODULES_LINKER_ARGS)
+
+LIBS += $(SPDK_LIB_LINKER_ARGS) $(ENV_LINKER_ARGS) -lcunit
+
+all : $(APP)
+ @:
+
+$(APP) : $(OBJS) $(SPDK_LIB_FILES) $(COPY_MODULES_FILES) $(BLOCKDEV_MODULES_FILES) $(SOCK_MODULES_FILES) $(LINKER_MODULES) $(ENV_LIBS)
+ $(LINK_C)
+
+clean :
+ $(CLEAN_C) $(APP)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.deps.mk
diff --git a/src/spdk/test/bdev/bdevio/bdevio.c b/src/spdk/test/bdev/bdevio/bdevio.c
new file mode 100644
index 00000000..c139b6f2
--- /dev/null
+++ b/src/spdk/test/bdev/bdevio/bdevio.c
@@ -0,0 +1,973 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/bdev.h"
+#include "spdk/copy_engine.h"
+#include "spdk/env.h"
+#include "spdk/log.h"
+#include "spdk/thread.h"
+#include "spdk/event.h"
+
+#include "CUnit/Basic.h"
+
+#define BUFFER_IOVS 1024
+#define BUFFER_SIZE 260 * 1024
+#define BDEV_TASK_ARRAY_SIZE 2048
+
+pthread_mutex_t g_test_mutex;
+pthread_cond_t g_test_cond;
+
+static uint32_t g_lcore_id_init;
+static uint32_t g_lcore_id_ut;
+static uint32_t g_lcore_id_io;
+
+struct io_target {
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *bdev_desc;
+ struct spdk_io_channel *ch;
+ struct io_target *next;
+};
+
+struct bdevio_request {
+ char *buf;
+ int data_len;
+ uint64_t offset;
+ struct iovec iov[BUFFER_IOVS];
+ int iovcnt;
+ struct io_target *target;
+};
+
+struct io_target *g_io_targets = NULL;
+
+static void
+execute_spdk_function(spdk_event_fn fn, void *arg1, void *arg2)
+{
+ struct spdk_event *event;
+
+ event = spdk_event_allocate(g_lcore_id_io, fn, arg1, arg2);
+ pthread_mutex_lock(&g_test_mutex);
+ spdk_event_call(event);
+ pthread_cond_wait(&g_test_cond, &g_test_mutex);
+ pthread_mutex_unlock(&g_test_mutex);
+}
+
+static void
+wake_ut_thread(void)
+{
+ pthread_mutex_lock(&g_test_mutex);
+ pthread_cond_signal(&g_test_cond);
+ pthread_mutex_unlock(&g_test_mutex);
+}
+
+static void
+__get_io_channel(void *arg1, void *arg2)
+{
+ struct io_target *target = arg1;
+
+ target->ch = spdk_bdev_get_io_channel(target->bdev_desc);
+ assert(target->ch);
+ wake_ut_thread();
+}
+
+static int
+bdevio_construct_targets(void)
+{
+ struct spdk_bdev *bdev;
+ struct io_target *target;
+ int rc;
+
+ printf("I/O targets:\n");
+
+ bdev = spdk_bdev_first_leaf();
+ while (bdev != NULL) {
+ uint64_t num_blocks = spdk_bdev_get_num_blocks(bdev);
+ uint32_t block_size = spdk_bdev_get_block_size(bdev);
+
+ target = malloc(sizeof(struct io_target));
+ if (target == NULL) {
+ return -ENOMEM;
+ }
+
+ rc = spdk_bdev_open(bdev, true, NULL, NULL, &target->bdev_desc);
+ if (rc != 0) {
+ free(target);
+ SPDK_ERRLOG("Could not open leaf bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
+ bdev = spdk_bdev_next_leaf(bdev);
+ continue;
+ }
+
+ printf(" %s: %" PRIu64 " blocks of %" PRIu32 " bytes (%" PRIu64 " MiB)\n",
+ spdk_bdev_get_name(bdev),
+ num_blocks, block_size,
+ (num_blocks * block_size + 1024 * 1024 - 1) / (1024 * 1024));
+
+ target->bdev = bdev;
+ target->next = g_io_targets;
+ execute_spdk_function(__get_io_channel, target, NULL);
+ g_io_targets = target;
+
+ bdev = spdk_bdev_next_leaf(bdev);
+ }
+
+ return 0;
+}
+
+static void
+__put_io_channel(void *arg1, void *arg2)
+{
+ struct io_target *target = arg1;
+
+ spdk_put_io_channel(target->ch);
+ wake_ut_thread();
+}
+
+static void
+bdevio_cleanup_targets(void)
+{
+ struct io_target *target;
+
+ target = g_io_targets;
+ while (target != NULL) {
+ execute_spdk_function(__put_io_channel, target, NULL);
+ spdk_bdev_close(target->bdev_desc);
+ g_io_targets = target->next;
+ free(target);
+ target = g_io_targets;
+ }
+}
+
+static bool g_completion_success;
+
+static void
+initialize_buffer(char **buf, int pattern, int size)
+{
+ *buf = spdk_dma_zmalloc(size, 0x1000, NULL);
+ memset(*buf, pattern, size);
+}
+
+static void
+quick_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
+{
+ g_completion_success = success;
+ spdk_bdev_free_io(bdev_io);
+ wake_ut_thread();
+}
+
+static void
+__blockdev_write(void *arg1, void *arg2)
+{
+ struct bdevio_request *req = arg1;
+ struct io_target *target = req->target;
+ int rc;
+
+ if (req->iovcnt) {
+ rc = spdk_bdev_writev(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
+ req->data_len, quick_test_complete, NULL);
+ } else {
+ rc = spdk_bdev_write(target->bdev_desc, target->ch, req->buf, req->offset,
+ req->data_len, quick_test_complete, NULL);
+ }
+
+ if (rc) {
+ g_completion_success = false;
+ wake_ut_thread();
+ }
+}
+
+static void
+__blockdev_write_zeroes(void *arg1, void *arg2)
+{
+ struct bdevio_request *req = arg1;
+ struct io_target *target = req->target;
+ int rc;
+
+ rc = spdk_bdev_write_zeroes(target->bdev_desc, target->ch, req->offset,
+ req->data_len, quick_test_complete, NULL);
+ if (rc) {
+ g_completion_success = false;
+ wake_ut_thread();
+ }
+}
+
+static void
+sgl_chop_buffer(struct bdevio_request *req, int iov_len)
+{
+ int data_len = req->data_len;
+ char *buf = req->buf;
+
+ req->iovcnt = 0;
+ if (!iov_len) {
+ return;
+ }
+
+ for (; data_len > 0 && req->iovcnt < BUFFER_IOVS; req->iovcnt++) {
+ if (data_len < iov_len) {
+ iov_len = data_len;
+ }
+
+ req->iov[req->iovcnt].iov_base = buf;
+ req->iov[req->iovcnt].iov_len = iov_len;
+
+ buf += iov_len;
+ data_len -= iov_len;
+ }
+
+ CU_ASSERT_EQUAL_FATAL(data_len, 0);
+}
+
+static void
+blockdev_write(struct io_target *target, char *tx_buf,
+ uint64_t offset, int data_len, int iov_len)
+{
+ struct bdevio_request req;
+
+ req.target = target;
+ req.buf = tx_buf;
+ req.data_len = data_len;
+ req.offset = offset;
+ sgl_chop_buffer(&req, iov_len);
+
+ g_completion_success = false;
+
+ execute_spdk_function(__blockdev_write, &req, NULL);
+}
+
+static void
+blockdev_write_zeroes(struct io_target *target, char *tx_buf,
+ uint64_t offset, int data_len)
+{
+ struct bdevio_request req;
+
+ req.target = target;
+ req.buf = tx_buf;
+ req.data_len = data_len;
+ req.offset = offset;
+
+ g_completion_success = false;
+
+ execute_spdk_function(__blockdev_write_zeroes, &req, NULL);
+}
+
+static void
+__blockdev_read(void *arg1, void *arg2)
+{
+ struct bdevio_request *req = arg1;
+ struct io_target *target = req->target;
+ int rc;
+
+ if (req->iovcnt) {
+ rc = spdk_bdev_readv(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
+ req->data_len, quick_test_complete, NULL);
+ } else {
+ rc = spdk_bdev_read(target->bdev_desc, target->ch, req->buf, req->offset,
+ req->data_len, quick_test_complete, NULL);
+ }
+
+ if (rc) {
+ g_completion_success = false;
+ wake_ut_thread();
+ }
+}
+
+static void
+blockdev_read(struct io_target *target, char *rx_buf,
+ uint64_t offset, int data_len, int iov_len)
+{
+ struct bdevio_request req;
+
+ req.target = target;
+ req.buf = rx_buf;
+ req.data_len = data_len;
+ req.offset = offset;
+ req.iovcnt = 0;
+ sgl_chop_buffer(&req, iov_len);
+
+ g_completion_success = false;
+
+ execute_spdk_function(__blockdev_read, &req, NULL);
+}
+
+static int
+blockdev_write_read_data_match(char *rx_buf, char *tx_buf, int data_length)
+{
+ int rc;
+ rc = memcmp(rx_buf, tx_buf, data_length);
+
+ spdk_dma_free(rx_buf);
+ spdk_dma_free(tx_buf);
+
+ return rc;
+}
+
+static void
+blockdev_write_read(uint32_t data_length, uint32_t iov_len, int pattern, uint64_t offset,
+ int expected_rc, bool write_zeroes)
+{
+ struct io_target *target;
+ char *tx_buf = NULL;
+ char *rx_buf = NULL;
+ int rc;
+
+ target = g_io_targets;
+ while (target != NULL) {
+ if (data_length < spdk_bdev_get_block_size(target->bdev) ||
+ data_length / spdk_bdev_get_block_size(target->bdev) > spdk_bdev_get_num_blocks(target->bdev)) {
+ target = target->next;
+ continue;
+ }
+
+ if (!write_zeroes) {
+ initialize_buffer(&tx_buf, pattern, data_length);
+ initialize_buffer(&rx_buf, 0, data_length);
+
+ blockdev_write(target, tx_buf, offset, data_length, iov_len);
+ } else {
+ initialize_buffer(&tx_buf, 0, data_length);
+ initialize_buffer(&rx_buf, pattern, data_length);
+
+ blockdev_write_zeroes(target, tx_buf, offset, data_length);
+ }
+
+
+ if (expected_rc == 0) {
+ CU_ASSERT_EQUAL(g_completion_success, true);
+ } else {
+ CU_ASSERT_EQUAL(g_completion_success, false);
+ }
+ blockdev_read(target, rx_buf, offset, data_length, iov_len);
+
+ if (expected_rc == 0) {
+ CU_ASSERT_EQUAL(g_completion_success, true);
+ } else {
+ CU_ASSERT_EQUAL(g_completion_success, false);
+ }
+
+ if (g_completion_success) {
+ rc = blockdev_write_read_data_match(rx_buf, tx_buf, data_length);
+ /* Assert the write by comparing it with values read
+ * from each blockdev */
+ CU_ASSERT_EQUAL(rc, 0);
+ }
+
+ target = target->next;
+ }
+}
+
+static void
+blockdev_write_read_4k(void)
+{
+ uint32_t data_length;
+ uint64_t offset;
+ int pattern;
+ int expected_rc;
+
+ /* Data size = 4K */
+ data_length = 4096;
+ CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
+ offset = 0;
+ pattern = 0xA3;
+ /* Params are valid, hence the expected return value
+ * of write and read for all blockdevs is 0. */
+ expected_rc = 0;
+
+ blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
+}
+
+static void
+blockdev_write_zeroes_read_4k(void)
+{
+ uint32_t data_length;
+ uint64_t offset;
+ int pattern;
+ int expected_rc;
+
+ /* Data size = 4K */
+ data_length = 4096;
+ offset = 0;
+ pattern = 0xA3;
+ /* Params are valid, hence the expected return value
+ * of write_zeroes and read for all blockdevs is 0. */
+ expected_rc = 0;
+
+ blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
+}
+
+/*
+ * This i/o will not have to split at the bdev layer.
+ */
+static void
+blockdev_write_zeroes_read_1m(void)
+{
+ uint32_t data_length;
+ uint64_t offset;
+ int pattern;
+ int expected_rc;
+
+ /* Data size = 1M */
+ data_length = 1048576;
+ offset = 0;
+ pattern = 0xA3;
+ /* Params are valid, hence the expected return value
+ * of write_zeroes and read for all blockdevs is 0. */
+ expected_rc = 0;
+
+ blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
+}
+
+/*
+ * This i/o will have to split at the bdev layer if
+ * write-zeroes is not supported by the bdev.
+ */
+static void
+blockdev_write_zeroes_read_3m(void)
+{
+ uint32_t data_length;
+ uint64_t offset;
+ int pattern;
+ int expected_rc;
+
+ /* Data size = 3M */
+ data_length = 3145728;
+ offset = 0;
+ pattern = 0xA3;
+ /* Params are valid, hence the expected return value
+ * of write_zeroes and read for all blockdevs is 0. */
+ expected_rc = 0;
+
+ blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
+}
+
+/*
+ * This i/o will have to split at the bdev layer if
+ * write-zeroes is not supported by the bdev. It also
+ * tests a write size that is not an even multiple of
+ * the bdev layer zero buffer size.
+ */
+static void
+blockdev_write_zeroes_read_3m_500k(void)
+{
+ uint32_t data_length;
+ uint64_t offset;
+ int pattern;
+ int expected_rc;
+
+ /* Data size = 3.5M */
+ data_length = 3670016;
+ offset = 0;
+ pattern = 0xA3;
+ /* Params are valid, hence the expected return value
+ * of write_zeroes and read for all blockdevs is 0. */
+ expected_rc = 0;
+
+ blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
+}
+
+static void
+blockdev_writev_readv_4k(void)
+{
+ uint32_t data_length, iov_len;
+ uint64_t offset;
+ int pattern;
+ int expected_rc;
+
+ /* Data size = 4K */
+ data_length = 4096;
+ iov_len = 4096;
+ CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
+ offset = 0;
+ pattern = 0xA3;
+ /* Params are valid, hence the expected return value
+ * of write and read for all blockdevs is 0. */
+ expected_rc = 0;
+
+ blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
+}
+
+static void
+blockdev_writev_readv_30x4k(void)
+{
+ uint32_t data_length, iov_len;
+ uint64_t offset;
+ int pattern;
+ int expected_rc;
+
+	/* Data size = 30 * 4K = 120K */
+ data_length = 4096 * 30;
+ iov_len = 4096;
+ CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
+ offset = 0;
+ pattern = 0xA3;
+ /* Params are valid, hence the expected return value
+ * of write and read for all blockdevs is 0. */
+ expected_rc = 0;
+
+ blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
+}
+
+static void
+blockdev_write_read_512Bytes(void)
+{
+ uint32_t data_length;
+ uint64_t offset;
+ int pattern;
+ int expected_rc;
+
+ /* Data size = 512 */
+ data_length = 512;
+ CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
+ offset = 8192;
+ pattern = 0xA3;
+ /* Params are valid, hence the expected return value
+ * of write and read for all blockdevs is 0. */
+ expected_rc = 0;
+
+ blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
+}
+
+static void
+blockdev_writev_readv_512Bytes(void)
+{
+ uint32_t data_length, iov_len;
+ uint64_t offset;
+ int pattern;
+ int expected_rc;
+
+ /* Data size = 512 */
+ data_length = 512;
+ iov_len = 512;
+ CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
+ offset = 8192;
+ pattern = 0xA3;
+ /* Params are valid, hence the expected return value
+ * of write and read for all blockdevs is 0. */
+ expected_rc = 0;
+
+ blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
+}
+
+static void
+blockdev_write_read_size_gt_128k(void)
+{
+ uint32_t data_length;
+ uint64_t offset;
+ int pattern;
+ int expected_rc;
+
+ /* Data size = 132K */
+ data_length = 135168;
+ CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
+ offset = 8192;
+ pattern = 0xA3;
+ /* Params are valid, hence the expected return value
+ * of write and read for all blockdevs is 0. */
+ expected_rc = 0;
+
+ blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
+}
+
+static void
+blockdev_writev_readv_size_gt_128k(void)
+{
+ uint32_t data_length, iov_len;
+ uint64_t offset;
+ int pattern;
+ int expected_rc;
+
+ /* Data size = 132K */
+ data_length = 135168;
+ iov_len = 135168;
+ CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
+ offset = 8192;
+ pattern = 0xA3;
+ /* Params are valid, hence the expected return value
+ * of write and read for all blockdevs is 0. */
+ expected_rc = 0;
+
+ blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
+}
+
+static void
+blockdev_writev_readv_size_gt_128k_two_iov(void)
+{
+ uint32_t data_length, iov_len;
+ uint64_t offset;
+ int pattern;
+ int expected_rc;
+
+ /* Data size = 132K */
+ data_length = 135168;
+ iov_len = 128 * 1024;
+ CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
+ offset = 8192;
+ pattern = 0xA3;
+ /* Params are valid, hence the expected return value
+ * of write and read for all blockdevs is 0. */
+ expected_rc = 0;
+
+ blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
+}
+
+static void
+blockdev_write_read_invalid_size(void)
+{
+ uint32_t data_length;
+ uint64_t offset;
+ int pattern;
+ int expected_rc;
+
+ /* Data size is not a multiple of the block size */
+ data_length = 0x1015;
+ CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
+ offset = 8192;
+ pattern = 0xA3;
+ /* Params are invalid, hence the expected return value
+ * of write and read for all blockdevs is < 0 */
+ expected_rc = -1;
+
+ blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
+}
+
+static void
+blockdev_write_read_offset_plus_nbytes_equals_bdev_size(void)
+{
+ struct io_target *target;
+ struct spdk_bdev *bdev;
+ char *tx_buf = NULL;
+ char *rx_buf = NULL;
+ uint64_t offset;
+ uint32_t block_size;
+ int rc;
+
+ target = g_io_targets;
+ while (target != NULL) {
+ bdev = target->bdev;
+
+ block_size = spdk_bdev_get_block_size(bdev);
+
+ /* The start offset has been set to a marginal value
+ * such that offset + nbytes == Total size of
+ * blockdev. */
+ offset = ((spdk_bdev_get_num_blocks(bdev) - 1) * block_size);
+
+ initialize_buffer(&tx_buf, 0xA3, block_size);
+ initialize_buffer(&rx_buf, 0, block_size);
+
+ blockdev_write(target, tx_buf, offset, block_size, 0);
+ CU_ASSERT_EQUAL(g_completion_success, true);
+
+ blockdev_read(target, rx_buf, offset, block_size, 0);
+ CU_ASSERT_EQUAL(g_completion_success, true);
+
+ rc = blockdev_write_read_data_match(rx_buf, tx_buf, block_size);
+ /* Assert the write by comparing it with values read
+ * from each blockdev */
+ CU_ASSERT_EQUAL(rc, 0);
+
+ target = target->next;
+ }
+}
+
+static void
+blockdev_write_read_offset_plus_nbytes_gt_bdev_size(void)
+{
+ struct io_target *target;
+ struct spdk_bdev *bdev;
+ char *tx_buf = NULL;
+ char *rx_buf = NULL;
+ int data_length;
+ uint64_t offset;
+ int pattern;
+
+ /* Tests the overflow condition of the blockdevs. */
+ data_length = 4096;
+ CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
+ pattern = 0xA3;
+
+ target = g_io_targets;
+ while (target != NULL) {
+ bdev = target->bdev;
+
+ /* The start offset has been set to a valid value
+ * but offset + nbytes is greater than the Total size
+ * of the blockdev. The test should fail. */
+ offset = ((spdk_bdev_get_num_blocks(bdev) * spdk_bdev_get_block_size(bdev)) - 1024);
+
+ initialize_buffer(&tx_buf, pattern, data_length);
+ initialize_buffer(&rx_buf, 0, data_length);
+
+ blockdev_write(target, tx_buf, offset, data_length, 0);
+ CU_ASSERT_EQUAL(g_completion_success, false);
+
+ blockdev_read(target, rx_buf, offset, data_length, 0);
+ CU_ASSERT_EQUAL(g_completion_success, false);
+
+ target = target->next;
+ }
+}
+
+static void
+blockdev_write_read_max_offset(void)
+{
+ int data_length;
+ uint64_t offset;
+ int pattern;
+ int expected_rc;
+
+ data_length = 4096;
+ CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
+ /* The start offset has been set to UINT64_MAX such that
+ * adding nbytes wraps around and points to an invalid address. */
+ offset = UINT64_MAX;
+ pattern = 0xA3;
+ /* Params are invalid, hence the expected return value
+ * of write and read for all blockdevs is < 0 */
+ expected_rc = -1;
+
+ blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
+}
+
+static void
+blockdev_overlapped_write_read_8k(void)
+{
+ int data_length;
+ uint64_t offset;
+ int pattern;
+ int expected_rc;
+
+ /* Data size = 8K */
+ data_length = 8192;
+ CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
+ offset = 0;
+ pattern = 0xA3;
+ /* Params are valid, hence the expected return value
+ * of write and read for all blockdevs is 0. */
+ expected_rc = 0;
+ /* Assert the write by comparing it with values read
+ * from the same offset for each blockdev */
+ blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
+
+ /* Overwrite the pattern 0xbb of size 8K on an address offset overlapping
+ * with the address written above and assert the new value in
+ * the overlapped address range */
+ /* Populate 8k with value 0xBB */
+ pattern = 0xBB;
+	/* Offset = 4096; overlap the offset addresses and write value 0xbb */
+ offset = 4096;
+ /* Assert the write by comparing it with values read
+ * from the overlapped offset for each blockdev */
+ blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
+}
+
+static void
+__blockdev_reset(void *arg1, void *arg2)
+{
+ struct bdevio_request *req = arg1;
+ struct io_target *target = req->target;
+ int rc;
+
+ rc = spdk_bdev_reset(target->bdev_desc, target->ch, quick_test_complete, NULL);
+ if (rc < 0) {
+ g_completion_success = false;
+ wake_ut_thread();
+ }
+}
+
+static void
+blockdev_reset(struct io_target *target)
+{
+ struct bdevio_request req;
+
+ req.target = target;
+
+ g_completion_success = false;
+
+ execute_spdk_function(__blockdev_reset, &req, NULL);
+}
+
+static void
+blockdev_test_reset(void)
+{
+ struct io_target *target;
+
+ target = g_io_targets;
+ while (target != NULL) {
+ blockdev_reset(target);
+ CU_ASSERT_EQUAL(g_completion_success, true);
+
+ target = target->next;
+ }
+}
+
+static void
+__stop_init_thread(void *arg1, void *arg2)
+{
+ unsigned num_failures = (unsigned)(uintptr_t)arg1;
+
+ bdevio_cleanup_targets();
+ spdk_app_stop(num_failures);
+}
+
+static void
+stop_init_thread(unsigned num_failures)
+{
+ struct spdk_event *event;
+
+ event = spdk_event_allocate(g_lcore_id_init, __stop_init_thread,
+ (void *)(uintptr_t)num_failures, NULL);
+ spdk_event_call(event);
+}
+
+static void
+__run_ut_thread(void *arg1, void *arg2)
+{
+ CU_pSuite suite = NULL;
+ unsigned num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ stop_init_thread(CU_get_error());
+ return;
+ }
+
+ suite = CU_add_suite("components_suite", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ stop_init_thread(CU_get_error());
+ return;
+ }
+
+ if (
+ CU_add_test(suite, "blockdev write read 4k", blockdev_write_read_4k) == NULL
+ || CU_add_test(suite, "blockdev write zeroes read 4k", blockdev_write_zeroes_read_4k) == NULL
+ || CU_add_test(suite, "blockdev write zeroes read 1m", blockdev_write_zeroes_read_1m) == NULL
+ || CU_add_test(suite, "blockdev write zeroes read 3m", blockdev_write_zeroes_read_3m) == NULL
+ || CU_add_test(suite, "blockdev write zeroes read 3.5m", blockdev_write_zeroes_read_3m_500k) == NULL
+ || CU_add_test(suite, "blockdev write read 512 bytes",
+ blockdev_write_read_512Bytes) == NULL
+ || CU_add_test(suite, "blockdev write read size > 128k",
+ blockdev_write_read_size_gt_128k) == NULL
+ || CU_add_test(suite, "blockdev write read invalid size",
+ blockdev_write_read_invalid_size) == NULL
+ || CU_add_test(suite, "blockdev write read offset + nbytes == size of blockdev",
+ blockdev_write_read_offset_plus_nbytes_equals_bdev_size) == NULL
+ || CU_add_test(suite, "blockdev write read offset + nbytes > size of blockdev",
+ blockdev_write_read_offset_plus_nbytes_gt_bdev_size) == NULL
+ || CU_add_test(suite, "blockdev write read max offset",
+ blockdev_write_read_max_offset) == NULL
+ || CU_add_test(suite, "blockdev write read 8k on overlapped address offset",
+ blockdev_overlapped_write_read_8k) == NULL
+ || CU_add_test(suite, "blockdev writev readv 4k", blockdev_writev_readv_4k) == NULL
+ || CU_add_test(suite, "blockdev writev readv 30 x 4k",
+ blockdev_writev_readv_30x4k) == NULL
+ || CU_add_test(suite, "blockdev writev readv 512 bytes",
+ blockdev_writev_readv_512Bytes) == NULL
+ || CU_add_test(suite, "blockdev writev readv size > 128k",
+ blockdev_writev_readv_size_gt_128k) == NULL
+ || CU_add_test(suite, "blockdev writev readv size > 128k in two iovs",
+ blockdev_writev_readv_size_gt_128k_two_iov) == NULL
+ || CU_add_test(suite, "blockdev reset",
+ blockdev_test_reset) == NULL
+ ) {
+ CU_cleanup_registry();
+ stop_init_thread(CU_get_error());
+ return;
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ stop_init_thread(num_failures);
+}
+
+static void
+test_main(void *arg1, void *arg2)
+{
+ struct spdk_event *event;
+
+ pthread_mutex_init(&g_test_mutex, NULL);
+ pthread_cond_init(&g_test_cond, NULL);
+
+ g_lcore_id_init = spdk_env_get_first_core();
+ g_lcore_id_ut = spdk_env_get_next_core(g_lcore_id_init);
+ g_lcore_id_io = spdk_env_get_next_core(g_lcore_id_ut);
+
+ if (g_lcore_id_init == SPDK_ENV_LCORE_ID_ANY ||
+ g_lcore_id_ut == SPDK_ENV_LCORE_ID_ANY ||
+ g_lcore_id_io == SPDK_ENV_LCORE_ID_ANY) {
+ SPDK_ERRLOG("Could not reserve 3 separate threads.\n");
+ spdk_app_stop(-1);
+ }
+
+ if (bdevio_construct_targets() < 0) {
+ spdk_app_stop(-1);
+ return;
+ }
+
+ event = spdk_event_allocate(g_lcore_id_ut, __run_ut_thread, NULL, NULL);
+ spdk_event_call(event);
+}
+
+static void
+bdevio_usage(void)
+{
+}
+
+static void
+bdevio_parse_arg(int ch, char *arg)
+{
+}
+
+int
+main(int argc, char **argv)
+{
+ int rc;
+ struct spdk_app_opts opts = {};
+
+ spdk_app_opts_init(&opts);
+ opts.name = "bdevtest";
+ opts.rpc_addr = NULL;
+ opts.reactor_mask = "0x7";
+
+ if ((rc = spdk_app_parse_args(argc, argv, &opts, "", NULL,
+ bdevio_parse_arg, bdevio_usage)) !=
+ SPDK_APP_PARSE_ARGS_SUCCESS) {
+ return rc;
+ }
+
+ rc = spdk_app_start(&opts, test_main, NULL, NULL);
+ spdk_app_fini();
+
+ return rc;
+}
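
bdevio.c above builds its CUnit suite in __run_ut_thread() with CU_add_suite()/CU_add_test(), and each case is a thin wrapper around blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, write_zeroes). A sketch of how one more case could be wired in, reusing those definitions; the test name and function below are hypothetical and not part of this commit:

    /* Hypothetical extra case: 8K write/read at a 4K offset, valid params,
     * so the expected completion status is success (expected_rc == 0). */
    static void
    blockdev_write_read_8k_offset_4k(void)
    {
            blockdev_write_read(8192, 0, 0xA3, 4096, 0, 0);
    }

    /* ...registered alongside the existing cases in __run_ut_thread(): */
    if (CU_add_test(suite, "blockdev write read 8k at 4k offset",
                    blockdev_write_read_8k_offset_4k) == NULL) {
            CU_cleanup_registry();
            stop_init_thread(CU_get_error());
            return;
    }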
diff --git a/src/spdk/test/bdev/bdevjson/json_config.sh b/src/spdk/test/bdev/bdevjson/json_config.sh
new file mode 100755
index 00000000..3e5d276e
--- /dev/null
+++ b/src/spdk/test/bdev/bdevjson/json_config.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+set -ex
+BDEV_JSON_DIR=$(readlink -f $(dirname $0))
+. $BDEV_JSON_DIR/../../json_config/common.sh
+
+function test_subsystems() {
+ run_spdk_tgt
+ rootdir=$(readlink -f $BDEV_JSON_DIR/../../..)
+
+ rpc_py="$spdk_rpc_py"
+ clear_config_py="$spdk_clear_config_py"
+ load_nvme
+ create_bdev_subsystem_config
+ test_json_config
+
+ clear_bdev_subsystem_config
+ test_global_params "spdk_tgt"
+ kill_targets
+}
+
+timing_enter json_config
+trap 'on_error_exit "${FUNCNAME}" "${LINENO}"' ERR
+
+test_subsystems
+
+timing_exit json_config
+report_test_completion json_config
diff --git a/src/spdk/test/bdev/bdevjson/rbd_json_config.sh b/src/spdk/test/bdev/bdevjson/rbd_json_config.sh
new file mode 100755
index 00000000..458ecb74
--- /dev/null
+++ b/src/spdk/test/bdev/bdevjson/rbd_json_config.sh
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+set -ex
+VHOST_JSON_DIR=$(readlink -f $(dirname $0))
+. $VHOST_JSON_DIR/../../json_config/common.sh
+
+function test_subsystems() {
+ run_spdk_tgt
+ rootdir=$(readlink -f $VHOST_JSON_DIR/../../..)
+
+ rpc_py="$spdk_rpc_py"
+ clear_config_py="$spdk_clear_config_py"
+ $rpc_py start_subsystem_init
+
+ create_rbd_bdev_subsystem_config
+ test_json_config
+ clear_rbd_bdev_subsystem_config
+
+ kill_targets
+}
+
+trap 'rbd_cleanup; on_error_exit "${FUNCNAME}" "${LINENO}"' ERR
+timing_enter rbd_json_config
+
+test_subsystems
+timing_exit rbd_json_config
+report_test_completion rbd_json_config
diff --git a/src/spdk/test/bdev/bdevperf/.gitignore b/src/spdk/test/bdev/bdevperf/.gitignore
new file mode 100644
index 00000000..e14ddd84
--- /dev/null
+++ b/src/spdk/test/bdev/bdevperf/.gitignore
@@ -0,0 +1 @@
+bdevperf
diff --git a/src/spdk/test/bdev/bdevperf/Makefile b/src/spdk/test/bdev/bdevperf/Makefile
new file mode 100644
index 00000000..eb5f76ae
--- /dev/null
+++ b/src/spdk/test/bdev/bdevperf/Makefile
@@ -0,0 +1,61 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.modules.mk
+
+APP = bdevperf
+
+C_SRCS := bdevperf.c
+
+SPDK_LIB_LIST = event_bdev event_copy
+SPDK_LIB_LIST += bdev copy event trace log conf thread util rpc jsonrpc json
+
+LIBS += $(BLOCKDEV_MODULES_LINKER_ARGS) \
+ $(COPY_MODULES_LINKER_ARGS) \
+ $(SOCK_MODULES_LINKER_ARGS)
+
+LIBS += $(SPDK_LIB_LINKER_ARGS) $(ENV_LINKER_ARGS)
+
+all : $(APP)
+ @:
+
+$(APP) : $(OBJS) $(SPDK_LIB_FILES) $(BLOCKDEV_MODULES_FILES) $(COPY_MODULES_FILES) $(SOCK_MODULES_FILES) $(ENV_LIBS)
+ $(LINK_C)
+
+clean :
+ $(CLEAN_C) $(APP)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.deps.mk
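
bdevperf.c below reports throughput either as a cumulative average or, when -P <n> is given, as an exponential moving average with M = 2 / (n + 1) and EMA[i+1] = IO/s * M + (1 - M) * EMA[i] (see its usage text and get_ema_io_per_second()). A standalone example of that update with made-up numbers, only to show the arithmetic:

    #include <stdio.h>

    int main(void)
    {
            double ema = 100000.0;            /* previous EMA, hypothetical */
            double io_per_second = 120000.0;  /* IO/s measured this period, hypothetical */
            unsigned ema_period = 4;          /* value that would be passed via -P */

            /* Same update as get_ema_io_per_second(): EMA += (IO/s - EMA) * 2 / (n + 1) */
            ema += (io_per_second - ema) * 2 / (ema_period + 1);
            printf("new EMA = %.1f IO/s\n", ema);  /* 100000 + 20000 * 0.4 = 108000.0 */
            return 0;
    }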
diff --git a/src/spdk/test/bdev/bdevperf/bdevperf.c b/src/spdk/test/bdev/bdevperf/bdevperf.c
new file mode 100644
index 00000000..1416ea27
--- /dev/null
+++ b/src/spdk/test/bdev/bdevperf/bdevperf.c
@@ -0,0 +1,1035 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/bdev.h"
+#include "spdk/copy_engine.h"
+#include "spdk/endian.h"
+#include "spdk/env.h"
+#include "spdk/event.h"
+#include "spdk/log.h"
+#include "spdk/util.h"
+#include "spdk/thread.h"
+#include "spdk/string.h"
+
+struct bdevperf_task {
+ struct iovec iov;
+ struct io_target *target;
+ void *buf;
+ uint64_t offset_blocks;
+ enum spdk_bdev_io_type io_type;
+ TAILQ_ENTRY(bdevperf_task) link;
+ struct spdk_bdev_io_wait_entry bdev_io_wait;
+};
+
+static const char *g_workload_type;
+static int g_io_size = 0;
+/* initialize to invalid value so we can detect if user overrides it. */
+static int g_rw_percentage = -1;
+static int g_is_random;
+static bool g_verify = false;
+static bool g_reset = false;
+static bool g_unmap = false;
+static bool g_write_zeroes = false;
+static bool g_flush = false;
+static int g_queue_depth;
+static uint64_t g_time_in_usec;
+static int g_show_performance_real_time = 0;
+static uint64_t g_show_performance_period_in_usec = 1000000;
+static uint64_t g_show_performance_period_num = 0;
+static uint64_t g_show_performance_ema_period = 0;
+static bool g_run_failed = false;
+static bool g_shutdown = false;
+static uint64_t g_shutdown_tsc;
+static bool g_zcopy = true;
+static unsigned g_master_core;
+static int g_time_in_sec;
+static bool g_mix_specified;
+
+static struct spdk_poller *g_perf_timer = NULL;
+
+static void bdevperf_submit_single(struct io_target *target, struct bdevperf_task *task);
+
+struct io_target {
+ char *name;
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *bdev_desc;
+ struct spdk_io_channel *ch;
+ struct io_target *next;
+ unsigned lcore;
+ uint64_t io_completed;
+ uint64_t prev_io_completed;
+ double ema_io_per_second;
+ int current_queue_depth;
+ uint64_t size_in_ios;
+ uint64_t offset_in_ios;
+ uint64_t io_size_blocks;
+ bool is_draining;
+ struct spdk_poller *run_timer;
+ struct spdk_poller *reset_timer;
+ TAILQ_HEAD(, bdevperf_task) task_list;
+};
+
+struct io_target **g_head;
+uint32_t *coremap;
+static int g_target_count = 0;
+
+/*
+ * Used to determine how the I/O buffers should be aligned.
+ * This alignment will be bumped up for blockdevs that
+ * require alignment based on block length - for example,
+ * AIO blockdevs.
+ */
+static size_t g_min_alignment = 8;
+
+static int
+blockdev_heads_init(void)
+{
+ uint32_t i, idx = 0;
+ uint32_t core_count = spdk_env_get_core_count();
+
+ g_head = calloc(core_count, sizeof(struct io_target *));
+ if (!g_head) {
+ fprintf(stderr, "Cannot allocate g_head array with size=%u\n",
+ core_count);
+ return -1;
+ }
+
+ coremap = calloc(core_count, sizeof(uint32_t));
+ if (!coremap) {
+ free(g_head);
+ fprintf(stderr, "Cannot allocate coremap array with size=%u\n",
+ core_count);
+ return -1;
+ }
+
+ SPDK_ENV_FOREACH_CORE(i) {
+ coremap[idx++] = i;
+ }
+
+ return 0;
+}
+
+static void
+bdevperf_free_target(struct io_target *target)
+{
+ struct bdevperf_task *task, *tmp;
+
+ TAILQ_FOREACH_SAFE(task, &target->task_list, link, tmp) {
+ TAILQ_REMOVE(&target->task_list, task, link);
+ spdk_dma_free(task->buf);
+ free(task);
+ }
+
+ free(target->name);
+ free(target);
+}
+
+static void
+blockdev_heads_destroy(void)
+{
+ uint32_t i, core_count;
+ struct io_target *target, *next_target;
+
+ if (!g_head) {
+ return;
+ }
+
+ core_count = spdk_env_get_core_count();
+ for (i = 0; i < core_count; i++) {
+ target = g_head[i];
+ while (target != NULL) {
+ next_target = target->next;
+ bdevperf_free_target(target);
+ target = next_target;
+ }
+ }
+
+ free(g_head);
+ free(coremap);
+}
+
+static void
+bdevperf_construct_targets(void)
+{
+ int index = 0;
+ struct spdk_bdev *bdev;
+ struct io_target *target;
+ size_t align;
+ int rc;
+
+ bdev = spdk_bdev_first_leaf();
+ while (bdev != NULL) {
+
+ if (g_unmap && !spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_UNMAP)) {
+ printf("Skipping %s because it does not support unmap\n", spdk_bdev_get_name(bdev));
+ bdev = spdk_bdev_next_leaf(bdev);
+ continue;
+ }
+
+ target = malloc(sizeof(struct io_target));
+ if (!target) {
+ fprintf(stderr, "Unable to allocate memory for new target.\n");
+ /* Return immediately because all mallocs will presumably fail after this */
+ return;
+ }
+
+ target->name = strdup(spdk_bdev_get_name(bdev));
+ if (!target->name) {
+ fprintf(stderr, "Unable to allocate memory for target name.\n");
+ free(target);
+ /* Return immediately because all mallocs will presumably fail after this */
+ return;
+ }
+
+ rc = spdk_bdev_open(bdev, true, NULL, NULL, &target->bdev_desc);
+ if (rc != 0) {
+ SPDK_ERRLOG("Could not open leaf bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
+ free(target->name);
+ free(target);
+ bdev = spdk_bdev_next_leaf(bdev);
+ continue;
+ }
+
+ target->bdev = bdev;
+ /* Mapping each target to lcore */
+ index = g_target_count % spdk_env_get_core_count();
+ target->next = g_head[index];
+ target->lcore = coremap[index];
+ target->io_completed = 0;
+ target->current_queue_depth = 0;
+ target->offset_in_ios = 0;
+ target->io_size_blocks = g_io_size / spdk_bdev_get_block_size(bdev);
+ if (target->io_size_blocks == 0 ||
+ (g_io_size % spdk_bdev_get_block_size(bdev)) != 0) {
+			SPDK_ERRLOG("IO size (%d) is smaller than the blocksize of bdev %s (%"PRIu32") or not a blocksize multiple\n",
+ g_io_size, spdk_bdev_get_name(bdev), spdk_bdev_get_block_size(bdev));
+ spdk_bdev_close(target->bdev_desc);
+ free(target->name);
+ free(target);
+ bdev = spdk_bdev_next_leaf(bdev);
+ continue;
+ }
+
+ target->size_in_ios = spdk_bdev_get_num_blocks(bdev) / target->io_size_blocks;
+ align = spdk_bdev_get_buf_align(bdev);
+ /*
+ * TODO: This should actually use the LCM of align and g_min_alignment, but
+ * it is fairly safe to assume all alignments are powers of two for now.
+ */
+ g_min_alignment = spdk_max(g_min_alignment, align);
+
+ target->is_draining = false;
+ target->run_timer = NULL;
+ target->reset_timer = NULL;
+ TAILQ_INIT(&target->task_list);
+
+ g_head[index] = target;
+ g_target_count++;
+
+ bdev = spdk_bdev_next_leaf(bdev);
+ }
+}
+
+static void
+end_run(void *arg1, void *arg2)
+{
+ struct io_target *target = arg1;
+
+ spdk_put_io_channel(target->ch);
+ spdk_bdev_close(target->bdev_desc);
+ if (--g_target_count == 0) {
+ if (g_show_performance_real_time) {
+ spdk_poller_unregister(&g_perf_timer);
+ }
+ if (g_run_failed) {
+ spdk_app_stop(1);
+ } else {
+ spdk_app_stop(0);
+ }
+ }
+}
+
+static void
+bdevperf_complete(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ struct io_target *target;
+ struct bdevperf_task *task = cb_arg;
+ struct spdk_event *complete;
+ struct iovec *iovs;
+ int iovcnt;
+
+ target = task->target;
+
+ if (!success) {
+ if (!g_reset) {
+ target->is_draining = true;
+ g_run_failed = true;
+ printf("task offset: %lu on target bdev=%s fails\n",
+ task->offset_blocks, target->name);
+ }
+ } else if (g_verify || g_reset) {
+ spdk_bdev_io_get_iovec(bdev_io, &iovs, &iovcnt);
+ assert(iovcnt == 1);
+ assert(iovs != NULL);
+ if (memcmp(task->buf, iovs[0].iov_base, g_io_size) != 0) {
+ printf("Buffer mismatch! Disk Offset: %lu\n", task->offset_blocks);
+ target->is_draining = true;
+ g_run_failed = true;
+ }
+ }
+
+ target->current_queue_depth--;
+
+ if (success) {
+ target->io_completed++;
+ }
+
+ spdk_bdev_free_io(bdev_io);
+
+ /*
+ * is_draining indicates when time has expired for the test run
+ * and we are just waiting for the previously submitted I/O
+ * to complete. In this case, do not submit a new I/O to replace
+ * the one just completed.
+ */
+ if (!target->is_draining) {
+ bdevperf_submit_single(target, task);
+ } else {
+ TAILQ_INSERT_TAIL(&target->task_list, task, link);
+ if (target->current_queue_depth == 0) {
+ complete = spdk_event_allocate(g_master_core, end_run, target, NULL);
+ spdk_event_call(complete);
+ }
+ }
+}
+
+static void
+bdevperf_verify_submit_read(void *cb_arg)
+{
+ struct io_target *target;
+ struct bdevperf_task *task = cb_arg;
+ int rc;
+
+ target = task->target;
+
+ /* Read the data back in */
+ rc = spdk_bdev_read_blocks(target->bdev_desc, target->ch, NULL, task->offset_blocks,
+ target->io_size_blocks, bdevperf_complete, task);
+ if (rc == -ENOMEM) {
+ task->bdev_io_wait.bdev = target->bdev;
+ task->bdev_io_wait.cb_fn = bdevperf_verify_submit_read;
+ task->bdev_io_wait.cb_arg = task;
+ spdk_bdev_queue_io_wait(target->bdev, target->ch, &task->bdev_io_wait);
+ } else if (rc != 0) {
+ printf("Failed to submit read: %d\n", rc);
+ target->is_draining = true;
+ g_run_failed = true;
+ }
+}
+
+static void
+bdevperf_verify_write_complete(struct spdk_bdev_io *bdev_io, bool success,
+ void *cb_arg)
+{
+ if (success) {
+ spdk_bdev_free_io(bdev_io);
+ bdevperf_verify_submit_read(cb_arg);
+ } else {
+ bdevperf_complete(bdev_io, success, cb_arg);
+ }
+}
+
+static __thread unsigned int seed = 0;
+
+static void
+bdevperf_prep_task(struct bdevperf_task *task)
+{
+ struct io_target *target = task->target;
+ uint64_t offset_in_ios;
+
+ if (g_is_random) {
+ offset_in_ios = rand_r(&seed) % target->size_in_ios;
+ } else {
+ offset_in_ios = target->offset_in_ios++;
+ if (target->offset_in_ios == target->size_in_ios) {
+ target->offset_in_ios = 0;
+ }
+ }
+
+ task->offset_blocks = offset_in_ios * target->io_size_blocks;
+ if (g_verify || g_reset) {
+ memset(task->buf, rand_r(&seed) % 256, g_io_size);
+ task->iov.iov_base = task->buf;
+ task->iov.iov_len = g_io_size;
+ task->io_type = SPDK_BDEV_IO_TYPE_WRITE;
+ } else if (g_flush) {
+ task->io_type = SPDK_BDEV_IO_TYPE_FLUSH;
+ } else if (g_unmap) {
+ task->io_type = SPDK_BDEV_IO_TYPE_UNMAP;
+ } else if (g_write_zeroes) {
+ task->io_type = SPDK_BDEV_IO_TYPE_WRITE_ZEROES;
+ } else if ((g_rw_percentage == 100) ||
+ (g_rw_percentage != 0 && ((rand_r(&seed) % 100) < g_rw_percentage))) {
+ task->io_type = SPDK_BDEV_IO_TYPE_READ;
+ } else {
+ task->iov.iov_base = task->buf;
+ task->iov.iov_len = g_io_size;
+ task->io_type = SPDK_BDEV_IO_TYPE_WRITE;
+ }
+}
+
+static void
+bdevperf_submit_task(void *arg)
+{
+ struct bdevperf_task *task = arg;
+ struct io_target *target = task->target;
+ struct spdk_bdev_desc *desc;
+ struct spdk_io_channel *ch;
+ spdk_bdev_io_completion_cb cb_fn;
+ void *rbuf;
+ int rc;
+
+ desc = target->bdev_desc;
+ ch = target->ch;
+
+ switch (task->io_type) {
+ case SPDK_BDEV_IO_TYPE_WRITE:
+ cb_fn = (g_verify || g_reset) ? bdevperf_verify_write_complete : bdevperf_complete;
+ rc = spdk_bdev_writev_blocks(desc, ch, &task->iov, 1, task->offset_blocks,
+ target->io_size_blocks, cb_fn, task);
+ break;
+ case SPDK_BDEV_IO_TYPE_FLUSH:
+ rc = spdk_bdev_flush_blocks(desc, ch, task->offset_blocks,
+ target->io_size_blocks, bdevperf_complete, task);
+ break;
+ case SPDK_BDEV_IO_TYPE_UNMAP:
+ rc = spdk_bdev_unmap_blocks(desc, ch, task->offset_blocks,
+ target->io_size_blocks, bdevperf_complete, task);
+ break;
+ case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
+ rc = spdk_bdev_write_zeroes_blocks(desc, ch, task->offset_blocks,
+ target->io_size_blocks, bdevperf_complete, task);
+ break;
+ case SPDK_BDEV_IO_TYPE_READ:
+ rbuf = g_zcopy ? NULL : task->buf;
+ rc = spdk_bdev_read_blocks(desc, ch, rbuf, task->offset_blocks,
+ target->io_size_blocks, bdevperf_complete, task);
+ break;
+ default:
+ assert(false);
+ rc = -EINVAL;
+ break;
+ }
+
+ if (rc == -ENOMEM) {
+ task->bdev_io_wait.bdev = target->bdev;
+ task->bdev_io_wait.cb_fn = bdevperf_submit_task;
+ task->bdev_io_wait.cb_arg = task;
+ spdk_bdev_queue_io_wait(target->bdev, ch, &task->bdev_io_wait);
+ return;
+ } else if (rc != 0) {
+ printf("Failed to submit bdev_io: %d\n", rc);
+ target->is_draining = true;
+ g_run_failed = true;
+ return;
+ }
+
+ target->current_queue_depth++;
+}
+
+static void
+bdevperf_submit_single(struct io_target *target, struct bdevperf_task *task)
+{
+ if (!task) {
+ if (!TAILQ_EMPTY(&target->task_list)) {
+ task = TAILQ_FIRST(&target->task_list);
+ TAILQ_REMOVE(&target->task_list, task, link);
+ } else {
+ printf("Task allocation failed\n");
+ abort();
+ }
+ }
+
+ bdevperf_prep_task(task);
+ bdevperf_submit_task(task);
+}
+
+static void
+bdevperf_submit_io(struct io_target *target, int queue_depth)
+{
+ while (queue_depth-- > 0) {
+ bdevperf_submit_single(target, NULL);
+ }
+}
+
+static int
+end_target(void *arg)
+{
+ struct io_target *target = arg;
+
+ spdk_poller_unregister(&target->run_timer);
+ if (g_reset) {
+ spdk_poller_unregister(&target->reset_timer);
+ }
+
+ target->is_draining = true;
+
+ return -1;
+}
+
+static int reset_target(void *arg);
+
+static void
+reset_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ struct bdevperf_task *task = cb_arg;
+ struct io_target *target = task->target;
+
+ if (!success) {
+ printf("Reset blockdev=%s failed\n", spdk_bdev_get_name(target->bdev));
+ target->is_draining = true;
+ g_run_failed = true;
+ }
+
+ TAILQ_INSERT_TAIL(&target->task_list, task, link);
+ spdk_bdev_free_io(bdev_io);
+
+ target->reset_timer = spdk_poller_register(reset_target, target,
+ 10 * 1000000);
+}
+
+static int
+reset_target(void *arg)
+{
+ struct io_target *target = arg;
+ struct bdevperf_task *task = NULL;
+ int rc;
+
+ spdk_poller_unregister(&target->reset_timer);
+
+ /* Do reset. */
+ task = TAILQ_FIRST(&target->task_list);
+ if (!task) {
+ printf("Task allocation failed\n");
+ abort();
+ }
+ TAILQ_REMOVE(&target->task_list, task, link);
+
+ rc = spdk_bdev_reset(target->bdev_desc, target->ch,
+ reset_cb, task);
+ if (rc) {
+ printf("Reset failed: %d\n", rc);
+ target->is_draining = true;
+ g_run_failed = true;
+ }
+
+ return -1;
+}
+
+static void
+bdevperf_submit_on_core(void *arg1, void *arg2)
+{
+ struct io_target *target = arg1;
+
+ /* Submit initial I/O for each block device. Each time one
+ * completes, another will be submitted. */
+ while (target != NULL) {
+ target->ch = spdk_bdev_get_io_channel(target->bdev_desc);
+ if (!target->ch) {
+ printf("Skip this device (%s) as IO channel not setup.\n",
+ spdk_bdev_get_name(target->bdev));
+ g_target_count--;
+ g_run_failed = true;
+ spdk_bdev_close(target->bdev_desc);
+
+ target = target->next;
+ continue;
+ }
+
+ /* Start a timer to stop this I/O chain when the run is over */
+ target->run_timer = spdk_poller_register(end_target, target,
+ g_time_in_usec);
+ if (g_reset) {
+ target->reset_timer = spdk_poller_register(reset_target, target,
+ 10 * 1000000);
+ }
+ bdevperf_submit_io(target, g_queue_depth);
+ target = target->next;
+ }
+}
+
+static void
+bdevperf_usage(void)
+{
+ printf(" -q <depth> io depth\n");
+ printf(" -o <size> io size in bytes\n");
+ printf(" -w <type> io pattern type, must be one of (read, write, randread, randwrite, rw, randrw, verify, reset, unmap, flush)\n");
+ printf(" -t <time> time in seconds\n");
+ printf(" -M <percent> rwmixread (100 for reads, 0 for writes)\n");
+ printf(" -P <num> number of moving average period\n");
+ printf("\t\t(If set to n, show weighted mean of the previous n IO/s in real time)\n");
+ printf("\t\t(Formula: M = 2 / (n + 1), EMA[i+1] = IO/s * M + (1 - M) * EMA[i])\n");
+ printf("\t\t(only valid with -S)\n");
+ printf(" -S show performance result in real time in seconds\n");
+}
+
+/*
+ * Cumulative Moving Average (CMA): average of all data up to current
+ * Exponential Moving Average (EMA): weighted mean of the previous n data and more weight is given to recent
+ * Simple Moving Average (SMA): unweighted mean of the previous n data
+ *
+ * Bdevperf supports CMA and EMA.
+ */
+static double
+get_cma_io_per_second(struct io_target *target, uint64_t io_time_in_usec)
+{
+ return (double)target->io_completed * 1000000 / io_time_in_usec;
+}
+
+static double
+get_ema_io_per_second(struct io_target *target, uint64_t ema_period)
+{
+ double io_completed, io_per_second;
+
+ io_completed = target->io_completed;
+ io_per_second = (double)(io_completed - target->prev_io_completed) * 1000000
+ / g_show_performance_period_in_usec;
+ target->prev_io_completed = io_completed;
+
+ target->ema_io_per_second += (io_per_second - target->ema_io_per_second) * 2
+ / (ema_period + 1);
+ return target->ema_io_per_second;
+}
+
+static void
+performance_dump(uint64_t io_time_in_usec, uint64_t ema_period)
+{
+ uint32_t index;
+ unsigned lcore_id;
+ double io_per_second, mb_per_second;
+ double total_io_per_second, total_mb_per_second;
+ struct io_target *target;
+
+ total_io_per_second = 0;
+ total_mb_per_second = 0;
+ for (index = 0; index < spdk_env_get_core_count(); index++) {
+ target = g_head[index];
+ if (target != NULL) {
+ lcore_id = target->lcore;
+ printf("\r Logical core: %u\n", lcore_id);
+ }
+ while (target != NULL) {
+ if (ema_period == 0) {
+ io_per_second = get_cma_io_per_second(target, io_time_in_usec);
+ } else {
+ io_per_second = get_ema_io_per_second(target, ema_period);
+ }
+ mb_per_second = io_per_second * g_io_size / (1024 * 1024);
+ printf("\r %-20s: %10.2f IO/s %10.2f MB/s\n",
+ target->name, io_per_second, mb_per_second);
+ total_io_per_second += io_per_second;
+ total_mb_per_second += mb_per_second;
+ target = target->next;
+ }
+ }
+
+ printf("\r =====================================================\n");
+ printf("\r %-20s: %10.2f IO/s %10.2f MB/s\n",
+ "Total", total_io_per_second, total_mb_per_second);
+ fflush(stdout);
+
+}
+
+static int
+performance_statistics_thread(void *arg)
+{
+ g_show_performance_period_num++;
+ performance_dump(g_show_performance_period_num * g_show_performance_period_in_usec,
+ g_show_performance_ema_period);
+ return -1;
+}
+
+static int
+bdevperf_construct_targets_tasks(void)
+{
+ uint32_t i;
+ struct io_target *target;
+ struct bdevperf_task *task;
+ int j, task_num = g_queue_depth;
+
+ /*
+ * Create the task pool after we have enumerated the targets, so that we know
+ * the min buffer alignment. Some backends such as AIO have alignment restrictions
+ * that must be accounted for.
+ */
+ if (g_reset) {
+ task_num += 1;
+ }
+
+ /* Initialize task list for each target */
+ for (i = 0; i < spdk_env_get_core_count(); i++) {
+ target = g_head[i];
+ if (!target) {
+ break;
+ }
+ while (target != NULL) {
+ for (j = 0; j < task_num; j++) {
+ task = calloc(1, sizeof(struct bdevperf_task));
+ if (!task) {
+ fprintf(stderr, "Failed to allocate task from memory\n");
+ goto ret;
+ }
+
+ task->buf = spdk_dma_zmalloc(g_io_size, g_min_alignment, NULL);
+ if (!task->buf) {
+ fprintf(stderr, "Cannot allocate buf for task=%p\n", task);
+ free(task);
+ goto ret;
+ }
+
+ task->target = target;
+ TAILQ_INSERT_TAIL(&target->task_list, task, link);
+ }
+ target = target->next;
+ }
+ }
+
+ return 0;
+
+ret:
+ fprintf(stderr, "Bdevperf program exits due to memory allocation issue\n");
+ fprintf(stderr, "Use -d XXX to allocate more huge pages, e.g., -d 4096\n");
+ return -1;
+}
+
+static void
+bdevperf_run(void *arg1, void *arg2)
+{
+ uint32_t i;
+ struct io_target *target;
+ struct spdk_event *event;
+ int rc;
+
+ rc = blockdev_heads_init();
+ if (rc) {
+ spdk_app_stop(1);
+ return;
+ }
+
+ bdevperf_construct_targets();
+
+ if (g_target_count == 0) {
+ fprintf(stderr, "No valid bdevs found.\n");
+ spdk_app_stop(1);
+ return;
+ }
+
+ rc = bdevperf_construct_targets_tasks();
+ if (rc) {
+ blockdev_heads_destroy();
+ spdk_app_stop(1);
+ return;
+ }
+
+ printf("Running I/O for %" PRIu64 " seconds...\n", g_time_in_usec / 1000000);
+ fflush(stdout);
+
+ /* Start a timer to dump performance numbers */
+ g_shutdown_tsc = spdk_get_ticks();
+ if (g_show_performance_real_time) {
+ g_perf_timer = spdk_poller_register(performance_statistics_thread, NULL,
+ g_show_performance_period_in_usec);
+ }
+
+ g_master_core = spdk_env_get_current_core();
+ /* Send events to start all I/O */
+ for (i = 0; i < spdk_env_get_core_count(); i++) {
+ target = g_head[i];
+ if (target == NULL) {
+ break;
+ }
+ event = spdk_event_allocate(target->lcore, bdevperf_submit_on_core,
+ target, NULL);
+ spdk_event_call(event);
+ }
+}
+
+static void
+bdevperf_stop_io_on_core(void *arg1, void *arg2)
+{
+ struct io_target *target = arg1;
+
+ /* Stop I/O for each block device. */
+ while (target != NULL) {
+ end_target(target);
+ target = target->next;
+ }
+}
+
+static void
+spdk_bdevperf_shutdown_cb(void)
+{
+ uint32_t i;
+ struct io_target *target;
+ struct spdk_event *event;
+
+ g_shutdown = true;
+ g_shutdown_tsc = spdk_get_ticks() - g_shutdown_tsc;
+
+ /* Send events to stop all I/O on each core */
+ for (i = 0; i < spdk_env_get_core_count(); i++) {
+ if (g_head == NULL) {
+ break;
+ }
+ target = g_head[i];
+ if (target == NULL) {
+ break;
+ }
+ event = spdk_event_allocate(target->lcore, bdevperf_stop_io_on_core,
+ target, NULL);
+ spdk_event_call(event);
+ }
+}
+
+static void
+bdevperf_parse_arg(int ch, char *arg)
+{
+ switch (ch) {
+ case 'q':
+ g_queue_depth = atoi(optarg);
+ break;
+ case 'o':
+ g_io_size = atoi(optarg);
+ break;
+ case 't':
+ g_time_in_sec = atoi(optarg);
+ break;
+ case 'w':
+ g_workload_type = optarg;
+ break;
+ case 'M':
+ g_rw_percentage = atoi(optarg);
+ g_mix_specified = true;
+ break;
+ case 'P':
+ g_show_performance_ema_period = atoi(optarg);
+ break;
+ case 'S':
+ g_show_performance_real_time = 1;
+ g_show_performance_period_in_usec = atoi(optarg) * 1000000;
+ break;
+ }
+}
+
+int
+main(int argc, char **argv)
+{
+ struct spdk_app_opts opts = {};
+ int rc;
+
+ spdk_app_opts_init(&opts);
+ opts.name = "bdevperf";
+ opts.rpc_addr = NULL;
+ opts.reactor_mask = NULL;
+ opts.mem_size = 1024;
+ opts.shutdown_cb = spdk_bdevperf_shutdown_cb;
+
+ /* default value */
+ g_queue_depth = 0;
+ g_io_size = 0;
+ g_workload_type = NULL;
+ g_time_in_sec = 0;
+ g_mix_specified = false;
+
+ if ((rc = spdk_app_parse_args(argc, argv, &opts, "q:o:t:w:M:P:S:", NULL,
+ bdevperf_parse_arg, bdevperf_usage)) !=
+ SPDK_APP_PARSE_ARGS_SUCCESS) {
+ return rc;
+ }
+
+ if (g_queue_depth <= 0) {
+ spdk_app_usage();
+ bdevperf_usage();
+ exit(1);
+ }
+ if (g_io_size <= 0) {
+ spdk_app_usage();
+ bdevperf_usage();
+ exit(1);
+ }
+ if (!g_workload_type) {
+ spdk_app_usage();
+ bdevperf_usage();
+ exit(1);
+ }
+ if (g_time_in_sec <= 0) {
+ spdk_app_usage();
+ bdevperf_usage();
+ exit(1);
+ }
+ g_time_in_usec = g_time_in_sec * 1000000LL;
+
+ if (g_show_performance_ema_period > 0 &&
+ g_show_performance_real_time == 0) {
+ fprintf(stderr, "-P option must be specified with -S option\n");
+ exit(1);
+ }
+
+ if (strcmp(g_workload_type, "read") &&
+ strcmp(g_workload_type, "write") &&
+ strcmp(g_workload_type, "randread") &&
+ strcmp(g_workload_type, "randwrite") &&
+ strcmp(g_workload_type, "rw") &&
+ strcmp(g_workload_type, "randrw") &&
+ strcmp(g_workload_type, "verify") &&
+ strcmp(g_workload_type, "reset") &&
+ strcmp(g_workload_type, "unmap") &&
+ strcmp(g_workload_type, "write_zeroes") &&
+ strcmp(g_workload_type, "flush")) {
+ fprintf(stderr,
+ "io pattern type must be one of\n"
+ "(read, write, randread, randwrite, rw, randrw, verify, reset, unmap, flush)\n");
+ exit(1);
+ }
+
+ if (!strcmp(g_workload_type, "read") ||
+ !strcmp(g_workload_type, "randread")) {
+ g_rw_percentage = 100;
+ }
+
+ if (!strcmp(g_workload_type, "write") ||
+ !strcmp(g_workload_type, "randwrite")) {
+ g_rw_percentage = 0;
+ }
+
+ if (!strcmp(g_workload_type, "unmap")) {
+ g_unmap = true;
+ }
+
+ if (!strcmp(g_workload_type, "write_zeroes")) {
+ g_write_zeroes = true;
+ }
+
+ if (!strcmp(g_workload_type, "flush")) {
+ g_flush = true;
+ }
+
+ if (!strcmp(g_workload_type, "verify") ||
+ !strcmp(g_workload_type, "reset")) {
+ g_rw_percentage = 50;
+ if (g_io_size > SPDK_BDEV_LARGE_BUF_MAX_SIZE) {
+ fprintf(stderr, "Unable to exceed max I/O size of %d for verify. (%d provided).\n",
+ SPDK_BDEV_LARGE_BUF_MAX_SIZE, g_io_size);
+ exit(1);
+ }
+ if (opts.reactor_mask) {
+ fprintf(stderr, "Ignoring -m option. Verify can only run with a single core.\n");
+ opts.reactor_mask = NULL;
+ }
+ g_verify = true;
+ if (!strcmp(g_workload_type, "reset")) {
+ g_reset = true;
+ }
+ }
+
+ if (!strcmp(g_workload_type, "read") ||
+ !strcmp(g_workload_type, "randread") ||
+ !strcmp(g_workload_type, "write") ||
+ !strcmp(g_workload_type, "randwrite") ||
+ !strcmp(g_workload_type, "verify") ||
+ !strcmp(g_workload_type, "reset") ||
+ !strcmp(g_workload_type, "unmap") ||
+ !strcmp(g_workload_type, "write_zeroes") ||
+ !strcmp(g_workload_type, "flush")) {
+ if (g_mix_specified) {
+ fprintf(stderr, "Ignoring -M option... Please use -M option"
+ " only when using rw or randrw.\n");
+ }
+ }
+
+ if (!strcmp(g_workload_type, "rw") ||
+ !strcmp(g_workload_type, "randrw")) {
+ if (g_rw_percentage < 0 || g_rw_percentage > 100) {
+ fprintf(stderr,
+ "-M must be specified to value from 0 to 100 "
+ "for rw or randrw.\n");
+ exit(1);
+ }
+ }
+
+ if (!strcmp(g_workload_type, "read") ||
+ !strcmp(g_workload_type, "write") ||
+ !strcmp(g_workload_type, "rw") ||
+ !strcmp(g_workload_type, "verify") ||
+ !strcmp(g_workload_type, "reset") ||
+ !strcmp(g_workload_type, "unmap") ||
+ !strcmp(g_workload_type, "write_zeroes")) {
+ g_is_random = 0;
+ } else {
+ g_is_random = 1;
+ }
+
+ if (g_io_size > SPDK_BDEV_LARGE_BUF_MAX_SIZE) {
+ printf("I/O size of %d is greater than zero copy threshold (%d).\n",
+ g_io_size, SPDK_BDEV_LARGE_BUF_MAX_SIZE);
+ printf("Zero copy mechanism will not be used.\n");
+ g_zcopy = false;
+ }
+
+ rc = spdk_app_start(&opts, bdevperf_run, NULL, NULL);
+ if (rc) {
+ g_run_failed = true;
+ }
+
+ if (g_shutdown) {
+ g_time_in_usec = g_shutdown_tsc * 1000000 / spdk_get_ticks_hz();
+ printf("Received shutdown signal, test time is about %.6f seconds\n",
+ (double)g_time_in_usec / 1000000);
+ }
+
+ if (g_time_in_usec) {
+ if (!g_run_failed) {
+ performance_dump(g_time_in_usec, 0);
+ }
+ } else {
+ printf("Test time less than one microsecond, no performance data will be shown\n");
+ }
+
+ blockdev_heads_destroy();
+ spdk_app_fini();
+ return g_run_failed;
+}
diff --git a/src/spdk/test/bdev/blockdev.sh b/src/spdk/test/bdev/blockdev.sh
new file mode 100755
index 00000000..bf3e006b
--- /dev/null
+++ b/src/spdk/test/bdev/blockdev.sh
@@ -0,0 +1,171 @@
+#!/usr/bin/env bash
+
+set -e
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+plugindir=$rootdir/examples/bdev/fio_plugin
+rpc_py="$rootdir/scripts/rpc.py"
+
+function run_fio()
+{
+ if [ $RUN_NIGHTLY -eq 0 ]; then
+ LD_PRELOAD=$plugindir/fio_plugin /usr/src/fio/fio --ioengine=spdk_bdev --iodepth=8 --bs=4k --runtime=10 $testdir/bdev.fio "$@"
+ elif [ $RUN_NIGHTLY_FAILING -eq 1 ]; then
+ # Use size 192KB which both exceeds typical 128KB max NVMe I/O
+ # size and will cross 128KB Intel DC P3700 stripe boundaries.
+ LD_PRELOAD=$plugindir/fio_plugin /usr/src/fio/fio --ioengine=spdk_bdev --iodepth=128 --bs=192k --runtime=100 $testdir/bdev.fio "$@"
+ fi
+}
+
+source $rootdir/test/common/autotest_common.sh
+source $testdir/nbd_common.sh
+
+function nbd_function_test() {
+ if [ $(uname -s) = Linux ] && modprobe -n nbd; then
+ local rpc_server=/var/tmp/spdk-nbd.sock
+ local conf=$1
+ local nbd_num=6
+ local nbd_all=($(ls /dev/nbd* | grep -v p))
+ local bdev_all=($bdevs_name)
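+ # Pair up the first $nbd_num unpartitioned nbd nodes with the first $nbd_num bdev names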
+ local nbd_list=(${nbd_all[@]:0:$nbd_num})
+ local bdev_list=(${bdev_all[@]:0:$nbd_num})
+
+ if [ ! -e $conf ]; then
+ return 1
+ fi
+
+ modprobe nbd
+ $rootdir/test/app/bdev_svc/bdev_svc -r $rpc_server -i 0 -c ${conf} &
+ nbd_pid=$!
+ echo "Process nbd pid: $nbd_pid"
+ waitforlisten $nbd_pid $rpc_server
+
+ nbd_rpc_data_verify $rpc_server "${bdev_list[*]}" "${nbd_list[*]}"
+
+ $rpc_py -s $rpc_server delete_passthru_bdev TestPT
+
+ killprocess $nbd_pid
+ fi
+
+ return 0
+}
+
+timing_enter bdev
+
+# Create a file to be used as an AIO backend
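+# (2048-byte blocks x 5000 = roughly 10 MB)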
+dd if=/dev/zero of=/tmp/aiofile bs=2048 count=5000
+
+cp $testdir/bdev.conf.in $testdir/bdev.conf
+$rootdir/scripts/gen_nvme.sh >> $testdir/bdev.conf
+
+if [ $SPDK_TEST_RBD -eq 1 ]; then
+ timing_enter rbd_setup
+ rbd_setup 127.0.0.1
+ timing_exit rbd_setup
+
+ $rootdir/scripts/gen_rbd.sh >> $testdir/bdev.conf
+fi
+
+if [ $SPDK_TEST_CRYPTO -eq 1 ]; then
+ $rootdir/scripts/gen_crypto.sh Malloc6 >> $testdir/bdev.conf
+fi
+
+if hash pmempool; then
+ rm -f /tmp/spdk-pmem-pool
+ pmempool create blk --size=32M 512 /tmp/spdk-pmem-pool
+ echo "[Pmem]" >> $testdir/bdev.conf
+ echo " Blk /tmp/spdk-pmem-pool Pmem0" >> $testdir/bdev.conf
+fi
+
+timing_enter hello_bdev
+if grep -q Nvme0 $testdir/bdev.conf; then
+ $rootdir/examples/bdev/hello_world/hello_bdev -c $testdir/bdev.conf -b Nvme0n1
+fi
+timing_exit hello_bdev
+
+timing_enter bounds
+$testdir/bdevio/bdevio -c $testdir/bdev.conf
+timing_exit bounds
+
+timing_enter nbd_gpt
+if grep -q Nvme0 $testdir/bdev.conf; then
+ part_dev_by_gpt $testdir/bdev.conf Nvme0n1 $rootdir
+fi
+timing_exit nbd_gpt
+
+timing_enter bdev_svc
+bdevs=$(discover_bdevs $rootdir $testdir/bdev.conf | jq -r '.[] | select(.claimed == false)')
+timing_exit bdev_svc
+
+timing_enter nbd
+bdevs_name=$(echo $bdevs | jq -r '.name')
+nbd_function_test $testdir/bdev.conf "$bdevs_name"
+timing_exit nbd
+
+if [ -d /usr/src/fio ] && [ $SPDK_RUN_ASAN -eq 0 ]; then
+ timing_enter fio
+
+ timing_enter fio_rw_verify
+ # Generate the fio config file given the list of all unclaimed bdevs
+ fio_config_gen $testdir/bdev.fio verify
+ for b in $(echo $bdevs | jq -r '.name'); do
+ fio_config_add_job $testdir/bdev.fio $b
+ done
+
+ run_fio --spdk_conf=./test/bdev/bdev.conf
+
+ rm -f *.state
+ rm -f $testdir/bdev.fio
+ timing_exit fio_rw_verify
+
+ timing_enter fio_trim
+ # Generate the fio config file given the list of all unclaimed bdevs that support unmap
+ fio_config_gen $testdir/bdev.fio trim
+ for b in $(echo $bdevs | jq -r 'select(.supported_io_types.unmap == true) | .name'); do
+ fio_config_add_job $testdir/bdev.fio $b
+ done
+
+ run_fio --spdk_conf=./test/bdev/bdev.conf
+
+ rm -f *.state
+ rm -f $testdir/bdev.fio
+ timing_exit fio_trim
+ report_test_completion "bdev_fio"
+ timing_exit fio
+fi
+
+# Create conf file for bdevperf with gpt
+cat > $testdir/bdev_gpt.conf << EOL
+[Gpt]
+ Disable No
+EOL
+
+# Get Nvme info through filtering gen_nvme.sh's result
+$rootdir/scripts/gen_nvme.sh >> $testdir/bdev_gpt.conf
+
+# Run bdevperf with gpt
+$testdir/bdevperf/bdevperf -c $testdir/bdev_gpt.conf -q 128 -o 4096 -w verify -t 5
+$testdir/bdevperf/bdevperf -c $testdir/bdev_gpt.conf -q 128 -o 4096 -w write_zeroes -t 1
+rm -f $testdir/bdev_gpt.conf
+
+if [ $RUN_NIGHTLY -eq 1 ]; then
+ # Temporarily disabled - infinite loop
+ timing_enter reset
+ #$testdir/bdevperf/bdevperf -c $testdir/bdev.conf -q 16 -w reset -o 4096 -t 60
+ timing_exit reset
+ report_test_completion "nightly_bdev_reset"
+fi
+
+
+if grep -q Nvme0 $testdir/bdev.conf; then
+ part_dev_by_gpt $testdir/bdev.conf Nvme0n1 $rootdir reset
+fi
+
+rm -f /tmp/aiofile
+rm -f /tmp/spdk-pmem-pool
+rm -f $testdir/bdev.conf
+trap - SIGINT SIGTERM EXIT
+rbd_cleanup
+report_test_completion "bdev"
+timing_exit bdev
diff --git a/src/spdk/test/bdev/nbd_common.sh b/src/spdk/test/bdev/nbd_common.sh
new file mode 100644
index 00000000..df8caac6
--- /dev/null
+++ b/src/spdk/test/bdev/nbd_common.sh
@@ -0,0 +1,95 @@
+set -e
+
+function nbd_start_disks() {
+ local rpc_server=$1
+ local bdev_list=($2)
+ local nbd_list=($3)
+
+ for (( i=0; i<${#nbd_list[@]}; i++ )); do
+ $rootdir/scripts/rpc.py -s $rpc_server start_nbd_disk \
+ ${bdev_list[$i]} ${nbd_list[$i]}
+ done
+ # Wait for nbd devices ready
+ for i in ${nbd_list[@]}; do
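+ # "${i:5}" strips the leading "/dev/" so waitfornbd gets just the device name, e.g. nbd0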
+ waitfornbd ${i:5}
+ done
+}
+
+function waitfornbd_exit() {
+ nbd_name=$1
+
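+ # Poll /proc/partitions for up to ~2 seconds, waiting for the nbd device to disappear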
+ for ((i=1; i<=20; i++)); do
+ if grep -q -w $nbd_name /proc/partitions; then
+ sleep 0.1
+ else
+ break
+ fi
+ done
+
+ return 0
+}
+
+function nbd_stop_disks() {
+ local rpc_server=$1
+ local nbd_list=($2)
+
+ for i in ${nbd_list[@]}; do
+ $rootdir/scripts/rpc.py -s $rpc_server stop_nbd_disk $i
+ done
+ for i in ${nbd_list[@]}; do
+ waitfornbd_exit ${i:5}
+ done
+}
+
+function nbd_get_count() {
+ # return = count of spdk nbd devices
+ local rpc_server=$1
+
+ nbd_disks_json=$($rootdir/scripts/rpc.py -s $rpc_server get_nbd_disks)
+ nbd_disks_name=$(echo "${nbd_disks_json}" | jq -r '.[] | .nbd_device')
+ count=$(echo "${nbd_disks_name}" | grep -c /dev/nbd || true)
+ echo $count
+}
+
+function nbd_dd_data_verify() {
+ local nbd_list=($1)
+ local operation=$2
+ local tmp_file=/tmp/nbdrandtest
+
+ if [ "$operation" = "write" ]; then
+ # data write
+ dd if=/dev/urandom of=$tmp_file bs=4096 count=256
+ for i in ${nbd_list[@]}; do
+ dd if=$tmp_file of=$i bs=4096 count=256 oflag=direct
+ done
+ elif [ "$operation" = "verify" ]; then
+ # data read and verify
+ for i in ${nbd_list[@]}; do
+ cmp -b -n 1M $tmp_file $i
+ done
+ rm $tmp_file
+ fi
+}
+
+function nbd_rpc_data_verify() {
+ local rpc_server=$1
+ local bdev_list=($2)
+ local nbd_list=($3)
+
+ nbd_start_disks $rpc_server "${bdev_list[*]}" "${nbd_list[*]}"
+ count=$(nbd_get_count $rpc_server)
+ if [ $count -ne ${#nbd_list[@]} ]; then
+ return 1
+ fi
+
+ nbd_dd_data_verify "${nbd_list[*]}" "write"
+ nbd_dd_data_verify "${nbd_list[*]}" "verify"
+
+ nbd_stop_disks $rpc_server "${nbd_list[*]}"
+ count=$(nbd_get_count $rpc_server)
+ if [ $count -ne 0 ]; then
+ return 1
+ fi
+
+ return 0
+}
diff --git a/src/spdk/test/bdev/nbdjson/json_config.sh b/src/spdk/test/bdev/nbdjson/json_config.sh
new file mode 100755
index 00000000..ccd7006c
--- /dev/null
+++ b/src/spdk/test/bdev/nbdjson/json_config.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+set -xe
+NBD_JSON_DIR=$(readlink -f $(dirname $0))
+. $NBD_JSON_DIR/../../json_config/common.sh
+rpc_py="$spdk_rpc_py"
+clear_config_py="$spdk_clear_config_py"
+trap 'on_error_exit "${FUNCNAME}" "${LINENO}"' ERR
+
+timing_enter nbd_json_config
+run_spdk_tgt
+load_nvme
+modprobe nbd
+
+timing_enter nbd_json_config_create_setup
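+# Create a 128 MB malloc bdev (512-byte blocks), then export it and Nvme0n1 as NBD devices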
+$rpc_py construct_malloc_bdev 128 512 --name Malloc0
+$rpc_py start_nbd_disk Malloc0 /dev/nbd0
+$rpc_py start_nbd_disk Nvme0n1 /dev/nbd1
+timing_exit nbd_json_config_create_setup
+
+timing_enter nbd_json_config_test
+test_json_config
+timing_exit nbd_json_config_test
+
+$clear_config_py clear_config
+kill_targets
+rmmod nbd
+timing_exit nbd_json_config
+report_test_completion nbd_json_config