Diffstat (limited to 'src/spdk/test/unit/lib/bdev')
46 files changed, 16843 insertions, 0 deletions
diff --git a/src/spdk/test/unit/lib/bdev/Makefile b/src/spdk/test/unit/lib/bdev/Makefile new file mode 100644 index 000000000..8120b1127 --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/Makefile @@ -0,0 +1,51 @@ +# +# BSD LICENSE +# +# Copyright (c) Intel Corporation. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..) +include $(SPDK_ROOT_DIR)/mk/spdk.common.mk + +DIRS-y = bdev.c part.c scsi_nvme.c gpt vbdev_lvol.c mt raid bdev_zone.c vbdev_zone_block.c bdev_ocssd.c + +DIRS-$(CONFIG_CRYPTO) += crypto.c + +# enable once new mocks are added for compressdev +DIRS-$(CONFIG_REDUCE) += compress.c + +DIRS-$(CONFIG_PMDK) += pmem + +.PHONY: all clean $(DIRS-y) + +all: $(DIRS-y) +clean: $(DIRS-y) + +include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk diff --git a/src/spdk/test/unit/lib/bdev/bdev.c/.gitignore b/src/spdk/test/unit/lib/bdev/bdev.c/.gitignore new file mode 100644 index 000000000..a5a22d0d3 --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/bdev.c/.gitignore @@ -0,0 +1 @@ +bdev_ut diff --git a/src/spdk/test/unit/lib/bdev/bdev.c/Makefile b/src/spdk/test/unit/lib/bdev/bdev.c/Makefile new file mode 100644 index 000000000..eb73fafb3 --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/bdev.c/Makefile @@ -0,0 +1,37 @@ +# +# BSD LICENSE +# +# Copyright (c) 2016 FUJITSU LIMITED, All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..) + +TEST_FILE = bdev_ut.c + +include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk diff --git a/src/spdk/test/unit/lib/bdev/bdev.c/bdev_ut.c b/src/spdk/test/unit/lib/bdev/bdev.c/bdev_ut.c new file mode 100644 index 000000000..36916f4f5 --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/bdev.c/bdev_ut.c @@ -0,0 +1,3417 @@ +/*- + * BSD LICENSE + * + * Copyright (c) Intel Corporation. All rights reserved. + * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
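Note: bdev_ut.c below #includes bdev/bdev.c directly and satisfies its external dependencies with DEFINE_STUB/DEFINE_STUB_V mocks instead of linking the real libraries. As a rough, standalone illustration of that idea (simplified and hypothetical; the real macros live in SPDK's internal mock header and also let tests override the returned value at run time):

	#include <stdio.h>

	/* Hypothetical, simplified stand-in for SPDK's DEFINE_STUB: generate a
	 * function with the given signature that just returns a canned value, so
	 * the file under test compiles without its real dependencies. */
	#define UT_DEFINE_STUB(fn, ret, args, val) \
		ret fn args { return val; }

	/* e.g. replace a config lookup that the code under test would call */
	UT_DEFINE_STUB(ut_conf_get_intval, int, (const char *key), -1)

	int main(void)
	{
		printf("%d\n", ut_conf_get_intval("Malloc"));   /* prints -1 */
		return 0;
	}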
+ */ + +#include "spdk_cunit.h" + +#include "common/lib/ut_multithread.c" +#include "unit/lib/json_mock.c" + +#include "spdk/config.h" +/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */ +#undef SPDK_CONFIG_VTUNE + +#include "bdev/bdev.c" + +DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, (struct spdk_conf *cp, + const char *name), NULL); +DEFINE_STUB(spdk_conf_section_get_nmval, char *, + (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL); +DEFINE_STUB(spdk_conf_section_get_intval, int, (struct spdk_conf_section *sp, const char *key), -1); + +struct spdk_trace_histories *g_trace_histories; +DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn)); +DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix)); +DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix)); +DEFINE_STUB_V(spdk_trace_register_description, (const char *name, + uint16_t tpoint_id, uint8_t owner_type, + uint8_t object_type, uint8_t new_object, + uint8_t arg1_type, const char *arg1_name)); +DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id, + uint32_t size, uint64_t object_id, uint64_t arg1)); +DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0); +DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL); + + +int g_status; +int g_count; +enum spdk_bdev_event_type g_event_type1; +enum spdk_bdev_event_type g_event_type2; +struct spdk_histogram_data *g_histogram; +void *g_unregister_arg; +int g_unregister_rc; + +void +spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io, + int *sc, int *sk, int *asc, int *ascq) +{ +} + +static int +null_init(void) +{ + return 0; +} + +static int +null_clean(void) +{ + return 0; +} + +static int +stub_destruct(void *ctx) +{ + return 0; +} + +struct ut_expected_io { + uint8_t type; + uint64_t offset; + uint64_t length; + int iovcnt; + struct iovec iov[BDEV_IO_NUM_CHILD_IOV]; + void *md_buf; + TAILQ_ENTRY(ut_expected_io) link; +}; + +struct bdev_ut_channel { + TAILQ_HEAD(, spdk_bdev_io) outstanding_io; + uint32_t outstanding_io_count; + TAILQ_HEAD(, ut_expected_io) expected_io; +}; + +static bool g_io_done; +static struct spdk_bdev_io *g_bdev_io; +static enum spdk_bdev_io_status g_io_status; +static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; +static uint32_t g_bdev_ut_io_device; +static struct bdev_ut_channel *g_bdev_ut_channel; +static void *g_compare_read_buf; +static uint32_t g_compare_read_buf_len; +static void *g_compare_write_buf; +static uint32_t g_compare_write_buf_len; +static bool g_abort_done; +static enum spdk_bdev_io_status g_abort_status; + +static struct ut_expected_io * +ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt) +{ + struct ut_expected_io *expected_io; + + expected_io = calloc(1, sizeof(*expected_io)); + SPDK_CU_ASSERT_FATAL(expected_io != NULL); + + expected_io->type = type; + expected_io->offset = offset; + expected_io->length = length; + expected_io->iovcnt = iovcnt; + + return expected_io; +} + +static void +ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len) +{ + expected_io->iov[pos].iov_base = base; + expected_io->iov[pos].iov_len = len; +} + +static void +stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io) +{ + struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch); + struct 
ut_expected_io *expected_io; + struct iovec *iov, *expected_iov; + struct spdk_bdev_io *bio_to_abort; + int i; + + g_bdev_io = bdev_io; + + if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) { + uint32_t len = bdev_io->u.bdev.iovs[0].iov_len; + + CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); + CU_ASSERT(g_compare_read_buf_len == len); + memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len); + } + + if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) { + uint32_t len = bdev_io->u.bdev.iovs[0].iov_len; + + CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); + CU_ASSERT(g_compare_write_buf_len == len); + memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len); + } + + if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) { + uint32_t len = bdev_io->u.bdev.iovs[0].iov_len; + + CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); + CU_ASSERT(g_compare_read_buf_len == len); + if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) { + g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE; + } + } + + if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) { + if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) { + TAILQ_FOREACH(bio_to_abort, &ch->outstanding_io, module_link) { + if (bio_to_abort == bdev_io->u.abort.bio_to_abort) { + TAILQ_REMOVE(&ch->outstanding_io, bio_to_abort, module_link); + ch->outstanding_io_count--; + spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED); + break; + } + } + } + } + + TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link); + ch->outstanding_io_count++; + + expected_io = TAILQ_FIRST(&ch->expected_io); + if (expected_io == NULL) { + return; + } + TAILQ_REMOVE(&ch->expected_io, expected_io, link); + + if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) { + CU_ASSERT(bdev_io->type == expected_io->type); + } + + if (expected_io->md_buf != NULL) { + CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf); + } + + if (expected_io->length == 0) { + free(expected_io); + return; + } + + CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks); + CU_ASSERT(expected_io->length = bdev_io->u.bdev.num_blocks); + + if (expected_io->iovcnt == 0) { + free(expected_io); + /* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */ + return; + } + + CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt); + for (i = 0; i < expected_io->iovcnt; i++) { + iov = &bdev_io->u.bdev.iovs[i]; + expected_iov = &expected_io->iov[i]; + CU_ASSERT(iov->iov_len == expected_iov->iov_len); + CU_ASSERT(iov->iov_base == expected_iov->iov_base); + } + + free(expected_io); +} + +static void +stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch, + struct spdk_bdev_io *bdev_io, bool success) +{ + CU_ASSERT(success == true); + + stub_submit_request(_ch, bdev_io); +} + +static void +stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io) +{ + spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb, + bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen); +} + +static uint32_t +stub_complete_io(uint32_t num_to_complete) +{ + struct bdev_ut_channel *ch = g_bdev_ut_channel; + struct spdk_bdev_io *bdev_io; + static enum spdk_bdev_io_status io_status; + uint32_t num_completed = 0; + + while (num_completed < num_to_complete) { + if (TAILQ_EMPTY(&ch->outstanding_io)) { + break; + } + bdev_io = TAILQ_FIRST(&ch->outstanding_io); + TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link); + ch->outstanding_io_count--; + io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? 
SPDK_BDEV_IO_STATUS_SUCCESS : + g_io_exp_status; + spdk_bdev_io_complete(bdev_io, io_status); + num_completed++; + } + + return num_completed; +} + +static struct spdk_io_channel * +bdev_ut_get_io_channel(void *ctx) +{ + return spdk_get_io_channel(&g_bdev_ut_io_device); +} + +static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = { + [SPDK_BDEV_IO_TYPE_READ] = true, + [SPDK_BDEV_IO_TYPE_WRITE] = true, + [SPDK_BDEV_IO_TYPE_COMPARE] = true, + [SPDK_BDEV_IO_TYPE_UNMAP] = true, + [SPDK_BDEV_IO_TYPE_FLUSH] = true, + [SPDK_BDEV_IO_TYPE_RESET] = true, + [SPDK_BDEV_IO_TYPE_NVME_ADMIN] = true, + [SPDK_BDEV_IO_TYPE_NVME_IO] = true, + [SPDK_BDEV_IO_TYPE_NVME_IO_MD] = true, + [SPDK_BDEV_IO_TYPE_WRITE_ZEROES] = true, + [SPDK_BDEV_IO_TYPE_ZCOPY] = true, + [SPDK_BDEV_IO_TYPE_ABORT] = true, +}; + +static void +ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable) +{ + g_io_types_supported[io_type] = enable; +} + +static bool +stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type) +{ + return g_io_types_supported[io_type]; +} + +static struct spdk_bdev_fn_table fn_table = { + .destruct = stub_destruct, + .submit_request = stub_submit_request, + .get_io_channel = bdev_ut_get_io_channel, + .io_type_supported = stub_io_type_supported, +}; + +static int +bdev_ut_create_ch(void *io_device, void *ctx_buf) +{ + struct bdev_ut_channel *ch = ctx_buf; + + CU_ASSERT(g_bdev_ut_channel == NULL); + g_bdev_ut_channel = ch; + + TAILQ_INIT(&ch->outstanding_io); + ch->outstanding_io_count = 0; + TAILQ_INIT(&ch->expected_io); + return 0; +} + +static void +bdev_ut_destroy_ch(void *io_device, void *ctx_buf) +{ + CU_ASSERT(g_bdev_ut_channel != NULL); + g_bdev_ut_channel = NULL; +} + +struct spdk_bdev_module bdev_ut_if; + +static int +bdev_ut_module_init(void) +{ + spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch, + sizeof(struct bdev_ut_channel), NULL); + spdk_bdev_module_init_done(&bdev_ut_if); + return 0; +} + +static void +bdev_ut_module_fini(void) +{ + spdk_io_device_unregister(&g_bdev_ut_io_device, NULL); +} + +struct spdk_bdev_module bdev_ut_if = { + .name = "bdev_ut", + .module_init = bdev_ut_module_init, + .module_fini = bdev_ut_module_fini, + .async_init = true, +}; + +static void vbdev_ut_examine(struct spdk_bdev *bdev); + +static int +vbdev_ut_module_init(void) +{ + return 0; +} + +static void +vbdev_ut_module_fini(void) +{ +} + +struct spdk_bdev_module vbdev_ut_if = { + .name = "vbdev_ut", + .module_init = vbdev_ut_module_init, + .module_fini = vbdev_ut_module_fini, + .examine_config = vbdev_ut_examine, +}; + +SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if) +SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if) + +static void +vbdev_ut_examine(struct spdk_bdev *bdev) +{ + spdk_bdev_module_examine_done(&vbdev_ut_if); +} + +static struct spdk_bdev * +allocate_bdev(char *name) +{ + struct spdk_bdev *bdev; + int rc; + + bdev = calloc(1, sizeof(*bdev)); + SPDK_CU_ASSERT_FATAL(bdev != NULL); + + bdev->name = name; + bdev->fn_table = &fn_table; + bdev->module = &bdev_ut_if; + bdev->blockcnt = 1024; + bdev->blocklen = 512; + + rc = spdk_bdev_register(bdev); + CU_ASSERT(rc == 0); + + return bdev; +} + +static struct spdk_bdev * +allocate_vbdev(char *name) +{ + struct spdk_bdev *bdev; + int rc; + + bdev = calloc(1, sizeof(*bdev)); + SPDK_CU_ASSERT_FATAL(bdev != NULL); + + bdev->name = name; + bdev->fn_table = &fn_table; + bdev->module = &vbdev_ut_if; + + rc = spdk_bdev_register(bdev); + CU_ASSERT(rc == 0); + + return bdev; +} + +static void +free_bdev(struct spdk_bdev 
*bdev) +{ + spdk_bdev_unregister(bdev, NULL, NULL); + poll_threads(); + memset(bdev, 0xFF, sizeof(*bdev)); + free(bdev); +} + +static void +free_vbdev(struct spdk_bdev *bdev) +{ + spdk_bdev_unregister(bdev, NULL, NULL); + poll_threads(); + memset(bdev, 0xFF, sizeof(*bdev)); + free(bdev); +} + +static void +get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc) +{ + const char *bdev_name; + + CU_ASSERT(bdev != NULL); + CU_ASSERT(rc == 0); + bdev_name = spdk_bdev_get_name(bdev); + CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0"); + + free(stat); + free_bdev(bdev); + + *(bool *)cb_arg = true; +} + +static void +bdev_unregister_cb(void *cb_arg, int rc) +{ + g_unregister_arg = cb_arg; + g_unregister_rc = rc; +} + +static void +bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) +{ + struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx; + + g_event_type1 = type; + if (SPDK_BDEV_EVENT_REMOVE == type) { + spdk_bdev_close(desc); + } +} + +static void +bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) +{ + struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx; + + g_event_type2 = type; + if (SPDK_BDEV_EVENT_REMOVE == type) { + spdk_bdev_close(desc); + } +} + +static void +get_device_stat_test(void) +{ + struct spdk_bdev *bdev; + struct spdk_bdev_io_stat *stat; + bool done; + + bdev = allocate_bdev("bdev0"); + stat = calloc(1, sizeof(struct spdk_bdev_io_stat)); + if (stat == NULL) { + free_bdev(bdev); + return; + } + + done = false; + spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done); + while (!done) { poll_threads(); } + + +} + +static void +open_write_test(void) +{ + struct spdk_bdev *bdev[9]; + struct spdk_bdev_desc *desc[9] = {}; + int rc; + + /* + * Create a tree of bdevs to test various open w/ write cases. + * + * bdev0 through bdev3 are physical block devices, such as NVMe + * namespaces or Ceph block devices. + * + * bdev4 is a virtual bdev with multiple base bdevs. This models + * caching or RAID use cases. + * + * bdev5 through bdev7 are all virtual bdevs with the same base + * bdev (except bdev7). This models partitioning or logical volume + * use cases. + * + * bdev7 is a virtual bdev with multiple base bdevs. One of base bdevs + * (bdev2) is shared with other virtual bdevs: bdev5 and bdev6. This + * models caching, RAID, partitioning or logical volumes use cases. + * + * bdev8 is a virtual bdev with multiple base bdevs, but these + * base bdevs are themselves virtual bdevs. 
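Aside: open_write_test below builds a tree of claimed and unclaimed bdevs and checks the rule its asserts encode: a bdev claimed by a virtual-bdev module can still be opened read-only, but a read/write open fails with -EPERM. A minimal plain-C model of just that rule (illustrative only, made-up names, not SPDK's implementation):

	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct toy_bdev {
		const char *name;
		bool claimed;   /* set once a vbdev module has claimed the bdev */
	};

	/* Model of the open check: write access is refused once a module owns the bdev. */
	static int toy_bdev_open(struct toy_bdev *b, bool write)
	{
		if (write && b->claimed) {
			return -EPERM;
		}
		return 0;
	}

	int main(void)
	{
		struct toy_bdev base = { .name = "bdev0", .claimed = true };

		printf("read-only open: %d\n", toy_bdev_open(&base, false));   /* 0 */
		printf("read/write open: %d\n", toy_bdev_open(&base, true));   /* -EPERM */
		return 0;
	}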
+ * + * bdev8 + * | + * +----------+ + * | | + * bdev4 bdev5 bdev6 bdev7 + * | | | | + * +---+---+ +---+ + +---+---+ + * | | \ | / \ + * bdev0 bdev1 bdev2 bdev3 + */ + + bdev[0] = allocate_bdev("bdev0"); + rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if); + CU_ASSERT(rc == 0); + + bdev[1] = allocate_bdev("bdev1"); + rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if); + CU_ASSERT(rc == 0); + + bdev[2] = allocate_bdev("bdev2"); + rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if); + CU_ASSERT(rc == 0); + + bdev[3] = allocate_bdev("bdev3"); + rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if); + CU_ASSERT(rc == 0); + + bdev[4] = allocate_vbdev("bdev4"); + rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if); + CU_ASSERT(rc == 0); + + bdev[5] = allocate_vbdev("bdev5"); + rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if); + CU_ASSERT(rc == 0); + + bdev[6] = allocate_vbdev("bdev6"); + + bdev[7] = allocate_vbdev("bdev7"); + + bdev[8] = allocate_vbdev("bdev8"); + + /* Open bdev0 read-only. This should succeed. */ + rc = spdk_bdev_open(bdev[0], false, NULL, NULL, &desc[0]); + CU_ASSERT(rc == 0); + SPDK_CU_ASSERT_FATAL(desc[0] != NULL); + spdk_bdev_close(desc[0]); + + /* + * Open bdev1 read/write. This should fail since bdev1 has been claimed + * by a vbdev module. + */ + rc = spdk_bdev_open(bdev[1], true, NULL, NULL, &desc[1]); + CU_ASSERT(rc == -EPERM); + + /* + * Open bdev4 read/write. This should fail since bdev3 has been claimed + * by a vbdev module. + */ + rc = spdk_bdev_open(bdev[4], true, NULL, NULL, &desc[4]); + CU_ASSERT(rc == -EPERM); + + /* Open bdev4 read-only. This should succeed. */ + rc = spdk_bdev_open(bdev[4], false, NULL, NULL, &desc[4]); + CU_ASSERT(rc == 0); + SPDK_CU_ASSERT_FATAL(desc[4] != NULL); + spdk_bdev_close(desc[4]); + + /* + * Open bdev8 read/write. This should succeed since it is a leaf + * bdev. + */ + rc = spdk_bdev_open(bdev[8], true, NULL, NULL, &desc[8]); + CU_ASSERT(rc == 0); + SPDK_CU_ASSERT_FATAL(desc[8] != NULL); + spdk_bdev_close(desc[8]); + + /* + * Open bdev5 read/write. This should fail since bdev4 has been claimed + * by a vbdev module. + */ + rc = spdk_bdev_open(bdev[5], true, NULL, NULL, &desc[5]); + CU_ASSERT(rc == -EPERM); + + /* Open bdev4 read-only. This should succeed. 
*/ + rc = spdk_bdev_open(bdev[5], false, NULL, NULL, &desc[5]); + CU_ASSERT(rc == 0); + SPDK_CU_ASSERT_FATAL(desc[5] != NULL); + spdk_bdev_close(desc[5]); + + free_vbdev(bdev[8]); + + free_vbdev(bdev[5]); + free_vbdev(bdev[6]); + free_vbdev(bdev[7]); + + free_vbdev(bdev[4]); + + free_bdev(bdev[0]); + free_bdev(bdev[1]); + free_bdev(bdev[2]); + free_bdev(bdev[3]); +} + +static void +bytes_to_blocks_test(void) +{ + struct spdk_bdev bdev; + uint64_t offset_blocks, num_blocks; + + memset(&bdev, 0, sizeof(bdev)); + + bdev.blocklen = 512; + + /* All parameters valid */ + offset_blocks = 0; + num_blocks = 0; + CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0); + CU_ASSERT(offset_blocks == 1); + CU_ASSERT(num_blocks == 2); + + /* Offset not a block multiple */ + CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0); + + /* Length not a block multiple */ + CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0); + + /* In case blocklen not the power of two */ + bdev.blocklen = 100; + CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0); + CU_ASSERT(offset_blocks == 1); + CU_ASSERT(num_blocks == 2); + + /* Offset not a block multiple */ + CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0); + + /* Length not a block multiple */ + CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0); +} + +static void +num_blocks_test(void) +{ + struct spdk_bdev bdev; + struct spdk_bdev_desc *desc = NULL; + struct spdk_bdev_desc *desc_ext = NULL; + int rc; + + memset(&bdev, 0, sizeof(bdev)); + bdev.name = "num_blocks"; + bdev.fn_table = &fn_table; + bdev.module = &bdev_ut_if; + spdk_bdev_register(&bdev); + spdk_bdev_notify_blockcnt_change(&bdev, 50); + + /* Growing block number */ + CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0); + /* Shrinking block number */ + CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0); + + /* In case bdev opened */ + rc = spdk_bdev_open(&bdev, false, NULL, NULL, &desc); + CU_ASSERT(rc == 0); + SPDK_CU_ASSERT_FATAL(desc != NULL); + + /* Growing block number */ + CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0); + /* Shrinking block number */ + CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0); + + /* In case bdev opened with ext API */ + rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc_ext, &desc_ext); + CU_ASSERT(rc == 0); + SPDK_CU_ASSERT_FATAL(desc_ext != NULL); + + g_event_type1 = 0xFF; + /* Growing block number */ + CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 90) == 0); + + poll_threads(); + CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE); + + g_event_type1 = 0xFF; + /* Growing block number and closing */ + CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 100) == 0); + + spdk_bdev_close(desc); + spdk_bdev_close(desc_ext); + spdk_bdev_unregister(&bdev, NULL, NULL); + + poll_threads(); + + /* Callback is not called for closed device */ + CU_ASSERT_EQUAL(g_event_type1, 0xFF); +} + +static void +io_valid_test(void) +{ + struct spdk_bdev bdev; + + memset(&bdev, 0, sizeof(bdev)); + + bdev.blocklen = 512; + spdk_bdev_notify_blockcnt_change(&bdev, 100); + + /* All parameters valid */ + CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true); + + /* Last valid block */ + CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true); + + /* Offset past end of bdev */ + CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false); + + /* Offset + length past end of bdev 
*/ + CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false); + + /* Offset near end of uint64_t range (2^64 - 1) */ + CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false); +} + +static void +alias_add_del_test(void) +{ + struct spdk_bdev *bdev[3]; + int rc; + + /* Creating and registering bdevs */ + bdev[0] = allocate_bdev("bdev0"); + SPDK_CU_ASSERT_FATAL(bdev[0] != 0); + + bdev[1] = allocate_bdev("bdev1"); + SPDK_CU_ASSERT_FATAL(bdev[1] != 0); + + bdev[2] = allocate_bdev("bdev2"); + SPDK_CU_ASSERT_FATAL(bdev[2] != 0); + + poll_threads(); + + /* + * Trying adding an alias identical to name. + * Alias is identical to name, so it can not be added to aliases list + */ + rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name); + CU_ASSERT(rc == -EEXIST); + + /* + * Trying to add empty alias, + * this one should fail + */ + rc = spdk_bdev_alias_add(bdev[0], NULL); + CU_ASSERT(rc == -EINVAL); + + /* Trying adding same alias to two different registered bdevs */ + + /* Alias is used first time, so this one should pass */ + rc = spdk_bdev_alias_add(bdev[0], "proper alias 0"); + CU_ASSERT(rc == 0); + + /* Alias was added to another bdev, so this one should fail */ + rc = spdk_bdev_alias_add(bdev[1], "proper alias 0"); + CU_ASSERT(rc == -EEXIST); + + /* Alias is used first time, so this one should pass */ + rc = spdk_bdev_alias_add(bdev[1], "proper alias 1"); + CU_ASSERT(rc == 0); + + /* Trying removing an alias from registered bdevs */ + + /* Alias is not on a bdev aliases list, so this one should fail */ + rc = spdk_bdev_alias_del(bdev[0], "not existing"); + CU_ASSERT(rc == -ENOENT); + + /* Alias is present on a bdev aliases list, so this one should pass */ + rc = spdk_bdev_alias_del(bdev[0], "proper alias 0"); + CU_ASSERT(rc == 0); + + /* Alias is present on a bdev aliases list, so this one should pass */ + rc = spdk_bdev_alias_del(bdev[1], "proper alias 1"); + CU_ASSERT(rc == 0); + + /* Trying to remove name instead of alias, so this one should fail, name cannot be changed or removed */ + rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name); + CU_ASSERT(rc != 0); + + /* Trying to del all alias from empty alias list */ + spdk_bdev_alias_del_all(bdev[2]); + SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases)); + + /* Trying to del all alias from non-empty alias list */ + rc = spdk_bdev_alias_add(bdev[2], "alias0"); + CU_ASSERT(rc == 0); + rc = spdk_bdev_alias_add(bdev[2], "alias1"); + CU_ASSERT(rc == 0); + spdk_bdev_alias_del_all(bdev[2]); + CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases)); + + /* Unregister and free bdevs */ + spdk_bdev_unregister(bdev[0], NULL, NULL); + spdk_bdev_unregister(bdev[1], NULL, NULL); + spdk_bdev_unregister(bdev[2], NULL, NULL); + + poll_threads(); + + free(bdev[0]); + free(bdev[1]); + free(bdev[2]); +} + +static void +io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) +{ + g_io_done = true; + g_io_status = bdev_io->internal.status; + spdk_bdev_free_io(bdev_io); +} + +static void +bdev_init_cb(void *arg, int rc) +{ + CU_ASSERT(rc == 0); +} + +static void +bdev_fini_cb(void *arg) +{ +} + +struct bdev_ut_io_wait_entry { + struct spdk_bdev_io_wait_entry entry; + struct spdk_io_channel *io_ch; + struct spdk_bdev_desc *desc; + bool submitted; +}; + +static void +io_wait_cb(void *arg) +{ + struct bdev_ut_io_wait_entry *entry = arg; + int rc; + + rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL); + CU_ASSERT(rc == 0); + entry->submitted = true; +} + +static void +bdev_io_types_test(void) +{ + struct spdk_bdev *bdev; + struct 
spdk_bdev_desc *desc = NULL; + struct spdk_io_channel *io_ch; + struct spdk_bdev_opts bdev_opts = { + .bdev_io_pool_size = 4, + .bdev_io_cache_size = 2, + }; + int rc; + + rc = spdk_bdev_set_opts(&bdev_opts); + CU_ASSERT(rc == 0); + spdk_bdev_initialize(bdev_init_cb, NULL); + poll_threads(); + + bdev = allocate_bdev("bdev0"); + + rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc); + CU_ASSERT(rc == 0); + poll_threads(); + SPDK_CU_ASSERT_FATAL(desc != NULL); + io_ch = spdk_bdev_get_io_channel(desc); + CU_ASSERT(io_ch != NULL); + + /* WRITE and WRITE ZEROES are not supported */ + ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false); + ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false); + rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL); + CU_ASSERT(rc == -ENOTSUP); + ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true); + ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true); + + spdk_put_io_channel(io_ch); + spdk_bdev_close(desc); + free_bdev(bdev); + spdk_bdev_finish(bdev_fini_cb, NULL); + poll_threads(); +} + +static void +bdev_io_wait_test(void) +{ + struct spdk_bdev *bdev; + struct spdk_bdev_desc *desc = NULL; + struct spdk_io_channel *io_ch; + struct spdk_bdev_opts bdev_opts = { + .bdev_io_pool_size = 4, + .bdev_io_cache_size = 2, + }; + struct bdev_ut_io_wait_entry io_wait_entry; + struct bdev_ut_io_wait_entry io_wait_entry2; + int rc; + + rc = spdk_bdev_set_opts(&bdev_opts); + CU_ASSERT(rc == 0); + spdk_bdev_initialize(bdev_init_cb, NULL); + poll_threads(); + + bdev = allocate_bdev("bdev0"); + + rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc); + CU_ASSERT(rc == 0); + poll_threads(); + SPDK_CU_ASSERT_FATAL(desc != NULL); + io_ch = spdk_bdev_get_io_channel(desc); + CU_ASSERT(io_ch != NULL); + + rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); + CU_ASSERT(rc == 0); + rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); + CU_ASSERT(rc == 0); + rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); + CU_ASSERT(rc == 0); + rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); + + rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); + CU_ASSERT(rc == -ENOMEM); + + io_wait_entry.entry.bdev = bdev; + io_wait_entry.entry.cb_fn = io_wait_cb; + io_wait_entry.entry.cb_arg = &io_wait_entry; + io_wait_entry.io_ch = io_ch; + io_wait_entry.desc = desc; + io_wait_entry.submitted = false; + /* Cannot use the same io_wait_entry for two different calls. */ + memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry)); + io_wait_entry2.entry.cb_arg = &io_wait_entry2; + + /* Queue two I/O waits. 
*/ + rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry); + CU_ASSERT(rc == 0); + CU_ASSERT(io_wait_entry.submitted == false); + rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry); + CU_ASSERT(rc == 0); + CU_ASSERT(io_wait_entry2.submitted == false); + + stub_complete_io(1); + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); + CU_ASSERT(io_wait_entry.submitted == true); + CU_ASSERT(io_wait_entry2.submitted == false); + + stub_complete_io(1); + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); + CU_ASSERT(io_wait_entry2.submitted == true); + + stub_complete_io(4); + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); + + spdk_put_io_channel(io_ch); + spdk_bdev_close(desc); + free_bdev(bdev); + spdk_bdev_finish(bdev_fini_cb, NULL); + poll_threads(); +} + +static void +bdev_io_spans_boundary_test(void) +{ + struct spdk_bdev bdev; + struct spdk_bdev_io bdev_io; + + memset(&bdev, 0, sizeof(bdev)); + + bdev.optimal_io_boundary = 0; + bdev_io.bdev = &bdev; + + /* bdev has no optimal_io_boundary set - so this should return false. */ + CU_ASSERT(bdev_io_should_split(&bdev_io) == false); + + bdev.optimal_io_boundary = 32; + bdev_io.type = SPDK_BDEV_IO_TYPE_RESET; + + /* RESETs are not based on LBAs - so this should return false. */ + CU_ASSERT(bdev_io_should_split(&bdev_io) == false); + + bdev_io.type = SPDK_BDEV_IO_TYPE_READ; + bdev_io.u.bdev.offset_blocks = 0; + bdev_io.u.bdev.num_blocks = 32; + + /* This I/O run right up to, but does not cross, the boundary - so this should return false. */ + CU_ASSERT(bdev_io_should_split(&bdev_io) == false); + + bdev_io.u.bdev.num_blocks = 33; + + /* This I/O spans a boundary. */ + CU_ASSERT(bdev_io_should_split(&bdev_io) == true); +} + +static void +bdev_io_split_test(void) +{ + struct spdk_bdev *bdev; + struct spdk_bdev_desc *desc = NULL; + struct spdk_io_channel *io_ch; + struct spdk_bdev_opts bdev_opts = { + .bdev_io_pool_size = 512, + .bdev_io_cache_size = 64, + }; + struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2]; + struct ut_expected_io *expected_io; + uint64_t i; + int rc; + + rc = spdk_bdev_set_opts(&bdev_opts); + CU_ASSERT(rc == 0); + spdk_bdev_initialize(bdev_init_cb, NULL); + + bdev = allocate_bdev("bdev0"); + + rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc); + CU_ASSERT(rc == 0); + SPDK_CU_ASSERT_FATAL(desc != NULL); + io_ch = spdk_bdev_get_io_channel(desc); + CU_ASSERT(io_ch != NULL); + + bdev->optimal_io_boundary = 16; + bdev->split_on_optimal_io_boundary = false; + + g_io_done = false; + + /* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */ + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1); + ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512); + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + + rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_io_done == false); + + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); + stub_complete_io(1); + CU_ASSERT(g_io_done == true); + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); + + bdev->split_on_optimal_io_boundary = true; + + /* Now test that a single-vector command is split correctly. 
+ * Offset 14, length 8, payload 0xF000 + * Child - Offset 14, length 2, payload 0xF000 + * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 + * + * Set up the expected values before calling spdk_bdev_read_blocks + */ + g_io_done = false; + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); + ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); + ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + + /* spdk_bdev_read_blocks will submit the first child immediately. */ + rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_io_done == false); + + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); + stub_complete_io(2); + CU_ASSERT(g_io_done == true); + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); + + /* Now set up a more complex, multi-vector command that needs to be split, + * including splitting iovecs. + */ + iov[0].iov_base = (void *)0x10000; + iov[0].iov_len = 512; + iov[1].iov_base = (void *)0x20000; + iov[1].iov_len = 20 * 512; + iov[2].iov_base = (void *)0x30000; + iov[2].iov_len = 11 * 512; + + g_io_done = false; + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2); + ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512); + ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512); + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1); + ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512); + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2); + ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512); + ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512); + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + + rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_io_done == false); + + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); + stub_complete_io(3); + CU_ASSERT(g_io_done == true); + + /* Test multi vector command that needs to be split by strip and then needs to be + * split further due to the capacity of child iovs. 
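The single-vector case above (offset 14, length 8, boundary 16, giving children of 2 and 6 blocks) is just the arithmetic of cutting an I/O at every optimal_io_boundary multiple. A small standalone sketch of that calculation (illustrative only, not the splitting code in bdev.c):

	#include <stdint.h>
	#include <stdio.h>

	/* Print the child I/Os produced by cutting [offset, offset+len) at every
	 * multiple of 'boundary' blocks, as the unit test expects for reads/writes. */
	static void split_at_boundary(uint64_t offset, uint64_t len, uint64_t boundary)
	{
		while (len > 0) {
			uint64_t to_boundary = boundary - (offset % boundary);
			uint64_t child = len < to_boundary ? len : to_boundary;

			printf("child: offset %llu, %llu blocks\n",
			       (unsigned long long)offset, (unsigned long long)child);
			offset += child;
			len -= child;
		}
	}

	int main(void)
	{
		split_at_boundary(14, 8, 16);   /* -> (14, 2) and (16, 6), as in the test */
		return 0;
	}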
+ */ + for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) { + iov[i].iov_base = (void *)((i + 1) * 0x10000); + iov[i].iov_len = 512; + } + + bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; + g_io_done = false; + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV, + BDEV_IO_NUM_CHILD_IOV); + for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) { + ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512); + } + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, + BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV); + for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) { + ut_expected_io_set_iov(expected_io, i, + (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512); + } + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + + rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0, + BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_io_done == false); + + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); + stub_complete_io(1); + CU_ASSERT(g_io_done == false); + + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); + stub_complete_io(1); + CU_ASSERT(g_io_done == true); + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); + + /* Test multi vector command that needs to be split by strip and then needs to be + * split further due to the capacity of child iovs. In this case, the length of + * the rest of iovec array with an I/O boundary is the multiple of block size. + */ + + /* Fill iovec array for exactly one boundary. The iovec cnt for this boundary + * is BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs. + */ + for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { + iov[i].iov_base = (void *)((i + 1) * 0x10000); + iov[i].iov_len = 512; + } + for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) { + iov[i].iov_base = (void *)((i + 1) * 0x10000); + iov[i].iov_len = 256; + } + iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); + iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 512; + + /* Add an extra iovec to trigger split */ + iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); + iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; + + bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; + g_io_done = false; + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, + BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV); + for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { + ut_expected_io_set_iov(expected_io, i, + (void *)((i + 1) * 0x10000), 512); + } + for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) { + ut_expected_io_set_iov(expected_io, i, + (void *)((i + 1) * 0x10000), 256); + } + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1, + 1, 1); + ut_expected_io_set_iov(expected_io, 0, + (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512); + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, + 1, 1); + ut_expected_io_set_iov(expected_io, 0, + (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512); + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + + rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0, + 
BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_io_done == false); + + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); + stub_complete_io(1); + CU_ASSERT(g_io_done == false); + + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); + stub_complete_io(2); + CU_ASSERT(g_io_done == true); + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); + + /* Test multi vector command that needs to be split by strip and then needs to be + * split further due to the capacity of child iovs, the child request offset should + * be rewind to last aligned offset and go success without error. + */ + for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) { + iov[i].iov_base = (void *)((i + 1) * 0x10000); + iov[i].iov_len = 512; + } + iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000); + iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256; + + iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); + iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256; + + iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); + iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; + + bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; + g_io_done = false; + g_io_status = 0; + /* The first expected io should be start from offset 0 to BDEV_IO_NUM_CHILD_IOV - 1 */ + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, + BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV - 1); + for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) { + ut_expected_io_set_iov(expected_io, i, + (void *)((i + 1) * 0x10000), 512); + } + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + /* The second expected io should be start from offset BDEV_IO_NUM_CHILD_IOV - 1 to BDEV_IO_NUM_CHILD_IOV */ + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1, + 1, 2); + ut_expected_io_set_iov(expected_io, 0, + (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000), 256); + ut_expected_io_set_iov(expected_io, 1, + (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256); + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + /* The third expected io should be start from offset BDEV_IO_NUM_CHILD_IOV to BDEV_IO_NUM_CHILD_IOV + 1 */ + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, + 1, 1); + ut_expected_io_set_iov(expected_io, 0, + (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512); + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + + rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0, + BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_io_done == false); + + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); + stub_complete_io(1); + CU_ASSERT(g_io_done == false); + + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); + stub_complete_io(2); + CU_ASSERT(g_io_done == true); + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); + + /* Test multi vector command that needs to be split due to the IO boundary and + * the capacity of child iovs. Especially test the case when the command is + * split due to the capacity of child iovs, the tail address is not aligned with + * block size and is rewinded to the aligned address. + * + * The iovecs used in read request is complex but is based on the data + * collected in the real issue. We change the base addresses but keep the lengths + * not to loose the credibility of the test. 
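These cases exercise the second reason a request is split: a child can carry at most BDEV_IO_NUM_CHILD_IOV iovecs, and when that limit is reached mid-block the child is trimmed back ("rewound") to the last block-aligned byte count so every child covers a whole number of blocks; the trimmed bytes start the next child. A standalone sketch of that trimming rule (assumes 512-byte blocks and a tiny iov limit for brevity; not the actual bdev.c logic):

	#include <stddef.h>
	#include <stdio.h>

	#define BLOCKLEN       512
	#define CHILD_IOV_MAX  4    /* small limit so the example stays short */

	/* Accumulate parent iovec lengths until the child iov limit is reached,
	 * then trim the total back to a block multiple, mirroring the "rewind to
	 * last aligned offset" behavior described in the comments above. */
	static size_t first_child_bytes(const size_t *iov_len, int iovcnt)
	{
		size_t bytes = 0;
		int i;

		for (i = 0; i < iovcnt && i < CHILD_IOV_MAX; i++) {
			bytes += iov_len[i];
		}
		return bytes - (bytes % BLOCKLEN);   /* rewind to the last aligned offset */
	}

	int main(void)
	{
		size_t iov_len[] = { 512, 512, 512, 256, 256, 512 };

		/* 512 + 512 + 512 + 256 = 1792 bytes -> trimmed to 1536 bytes (3 blocks) */
		printf("first child carries %zu bytes\n",
		       first_child_bytes(iov_len, sizeof(iov_len) / sizeof(iov_len[0])));
		return 0;
	}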
+ */ + bdev->optimal_io_boundary = 128; + g_io_done = false; + g_io_status = 0; + + for (i = 0; i < 31; i++) { + iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20)); + iov[i].iov_len = 1024; + } + iov[31].iov_base = (void *)0xFEED1F00000; + iov[31].iov_len = 32768; + iov[32].iov_base = (void *)0xFEED2000000; + iov[32].iov_len = 160; + iov[33].iov_base = (void *)0xFEED2100000; + iov[33].iov_len = 4096; + iov[34].iov_base = (void *)0xFEED2200000; + iov[34].iov_len = 4096; + iov[35].iov_base = (void *)0xFEED2300000; + iov[35].iov_len = 4096; + iov[36].iov_base = (void *)0xFEED2400000; + iov[36].iov_len = 4096; + iov[37].iov_base = (void *)0xFEED2500000; + iov[37].iov_len = 4096; + iov[38].iov_base = (void *)0xFEED2600000; + iov[38].iov_len = 4096; + iov[39].iov_base = (void *)0xFEED2700000; + iov[39].iov_len = 4096; + iov[40].iov_base = (void *)0xFEED2800000; + iov[40].iov_len = 4096; + iov[41].iov_base = (void *)0xFEED2900000; + iov[41].iov_len = 4096; + iov[42].iov_base = (void *)0xFEED2A00000; + iov[42].iov_len = 4096; + iov[43].iov_base = (void *)0xFEED2B00000; + iov[43].iov_len = 12288; + iov[44].iov_base = (void *)0xFEED2C00000; + iov[44].iov_len = 8192; + iov[45].iov_base = (void *)0xFEED2F00000; + iov[45].iov_len = 4096; + iov[46].iov_base = (void *)0xFEED3000000; + iov[46].iov_len = 4096; + iov[47].iov_base = (void *)0xFEED3100000; + iov[47].iov_len = 4096; + iov[48].iov_base = (void *)0xFEED3200000; + iov[48].iov_len = 24576; + iov[49].iov_base = (void *)0xFEED3300000; + iov[49].iov_len = 16384; + iov[50].iov_base = (void *)0xFEED3400000; + iov[50].iov_len = 12288; + iov[51].iov_base = (void *)0xFEED3500000; + iov[51].iov_len = 4096; + iov[52].iov_base = (void *)0xFEED3600000; + iov[52].iov_len = 4096; + iov[53].iov_base = (void *)0xFEED3700000; + iov[53].iov_len = 4096; + iov[54].iov_base = (void *)0xFEED3800000; + iov[54].iov_len = 28672; + iov[55].iov_base = (void *)0xFEED3900000; + iov[55].iov_len = 20480; + iov[56].iov_base = (void *)0xFEED3A00000; + iov[56].iov_len = 4096; + iov[57].iov_base = (void *)0xFEED3B00000; + iov[57].iov_len = 12288; + iov[58].iov_base = (void *)0xFEED3C00000; + iov[58].iov_len = 4096; + iov[59].iov_base = (void *)0xFEED3D00000; + iov[59].iov_len = 4096; + iov[60].iov_base = (void *)0xFEED3E00000; + iov[60].iov_len = 352; + + /* The 1st child IO must be from iov[0] to iov[31] split by the capacity + * of child iovs, + */ + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32); + for (i = 0; i < 32; i++) { + ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); + } + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + + /* The 2nd child IO must be from iov[32] to the first 864 bytes of iov[33] + * split by the IO boundary requirement. + */ + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2); + ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len); + ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864); + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + + /* The 3rd child IO must be from the remaining 3232 bytes of iov[33] to + * the first 864 bytes of iov[46] split by the IO boundary requirement. 
+ */ + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14); + ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864), + iov[33].iov_len - 864); + ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len); + ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len); + ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len); + ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len); + ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len); + ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len); + ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len); + ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len); + ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len); + ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len); + ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len); + ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len); + ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864); + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + + /* The 4th child IO must be from the remaining 3232 bytes of iov[46] to the + * first 864 bytes of iov[52] split by the IO boundary requirement. + */ + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7); + ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864), + iov[46].iov_len - 864); + ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len); + ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len); + ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len); + ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len); + ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len); + ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864); + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + + /* The 5th child IO must be from the remaining 3232 bytes of iov[52] to + * the first 4096 bytes of iov[57] split by the IO boundary requirement. + */ + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6); + ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864), + iov[52].iov_len - 864); + ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len); + ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len); + ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len); + ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len); + ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960); + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + + /* The 6th child IO must be from the remaining 7328 bytes of iov[57] + * to the first 3936 bytes of iov[58] split by the capacity of child iovs. + */ + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3); + ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960), + iov[57].iov_len - 4960); + ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len); + ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936); + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + + /* The 7th child IO is from the remaining 160 bytes of iov[59] and iov[60]. 
*/ + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2); + ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936), + iov[59].iov_len - 3936); + ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len); + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + + rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 61, 0, 543, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_io_done == false); + + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); + stub_complete_io(1); + CU_ASSERT(g_io_done == false); + + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5); + stub_complete_io(5); + CU_ASSERT(g_io_done == false); + + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); + stub_complete_io(1); + CU_ASSERT(g_io_done == true); + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); + CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); + + /* Test a WRITE_ZEROES that would span an I/O boundary. WRITE_ZEROES should not be + * split, so test that. + */ + bdev->optimal_io_boundary = 15; + g_io_done = false; + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0); + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + + rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_io_done == false); + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); + stub_complete_io(1); + CU_ASSERT(g_io_done == true); + + /* Test an UNMAP. This should also not be split. */ + bdev->optimal_io_boundary = 16; + g_io_done = false; + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0); + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + + rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_io_done == false); + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); + stub_complete_io(1); + CU_ASSERT(g_io_done == true); + + /* Test a FLUSH. This should also not be split. */ + bdev->optimal_io_boundary = 16; + g_io_done = false; + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0); + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + + rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_io_done == false); + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); + stub_complete_io(1); + CU_ASSERT(g_io_done == true); + + CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); + + /* Children requests return an error status */ + bdev->optimal_io_boundary = 16; + iov[0].iov_base = (void *)0x10000; + iov[0].iov_len = 512 * 64; + g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; + g_io_done = false; + g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS; + + rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5); + stub_complete_io(4); + CU_ASSERT(g_io_done == false); + CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); + stub_complete_io(1); + CU_ASSERT(g_io_done == true); + CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); + + /* Test if a multi vector command terminated with failure before continueing + * splitting process when one of child I/O failed. + * The multi vector command is as same as the above that needs to be split by strip + * and then needs to be split further due to the capacity of child iovs. 
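All of these cases rely on the harness mechanism set up earlier in the file: the test queues ut_expected_io entries describing the children it expects, and stub_submit_request pops one entry per submission and CU_ASSERTs the type, offset, length and iovecs against it. A stripped-down standalone version of that pattern (made-up names, plain asserts instead of CUnit, an array instead of a TAILQ):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	struct expected_io {
		uint64_t offset;
		uint64_t length;
	};

	/* Expectations queued by the test before submitting the parent I/O. */
	static struct expected_io g_expected[] = { { 14, 2 }, { 16, 6 } };
	static unsigned g_next;

	/* Fake "submit": compare each submitted child against the next expectation. */
	static void fake_submit(uint64_t offset, uint64_t length)
	{
		assert(g_next < sizeof(g_expected) / sizeof(g_expected[0]));
		assert(offset == g_expected[g_next].offset);
		assert(length == g_expected[g_next].length);
		g_next++;
	}

	int main(void)
	{
		fake_submit(14, 2);
		fake_submit(16, 6);
		printf("all expected child I/Os were seen\n");
		return 0;
	}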
+ */ + for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) { + iov[i].iov_base = (void *)((i + 1) * 0x10000); + iov[i].iov_len = 512; + } + iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000); + iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256; + + iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); + iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256; + + iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); + iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; + + bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; + + g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; + g_io_done = false; + g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS; + + rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0, + BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_io_done == false); + + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); + stub_complete_io(1); + CU_ASSERT(g_io_done == true); + CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); + + g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; + + /* for this test we will create the following conditions to hit the code path where + * we are trying to send and IO following a split that has no iovs because we had to + * trim them for alignment reasons. + * + * - 16K boundary, our IO will start at offset 0 with a length of 0x4200 + * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child IOV + * position 30 and overshoot by 0x2e. + * - That means we'll send the IO and loop back to pick up the remaining bytes at + * child IOV index 31. When we do, we find that we have to shorten index 31 by 0x2e + * which eliniates that vector so we just send the first split IO with 30 vectors + * and let the completion pick up the last 2 vectors. + */ + bdev->optimal_io_boundary = 32; + bdev->split_on_optimal_io_boundary = true; + g_io_done = false; + + /* Init all parent IOVs to 0x212 */ + for (i = 0; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) { + iov[i].iov_base = (void *)((i + 1) * 0x10000); + iov[i].iov_len = 0x212; + } + + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV, + BDEV_IO_NUM_CHILD_IOV - 1); + /* expect 0-29 to be 1:1 with the parent iov */ + for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { + ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); + } + + /* expect index 30 to be shortened to 0x1e4 (0x212 - 0x1e) because of the alignment + * where 0x1e is the amount we overshot the 16K boundary + */ + ut_expected_io_set_iov(expected_io, BDEV_IO_NUM_CHILD_IOV - 2, + (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4); + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + + /* 2nd child IO will have 2 remaining vectors, one to pick up from the one that was + * shortened that take it to the next boundary and then a final one to get us to + * 0x4200 bytes for the IO. 
+ */ + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, + BDEV_IO_NUM_CHILD_IOV, 2); + /* position 30 picked up the remaining bytes to the next boundary */ + ut_expected_io_set_iov(expected_io, 0, + (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e); + + /* position 31 picked up the rest of the transfer to get us to 0x4200 */ + ut_expected_io_set_iov(expected_io, 1, + (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2); + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + + rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 1, 0, + BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_io_done == false); + + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); + stub_complete_io(1); + CU_ASSERT(g_io_done == false); + + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); + stub_complete_io(1); + CU_ASSERT(g_io_done == true); + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); + + spdk_put_io_channel(io_ch); + spdk_bdev_close(desc); + free_bdev(bdev); + spdk_bdev_finish(bdev_fini_cb, NULL); + poll_threads(); +} + +static void +bdev_io_split_with_io_wait(void) +{ + struct spdk_bdev *bdev; + struct spdk_bdev_desc *desc = NULL; + struct spdk_io_channel *io_ch; + struct spdk_bdev_channel *channel; + struct spdk_bdev_mgmt_channel *mgmt_ch; + struct spdk_bdev_opts bdev_opts = { + .bdev_io_pool_size = 2, + .bdev_io_cache_size = 1, + }; + struct iovec iov[3]; + struct ut_expected_io *expected_io; + int rc; + + rc = spdk_bdev_set_opts(&bdev_opts); + CU_ASSERT(rc == 0); + spdk_bdev_initialize(bdev_init_cb, NULL); + + bdev = allocate_bdev("bdev0"); + + rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc); + CU_ASSERT(rc == 0); + CU_ASSERT(desc != NULL); + io_ch = spdk_bdev_get_io_channel(desc); + CU_ASSERT(io_ch != NULL); + channel = spdk_io_channel_get_ctx(io_ch); + mgmt_ch = channel->shared_resource->mgmt_ch; + + bdev->optimal_io_boundary = 16; + bdev->split_on_optimal_io_boundary = true; + + rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); + CU_ASSERT(rc == 0); + + /* Now test that a single-vector command is split correctly. + * Offset 14, length 8, payload 0xF000 + * Child - Offset 14, length 2, payload 0xF000 + * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 + * + * Set up the expected values before calling spdk_bdev_read_blocks + */ + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); + ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); + ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + + /* The following children will be submitted sequentially due to the capacity of + * spdk_bdev_io. 
+ */ + + /* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */ + rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); + + /* Completing the first read I/O will submit the first child */ + stub_complete_io(1); + CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); + + /* Completing the first child will submit the second child */ + stub_complete_io(1); + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); + + /* Complete the second child I/O. This should result in our callback getting + * invoked since the parent I/O is now complete. + */ + stub_complete_io(1); + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); + + /* Now set up a more complex, multi-vector command that needs to be split, + * including splitting iovecs. + */ + iov[0].iov_base = (void *)0x10000; + iov[0].iov_len = 512; + iov[1].iov_base = (void *)0x20000; + iov[1].iov_len = 20 * 512; + iov[2].iov_base = (void *)0x30000; + iov[2].iov_len = 11 * 512; + + g_io_done = false; + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2); + ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512); + ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512); + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1); + ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512); + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2); + ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512); + ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512); + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + + rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_io_done == false); + + /* The following children will be submitted sequentially due to the capacity of + * spdk_bdev_io. + */ + + /* Completing the first child will submit the second child */ + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); + stub_complete_io(1); + CU_ASSERT(g_io_done == false); + + /* Completing the second child will submit the third child */ + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); + stub_complete_io(1); + CU_ASSERT(g_io_done == false); + + /* Completing the third child will result in our callback getting invoked + * since the parent I/O is now complete. 
+ */ + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); + stub_complete_io(1); + CU_ASSERT(g_io_done == true); + + CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); + + spdk_put_io_channel(io_ch); + spdk_bdev_close(desc); + free_bdev(bdev); + spdk_bdev_finish(bdev_fini_cb, NULL); + poll_threads(); +} + +static void +bdev_io_alignment(void) +{ + struct spdk_bdev *bdev; + struct spdk_bdev_desc *desc = NULL; + struct spdk_io_channel *io_ch; + struct spdk_bdev_opts bdev_opts = { + .bdev_io_pool_size = 20, + .bdev_io_cache_size = 2, + }; + int rc; + void *buf; + struct iovec iovs[2]; + int iovcnt; + uint64_t alignment; + + rc = spdk_bdev_set_opts(&bdev_opts); + CU_ASSERT(rc == 0); + spdk_bdev_initialize(bdev_init_cb, NULL); + + fn_table.submit_request = stub_submit_request_get_buf; + bdev = allocate_bdev("bdev0"); + + rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc); + CU_ASSERT(rc == 0); + CU_ASSERT(desc != NULL); + io_ch = spdk_bdev_get_io_channel(desc); + CU_ASSERT(io_ch != NULL); + + /* Create aligned buffer */ + rc = posix_memalign(&buf, 4096, 8192); + SPDK_CU_ASSERT_FATAL(rc == 0); + + /* Pass aligned single buffer with no alignment required */ + alignment = 1; + bdev->required_alignment = spdk_u32log2(alignment); + + rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); + CU_ASSERT(rc == 0); + stub_complete_io(1); + CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, + alignment)); + + rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); + CU_ASSERT(rc == 0); + stub_complete_io(1); + CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, + alignment)); + + /* Pass unaligned single buffer with no alignment required */ + alignment = 1; + bdev->required_alignment = spdk_u32log2(alignment); + + rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); + CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); + stub_complete_io(1); + + rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); + CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); + stub_complete_io(1); + + /* Pass unaligned single buffer with 512 alignment required */ + alignment = 512; + bdev->required_alignment = spdk_u32log2(alignment); + + rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); + CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); + CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, + alignment)); + stub_complete_io(1); + CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); + + rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); + CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); + CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, + alignment)); + stub_complete_io(1); + CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); + + /* Pass unaligned single buffer with 4096 alignment required */ + alignment = 4096; + bdev->required_alignment = spdk_u32log2(alignment); + + rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); + CU_ASSERT(g_bdev_io->u.bdev.iovs == 
&g_bdev_io->internal.bounce_iov); + CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, + alignment)); + stub_complete_io(1); + CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); + + rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); + CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); + CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, + alignment)); + stub_complete_io(1); + CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); + + /* Pass aligned iovs with no alignment required */ + alignment = 1; + bdev->required_alignment = spdk_u32log2(alignment); + + iovcnt = 1; + iovs[0].iov_base = buf; + iovs[0].iov_len = 512; + + rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); + stub_complete_io(1); + CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); + + rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); + stub_complete_io(1); + CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); + + /* Pass unaligned iovs with no alignment required */ + alignment = 1; + bdev->required_alignment = spdk_u32log2(alignment); + + iovcnt = 2; + iovs[0].iov_base = buf + 16; + iovs[0].iov_len = 256; + iovs[1].iov_base = buf + 16 + 256 + 32; + iovs[1].iov_len = 256; + + rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); + stub_complete_io(1); + CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); + + rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); + stub_complete_io(1); + CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); + + /* Pass unaligned iov with 2048 alignment required */ + alignment = 2048; + bdev->required_alignment = spdk_u32log2(alignment); + + iovcnt = 2; + iovs[0].iov_base = buf + 16; + iovs[0].iov_len = 256; + iovs[1].iov_base = buf + 16 + 256 + 32; + iovs[1].iov_len = 256; + + rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); + CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); + CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, + alignment)); + stub_complete_io(1); + CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); + + rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); + CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); + CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, + alignment)); + stub_complete_io(1); + CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); + + /* Pass iov without allocated buffer without alignment required */ + alignment = 1; + bdev->required_alignment = spdk_u32log2(alignment); + + iovcnt = 1; + iovs[0].iov_base = NULL; + iovs[0].iov_len = 0; + + rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); + CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, + alignment)); + stub_complete_io(1); 
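The alignment assertions in this test reduce to a simple address-mask check: bdev->required_alignment stores the log2 of the required byte alignment (hence the spdk_u32log2() calls), and an I/O is redirected into internal.bounce_iov as soon as any iovec base fails that check. Below is a minimal standalone sketch of such a predicate; iovs_are_aligned() is a hypothetical name that only mirrors the _are_iovs_aligned() helper these assertions use, not the bdev library internals.

#include <stdbool.h>
#include <stdint.h>
#include <sys/uio.h>

/* Illustrative only: "alignment" is the required byte alignment (a power of two),
 * i.e. 1u << bdev->required_alignment in the tests above. */
static bool
iovs_are_aligned(const struct iovec *iovs, int iovcnt, uintptr_t alignment)
{
	uintptr_t mask = alignment - 1;
	int i;

	for (i = 0; i < iovcnt; i++) {
		if (((uintptr_t)iovs[i].iov_base & mask) != 0) {
			return false;	/* e.g. buf + 4 against 512-byte alignment above */
		}
	}

	return true;
}

With alignment == 1 the mask is zero and every buffer passes, which is why the "no alignment required" cases above keep orig_iovcnt == 0 and never allocate a bounce buffer.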
+ + /* Pass iov without allocated buffer with 1024 alignment required */ + alignment = 1024; + bdev->required_alignment = spdk_u32log2(alignment); + + iovcnt = 1; + iovs[0].iov_base = NULL; + iovs[0].iov_len = 0; + + rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); + CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, + alignment)); + stub_complete_io(1); + + spdk_put_io_channel(io_ch); + spdk_bdev_close(desc); + free_bdev(bdev); + fn_table.submit_request = stub_submit_request; + spdk_bdev_finish(bdev_fini_cb, NULL); + poll_threads(); + + free(buf); +} + +static void +bdev_io_alignment_with_boundary(void) +{ + struct spdk_bdev *bdev; + struct spdk_bdev_desc *desc = NULL; + struct spdk_io_channel *io_ch; + struct spdk_bdev_opts bdev_opts = { + .bdev_io_pool_size = 20, + .bdev_io_cache_size = 2, + }; + int rc; + void *buf; + struct iovec iovs[2]; + int iovcnt; + uint64_t alignment; + + rc = spdk_bdev_set_opts(&bdev_opts); + CU_ASSERT(rc == 0); + spdk_bdev_initialize(bdev_init_cb, NULL); + + fn_table.submit_request = stub_submit_request_get_buf; + bdev = allocate_bdev("bdev0"); + + rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc); + CU_ASSERT(rc == 0); + CU_ASSERT(desc != NULL); + io_ch = spdk_bdev_get_io_channel(desc); + CU_ASSERT(io_ch != NULL); + + /* Create aligned buffer */ + rc = posix_memalign(&buf, 4096, 131072); + SPDK_CU_ASSERT_FATAL(rc == 0); + g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; + + /* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */ + alignment = 512; + bdev->required_alignment = spdk_u32log2(alignment); + bdev->optimal_io_boundary = 2; + bdev->split_on_optimal_io_boundary = true; + + iovcnt = 1; + iovs[0].iov_base = NULL; + iovs[0].iov_len = 512 * 3; + + rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); + stub_complete_io(2); + + /* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */ + alignment = 512; + bdev->required_alignment = spdk_u32log2(alignment); + bdev->optimal_io_boundary = 16; + bdev->split_on_optimal_io_boundary = true; + + iovcnt = 1; + iovs[0].iov_base = NULL; + iovs[0].iov_len = 512 * 16; + + rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); + stub_complete_io(2); + + /* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two children requests */ + alignment = 512; + bdev->required_alignment = spdk_u32log2(alignment); + bdev->optimal_io_boundary = 128; + bdev->split_on_optimal_io_boundary = true; + + iovcnt = 1; + iovs[0].iov_base = buf + 16; + iovs[0].iov_len = 512 * 160; + rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); + stub_complete_io(2); + + /* 512 * 3 with 2 IO boundary */ + alignment = 512; + bdev->required_alignment = spdk_u32log2(alignment); + bdev->optimal_io_boundary = 2; + bdev->split_on_optimal_io_boundary = true; + + iovcnt = 2; + iovs[0].iov_base = buf + 16; + iovs[0].iov_len = 512; + iovs[1].iov_base = buf + 16 + 512 + 32; + iovs[1].iov_len = 1024; + + rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); + stub_complete_io(2); + + rc = 
spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); + stub_complete_io(2); + + /* 512 * 64 with 32 IO boundary */ + bdev->optimal_io_boundary = 32; + iovcnt = 2; + iovs[0].iov_base = buf + 16; + iovs[0].iov_len = 16384; + iovs[1].iov_base = buf + 16 + 16384 + 32; + iovs[1].iov_len = 16384; + + rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); + stub_complete_io(3); + + rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); + stub_complete_io(3); + + /* 512 * 160 with 32 IO boundary */ + iovcnt = 1; + iovs[0].iov_base = buf + 16; + iovs[0].iov_len = 16384 + 65536; + + rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); + stub_complete_io(6); + + spdk_put_io_channel(io_ch); + spdk_bdev_close(desc); + free_bdev(bdev); + fn_table.submit_request = stub_submit_request; + spdk_bdev_finish(bdev_fini_cb, NULL); + poll_threads(); + + free(buf); +} + +static void +histogram_status_cb(void *cb_arg, int status) +{ + g_status = status; +} + +static void +histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram) +{ + g_status = status; + g_histogram = histogram; +} + +static void +histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count, + uint64_t total, uint64_t so_far) +{ + g_count += count; +} + +static void +bdev_histograms(void) +{ + struct spdk_bdev *bdev; + struct spdk_bdev_desc *desc = NULL; + struct spdk_io_channel *ch; + struct spdk_histogram_data *histogram; + uint8_t buf[4096]; + int rc; + + spdk_bdev_initialize(bdev_init_cb, NULL); + + bdev = allocate_bdev("bdev"); + + rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc); + CU_ASSERT(rc == 0); + CU_ASSERT(desc != NULL); + + ch = spdk_bdev_get_io_channel(desc); + CU_ASSERT(ch != NULL); + + /* Enable histogram */ + g_status = -1; + spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true); + poll_threads(); + CU_ASSERT(g_status == 0); + CU_ASSERT(bdev->internal.histogram_enabled == true); + + /* Allocate histogram */ + histogram = spdk_histogram_data_alloc(); + SPDK_CU_ASSERT_FATAL(histogram != NULL); + + /* Check if histogram is zeroed */ + spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); + poll_threads(); + CU_ASSERT(g_status == 0); + SPDK_CU_ASSERT_FATAL(g_histogram != NULL); + + g_count = 0; + spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); + + CU_ASSERT(g_count == 0); + + rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL); + CU_ASSERT(rc == 0); + + spdk_delay_us(10); + stub_complete_io(1); + poll_threads(); + + rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL); + CU_ASSERT(rc == 0); + + spdk_delay_us(10); + stub_complete_io(1); + poll_threads(); + + /* Check if histogram gathered data from all I/O channels */ + g_histogram = NULL; + spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); + poll_threads(); + CU_ASSERT(g_status == 0); + CU_ASSERT(bdev->internal.histogram_enabled == true); + SPDK_CU_ASSERT_FATAL(g_histogram != NULL); + + g_count = 0; + spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); + CU_ASSERT(g_count == 2); + + /* Disable histogram */ + 
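Before the histogram is disabled below, note the shape of the spdk_histogram_data_iterate() callback that histogram_io_count() implements above: each invocation describes one bucket [start, end) together with its hit count and running totals. The same callback shape can produce rough statistics rather than a bare count; the sketch below estimates a mean latency in ticks, with the accumulator struct and its field names being hypothetical.

/* Hypothetical iterate context; assumes the recorded datapoints are TSC deltas,
 * as for the bdev I/O histogram exercised in this test. */
struct latency_acc {
	uint64_t weighted_sum;
	uint64_t total_count;
};

static void
latency_acc_fn(void *ctx, uint64_t start, uint64_t end, uint64_t count,
	       uint64_t total, uint64_t so_far)
{
	struct latency_acc *acc = ctx;

	/* Approximate each datapoint in the bucket by the bucket midpoint. */
	acc->weighted_sum += count * ((start + end) / 2);
	acc->total_count += count;
}

Passing a struct latency_acc pointer as the iterate context and dividing weighted_sum by total_count (when non-zero) gives an approximate mean in ticks, which spdk_get_ticks_hz() converts to seconds.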
spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false); + poll_threads(); + CU_ASSERT(g_status == 0); + CU_ASSERT(bdev->internal.histogram_enabled == false); + + /* Try to run histogram commands on disabled bdev */ + spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); + poll_threads(); + CU_ASSERT(g_status == -EFAULT); + + spdk_histogram_data_free(histogram); + spdk_put_io_channel(ch); + spdk_bdev_close(desc); + free_bdev(bdev); + spdk_bdev_finish(bdev_fini_cb, NULL); + poll_threads(); +} + +static void +_bdev_compare(bool emulated) +{ + struct spdk_bdev *bdev; + struct spdk_bdev_desc *desc = NULL; + struct spdk_io_channel *ioch; + struct ut_expected_io *expected_io; + uint64_t offset, num_blocks; + uint32_t num_completed; + char aa_buf[512]; + char bb_buf[512]; + struct iovec compare_iov; + uint8_t io_type; + int rc; + + if (emulated) { + io_type = SPDK_BDEV_IO_TYPE_READ; + } else { + io_type = SPDK_BDEV_IO_TYPE_COMPARE; + } + + memset(aa_buf, 0xaa, sizeof(aa_buf)); + memset(bb_buf, 0xbb, sizeof(bb_buf)); + + g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; + + spdk_bdev_initialize(bdev_init_cb, NULL); + fn_table.submit_request = stub_submit_request_get_buf; + bdev = allocate_bdev("bdev"); + + rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc); + CU_ASSERT_EQUAL(rc, 0); + SPDK_CU_ASSERT_FATAL(desc != NULL); + ioch = spdk_bdev_get_io_channel(desc); + SPDK_CU_ASSERT_FATAL(ioch != NULL); + + fn_table.submit_request = stub_submit_request_get_buf; + g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; + + offset = 50; + num_blocks = 1; + compare_iov.iov_base = aa_buf; + compare_iov.iov_len = sizeof(aa_buf); + + expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0); + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + + g_io_done = false; + g_compare_read_buf = aa_buf; + g_compare_read_buf_len = sizeof(aa_buf); + rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); + CU_ASSERT_EQUAL(rc, 0); + num_completed = stub_complete_io(1); + CU_ASSERT_EQUAL(num_completed, 1); + CU_ASSERT(g_io_done == true); + CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); + + expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0); + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + + g_io_done = false; + g_compare_read_buf = bb_buf; + g_compare_read_buf_len = sizeof(bb_buf); + rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); + CU_ASSERT_EQUAL(rc, 0); + num_completed = stub_complete_io(1); + CU_ASSERT_EQUAL(num_completed, 1); + CU_ASSERT(g_io_done == true); + CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); + + spdk_put_io_channel(ioch); + spdk_bdev_close(desc); + free_bdev(bdev); + fn_table.submit_request = stub_submit_request; + spdk_bdev_finish(bdev_fini_cb, NULL); + poll_threads(); + + g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; + + g_compare_read_buf = NULL; +} + +static void +bdev_compare(void) +{ + _bdev_compare(true); + _bdev_compare(false); +} + +static void +bdev_compare_and_write(void) +{ + struct spdk_bdev *bdev; + struct spdk_bdev_desc *desc = NULL; + struct spdk_io_channel *ioch; + struct ut_expected_io *expected_io; + uint64_t offset, num_blocks; + uint32_t num_completed; + char aa_buf[512]; + char bb_buf[512]; + char cc_buf[512]; + char write_buf[512]; + struct iovec compare_iov; + struct iovec write_iov; + int rc; + + memset(aa_buf, 0xaa, sizeof(aa_buf)); + memset(bb_buf, 0xbb, 
sizeof(bb_buf)); + memset(cc_buf, 0xcc, sizeof(cc_buf)); + + g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false; + + spdk_bdev_initialize(bdev_init_cb, NULL); + fn_table.submit_request = stub_submit_request_get_buf; + bdev = allocate_bdev("bdev"); + + rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc); + CU_ASSERT_EQUAL(rc, 0); + SPDK_CU_ASSERT_FATAL(desc != NULL); + ioch = spdk_bdev_get_io_channel(desc); + SPDK_CU_ASSERT_FATAL(ioch != NULL); + + fn_table.submit_request = stub_submit_request_get_buf; + g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; + + offset = 50; + num_blocks = 1; + compare_iov.iov_base = aa_buf; + compare_iov.iov_len = sizeof(aa_buf); + write_iov.iov_base = bb_buf; + write_iov.iov_len = sizeof(bb_buf); + + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0); + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + + g_io_done = false; + g_compare_read_buf = aa_buf; + g_compare_read_buf_len = sizeof(aa_buf); + memset(write_buf, 0, sizeof(write_buf)); + g_compare_write_buf = write_buf; + g_compare_write_buf_len = sizeof(write_buf); + rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, + offset, num_blocks, io_done, NULL); + /* Trigger range locking */ + poll_threads(); + CU_ASSERT_EQUAL(rc, 0); + num_completed = stub_complete_io(1); + CU_ASSERT_EQUAL(num_completed, 1); + CU_ASSERT(g_io_done == false); + num_completed = stub_complete_io(1); + /* Trigger range unlocking */ + poll_threads(); + CU_ASSERT_EQUAL(num_completed, 1); + CU_ASSERT(g_io_done == true); + CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); + CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0); + + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + + g_io_done = false; + g_compare_read_buf = cc_buf; + g_compare_read_buf_len = sizeof(cc_buf); + memset(write_buf, 0, sizeof(write_buf)); + g_compare_write_buf = write_buf; + g_compare_write_buf_len = sizeof(write_buf); + rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, + offset, num_blocks, io_done, NULL); + /* Trigger range locking */ + poll_threads(); + CU_ASSERT_EQUAL(rc, 0); + num_completed = stub_complete_io(1); + /* Trigger range unlocking earlier because we expect error here */ + poll_threads(); + CU_ASSERT_EQUAL(num_completed, 1); + CU_ASSERT(g_io_done == true); + CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); + num_completed = stub_complete_io(1); + CU_ASSERT_EQUAL(num_completed, 0); + + spdk_put_io_channel(ioch); + spdk_bdev_close(desc); + free_bdev(bdev); + fn_table.submit_request = stub_submit_request; + spdk_bdev_finish(bdev_fini_cb, NULL); + poll_threads(); + + g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; + + g_compare_read_buf = NULL; + g_compare_write_buf = NULL; +} + +static void +bdev_write_zeroes(void) +{ + struct spdk_bdev *bdev; + struct spdk_bdev_desc *desc = NULL; + struct spdk_io_channel *ioch; + struct ut_expected_io *expected_io; + uint64_t offset, num_io_blocks, num_blocks; + uint32_t num_completed, num_requests; + int rc; + + spdk_bdev_initialize(bdev_init_cb, NULL); + bdev = allocate_bdev("bdev"); + + rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc); + CU_ASSERT_EQUAL(rc, 0); + SPDK_CU_ASSERT_FATAL(desc != 
NULL); + ioch = spdk_bdev_get_io_channel(desc); + SPDK_CU_ASSERT_FATAL(ioch != NULL); + + fn_table.submit_request = stub_submit_request; + g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; + + /* First test that if the bdev supports write_zeroes, the request won't be split */ + bdev->md_len = 0; + bdev->blocklen = 4096; + num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; + + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); + CU_ASSERT_EQUAL(rc, 0); + num_completed = stub_complete_io(1); + CU_ASSERT_EQUAL(num_completed, 1); + + /* Check that if write zeroes is not supported it'll be replaced by regular writes */ + ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false); + num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen; + num_requests = 2; + num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests; + + for (offset = 0; offset < num_requests; ++offset) { + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, + offset * num_io_blocks, num_io_blocks, 0); + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + } + + rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); + CU_ASSERT_EQUAL(rc, 0); + num_completed = stub_complete_io(num_requests); + CU_ASSERT_EQUAL(num_completed, num_requests); + + /* Check that the splitting is correct if bdev has interleaved metadata */ + bdev->md_interleave = true; + bdev->md_len = 64; + bdev->blocklen = 4096 + 64; + num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; + + num_requests = offset = 0; + while (offset < num_blocks) { + num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset); + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, + offset, num_io_blocks, 0); + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + offset += num_io_blocks; + num_requests++; + } + + rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); + CU_ASSERT_EQUAL(rc, 0); + num_completed = stub_complete_io(num_requests); + CU_ASSERT_EQUAL(num_completed, num_requests); + num_completed = stub_complete_io(num_requests); + assert(num_completed == 0); + + /* Check the the same for separate metadata buffer */ + bdev->md_interleave = false; + bdev->md_len = 64; + bdev->blocklen = 4096; + + num_requests = offset = 0; + while (offset < num_blocks) { + num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks); + expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, + offset, num_io_blocks, 0); + expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen; + TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); + offset += num_io_blocks; + num_requests++; + } + + rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); + CU_ASSERT_EQUAL(rc, 0); + num_completed = stub_complete_io(num_requests); + CU_ASSERT_EQUAL(num_completed, num_requests); + + ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true); + spdk_put_io_channel(ioch); + spdk_bdev_close(desc); + free_bdev(bdev); + spdk_bdev_finish(bdev_fini_cb, NULL); + poll_threads(); +} + +static void +bdev_open_while_hotremove(void) +{ + struct spdk_bdev *bdev; + struct spdk_bdev_desc *desc[2] = {}; + int rc; + + bdev = allocate_bdev("bdev"); + + rc = spdk_bdev_open(bdev, false, NULL, NULL, &desc[0]); + CU_ASSERT(rc == 0); + 
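This test and the two that follow (bdev_close_while_hotremove, bdev_open_ext) exercise hot-remove handling from the consumer's side. For reference, a typical spdk_bdev_open_ext() event callback reacts to the remove event by closing its descriptor, which is what lets a pending unregister finish; the sketch below is illustrative, and the test's own bdev_open_cb1/bdev_open_cb2 callbacks only record the event type.

/* Sketch of a consumer event callback for spdk_bdev_open_ext(); the event_ctx
 * handling is an assumption made for illustration. */
static void
example_bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc **desc = event_ctx;

	if (type == SPDK_BDEV_EVENT_REMOVE && *desc != NULL) {
		/* Closing the descriptor lets a deferred spdk_bdev_unregister() complete. */
		spdk_bdev_close(*desc);
		*desc = NULL;
	}
}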
SPDK_CU_ASSERT_FATAL(desc[0] != NULL); + + spdk_bdev_unregister(bdev, NULL, NULL); + + rc = spdk_bdev_open(bdev, false, NULL, NULL, &desc[1]); + CU_ASSERT(rc == -ENODEV); + SPDK_CU_ASSERT_FATAL(desc[1] == NULL); + + spdk_bdev_close(desc[0]); + free_bdev(bdev); +} + +static void +bdev_close_while_hotremove(void) +{ + struct spdk_bdev *bdev; + struct spdk_bdev_desc *desc = NULL; + int rc = 0; + + bdev = allocate_bdev("bdev"); + + rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc); + CU_ASSERT_EQUAL(rc, 0); + + /* Simulate hot-unplug by unregistering bdev */ + g_event_type1 = 0xFF; + g_unregister_arg = NULL; + g_unregister_rc = -1; + spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678); + /* Close device while remove event is in flight */ + spdk_bdev_close(desc); + + /* Ensure that unregister callback is delayed */ + CU_ASSERT_EQUAL(g_unregister_arg, NULL); + CU_ASSERT_EQUAL(g_unregister_rc, -1); + + poll_threads(); + + /* Event callback shall not be issued because device was closed */ + CU_ASSERT_EQUAL(g_event_type1, 0xFF); + /* Unregister callback is issued */ + CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678); + CU_ASSERT_EQUAL(g_unregister_rc, 0); + + free_bdev(bdev); +} + +static void +bdev_open_ext(void) +{ + struct spdk_bdev *bdev; + struct spdk_bdev_desc *desc1 = NULL; + struct spdk_bdev_desc *desc2 = NULL; + int rc = 0; + + bdev = allocate_bdev("bdev"); + + rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1); + CU_ASSERT_EQUAL(rc, -EINVAL); + + rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1); + CU_ASSERT_EQUAL(rc, 0); + + rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2); + CU_ASSERT_EQUAL(rc, 0); + + g_event_type1 = 0xFF; + g_event_type2 = 0xFF; + + /* Simulate hot-unplug by unregistering bdev */ + spdk_bdev_unregister(bdev, NULL, NULL); + poll_threads(); + + /* Check if correct events have been triggered in event callback fn */ + CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE); + CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE); + + free_bdev(bdev); + poll_threads(); +} + +struct timeout_io_cb_arg { + struct iovec iov; + uint8_t type; +}; + +static int +bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch) +{ + struct spdk_bdev_io *bdev_io; + int n = 0; + + if (!ch) { + return -1; + } + + TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) { + n++; + } + + return n; +} + +static void +bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io) +{ + struct timeout_io_cb_arg *ctx = cb_arg; + + ctx->type = bdev_io->type; + ctx->iov.iov_base = bdev_io->iov.iov_base; + ctx->iov.iov_len = bdev_io->iov.iov_len; +} + +static void +bdev_set_io_timeout(void) +{ + struct spdk_bdev *bdev; + struct spdk_bdev_desc *desc = NULL; + struct spdk_io_channel *io_ch = NULL; + struct spdk_bdev_channel *bdev_ch = NULL; + struct timeout_io_cb_arg cb_arg; + + spdk_bdev_initialize(bdev_init_cb, NULL); + + bdev = allocate_bdev("bdev"); + + CU_ASSERT(spdk_bdev_open(bdev, true, NULL, NULL, &desc) == 0); + SPDK_CU_ASSERT_FATAL(desc != NULL); + io_ch = spdk_bdev_get_io_channel(desc); + CU_ASSERT(io_ch != NULL); + + bdev_ch = spdk_io_channel_get_ctx(io_ch); + CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); + + /* This is the part1. 
+ * We will check the bdev_ch->io_submitted list + * TO make sure that it can link IOs and only the user submitted IOs + */ + CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0); + CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); + CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); + CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); + stub_complete_io(1); + CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); + stub_complete_io(1); + CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); + + /* Split IO */ + bdev->optimal_io_boundary = 16; + bdev->split_on_optimal_io_boundary = true; + + /* Now test that a single-vector command is split correctly. + * Offset 14, length 8, payload 0xF000 + * Child - Offset 14, length 2, payload 0xF000 + * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 + * + * Set up the expected values before calling spdk_bdev_read_blocks + */ + CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); + /* We count all submitted IOs including IO that are generated by splitting. */ + CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 3); + stub_complete_io(1); + CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); + stub_complete_io(1); + CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); + + /* Also include the reset IO */ + CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); + CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); + poll_threads(); + stub_complete_io(1); + poll_threads(); + CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); + + /* This is part2 + * Test the desc timeout poller register + */ + + /* Successfully set the timeout */ + CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); + CU_ASSERT(desc->io_timeout_poller != NULL); + CU_ASSERT(desc->timeout_in_sec == 30); + CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); + CU_ASSERT(desc->cb_arg == &cb_arg); + + /* Change the timeout limit */ + CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0); + CU_ASSERT(desc->io_timeout_poller != NULL); + CU_ASSERT(desc->timeout_in_sec == 20); + CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); + CU_ASSERT(desc->cb_arg == &cb_arg); + + /* Disable the timeout */ + CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0); + CU_ASSERT(desc->io_timeout_poller == NULL); + + /* This the part3 + * We will test to catch timeout IO and check whether the IO is + * the submitted one. 
+ */ + memset(&cb_arg, 0, sizeof(cb_arg)); + CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); + CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0); + + /* Don't reach the limit */ + spdk_delay_us(15 * spdk_get_ticks_hz()); + poll_threads(); + CU_ASSERT(cb_arg.type == 0); + CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); + CU_ASSERT(cb_arg.iov.iov_len == 0); + + /* 15 + 15 = 30 reach the limit */ + spdk_delay_us(15 * spdk_get_ticks_hz()); + poll_threads(); + CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); + CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000); + CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen); + stub_complete_io(1); + + /* Use the same split IO above and check the IO */ + memset(&cb_arg, 0, sizeof(cb_arg)); + CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); + + /* The first child complete in time */ + spdk_delay_us(15 * spdk_get_ticks_hz()); + poll_threads(); + stub_complete_io(1); + CU_ASSERT(cb_arg.type == 0); + CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); + CU_ASSERT(cb_arg.iov.iov_len == 0); + + /* The second child reach the limit */ + spdk_delay_us(15 * spdk_get_ticks_hz()); + poll_threads(); + CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); + CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000); + CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen); + stub_complete_io(1); + + /* Also include the reset IO */ + memset(&cb_arg, 0, sizeof(cb_arg)); + CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); + spdk_delay_us(30 * spdk_get_ticks_hz()); + poll_threads(); + CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET); + stub_complete_io(1); + poll_threads(); + + spdk_put_io_channel(io_ch); + spdk_bdev_close(desc); + free_bdev(bdev); + spdk_bdev_finish(bdev_fini_cb, NULL); + poll_threads(); +} + +static void +lba_range_overlap(void) +{ + struct lba_range r1, r2; + + r1.offset = 100; + r1.length = 50; + + r2.offset = 0; + r2.length = 1; + CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); + + r2.offset = 0; + r2.length = 100; + CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); + + r2.offset = 0; + r2.length = 110; + CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); + + r2.offset = 100; + r2.length = 10; + CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); + + r2.offset = 110; + r2.length = 20; + CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); + + r2.offset = 140; + r2.length = 150; + CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); + + r2.offset = 130; + r2.length = 200; + CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); + + r2.offset = 150; + r2.length = 100; + CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); + + r2.offset = 110; + r2.length = 0; + CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); +} + +static bool g_lock_lba_range_done; +static bool g_unlock_lba_range_done; + +static void +lock_lba_range_done(void *ctx, int status) +{ + g_lock_lba_range_done = true; +} + +static void +unlock_lba_range_done(void *ctx, int status) +{ + g_unlock_lba_range_done = true; +} + +static void +lock_lba_range_check_ranges(void) +{ + struct spdk_bdev *bdev; + struct spdk_bdev_desc *desc = NULL; + struct spdk_io_channel *io_ch; + struct spdk_bdev_channel *channel; + struct lba_range *range; + int ctx1; + int rc; + + spdk_bdev_initialize(bdev_init_cb, NULL); + + bdev = allocate_bdev("bdev0"); + + rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc); + CU_ASSERT(rc == 0); + CU_ASSERT(desc != NULL); + io_ch = spdk_bdev_get_io_channel(desc); + CU_ASSERT(io_ch != NULL); + channel 
= spdk_io_channel_get_ctx(io_ch); + + g_lock_lba_range_done = false; + rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); + CU_ASSERT(rc == 0); + poll_threads(); + + CU_ASSERT(g_lock_lba_range_done == true); + range = TAILQ_FIRST(&channel->locked_ranges); + SPDK_CU_ASSERT_FATAL(range != NULL); + CU_ASSERT(range->offset == 20); + CU_ASSERT(range->length == 10); + CU_ASSERT(range->owner_ch == channel); + + /* Unlocks must exactly match a lock. */ + g_unlock_lba_range_done = false; + rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1); + CU_ASSERT(rc == -EINVAL); + CU_ASSERT(g_unlock_lba_range_done == false); + + rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); + CU_ASSERT(rc == 0); + spdk_delay_us(100); + poll_threads(); + + CU_ASSERT(g_unlock_lba_range_done == true); + CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); + + spdk_put_io_channel(io_ch); + spdk_bdev_close(desc); + free_bdev(bdev); + spdk_bdev_finish(bdev_fini_cb, NULL); + poll_threads(); +} + +static void +lock_lba_range_with_io_outstanding(void) +{ + struct spdk_bdev *bdev; + struct spdk_bdev_desc *desc = NULL; + struct spdk_io_channel *io_ch; + struct spdk_bdev_channel *channel; + struct lba_range *range; + char buf[4096]; + int ctx1; + int rc; + + spdk_bdev_initialize(bdev_init_cb, NULL); + + bdev = allocate_bdev("bdev0"); + + rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc); + CU_ASSERT(rc == 0); + CU_ASSERT(desc != NULL); + io_ch = spdk_bdev_get_io_channel(desc); + CU_ASSERT(io_ch != NULL); + channel = spdk_io_channel_get_ctx(io_ch); + + g_io_done = false; + rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); + CU_ASSERT(rc == 0); + + g_lock_lba_range_done = false; + rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); + CU_ASSERT(rc == 0); + poll_threads(); + + /* The lock should immediately become valid, since there are no outstanding + * write I/O. + */ + CU_ASSERT(g_io_done == false); + CU_ASSERT(g_lock_lba_range_done == true); + range = TAILQ_FIRST(&channel->locked_ranges); + SPDK_CU_ASSERT_FATAL(range != NULL); + CU_ASSERT(range->offset == 20); + CU_ASSERT(range->length == 10); + CU_ASSERT(range->owner_ch == channel); + CU_ASSERT(range->locked_ctx == &ctx1); + + rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); + CU_ASSERT(rc == 0); + stub_complete_io(1); + spdk_delay_us(100); + poll_threads(); + + CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); + + /* Now try again, but with a write I/O. */ + g_io_done = false; + rc = spdk_bdev_write_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); + CU_ASSERT(rc == 0); + + g_lock_lba_range_done = false; + rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); + CU_ASSERT(rc == 0); + poll_threads(); + + /* The lock should not be fully valid yet, since a write I/O is outstanding. + * But note that the range should be on the channel's locked_list, to make sure no + * new write I/O are started. + */ + CU_ASSERT(g_io_done == false); + CU_ASSERT(g_lock_lba_range_done == false); + range = TAILQ_FIRST(&channel->locked_ranges); + SPDK_CU_ASSERT_FATAL(range != NULL); + CU_ASSERT(range->offset == 20); + CU_ASSERT(range->length == 10); + + /* Complete the write I/O. This should make the lock valid (checked by confirming + * our callback was invoked). 
+ */ + stub_complete_io(1); + spdk_delay_us(100); + poll_threads(); + CU_ASSERT(g_io_done == true); + CU_ASSERT(g_lock_lba_range_done == true); + + rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); + CU_ASSERT(rc == 0); + poll_threads(); + + CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); + + spdk_put_io_channel(io_ch); + spdk_bdev_close(desc); + free_bdev(bdev); + spdk_bdev_finish(bdev_fini_cb, NULL); + poll_threads(); +} + +static void +lock_lba_range_overlapped(void) +{ + struct spdk_bdev *bdev; + struct spdk_bdev_desc *desc = NULL; + struct spdk_io_channel *io_ch; + struct spdk_bdev_channel *channel; + struct lba_range *range; + int ctx1; + int rc; + + spdk_bdev_initialize(bdev_init_cb, NULL); + + bdev = allocate_bdev("bdev0"); + + rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc); + CU_ASSERT(rc == 0); + CU_ASSERT(desc != NULL); + io_ch = spdk_bdev_get_io_channel(desc); + CU_ASSERT(io_ch != NULL); + channel = spdk_io_channel_get_ctx(io_ch); + + /* Lock range 20-29. */ + g_lock_lba_range_done = false; + rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); + CU_ASSERT(rc == 0); + poll_threads(); + + CU_ASSERT(g_lock_lba_range_done == true); + range = TAILQ_FIRST(&channel->locked_ranges); + SPDK_CU_ASSERT_FATAL(range != NULL); + CU_ASSERT(range->offset == 20); + CU_ASSERT(range->length == 10); + + /* Try to lock range 25-39. It should not lock immediately, since it overlaps with + * 20-29. + */ + g_lock_lba_range_done = false; + rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1); + CU_ASSERT(rc == 0); + poll_threads(); + + CU_ASSERT(g_lock_lba_range_done == false); + range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); + SPDK_CU_ASSERT_FATAL(range != NULL); + CU_ASSERT(range->offset == 25); + CU_ASSERT(range->length == 15); + + /* Unlock 20-29. This should result in range 25-39 now getting locked since it + * no longer overlaps with an active lock. + */ + g_unlock_lba_range_done = false; + rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); + CU_ASSERT(rc == 0); + poll_threads(); + + CU_ASSERT(g_unlock_lba_range_done == true); + CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); + range = TAILQ_FIRST(&channel->locked_ranges); + SPDK_CU_ASSERT_FATAL(range != NULL); + CU_ASSERT(range->offset == 25); + CU_ASSERT(range->length == 15); + + /* Lock 40-59. This should immediately lock since it does not overlap with the + * currently active 25-39 lock. + */ + g_lock_lba_range_done = false; + rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1); + CU_ASSERT(rc == 0); + poll_threads(); + + CU_ASSERT(g_lock_lba_range_done == true); + range = TAILQ_FIRST(&bdev->internal.locked_ranges); + SPDK_CU_ASSERT_FATAL(range != NULL); + range = TAILQ_NEXT(range, tailq); + SPDK_CU_ASSERT_FATAL(range != NULL); + CU_ASSERT(range->offset == 40); + CU_ASSERT(range->length == 20); + + /* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. */ + g_lock_lba_range_done = false; + rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1); + CU_ASSERT(rc == 0); + poll_threads(); + + CU_ASSERT(g_lock_lba_range_done == false); + range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); + SPDK_CU_ASSERT_FATAL(range != NULL); + CU_ASSERT(range->offset == 35); + CU_ASSERT(range->length == 10); + + /* Unlock 25-39. Make sure that 35-44 is still in the pending list, since + * the 40-59 lock is still active. 
+ */ + g_unlock_lba_range_done = false; + rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1); + CU_ASSERT(rc == 0); + poll_threads(); + + CU_ASSERT(g_unlock_lba_range_done == true); + CU_ASSERT(g_lock_lba_range_done == false); + range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); + SPDK_CU_ASSERT_FATAL(range != NULL); + CU_ASSERT(range->offset == 35); + CU_ASSERT(range->length == 10); + + /* Unlock 40-59. This should result in 35-44 now getting locked, since there are + * no longer any active overlapping locks. + */ + g_unlock_lba_range_done = false; + rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1); + CU_ASSERT(rc == 0); + poll_threads(); + + CU_ASSERT(g_unlock_lba_range_done == true); + CU_ASSERT(g_lock_lba_range_done == true); + CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); + range = TAILQ_FIRST(&bdev->internal.locked_ranges); + SPDK_CU_ASSERT_FATAL(range != NULL); + CU_ASSERT(range->offset == 35); + CU_ASSERT(range->length == 10); + + /* Finally, unlock 35-44. */ + g_unlock_lba_range_done = false; + rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1); + CU_ASSERT(rc == 0); + poll_threads(); + + CU_ASSERT(g_unlock_lba_range_done == true); + CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges)); + + spdk_put_io_channel(io_ch); + spdk_bdev_close(desc); + free_bdev(bdev); + spdk_bdev_finish(bdev_fini_cb, NULL); + poll_threads(); +} + +static void +abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) +{ + g_abort_done = true; + g_abort_status = bdev_io->internal.status; + spdk_bdev_free_io(bdev_io); +} + +static void +bdev_io_abort(void) +{ + struct spdk_bdev *bdev; + struct spdk_bdev_desc *desc = NULL; + struct spdk_io_channel *io_ch; + struct spdk_bdev_channel *channel; + struct spdk_bdev_mgmt_channel *mgmt_ch; + struct spdk_bdev_opts bdev_opts = { + .bdev_io_pool_size = 7, + .bdev_io_cache_size = 2, + }; + struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2]; + uint64_t io_ctx1 = 0, io_ctx2 = 0, i; + int rc; + + rc = spdk_bdev_set_opts(&bdev_opts); + CU_ASSERT(rc == 0); + spdk_bdev_initialize(bdev_init_cb, NULL); + + bdev = allocate_bdev("bdev0"); + + rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc); + CU_ASSERT(rc == 0); + CU_ASSERT(desc != NULL); + io_ch = spdk_bdev_get_io_channel(desc); + CU_ASSERT(io_ch != NULL); + channel = spdk_io_channel_get_ctx(io_ch); + mgmt_ch = channel->shared_resource->mgmt_ch; + + g_abort_done = false; + + ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false); + + rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); + CU_ASSERT(rc == -ENOTSUP); + + ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true); + + rc = spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_abort_done == true); + CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED); + + /* Test the case that the target I/O was successfully aborted. 
*/ + g_io_done = false; + + rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); + CU_ASSERT(rc == 0); + CU_ASSERT(g_io_done == false); + + g_abort_done = false; + g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; + + rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_io_done == true); + CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); + stub_complete_io(1); + CU_ASSERT(g_abort_done == true); + CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); + + /* Test the case that the target I/O was not aborted because it completed + * in the middle of execution of the abort. + */ + g_io_done = false; + + rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); + CU_ASSERT(rc == 0); + CU_ASSERT(g_io_done == false); + + g_abort_done = false; + g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; + + rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_io_done == false); + + g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; + stub_complete_io(1); + CU_ASSERT(g_io_done == true); + CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); + + g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; + stub_complete_io(1); + CU_ASSERT(g_abort_done == true); + CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); + + g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; + + bdev->optimal_io_boundary = 16; + bdev->split_on_optimal_io_boundary = true; + + /* Test that a single-vector command which is split is aborted correctly. + * Offset 14, length 8, payload 0xF000 + * Child - Offset 14, length 2, payload 0xF000 + * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 + */ + g_io_done = false; + + rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, &io_ctx1); + CU_ASSERT(rc == 0); + CU_ASSERT(g_io_done == false); + + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); + + g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; + + rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_io_done == true); + CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); + stub_complete_io(2); + CU_ASSERT(g_abort_done == true); + CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); + + /* Test that a multi-vector command that needs to be split by strip and then + * needs to be split is aborted correctly. Abort is requested before the second + * child I/O was submitted. The parent I/O should complete with failure without + * submitting the second child I/O. 
+ */ + for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) { + iov[i].iov_base = (void *)((i + 1) * 0x10000); + iov[i].iov_len = 512; + } + + bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; + g_io_done = false; + rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0, + BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1); + CU_ASSERT(rc == 0); + CU_ASSERT(g_io_done == false); + + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); + + g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; + + rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_io_done == true); + CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); + stub_complete_io(1); + CU_ASSERT(g_abort_done == true); + CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); + + g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; + + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); + + bdev->optimal_io_boundary = 16; + g_io_done = false; + + /* Test that a ingle-vector command which is split is aborted correctly. + * Differently from the above, the child abort request will be submitted + * sequentially due to the capacity of spdk_bdev_io. + */ + rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 50, io_done, &io_ctx1); + CU_ASSERT(rc == 0); + CU_ASSERT(g_io_done == false); + + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); + + g_abort_done = false; + g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; + + rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); + + stub_complete_io(1); + CU_ASSERT(g_io_done == true); + CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); + stub_complete_io(3); + CU_ASSERT(g_abort_done == true); + CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); + + g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; + + CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); + + spdk_put_io_channel(io_ch); + spdk_bdev_close(desc); + free_bdev(bdev); + spdk_bdev_finish(bdev_fini_cb, NULL); + poll_threads(); +} + +int +main(int argc, char **argv) +{ + CU_pSuite suite = NULL; + unsigned int num_failures; + + CU_set_error_action(CUEA_ABORT); + CU_initialize_registry(); + + suite = CU_add_suite("bdev", null_init, null_clean); + + CU_ADD_TEST(suite, bytes_to_blocks_test); + CU_ADD_TEST(suite, num_blocks_test); + CU_ADD_TEST(suite, io_valid_test); + CU_ADD_TEST(suite, open_write_test); + CU_ADD_TEST(suite, alias_add_del_test); + CU_ADD_TEST(suite, get_device_stat_test); + CU_ADD_TEST(suite, bdev_io_types_test); + CU_ADD_TEST(suite, bdev_io_wait_test); + CU_ADD_TEST(suite, bdev_io_spans_boundary_test); + CU_ADD_TEST(suite, bdev_io_split_test); + CU_ADD_TEST(suite, bdev_io_split_with_io_wait); + CU_ADD_TEST(suite, bdev_io_alignment_with_boundary); + CU_ADD_TEST(suite, bdev_io_alignment); + CU_ADD_TEST(suite, bdev_histograms); + CU_ADD_TEST(suite, bdev_write_zeroes); + CU_ADD_TEST(suite, bdev_compare_and_write); + CU_ADD_TEST(suite, bdev_compare); + CU_ADD_TEST(suite, bdev_open_while_hotremove); + CU_ADD_TEST(suite, bdev_close_while_hotremove); + CU_ADD_TEST(suite, bdev_open_ext); + CU_ADD_TEST(suite, bdev_set_io_timeout); + CU_ADD_TEST(suite, lba_range_overlap); + CU_ADD_TEST(suite, lock_lba_range_check_ranges); + CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding); + CU_ADD_TEST(suite, lock_lba_range_overlapped); + CU_ADD_TEST(suite, bdev_io_abort); + + allocate_cores(1); + allocate_threads(1); + 
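The io_wait_queue assertions in the abort test above (and the bdev_io_wait_test case registered in this suite) rely on the standard -ENOMEM retry pattern: when the spdk_bdev_io pool is exhausted, submission fails with -ENOMEM and the caller queues a wait entry that is invoked once an spdk_bdev_io is returned to the pool. A rough sketch of that pattern, reusing this file's io_done() callback, follows; the retry_ctx struct and the function names are illustrative.

struct retry_ctx {
	struct spdk_bdev_io_wait_entry entry;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *ch;
};

static void retry_read(void *arg);

static void
submit_or_wait(struct retry_ctx *ctx)
{
	int rc;

	rc = spdk_bdev_read_blocks(ctx->desc, ctx->ch, NULL, 0, 1, io_done, NULL);
	if (rc == -ENOMEM) {
		/* Park until an spdk_bdev_io frees up, then retry from retry_read(). */
		ctx->entry.bdev = spdk_bdev_desc_get_bdev(ctx->desc);
		ctx->entry.cb_fn = retry_read;
		ctx->entry.cb_arg = ctx;
		spdk_bdev_queue_io_wait(ctx->entry.bdev, ctx->ch, &ctx->entry);
	}
}

static void
retry_read(void *arg)
{
	submit_or_wait(arg);
}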
set_thread(0); + + CU_basic_set_mode(CU_BRM_VERBOSE); + CU_basic_run_tests(); + num_failures = CU_get_number_of_failures(); + CU_cleanup_registry(); + + free_threads(); + free_cores(); + + return num_failures; +} diff --git a/src/spdk/test/unit/lib/bdev/bdev_ocssd.c/.gitignore b/src/spdk/test/unit/lib/bdev/bdev_ocssd.c/.gitignore new file mode 100644 index 000000000..906b8067c --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/bdev_ocssd.c/.gitignore @@ -0,0 +1 @@ +bdev_ocssd_ut diff --git a/src/spdk/test/unit/lib/bdev/bdev_ocssd.c/Makefile b/src/spdk/test/unit/lib/bdev/bdev_ocssd.c/Makefile new file mode 100644 index 000000000..7106d46fc --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/bdev_ocssd.c/Makefile @@ -0,0 +1,38 @@ +# +# BSD LICENSE +# +# Copyright (c) Intel Corporation. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..) + +TEST_FILE = bdev_ocssd_ut.c + +include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk diff --git a/src/spdk/test/unit/lib/bdev/bdev_ocssd.c/bdev_ocssd_ut.c b/src/spdk/test/unit/lib/bdev/bdev_ocssd.c/bdev_ocssd_ut.c new file mode 100644 index 000000000..a2f8e7f71 --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/bdev_ocssd.c/bdev_ocssd_ut.c @@ -0,0 +1,1195 @@ +/*- + * BSD LICENSE + * + * Copyright (c) Intel Corporation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "spdk/stdinc.h" +#include "spdk_cunit.h" +#include "spdk/nvme_ocssd_spec.h" +#include "spdk/thread.h" +#include "spdk/bdev_module.h" +#include "spdk/util.h" +#include "spdk_internal/mock.h" + +#include "bdev/nvme/bdev_ocssd.c" +#include "bdev/nvme/common.c" +#include "common/lib/test_env.c" +#include "unit/lib/json_mock.c" + +DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module)); +DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_ns, bool, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid), + true); +DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 4096); +DEFINE_STUB(spdk_nvme_ns_is_active, bool, (struct spdk_nvme_ns *ns), true); +DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev)); +DEFINE_STUB_V(spdk_bdev_io_complete_nvme_status, (struct spdk_bdev_io *bdev_io, uint32_t cdw0, + int sct, int sc)); +DEFINE_STUB(spdk_bdev_io_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_io *bdev_io), + NULL); +DEFINE_STUB(spdk_bdev_push_media_events, int, (struct spdk_bdev *bdev, + const struct spdk_bdev_media_event *events, + size_t num_events), 0); +DEFINE_STUB_V(spdk_bdev_notify_media_management, (struct spdk_bdev *bdev)); +DEFINE_STUB_V(nvme_ctrlr_depopulate_namespace_done, (struct nvme_bdev_ctrlr *ctrlr)); +DEFINE_STUB_V(spdk_bdev_module_finish_done, (void)); + +struct nvme_request { + spdk_nvme_cmd_cb cb_fn; + void *cb_arg; + TAILQ_ENTRY(nvme_request) tailq; +}; + +struct spdk_nvme_qpair { + TAILQ_HEAD(, nvme_request) requests; +}; + +struct spdk_nvme_ns { + uint32_t nsid; +}; + +struct spdk_nvme_ctrlr { + struct spdk_nvme_transport_id trid; + struct spdk_ocssd_geometry_data geometry; + struct spdk_nvme_qpair *admin_qpair; + struct spdk_nvme_ns *ns; + uint32_t ns_count; + struct spdk_ocssd_chunk_information_entry *chunk_info; + uint64_t num_chunks; + + LIST_ENTRY(spdk_nvme_ctrlr) list; +}; + +static LIST_HEAD(, spdk_nvme_ctrlr) g_ctrlr_list = LIST_HEAD_INITIALIZER(g_ctrlr_list); +static TAILQ_HEAD(, spdk_bdev) g_bdev_list = TAILQ_HEAD_INITIALIZER(g_bdev_list); +static struct spdk_thread *g_thread; + +static struct spdk_nvme_ctrlr * +find_controller(const struct spdk_nvme_transport_id *trid) +{ + struct spdk_nvme_ctrlr *ctrlr; + + LIST_FOREACH(ctrlr, &g_ctrlr_list, list) { + if (!spdk_nvme_transport_id_compare(trid, &ctrlr->trid)) { + return ctrlr; + } + } + + return NULL; +} + +static void +free_controller(struct spdk_nvme_ctrlr *ctrlr) +{ + CU_ASSERT(!nvme_bdev_ctrlr_get(&ctrlr->trid)); + LIST_REMOVE(ctrlr, list); + spdk_nvme_ctrlr_free_io_qpair(ctrlr->admin_qpair); + free(ctrlr->chunk_info); + free(ctrlr->ns); + free(ctrlr); +} + +static uint64_t +chunk_offset_to_lba(struct spdk_ocssd_geometry_data *geo, uint64_t 
offset) +{ + uint64_t chk, pu, grp; + uint64_t chk_off, pu_off, grp_off; + + chk_off = geo->lbaf.lbk_len; + pu_off = geo->lbaf.chk_len + chk_off; + grp_off = geo->lbaf.pu_len + pu_off; + + chk = offset % geo->num_chk; + pu = (offset / geo->num_chk) % geo->num_pu; + grp = (offset / (geo->num_chk * geo->num_pu)) % geo->num_grp; + + return chk << chk_off | + pu << pu_off | + grp << grp_off; +} + +static struct spdk_nvme_ctrlr * +create_controller(const struct spdk_nvme_transport_id *trid, uint32_t ns_count, + const struct spdk_ocssd_geometry_data *geo) +{ + struct spdk_nvme_ctrlr *ctrlr; + uint32_t nsid, offset; + + SPDK_CU_ASSERT_FATAL(!find_controller(trid)); + + ctrlr = calloc(1, sizeof(*ctrlr)); + SPDK_CU_ASSERT_FATAL(ctrlr != NULL); + + ctrlr->ns = calloc(ns_count, sizeof(*ctrlr->ns)); + SPDK_CU_ASSERT_FATAL(ctrlr->ns != NULL); + + ctrlr->num_chunks = geo->num_grp * geo->num_pu * geo->num_chk; + ctrlr->chunk_info = calloc(ctrlr->num_chunks, sizeof(*ctrlr->chunk_info)); + SPDK_CU_ASSERT_FATAL(ctrlr->chunk_info != NULL); + + for (nsid = 0; nsid < ns_count; ++nsid) { + ctrlr->ns[nsid].nsid = nsid + 1; + } + + ctrlr->geometry = *geo; + ctrlr->trid = *trid; + ctrlr->ns_count = ns_count; + ctrlr->admin_qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, NULL, 0); + + for (offset = 0; offset < ctrlr->num_chunks; ++offset) { + ctrlr->chunk_info[offset].cs.free = 1; + ctrlr->chunk_info[offset].slba = chunk_offset_to_lba(&ctrlr->geometry, offset); + ctrlr->chunk_info[offset].wp = ctrlr->chunk_info[offset].slba; + } + + SPDK_CU_ASSERT_FATAL(ctrlr->admin_qpair != NULL); + + LIST_INSERT_HEAD(&g_ctrlr_list, ctrlr, list); + + return ctrlr; +} + +static int +io_channel_create_cb(void *io_device, void *ctx_buf) +{ + return 0; +} + +static void +io_channel_destroy_cb(void *io_device, void *ctx_buf) +{} + +void +nvme_ctrlr_populate_namespace_done(struct nvme_async_probe_ctx *ctx, + struct nvme_bdev_ns *ns, int rc) +{ + CU_ASSERT_EQUAL(rc, 0); +} + +static struct nvme_bdev_ctrlr * +create_nvme_bdev_controller(const struct spdk_nvme_transport_id *trid, const char *name) +{ + struct spdk_nvme_ctrlr *ctrlr; + struct nvme_bdev_ctrlr *nvme_bdev_ctrlr; + uint32_t nsid; + + ctrlr = find_controller(trid); + + SPDK_CU_ASSERT_FATAL(ctrlr != NULL); + SPDK_CU_ASSERT_FATAL(!nvme_bdev_ctrlr_get(trid)); + + nvme_bdev_ctrlr = calloc(1, sizeof(*nvme_bdev_ctrlr)); + SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL); + + nvme_bdev_ctrlr->namespaces = calloc(ctrlr->ns_count, sizeof(struct nvme_bdev_ns *)); + SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr->namespaces != NULL); + + nvme_bdev_ctrlr->trid = calloc(1, sizeof(struct spdk_nvme_transport_id)); + SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr->trid != NULL); + + nvme_bdev_ctrlr->ctrlr = ctrlr; + nvme_bdev_ctrlr->num_ns = ctrlr->ns_count; + nvme_bdev_ctrlr->ref = 0; + *nvme_bdev_ctrlr->trid = *trid; + nvme_bdev_ctrlr->name = strdup(name); + + for (nsid = 0; nsid < ctrlr->ns_count; ++nsid) { + nvme_bdev_ctrlr->namespaces[nsid] = calloc(1, sizeof(struct nvme_bdev_ns)); + SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr->namespaces[nsid] != NULL); + + nvme_bdev_ctrlr->namespaces[nsid]->id = nsid + 1; + nvme_bdev_ctrlr->namespaces[nsid]->ctrlr = nvme_bdev_ctrlr; + nvme_bdev_ctrlr->namespaces[nsid]->type = NVME_BDEV_NS_OCSSD; + TAILQ_INIT(&nvme_bdev_ctrlr->namespaces[nsid]->bdevs); + + bdev_ocssd_populate_namespace(nvme_bdev_ctrlr, nvme_bdev_ctrlr->namespaces[nsid], NULL); + } + + while (spdk_thread_poll(g_thread, 0, 0) > 0) {} + + spdk_io_device_register(nvme_bdev_ctrlr, io_channel_create_cb, + 
io_channel_destroy_cb, 0, name); + + TAILQ_INSERT_TAIL(&g_nvme_bdev_ctrlrs, nvme_bdev_ctrlr, tailq); + + return nvme_bdev_ctrlr; +} + +static struct nvme_request * +alloc_request(spdk_nvme_cmd_cb cb_fn, void *cb_arg) +{ + struct nvme_request *ctx; + + ctx = calloc(1, sizeof(*ctx)); + SPDK_CU_ASSERT_FATAL(ctx != NULL); + + ctx->cb_fn = cb_fn; + ctx->cb_arg = cb_arg; + + return ctx; +} + +uint32_t +spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr) +{ + return ctrlr->ns_count; +} + +uint32_t +spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns) +{ + return ns->nsid; +} + +struct spdk_nvme_ns * +spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) +{ + if (nsid == 0 || nsid > ctrlr->ns_count) { + return NULL; + } + + return &ctrlr->ns[nsid - 1]; +} + +struct spdk_nvme_ctrlr * +spdk_nvme_connect(const struct spdk_nvme_transport_id *trid, + const struct spdk_nvme_ctrlr_opts *opts, + size_t opts_size) +{ + return find_controller(trid); +} + +int +spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr) +{ + return 0; +} + +struct spdk_bdev * +spdk_bdev_get_by_name(const char *bdev_name) +{ + struct spdk_bdev *bdev; + + SPDK_CU_ASSERT_FATAL(bdev_name != NULL); + + TAILQ_FOREACH(bdev, &g_bdev_list, internal.link) { + if (!strcmp(bdev->name, bdev_name)) { + return bdev; + } + } + + return NULL; +} + +const char * +spdk_bdev_get_name(const struct spdk_bdev *bdev) +{ + return bdev->name; +} + +int +spdk_bdev_register(struct spdk_bdev *bdev) +{ + CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(bdev->name)); + TAILQ_INSERT_TAIL(&g_bdev_list, bdev, internal.link); + + return 0; +} + +void +spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg) +{ + int rc; + + CU_ASSERT_EQUAL(spdk_bdev_get_by_name(bdev->name), bdev); + TAILQ_REMOVE(&g_bdev_list, bdev, internal.link); + + rc = bdev->fn_table->destruct(bdev->ctxt); + if (rc <= 0 && cb_fn != NULL) { + cb_fn(cb_arg, 0); + } +} + +size_t +spdk_bdev_get_zone_size(const struct spdk_bdev *bdev) +{ + return bdev->zone_size; +} + +int +spdk_nvme_ocssd_ctrlr_cmd_geometry(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, + void *payload, uint32_t payload_size, + spdk_nvme_cmd_cb cb_fn, void *cb_arg) +{ + struct spdk_nvme_cpl cpl = {}; + + CU_ASSERT_EQUAL(payload_size, sizeof(ctrlr->geometry)); + memcpy(payload, &ctrlr->geometry, sizeof(ctrlr->geometry)); + + cb_fn(cb_arg, &cpl); + + return 0; +} + +int +spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1, + const struct spdk_nvme_transport_id *trid2) +{ + return memcmp(trid1, trid2, sizeof(*trid1)); +} + +void +spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len) +{ +} + +void +spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status) +{ +} + +int32_t +spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr) +{ + return spdk_nvme_qpair_process_completions(ctrlr->admin_qpair, 0); +} + +struct spdk_nvme_qpair * +spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr, + const struct spdk_nvme_io_qpair_opts *opts, + size_t opts_size) +{ + struct spdk_nvme_qpair *qpair; + + qpair = calloc(1, sizeof(*qpair)); + SPDK_CU_ASSERT_FATAL(qpair != NULL); + + TAILQ_INIT(&qpair->requests); + return qpair; +} + +int +spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair) +{ + CU_ASSERT(TAILQ_EMPTY(&qpair->requests)); + free(qpair); + + return 0; +} + +int32_t +spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions) +{ + struct nvme_request *req; + 
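	/*
	 * This mock drains every request queued on the qpair and completes each
	 * one with an all-zero (successful) completion entry; the
	 * max_completions argument is ignored on purpose, which is sufficient
	 * for these tests.
	 */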
struct spdk_nvme_cpl cpl = {}; + int32_t num_requests = 0; + + while ((req = TAILQ_FIRST(&qpair->requests))) { + TAILQ_REMOVE(&qpair->requests, req, tailq); + + req->cb_fn(req->cb_arg, &cpl); + free(req); + + num_requests++; + } + + return num_requests; +} + +int +spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, + uint64_t lba, uint32_t lba_count, + spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, + spdk_nvme_req_reset_sgl_cb reset_sgl_fn, + spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata, + uint16_t apptag_mask, uint16_t apptag) +{ + struct nvme_request *req; + + req = alloc_request(cb_fn, cb_arg); + TAILQ_INSERT_TAIL(&qpair->requests, req, tailq); + + return 0; +} + +int +spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, + uint64_t lba, uint32_t lba_count, + spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, + spdk_nvme_req_reset_sgl_cb reset_sgl_fn, + spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata, + uint16_t apptag_mask, uint16_t apptag) +{ + struct nvme_request *req; + + req = alloc_request(cb_fn, cb_arg); + TAILQ_INSERT_TAIL(&qpair->requests, req, tailq); + + return 0; +} + +int +spdk_nvme_ocssd_ns_cmd_vector_reset(struct spdk_nvme_ns *ns, + struct spdk_nvme_qpair *qpair, + uint64_t *lba_list, uint32_t num_lbas, + struct spdk_ocssd_chunk_information_entry *chunk_info, + spdk_nvme_cmd_cb cb_fn, void *cb_arg) +{ + struct nvme_request *req; + + req = alloc_request(cb_fn, cb_arg); + TAILQ_INSERT_TAIL(&qpair->requests, req, tailq); + + return 0; +} + +static struct spdk_nvme_cpl g_chunk_info_cpl; +static bool g_zone_info_status = true; + +int +spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, + uint8_t log_page, uint32_t nsid, + void *payload, uint32_t payload_size, + uint64_t offset, + spdk_nvme_cmd_cb cb_fn, void *cb_arg) +{ + SPDK_CU_ASSERT_FATAL(offset + payload_size <= sizeof(*ctrlr->chunk_info) * ctrlr->num_chunks); + memcpy(payload, ((char *)ctrlr->chunk_info) + offset, payload_size); + + cb_fn(cb_arg, &g_chunk_info_cpl); + + return 0; +} + +static void +create_bdev_cb(const char *bdev_name, int status, void *ctx) +{ + *(int *)ctx = status; +} + +static int +create_bdev(const char *ctrlr_name, const char *bdev_name, uint32_t nsid, + const struct bdev_ocssd_range *range) +{ + int status = EFAULT; + + bdev_ocssd_create_bdev(ctrlr_name, bdev_name, nsid, range, create_bdev_cb, &status); + + while (spdk_thread_poll(g_thread, 0, 0) > 0) {} + + return status; +} + +static void +delete_nvme_bdev_controller(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr) +{ + struct nvme_bdev *nvme_bdev, *tmp; + struct nvme_bdev_ns *nvme_ns; + bool empty = true; + uint32_t nsid; + + nvme_bdev_ctrlr->destruct = true; + + for (nsid = 0; nsid < nvme_bdev_ctrlr->num_ns; ++nsid) { + nvme_ns = nvme_bdev_ctrlr->namespaces[nsid]; + + if (!TAILQ_EMPTY(&nvme_ns->bdevs)) { + TAILQ_FOREACH_SAFE(nvme_bdev, &nvme_ns->bdevs, tailq, tmp) { + spdk_bdev_unregister(&nvme_bdev->disk, NULL, NULL); + } + + empty = false; + } + + bdev_ocssd_depopulate_namespace(nvme_bdev_ctrlr->namespaces[nsid]); + } + + if (empty) { + nvme_bdev_ctrlr_destruct(nvme_bdev_ctrlr); + } + + while (spdk_thread_poll(g_thread, 0, 0) > 0) {} + + CU_ASSERT(TAILQ_EMPTY(&g_nvme_bdev_ctrlrs)); +} + +static void +test_create_controller(void) +{ + struct spdk_nvme_ctrlr *ctrlr; + struct nvme_bdev_ctrlr *nvme_bdev_ctrlr; + struct spdk_nvme_transport_id trid = { .traddr = "00:00:00" }; + struct spdk_ocssd_geometry_data geometry = {}; + struct spdk_bdev 
*bdev; + struct bdev_ocssd_range range; + const char *controller_name = "nvme0"; + const size_t ns_count = 16; + char namebuf[128]; + uint32_t nsid; + int rc; + + geometry = (struct spdk_ocssd_geometry_data) { + .clba = 512, + .num_chk = 64, + .num_pu = 8, + .num_grp = 4, + .maxoc = 69, + .maxocpu = 68, + .ws_opt = 86, + .lbaf = { + .lbk_len = 9, + .chk_len = 6, + .pu_len = 3, + .grp_len = 2, + } + }; + + ctrlr = create_controller(&trid, ns_count, &geometry); + nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name); + + for (nsid = 1; nsid <= ns_count; ++nsid) { + snprintf(namebuf, sizeof(namebuf), "%sn%"PRIu32, controller_name, nsid); + rc = create_bdev(controller_name, namebuf, nsid, NULL); + CU_ASSERT_EQUAL(rc, 0); + + bdev = spdk_bdev_get_by_name(namebuf); + SPDK_CU_ASSERT_FATAL(bdev != NULL); + CU_ASSERT_TRUE(bdev->zoned); + } + + delete_nvme_bdev_controller(nvme_bdev_ctrlr); + + /* Verify that after deletion the bdevs can still be created */ + nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name); + + for (nsid = 1; nsid <= ns_count; ++nsid) { + snprintf(namebuf, sizeof(namebuf), "%sn%"PRIu32, controller_name, nsid); + rc = create_bdev(controller_name, namebuf, nsid, NULL); + CU_ASSERT_EQUAL(rc, 0); + + bdev = spdk_bdev_get_by_name(namebuf); + SPDK_CU_ASSERT_FATAL(bdev != NULL); + CU_ASSERT_TRUE(bdev->zoned); + } + + delete_nvme_bdev_controller(nvme_bdev_ctrlr); + + nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name); + + /* Verify it's not possible to create a bdev on non-existent namespace */ + rc = create_bdev(controller_name, "invalid", ns_count + 1, NULL); + CU_ASSERT_EQUAL(rc, -ENODEV); + + delete_nvme_bdev_controller(nvme_bdev_ctrlr); + + /* Verify the correctness of parallel unit range validation */ + nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name); + + range.begin = 0; + range.end = geometry.num_grp * geometry.num_pu; + + rc = create_bdev(controller_name, "invalid", 1, &range); + CU_ASSERT_EQUAL(rc, -EINVAL); + + /* Verify it's not possible for the bdevs to overlap */ + range.begin = 0; + range.end = 16; + rc = create_bdev(controller_name, "valid", 1, &range); + CU_ASSERT_EQUAL(rc, 0); + bdev = spdk_bdev_get_by_name("valid"); + CU_ASSERT_PTR_NOT_NULL(bdev); + + range.begin = 16; + range.end = 31; + rc = create_bdev(controller_name, "invalid", 1, &range); + CU_ASSERT_EQUAL(rc, -EINVAL); + + /* But it is possible to create them without overlap */ + range.begin = 17; + range.end = 31; + rc = create_bdev(controller_name, "valid2", 1, &range); + CU_ASSERT_EQUAL(rc, 0); + bdev = spdk_bdev_get_by_name("valid2"); + CU_ASSERT_PTR_NOT_NULL(bdev); + + delete_nvme_bdev_controller(nvme_bdev_ctrlr); + + free_controller(ctrlr); +} + +static void +test_device_geometry(void) +{ + struct spdk_nvme_ctrlr *ctrlr; + struct nvme_bdev_ctrlr *nvme_bdev_ctrlr; + struct spdk_nvme_transport_id trid = { .traddr = "00:00:00" }; + const char *controller_name = "nvme0"; + const char *bdev_name = "nvme0n1"; + struct spdk_ocssd_geometry_data geometry; + struct spdk_bdev *bdev; + int rc; + + geometry = (struct spdk_ocssd_geometry_data) { + .clba = 512, + .num_chk = 64, + .num_pu = 8, + .num_grp = 4, + .maxoc = 69, + .maxocpu = 68, + .ws_opt = 86, + .lbaf = { + .lbk_len = 9, + .chk_len = 6, + .pu_len = 3, + .grp_len = 2, + } + }; + + ctrlr = create_controller(&trid, 1, &geometry); + nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name); + + rc = create_bdev(controller_name, bdev_name, 1, NULL); + CU_ASSERT_EQUAL(rc, 0); + + 
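	/*
	 * With the geometry defined above (clba = 512, num_chk = 64,
	 * num_pu = 8, num_grp = 4), the exposed bdev is expected to report
	 * 512 * 64 * 8 * 4 = 1048576 blocks, one zone per chunk
	 * (zone_size == clba), num_pu * num_grp == 32 optimal open zones,
	 * maxocpu == 68 maximum open zones and ws_opt == 86 as the write unit
	 * size, which is exactly what the assertions below check.
	 */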
bdev = spdk_bdev_get_by_name(bdev_name); + CU_ASSERT_EQUAL(bdev->blockcnt, geometry.clba * + geometry.num_chk * + geometry.num_pu * + geometry.num_grp); + CU_ASSERT_EQUAL(bdev->zone_size, geometry.clba); + CU_ASSERT_EQUAL(bdev->optimal_open_zones, geometry.num_pu * geometry.num_grp); + CU_ASSERT_EQUAL(bdev->max_open_zones, geometry.maxocpu); + CU_ASSERT_EQUAL(bdev->write_unit_size, geometry.ws_opt); + + delete_nvme_bdev_controller(nvme_bdev_ctrlr); + + free_controller(ctrlr); +} + +static uint64_t +generate_lba(const struct spdk_ocssd_geometry_data *geo, uint64_t lbk, + uint64_t chk, uint64_t pu, uint64_t grp) +{ + uint64_t lba, len; + + lba = lbk; + len = geo->lbaf.lbk_len; + CU_ASSERT(lbk < (1ull << geo->lbaf.lbk_len)); + + lba |= chk << len; + len += geo->lbaf.chk_len; + CU_ASSERT(chk < (1ull << geo->lbaf.chk_len)); + + lba |= pu << len; + len += geo->lbaf.pu_len; + CU_ASSERT(pu < (1ull << geo->lbaf.pu_len)); + + lba |= grp << len; + + return lba; +} + +static void +test_lba_translation(void) +{ + struct spdk_nvme_ctrlr *ctrlr; + struct nvme_bdev_ctrlr *nvme_bdev_ctrlr; + struct spdk_nvme_transport_id trid = { .traddr = "00:00:00" }; + const char *controller_name = "nvme0"; + const char *bdev_name = "nvme0n1"; + struct spdk_ocssd_geometry_data geometry = {}; + struct ocssd_bdev *ocssd_bdev; + struct spdk_bdev *bdev; + uint64_t lba; + int rc; + + geometry = (struct spdk_ocssd_geometry_data) { + .clba = 512, + .num_chk = 64, + .num_pu = 8, + .num_grp = 4, + .lbaf = { + .lbk_len = 9, + .chk_len = 6, + .pu_len = 3, + .grp_len = 2, + } + }; + + ctrlr = create_controller(&trid, 1, &geometry); + nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name); + + rc = create_bdev(controller_name, bdev_name, 1, NULL); + CU_ASSERT_EQUAL(rc, 0); + + bdev = spdk_bdev_get_by_name(bdev_name); + SPDK_CU_ASSERT_FATAL(bdev != NULL); + ocssd_bdev = SPDK_CONTAINEROF(bdev, struct ocssd_bdev, nvme_bdev.disk); + + lba = bdev_ocssd_to_disk_lba(ocssd_bdev, 0); + CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 0, 0, 0, 0)); + CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), 0); + + lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size - 1); + CU_ASSERT_EQUAL(lba, generate_lba(&geometry, bdev->zone_size - 1, 0, 0, 0)); + CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), bdev->zone_size - 1); + + lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size); + CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 0, 0, 1, 0)); + CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), bdev->zone_size); + + lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size * geometry.num_pu); + CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 0, 0, 0, 1)); + CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), bdev->zone_size * geometry.num_pu); + + lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size * geometry.num_pu + 68); + CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 68, 0, 0, 1)); + CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), bdev->zone_size * geometry.num_pu + 68); + + lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size + 68); + CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 68, 0, 1, 0)); + CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), bdev->zone_size + 68); + + delete_nvme_bdev_controller(nvme_bdev_ctrlr); + free_controller(ctrlr); + + geometry = (struct spdk_ocssd_geometry_data) { + .clba = 5120, + .num_chk = 501, + .num_pu = 9, + .num_grp = 1, + .lbaf = { + .lbk_len = 13, + .chk_len = 9, + .pu_len = 4, + .grp_len = 1, + } + }; + + ctrlr = 
create_controller(&trid, 1, &geometry); + nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name); + + rc = create_bdev(controller_name, bdev_name, 1, NULL); + CU_ASSERT_EQUAL(rc, 0); + + bdev = spdk_bdev_get_by_name(bdev_name); + SPDK_CU_ASSERT_FATAL(bdev != NULL); + ocssd_bdev = SPDK_CONTAINEROF(bdev, struct ocssd_bdev, nvme_bdev.disk); + + lba = bdev_ocssd_to_disk_lba(ocssd_bdev, 0); + CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 0, 0, 0, 0)); + CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), 0); + + lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size - 1); + CU_ASSERT_EQUAL(lba, generate_lba(&geometry, bdev->zone_size - 1, 0, 0, 0)); + CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), bdev->zone_size - 1); + + lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size); + CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 0, 0, 1, 0)); + CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), bdev->zone_size); + + lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size * (geometry.num_pu - 1)); + CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 0, 0, geometry.num_pu - 1, 0)); + CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), bdev->zone_size * (geometry.num_pu - 1)); + + lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size * geometry.num_pu * geometry.num_grp); + CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 0, 1, 0, 0)); + CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), + bdev->zone_size * geometry.num_pu * geometry.num_grp); + + lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size * geometry.num_pu * geometry.num_grp + 68); + CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 68, 1, 0, 0)); + CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), + bdev->zone_size * geometry.num_pu * geometry.num_grp + 68); + + delete_nvme_bdev_controller(nvme_bdev_ctrlr); + + free_controller(ctrlr); +} + +static void +punit_range_to_addr(const struct spdk_nvme_ctrlr *ctrlr, uint64_t punit, + uint64_t *grp, uint64_t *pu) +{ + const struct spdk_ocssd_geometry_data *geo = &ctrlr->geometry; + + *grp = punit / geo->num_pu; + *pu = punit % geo->num_pu; + + CU_ASSERT(*grp < geo->num_grp); +} + +static void +test_parallel_unit_range(void) +{ + struct spdk_nvme_ctrlr *ctrlr; + struct nvme_bdev_ctrlr *nvme_bdev_ctrlr; + struct spdk_nvme_transport_id trid = { .traddr = "00:00:00" }; + const char *controller_name = "nvme0"; + const char *bdev_name[] = { "nvme0n1", "nvme0n2", "nvme0n3" }; + const struct bdev_ocssd_range range[3] = { { 0, 5 }, { 6, 18 }, { 19, 23 } }; + struct ocssd_bdev *ocssd_bdev[3]; + struct spdk_ocssd_geometry_data geometry = {}; + struct spdk_bdev *bdev[3]; + uint64_t lba, i, offset, grp, pu, zone_size; + int rc; + + geometry = (struct spdk_ocssd_geometry_data) { + .clba = 500, + .num_chk = 60, + .num_pu = 8, + .num_grp = 3, + .lbaf = { + .lbk_len = 9, + .chk_len = 6, + .pu_len = 3, + .grp_len = 2, + } + }; + + ctrlr = create_controller(&trid, 1, &geometry); + nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name); + + for (i = 0; i < SPDK_COUNTOF(range); ++i) { + rc = create_bdev(controller_name, bdev_name[i], 1, &range[i]); + CU_ASSERT_EQUAL(rc, 0); + + bdev[i] = spdk_bdev_get_by_name(bdev_name[i]); + SPDK_CU_ASSERT_FATAL(bdev[i] != NULL); + ocssd_bdev[i] = SPDK_CONTAINEROF(bdev[i], struct ocssd_bdev, nvme_bdev.disk); + } + + zone_size = bdev[0]->zone_size; + CU_ASSERT_EQUAL(zone_size, bdev[1]->zone_size); + CU_ASSERT_EQUAL(zone_size, bdev[2]->zone_size); + + /* Verify the first addresses are correct */ + lba = 
bdev_ocssd_to_disk_lba(ocssd_bdev[0], 0); + CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 0, 0, 0, 0)); + CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev[0], lba), 0); + + lba = bdev_ocssd_to_disk_lba(ocssd_bdev[1], 0); + CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 0, 0, 6, 0)); + CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev[1], lba), 0); + + lba = bdev_ocssd_to_disk_lba(ocssd_bdev[2], 0); + CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 0, 0, 3, 2)); + CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev[2], lba), 0); + + /* Verify last address correctness */ + lba = bdev_ocssd_to_disk_lba(ocssd_bdev[0], bdev[0]->blockcnt - 1); + CU_ASSERT_EQUAL(lba, generate_lba(&geometry, geometry.clba - 1, geometry.num_chk - 1, 5, 0)); + CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev[0], lba), bdev[0]->blockcnt - 1); + + lba = bdev_ocssd_to_disk_lba(ocssd_bdev[1], bdev[1]->blockcnt - 1); + CU_ASSERT_EQUAL(lba, generate_lba(&geometry, geometry.clba - 1, geometry.num_chk - 1, 2, 2)); + CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev[1], lba), bdev[1]->blockcnt - 1); + + lba = bdev_ocssd_to_disk_lba(ocssd_bdev[2], bdev[2]->blockcnt - 1); + CU_ASSERT_EQUAL(lba, generate_lba(&geometry, geometry.clba - 1, geometry.num_chk - 1, 7, 2)); + CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev[2], lba), bdev[2]->blockcnt - 1); + + /* Verify correct jumps across parallel units / groups */ + for (i = 0; i < SPDK_COUNTOF(range); ++i) { + for (offset = 0; offset < bdev_ocssd_num_parallel_units(ocssd_bdev[i]); ++offset) { + punit_range_to_addr(ctrlr, range[i].begin + offset, &grp, &pu); + lba = bdev_ocssd_to_disk_lba(ocssd_bdev[i], offset * zone_size + 68); + CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 68, 0, pu, grp)); + CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev[i], lba), + offset * zone_size + 68); + } + } + + /* Verify correct address wrapping */ + for (i = 0; i < SPDK_COUNTOF(range); ++i) { + punit_range_to_addr(ctrlr, range[i].begin, &grp, &pu); + + offset = bdev_ocssd_num_parallel_units(ocssd_bdev[i]) * zone_size + 68; + lba = bdev_ocssd_to_disk_lba(ocssd_bdev[i], offset); + CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 68, 1, pu, grp)); + assert(lba == generate_lba(&geometry, 68, 1, pu, grp)); + CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev[i], lba), offset); + } + + delete_nvme_bdev_controller(nvme_bdev_ctrlr); + + free_controller(ctrlr); +} + +static void +get_zone_info_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) +{ + CU_ASSERT_EQUAL(g_zone_info_status, success); +} + +static uint64_t +generate_chunk_offset(const struct spdk_ocssd_geometry_data *geo, uint64_t chk, + uint64_t pu, uint64_t grp) +{ + return grp * geo->num_pu * geo->num_chk + + pu * geo->num_chk + chk; +} + +static struct spdk_bdev_io * +alloc_ocssd_io(void) +{ + struct spdk_bdev_io *bdev_io; + + bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct bdev_ocssd_io)); + SPDK_CU_ASSERT_FATAL(bdev_io != NULL); + + return bdev_io; +} + +static struct spdk_ocssd_chunk_information_entry * +get_chunk_info(struct spdk_nvme_ctrlr *ctrlr, uint64_t offset) +{ + assert(offset < ctrlr->num_chunks); + SPDK_CU_ASSERT_FATAL(offset < ctrlr->num_chunks); + return &ctrlr->chunk_info[offset]; +} + +enum chunk_state { + CHUNK_STATE_FREE, + CHUNK_STATE_CLOSED, + CHUNK_STATE_OPEN, + CHUNK_STATE_OFFLINE +}; + +static void +set_chunk_state(struct spdk_ocssd_chunk_information_entry *chunk, enum chunk_state state) +{ + memset(&chunk->cs, 0, sizeof(chunk->cs)); + switch (state) { + case CHUNK_STATE_FREE: + 
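		/* A chunk marked free here is reported as
		 * SPDK_BDEV_ZONE_STATE_EMPTY by the zone-info path exercised
		 * in test_get_zone_info() below. */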
chunk->cs.free = 1; + break; + case CHUNK_STATE_CLOSED: + chunk->cs.closed = 1; + break; + case CHUNK_STATE_OPEN: + chunk->cs.open = 1; + break; + case CHUNK_STATE_OFFLINE: + chunk->cs.offline = 1; + break; + default: + SPDK_CU_ASSERT_FATAL(0 && "Invalid state"); + } +} + +static void +test_get_zone_info(void) +{ + struct spdk_nvme_ctrlr *ctrlr; + struct nvme_bdev_ctrlr *nvme_bdev_ctrlr; + struct spdk_nvme_transport_id trid = { .traddr = "00:00:00" }; + const char *controller_name = "nvme0"; + const char *bdev_name = "nvme0n1"; + struct spdk_bdev *bdev; + struct spdk_bdev_io *bdev_io; +#define MAX_ZONE_INFO_COUNT 64 + struct spdk_bdev_zone_info zone_info[MAX_ZONE_INFO_COUNT]; + struct spdk_ocssd_chunk_information_entry *chunk_info; + struct spdk_ocssd_geometry_data geometry; + uint64_t chunk_offset; + int rc, offset; + + geometry = (struct spdk_ocssd_geometry_data) { + .clba = 512, + .num_chk = 64, + .num_pu = 8, + .num_grp = 4, + .lbaf = { + .lbk_len = 9, + .chk_len = 6, + .pu_len = 3, + .grp_len = 2, + } + }; + + ctrlr = create_controller(&trid, 1, &geometry); + nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name); + + rc = create_bdev(controller_name, bdev_name, 1, NULL); + CU_ASSERT_EQUAL(rc, 0); + + bdev = spdk_bdev_get_by_name(bdev_name); + SPDK_CU_ASSERT_FATAL(bdev != NULL); + + bdev_io = alloc_ocssd_io(); + bdev_io->internal.cb = get_zone_info_cb; + bdev_io->bdev = bdev; + + /* Verify empty zone */ + bdev_io->u.zone_mgmt.zone_id = 0; + bdev_io->u.zone_mgmt.num_zones = 1; + bdev_io->u.zone_mgmt.buf = &zone_info; + chunk_info = get_chunk_info(ctrlr, 0); + set_chunk_state(chunk_info, CHUNK_STATE_FREE); + chunk_info->wp = 0; + + rc = bdev_ocssd_get_zone_info(NULL, bdev_io); + CU_ASSERT_EQUAL(rc, 0); + + CU_ASSERT_EQUAL(zone_info[0].state, SPDK_BDEV_ZONE_STATE_EMPTY); + CU_ASSERT_EQUAL(zone_info[0].zone_id, 0); + CU_ASSERT_EQUAL(zone_info[0].write_pointer, 0); + CU_ASSERT_EQUAL(zone_info[0].capacity, geometry.clba); + + /* Verify open zone */ + bdev_io->u.zone_mgmt.zone_id = bdev->zone_size; + bdev_io->u.zone_mgmt.num_zones = 1; + bdev_io->u.zone_mgmt.buf = &zone_info; + chunk_info = get_chunk_info(ctrlr, generate_chunk_offset(&geometry, 0, 1, 0)); + set_chunk_state(chunk_info, CHUNK_STATE_OPEN); + chunk_info->wp = chunk_info->slba + 68; + chunk_info->cnlb = 511; + chunk_info->ct.size_deviate = 1; + + rc = bdev_ocssd_get_zone_info(NULL, bdev_io); + CU_ASSERT_EQUAL(rc, 0); + + CU_ASSERT_EQUAL(zone_info[0].state, SPDK_BDEV_ZONE_STATE_OPEN); + CU_ASSERT_EQUAL(zone_info[0].zone_id, bdev->zone_size); + CU_ASSERT_EQUAL(zone_info[0].write_pointer, bdev->zone_size + 68); + CU_ASSERT_EQUAL(zone_info[0].capacity, chunk_info->cnlb); + + /* Verify offline zone at 2nd chunk */ + bdev_io->u.zone_mgmt.zone_id = bdev->zone_size * geometry.num_pu * geometry.num_grp; + bdev_io->u.zone_mgmt.num_zones = 1; + bdev_io->u.zone_mgmt.buf = &zone_info; + chunk_info = get_chunk_info(ctrlr, generate_chunk_offset(&geometry, 1, 0, 0)); + set_chunk_state(chunk_info, CHUNK_STATE_OFFLINE); + chunk_info->wp = chunk_info->slba; + + rc = bdev_ocssd_get_zone_info(NULL, bdev_io); + CU_ASSERT_EQUAL(rc, 0); + + CU_ASSERT_EQUAL(zone_info[0].state, SPDK_BDEV_ZONE_STATE_OFFLINE); + CU_ASSERT_EQUAL(zone_info[0].zone_id, bdev_io->u.zone_mgmt.zone_id); + CU_ASSERT_EQUAL(zone_info[0].write_pointer, bdev_io->u.zone_mgmt.zone_id); + + /* Verify multiple zones at a time */ + bdev_io->u.zone_mgmt.zone_id = 0; + bdev_io->u.zone_mgmt.num_zones = MAX_ZONE_INFO_COUNT; + bdev_io->u.zone_mgmt.buf = &zone_info; + + for (offset 
= 0; offset < MAX_ZONE_INFO_COUNT; ++offset) { + chunk_offset = generate_chunk_offset(&geometry, + (offset / (geometry.num_grp * geometry.num_pu)) % geometry.num_chk, + offset % geometry.num_pu, + (offset / geometry.num_pu) % geometry.num_grp); + + + chunk_info = get_chunk_info(ctrlr, chunk_offset); + set_chunk_state(chunk_info, CHUNK_STATE_OPEN); + chunk_info->wp = chunk_info->slba + 68; + chunk_info->ct.size_deviate = 0; + } + + rc = bdev_ocssd_get_zone_info(NULL, bdev_io); + CU_ASSERT_EQUAL(rc, 0); + + for (offset = 0; offset < MAX_ZONE_INFO_COUNT; ++offset) { + CU_ASSERT_EQUAL(zone_info[offset].state, SPDK_BDEV_ZONE_STATE_OPEN); + CU_ASSERT_EQUAL(zone_info[offset].zone_id, bdev->zone_size * offset); + CU_ASSERT_EQUAL(zone_info[offset].write_pointer, bdev->zone_size * offset + 68); + CU_ASSERT_EQUAL(zone_info[offset].capacity, geometry.clba); + } + + /* Verify misaligned start zone LBA */ + bdev_io->u.zone_mgmt.zone_id = 1; + bdev_io->u.zone_mgmt.num_zones = MAX_ZONE_INFO_COUNT; + bdev_io->u.zone_mgmt.buf = &zone_info; + + rc = bdev_ocssd_get_zone_info(NULL, bdev_io); + CU_ASSERT_EQUAL(rc, -EINVAL); + + /* Verify correct NVMe error forwarding */ + bdev_io->u.zone_mgmt.zone_id = 0; + bdev_io->u.zone_mgmt.num_zones = MAX_ZONE_INFO_COUNT; + bdev_io->u.zone_mgmt.buf = &zone_info; + chunk_info = get_chunk_info(ctrlr, 0); + set_chunk_state(chunk_info, CHUNK_STATE_FREE); + + rc = bdev_ocssd_get_zone_info(NULL, bdev_io); + CU_ASSERT_EQUAL(rc, 0); + g_chunk_info_cpl = (struct spdk_nvme_cpl) { + .status = { + .sct = SPDK_NVME_SCT_GENERIC, + .sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR + } + }; + g_zone_info_status = false; + + g_chunk_info_cpl = (struct spdk_nvme_cpl) {}; + g_zone_info_status = true; + + delete_nvme_bdev_controller(nvme_bdev_ctrlr); + + free(bdev_io); + free_controller(ctrlr); +} + +int +main(int argc, const char **argv) +{ + CU_pSuite suite = NULL; + unsigned int num_failures; + + CU_set_error_action(CUEA_ABORT); + CU_initialize_registry(); + + suite = CU_add_suite("ocssd", NULL, NULL); + + CU_ADD_TEST(suite, test_create_controller); + CU_ADD_TEST(suite, test_device_geometry); + CU_ADD_TEST(suite, test_lba_translation); + CU_ADD_TEST(suite, test_parallel_unit_range); + CU_ADD_TEST(suite, test_get_zone_info); + + g_thread = spdk_thread_create("test", NULL); + spdk_set_thread(g_thread); + + CU_basic_set_mode(CU_BRM_VERBOSE); + CU_basic_run_tests(); + num_failures = CU_get_number_of_failures(); + + spdk_thread_exit(g_thread); + while (!spdk_thread_is_exited(g_thread)) { + spdk_thread_poll(g_thread, 0, 0); + } + spdk_thread_destroy(g_thread); + + CU_cleanup_registry(); + + return num_failures; +} diff --git a/src/spdk/test/unit/lib/bdev/bdev_zone.c/.gitignore b/src/spdk/test/unit/lib/bdev/bdev_zone.c/.gitignore new file mode 100644 index 000000000..99af16132 --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/bdev_zone.c/.gitignore @@ -0,0 +1 @@ +bdev_zone_ut diff --git a/src/spdk/test/unit/lib/bdev/bdev_zone.c/Makefile b/src/spdk/test/unit/lib/bdev/bdev_zone.c/Makefile new file mode 100644 index 000000000..52dc65f23 --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/bdev_zone.c/Makefile @@ -0,0 +1,38 @@ +# +# BSD LICENSE +# +# Copyright (c) Intel Corporation. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..) + +TEST_FILE = bdev_zone_ut.c + +include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk diff --git a/src/spdk/test/unit/lib/bdev/bdev_zone.c/bdev_zone_ut.c b/src/spdk/test/unit/lib/bdev/bdev_zone.c/bdev_zone_ut.c new file mode 100644 index 000000000..589e105b9 --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/bdev_zone.c/bdev_zone_ut.c @@ -0,0 +1,429 @@ +/*- + * BSD LICENSE + * + * Copyright (c) Intel Corporation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE AiRE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "spdk/stdinc.h" +#include "spdk_cunit.h" +#include "spdk/env.h" +#include "spdk_internal/mock.h" + +#include "bdev/bdev_zone.c" + +DEFINE_STUB_V(bdev_io_init, (struct spdk_bdev_io *bdev_io, + struct spdk_bdev *bdev, void *cb_arg, + spdk_bdev_io_completion_cb cb)); + +DEFINE_STUB_V(bdev_io_submit, (struct spdk_bdev_io *bdev_io)); + +/* Construct zone_io_operation structure */ +struct zone_io_operation { + struct spdk_bdev_desc *desc; + struct spdk_io_channel *ch; + struct iovec iov; + union { + struct { + uint64_t zone_id; + size_t num_zones; + enum spdk_bdev_zone_action zone_action; + void *buf; + struct spdk_bdev_zone_info *info_; + } zone_mgmt; + struct { + void *md_buf; + struct iovec *iovs; + int iovcnt; + uint64_t num_blocks; + uint64_t offset_blocks; + uint64_t start_lba; + } bdev; + }; + spdk_bdev_io_completion_cb cb; + void *cb_arg; + enum spdk_bdev_io_type io_type; +}; + +/* Global variables */ +struct zone_io_operation *g_zone_op = NULL; +static struct spdk_bdev *g_bdev = NULL; +static struct spdk_bdev_io *g_bdev_io = NULL; +static struct spdk_bdev_zone_info g_zone_info = {0}; +static enum spdk_bdev_zone_action g_zone_action = SPDK_BDEV_ZONE_OPEN; +static enum spdk_bdev_zone_action g_unexpected_zone_action = SPDK_BDEV_ZONE_CLOSE; +static enum spdk_bdev_io_type g_io_type = SPDK_BDEV_IO_TYPE_GET_ZONE_INFO; + +static uint64_t g_expected_zone_id; +static uint64_t g_expected_num_zones; +static uint64_t g_unexpected_zone_id; +static uint64_t g_unexpected_num_zones; +static uint64_t g_num_blocks; +static uint64_t g_unexpected_num_blocks; +static uint64_t g_start_lba; +static uint64_t g_unexpected_start_lba; +static uint64_t g_bdev_blocklen; +static uint64_t g_unexpected_bdev_blocklen; +static bool g_append_with_md; +static int g_unexpected_iovcnt; +static void *g_md_buf; +static void *g_unexpetced_md_buf; +static void *g_buf; +static void *g_unexpected_buf; + +static int +test_setup(void) +{ + /* Initiate expected and unexpected value here */ + g_expected_zone_id = 0x1000; + g_expected_num_zones = 1024; + g_unexpected_zone_id = 0xFFFF; + g_unexpected_num_zones = 0; + g_num_blocks = 4096 * 1024; + g_unexpected_num_blocks = 0; + g_start_lba = 4096; + g_unexpected_start_lba = 0; + g_bdev_blocklen = 4096; + g_unexpected_bdev_blocklen = 0; + g_append_with_md = false; + g_unexpected_iovcnt = 1000; + g_md_buf = (void *)0xEFDCFEDE; + g_unexpetced_md_buf = (void *)0xFECDEFDC; + g_buf = (void *)0xFEEDBEEF; + g_unexpected_buf = (void *)0xDEADBEEF; + + return 0; +} + +static int +test_cleanup(void) +{ + return 0; +} + +static void +start_operation(void) +{ + g_zone_op = calloc(1, sizeof(struct zone_io_operation)); + SPDK_CU_ASSERT_FATAL(g_zone_op != NULL); + + switch (g_io_type) { + case SPDK_BDEV_IO_TYPE_ZONE_APPEND: + g_zone_op->bdev.iovs = &g_zone_op->iov; + g_zone_op->bdev.iovs[0].iov_base = g_unexpected_buf; + g_zone_op->bdev.iovs[0].iov_len = g_unexpected_num_blocks * g_unexpected_bdev_blocklen; + g_zone_op->bdev.iovcnt = g_unexpected_iovcnt; + g_zone_op->bdev.md_buf = g_unexpetced_md_buf; + g_zone_op->bdev.num_blocks = g_unexpected_num_blocks; + g_zone_op->bdev.offset_blocks = g_unexpected_zone_id; + g_zone_op->bdev.start_lba = g_unexpected_start_lba; + break; + default: + g_zone_op->bdev.iovcnt = 0; + g_zone_op->zone_mgmt.zone_id = g_unexpected_zone_id; + g_zone_op->zone_mgmt.num_zones = g_unexpected_num_zones; + g_zone_op->zone_mgmt.zone_action = g_unexpected_zone_action; + g_zone_op->zone_mgmt.buf = g_unexpected_buf; + break; + } +} + +static void +stop_operation(void) +{ 
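	/*
	 * Release everything handed out by the mocks during a single test
	 * case: the bdev_io allocated in bdev_channel_get_io(), the bdev
	 * allocated in spdk_bdev_desc_get_bdev() and the zone_io_operation
	 * allocated in start_operation().
	 */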
+ free(g_bdev_io); + free(g_bdev); + free(g_zone_op); + g_bdev_io = NULL; + g_bdev = NULL; + g_zone_op = NULL; +} + +struct spdk_bdev_io * +bdev_channel_get_io(struct spdk_bdev_channel *channel) +{ + struct spdk_bdev_io *bdev_io; + + bdev_io = calloc(1, sizeof(struct spdk_bdev_io)); + SPDK_CU_ASSERT_FATAL(bdev_io != NULL); + + bdev_io->internal.ch = channel; + bdev_io->type = g_io_type; + + CU_ASSERT(g_zone_op != NULL); + + switch (g_io_type) { + case SPDK_BDEV_IO_TYPE_GET_ZONE_INFO: + case SPDK_BDEV_IO_TYPE_ZONE_MANAGEMENT: + bdev_io->u.bdev.iovcnt = 0; + bdev_io->u.zone_mgmt.zone_id = g_zone_op->zone_mgmt.zone_id; + bdev_io->u.zone_mgmt.num_zones = g_zone_op->zone_mgmt.num_zones; + bdev_io->u.zone_mgmt.zone_action = g_zone_op->zone_mgmt.zone_action; + bdev_io->u.zone_mgmt.buf = g_zone_op->zone_mgmt.buf; + break; + case SPDK_BDEV_IO_TYPE_ZONE_APPEND: + bdev_io->u.bdev.iovs = g_zone_op->bdev.iovs; + bdev_io->u.bdev.iovs[0].iov_base = g_zone_op->bdev.iovs[0].iov_base; + bdev_io->u.bdev.iovs[0].iov_len = g_zone_op->bdev.iovs[0].iov_len; + bdev_io->u.bdev.iovcnt = g_zone_op->bdev.iovcnt; + bdev_io->u.bdev.md_buf = g_zone_op->bdev.md_buf; + bdev_io->u.bdev.num_blocks = g_zone_op->bdev.num_blocks; + bdev_io->u.bdev.offset_blocks = g_zone_op->bdev.offset_blocks; + break; + default: + CU_ASSERT(0); + } + + g_bdev_io = bdev_io; + + return bdev_io; +} + +int +spdk_bdev_open(struct spdk_bdev *bdev, bool write, spdk_bdev_remove_cb_t remove_cb, + void *remove_ctx, struct spdk_bdev_desc **_desc) +{ + *_desc = (void *)0x1; + return 0; +} + +struct spdk_io_channel * +spdk_bdev_get_io_channel(struct spdk_bdev_desc *desc) +{ + return (struct spdk_io_channel *)0x1; +} + +void +spdk_put_io_channel(struct spdk_io_channel *ch) +{ + CU_ASSERT(ch == (void *)1); +} + +struct spdk_bdev * +spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc) +{ + struct spdk_bdev *bdev; + + bdev = calloc(1, sizeof(struct spdk_bdev)); + SPDK_CU_ASSERT_FATAL(bdev != NULL); + + if (g_io_type == SPDK_BDEV_IO_TYPE_ZONE_APPEND) { + bdev->blocklen = g_bdev_blocklen; + } + + g_bdev = bdev; + + return bdev; +} + +static void +test_get_zone_size(void) +{ + struct spdk_bdev bdev = {}; + uint64_t get_zone_size; + + bdev.zone_size = 1024 * 4096; + + get_zone_size = spdk_bdev_get_zone_size(&bdev); + CU_ASSERT(get_zone_size == 1024 * 4096); +} + +static void +test_get_max_open_zones(void) +{ + struct spdk_bdev bdev = {}; + uint32_t get_max_open_zones; + + bdev.max_open_zones = 8192; + + get_max_open_zones = spdk_bdev_get_max_open_zones(&bdev); + CU_ASSERT(get_max_open_zones == 8192); +} + +static void +test_get_optimal_open_zones(void) +{ + struct spdk_bdev bdev = {}; + uint32_t get_optimal_open_zones; + + bdev.optimal_open_zones = 4096; + + get_optimal_open_zones = spdk_bdev_get_optimal_open_zones(&bdev); + CU_ASSERT(get_optimal_open_zones == 4096); +} + +static void +test_bdev_io_get_append_location(void) +{ + struct spdk_bdev_io bdev_io = {}; + uint64_t get_offset_blocks; + + bdev_io.u.bdev.offset_blocks = 1024 * 10; + + get_offset_blocks = spdk_bdev_io_get_append_location(&bdev_io); + CU_ASSERT(get_offset_blocks == 1024 * 10); +} + +static void +test_zone_get_operation(void) +{ + test_get_zone_size(); + test_get_max_open_zones(); + test_get_optimal_open_zones(); +} + +#define DECLARE_VIRTUAL_BDEV_START() \ + struct spdk_bdev bdev; \ + struct spdk_io_channel *ch; \ + struct spdk_bdev_desc *desc = NULL; \ + int rc; \ + memset(&bdev, 0, sizeof(bdev)); \ + bdev.name = "bdev_zone_ut"; \ + rc = spdk_bdev_open(&bdev, true, NULL, NULL, &desc); \ + 
CU_ASSERT(rc == 0); \ + SPDK_CU_ASSERT_FATAL(desc != NULL); \ + ch = spdk_bdev_get_io_channel(desc); \ + CU_ASSERT(ch != NULL);\ + +static void +test_bdev_zone_get_info(void) +{ + DECLARE_VIRTUAL_BDEV_START(); + + g_zone_info.zone_id = g_expected_zone_id; + g_io_type = SPDK_BDEV_IO_TYPE_GET_ZONE_INFO; + + start_operation(); + + rc = spdk_bdev_get_zone_info(desc, ch, g_expected_zone_id, g_expected_num_zones, &g_zone_info, NULL, + NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_bdev_io->type == SPDK_BDEV_IO_TYPE_GET_ZONE_INFO); + CU_ASSERT(g_bdev_io->u.zone_mgmt.zone_id == g_expected_zone_id); + CU_ASSERT(g_bdev_io->u.zone_mgmt.num_zones == g_expected_num_zones); + CU_ASSERT(g_bdev_io->u.zone_mgmt.buf == &g_zone_info); + + stop_operation(); +} + +static void +test_bdev_zone_management(void) +{ + DECLARE_VIRTUAL_BDEV_START(); + + g_zone_info.zone_id = g_expected_zone_id; + g_io_type = SPDK_BDEV_IO_TYPE_ZONE_MANAGEMENT; + + start_operation(); + + rc = spdk_bdev_zone_management(desc, ch, g_expected_zone_id, g_zone_action, NULL, + NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_bdev_io->type == SPDK_BDEV_IO_TYPE_ZONE_MANAGEMENT); + CU_ASSERT(g_bdev_io->u.zone_mgmt.zone_id == g_expected_zone_id); + CU_ASSERT(g_bdev_io->u.zone_mgmt.zone_action == g_zone_action); + CU_ASSERT(g_bdev_io->u.zone_mgmt.num_zones == 1); + + stop_operation(); +} + +static void +test_bdev_zone_append(void) +{ + DECLARE_VIRTUAL_BDEV_START(); + + g_io_type = SPDK_BDEV_IO_TYPE_ZONE_APPEND; + g_append_with_md = false; + + start_operation(); + + rc = spdk_bdev_zone_append(desc, ch, g_buf, g_start_lba, g_num_blocks, NULL, NULL); + + CU_ASSERT(rc == 0); + CU_ASSERT(g_bdev_io->internal.desc == desc); + CU_ASSERT(g_bdev_io->type == SPDK_BDEV_IO_TYPE_ZONE_APPEND); + CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == g_buf); + CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_len == g_num_blocks * g_bdev_blocklen); + CU_ASSERT(g_bdev_io->u.bdev.iovcnt == 1); + CU_ASSERT(g_bdev_io->u.bdev.md_buf == NULL); + CU_ASSERT(g_bdev_io->u.bdev.num_blocks == g_num_blocks); + CU_ASSERT(g_bdev_io->u.bdev.offset_blocks == g_expected_zone_id); + + stop_operation(); +} + +static void +test_bdev_zone_append_with_md(void) +{ + DECLARE_VIRTUAL_BDEV_START(); + + g_io_type = SPDK_BDEV_IO_TYPE_ZONE_APPEND; + g_append_with_md = true; + + start_operation(); + + rc = spdk_bdev_zone_append_with_md(desc, ch, g_buf, g_md_buf, g_start_lba, g_num_blocks, NULL, + NULL); + + CU_ASSERT(rc == 0); + CU_ASSERT(g_bdev_io->internal.desc == desc); + CU_ASSERT(g_bdev_io->type == SPDK_BDEV_IO_TYPE_ZONE_APPEND); + CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == g_buf); + CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_len == g_num_blocks * g_bdev_blocklen); + CU_ASSERT(g_bdev_io->u.bdev.iovcnt == 1); + CU_ASSERT(g_bdev_io->u.bdev.md_buf == g_md_buf); + CU_ASSERT(g_bdev_io->u.bdev.num_blocks == g_num_blocks); + CU_ASSERT(g_bdev_io->u.bdev.offset_blocks == g_expected_zone_id); + + stop_operation(); +} + +int +main(int argc, char **argv) +{ + CU_pSuite suite = NULL; + unsigned int num_failures; + + CU_set_error_action(CUEA_ABORT); + CU_initialize_registry(); + + suite = CU_add_suite("zone", test_setup, test_cleanup); + CU_ADD_TEST(suite, test_zone_get_operation); + CU_ADD_TEST(suite, test_bdev_zone_get_info); + CU_ADD_TEST(suite, test_bdev_zone_management); + CU_ADD_TEST(suite, test_bdev_zone_append); + CU_ADD_TEST(suite, test_bdev_zone_append_with_md); + CU_ADD_TEST(suite, test_bdev_io_get_append_location); + + CU_basic_set_mode(CU_BRM_VERBOSE); + CU_basic_run_tests(); + num_failures = 
CU_get_number_of_failures(); + CU_cleanup_registry(); + return num_failures; +} diff --git a/src/spdk/test/unit/lib/bdev/compress.c/.gitignore b/src/spdk/test/unit/lib/bdev/compress.c/.gitignore new file mode 100644 index 000000000..bac80ced6 --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/compress.c/.gitignore @@ -0,0 +1 @@ +compress_ut diff --git a/src/spdk/test/unit/lib/bdev/compress.c/Makefile b/src/spdk/test/unit/lib/bdev/compress.c/Makefile new file mode 100644 index 000000000..6f33eef39 --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/compress.c/Makefile @@ -0,0 +1,39 @@ +# +# BSD LICENSE +# +# Copyright (c) Intel Corporation. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..) + +TEST_FILE = compress_ut.c +CFLAGS += $(ENV_CFLAGS) + +include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk diff --git a/src/spdk/test/unit/lib/bdev/compress.c/compress_ut.c b/src/spdk/test/unit/lib/bdev/compress.c/compress_ut.c new file mode 100644 index 000000000..53c14310c --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/compress.c/compress_ut.c @@ -0,0 +1,1140 @@ +/*- + * BSD LICENSE + * + * Copyright (c) Intel Corporation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "spdk_cunit.h" +/* We have our own mock for this */ +#define UNIT_TEST_NO_VTOPHYS +#include "common/lib/test_env.c" +#include "spdk_internal/mock.h" +#include "unit/lib/json_mock.c" +#include "spdk/reduce.h" + +#include <rte_compressdev.h> + +/* There will be one if the data perfectly matches the chunk size, + * or there could be an offset into the data and a remainder after + * the data or both for a max of 3. + */ +#define UT_MBUFS_PER_OP 3 +/* For testing the crossing of a huge page boundary on address translation, + * we'll have an extra one but we only test on the source side. + */ +#define UT_MBUFS_PER_OP_BOUND_TEST 4 + +struct spdk_bdev_io *g_bdev_io; +struct spdk_io_channel *g_io_ch; +struct rte_comp_op g_comp_op[2]; +struct vbdev_compress g_comp_bdev; +struct comp_device_qp g_device_qp; +struct compress_dev g_device; +struct rte_compressdev_capabilities g_cdev_cap; +static struct rte_mbuf *g_src_mbufs[UT_MBUFS_PER_OP_BOUND_TEST]; +static struct rte_mbuf *g_dst_mbufs[UT_MBUFS_PER_OP]; +static struct rte_mbuf g_expected_src_mbufs[UT_MBUFS_PER_OP_BOUND_TEST]; +static struct rte_mbuf g_expected_dst_mbufs[UT_MBUFS_PER_OP]; +struct comp_bdev_io *g_io_ctx; +struct comp_io_channel *g_comp_ch; + +/* Those functions are defined as static inline in DPDK, so we can't + * mock them straight away. We use defines to redirect them into + * our custom functions. 
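 *
 * For example, every call to rte_pktmbuf_chain() made by vbdev_compress.c
 * (which is #included further down, after these defines) resolves to
 * mock_rte_pktmbuf_chain() below. A minimal sketch of the pattern, using a
 * hypothetical rte_foo() purely for illustration:
 *
 *	static int mock_rte_foo(int arg);
 *	#define rte_foo mock_rte_foo
 *	static int mock_rte_foo(int arg) { return 0; }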
+ */ + +static void mock_rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr, rte_iova_t buf_iova, + uint16_t buf_len, struct rte_mbuf_ext_shared_info *shinfo); +#define rte_pktmbuf_attach_extbuf mock_rte_pktmbuf_attach_extbuf +static void mock_rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr, rte_iova_t buf_iova, + uint16_t buf_len, struct rte_mbuf_ext_shared_info *shinfo) +{ + assert(m != NULL); + m->buf_addr = buf_addr; + m->buf_iova = buf_iova; + m->buf_len = buf_len; + m->data_len = m->pkt_len = 0; +} + +static char *mock_rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len); +#define rte_pktmbuf_append mock_rte_pktmbuf_append +static char *mock_rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len) +{ + m->pkt_len = m->pkt_len + len; + return NULL; +} + +static inline int mock_rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail); +#define rte_pktmbuf_chain mock_rte_pktmbuf_chain +static inline int mock_rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail) +{ + struct rte_mbuf *cur_tail; + + cur_tail = rte_pktmbuf_lastseg(head); + cur_tail->next = tail; + + return 0; +} + +uint16_t ut_max_nb_queue_pairs = 0; +void __rte_experimental mock_rte_compressdev_info_get(uint8_t dev_id, + struct rte_compressdev_info *dev_info); +#define rte_compressdev_info_get mock_rte_compressdev_info_get +void __rte_experimental +mock_rte_compressdev_info_get(uint8_t dev_id, struct rte_compressdev_info *dev_info) +{ + dev_info->max_nb_queue_pairs = ut_max_nb_queue_pairs; + dev_info->capabilities = &g_cdev_cap; + dev_info->driver_name = "compress_isal"; +} + +int ut_rte_compressdev_configure = 0; +int __rte_experimental mock_rte_compressdev_configure(uint8_t dev_id, + struct rte_compressdev_config *config); +#define rte_compressdev_configure mock_rte_compressdev_configure +int __rte_experimental +mock_rte_compressdev_configure(uint8_t dev_id, struct rte_compressdev_config *config) +{ + return ut_rte_compressdev_configure; +} + +int ut_rte_compressdev_queue_pair_setup = 0; +int __rte_experimental mock_rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id, + uint32_t max_inflight_ops, int socket_id); +#define rte_compressdev_queue_pair_setup mock_rte_compressdev_queue_pair_setup +int __rte_experimental +mock_rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id, + uint32_t max_inflight_ops, int socket_id) +{ + return ut_rte_compressdev_queue_pair_setup; +} + +int ut_rte_compressdev_start = 0; +int __rte_experimental mock_rte_compressdev_start(uint8_t dev_id); +#define rte_compressdev_start mock_rte_compressdev_start +int __rte_experimental +mock_rte_compressdev_start(uint8_t dev_id) +{ + return ut_rte_compressdev_start; +} + +int ut_rte_compressdev_private_xform_create = 0; +int __rte_experimental mock_rte_compressdev_private_xform_create(uint8_t dev_id, + const struct rte_comp_xform *xform, void **private_xform); +#define rte_compressdev_private_xform_create mock_rte_compressdev_private_xform_create +int __rte_experimental +mock_rte_compressdev_private_xform_create(uint8_t dev_id, + const struct rte_comp_xform *xform, void **private_xform) +{ + return ut_rte_compressdev_private_xform_create; +} + +uint8_t ut_rte_compressdev_count = 0; +uint8_t __rte_experimental mock_rte_compressdev_count(void); +#define rte_compressdev_count mock_rte_compressdev_count +uint8_t __rte_experimental +mock_rte_compressdev_count(void) +{ + return ut_rte_compressdev_count; +} + +struct rte_mempool *ut_rte_comp_op_pool_create = NULL; +struct rte_mempool 
*__rte_experimental mock_rte_comp_op_pool_create(const char *name, + unsigned int nb_elts, unsigned int cache_size, uint16_t user_size, + int socket_id); +#define rte_comp_op_pool_create mock_rte_comp_op_pool_create +struct rte_mempool *__rte_experimental +mock_rte_comp_op_pool_create(const char *name, unsigned int nb_elts, + unsigned int cache_size, uint16_t user_size, int socket_id) +{ + return ut_rte_comp_op_pool_create; +} + +void mock_rte_pktmbuf_free(struct rte_mbuf *m); +#define rte_pktmbuf_free mock_rte_pktmbuf_free +void mock_rte_pktmbuf_free(struct rte_mbuf *m) +{ +} + +static bool ut_boundary_alloc = false; +static int ut_rte_pktmbuf_alloc_bulk = 0; +int mock_rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs, + unsigned count); +#define rte_pktmbuf_alloc_bulk mock_rte_pktmbuf_alloc_bulk +int mock_rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs, + unsigned count) +{ + int i; + + /* This mocked function only supports the alloc of up to 3 src and 3 dst. */ + ut_rte_pktmbuf_alloc_bulk += count; + + if (ut_rte_pktmbuf_alloc_bulk == 1) { + /* allocation of an extra mbuf for boundary cross test */ + ut_boundary_alloc = true; + g_src_mbufs[UT_MBUFS_PER_OP_BOUND_TEST - 1]->next = NULL; + *mbufs = g_src_mbufs[UT_MBUFS_PER_OP_BOUND_TEST - 1]; + ut_rte_pktmbuf_alloc_bulk = 0; + } else if (ut_rte_pktmbuf_alloc_bulk == UT_MBUFS_PER_OP) { + /* first test allocation, src mbufs */ + for (i = 0; i < UT_MBUFS_PER_OP; i++) { + g_src_mbufs[i]->next = NULL; + *mbufs++ = g_src_mbufs[i]; + } + } else if (ut_rte_pktmbuf_alloc_bulk == UT_MBUFS_PER_OP * 2) { + /* second test allocation, dst mbufs */ + for (i = 0; i < UT_MBUFS_PER_OP; i++) { + g_dst_mbufs[i]->next = NULL; + *mbufs++ = g_dst_mbufs[i]; + } + ut_rte_pktmbuf_alloc_bulk = 0; + } else { + return -1; + } + return 0; +} + +struct rte_mempool * +rte_pktmbuf_pool_create(const char *name, unsigned n, unsigned cache_size, + uint16_t priv_size, uint16_t data_room_size, int socket_id) +{ + struct spdk_mempool *tmp; + + tmp = spdk_mempool_create("mbuf_mp", 1024, sizeof(struct rte_mbuf), + SPDK_MEMPOOL_DEFAULT_CACHE_SIZE, + SPDK_ENV_SOCKET_ID_ANY); + + return (struct rte_mempool *)tmp; +} + +void +rte_mempool_free(struct rte_mempool *mp) +{ + if (mp) { + spdk_mempool_free((struct spdk_mempool *)mp); + } +} + +static int ut_spdk_reduce_vol_op_complete_err = 0; +void +spdk_reduce_vol_writev(struct spdk_reduce_vol *vol, struct iovec *iov, int iovcnt, + uint64_t offset, uint64_t length, spdk_reduce_vol_op_complete cb_fn, + void *cb_arg) +{ + cb_fn(cb_arg, ut_spdk_reduce_vol_op_complete_err); +} + +void +spdk_reduce_vol_readv(struct spdk_reduce_vol *vol, struct iovec *iov, int iovcnt, + uint64_t offset, uint64_t length, spdk_reduce_vol_op_complete cb_fn, + void *cb_arg) +{ + cb_fn(cb_arg, ut_spdk_reduce_vol_op_complete_err); +} + +#include "bdev/compress/vbdev_compress.c" + +/* SPDK stubs */ +DEFINE_STUB(spdk_bdev_get_aliases, const struct spdk_bdev_aliases_list *, + (const struct spdk_bdev *bdev), NULL); +DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module)); +DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *g_bdev_io)); +DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev, + enum spdk_bdev_io_type io_type), 0); +DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev)); +DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc)); +DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), 0); 
+DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc), 0); +DEFINE_STUB_V(spdk_bdev_unregister, (struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, + void *cb_arg)); +DEFINE_STUB(spdk_bdev_open, int, (struct spdk_bdev *bdev, bool write, + spdk_bdev_remove_cb_t remove_cb, + void *remove_ctx, struct spdk_bdev_desc **_desc), 0); +DEFINE_STUB(spdk_bdev_module_claim_bdev, int, (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, + struct spdk_bdev_module *module), 0); +DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module)); +DEFINE_STUB(spdk_bdev_register, int, (struct spdk_bdev *bdev), 0); +DEFINE_STUB(spdk_bdev_get_by_name, struct spdk_bdev *, (const char *bdev_name), NULL); +DEFINE_STUB(spdk_bdev_io_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_io *bdev_io), + 0); +DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch, + struct spdk_bdev_io_wait_entry *entry), 0); +DEFINE_STUB_V(spdk_reduce_vol_unload, (struct spdk_reduce_vol *vol, + spdk_reduce_vol_op_complete cb_fn, void *cb_arg)); +DEFINE_STUB_V(spdk_reduce_vol_load, (struct spdk_reduce_backing_dev *backing_dev, + spdk_reduce_vol_op_with_handle_complete cb_fn, void *cb_arg)); +DEFINE_STUB(spdk_reduce_vol_get_params, const struct spdk_reduce_vol_params *, + (struct spdk_reduce_vol *vol), NULL); + +/* DPDK stubs */ +DEFINE_STUB(rte_socket_id, unsigned, (void), 0); +DEFINE_STUB(rte_vdev_init, int, (const char *name, const char *args), 0); +DEFINE_STUB_V(rte_comp_op_free, (struct rte_comp_op *op)); +DEFINE_STUB(rte_comp_op_alloc, struct rte_comp_op *, (struct rte_mempool *mempool), NULL); + +int g_small_size_counter = 0; +int g_small_size_modify = 0; +uint64_t g_small_size = 0; +uint64_t +spdk_vtophys(void *buf, uint64_t *size) +{ + g_small_size_counter++; + if (g_small_size_counter == g_small_size_modify) { + *size = g_small_size; + g_small_size_counter = 0; + g_small_size_modify = 0; + } + return (uint64_t)buf; +} + +void +spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len) +{ + cb(g_io_ch, g_bdev_io, true); +} + +/* Mock these functions to call the callback and then return the value we require */ +int ut_spdk_bdev_readv_blocks = 0; +int +spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, + struct iovec *iov, int iovcnt, + uint64_t offset_blocks, uint64_t num_blocks, + spdk_bdev_io_completion_cb cb, void *cb_arg) +{ + cb(g_bdev_io, !ut_spdk_bdev_readv_blocks, cb_arg); + return ut_spdk_bdev_readv_blocks; +} + +int ut_spdk_bdev_writev_blocks = 0; +bool ut_spdk_bdev_writev_blocks_mocked = false; +int +spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, + struct iovec *iov, int iovcnt, + uint64_t offset_blocks, uint64_t num_blocks, + spdk_bdev_io_completion_cb cb, void *cb_arg) +{ + cb(g_bdev_io, !ut_spdk_bdev_writev_blocks, cb_arg); + return ut_spdk_bdev_writev_blocks; +} + +int ut_spdk_bdev_unmap_blocks = 0; +bool ut_spdk_bdev_unmap_blocks_mocked = false; +int +spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, + uint64_t offset_blocks, uint64_t num_blocks, + spdk_bdev_io_completion_cb cb, void *cb_arg) +{ + cb(g_bdev_io, !ut_spdk_bdev_unmap_blocks, cb_arg); + return ut_spdk_bdev_unmap_blocks; +} + +int ut_spdk_bdev_flush_blocks = 0; +bool ut_spdk_bdev_flush_blocks_mocked = false; +int +spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, + uint64_t offset_blocks, 
uint64_t num_blocks, spdk_bdev_io_completion_cb cb, + void *cb_arg) +{ + cb(g_bdev_io, !ut_spdk_bdev_flush_blocks, cb_arg); + return ut_spdk_bdev_flush_blocks; +} + +int ut_spdk_bdev_reset = 0; +bool ut_spdk_bdev_reset_mocked = false; +int +spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, + spdk_bdev_io_completion_cb cb, void *cb_arg) +{ + cb(g_bdev_io, !ut_spdk_bdev_reset, cb_arg); + return ut_spdk_bdev_reset; +} + +bool g_completion_called = false; +void +spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status) +{ + bdev_io->internal.status = status; + g_completion_called = true; +} + +static uint16_t ut_rte_compressdev_dequeue_burst = 0; +uint16_t +rte_compressdev_dequeue_burst(uint8_t dev_id, uint16_t qp_id, struct rte_comp_op **ops, + uint16_t nb_op) +{ + if (ut_rte_compressdev_dequeue_burst == 0) { + return 0; + } + + ops[0] = &g_comp_op[0]; + ops[1] = &g_comp_op[1]; + + return ut_rte_compressdev_dequeue_burst; +} + +static int ut_compress_done[2]; +/* done_count and done_idx together control which expected assertion + * value to use when dequeuing 2 operations. + */ +static uint16_t done_count = 1; +static uint16_t done_idx = 0; +static void +_compress_done(void *_req, int reduce_errno) +{ + if (done_count == 1) { + CU_ASSERT(reduce_errno == ut_compress_done[0]); + } else if (done_count == 2) { + CU_ASSERT(reduce_errno == ut_compress_done[done_idx++]); + } +} + +static void +_get_mbuf_array(struct rte_mbuf *mbuf_array[UT_MBUFS_PER_OP_BOUND_TEST], + struct rte_mbuf *mbuf_head, int mbuf_count, bool null_final) +{ + int i; + + for (i = 0; i < mbuf_count; i++) { + mbuf_array[i] = mbuf_head; + if (mbuf_head) { + mbuf_head = mbuf_head->next; + } + } + if (null_final) { + mbuf_array[i - 1] = NULL; + } +} + +#define FAKE_ENQUEUE_SUCCESS 255 +#define FAKE_ENQUEUE_ERROR 128 +#define FAKE_ENQUEUE_BUSY 64 +static uint16_t ut_enqueue_value = FAKE_ENQUEUE_SUCCESS; +static struct rte_comp_op ut_expected_op; +uint16_t +rte_compressdev_enqueue_burst(uint8_t dev_id, uint16_t qp_id, struct rte_comp_op **ops, + uint16_t nb_ops) +{ + struct rte_comp_op *op = *ops; + struct rte_mbuf *op_mbuf[UT_MBUFS_PER_OP_BOUND_TEST]; + struct rte_mbuf *exp_mbuf[UT_MBUFS_PER_OP_BOUND_TEST]; + int i, num_src_mbufs = UT_MBUFS_PER_OP; + + switch (ut_enqueue_value) { + case FAKE_ENQUEUE_BUSY: + op->status = RTE_COMP_OP_STATUS_NOT_PROCESSED; + return 0; + break; + case FAKE_ENQUEUE_SUCCESS: + op->status = RTE_COMP_OP_STATUS_SUCCESS; + return 1; + break; + case FAKE_ENQUEUE_ERROR: + op->status = RTE_COMP_OP_STATUS_ERROR; + return 0; + break; + default: + break; + } + + /* by design the compress module will never send more than 1 op at a time */ + CU_ASSERT(op->private_xform == ut_expected_op.private_xform); + + /* setup our local pointers to the chained mbufs, those pointed to in the + * operation struct and the expected values. 
+ */ + _get_mbuf_array(op_mbuf, op->m_src, SPDK_COUNTOF(op_mbuf), true); + _get_mbuf_array(exp_mbuf, ut_expected_op.m_src, SPDK_COUNTOF(exp_mbuf), true); + + if (ut_boundary_alloc == true) { + /* if we crossed a boundary, we need to check the 4th src mbuf and + * reset the global that is used to identify whether we crossed + * or not + */ + num_src_mbufs = UT_MBUFS_PER_OP_BOUND_TEST; + exp_mbuf[UT_MBUFS_PER_OP_BOUND_TEST - 1] = ut_expected_op.m_src->next->next->next; + op_mbuf[UT_MBUFS_PER_OP_BOUND_TEST - 1] = op->m_src->next->next->next; + ut_boundary_alloc = false; + } + + + for (i = 0; i < num_src_mbufs; i++) { + CU_ASSERT(op_mbuf[i]->buf_addr == exp_mbuf[i]->buf_addr); + CU_ASSERT(op_mbuf[i]->buf_iova == exp_mbuf[i]->buf_iova); + CU_ASSERT(op_mbuf[i]->buf_len == exp_mbuf[i]->buf_len); + CU_ASSERT(op_mbuf[i]->pkt_len == exp_mbuf[i]->pkt_len); + } + + /* if only 3 mbufs were used in the test, the 4th should be zeroed */ + if (num_src_mbufs == UT_MBUFS_PER_OP) { + CU_ASSERT(op_mbuf[UT_MBUFS_PER_OP_BOUND_TEST - 1] == NULL); + CU_ASSERT(exp_mbuf[UT_MBUFS_PER_OP_BOUND_TEST - 1] == NULL); + } + + CU_ASSERT(op->m_src->userdata == ut_expected_op.m_src->userdata); + CU_ASSERT(op->src.offset == ut_expected_op.src.offset); + CU_ASSERT(op->src.length == ut_expected_op.src.length); + + /* check dst mbuf values */ + _get_mbuf_array(op_mbuf, op->m_dst, SPDK_COUNTOF(op_mbuf), true); + _get_mbuf_array(exp_mbuf, ut_expected_op.m_dst, SPDK_COUNTOF(exp_mbuf), true); + + for (i = 0; i < UT_MBUFS_PER_OP; i++) { + CU_ASSERT(op_mbuf[i]->buf_addr == exp_mbuf[i]->buf_addr); + CU_ASSERT(op_mbuf[i]->buf_iova == exp_mbuf[i]->buf_iova); + CU_ASSERT(op_mbuf[i]->buf_len == exp_mbuf[i]->buf_len); + CU_ASSERT(op_mbuf[i]->pkt_len == exp_mbuf[i]->pkt_len); + } + CU_ASSERT(op->dst.offset == ut_expected_op.dst.offset); + + return ut_enqueue_value; +} + +/* Global setup for all tests that share a bunch of preparation... 
*/ +static int +test_setup(void) +{ + struct spdk_thread *thread; + int i; + + spdk_thread_lib_init(NULL, 0); + + thread = spdk_thread_create(NULL, NULL); + spdk_set_thread(thread); + + g_comp_bdev.reduce_thread = thread; + g_comp_bdev.backing_dev.unmap = _comp_reduce_unmap; + g_comp_bdev.backing_dev.readv = _comp_reduce_readv; + g_comp_bdev.backing_dev.writev = _comp_reduce_writev; + g_comp_bdev.backing_dev.compress = _comp_reduce_compress; + g_comp_bdev.backing_dev.decompress = _comp_reduce_decompress; + g_comp_bdev.backing_dev.blocklen = 512; + g_comp_bdev.backing_dev.blockcnt = 1024 * 16; + + g_comp_bdev.device_qp = &g_device_qp; + g_comp_bdev.device_qp->device = &g_device; + + TAILQ_INIT(&g_comp_bdev.queued_comp_ops); + + g_comp_xform = (struct rte_comp_xform) { + .type = RTE_COMP_COMPRESS, + .compress = { + .algo = RTE_COMP_ALGO_DEFLATE, + .deflate.huffman = RTE_COMP_HUFFMAN_DEFAULT, + .level = RTE_COMP_LEVEL_MAX, + .window_size = DEFAULT_WINDOW_SIZE, + .chksum = RTE_COMP_CHECKSUM_NONE, + .hash_algo = RTE_COMP_HASH_ALGO_NONE + } + }; + + g_decomp_xform = (struct rte_comp_xform) { + .type = RTE_COMP_DECOMPRESS, + .decompress = { + .algo = RTE_COMP_ALGO_DEFLATE, + .chksum = RTE_COMP_CHECKSUM_NONE, + .window_size = DEFAULT_WINDOW_SIZE, + .hash_algo = RTE_COMP_HASH_ALGO_NONE + } + }; + g_device.comp_xform = &g_comp_xform; + g_device.decomp_xform = &g_decomp_xform; + g_cdev_cap.comp_feature_flags = RTE_COMP_FF_SHAREABLE_PRIV_XFORM; + g_device.cdev_info.driver_name = "compress_isal"; + g_device.cdev_info.capabilities = &g_cdev_cap; + for (i = 0; i < UT_MBUFS_PER_OP_BOUND_TEST; i++) { + g_src_mbufs[i] = calloc(1, sizeof(struct rte_mbuf)); + } + for (i = 0; i < UT_MBUFS_PER_OP; i++) { + g_dst_mbufs[i] = calloc(1, sizeof(struct rte_mbuf)); + } + + g_bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct comp_bdev_io)); + g_bdev_io->u.bdev.iovs = calloc(128, sizeof(struct iovec)); + g_bdev_io->bdev = &g_comp_bdev.comp_bdev; + g_io_ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct comp_io_channel)); + g_io_ch->thread = thread; + g_comp_ch = (struct comp_io_channel *)((uint8_t *)g_io_ch + sizeof(struct spdk_io_channel)); + g_io_ctx = (struct comp_bdev_io *)g_bdev_io->driver_ctx; + + g_io_ctx->comp_ch = g_comp_ch; + g_io_ctx->comp_bdev = &g_comp_bdev; + g_comp_bdev.device_qp = &g_device_qp; + + for (i = 0; i < UT_MBUFS_PER_OP_BOUND_TEST - 1; i++) { + g_expected_src_mbufs[i].next = &g_expected_src_mbufs[i + 1]; + } + g_expected_src_mbufs[UT_MBUFS_PER_OP_BOUND_TEST - 1].next = NULL; + + /* we only test w/4 mbufs on src side */ + for (i = 0; i < UT_MBUFS_PER_OP - 1; i++) { + g_expected_dst_mbufs[i].next = &g_expected_dst_mbufs[i + 1]; + } + g_expected_dst_mbufs[UT_MBUFS_PER_OP - 1].next = NULL; + + return 0; +} + +/* Global teardown for all tests */ +static int +test_cleanup(void) +{ + struct spdk_thread *thread; + int i; + + for (i = 0; i < UT_MBUFS_PER_OP_BOUND_TEST; i++) { + free(g_src_mbufs[i]); + } + for (i = 0; i < UT_MBUFS_PER_OP; i++) { + free(g_dst_mbufs[i]); + } + free(g_bdev_io->u.bdev.iovs); + free(g_bdev_io); + free(g_io_ch); + + thread = spdk_get_thread(); + spdk_thread_exit(thread); + while (!spdk_thread_is_exited(thread)) { + spdk_thread_poll(thread, 0, 0); + } + spdk_thread_destroy(thread); + + spdk_thread_lib_fini(); + + return 0; +} + +static void +test_compress_operation(void) +{ + struct iovec src_iovs[3] = {}; + int src_iovcnt; + struct iovec dst_iovs[3] = {}; + int dst_iovcnt; + struct spdk_reduce_vol_cb_args cb_arg; + int rc, i; + struct vbdev_comp_op 
*op; + struct rte_mbuf *exp_src_mbuf[UT_MBUFS_PER_OP]; + struct rte_mbuf *exp_dst_mbuf[UT_MBUFS_PER_OP]; + + src_iovcnt = dst_iovcnt = 3; + for (i = 0; i < dst_iovcnt; i++) { + src_iovs[i].iov_len = 0x1000; + dst_iovs[i].iov_len = 0x1000; + src_iovs[i].iov_base = (void *)0x10000000 + 0x1000 * i; + dst_iovs[i].iov_base = (void *)0x20000000 + 0x1000 * i; + } + + /* test rte_comp_op_alloc failure */ + MOCK_SET(rte_comp_op_alloc, NULL); + CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true); + rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt, + &dst_iovs[0], dst_iovcnt, true, &cb_arg); + CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == false); + while (!TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops)) { + op = TAILQ_FIRST(&g_comp_bdev.queued_comp_ops); + TAILQ_REMOVE(&g_comp_bdev.queued_comp_ops, op, link); + free(op); + } + CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true); + CU_ASSERT(rc == 0); + MOCK_SET(rte_comp_op_alloc, &g_comp_op[0]); + + /* test mempool get failure */ + ut_rte_pktmbuf_alloc_bulk = -1; + CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true); + rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt, + &dst_iovs[0], dst_iovcnt, true, &cb_arg); + CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == false); + while (!TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops)) { + op = TAILQ_FIRST(&g_comp_bdev.queued_comp_ops); + TAILQ_REMOVE(&g_comp_bdev.queued_comp_ops, op, link); + free(op); + } + CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true); + CU_ASSERT(rc == 0); + ut_rte_pktmbuf_alloc_bulk = 0; + + /* test enqueue failure busy */ + ut_enqueue_value = FAKE_ENQUEUE_BUSY; + CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true); + rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt, + &dst_iovs[0], dst_iovcnt, true, &cb_arg); + CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == false); + while (!TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops)) { + op = TAILQ_FIRST(&g_comp_bdev.queued_comp_ops); + TAILQ_REMOVE(&g_comp_bdev.queued_comp_ops, op, link); + free(op); + } + CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true); + CU_ASSERT(rc == 0); + ut_enqueue_value = 1; + + /* test enqueue failure error */ + ut_enqueue_value = FAKE_ENQUEUE_ERROR; + CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true); + rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt, + &dst_iovs[0], dst_iovcnt, true, &cb_arg); + CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true); + CU_ASSERT(rc == -EINVAL); + ut_enqueue_value = FAKE_ENQUEUE_SUCCESS; + + /* test success with 3 vector iovec */ + ut_expected_op.private_xform = &g_decomp_xform; + ut_expected_op.src.offset = 0; + ut_expected_op.src.length = src_iovs[0].iov_len + src_iovs[1].iov_len + src_iovs[2].iov_len; + + /* setup the src expected values */ + _get_mbuf_array(exp_src_mbuf, &g_expected_src_mbufs[0], SPDK_COUNTOF(exp_src_mbuf), false); + ut_expected_op.m_src = exp_src_mbuf[0]; + + for (i = 0; i < UT_MBUFS_PER_OP; i++) { + exp_src_mbuf[i]->userdata = &cb_arg; + exp_src_mbuf[i]->buf_addr = src_iovs[i].iov_base; + exp_src_mbuf[i]->buf_iova = spdk_vtophys(src_iovs[i].iov_base, &src_iovs[i].iov_len); + exp_src_mbuf[i]->buf_len = src_iovs[i].iov_len; + exp_src_mbuf[i]->pkt_len = src_iovs[i].iov_len; + } + + /* setup the dst expected values */ + _get_mbuf_array(exp_dst_mbuf, &g_expected_dst_mbufs[0], SPDK_COUNTOF(exp_dst_mbuf), false); + ut_expected_op.dst.offset = 0; + ut_expected_op.m_dst = 
exp_dst_mbuf[0];
+
+ for (i = 0; i < UT_MBUFS_PER_OP; i++) {
+ exp_dst_mbuf[i]->buf_addr = dst_iovs[i].iov_base;
+ exp_dst_mbuf[i]->buf_iova = spdk_vtophys(dst_iovs[i].iov_base, &dst_iovs[i].iov_len);
+ exp_dst_mbuf[i]->buf_len = dst_iovs[i].iov_len;
+ exp_dst_mbuf[i]->pkt_len = dst_iovs[i].iov_len;
+ }
+
+ rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt,
+ &dst_iovs[0], dst_iovcnt, false, &cb_arg);
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ CU_ASSERT(rc == 0);
+
+}
+
+static void
+test_compress_operation_cross_boundary(void)
+{
+ struct iovec src_iovs[3] = {};
+ int src_iovcnt;
+ struct iovec dst_iovs[3] = {};
+ int dst_iovcnt;
+ struct spdk_reduce_vol_cb_args cb_arg;
+ int rc, i;
+ struct rte_mbuf *exp_src_mbuf[UT_MBUFS_PER_OP_BOUND_TEST];
+ struct rte_mbuf *exp_dst_mbuf[UT_MBUFS_PER_OP_BOUND_TEST];
+
+ /* Set up the same basic 3 IOV test as used in the simple success case
+ * but then we'll start testing a vtophys boundary crossing at each
+ * position.
+ */
+ src_iovcnt = dst_iovcnt = 3;
+ for (i = 0; i < dst_iovcnt; i++) {
+ src_iovs[i].iov_len = 0x1000;
+ dst_iovs[i].iov_len = 0x1000;
+ src_iovs[i].iov_base = (void *)0x10000000 + 0x1000 * i;
+ dst_iovs[i].iov_base = (void *)0x20000000 + 0x1000 * i;
+ }
+
+ ut_expected_op.private_xform = &g_decomp_xform;
+ ut_expected_op.src.offset = 0;
+ ut_expected_op.src.length = src_iovs[0].iov_len + src_iovs[1].iov_len + src_iovs[2].iov_len;
+
+ /* setup the src expected values */
+ _get_mbuf_array(exp_src_mbuf, &g_expected_src_mbufs[0], SPDK_COUNTOF(exp_src_mbuf), false);
+ ut_expected_op.m_src = exp_src_mbuf[0];
+
+ for (i = 0; i < UT_MBUFS_PER_OP; i++) {
+ exp_src_mbuf[i]->userdata = &cb_arg;
+ exp_src_mbuf[i]->buf_addr = src_iovs[i].iov_base;
+ exp_src_mbuf[i]->buf_iova = spdk_vtophys(src_iovs[i].iov_base, &src_iovs[i].iov_len);
+ exp_src_mbuf[i]->buf_len = src_iovs[i].iov_len;
+ exp_src_mbuf[i]->pkt_len = src_iovs[i].iov_len;
+ }
+
+ /* setup the dst expected values, we don't test needing a 4th dst mbuf */
+ _get_mbuf_array(exp_dst_mbuf, &g_expected_dst_mbufs[0], SPDK_COUNTOF(exp_dst_mbuf), false);
+ ut_expected_op.dst.offset = 0;
+ ut_expected_op.m_dst = exp_dst_mbuf[0];
+
+ for (i = 0; i < UT_MBUFS_PER_OP; i++) {
+ exp_dst_mbuf[i]->buf_addr = dst_iovs[i].iov_base;
+ exp_dst_mbuf[i]->buf_iova = spdk_vtophys(dst_iovs[i].iov_base, &dst_iovs[i].iov_len);
+ exp_dst_mbuf[i]->buf_len = dst_iovs[i].iov_len;
+ exp_dst_mbuf[i]->pkt_len = dst_iovs[i].iov_len;
+ }
+
+ /* force the 1st IOV to get partial length from spdk_vtophys */
+ g_small_size_counter = 0;
+ g_small_size_modify = 1;
+ g_small_size = 0x800;
+ exp_src_mbuf[3]->userdata = &cb_arg;
+
+ /* first only has shorter length */
+ exp_src_mbuf[0]->pkt_len = exp_src_mbuf[0]->buf_len = 0x800;
+
+ /* 2nd was inserted by the boundary crossing condition and finishes off
+ * the length from the first */
+ exp_src_mbuf[1]->buf_addr = (void *)0x10000800;
+ exp_src_mbuf[1]->buf_iova = 0x10000800;
+ exp_src_mbuf[1]->pkt_len = exp_src_mbuf[1]->buf_len = 0x800;
+
+ /* 3rd looks like what the 2nd would have */
+ exp_src_mbuf[2]->buf_addr = (void *)0x10001000;
+ exp_src_mbuf[2]->buf_iova = 0x10001000;
+ exp_src_mbuf[2]->pkt_len = exp_src_mbuf[2]->buf_len = 0x1000;
+
+ /* a new 4th looks like what the 3rd would have */
+ exp_src_mbuf[3]->buf_addr = (void *)0x10002000;
+ exp_src_mbuf[3]->buf_iova = 0x10002000;
+ exp_src_mbuf[3]->pkt_len = exp_src_mbuf[3]->buf_len = 0x1000;
+
+ rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt,
+ &dst_iovs[0], dst_iovcnt, false, &cb_arg);
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ CU_ASSERT(rc == 0);
+
+ /* Now force the 2nd IOV to get partial length from spdk_vtophys */
+ g_small_size_counter = 0;
+ g_small_size_modify = 2;
+ g_small_size = 0x800;
+
+ /* first is normal */
+ exp_src_mbuf[0]->buf_addr = (void *)0x10000000;
+ exp_src_mbuf[0]->buf_iova = 0x10000000;
+ exp_src_mbuf[0]->pkt_len = exp_src_mbuf[0]->buf_len = 0x1000;
+
+ /* second only has shorter length */
+ exp_src_mbuf[1]->buf_addr = (void *)0x10001000;
+ exp_src_mbuf[1]->buf_iova = 0x10001000;
+ exp_src_mbuf[1]->pkt_len = exp_src_mbuf[1]->buf_len = 0x800;
+
+ /* 3rd was inserted by the boundary crossing condition and finishes off
+ * the length from the second */
+ exp_src_mbuf[2]->buf_addr = (void *)0x10001800;
+ exp_src_mbuf[2]->buf_iova = 0x10001800;
+ exp_src_mbuf[2]->pkt_len = exp_src_mbuf[2]->buf_len = 0x800;
+
+ /* a new 4th looks like what the 3rd would have */
+ exp_src_mbuf[3]->buf_addr = (void *)0x10002000;
+ exp_src_mbuf[3]->buf_iova = 0x10002000;
+ exp_src_mbuf[3]->pkt_len = exp_src_mbuf[3]->buf_len = 0x1000;
+
+ rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt,
+ &dst_iovs[0], dst_iovcnt, false, &cb_arg);
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ CU_ASSERT(rc == 0);
+
+ /* Finally force the 3rd IOV to get partial length from spdk_vtophys */
+ g_small_size_counter = 0;
+ g_small_size_modify = 3;
+ g_small_size = 0x800;
+
+ /* first is normal */
+ exp_src_mbuf[0]->buf_addr = (void *)0x10000000;
+ exp_src_mbuf[0]->buf_iova = 0x10000000;
+ exp_src_mbuf[0]->pkt_len = exp_src_mbuf[0]->buf_len = 0x1000;
+
+ /* second is normal */
+ exp_src_mbuf[1]->buf_addr = (void *)0x10001000;
+ exp_src_mbuf[1]->buf_iova = 0x10001000;
+ exp_src_mbuf[1]->pkt_len = exp_src_mbuf[1]->buf_len = 0x1000;
+
+ /* 3rd has shorter length */
+ exp_src_mbuf[2]->buf_addr = (void *)0x10002000;
+ exp_src_mbuf[2]->buf_iova = 0x10002000;
+ exp_src_mbuf[2]->pkt_len = exp_src_mbuf[2]->buf_len = 0x800;
+
+ /* a new 4th handles the remainder from the 3rd */
+ exp_src_mbuf[3]->buf_addr = (void *)0x10002800;
+ exp_src_mbuf[3]->buf_iova = 0x10002800;
+ exp_src_mbuf[3]->pkt_len = exp_src_mbuf[3]->buf_len = 0x800;
+
+ rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt,
+ &dst_iovs[0], dst_iovcnt, false, &cb_arg);
+ CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
+ CU_ASSERT(rc == 0);
+}
+
+static void
+test_poller(void)
+{
+ int rc;
+ struct spdk_reduce_vol_cb_args *cb_args;
+ struct rte_mbuf mbuf[4]; /* one src, one dst, 2 ops */
+ struct vbdev_comp_op *op_to_queue;
+ struct iovec src_iovs[3] = {};
+ struct iovec dst_iovs[3] = {};
+ int i;
+
+ cb_args = calloc(1, sizeof(*cb_args));
+ SPDK_CU_ASSERT_FATAL(cb_args != NULL);
+ cb_args->cb_fn = _compress_done;
+ memset(&g_comp_op[0], 0, sizeof(struct rte_comp_op));
+ g_comp_op[0].m_src = &mbuf[0];
+ g_comp_op[1].m_src = &mbuf[1];
+ g_comp_op[0].m_dst = &mbuf[2];
+ g_comp_op[1].m_dst = &mbuf[3];
+ for (i = 0; i < 3; i++) {
+ src_iovs[i].iov_len = 0x1000;
+ dst_iovs[i].iov_len = 0x1000;
+ src_iovs[i].iov_base = (void *)0x10000000 + 0x1000 * i;
+ dst_iovs[i].iov_base = (void *)0x20000000 + 0x1000 * i;
+ }
+
+ /* Error from dequeue, nothing needs to be resubmitted.
+ */ + ut_rte_compressdev_dequeue_burst = 1; + /* setup what we want dequeue to return for the op */ + g_comp_op[0].m_src->userdata = (void *)cb_args; + g_comp_op[0].produced = 1; + g_comp_op[0].status = 1; + /* value asserted in the reduce callback */ + ut_compress_done[0] = -EINVAL; + CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true); + rc = comp_dev_poller((void *)&g_comp_bdev); + CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true); + CU_ASSERT(rc == SPDK_POLLER_BUSY); + + /* Success from dequeue, 2 ops. nothing needing to be resubmitted. + */ + ut_rte_compressdev_dequeue_burst = 2; + /* setup what we want dequeue to return for the op */ + g_comp_op[0].m_src->userdata = (void *)cb_args; + g_comp_op[0].produced = 16; + g_comp_op[0].status = 0; + g_comp_op[1].m_src->userdata = (void *)cb_args; + g_comp_op[1].produced = 32; + g_comp_op[1].status = 0; + /* value asserted in the reduce callback */ + ut_compress_done[0] = 16; + ut_compress_done[1] = 32; + done_count = 2; + CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true); + rc = comp_dev_poller((void *)&g_comp_bdev); + CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true); + CU_ASSERT(rc == SPDK_POLLER_BUSY); + + /* Success from dequeue, one op to be resubmitted. + */ + ut_rte_compressdev_dequeue_burst = 1; + /* setup what we want dequeue to return for the op */ + g_comp_op[0].m_src->userdata = (void *)cb_args; + g_comp_op[0].produced = 16; + g_comp_op[0].status = 0; + /* value asserted in the reduce callback */ + ut_compress_done[0] = 16; + done_count = 1; + op_to_queue = calloc(1, sizeof(struct vbdev_comp_op)); + SPDK_CU_ASSERT_FATAL(op_to_queue != NULL); + op_to_queue->backing_dev = &g_comp_bdev.backing_dev; + op_to_queue->src_iovs = &src_iovs[0]; + op_to_queue->src_iovcnt = 3; + op_to_queue->dst_iovs = &dst_iovs[0]; + op_to_queue->dst_iovcnt = 3; + op_to_queue->compress = true; + op_to_queue->cb_arg = cb_args; + ut_enqueue_value = FAKE_ENQUEUE_SUCCESS; + TAILQ_INSERT_TAIL(&g_comp_bdev.queued_comp_ops, + op_to_queue, + link); + CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == false); + rc = comp_dev_poller((void *)&g_comp_bdev); + CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true); + CU_ASSERT(rc == SPDK_POLLER_BUSY); + + /* op_to_queue is freed in code under test */ + free(cb_args); +} + +static void +test_vbdev_compress_submit_request(void) +{ + /* Single element block size write */ + g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; + g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE; + g_completion_called = false; + vbdev_compress_submit_request(g_io_ch, g_bdev_io); + CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); + CU_ASSERT(g_completion_called == true); + CU_ASSERT(g_io_ctx->orig_io == g_bdev_io); + CU_ASSERT(g_io_ctx->comp_bdev == &g_comp_bdev); + CU_ASSERT(g_io_ctx->comp_ch == g_comp_ch); + + /* same write but now fail it */ + ut_spdk_reduce_vol_op_complete_err = 1; + g_completion_called = false; + vbdev_compress_submit_request(g_io_ch, g_bdev_io); + CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); + CU_ASSERT(g_completion_called == true); + + /* test a read success */ + g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ; + ut_spdk_reduce_vol_op_complete_err = 0; + g_completion_called = false; + vbdev_compress_submit_request(g_io_ch, g_bdev_io); + CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); + CU_ASSERT(g_completion_called == true); + + /* test a read failure */ + ut_spdk_reduce_vol_op_complete_err = 1; + 
g_completion_called = false; + vbdev_compress_submit_request(g_io_ch, g_bdev_io); + CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); + CU_ASSERT(g_completion_called == true); +} + +static void +test_passthru(void) +{ + +} + +static void +test_reset(void) +{ + /* TODO: There are a few different ways to do this given that + * the code uses spdk_for_each_channel() to implement reset + * handling. SUbmitting w/o UT for this function for now and + * will follow up with something shortly. + */ +} + +static void +test_initdrivers(void) +{ + int rc; + + /* test return values from rte_vdev_init() */ + MOCK_SET(rte_vdev_init, -EEXIST); + rc = vbdev_init_compress_drivers(); + /* This is not an error condition, we already have one */ + CU_ASSERT(rc == 0); + + /* error */ + MOCK_SET(rte_vdev_init, -2); + rc = vbdev_init_compress_drivers(); + CU_ASSERT(rc == -EINVAL); + CU_ASSERT(g_mbuf_mp == NULL); + CU_ASSERT(g_comp_op_mp == NULL); + + /* compressdev count 0 */ + ut_rte_compressdev_count = 0; + MOCK_SET(rte_vdev_init, 0); + rc = vbdev_init_compress_drivers(); + CU_ASSERT(rc == 0); + + /* bogus count */ + ut_rte_compressdev_count = RTE_COMPRESS_MAX_DEVS + 1; + rc = vbdev_init_compress_drivers(); + CU_ASSERT(rc == -EINVAL); + + /* can't get mbuf pool */ + ut_rte_compressdev_count = 1; + MOCK_SET(spdk_mempool_create, NULL); + rc = vbdev_init_compress_drivers(); + CU_ASSERT(rc == -ENOMEM); + MOCK_CLEAR(spdk_mempool_create); + + /* can't get comp op pool */ + ut_rte_comp_op_pool_create = NULL; + rc = vbdev_init_compress_drivers(); + CU_ASSERT(rc == -ENOMEM); + + /* error on create_compress_dev() */ + ut_rte_comp_op_pool_create = (struct rte_mempool *)&test_initdrivers; + ut_rte_compressdev_configure = -1; + rc = vbdev_init_compress_drivers(); + CU_ASSERT(rc == -1); + + /* error on create_compress_dev() but coverage for large num queues */ + ut_max_nb_queue_pairs = 99; + rc = vbdev_init_compress_drivers(); + CU_ASSERT(rc == -1); + + /* qpair setup fails */ + ut_rte_compressdev_configure = 0; + ut_max_nb_queue_pairs = 0; + ut_rte_compressdev_queue_pair_setup = -1; + rc = vbdev_init_compress_drivers(); + CU_ASSERT(rc == -EINVAL); + + /* rte_compressdev_start fails */ + ut_rte_compressdev_queue_pair_setup = 0; + ut_rte_compressdev_start = -1; + rc = vbdev_init_compress_drivers(); + CU_ASSERT(rc == -1); + + /* rte_compressdev_private_xform_create() fails */ + ut_rte_compressdev_start = 0; + ut_rte_compressdev_private_xform_create = -2; + rc = vbdev_init_compress_drivers(); + CU_ASSERT(rc == -2); + + /* success */ + ut_rte_compressdev_private_xform_create = 0; + rc = vbdev_init_compress_drivers(); + CU_ASSERT(rc == 0); + spdk_mempool_free((struct spdk_mempool *)g_mbuf_mp); +} + +static void +test_supported_io(void) +{ + +} + +int +main(int argc, char **argv) +{ + CU_pSuite suite = NULL; + unsigned int num_failures; + + CU_set_error_action(CUEA_ABORT); + CU_initialize_registry(); + + suite = CU_add_suite("compress", test_setup, test_cleanup); + CU_ADD_TEST(suite, test_compress_operation); + CU_ADD_TEST(suite, test_compress_operation_cross_boundary); + CU_ADD_TEST(suite, test_vbdev_compress_submit_request); + CU_ADD_TEST(suite, test_passthru); + CU_ADD_TEST(suite, test_initdrivers); + CU_ADD_TEST(suite, test_supported_io); + CU_ADD_TEST(suite, test_poller); + CU_ADD_TEST(suite, test_reset); + + CU_basic_set_mode(CU_BRM_VERBOSE); + CU_basic_run_tests(); + num_failures = CU_get_number_of_failures(); + CU_cleanup_registry(); + return num_failures; +} diff --git 
a/src/spdk/test/unit/lib/bdev/crypto.c/.gitignore b/src/spdk/test/unit/lib/bdev/crypto.c/.gitignore new file mode 100644 index 000000000..b2777562d --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/crypto.c/.gitignore @@ -0,0 +1 @@ +crypto_ut diff --git a/src/spdk/test/unit/lib/bdev/crypto.c/Makefile b/src/spdk/test/unit/lib/bdev/crypto.c/Makefile new file mode 100644 index 000000000..a987fbf2e --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/crypto.c/Makefile @@ -0,0 +1,39 @@ +# +# BSD LICENSE +# +# Copyright (c) Intel Corporation. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..) + +TEST_FILE = crypto_ut.c +CFLAGS += $(ENV_CFLAGS) + +include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk diff --git a/src/spdk/test/unit/lib/bdev/crypto.c/crypto_ut.c b/src/spdk/test/unit/lib/bdev/crypto.c/crypto_ut.c new file mode 100644 index 000000000..f6298fd7d --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/crypto.c/crypto_ut.c @@ -0,0 +1,1084 @@ +/*- + * BSD LICENSE + * + * Copyright (c) Intel Corporation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "spdk_cunit.h" + +#include "common/lib/test_env.c" +#include "spdk_internal/mock.h" +#include "unit/lib/json_mock.c" + +#include <rte_crypto.h> +#include <rte_cryptodev.h> + +#define MAX_TEST_BLOCKS 8192 +struct rte_crypto_op *g_test_crypto_ops[MAX_TEST_BLOCKS]; +struct rte_crypto_op *g_test_dev_full_ops[MAX_TEST_BLOCKS]; + +uint16_t g_dequeue_mock; +uint16_t g_enqueue_mock; +unsigned ut_rte_crypto_op_bulk_alloc; +int ut_rte_crypto_op_attach_sym_session = 0; +#define MOCK_INFO_GET_1QP_AESNI 0 +#define MOCK_INFO_GET_1QP_QAT 1 +#define MOCK_INFO_GET_1QP_BOGUS_PMD 2 +int ut_rte_cryptodev_info_get = 0; +bool ut_rte_cryptodev_info_get_mocked = false; + +/* Those functions are defined as static inline in DPDK, so we can't + * mock them straight away. We use defines to redirect them into + * our custom functions. + */ +static bool g_resubmit_test = false; +#define rte_cryptodev_enqueue_burst mock_rte_cryptodev_enqueue_burst +static inline uint16_t +mock_rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id, + struct rte_crypto_op **ops, uint16_t nb_ops) +{ + int i; + + CU_ASSERT(nb_ops > 0); + + for (i = 0; i < nb_ops; i++) { + /* Use this empty (til now) array of pointers to store + * enqueued operations for assertion in dev_full test. + */ + g_test_dev_full_ops[i] = *ops++; + if (g_resubmit_test == true) { + CU_ASSERT(g_test_dev_full_ops[i] == (void *)0xDEADBEEF); + } + } + + return g_enqueue_mock; +} + +#define rte_cryptodev_dequeue_burst mock_rte_cryptodev_dequeue_burst +static inline uint16_t +mock_rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id, + struct rte_crypto_op **ops, uint16_t nb_ops) +{ + int i; + + CU_ASSERT(nb_ops > 0); + + for (i = 0; i < g_dequeue_mock; i++) { + *ops++ = g_test_crypto_ops[i]; + } + + return g_dequeue_mock; +} + +/* Instead of allocating real memory, assign the allocations to our + * test array for assertion in tests. 
+ */ +#define rte_crypto_op_bulk_alloc mock_rte_crypto_op_bulk_alloc +static inline unsigned +mock_rte_crypto_op_bulk_alloc(struct rte_mempool *mempool, + enum rte_crypto_op_type type, + struct rte_crypto_op **ops, uint16_t nb_ops) +{ + int i; + + for (i = 0; i < nb_ops; i++) { + *ops++ = g_test_crypto_ops[i]; + } + return ut_rte_crypto_op_bulk_alloc; +} + +#define rte_mempool_put_bulk mock_rte_mempool_put_bulk +static __rte_always_inline void +mock_rte_mempool_put_bulk(struct rte_mempool *mp, void *const *obj_table, + unsigned int n) +{ + return; +} + +#define rte_crypto_op_attach_sym_session mock_rte_crypto_op_attach_sym_session +static inline int +mock_rte_crypto_op_attach_sym_session(struct rte_crypto_op *op, + struct rte_cryptodev_sym_session *sess) +{ + return ut_rte_crypto_op_attach_sym_session; +} + +#define rte_lcore_count mock_rte_lcore_count +static inline unsigned +mock_rte_lcore_count(void) +{ + return 1; +} + +#include "bdev/crypto/vbdev_crypto.c" + +/* SPDK stubs */ +DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch, + struct spdk_bdev_io_wait_entry *entry), 0); +DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, + (struct spdk_conf *cp, const char *name), NULL); +DEFINE_STUB(spdk_conf_section_get_nval, char *, + (struct spdk_conf_section *sp, const char *key, int idx), NULL); +DEFINE_STUB(spdk_conf_section_get_nmval, char *, + (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL); +DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module)); +DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *g_bdev_io)); +DEFINE_STUB_V(spdk_bdev_io_put_aux_buf, (struct spdk_bdev_io *bdev_io, void *aux_buf)); +DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev, + enum spdk_bdev_io_type io_type), 0); +DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev)); +DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc)); +DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), 0); +DEFINE_STUB(spdk_bdev_get_buf_align, size_t, (const struct spdk_bdev *bdev), 64); +DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc), 0); +DEFINE_STUB_V(spdk_bdev_unregister, (struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, + void *cb_arg)); +DEFINE_STUB(spdk_bdev_open, int, (struct spdk_bdev *bdev, bool write, + spdk_bdev_remove_cb_t remove_cb, + void *remove_ctx, struct spdk_bdev_desc **_desc), 0); +DEFINE_STUB(spdk_bdev_module_claim_bdev, int, (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, + struct spdk_bdev_module *module), 0); +DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module)); +DEFINE_STUB(spdk_bdev_register, int, (struct spdk_bdev *vbdev), 0); + +/* DPDK stubs */ +DEFINE_STUB(rte_cryptodev_count, uint8_t, (void), 0); +DEFINE_STUB_V(rte_mempool_free, (struct rte_mempool *mp)); +DEFINE_STUB(rte_mempool_create, struct rte_mempool *, (const char *name, unsigned n, + unsigned elt_size, + unsigned cache_size, unsigned private_data_size, + rte_mempool_ctor_t *mp_init, void *mp_init_arg, + rte_mempool_obj_cb_t *obj_init, void *obj_init_arg, + int socket_id, unsigned flags), (struct rte_mempool *)1); +DEFINE_STUB(rte_socket_id, unsigned, (void), 0); +DEFINE_STUB(rte_crypto_op_pool_create, struct rte_mempool *, + (const char *name, enum rte_crypto_op_type type, unsigned nb_elts, + unsigned cache_size, uint16_t priv_size, int socket_id), (struct rte_mempool *)1); 
+DEFINE_STUB(rte_cryptodev_device_count_by_driver, uint8_t, (uint8_t driver_id), 0); +DEFINE_STUB(rte_cryptodev_configure, int, (uint8_t dev_id, struct rte_cryptodev_config *config), 0); +#if RTE_VERSION >= RTE_VERSION_NUM(19, 02, 0, 0) +DEFINE_STUB(rte_cryptodev_queue_pair_setup, int, (uint8_t dev_id, uint16_t queue_pair_id, + const struct rte_cryptodev_qp_conf *qp_conf, int socket_id), 0); +DEFINE_STUB(rte_cryptodev_sym_session_pool_create, struct rte_mempool *, (const char *name, + uint32_t nb_elts, + uint32_t elt_size, uint32_t cache_size, uint16_t priv_size, + int socket_id), (struct rte_mempool *)1); +#else +DEFINE_STUB(rte_cryptodev_queue_pair_setup, int, (uint8_t dev_id, uint16_t queue_pair_id, + const struct rte_cryptodev_qp_conf *qp_conf, + int socket_id, struct rte_mempool *session_pool), 0); +#endif +DEFINE_STUB(rte_cryptodev_start, int, (uint8_t dev_id), 0); +DEFINE_STUB_V(rte_cryptodev_stop, (uint8_t dev_id)); +DEFINE_STUB(rte_cryptodev_sym_session_create, struct rte_cryptodev_sym_session *, + (struct rte_mempool *mempool), (struct rte_cryptodev_sym_session *)1); +DEFINE_STUB(rte_cryptodev_sym_session_init, int, (uint8_t dev_id, + struct rte_cryptodev_sym_session *sess, + struct rte_crypto_sym_xform *xforms, struct rte_mempool *mempool), 0); +DEFINE_STUB(rte_vdev_init, int, (const char *name, const char *args), 0); +DEFINE_STUB(rte_cryptodev_sym_session_free, int, (struct rte_cryptodev_sym_session *sess), 0); +DEFINE_STUB(rte_vdev_uninit, int, (const char *name), 0); + +struct rte_cryptodev *rte_cryptodevs; + +/* global vars and setup/cleanup functions used for all test functions */ +struct spdk_bdev_io *g_bdev_io; +struct crypto_bdev_io *g_io_ctx; +struct crypto_io_channel *g_crypto_ch; +struct spdk_io_channel *g_io_ch; +struct vbdev_dev g_device; +struct vbdev_crypto g_crypto_bdev; +struct device_qp g_dev_qp; + +void +rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info) +{ + dev_info->max_nb_queue_pairs = 1; + if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_AESNI) { + dev_info->driver_name = g_driver_names[0]; + } else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_QAT) { + dev_info->driver_name = g_driver_names[1]; + } else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_BOGUS_PMD) { + dev_info->driver_name = "junk"; + } +} + +unsigned int +rte_cryptodev_sym_get_private_session_size(uint8_t dev_id) +{ + return (unsigned int)dev_id; +} + +void +spdk_bdev_io_get_aux_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_aux_buf_cb cb) +{ + cb(g_io_ch, g_bdev_io, (void *)0xDEADBEEF); +} + +void +spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len) +{ + cb(g_io_ch, g_bdev_io, true); +} + +/* Mock these functions to call the callback and then return the value we require */ +int ut_spdk_bdev_readv_blocks = 0; +bool ut_spdk_bdev_readv_blocks_mocked = false; +int +spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, + struct iovec *iov, int iovcnt, + uint64_t offset_blocks, uint64_t num_blocks, + spdk_bdev_io_completion_cb cb, void *cb_arg) +{ + cb(g_bdev_io, !ut_spdk_bdev_readv_blocks, cb_arg); + return ut_spdk_bdev_readv_blocks; +} + +int ut_spdk_bdev_writev_blocks = 0; +bool ut_spdk_bdev_writev_blocks_mocked = false; +int +spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, + struct iovec *iov, int iovcnt, + uint64_t offset_blocks, uint64_t num_blocks, + spdk_bdev_io_completion_cb cb, void *cb_arg) +{ + cb(g_bdev_io, !ut_spdk_bdev_writev_blocks, cb_arg); 
+ return ut_spdk_bdev_writev_blocks; +} + +int ut_spdk_bdev_unmap_blocks = 0; +bool ut_spdk_bdev_unmap_blocks_mocked = false; +int +spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, + uint64_t offset_blocks, uint64_t num_blocks, + spdk_bdev_io_completion_cb cb, void *cb_arg) +{ + cb(g_bdev_io, !ut_spdk_bdev_unmap_blocks, cb_arg); + return ut_spdk_bdev_unmap_blocks; +} + +int ut_spdk_bdev_flush_blocks = 0; +bool ut_spdk_bdev_flush_blocks_mocked = false; +int +spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, + uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb, + void *cb_arg) +{ + cb(g_bdev_io, !ut_spdk_bdev_flush_blocks, cb_arg); + return ut_spdk_bdev_flush_blocks; +} + +int ut_spdk_bdev_reset = 0; +bool ut_spdk_bdev_reset_mocked = false; +int +spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, + spdk_bdev_io_completion_cb cb, void *cb_arg) +{ + cb(g_bdev_io, !ut_spdk_bdev_reset, cb_arg); + return ut_spdk_bdev_reset; +} + +bool g_completion_called = false; +void +spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status) +{ + bdev_io->internal.status = status; + g_completion_called = true; +} + +/* Global setup for all tests that share a bunch of preparation... */ +static int +test_setup(void) +{ + int i, rc; + + /* Prepare essential variables for test routines */ + g_bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct crypto_bdev_io)); + g_bdev_io->u.bdev.iovs = calloc(1, sizeof(struct iovec) * 128); + g_bdev_io->bdev = &g_crypto_bdev.crypto_bdev; + g_io_ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct crypto_io_channel)); + g_crypto_ch = (struct crypto_io_channel *)((uint8_t *)g_io_ch + sizeof(struct spdk_io_channel)); + g_io_ctx = (struct crypto_bdev_io *)g_bdev_io->driver_ctx; + memset(&g_device, 0, sizeof(struct vbdev_dev)); + memset(&g_crypto_bdev, 0, sizeof(struct vbdev_crypto)); + g_dev_qp.device = &g_device; + g_io_ctx->crypto_ch = g_crypto_ch; + g_io_ctx->crypto_bdev = &g_crypto_bdev; + g_crypto_ch->device_qp = &g_dev_qp; + TAILQ_INIT(&g_crypto_ch->pending_cry_ios); + TAILQ_INIT(&g_crypto_ch->queued_cry_ops); + + /* Allocate a real mbuf pool so we can test error paths */ + g_mbuf_mp = spdk_mempool_create("mbuf_mp", NUM_MBUFS, sizeof(struct rte_mbuf), + SPDK_MEMPOOL_DEFAULT_CACHE_SIZE, + SPDK_ENV_SOCKET_ID_ANY); + + /* Instead of allocating real rte mempools for these, it's easier and provides the + * same coverage just calloc them here. + */ + for (i = 0; i < MAX_TEST_BLOCKS; i++) { + rc = posix_memalign((void **)&g_test_crypto_ops[i], 64, + sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op) + + AES_CBC_IV_LENGTH + QUEUED_OP_LENGTH); + if (rc != 0) { + assert(false); + } + memset(g_test_crypto_ops[i], 0, sizeof(struct rte_crypto_op) + + sizeof(struct rte_crypto_sym_op) + QUEUED_OP_LENGTH); + } + return 0; +} + +/* Global teardown for all tests */ +static int +test_cleanup(void) +{ + int i; + + spdk_mempool_free(g_mbuf_mp); + for (i = 0; i < MAX_TEST_BLOCKS; i++) { + free(g_test_crypto_ops[i]); + } + free(g_bdev_io->u.bdev.iovs); + free(g_bdev_io); + free(g_io_ch); + return 0; +} + +static void +test_error_paths(void) +{ + /* Single element block size write, just to test error paths + * in vbdev_crypto_submit_request(). 
+ */ + g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; + g_bdev_io->u.bdev.iovcnt = 1; + g_bdev_io->u.bdev.num_blocks = 1; + g_bdev_io->u.bdev.iovs[0].iov_len = 512; + g_crypto_bdev.crypto_bdev.blocklen = 512; + g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE; + g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1; + + /* test failure of spdk_mempool_get_bulk(), will result in success because it + * will get queued. + */ + g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; + MOCK_SET(spdk_mempool_get, NULL); + vbdev_crypto_submit_request(g_io_ch, g_bdev_io); + CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); + + /* same thing but switch to reads to test error path in _crypto_complete_io() */ + g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ; + g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; + TAILQ_INSERT_TAIL(&g_crypto_ch->pending_cry_ios, g_bdev_io, module_link); + vbdev_crypto_submit_request(g_io_ch, g_bdev_io); + CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); + /* Now with the read_blocks failing */ + g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ; + g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; + MOCK_SET(spdk_bdev_readv_blocks, -1); + vbdev_crypto_submit_request(g_io_ch, g_bdev_io); + CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); + MOCK_SET(spdk_bdev_readv_blocks, 0); + MOCK_CLEAR(spdk_mempool_get); + + /* test failure of rte_crypto_op_bulk_alloc() */ + g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; + ut_rte_crypto_op_bulk_alloc = 0; + vbdev_crypto_submit_request(g_io_ch, g_bdev_io); + CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); + ut_rte_crypto_op_bulk_alloc = 1; + + /* test failure of rte_crypto_op_attach_sym_session() */ + g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; + ut_rte_crypto_op_attach_sym_session = -1; + vbdev_crypto_submit_request(g_io_ch, g_bdev_io); + CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); + ut_rte_crypto_op_attach_sym_session = 0; +} + +static void +test_simple_write(void) +{ + /* Single element block size write */ + g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; + g_bdev_io->u.bdev.iovcnt = 1; + g_bdev_io->u.bdev.num_blocks = 1; + g_bdev_io->u.bdev.offset_blocks = 0; + g_bdev_io->u.bdev.iovs[0].iov_len = 512; + g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_write; + g_crypto_bdev.crypto_bdev.blocklen = 512; + g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE; + g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1; + + vbdev_crypto_submit_request(g_io_ch, g_bdev_io); + CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); + CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1); + CU_ASSERT(g_io_ctx->aux_buf_iov.iov_len == 512); + CU_ASSERT(g_io_ctx->aux_buf_iov.iov_base != NULL); + CU_ASSERT(g_io_ctx->aux_offset_blocks == 0); + CU_ASSERT(g_io_ctx->aux_num_blocks == 1); + CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_write); + CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512); + CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL); + CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512); + CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0); + CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->userdata == g_bdev_io); + CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr != NULL); + CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == 512); + + spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src); + 
spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_dst); +} + +static void +test_simple_read(void) +{ + /* Single element block size read */ + g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; + g_bdev_io->u.bdev.iovcnt = 1; + g_bdev_io->u.bdev.num_blocks = 1; + g_bdev_io->u.bdev.iovs[0].iov_len = 512; + g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_read; + g_crypto_bdev.crypto_bdev.blocklen = 512; + g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ; + g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1; + + vbdev_crypto_submit_request(g_io_ch, g_bdev_io); + CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); + CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1); + CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_read); + CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512); + CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL); + CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512); + CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0); + CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->userdata == g_bdev_io); + CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL); + + spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src); +} + +static void +test_large_rw(void) +{ + unsigned block_len = 512; + unsigned num_blocks = CRYPTO_MAX_IO / block_len; + unsigned io_len = block_len * num_blocks; + unsigned i; + + /* Multi block size read, multi-element */ + g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; + g_bdev_io->u.bdev.iovcnt = 1; + g_bdev_io->u.bdev.num_blocks = num_blocks; + g_bdev_io->u.bdev.iovs[0].iov_len = io_len; + g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw; + g_crypto_bdev.crypto_bdev.blocklen = block_len; + g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ; + g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks; + + vbdev_crypto_submit_request(g_io_ch, g_bdev_io); + CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); + CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks); + + for (i = 0; i < num_blocks; i++) { + CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len)); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL); + CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len); + CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL); + spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src); + } + + /* Multi block size write, multi-element */ + g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; + g_bdev_io->u.bdev.iovcnt = 1; + g_bdev_io->u.bdev.num_blocks = num_blocks; + g_bdev_io->u.bdev.iovs[0].iov_len = io_len; + g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw; + g_crypto_bdev.crypto_bdev.blocklen = block_len; + g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE; + g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks; + + vbdev_crypto_submit_request(g_io_ch, g_bdev_io); + CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); + CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks); + + for (i = 0; i < num_blocks; i++) { + CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len)); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next 
== NULL); + CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len); + CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io); + CU_ASSERT(g_io_ctx->aux_buf_iov.iov_len == io_len); + CU_ASSERT(g_io_ctx->aux_buf_iov.iov_base != NULL); + CU_ASSERT(g_io_ctx->aux_offset_blocks == 0); + CU_ASSERT(g_io_ctx->aux_num_blocks == num_blocks); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr != NULL); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->data_len == block_len); + spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src); + spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_dst); + } +} + +static void +test_dev_full(void) +{ + struct vbdev_crypto_op *queued_op; + struct rte_crypto_sym_op *sym_op; + struct crypto_bdev_io *io_ctx; + + /* Two element block size read */ + g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; + g_bdev_io->u.bdev.iovcnt = 1; + g_bdev_io->u.bdev.num_blocks = 2; + g_bdev_io->u.bdev.iovs[0].iov_len = 512; + g_bdev_io->u.bdev.iovs[0].iov_base = (void *)0xDEADBEEF; + g_bdev_io->u.bdev.iovs[1].iov_len = 512; + g_bdev_io->u.bdev.iovs[1].iov_base = (void *)0xFEEDBEEF; + g_crypto_bdev.crypto_bdev.blocklen = 512; + g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ; + g_enqueue_mock = g_dequeue_mock = 1; + ut_rte_crypto_op_bulk_alloc = 2; + + g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true); + + vbdev_crypto_submit_request(g_io_ch, g_bdev_io); + CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); + CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 2); + sym_op = g_test_crypto_ops[0]->sym; + CU_ASSERT(sym_op->m_src->buf_addr == (void *)0xDEADBEEF); + CU_ASSERT(sym_op->m_src->data_len == 512); + CU_ASSERT(sym_op->m_src->next == NULL); + CU_ASSERT(sym_op->cipher.data.length == 512); + CU_ASSERT(sym_op->cipher.data.offset == 0); + CU_ASSERT(sym_op->m_src->userdata == g_bdev_io); + CU_ASSERT(sym_op->m_dst == NULL); + + /* make sure one got queued and confirm its values */ + CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == false); + queued_op = TAILQ_FIRST(&g_crypto_ch->queued_cry_ops); + sym_op = queued_op->crypto_op->sym; + TAILQ_REMOVE(&g_crypto_ch->queued_cry_ops, queued_op, link); + CU_ASSERT(queued_op->bdev_io == g_bdev_io); + CU_ASSERT(queued_op->crypto_op == g_test_crypto_ops[1]); + CU_ASSERT(sym_op->m_src->buf_addr == (void *)0xFEEDBEEF); + CU_ASSERT(sym_op->m_src->data_len == 512); + CU_ASSERT(sym_op->m_src->next == NULL); + CU_ASSERT(sym_op->cipher.data.length == 512); + CU_ASSERT(sym_op->cipher.data.offset == 0); + CU_ASSERT(sym_op->m_src->userdata == g_bdev_io); + CU_ASSERT(sym_op->m_dst == NULL); + CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true); + spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src); + spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[1]->sym->m_src); + + /* Non-busy reason for enqueue failure, all were rejected. 
*/ + g_enqueue_mock = 0; + g_test_crypto_ops[0]->status = RTE_CRYPTO_OP_STATUS_ERROR; + vbdev_crypto_submit_request(g_io_ch, g_bdev_io); + io_ctx = (struct crypto_bdev_io *)g_bdev_io->driver_ctx; + CU_ASSERT(io_ctx->bdev_io_status == SPDK_BDEV_IO_STATUS_FAILED); +} + +static void +test_crazy_rw(void) +{ + unsigned block_len = 512; + int num_blocks = 4; + int i; + + /* Multi block size read, single element, strange IOV makeup */ + g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; + g_bdev_io->u.bdev.iovcnt = 3; + g_bdev_io->u.bdev.num_blocks = num_blocks; + g_bdev_io->u.bdev.iovs[0].iov_len = 512; + g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw; + g_bdev_io->u.bdev.iovs[1].iov_len = 1024; + g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 512; + g_bdev_io->u.bdev.iovs[2].iov_len = 512; + g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 512 + 1024; + + g_crypto_bdev.crypto_bdev.blocklen = block_len; + g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ; + g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks; + + vbdev_crypto_submit_request(g_io_ch, g_bdev_io); + CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); + CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks); + + for (i = 0; i < num_blocks; i++) { + CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len)); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL); + CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len); + CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL); + spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src); + } + + /* Multi block size write, single element strange IOV makeup */ + num_blocks = 8; + g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; + g_bdev_io->u.bdev.iovcnt = 4; + g_bdev_io->u.bdev.num_blocks = num_blocks; + g_bdev_io->u.bdev.iovs[0].iov_len = 2048; + g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw; + g_bdev_io->u.bdev.iovs[1].iov_len = 512; + g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 2048; + g_bdev_io->u.bdev.iovs[2].iov_len = 512; + g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 2048 + 512; + g_bdev_io->u.bdev.iovs[3].iov_len = 1024; + g_bdev_io->u.bdev.iovs[3].iov_base = &test_crazy_rw + 2048 + 512 + 512; + + g_crypto_bdev.crypto_bdev.blocklen = block_len; + g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE; + g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks; + + vbdev_crypto_submit_request(g_io_ch, g_bdev_io); + CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); + CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks); + + for (i = 0; i < num_blocks; i++) { + CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len)); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL); + CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len); + CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == 
g_test_crypto_ops[i]->sym->m_dst); + spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src); + spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_dst); + } +} + +static void +test_passthru(void) +{ + /* Make sure these follow our completion callback, test success & fail. */ + g_bdev_io->type = SPDK_BDEV_IO_TYPE_UNMAP; + MOCK_SET(spdk_bdev_unmap_blocks, 0); + vbdev_crypto_submit_request(g_io_ch, g_bdev_io); + CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); + MOCK_SET(spdk_bdev_unmap_blocks, -1); + vbdev_crypto_submit_request(g_io_ch, g_bdev_io); + CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); + MOCK_CLEAR(spdk_bdev_unmap_blocks); + + g_bdev_io->type = SPDK_BDEV_IO_TYPE_FLUSH; + MOCK_SET(spdk_bdev_flush_blocks, 0); + vbdev_crypto_submit_request(g_io_ch, g_bdev_io); + CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); + MOCK_SET(spdk_bdev_flush_blocks, -1); + vbdev_crypto_submit_request(g_io_ch, g_bdev_io); + CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); + MOCK_CLEAR(spdk_bdev_flush_blocks); + + /* We should never get a WZ command, we report that we don't support it. */ + g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE_ZEROES; + vbdev_crypto_submit_request(g_io_ch, g_bdev_io); + CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); +} + +static void +test_reset(void) +{ + /* TODO: There are a few different ways to do this given that + * the code uses spdk_for_each_channel() to implement reset + * handling. Submitting w/o UT for this function for now and + * will follow up with something shortly. + */ +} + +static void +init_cleanup(void) +{ + spdk_mempool_free(g_mbuf_mp); + rte_mempool_free(g_session_mp); + g_mbuf_mp = NULL; + g_session_mp = NULL; + if (g_session_mp_priv != NULL) { + /* g_session_mp_priv may or may not be set depending on the DPDK version */ + rte_mempool_free(g_session_mp_priv); + } +} + +static void +test_initdrivers(void) +{ + int rc; + static struct spdk_mempool *orig_mbuf_mp; + static struct rte_mempool *orig_session_mp; + static struct rte_mempool *orig_session_mp_priv; + + /* These tests will alloc and free our g_mbuf_mp + * so save that off here and restore it after each test is over. + */ + orig_mbuf_mp = g_mbuf_mp; + orig_session_mp = g_session_mp; + orig_session_mp_priv = g_session_mp_priv; + + g_session_mp_priv = NULL; + g_session_mp = NULL; + g_mbuf_mp = NULL; + + /* No drivers available, not an error though */ + MOCK_SET(rte_cryptodev_count, 0); + rc = vbdev_crypto_init_crypto_drivers(); + CU_ASSERT(rc == 0); + CU_ASSERT(g_mbuf_mp == NULL); + CU_ASSERT(g_session_mp == NULL); + CU_ASSERT(g_session_mp_priv == NULL); + + /* Test failure of DPDK dev init. */ + MOCK_SET(rte_cryptodev_count, 2); + MOCK_SET(rte_vdev_init, -1); + rc = vbdev_crypto_init_crypto_drivers(); + CU_ASSERT(rc == -EINVAL); + CU_ASSERT(g_mbuf_mp == NULL); + CU_ASSERT(g_session_mp == NULL); + CU_ASSERT(g_session_mp_priv == NULL); + MOCK_SET(rte_vdev_init, 0); + + /* Can't create session pool. */ + MOCK_SET(spdk_mempool_create, NULL); + rc = vbdev_crypto_init_crypto_drivers(); + CU_ASSERT(rc == -ENOMEM); + CU_ASSERT(g_mbuf_mp == NULL); + CU_ASSERT(g_session_mp == NULL); + CU_ASSERT(g_session_mp_priv == NULL); + MOCK_CLEAR(spdk_mempool_create); + + /* Can't create op pool. 
*/ + MOCK_SET(rte_crypto_op_pool_create, NULL); + rc = vbdev_crypto_init_crypto_drivers(); + CU_ASSERT(rc == -ENOMEM); + CU_ASSERT(g_mbuf_mp == NULL); + CU_ASSERT(g_session_mp == NULL); + CU_ASSERT(g_session_mp_priv == NULL); + MOCK_SET(rte_crypto_op_pool_create, (struct rte_mempool *)1); + + /* Check resources are not sufficient */ + MOCK_CLEARED_ASSERT(spdk_mempool_create); + rc = vbdev_crypto_init_crypto_drivers(); + CU_ASSERT(rc == -EINVAL); + + /* Test crypto dev configure failure. */ + MOCK_SET(rte_cryptodev_device_count_by_driver, 2); + MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_AESNI); + MOCK_SET(rte_cryptodev_configure, -1); + MOCK_CLEARED_ASSERT(spdk_mempool_create); + rc = vbdev_crypto_init_crypto_drivers(); + MOCK_SET(rte_cryptodev_configure, 0); + CU_ASSERT(g_mbuf_mp == NULL); + CU_ASSERT(g_session_mp == NULL); + CU_ASSERT(g_session_mp_priv == NULL); + CU_ASSERT(rc == -EINVAL); + + /* Test failure of qp setup. */ + MOCK_SET(rte_cryptodev_queue_pair_setup, -1); + MOCK_CLEARED_ASSERT(spdk_mempool_create); + rc = vbdev_crypto_init_crypto_drivers(); + CU_ASSERT(rc == -EINVAL); + CU_ASSERT(g_mbuf_mp == NULL); + CU_ASSERT(g_session_mp == NULL); + CU_ASSERT(g_session_mp_priv == NULL); + MOCK_SET(rte_cryptodev_queue_pair_setup, 0); + + /* Test failure of dev start. */ + MOCK_SET(rte_cryptodev_start, -1); + MOCK_CLEARED_ASSERT(spdk_mempool_create); + rc = vbdev_crypto_init_crypto_drivers(); + CU_ASSERT(rc == -EINVAL); + CU_ASSERT(g_mbuf_mp == NULL); + CU_ASSERT(g_session_mp == NULL); + CU_ASSERT(g_session_mp_priv == NULL); + MOCK_SET(rte_cryptodev_start, 0); + + /* Test bogus PMD */ + MOCK_CLEARED_ASSERT(spdk_mempool_create); + MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_BOGUS_PMD); + rc = vbdev_crypto_init_crypto_drivers(); + CU_ASSERT(g_mbuf_mp == NULL); + CU_ASSERT(g_session_mp == NULL); + CU_ASSERT(rc == -EINVAL); + + /* Test happy path QAT. */ + MOCK_CLEARED_ASSERT(spdk_mempool_create); + MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_QAT); + rc = vbdev_crypto_init_crypto_drivers(); + CU_ASSERT(g_mbuf_mp != NULL); + CU_ASSERT(g_session_mp != NULL); + init_cleanup(); + CU_ASSERT(rc == 0); + + /* Test happy path AESNI. */ + MOCK_CLEARED_ASSERT(spdk_mempool_create); + MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_AESNI); + rc = vbdev_crypto_init_crypto_drivers(); + init_cleanup(); + CU_ASSERT(rc == 0); + + /* restore our initial values. */ + g_mbuf_mp = orig_mbuf_mp; + g_session_mp = orig_session_mp; + g_session_mp_priv = orig_session_mp_priv; +} + +static void +test_crypto_op_complete(void) +{ + /* Make sure completion code respects failure. */ + g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; + g_completion_called = false; + _crypto_operation_complete(g_bdev_io); + CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); + CU_ASSERT(g_completion_called == true); + + /* Test read completion. */ + g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; + g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ; + g_completion_called = false; + _crypto_operation_complete(g_bdev_io); + CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); + CU_ASSERT(g_completion_called == true); + + /* Test write completion success. 
*/ + g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; + g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE; + g_completion_called = false; + MOCK_SET(spdk_bdev_writev_blocks, 0); + _crypto_operation_complete(g_bdev_io); + CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); + CU_ASSERT(g_completion_called == true); + + /* Test write completion failed. */ + g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; + g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE; + g_completion_called = false; + MOCK_SET(spdk_bdev_writev_blocks, -1); + _crypto_operation_complete(g_bdev_io); + CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); + CU_ASSERT(g_completion_called == true); + + /* Test bogus type for this completion. */ + g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; + g_bdev_io->type = SPDK_BDEV_IO_TYPE_RESET; + g_completion_called = false; + _crypto_operation_complete(g_bdev_io); + CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); + CU_ASSERT(g_completion_called == true); +} + +static void +test_supported_io(void) +{ + void *ctx = NULL; + bool rc = true; + + /* Make sure we always report false to WZ, we need the bdev layer to + * send real 0's so we can encrypt/decrypt them. + */ + rc = vbdev_crypto_io_type_supported(ctx, SPDK_BDEV_IO_TYPE_WRITE_ZEROES); + CU_ASSERT(rc == false); +} + +static void +test_poller(void) +{ + int rc; + struct rte_mbuf *src_mbufs[2]; + struct vbdev_crypto_op *op_to_resubmit; + + /* test regular 1 op to dequeue and complete */ + g_dequeue_mock = g_enqueue_mock = 1; + spdk_mempool_get_bulk(g_mbuf_mp, (void **)&src_mbufs[0], 1); + g_test_crypto_ops[0]->sym->m_src = src_mbufs[0]; + g_test_crypto_ops[0]->sym->m_src->userdata = g_bdev_io; + g_test_crypto_ops[0]->sym->m_dst = NULL; + g_io_ctx->cryop_cnt_remaining = 1; + g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ; + rc = crypto_dev_poller(g_crypto_ch); + CU_ASSERT(rc == 1); + + /* We have nothing dequeued but have some to resubmit */ + g_dequeue_mock = 0; + CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true); + + /* add an op to the queued list. 
*/ + g_resubmit_test = true; + op_to_resubmit = (struct vbdev_crypto_op *)((uint8_t *)g_test_crypto_ops[0] + QUEUED_OP_OFFSET); + op_to_resubmit->crypto_op = (void *)0xDEADBEEF; + op_to_resubmit->bdev_io = g_bdev_io; + TAILQ_INSERT_TAIL(&g_crypto_ch->queued_cry_ops, + op_to_resubmit, + link); + CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == false); + rc = crypto_dev_poller(g_crypto_ch); + g_resubmit_test = false; + CU_ASSERT(rc == 0); + CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true); + + /* 2 to dequeue but 2nd one failed */ + g_dequeue_mock = g_enqueue_mock = 2; + g_io_ctx->cryop_cnt_remaining = 2; + spdk_mempool_get_bulk(g_mbuf_mp, (void **)&src_mbufs[0], 2); + g_test_crypto_ops[0]->sym->m_src = src_mbufs[0]; + g_test_crypto_ops[0]->sym->m_src->userdata = g_bdev_io; + g_test_crypto_ops[0]->sym->m_dst = NULL; + g_test_crypto_ops[0]->status = RTE_CRYPTO_OP_STATUS_SUCCESS; + g_test_crypto_ops[1]->sym->m_src = src_mbufs[1]; + g_test_crypto_ops[1]->sym->m_src->userdata = g_bdev_io; + g_test_crypto_ops[1]->sym->m_dst = NULL; + g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; + rc = crypto_dev_poller(g_crypto_ch); + CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); + CU_ASSERT(rc == 2); +} + +/* Helper function for test_assign_device_qp() */ +static void +_clear_device_qp_lists(void) +{ + struct device_qp *device_qp = NULL; + + while (!TAILQ_EMPTY(&g_device_qp_qat)) { + device_qp = TAILQ_FIRST(&g_device_qp_qat); + TAILQ_REMOVE(&g_device_qp_qat, device_qp, link); + free(device_qp); + + } + CU_ASSERT(TAILQ_EMPTY(&g_device_qp_qat) == true); + while (!TAILQ_EMPTY(&g_device_qp_aesni_mb)) { + device_qp = TAILQ_FIRST(&g_device_qp_aesni_mb); + TAILQ_REMOVE(&g_device_qp_aesni_mb, device_qp, link); + free(device_qp); + } + CU_ASSERT(TAILQ_EMPTY(&g_device_qp_aesni_mb) == true); +} + +/* Helper function for test_assign_device_qp() */ +static void +_check_expected_values(struct vbdev_crypto *crypto_bdev, struct device_qp *device_qp, + struct crypto_io_channel *crypto_ch, uint8_t expected_index, + uint8_t current_index) +{ + _assign_device_qp(&g_crypto_bdev, device_qp, g_crypto_ch); + CU_ASSERT(g_crypto_ch->device_qp->index == expected_index); + CU_ASSERT(g_next_qat_index == current_index); +} + +static void +test_assign_device_qp(void) +{ + struct device_qp *device_qp = NULL; + int i; + + /* start with a known state, clear the device/qp lists */ + _clear_device_qp_lists(); + + /* make sure that one AESNI_MB qp is found */ + device_qp = calloc(1, sizeof(struct device_qp)); + TAILQ_INSERT_TAIL(&g_device_qp_aesni_mb, device_qp, link); + g_crypto_ch->device_qp = NULL; + g_crypto_bdev.drv_name = AESNI_MB; + _assign_device_qp(&g_crypto_bdev, device_qp, g_crypto_ch); + CU_ASSERT(g_crypto_ch->device_qp != NULL); + + /* QAT testing is more complex as the code under test load balances by + * assigning each subsequent device/qp to every QAT_VF_SPREAD modulo + * g_qat_total_qp. For the current latest QAT we'll have 48 virtual functions + * each with 2 qp so the "spread" between assignments is 32. + */ + g_qat_total_qp = 96; + for (i = 0; i < g_qat_total_qp; i++) { + device_qp = calloc(1, sizeof(struct device_qp)); + device_qp->index = i; + TAILQ_INSERT_TAIL(&g_device_qp_qat, device_qp, link); + } + g_crypto_ch->device_qp = NULL; + g_crypto_bdev.drv_name = QAT; + + /* First assignment will assign to 0 and next at 32. 
*/ + _check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch, + 0, QAT_VF_SPREAD); + + /* Second assignment will assign to 32 and next at 64. */ + _check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch, + QAT_VF_SPREAD, QAT_VF_SPREAD * 2); + + /* Third assignment will assign to 64 and next at 0. */ + _check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch, + QAT_VF_SPREAD * 2, 0); + + /* Fourth assignment will assign to 1 and next at 33. */ + _check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch, + 1, QAT_VF_SPREAD + 1); + + _clear_device_qp_lists(); +} + +int +main(int argc, char **argv) +{ + CU_pSuite suite = NULL; + unsigned int num_failures; + + CU_set_error_action(CUEA_ABORT); + CU_initialize_registry(); + + suite = CU_add_suite("crypto", test_setup, test_cleanup); + CU_ADD_TEST(suite, test_error_paths); + CU_ADD_TEST(suite, test_simple_write); + CU_ADD_TEST(suite, test_simple_read); + CU_ADD_TEST(suite, test_large_rw); + CU_ADD_TEST(suite, test_dev_full); + CU_ADD_TEST(suite, test_crazy_rw); + CU_ADD_TEST(suite, test_passthru); + CU_ADD_TEST(suite, test_initdrivers); + CU_ADD_TEST(suite, test_crypto_op_complete); + CU_ADD_TEST(suite, test_supported_io); + CU_ADD_TEST(suite, test_reset); + CU_ADD_TEST(suite, test_poller); + CU_ADD_TEST(suite, test_assign_device_qp); + + CU_basic_set_mode(CU_BRM_VERBOSE); + CU_basic_run_tests(); + num_failures = CU_get_number_of_failures(); + CU_cleanup_registry(); + return num_failures; +} diff --git a/src/spdk/test/unit/lib/bdev/gpt/Makefile b/src/spdk/test/unit/lib/bdev/gpt/Makefile new file mode 100644 index 000000000..2fad9ba03 --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/gpt/Makefile @@ -0,0 +1,44 @@ +# +# BSD LICENSE +# +# Copyright (c) Intel Corporation. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..) 
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk + +DIRS-y = gpt.c + +.PHONY: all clean $(DIRS-y) + +all: $(DIRS-y) +clean: $(DIRS-y) + +include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk diff --git a/src/spdk/test/unit/lib/bdev/gpt/gpt.c/.gitignore b/src/spdk/test/unit/lib/bdev/gpt/gpt.c/.gitignore new file mode 100644 index 000000000..74d476f5c --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/gpt/gpt.c/.gitignore @@ -0,0 +1 @@ +gpt_ut diff --git a/src/spdk/test/unit/lib/bdev/gpt/gpt.c/Makefile b/src/spdk/test/unit/lib/bdev/gpt/gpt.c/Makefile new file mode 100644 index 000000000..202fe9cb4 --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/gpt/gpt.c/Makefile @@ -0,0 +1,38 @@ +# +# BSD LICENSE +# +# Copyright (c) Intel Corporation. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../../..) + +TEST_FILE = gpt_ut.c + +include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk diff --git a/src/spdk/test/unit/lib/bdev/gpt/gpt.c/gpt_ut.c b/src/spdk/test/unit/lib/bdev/gpt/gpt.c/gpt_ut.c new file mode 100644 index 000000000..8095fce19 --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/gpt/gpt.c/gpt_ut.c @@ -0,0 +1,363 @@ +/*- + * BSD LICENSE + * + * Copyright (c) Intel Corporation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "spdk_cunit.h" + +#include "common/lib/test_env.c" + +#include "bdev/gpt/gpt.c" + +static void +test_check_mbr(void) +{ + struct spdk_gpt *gpt; + struct spdk_mbr *mbr; + unsigned char a[SPDK_GPT_BUFFER_SIZE]; + int re; + + /* Set gpt is NULL */ + re = gpt_parse_mbr(NULL); + CU_ASSERT(re == -1); + + /* Set gpt->buf is NULL */ + gpt = calloc(1, sizeof(*gpt)); + SPDK_CU_ASSERT_FATAL(gpt != NULL); + re = gpt_parse_mbr(gpt); + CU_ASSERT(re == -1); + + /* Set *gpt is "aaa...", all are mismatch include mbr_signature */ + memset(a, 'a', sizeof(a)); + gpt->buf = &a[0]; + re = gpt_check_mbr(gpt); + CU_ASSERT(re == -1); + + /* Set mbr->mbr_signature matched, start lba mismatch */ + mbr = (struct spdk_mbr *)gpt->buf; + mbr->mbr_signature = 0xAA55; + re = gpt_check_mbr(gpt); + CU_ASSERT(re == -1); + + /* Set mbr->partitions[0].start lba matched, os_type mismatch */ + mbr->partitions[0].start_lba = 1; + re = gpt_check_mbr(gpt); + CU_ASSERT(re == -1); + + /* Set mbr->partitions[0].os_type matched, size_lba mismatch */ + mbr->partitions[0].os_type = 0xEE; + re = gpt_check_mbr(gpt); + CU_ASSERT(re == -1); + + /* Set mbr->partitions[0].size_lba matched, passing case */ + mbr->partitions[0].size_lba = 0xFFFFFFFF; + re = gpt_check_mbr(gpt); + CU_ASSERT(re == 0); + + free(gpt); +} + +static void +test_read_header(void) +{ + struct spdk_gpt *gpt; + struct spdk_gpt_header *head; + unsigned char a[SPDK_GPT_BUFFER_SIZE]; + int re; + + /* gpt_read_header(NULL) does not exist, NULL is filtered out in gpt_parse_mbr() */ + gpt = calloc(1, sizeof(*gpt)); + SPDK_CU_ASSERT_FATAL(gpt != NULL); + gpt->parse_phase = SPDK_GPT_PARSE_PHASE_PRIMARY; + gpt->sector_size = 512; + + /* Set *gpt is "aaa..." 
*/ + memset(a, 'a', sizeof(a)); + gpt->buf = &a[0]; + gpt->buf_size = sizeof(a); + + /* Set header_size mismatch */ + gpt->sector_size = 512; + head = (struct spdk_gpt_header *)(gpt->buf + GPT_PRIMARY_PARTITION_TABLE_LBA * gpt->sector_size); + to_le32(&head->header_size, 0x258); + re = gpt_read_header(gpt); + CU_ASSERT(re == -1); + + /* Set head->header_size matched, header_crc32 mismatch */ + head->header_size = sizeof(*head); + to_le32(&head->header_crc32, 0x22D18C80); + re = gpt_read_header(gpt); + CU_ASSERT(re == -1); + + /* Set head->header_crc32 matched, gpt_signature mismatch */ + to_le32(&head->header_crc32, 0xC5B2117E); + re = gpt_read_header(gpt); + CU_ASSERT(re == -1); + + /* Set head->gpt_signature matched, head->my_lba mismatch */ + to_le32(&head->header_crc32, 0xD637335A); + head->gpt_signature[0] = 'E'; + head->gpt_signature[1] = 'F'; + head->gpt_signature[2] = 'I'; + head->gpt_signature[3] = ' '; + head->gpt_signature[4] = 'P'; + head->gpt_signature[5] = 'A'; + head->gpt_signature[6] = 'R'; + head->gpt_signature[7] = 'T'; + re = gpt_read_header(gpt); + CU_ASSERT(re == -1); + + /* Set head->my_lba matched, lba_end usable_lba mismatch */ + to_le32(&head->header_crc32, 0xB3CDB2D2); + to_le64(&head->my_lba, 0x1); + re = gpt_read_header(gpt); + CU_ASSERT(re == -1); + + /* Set gpt->lba_end usable_lba matched, passing case */ + to_le32(&head->header_crc32, 0x5531F2F0); + to_le64(&gpt->lba_start, 0x0); + to_le64(&gpt->lba_end, 0x2E935FFE); + to_le64(&head->first_usable_lba, 0xA); + to_le64(&head->last_usable_lba, 0xF4240); + re = gpt_read_header(gpt); + CU_ASSERT(re == 0); + + free(gpt); +} + +static void +test_read_partitions(void) +{ + struct spdk_gpt *gpt; + struct spdk_gpt_header *head; + unsigned char a[SPDK_GPT_BUFFER_SIZE]; + int re; + + /* gpt_read_partitions(NULL) does not exist, NULL is filtered out in gpt_parse_mbr() */ + gpt = calloc(1, sizeof(*gpt)); + SPDK_CU_ASSERT_FATAL(gpt != NULL); + gpt->parse_phase = SPDK_GPT_PARSE_PHASE_PRIMARY; + gpt->sector_size = 512; + + /* Set *gpt is "aaa..." 
*/ + memset(a, 'a', sizeof(a)); + gpt->buf = &a[0]; + gpt->buf_size = sizeof(a); + + /* Set num_partition_entries exceeds Max value of entries GPT supported */ + gpt->sector_size = 512; + head = (struct spdk_gpt_header *)(gpt->buf + GPT_PRIMARY_PARTITION_TABLE_LBA * gpt->sector_size); + gpt->header = head; + to_le32(&head->num_partition_entries, 0x100); + re = gpt_read_partitions(gpt); + CU_ASSERT(re == -1); + + /* Set num_partition_entries within Max value, size_of_partition_entry mismatch */ + to_le32(&head->header_crc32, 0x573857BE); + to_le32(&head->num_partition_entries, 0x40); + to_le32(&head->size_of_partition_entry, 0x0); + re = gpt_read_partitions(gpt); + CU_ASSERT(re == -1); + + /* Set size_of_partition_entry matched, partition_entry_lba mismatch */ + to_le32(&head->header_crc32, 0x5279B712); + to_le32(&head->size_of_partition_entry, 0x80); + to_le64(&head->partition_entry_lba, 0x64); + re = gpt_read_partitions(gpt); + CU_ASSERT(re == -1); + + /* Set partition_entry_lba matched, partition_entry_array_crc32 mismatch */ + to_le32(&head->header_crc32, 0xEC093B43); + to_le64(&head->partition_entry_lba, 0x20); + to_le32(&head->partition_entry_array_crc32, 0x0); + re = gpt_read_partitions(gpt); + CU_ASSERT(re == -1); + + /* Set partition_entry_array_crc32 matched, passing case */ + to_le32(&head->header_crc32, 0xE1A08822); + to_le32(&head->partition_entry_array_crc32, 0xEBEE44FB); + to_le32(&head->num_partition_entries, 0x80); + re = gpt_read_partitions(gpt); + CU_ASSERT(re == 0); + + free(gpt); +} + +static void +test_parse_mbr_and_primary(void) +{ + struct spdk_gpt *gpt; + struct spdk_mbr *mbr; + struct spdk_gpt_header *head; + unsigned char a[SPDK_GPT_BUFFER_SIZE]; + int re; + + /* Set gpt is NULL */ + re = gpt_parse_mbr(NULL); + CU_ASSERT(re == -1); + + /* Set gpt->buf is NULL */ + gpt = calloc(1, sizeof(*gpt)); + SPDK_CU_ASSERT_FATAL(gpt != NULL); + gpt->parse_phase = SPDK_GPT_PARSE_PHASE_PRIMARY; + gpt->sector_size = 512; + re = gpt_parse_mbr(gpt); + CU_ASSERT(re == -1); + + /* Set *gpt is "aaa...", check_mbr failed */ + memset(a, 'a', sizeof(a)); + gpt->buf = &a[0]; + gpt->buf_size = sizeof(a); + re = gpt_parse_mbr(gpt); + CU_ASSERT(re == -1); + + /* Set check_mbr passed */ + mbr = (struct spdk_mbr *)gpt->buf; + mbr->mbr_signature = 0xAA55; + mbr->partitions[0].start_lba = 1; + mbr->partitions[0].os_type = 0xEE; + mbr->partitions[0].size_lba = 0xFFFFFFFF; + re = gpt_parse_mbr(gpt); + CU_ASSERT(re == 0); + + /* Expect read_header failed */ + re = gpt_parse_partition_table(gpt); + CU_ASSERT(re == -1); + + /* Set read_header passed, read_partitions failed */ + head = (struct spdk_gpt_header *)(gpt->buf + GPT_PRIMARY_PARTITION_TABLE_LBA * gpt->sector_size); + head->header_size = sizeof(*head); + head->gpt_signature[0] = 'E'; + head->gpt_signature[1] = 'F'; + head->gpt_signature[2] = 'I'; + head->gpt_signature[3] = ' '; + head->gpt_signature[4] = 'P'; + head->gpt_signature[5] = 'A'; + head->gpt_signature[6] = 'R'; + head->gpt_signature[7] = 'T'; + to_le32(&head->header_crc32, 0x5531F2F0); + to_le64(&head->my_lba, 0x1); + to_le64(&gpt->lba_start, 0x0); + to_le64(&gpt->lba_end, 0x2E935FFE); + to_le64(&head->first_usable_lba, 0xA); + to_le64(&head->last_usable_lba, 0xF4240); + re = gpt_parse_partition_table(gpt); + CU_ASSERT(re == -1); + + /* Set read_partitions passed, all passed */ + to_le32(&head->size_of_partition_entry, 0x80); + to_le64(&head->partition_entry_lba, 0x20); + to_le32(&head->header_crc32, 0x845A09AA); + to_le32(&head->partition_entry_array_crc32, 0xEBEE44FB); + 
to_le32(&head->num_partition_entries, 0x80); + re = gpt_parse_partition_table(gpt); + CU_ASSERT(re == 0); + + free(gpt); +} + +static void +test_parse_secondary(void) +{ + struct spdk_gpt *gpt; + struct spdk_gpt_header *head; + unsigned char a[SPDK_GPT_BUFFER_SIZE]; + int re; + + /* gpt_parse_partition_table(NULL) does not exist, NULL is filtered out in gpt_parse_mbr() */ + gpt = calloc(1, sizeof(*gpt)); + SPDK_CU_ASSERT_FATAL(gpt != NULL); + gpt->parse_phase = SPDK_GPT_PARSE_PHASE_SECONDARY; + gpt->sector_size = 512; + + /* Set *gpt is "aaa...", read_header failed */ + memset(a, 'a', sizeof(a)); + gpt->buf = &a[0]; + gpt->buf_size = sizeof(a); + re = gpt_parse_partition_table(gpt); + CU_ASSERT(re == -1); + + /* Set read_header passed, read_partitions failed */ + head = (struct spdk_gpt_header *)(gpt->buf + gpt->buf_size - gpt->sector_size); + head->header_size = sizeof(*head); + head->gpt_signature[0] = 'E'; + head->gpt_signature[1] = 'F'; + head->gpt_signature[2] = 'I'; + head->gpt_signature[3] = ' '; + head->gpt_signature[4] = 'P'; + head->gpt_signature[5] = 'A'; + head->gpt_signature[6] = 'R'; + head->gpt_signature[7] = 'T'; + to_le32(&head->header_crc32, 0xAA68A167); + to_le64(&head->my_lba, 0x63FFFFF); + to_le64(&gpt->lba_start, 0x0); + to_le64(&gpt->lba_end, 0x63FFFFF); + to_le64(&gpt->total_sectors, 0x6400000); + to_le64(&head->first_usable_lba, 0xA); + to_le64(&head->last_usable_lba, 0x63FFFDE); + re = gpt_parse_partition_table(gpt); + CU_ASSERT(re == -1); + + /* Set read_partitions passed, all passed */ + to_le32(&head->size_of_partition_entry, 0x80); + to_le64(&head->partition_entry_lba, 0x63FFFDF); + to_le32(&head->header_crc32, 0x204129E8); + to_le32(&head->partition_entry_array_crc32, 0xEBEE44FB); + to_le32(&head->num_partition_entries, 0x80); + re = gpt_parse_partition_table(gpt); + CU_ASSERT(re == 0); + + free(gpt); +} + +int +main(int argc, char **argv) +{ + CU_pSuite suite = NULL; + unsigned int num_failures; + + CU_set_error_action(CUEA_ABORT); + CU_initialize_registry(); + + suite = CU_add_suite("gpt_parse", NULL, NULL); + + CU_ADD_TEST(suite, test_parse_mbr_and_primary); + CU_ADD_TEST(suite, test_parse_secondary); + CU_ADD_TEST(suite, test_check_mbr); + CU_ADD_TEST(suite, test_read_header); + CU_ADD_TEST(suite, test_read_partitions); + + CU_basic_set_mode(CU_BRM_VERBOSE); + CU_basic_run_tests(); + num_failures = CU_get_number_of_failures(); + CU_cleanup_registry(); + return num_failures; +} diff --git a/src/spdk/test/unit/lib/bdev/mt/Makefile b/src/spdk/test/unit/lib/bdev/mt/Makefile new file mode 100644 index 000000000..a19b345aa --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/mt/Makefile @@ -0,0 +1,44 @@ +# +# BSD LICENSE +# +# Copyright (c) Intel Corporation. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..) +include $(SPDK_ROOT_DIR)/mk/spdk.common.mk + +DIRS-y = bdev.c + +.PHONY: all clean $(DIRS-y) + +all: $(DIRS-y) +clean: $(DIRS-y) + +include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk diff --git a/src/spdk/test/unit/lib/bdev/mt/bdev.c/.gitignore b/src/spdk/test/unit/lib/bdev/mt/bdev.c/.gitignore new file mode 100644 index 000000000..a5a22d0d3 --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/mt/bdev.c/.gitignore @@ -0,0 +1 @@ +bdev_ut diff --git a/src/spdk/test/unit/lib/bdev/mt/bdev.c/Makefile b/src/spdk/test/unit/lib/bdev/mt/bdev.c/Makefile new file mode 100644 index 000000000..46b2987ae --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/mt/bdev.c/Makefile @@ -0,0 +1,38 @@ +# +# BSD LICENSE +# +# Copyright (c) Intel Corporation. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../../..) + +TEST_FILE = bdev_ut.c + +include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk diff --git a/src/spdk/test/unit/lib/bdev/mt/bdev.c/bdev_ut.c b/src/spdk/test/unit/lib/bdev/mt/bdev.c/bdev_ut.c new file mode 100644 index 000000000..351404a37 --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/mt/bdev.c/bdev_ut.c @@ -0,0 +1,1994 @@ +/*- + * BSD LICENSE + * + * Copyright (c) Intel Corporation. 
+ * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "spdk_cunit.h" + +#include "common/lib/ut_multithread.c" +#include "unit/lib/json_mock.c" + +#include "spdk/config.h" +/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */ +#undef SPDK_CONFIG_VTUNE + +#include "bdev/bdev.c" + +#define BDEV_UT_NUM_THREADS 3 + +DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, (struct spdk_conf *cp, + const char *name), NULL); +DEFINE_STUB(spdk_conf_section_get_nmval, char *, + (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL); +DEFINE_STUB(spdk_conf_section_get_intval, int, (struct spdk_conf_section *sp, const char *key), -1); + +struct spdk_trace_histories *g_trace_histories; +DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn)); +DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix)); +DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix)); +DEFINE_STUB_V(spdk_trace_register_description, (const char *name, + uint16_t tpoint_id, uint8_t owner_type, + uint8_t object_type, uint8_t new_object, + uint8_t arg1_type, const char *arg1_name)); +DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id, + uint32_t size, uint64_t object_id, uint64_t arg1)); +DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0); +DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL); + +struct ut_bdev { + struct spdk_bdev bdev; + void *io_target; +}; + +struct ut_bdev_channel { + TAILQ_HEAD(, spdk_bdev_io) outstanding_io; + uint32_t outstanding_cnt; + uint32_t avail_cnt; +}; + +int g_io_device; +struct ut_bdev g_bdev; +struct spdk_bdev_desc *g_desc; +bool g_teardown_done = false; +bool g_get_io_channel = true; +bool g_create_ch = true; +bool g_init_complete_called = false; +bool g_fini_start_called = true; +int g_status = 0; +int g_count = 0; +struct spdk_histogram_data *g_histogram = NULL; + +static int 
+stub_create_ch(void *io_device, void *ctx_buf) +{ + struct ut_bdev_channel *ch = ctx_buf; + + if (g_create_ch == false) { + return -1; + } + + TAILQ_INIT(&ch->outstanding_io); + ch->outstanding_cnt = 0; + /* + * When avail gets to 0, the submit_request function will return ENOMEM. + * Most tests do not want ENOMEM to occur, so by default set this to a + * big value that won't get hit. The ENOMEM tests can then override this + * value to something much smaller to induce ENOMEM conditions. + */ + ch->avail_cnt = 2048; + return 0; +} + +static void +stub_destroy_ch(void *io_device, void *ctx_buf) +{ +} + +static struct spdk_io_channel * +stub_get_io_channel(void *ctx) +{ + struct ut_bdev *ut_bdev = ctx; + + if (g_get_io_channel == true) { + return spdk_get_io_channel(ut_bdev->io_target); + } else { + return NULL; + } +} + +static int +stub_destruct(void *ctx) +{ + return 0; +} + +static void +stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io) +{ + struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch); + struct spdk_bdev_io *io; + + if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) { + while (!TAILQ_EMPTY(&ch->outstanding_io)) { + io = TAILQ_FIRST(&ch->outstanding_io); + TAILQ_REMOVE(&ch->outstanding_io, io, module_link); + ch->outstanding_cnt--; + spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_ABORTED); + ch->avail_cnt++; + } + } else if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) { + TAILQ_FOREACH(io, &ch->outstanding_io, module_link) { + if (io == bdev_io->u.abort.bio_to_abort) { + TAILQ_REMOVE(&ch->outstanding_io, io, module_link); + ch->outstanding_cnt--; + spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_ABORTED); + ch->avail_cnt++; + + spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS); + return; + } + } + + spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED); + return; + } + + if (ch->avail_cnt > 0) { + TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link); + ch->outstanding_cnt++; + ch->avail_cnt--; + } else { + spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM); + } +} + +static uint32_t +stub_complete_io(void *io_target, uint32_t num_to_complete) +{ + struct spdk_io_channel *_ch = spdk_get_io_channel(io_target); + struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch); + struct spdk_bdev_io *io; + bool complete_all = (num_to_complete == 0); + uint32_t num_completed = 0; + + while (complete_all || num_completed < num_to_complete) { + if (TAILQ_EMPTY(&ch->outstanding_io)) { + break; + } + io = TAILQ_FIRST(&ch->outstanding_io); + TAILQ_REMOVE(&ch->outstanding_io, io, module_link); + ch->outstanding_cnt--; + spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_SUCCESS); + ch->avail_cnt++; + num_completed++; + } + + spdk_put_io_channel(_ch); + return num_completed; +} + +static bool +stub_io_type_supported(void *ctx, enum spdk_bdev_io_type type) +{ + return true; +} + +static struct spdk_bdev_fn_table fn_table = { + .get_io_channel = stub_get_io_channel, + .destruct = stub_destruct, + .submit_request = stub_submit_request, + .io_type_supported = stub_io_type_supported, +}; + +struct spdk_bdev_module bdev_ut_if; + +static int +module_init(void) +{ + spdk_bdev_module_init_done(&bdev_ut_if); + return 0; +} + +static void +module_fini(void) +{ +} + +static void +init_complete(void) +{ + g_init_complete_called = true; +} + +static void +fini_start(void) +{ + g_fini_start_called = true; +} + +struct spdk_bdev_module bdev_ut_if = { + .name = "bdev_ut", + .module_init = module_init, + .module_fini = module_fini, + .async_init = true, 
.init_complete = init_complete, + .fini_start = fini_start, +}; + +SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if) + +static void +register_bdev(struct ut_bdev *ut_bdev, char *name, void *io_target) +{ + memset(ut_bdev, 0, sizeof(*ut_bdev)); + + ut_bdev->io_target = io_target; + ut_bdev->bdev.ctxt = ut_bdev; + ut_bdev->bdev.name = name; + ut_bdev->bdev.fn_table = &fn_table; + ut_bdev->bdev.module = &bdev_ut_if; + ut_bdev->bdev.blocklen = 4096; + ut_bdev->bdev.blockcnt = 1024; + + spdk_bdev_register(&ut_bdev->bdev); +} + +static void +unregister_bdev(struct ut_bdev *ut_bdev) +{ + /* Handle any deferred messages. */ + poll_threads(); + spdk_bdev_unregister(&ut_bdev->bdev, NULL, NULL); +} + +static void +bdev_init_cb(void *done, int rc) +{ + CU_ASSERT(rc == 0); + *(bool *)done = true; +} + +static void +setup_test(void) +{ + bool done = false; + + allocate_cores(BDEV_UT_NUM_THREADS); + allocate_threads(BDEV_UT_NUM_THREADS); + set_thread(0); + spdk_bdev_initialize(bdev_init_cb, &done); + spdk_io_device_register(&g_io_device, stub_create_ch, stub_destroy_ch, + sizeof(struct ut_bdev_channel), NULL); + register_bdev(&g_bdev, "ut_bdev", &g_io_device); + spdk_bdev_open(&g_bdev.bdev, true, NULL, NULL, &g_desc); +} + +static void +finish_cb(void *cb_arg) +{ + g_teardown_done = true; +} + +static void +teardown_test(void) +{ + set_thread(0); + g_teardown_done = false; + spdk_bdev_close(g_desc); + g_desc = NULL; + unregister_bdev(&g_bdev); + spdk_io_device_unregister(&g_io_device, NULL); + spdk_bdev_finish(finish_cb, NULL); + poll_threads(); + memset(&g_bdev, 0, sizeof(g_bdev)); + CU_ASSERT(g_teardown_done == true); + g_teardown_done = false; + free_threads(); + free_cores(); +} + +static uint32_t +bdev_io_tailq_cnt(bdev_io_tailq_t *tailq) +{ + struct spdk_bdev_io *io; + uint32_t cnt = 0; + + TAILQ_FOREACH(io, tailq, internal.link) { + cnt++; + } + + return cnt; +} + +static void +basic(void) +{ + g_init_complete_called = false; + setup_test(); + CU_ASSERT(g_init_complete_called == true); + + set_thread(0); + + g_get_io_channel = false; + g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc); + CU_ASSERT(g_ut_threads[0].ch == NULL); + + g_get_io_channel = true; + g_create_ch = false; + g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc); + CU_ASSERT(g_ut_threads[0].ch == NULL); + + g_get_io_channel = true; + g_create_ch = true; + g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc); + CU_ASSERT(g_ut_threads[0].ch != NULL); + spdk_put_io_channel(g_ut_threads[0].ch); + + g_fini_start_called = false; + teardown_test(); + CU_ASSERT(g_fini_start_called == true); +} + +static void +_bdev_removed(void *done) +{ + *(bool *)done = true; +} + +static void +_bdev_unregistered(void *done, int rc) +{ + CU_ASSERT(rc == 0); + *(bool *)done = true; +} + +static void +unregister_and_close(void) +{ + bool done, remove_notify; + struct spdk_bdev_desc *desc = NULL; + + setup_test(); + set_thread(0); + + /* setup_test() automatically opens the bdev, + * but this test needs to do that in a different + * way. */ + spdk_bdev_close(g_desc); + poll_threads(); + + /* Try hotremoving a bdev with descriptors which don't provide + * the notification callback */ + spdk_bdev_open(&g_bdev.bdev, true, NULL, NULL, &desc); + SPDK_CU_ASSERT_FATAL(desc != NULL); + + /* There is an open descriptor on the device. Unregister it, + * which can't proceed until the descriptor is closed. 
*/ + done = false; + spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done); + + /* Poll the threads to allow all events to be processed */ + poll_threads(); + + /* Make sure the bdev was not unregistered. We still have a + * descriptor open */ + CU_ASSERT(done == false); + + spdk_bdev_close(desc); + poll_threads(); + desc = NULL; + + /* The unregister should have completed */ + CU_ASSERT(done == true); + + + /* Register the bdev again */ + register_bdev(&g_bdev, "ut_bdev", &g_io_device); + + remove_notify = false; + spdk_bdev_open(&g_bdev.bdev, true, _bdev_removed, &remove_notify, &desc); + SPDK_CU_ASSERT_FATAL(desc != NULL); + CU_ASSERT(remove_notify == false); + + /* There is an open descriptor on the device. Unregister it, + * which can't proceed until the descriptor is closed. */ + done = false; + spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done); + /* No polling has occurred, so neither of these should execute */ + CU_ASSERT(remove_notify == false); + CU_ASSERT(done == false); + + /* Prior to the unregister completing, close the descriptor */ + spdk_bdev_close(desc); + + /* Poll the threads to allow all events to be processed */ + poll_threads(); + + /* Remove notify should not have been called because the + * descriptor is already closed. */ + CU_ASSERT(remove_notify == false); + + /* The unregister should have completed */ + CU_ASSERT(done == true); + + /* Restore the original g_bdev so that we can use teardown_test(). */ + register_bdev(&g_bdev, "ut_bdev", &g_io_device); + spdk_bdev_open(&g_bdev.bdev, true, NULL, NULL, &g_desc); + teardown_test(); +} + +static void +reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) +{ + bool *done = cb_arg; + + CU_ASSERT(success == true); + *done = true; + spdk_bdev_free_io(bdev_io); +} + +static void +put_channel_during_reset(void) +{ + struct spdk_io_channel *io_ch; + bool done = false; + + setup_test(); + + set_thread(0); + io_ch = spdk_bdev_get_io_channel(g_desc); + CU_ASSERT(io_ch != NULL); + + /* + * Start a reset, but then put the I/O channel before + * the deferred messages for the reset get a chance to + * execute. + */ + spdk_bdev_reset(g_desc, io_ch, reset_done, &done); + spdk_put_io_channel(io_ch); + poll_threads(); + stub_complete_io(g_bdev.io_target, 0); + + teardown_test(); +} + +static void +aborted_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) +{ + enum spdk_bdev_io_status *status = cb_arg; + + *status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED; + spdk_bdev_free_io(bdev_io); +} + +static void +aborted_reset(void) +{ + struct spdk_io_channel *io_ch[2]; + enum spdk_bdev_io_status status1 = SPDK_BDEV_IO_STATUS_PENDING, + status2 = SPDK_BDEV_IO_STATUS_PENDING; + + setup_test(); + + set_thread(0); + io_ch[0] = spdk_bdev_get_io_channel(g_desc); + CU_ASSERT(io_ch[0] != NULL); + spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1); + poll_threads(); + CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL); + + /* + * First reset has been submitted on ch0. Now submit a second + * reset on ch1 which will get queued since there is already a + * reset in progress. + */ + set_thread(1); + io_ch[1] = spdk_bdev_get_io_channel(g_desc); + CU_ASSERT(io_ch[1] != NULL); + spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2); + poll_threads(); + CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL); + + /* + * Now destroy ch1. This will abort the queued reset. Check that + * the second reset was completed with failed status. 
Also check + * that bdev->internal.reset_in_progress != NULL, since the + * original reset has not been completed yet. This ensures that + * the bdev code is correctly noticing that the failed reset is + * *not* the one that had been submitted to the bdev module. + */ + set_thread(1); + spdk_put_io_channel(io_ch[1]); + poll_threads(); + CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_FAILED); + CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL); + + /* + * Now complete the first reset, verify that it completed with SUCCESS + * status and that bdev->internal.reset_in_progress is also set back to NULL. + */ + set_thread(0); + spdk_put_io_channel(io_ch[0]); + stub_complete_io(g_bdev.io_target, 0); + poll_threads(); + CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS); + CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL); + + teardown_test(); +} + +static void +io_during_io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) +{ + enum spdk_bdev_io_status *status = cb_arg; + + *status = bdev_io->internal.status; + spdk_bdev_free_io(bdev_io); +} + +static void +io_during_reset(void) +{ + struct spdk_io_channel *io_ch[2]; + struct spdk_bdev_channel *bdev_ch[2]; + enum spdk_bdev_io_status status0, status1, status_reset; + int rc; + + setup_test(); + + /* + * First test normal case - submit an I/O on each of two channels (with no resets) + * and verify they complete successfully. + */ + set_thread(0); + io_ch[0] = spdk_bdev_get_io_channel(g_desc); + bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]); + CU_ASSERT(bdev_ch[0]->flags == 0); + status0 = SPDK_BDEV_IO_STATUS_PENDING; + rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0); + CU_ASSERT(rc == 0); + + set_thread(1); + io_ch[1] = spdk_bdev_get_io_channel(g_desc); + bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]); + CU_ASSERT(bdev_ch[1]->flags == 0); + status1 = SPDK_BDEV_IO_STATUS_PENDING; + rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1); + CU_ASSERT(rc == 0); + + poll_threads(); + CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING); + CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING); + + set_thread(0); + stub_complete_io(g_bdev.io_target, 0); + CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS); + + set_thread(1); + stub_complete_io(g_bdev.io_target, 0); + CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS); + + /* + * Now submit a reset, and leave it pending while we submit I/O on two different + * channels. These I/O should be failed by the bdev layer since the reset is in + * progress. + */ + set_thread(0); + status_reset = SPDK_BDEV_IO_STATUS_PENDING; + rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &status_reset); + CU_ASSERT(rc == 0); + + CU_ASSERT(bdev_ch[0]->flags == 0); + CU_ASSERT(bdev_ch[1]->flags == 0); + poll_threads(); + CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_RESET_IN_PROGRESS); + CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_RESET_IN_PROGRESS); + + set_thread(0); + status0 = SPDK_BDEV_IO_STATUS_PENDING; + rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0); + CU_ASSERT(rc == 0); + + set_thread(1); + status1 = SPDK_BDEV_IO_STATUS_PENDING; + rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1); + CU_ASSERT(rc == 0); + + /* + * A reset is in progress so these read I/O should complete with aborted. Note that we + * need to poll_threads() since I/O completed inline have their completion deferred. 
+ */ + poll_threads(); + CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING); + CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_ABORTED); + CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_ABORTED); + + /* + * Complete the reset + */ + set_thread(0); + stub_complete_io(g_bdev.io_target, 0); + + /* + * Only poll thread 0. We should not get a completion. + */ + poll_thread(0); + CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING); + + /* + * Poll both thread 0 and 1 so the messages can propagate and we + * get a completion. + */ + poll_threads(); + CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS); + + spdk_put_io_channel(io_ch[0]); + set_thread(1); + spdk_put_io_channel(io_ch[1]); + poll_threads(); + + teardown_test(); +} + +static void +basic_qos(void) +{ + struct spdk_io_channel *io_ch[2]; + struct spdk_bdev_channel *bdev_ch[2]; + struct spdk_bdev *bdev; + enum spdk_bdev_io_status status, abort_status; + int rc; + + setup_test(); + + /* Enable QoS */ + bdev = &g_bdev.bdev; + bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos)); + SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL); + TAILQ_INIT(&bdev->internal.qos->queued); + /* + * Enable read/write IOPS, read only byte per second and + * read/write byte per second rate limits. + * In this case, all rate limits will take equal effect. + */ + /* 2000 read/write I/O per second, or 2 per millisecond */ + bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 2000; + /* 8K read/write byte per millisecond with 4K block size */ + bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 8192000; + /* 8K read only byte per millisecond with 4K block size */ + bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT].limit = 8192000; + + g_get_io_channel = true; + + set_thread(0); + io_ch[0] = spdk_bdev_get_io_channel(g_desc); + bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]); + CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED); + + set_thread(1); + io_ch[1] = spdk_bdev_get_io_channel(g_desc); + bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]); + CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED); + + /* + * Send an I/O on thread 0, which is where the QoS thread is running. + */ + set_thread(0); + status = SPDK_BDEV_IO_STATUS_PENDING; + rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status); + CU_ASSERT(rc == 0); + CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING); + poll_threads(); + stub_complete_io(g_bdev.io_target, 0); + poll_threads(); + CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS); + + /* Send an I/O on thread 1. The QoS thread is not running here. */ + status = SPDK_BDEV_IO_STATUS_PENDING; + set_thread(1); + rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status); + CU_ASSERT(rc == 0); + CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING); + poll_threads(); + /* Complete I/O on thread 1. This should not complete the I/O we submitted */ + stub_complete_io(g_bdev.io_target, 0); + poll_threads(); + CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING); + /* Now complete I/O on thread 0 */ + set_thread(0); + poll_threads(); + stub_complete_io(g_bdev.io_target, 0); + poll_threads(); + CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS); + + /* Reset rate limit for the next test cases. */ + spdk_delay_us(SPDK_BDEV_QOS_TIMESLICE_IN_USEC); + poll_threads(); + + /* + * Test abort request when QoS is enabled. + */ + + /* Send an I/O on thread 0, which is where the QoS thread is running. 
*/ + set_thread(0); + status = SPDK_BDEV_IO_STATUS_PENDING; + rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status); + CU_ASSERT(rc == 0); + CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING); + /* Send an abort to the I/O on the same thread. */ + abort_status = SPDK_BDEV_IO_STATUS_PENDING; + rc = spdk_bdev_abort(g_desc, io_ch[0], &status, io_during_io_done, &abort_status); + CU_ASSERT(rc == 0); + CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_PENDING); + poll_threads(); + CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); + CU_ASSERT(status == SPDK_BDEV_IO_STATUS_ABORTED); + + /* Send an I/O on thread 1. The QoS thread is not running here. */ + status = SPDK_BDEV_IO_STATUS_PENDING; + set_thread(1); + rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status); + CU_ASSERT(rc == 0); + CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING); + poll_threads(); + /* Send an abort to the I/O on the same thread. */ + abort_status = SPDK_BDEV_IO_STATUS_PENDING; + rc = spdk_bdev_abort(g_desc, io_ch[1], &status, io_during_io_done, &abort_status); + CU_ASSERT(rc == 0); + CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_PENDING); + poll_threads(); + /* Complete the I/O with failure and the abort with success on thread 1. */ + CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); + CU_ASSERT(status == SPDK_BDEV_IO_STATUS_ABORTED); + + set_thread(0); + + /* + * Close the descriptor only, which should stop the qos channel as + * the last descriptor removed. + */ + spdk_bdev_close(g_desc); + poll_threads(); + CU_ASSERT(bdev->internal.qos->ch == NULL); + + /* + * Open the bdev again which shall setup the qos channel as the + * channels are valid. + */ + spdk_bdev_open(bdev, true, NULL, NULL, &g_desc); + poll_threads(); + CU_ASSERT(bdev->internal.qos->ch != NULL); + + /* Tear down the channels */ + set_thread(0); + spdk_put_io_channel(io_ch[0]); + set_thread(1); + spdk_put_io_channel(io_ch[1]); + poll_threads(); + set_thread(0); + + /* Close the descriptor, which should stop the qos channel */ + spdk_bdev_close(g_desc); + poll_threads(); + CU_ASSERT(bdev->internal.qos->ch == NULL); + + /* Open the bdev again, no qos channel setup without valid channels. */ + spdk_bdev_open(bdev, true, NULL, NULL, &g_desc); + poll_threads(); + CU_ASSERT(bdev->internal.qos->ch == NULL); + + /* Create the channels in reverse order. 
*/ + set_thread(1); + io_ch[1] = spdk_bdev_get_io_channel(g_desc); + bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]); + CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED); + + set_thread(0); + io_ch[0] = spdk_bdev_get_io_channel(g_desc); + bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]); + CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED); + + /* Confirm that the qos thread is now thread 1 */ + CU_ASSERT(bdev->internal.qos->ch == bdev_ch[1]); + + /* Tear down the channels */ + set_thread(0); + spdk_put_io_channel(io_ch[0]); + set_thread(1); + spdk_put_io_channel(io_ch[1]); + poll_threads(); + + set_thread(0); + + teardown_test(); +} + +static void +io_during_qos_queue(void) +{ + struct spdk_io_channel *io_ch[2]; + struct spdk_bdev_channel *bdev_ch[2]; + struct spdk_bdev *bdev; + enum spdk_bdev_io_status status0, status1, status2; + int rc; + + setup_test(); + MOCK_SET(spdk_get_ticks, 0); + + /* Enable QoS */ + bdev = &g_bdev.bdev; + bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos)); + SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL); + TAILQ_INIT(&bdev->internal.qos->queued); + /* + * Enable read/write IOPS, read only byte per sec, write only + * byte per sec and read/write byte per sec rate limits. + * In this case, both read only and write only byte per sec + * rate limit will take effect. + */ + /* 4000 read/write I/O per second, or 4 per millisecond */ + bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 4000; + /* 8K byte per millisecond with 4K block size */ + bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 8192000; + /* 4K byte per millisecond with 4K block size */ + bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT].limit = 4096000; + /* 4K byte per millisecond with 4K block size */ + bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT].limit = 4096000; + + g_get_io_channel = true; + + /* Create channels */ + set_thread(0); + io_ch[0] = spdk_bdev_get_io_channel(g_desc); + bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]); + CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED); + + set_thread(1); + io_ch[1] = spdk_bdev_get_io_channel(g_desc); + bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]); + CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED); + + /* Send two read I/Os */ + status1 = SPDK_BDEV_IO_STATUS_PENDING; + rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1); + CU_ASSERT(rc == 0); + CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING); + set_thread(0); + status0 = SPDK_BDEV_IO_STATUS_PENDING; + rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0); + CU_ASSERT(rc == 0); + CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING); + /* Send one write I/O */ + status2 = SPDK_BDEV_IO_STATUS_PENDING; + rc = spdk_bdev_write_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status2); + CU_ASSERT(rc == 0); + CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_PENDING); + + /* Complete any I/O that arrived at the disk */ + poll_threads(); + set_thread(1); + stub_complete_io(g_bdev.io_target, 0); + set_thread(0); + stub_complete_io(g_bdev.io_target, 0); + poll_threads(); + + /* Only one of the two read I/Os should complete. (logical XOR) */ + if (status0 == SPDK_BDEV_IO_STATUS_SUCCESS) { + CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING); + } else { + CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS); + } + /* The write I/O should complete. 
*/ + CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_SUCCESS); + + /* Advance in time by a millisecond */ + spdk_delay_us(1000); + + /* Complete more I/O */ + poll_threads(); + set_thread(1); + stub_complete_io(g_bdev.io_target, 0); + set_thread(0); + stub_complete_io(g_bdev.io_target, 0); + poll_threads(); + + /* Now the second read I/O should be done */ + CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS); + CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS); + + /* Tear down the channels */ + set_thread(1); + spdk_put_io_channel(io_ch[1]); + set_thread(0); + spdk_put_io_channel(io_ch[0]); + poll_threads(); + + teardown_test(); +} + +static void +io_during_qos_reset(void) +{ + struct spdk_io_channel *io_ch[2]; + struct spdk_bdev_channel *bdev_ch[2]; + struct spdk_bdev *bdev; + enum spdk_bdev_io_status status0, status1, reset_status; + int rc; + + setup_test(); + MOCK_SET(spdk_get_ticks, 0); + + /* Enable QoS */ + bdev = &g_bdev.bdev; + bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos)); + SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL); + TAILQ_INIT(&bdev->internal.qos->queued); + /* + * Enable read/write IOPS, write only byte per sec and + * read/write byte per second rate limits. + * In this case, read/write byte per second rate limit will + * take effect first. + */ + /* 2000 read/write I/O per second, or 2 per millisecond */ + bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 2000; + /* 4K byte per millisecond with 4K block size */ + bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 4096000; + /* 8K byte per millisecond with 4K block size */ + bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT].limit = 8192000; + + g_get_io_channel = true; + + /* Create channels */ + set_thread(0); + io_ch[0] = spdk_bdev_get_io_channel(g_desc); + bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]); + CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED); + + set_thread(1); + io_ch[1] = spdk_bdev_get_io_channel(g_desc); + bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]); + CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED); + + /* Send two I/O. One of these gets queued by QoS. The other is sitting at the disk. */ + status1 = SPDK_BDEV_IO_STATUS_PENDING; + rc = spdk_bdev_write_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1); + CU_ASSERT(rc == 0); + set_thread(0); + status0 = SPDK_BDEV_IO_STATUS_PENDING; + rc = spdk_bdev_write_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0); + CU_ASSERT(rc == 0); + + poll_threads(); + CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING); + CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING); + + /* Reset the bdev. */ + reset_status = SPDK_BDEV_IO_STATUS_PENDING; + rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &reset_status); + CU_ASSERT(rc == 0); + + /* Complete any I/O that arrived at the disk */ + poll_threads(); + set_thread(1); + stub_complete_io(g_bdev.io_target, 0); + set_thread(0); + stub_complete_io(g_bdev.io_target, 0); + poll_threads(); + + CU_ASSERT(reset_status == SPDK_BDEV_IO_STATUS_SUCCESS); + CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_ABORTED); + CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_ABORTED); + + /* Tear down the channels */ + set_thread(1); + spdk_put_io_channel(io_ch[1]); + set_thread(0); + spdk_put_io_channel(io_ch[0]); + poll_threads(); + + teardown_test(); +} + +static void +enomem_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) +{ + enum spdk_bdev_io_status *status = cb_arg; + + *status = success ? 
SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
+ spdk_bdev_free_io(bdev_io);
+}
+
+static void
+enomem(void)
+{
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_channel *bdev_ch;
+ struct spdk_bdev_shared_resource *shared_resource;
+ struct ut_bdev_channel *ut_ch;
+ const uint32_t IO_ARRAY_SIZE = 64;
+ const uint32_t AVAIL = 20;
+ enum spdk_bdev_io_status status[IO_ARRAY_SIZE], status_reset;
+ uint32_t nomem_cnt, i;
+ struct spdk_bdev_io *first_io;
+ int rc;
+
+ setup_test();
+
+ set_thread(0);
+ io_ch = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch = spdk_io_channel_get_ctx(io_ch);
+ shared_resource = bdev_ch->shared_resource;
+ ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
+ ut_ch->avail_cnt = AVAIL;
+
+ /* First submit a number of IOs equal to what the channel can support. */
+ for (i = 0; i < AVAIL; i++) {
+ status[i] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
+ CU_ASSERT(rc == 0);
+ }
+ CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));
+
+ /*
+ * Next, submit one additional I/O. This one should fail with ENOMEM and then go onto
+ * the nomem_io list.
+ */
+ status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
+ first_io = TAILQ_FIRST(&shared_resource->nomem_io);
+
+ /*
+ * Now submit a bunch more I/O. These should all fail with ENOMEM and get queued behind
+ * the first_io above.
+ */
+ for (i = AVAIL + 1; i < IO_ARRAY_SIZE; i++) {
+ status[i] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
+ CU_ASSERT(rc == 0);
+ }
+
+ /* Assert that first_io is still at the head of the list. */
+ CU_ASSERT(TAILQ_FIRST(&shared_resource->nomem_io) == first_io);
+ CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == (IO_ARRAY_SIZE - AVAIL));
+ nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);
+ CU_ASSERT(shared_resource->nomem_threshold == (AVAIL - NOMEM_THRESHOLD_COUNT));
+
+ /*
+ * Complete 1 I/O only. The key check here is bdev_io_tailq_cnt - this should not have
+ * changed since completing just 1 I/O should not trigger retrying the queued nomem_io
+ * list.
+ */
+ stub_complete_io(g_bdev.io_target, 1);
+ CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);
+
+ /*
+ * Complete enough I/O to hit the nomem_threshold. This should trigger retrying nomem_io,
+ * and we should see I/O get resubmitted to the test bdev module.
+ */
+ stub_complete_io(g_bdev.io_target, NOMEM_THRESHOLD_COUNT - 1);
+ CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) < nomem_cnt);
+ nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);
+
+ /* Complete 1 I/O only. This should not trigger retrying the queued nomem_io. */
+ stub_complete_io(g_bdev.io_target, 1);
+ CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);
+
+ /*
+ * Send a reset and confirm that all I/O are completed, including the ones that
+ * were queued on the nomem_io list.
+ */
+ status_reset = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_reset(g_desc, io_ch, enomem_done, &status_reset);
+ poll_threads();
+ CU_ASSERT(rc == 0);
+ /* This will complete the reset. 
*/ + stub_complete_io(g_bdev.io_target, 0); + + CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == 0); + CU_ASSERT(shared_resource->io_outstanding == 0); + + spdk_put_io_channel(io_ch); + poll_threads(); + teardown_test(); +} + +static void +enomem_multi_bdev(void) +{ + struct spdk_io_channel *io_ch; + struct spdk_bdev_channel *bdev_ch; + struct spdk_bdev_shared_resource *shared_resource; + struct ut_bdev_channel *ut_ch; + const uint32_t IO_ARRAY_SIZE = 64; + const uint32_t AVAIL = 20; + enum spdk_bdev_io_status status[IO_ARRAY_SIZE]; + uint32_t i; + struct ut_bdev *second_bdev; + struct spdk_bdev_desc *second_desc = NULL; + struct spdk_bdev_channel *second_bdev_ch; + struct spdk_io_channel *second_ch; + int rc; + + setup_test(); + + /* Register second bdev with the same io_target */ + second_bdev = calloc(1, sizeof(*second_bdev)); + SPDK_CU_ASSERT_FATAL(second_bdev != NULL); + register_bdev(second_bdev, "ut_bdev2", g_bdev.io_target); + spdk_bdev_open(&second_bdev->bdev, true, NULL, NULL, &second_desc); + SPDK_CU_ASSERT_FATAL(second_desc != NULL); + + set_thread(0); + io_ch = spdk_bdev_get_io_channel(g_desc); + bdev_ch = spdk_io_channel_get_ctx(io_ch); + shared_resource = bdev_ch->shared_resource; + ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel); + ut_ch->avail_cnt = AVAIL; + + second_ch = spdk_bdev_get_io_channel(second_desc); + second_bdev_ch = spdk_io_channel_get_ctx(second_ch); + SPDK_CU_ASSERT_FATAL(shared_resource == second_bdev_ch->shared_resource); + + /* Saturate io_target through bdev A. */ + for (i = 0; i < AVAIL; i++) { + status[i] = SPDK_BDEV_IO_STATUS_PENDING; + rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]); + CU_ASSERT(rc == 0); + } + CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io)); + + /* + * Now submit I/O through the second bdev. This should fail with ENOMEM + * and then go onto the nomem_io list. + */ + status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING; + rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]); + CU_ASSERT(rc == 0); + SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io)); + + /* Complete first bdev's I/O. 
This should retry sending second bdev's nomem_io */ + stub_complete_io(g_bdev.io_target, AVAIL); + + SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&shared_resource->nomem_io)); + CU_ASSERT(shared_resource->io_outstanding == 1); + + /* Now complete our retried I/O */ + stub_complete_io(g_bdev.io_target, 1); + SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == 0); + + spdk_put_io_channel(io_ch); + spdk_put_io_channel(second_ch); + spdk_bdev_close(second_desc); + unregister_bdev(second_bdev); + poll_threads(); + free(second_bdev); + teardown_test(); +} + + +static void +enomem_multi_io_target(void) +{ + struct spdk_io_channel *io_ch; + struct spdk_bdev_channel *bdev_ch; + struct ut_bdev_channel *ut_ch; + const uint32_t IO_ARRAY_SIZE = 64; + const uint32_t AVAIL = 20; + enum spdk_bdev_io_status status[IO_ARRAY_SIZE]; + uint32_t i; + int new_io_device; + struct ut_bdev *second_bdev; + struct spdk_bdev_desc *second_desc = NULL; + struct spdk_bdev_channel *second_bdev_ch; + struct spdk_io_channel *second_ch; + int rc; + + setup_test(); + + /* Create new io_target and a second bdev using it */ + spdk_io_device_register(&new_io_device, stub_create_ch, stub_destroy_ch, + sizeof(struct ut_bdev_channel), NULL); + second_bdev = calloc(1, sizeof(*second_bdev)); + SPDK_CU_ASSERT_FATAL(second_bdev != NULL); + register_bdev(second_bdev, "ut_bdev2", &new_io_device); + spdk_bdev_open(&second_bdev->bdev, true, NULL, NULL, &second_desc); + SPDK_CU_ASSERT_FATAL(second_desc != NULL); + + set_thread(0); + io_ch = spdk_bdev_get_io_channel(g_desc); + bdev_ch = spdk_io_channel_get_ctx(io_ch); + ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel); + ut_ch->avail_cnt = AVAIL; + + /* Different io_target should imply a different shared_resource */ + second_ch = spdk_bdev_get_io_channel(second_desc); + second_bdev_ch = spdk_io_channel_get_ctx(second_ch); + SPDK_CU_ASSERT_FATAL(bdev_ch->shared_resource != second_bdev_ch->shared_resource); + + /* Saturate io_target through bdev A. */ + for (i = 0; i < AVAIL; i++) { + status[i] = SPDK_BDEV_IO_STATUS_PENDING; + rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]); + CU_ASSERT(rc == 0); + } + CU_ASSERT(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io)); + + /* Issue one more I/O to fill ENOMEM list. */ + status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING; + rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]); + CU_ASSERT(rc == 0); + SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io)); + + /* + * Now submit I/O through the second bdev. This should go through and complete + * successfully because we're using a different io_device underneath. + */ + status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING; + rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]); + CU_ASSERT(rc == 0); + SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&second_bdev_ch->shared_resource->nomem_io)); + stub_complete_io(second_bdev->io_target, 1); + + /* Cleanup; Complete outstanding I/O. 
*/ + stub_complete_io(g_bdev.io_target, AVAIL); + SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io)); + /* Complete the ENOMEM I/O */ + stub_complete_io(g_bdev.io_target, 1); + CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0); + + SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io)); + CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0); + spdk_put_io_channel(io_ch); + spdk_put_io_channel(second_ch); + spdk_bdev_close(second_desc); + unregister_bdev(second_bdev); + spdk_io_device_unregister(&new_io_device, NULL); + poll_threads(); + free(second_bdev); + teardown_test(); +} + +static void +qos_dynamic_enable_done(void *cb_arg, int status) +{ + int *rc = cb_arg; + *rc = status; +} + +static void +qos_dynamic_enable(void) +{ + struct spdk_io_channel *io_ch[2]; + struct spdk_bdev_channel *bdev_ch[2]; + struct spdk_bdev *bdev; + enum spdk_bdev_io_status bdev_io_status[2]; + uint64_t limits[SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES] = {}; + int status, second_status, rc, i; + + setup_test(); + MOCK_SET(spdk_get_ticks, 0); + + for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) { + limits[i] = UINT64_MAX; + } + + bdev = &g_bdev.bdev; + + g_get_io_channel = true; + + /* Create channels */ + set_thread(0); + io_ch[0] = spdk_bdev_get_io_channel(g_desc); + bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]); + CU_ASSERT(bdev_ch[0]->flags == 0); + + set_thread(1); + io_ch[1] = spdk_bdev_get_io_channel(g_desc); + bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]); + CU_ASSERT(bdev_ch[1]->flags == 0); + + set_thread(0); + + /* + * Enable QoS: Read/Write IOPS, Read/Write byte, + * Read only byte and Write only byte per second + * rate limits. + * More than 10 I/Os allowed per timeslice. + */ + status = -1; + limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000; + limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 100; + limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT] = 100; + limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT] = 10; + spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status); + poll_threads(); + CU_ASSERT(status == 0); + CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0); + CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0); + + /* + * Submit and complete 10 I/O to fill the QoS allotment for this timeslice. + * Additional I/O will then be queued. + */ + set_thread(0); + for (i = 0; i < 10; i++) { + bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING; + rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]); + CU_ASSERT(rc == 0); + CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING); + poll_thread(0); + stub_complete_io(g_bdev.io_target, 0); + CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS); + } + + /* + * Send two more I/O. These I/O will be queued since the current timeslice allotment has been + * filled already. 
We want to test that when QoS is disabled that these two I/O: + * 1) are not aborted + * 2) are sent back to their original thread for resubmission + */ + bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING; + rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]); + CU_ASSERT(rc == 0); + CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING); + set_thread(1); + bdev_io_status[1] = SPDK_BDEV_IO_STATUS_PENDING; + rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &bdev_io_status[1]); + CU_ASSERT(rc == 0); + CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING); + poll_threads(); + + /* + * Disable QoS: Read/Write IOPS, Read/Write byte, + * Read only byte rate limits + */ + status = -1; + limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0; + limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 0; + limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT] = 0; + spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status); + poll_threads(); + CU_ASSERT(status == 0); + CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0); + CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0); + + /* Disable QoS: Write only Byte per second rate limit */ + status = -1; + limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT] = 0; + spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status); + poll_threads(); + CU_ASSERT(status == 0); + CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0); + CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0); + + /* + * All I/O should have been resubmitted back on their original thread. Complete + * all I/O on thread 0, and ensure that only the thread 0 I/O was completed. + */ + set_thread(0); + stub_complete_io(g_bdev.io_target, 0); + poll_threads(); + CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS); + CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING); + + /* Now complete all I/O on thread 1 and ensure the thread 1 I/O was completed. */ + set_thread(1); + stub_complete_io(g_bdev.io_target, 0); + poll_threads(); + CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_SUCCESS); + + /* Disable QoS again */ + status = -1; + limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0; + spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status); + poll_threads(); + CU_ASSERT(status == 0); /* This should succeed */ + CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0); + CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0); + + /* Enable QoS on thread 0 */ + status = -1; + limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000; + spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status); + poll_threads(); + CU_ASSERT(status == 0); + CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0); + CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0); + + /* Disable QoS on thread 1 */ + set_thread(1); + status = -1; + limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0; + spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status); + /* Don't poll yet. This should leave the channels with QoS enabled */ + CU_ASSERT(status == -1); + CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0); + CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0); + + /* Enable QoS. This should immediately fail because the previous disable QoS hasn't completed. 
*/ + second_status = 0; + limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 10; + spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &second_status); + poll_threads(); + CU_ASSERT(status == 0); /* The disable should succeed */ + CU_ASSERT(second_status < 0); /* The enable should fail */ + CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0); + CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0); + + /* Enable QoS on thread 1. This should succeed now that the disable has completed. */ + status = -1; + limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000; + spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status); + poll_threads(); + CU_ASSERT(status == 0); + CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0); + CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0); + + /* Tear down the channels */ + set_thread(0); + spdk_put_io_channel(io_ch[0]); + set_thread(1); + spdk_put_io_channel(io_ch[1]); + poll_threads(); + + set_thread(0); + teardown_test(); +} + +static void +histogram_status_cb(void *cb_arg, int status) +{ + g_status = status; +} + +static void +histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram) +{ + g_status = status; + g_histogram = histogram; +} + +static void +histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count, + uint64_t total, uint64_t so_far) +{ + g_count += count; +} + +static void +bdev_histograms_mt(void) +{ + struct spdk_io_channel *ch[2]; + struct spdk_histogram_data *histogram; + uint8_t buf[4096]; + int status = false; + int rc; + + + setup_test(); + + set_thread(0); + ch[0] = spdk_bdev_get_io_channel(g_desc); + CU_ASSERT(ch[0] != NULL); + + set_thread(1); + ch[1] = spdk_bdev_get_io_channel(g_desc); + CU_ASSERT(ch[1] != NULL); + + + /* Enable histogram */ + spdk_bdev_histogram_enable(&g_bdev.bdev, histogram_status_cb, NULL, true); + poll_threads(); + CU_ASSERT(g_status == 0); + CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == true); + + /* Allocate histogram */ + histogram = spdk_histogram_data_alloc(); + + /* Check if histogram is zeroed */ + spdk_bdev_histogram_get(&g_bdev.bdev, histogram, histogram_data_cb, NULL); + poll_threads(); + CU_ASSERT(g_status == 0); + SPDK_CU_ASSERT_FATAL(g_histogram != NULL); + + g_count = 0; + spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); + + CU_ASSERT(g_count == 0); + + set_thread(0); + rc = spdk_bdev_write_blocks(g_desc, ch[0], &buf, 0, 1, io_during_io_done, &status); + CU_ASSERT(rc == 0); + + spdk_delay_us(10); + stub_complete_io(g_bdev.io_target, 1); + poll_threads(); + CU_ASSERT(status == true); + + + set_thread(1); + rc = spdk_bdev_read_blocks(g_desc, ch[1], &buf, 0, 1, io_during_io_done, &status); + CU_ASSERT(rc == 0); + + spdk_delay_us(10); + stub_complete_io(g_bdev.io_target, 1); + poll_threads(); + CU_ASSERT(status == true); + + set_thread(0); + + /* Check if histogram gathered data from all I/O channels */ + spdk_bdev_histogram_get(&g_bdev.bdev, histogram, histogram_data_cb, NULL); + poll_threads(); + CU_ASSERT(g_status == 0); + CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == true); + SPDK_CU_ASSERT_FATAL(g_histogram != NULL); + + g_count = 0; + spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); + CU_ASSERT(g_count == 2); + + /* Disable histogram */ + spdk_bdev_histogram_enable(&g_bdev.bdev, histogram_status_cb, NULL, false); + poll_threads(); + CU_ASSERT(g_status == 0); + CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == false); + + spdk_histogram_data_free(histogram); + + 
/* Tear down the channels */
+ set_thread(0);
+ spdk_put_io_channel(ch[0]);
+ set_thread(1);
+ spdk_put_io_channel(ch[1]);
+ poll_threads();
+ set_thread(0);
+ teardown_test();
+
+}
+
+struct timeout_io_cb_arg {
+ struct iovec iov;
+ uint8_t type;
+};
+
+static int
+bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch)
+{
+ struct spdk_bdev_io *bdev_io;
+ int n = 0;
+
+ if (!ch) {
+ return -1;
+ }
+
+ TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) {
+ n++;
+ }
+
+ return n;
+}
+
+static void
+bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io)
+{
+ struct timeout_io_cb_arg *ctx = cb_arg;
+
+ ctx->type = bdev_io->type;
+ ctx->iov.iov_base = bdev_io->iov.iov_base;
+ ctx->iov.iov_len = bdev_io->iov.iov_len;
+}
+
+static bool g_io_done;
+
+static void
+io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ g_io_done = true;
+ spdk_bdev_free_io(bdev_io);
+}
+
+static void
+bdev_set_io_timeout_mt(void)
+{
+ struct spdk_io_channel *ch[3];
+ struct spdk_bdev_channel *bdev_ch[3];
+ struct timeout_io_cb_arg cb_arg;
+
+ setup_test();
+
+ g_bdev.bdev.optimal_io_boundary = 16;
+ g_bdev.bdev.split_on_optimal_io_boundary = true;
+
+ set_thread(0);
+ ch[0] = spdk_bdev_get_io_channel(g_desc);
+ CU_ASSERT(ch[0] != NULL);
+
+ set_thread(1);
+ ch[1] = spdk_bdev_get_io_channel(g_desc);
+ CU_ASSERT(ch[1] != NULL);
+
+ set_thread(2);
+ ch[2] = spdk_bdev_get_io_channel(g_desc);
+ CU_ASSERT(ch[2] != NULL);
+
+ /* Multi-thread mode
+ * 1, Check the poller was registered successfully
+ * 2, Check the timeout IO and ensure the IO was submitted by the user
+ * 3, Check that the link in the bdev_ch works right.
+ * 4, Close the desc and put the io channel while the timeout poller is polling
+ */
+
+ /* In the desc thread, set the timeout */
+ set_thread(0);
+ CU_ASSERT(spdk_bdev_set_timeout(g_desc, 5, bdev_channel_io_timeout_cb, &cb_arg) == 0);
+ CU_ASSERT(g_desc->io_timeout_poller != NULL);
+ CU_ASSERT(g_desc->cb_fn == bdev_channel_io_timeout_cb);
+ CU_ASSERT(g_desc->cb_arg == &cb_arg);
+
+ /* Check the IO submitted list and timeout handler */
+ CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[0], (void *)0x2000, 0, 1, io_done, NULL) == 0);
+ bdev_ch[0] = spdk_io_channel_get_ctx(ch[0]);
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[0]) == 1);
+
+ set_thread(1);
+ CU_ASSERT(spdk_bdev_write_blocks(g_desc, ch[1], (void *)0x1000, 0, 1, io_done, NULL) == 0);
+ bdev_ch[1] = spdk_io_channel_get_ctx(ch[1]);
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[1]) == 1);
+
+ /* Now test that a single-vector command is split correctly. 
+ * Offset 14, length 8, payload 0xF000
+ * Child - Offset 14, length 2, payload 0xF000
+ * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
+ *
+ * Set up the expected values before calling spdk_bdev_read_blocks
+ */
+ set_thread(2);
+ CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[2], (void *)0xF000, 14, 8, io_done, NULL) == 0);
+ bdev_ch[2] = spdk_io_channel_get_ctx(ch[2]);
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 3);
+
+ set_thread(0);
+ memset(&cb_arg, 0, sizeof(cb_arg));
+ spdk_delay_us(3 * spdk_get_ticks_hz());
+ poll_threads();
+ CU_ASSERT(cb_arg.type == 0);
+ CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
+ CU_ASSERT(cb_arg.iov.iov_len == 0);
+
+ /* Now the time reaches the limit */
+ spdk_delay_us(3 * spdk_get_ticks_hz());
+ poll_thread(0);
+ CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_READ);
+ CU_ASSERT(cb_arg.iov.iov_base == (void *)0x2000);
+ CU_ASSERT(cb_arg.iov.iov_len == 1 * g_bdev.bdev.blocklen);
+ stub_complete_io(g_bdev.io_target, 1);
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[0]) == 0);
+
+ memset(&cb_arg, 0, sizeof(cb_arg));
+ set_thread(1);
+ poll_thread(1);
+ CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
+ CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000);
+ CU_ASSERT(cb_arg.iov.iov_len == 1 * g_bdev.bdev.blocklen);
+ stub_complete_io(g_bdev.io_target, 1);
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[1]) == 0);
+
+ memset(&cb_arg, 0, sizeof(cb_arg));
+ set_thread(2);
+ poll_thread(2);
+ CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_READ);
+ CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000);
+ CU_ASSERT(cb_arg.iov.iov_len == 8 * g_bdev.bdev.blocklen);
+ stub_complete_io(g_bdev.io_target, 1);
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 2);
+ stub_complete_io(g_bdev.io_target, 1);
+ CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 0);
+
+ /* Run poll_timeout_done(), which completes the timeout poller */
+ set_thread(0);
+ poll_thread(0);
+ CU_ASSERT(g_desc->refs == 0);
+ CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[0], (void *)0x1000, 0, 1, io_done, NULL) == 0);
+ set_thread(1);
+ CU_ASSERT(spdk_bdev_write_blocks(g_desc, ch[1], (void *)0x2000, 0, 2, io_done, NULL) == 0);
+ set_thread(2);
+ CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[2], (void *)0x3000, 0, 3, io_done, NULL) == 0);
+
+ /* Trigger the timeout poller to run again; desc->refs is incremented.
+ * In thread 0 we destroy the io channel before the timeout poller runs.
+ * Timeout callback is not called on thread 0.
+ */
+ spdk_delay_us(6 * spdk_get_ticks_hz());
+ memset(&cb_arg, 0, sizeof(cb_arg));
+ set_thread(0);
+ stub_complete_io(g_bdev.io_target, 1);
+ spdk_put_io_channel(ch[0]);
+ poll_thread(0);
+ CU_ASSERT(g_desc->refs == 1);
+ CU_ASSERT(cb_arg.type == 0);
+ CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
+ CU_ASSERT(cb_arg.iov.iov_len == 0);
+
+ /* In thread 1 the timeout poller runs, then we destroy the io channel.
+ * Timeout callback is called on thread 1.
+ */
+ memset(&cb_arg, 0, sizeof(cb_arg));
+ set_thread(1);
+ poll_thread(1);
+ stub_complete_io(g_bdev.io_target, 1);
+ spdk_put_io_channel(ch[1]);
+ poll_thread(1);
+ CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
+ CU_ASSERT(cb_arg.iov.iov_base == (void *)0x2000);
+ CU_ASSERT(cb_arg.iov.iov_len == 2 * g_bdev.bdev.blocklen);
+
+ /* Close the desc.
+ * Unregister the timeout poller first.
+ * Then decrement desc->refs but it's not zero yet so desc is not freed. 
+ */ + set_thread(0); + spdk_bdev_close(g_desc); + CU_ASSERT(g_desc->refs == 1); + CU_ASSERT(g_desc->io_timeout_poller == NULL); + + /* Timeout poller runs on thread 2 then we destroy the io channel. + * Desc is closed so we would exit the timeout poller directly. + * timeout callback is not called on thread 2. + */ + memset(&cb_arg, 0, sizeof(cb_arg)); + set_thread(2); + poll_thread(2); + stub_complete_io(g_bdev.io_target, 1); + spdk_put_io_channel(ch[2]); + poll_thread(2); + CU_ASSERT(cb_arg.type == 0); + CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); + CU_ASSERT(cb_arg.iov.iov_len == 0); + + set_thread(0); + poll_thread(0); + g_teardown_done = false; + unregister_bdev(&g_bdev); + spdk_io_device_unregister(&g_io_device, NULL); + spdk_bdev_finish(finish_cb, NULL); + poll_threads(); + memset(&g_bdev, 0, sizeof(g_bdev)); + CU_ASSERT(g_teardown_done == true); + g_teardown_done = false; + free_threads(); + free_cores(); +} + +static bool g_io_done2; +static bool g_lock_lba_range_done; +static bool g_unlock_lba_range_done; + +static void +io_done2(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) +{ + g_io_done2 = true; + spdk_bdev_free_io(bdev_io); +} + +static void +lock_lba_range_done(void *ctx, int status) +{ + g_lock_lba_range_done = true; +} + +static void +unlock_lba_range_done(void *ctx, int status) +{ + g_unlock_lba_range_done = true; +} + +static uint32_t +stub_channel_outstanding_cnt(void *io_target) +{ + struct spdk_io_channel *_ch = spdk_get_io_channel(io_target); + struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch); + uint32_t outstanding_cnt; + + outstanding_cnt = ch->outstanding_cnt; + + spdk_put_io_channel(_ch); + return outstanding_cnt; +} + +static void +lock_lba_range_then_submit_io(void) +{ + struct spdk_bdev_desc *desc = NULL; + void *io_target; + struct spdk_io_channel *io_ch[3]; + struct spdk_bdev_channel *bdev_ch[3]; + struct lba_range *range; + char buf[4096]; + int ctx0, ctx1, ctx2; + int rc; + + setup_test(); + + io_target = g_bdev.io_target; + desc = g_desc; + + set_thread(0); + io_ch[0] = spdk_bdev_get_io_channel(desc); + bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]); + CU_ASSERT(io_ch[0] != NULL); + + set_thread(1); + io_ch[1] = spdk_bdev_get_io_channel(desc); + bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]); + CU_ASSERT(io_ch[1] != NULL); + + set_thread(0); + g_lock_lba_range_done = false; + rc = bdev_lock_lba_range(desc, io_ch[0], 20, 10, lock_lba_range_done, &ctx0); + CU_ASSERT(rc == 0); + poll_threads(); + + /* The lock should immediately become valid, since there are no outstanding + * write I/O. + */ + CU_ASSERT(g_lock_lba_range_done == true); + range = TAILQ_FIRST(&bdev_ch[0]->locked_ranges); + SPDK_CU_ASSERT_FATAL(range != NULL); + CU_ASSERT(range->offset == 20); + CU_ASSERT(range->length == 10); + CU_ASSERT(range->owner_ch == bdev_ch[0]); + + g_io_done = false; + CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked)); + rc = spdk_bdev_read_blocks(desc, io_ch[0], buf, 20, 1, io_done, &ctx0); + CU_ASSERT(rc == 0); + CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1); + + stub_complete_io(io_target, 1); + poll_threads(); + CU_ASSERT(g_io_done == true); + CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked)); + + /* Try a write I/O. This should actually be allowed to execute, since the channel + * holding the lock is submitting the write I/O. 
+ */ + g_io_done = false; + CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked)); + rc = spdk_bdev_write_blocks(desc, io_ch[0], buf, 20, 1, io_done, &ctx0); + CU_ASSERT(rc == 0); + CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1); + + stub_complete_io(io_target, 1); + poll_threads(); + CU_ASSERT(g_io_done == true); + CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked)); + + /* Try a write I/O. This should get queued in the io_locked tailq. */ + set_thread(1); + g_io_done = false; + CU_ASSERT(TAILQ_EMPTY(&bdev_ch[1]->io_locked)); + rc = spdk_bdev_write_blocks(desc, io_ch[1], buf, 20, 1, io_done, &ctx1); + CU_ASSERT(rc == 0); + poll_threads(); + CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 0); + CU_ASSERT(!TAILQ_EMPTY(&bdev_ch[1]->io_locked)); + CU_ASSERT(g_io_done == false); + + /* Try to unlock the lba range using thread 1's io_ch. This should fail. */ + rc = bdev_unlock_lba_range(desc, io_ch[1], 20, 10, unlock_lba_range_done, &ctx1); + CU_ASSERT(rc == -EINVAL); + + /* Now create a new channel and submit a write I/O with it. This should also be queued. + * The new channel should inherit the active locks from the bdev's internal list. + */ + set_thread(2); + io_ch[2] = spdk_bdev_get_io_channel(desc); + bdev_ch[2] = spdk_io_channel_get_ctx(io_ch[2]); + CU_ASSERT(io_ch[2] != NULL); + + g_io_done2 = false; + CU_ASSERT(TAILQ_EMPTY(&bdev_ch[2]->io_locked)); + rc = spdk_bdev_write_blocks(desc, io_ch[2], buf, 22, 2, io_done2, &ctx2); + CU_ASSERT(rc == 0); + poll_threads(); + CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 0); + CU_ASSERT(!TAILQ_EMPTY(&bdev_ch[2]->io_locked)); + CU_ASSERT(g_io_done2 == false); + + set_thread(0); + rc = bdev_unlock_lba_range(desc, io_ch[0], 20, 10, unlock_lba_range_done, &ctx0); + CU_ASSERT(rc == 0); + poll_threads(); + CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->locked_ranges)); + + /* The LBA range is unlocked, so the write IOs should now have started execution. 
*/ + CU_ASSERT(TAILQ_EMPTY(&bdev_ch[1]->io_locked)); + CU_ASSERT(TAILQ_EMPTY(&bdev_ch[2]->io_locked)); + + set_thread(1); + CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1); + stub_complete_io(io_target, 1); + set_thread(2); + CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1); + stub_complete_io(io_target, 1); + + poll_threads(); + CU_ASSERT(g_io_done == true); + CU_ASSERT(g_io_done2 == true); + + /* Tear down the channels */ + set_thread(0); + spdk_put_io_channel(io_ch[0]); + set_thread(1); + spdk_put_io_channel(io_ch[1]); + set_thread(2); + spdk_put_io_channel(io_ch[2]); + poll_threads(); + set_thread(0); + teardown_test(); +} + +int +main(int argc, char **argv) +{ + CU_pSuite suite = NULL; + unsigned int num_failures; + + CU_set_error_action(CUEA_ABORT); + CU_initialize_registry(); + + suite = CU_add_suite("bdev", NULL, NULL); + + CU_ADD_TEST(suite, basic); + CU_ADD_TEST(suite, unregister_and_close); + CU_ADD_TEST(suite, basic_qos); + CU_ADD_TEST(suite, put_channel_during_reset); + CU_ADD_TEST(suite, aborted_reset); + CU_ADD_TEST(suite, io_during_reset); + CU_ADD_TEST(suite, io_during_qos_queue); + CU_ADD_TEST(suite, io_during_qos_reset); + CU_ADD_TEST(suite, enomem); + CU_ADD_TEST(suite, enomem_multi_bdev); + CU_ADD_TEST(suite, enomem_multi_io_target); + CU_ADD_TEST(suite, qos_dynamic_enable); + CU_ADD_TEST(suite, bdev_histograms_mt); + CU_ADD_TEST(suite, bdev_set_io_timeout_mt); + CU_ADD_TEST(suite, lock_lba_range_then_submit_io); + + CU_basic_set_mode(CU_BRM_VERBOSE); + CU_basic_run_tests(); + num_failures = CU_get_number_of_failures(); + CU_cleanup_registry(); + return num_failures; +} diff --git a/src/spdk/test/unit/lib/bdev/part.c/.gitignore b/src/spdk/test/unit/lib/bdev/part.c/.gitignore new file mode 100644 index 000000000..c8302779b --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/part.c/.gitignore @@ -0,0 +1 @@ +part_ut diff --git a/src/spdk/test/unit/lib/bdev/part.c/Makefile b/src/spdk/test/unit/lib/bdev/part.c/Makefile new file mode 100644 index 000000000..9b9637dbb --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/part.c/Makefile @@ -0,0 +1,38 @@ +# +# BSD LICENSE +# +# Copyright (c) Intel Corporation. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..) + +TEST_FILE = part_ut.c + +include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk diff --git a/src/spdk/test/unit/lib/bdev/part.c/part_ut.c b/src/spdk/test/unit/lib/bdev/part.c/part_ut.c new file mode 100644 index 000000000..8bab15f48 --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/part.c/part_ut.c @@ -0,0 +1,173 @@ +/*- + * BSD LICENSE + * + * Copyright (c) Intel Corporation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "spdk_cunit.h" + +#include "common/lib/ut_multithread.c" +#include "unit/lib/json_mock.c" + +#include "spdk/config.h" +/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */ +#undef SPDK_CONFIG_VTUNE + +#include "spdk_internal/thread.h" + +#include "bdev/bdev.c" +#include "bdev/part.c" + +DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, (struct spdk_conf *cp, + const char *name), NULL); +DEFINE_STUB(spdk_conf_section_get_nmval, char *, + (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL); +DEFINE_STUB(spdk_conf_section_get_intval, int, (struct spdk_conf_section *sp, const char *key), -1); + +struct spdk_trace_histories *g_trace_histories; +DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn)); +DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix)); +DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix)); +DEFINE_STUB_V(spdk_trace_register_description, (const char *name, + uint16_t tpoint_id, uint8_t owner_type, + uint8_t object_type, uint8_t new_object, + uint8_t arg1_type, const char *arg1_name)); +DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id, + uint32_t size, uint64_t object_id, uint64_t arg1)); +DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0); +DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL); + +static void +_part_cleanup(struct spdk_bdev_part *part) +{ + free(part->internal.bdev.name); + free(part->internal.bdev.product_name); +} + +void +spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io, + int *sc, int *sk, int *asc, int *ascq) +{ +} + +struct spdk_bdev_module bdev_ut_if = { + .name = "bdev_ut", +}; + +static void vbdev_ut_examine(struct spdk_bdev *bdev); + +struct spdk_bdev_module vbdev_ut_if = { + .name = "vbdev_ut", + .examine_config = vbdev_ut_examine, +}; + +SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if) +SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if) + +static void +vbdev_ut_examine(struct spdk_bdev *bdev) +{ + spdk_bdev_module_examine_done(&vbdev_ut_if); +} + +static int +__destruct(void *ctx) +{ + return 0; +} + +static struct spdk_bdev_fn_table base_fn_table = { + .destruct = __destruct, +}; +static struct spdk_bdev_fn_table part_fn_table = { + .destruct = __destruct, +}; + +static void +part_test(void) +{ + struct spdk_bdev_part_base *base; + struct spdk_bdev_part part1 = {}; + struct spdk_bdev_part part2 = {}; + struct spdk_bdev bdev_base = {}; + SPDK_BDEV_PART_TAILQ tailq = TAILQ_HEAD_INITIALIZER(tailq); + int rc; + + bdev_base.name = "base"; + bdev_base.fn_table = &base_fn_table; + bdev_base.module = &bdev_ut_if; + rc = spdk_bdev_register(&bdev_base); + CU_ASSERT(rc == 0); + base = spdk_bdev_part_base_construct(&bdev_base, NULL, &vbdev_ut_if, + &part_fn_table, &tailq, NULL, + NULL, 0, NULL, NULL); + + SPDK_CU_ASSERT_FATAL(base != NULL); + + rc = spdk_bdev_part_construct(&part1, base, "test1", 0, 100, "test"); + SPDK_CU_ASSERT_FATAL(rc == 0); + rc = spdk_bdev_part_construct(&part2, base, "test2", 100, 100, "test"); + SPDK_CU_ASSERT_FATAL(rc == 0); + + spdk_bdev_part_base_hotremove(base, &tailq); + + spdk_bdev_part_base_free(base); + _part_cleanup(&part1); + _part_cleanup(&part2); + spdk_bdev_unregister(&bdev_base, NULL, NULL); + + poll_threads(); +} + +int +main(int argc, char **argv) +{ + CU_pSuite suite = NULL; + unsigned int num_failures; + + CU_set_error_action(CUEA_ABORT); 
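+ /* CUEA_ABORT above makes CUnit exit the test binary if the framework itself hits an error while setting up the registry below. */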
+ CU_initialize_registry(); + + suite = CU_add_suite("bdev_part", NULL, NULL); + + CU_ADD_TEST(suite, part_test); + + allocate_threads(1); + set_thread(0); + + CU_basic_set_mode(CU_BRM_VERBOSE); + CU_basic_run_tests(); + num_failures = CU_get_number_of_failures(); + CU_cleanup_registry(); + + free_threads(); + + return num_failures; +} diff --git a/src/spdk/test/unit/lib/bdev/pmem/.gitignore b/src/spdk/test/unit/lib/bdev/pmem/.gitignore new file mode 100644 index 000000000..b2e0df1eb --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/pmem/.gitignore @@ -0,0 +1 @@ +bdev_pmem_ut diff --git a/src/spdk/test/unit/lib/bdev/pmem/Makefile b/src/spdk/test/unit/lib/bdev/pmem/Makefile new file mode 100644 index 000000000..cb601f1e0 --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/pmem/Makefile @@ -0,0 +1,38 @@ +# +# BSD LICENSE +# +# Copyright (c) Intel Corporation. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..) + +TEST_FILE = bdev_pmem_ut.c + +include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk diff --git a/src/spdk/test/unit/lib/bdev/pmem/bdev_pmem_ut.c b/src/spdk/test/unit/lib/bdev/pmem/bdev_pmem_ut.c new file mode 100644 index 000000000..8cd51e9f7 --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/pmem/bdev_pmem_ut.c @@ -0,0 +1,772 @@ +/*- + * BSD LICENSE + * + * Copyright (c) Intel Corporation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "spdk_cunit.h" + +#include "common/lib/ut_multithread.c" +#include "unit/lib/json_mock.c" + +#include "spdk_internal/thread.h" + +#include "bdev/pmem/bdev_pmem.c" + +DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, + (struct spdk_conf *cp, const char *name), NULL); +DEFINE_STUB(spdk_conf_section_get_nval, char *, + (struct spdk_conf_section *sp, const char *key, int idx), NULL); +DEFINE_STUB(spdk_conf_section_get_nmval, char *, + (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL); + +static struct spdk_bdev_module *g_bdev_pmem_module; +static int g_bdev_module_cnt; + +struct pmemblk { + const char *name; + bool is_open; + bool is_consistent; + size_t bsize; + long long nblock; + + uint8_t *buffer; +}; + +static const char *g_bdev_name = "pmem0"; + +/* PMEMblkpool is a typedef of struct pmemblk */ +static PMEMblkpool g_pool_ok = { + .name = "/pools/ok_pool", + .is_open = false, + .is_consistent = true, + .bsize = 4096, + .nblock = 150 +}; + +static PMEMblkpool g_pool_nblock_0 = { + .name = "/pools/nblock_0", + .is_open = false, + .is_consistent = true, + .bsize = 4096, + .nblock = 0 +}; + +static PMEMblkpool g_pool_bsize_0 = { + .name = "/pools/nblock_0", + .is_open = false, + .is_consistent = true, + .bsize = 0, + .nblock = 100 +}; + +static PMEMblkpool g_pool_inconsistent = { + .name = "/pools/inconsistent", + .is_open = false, + .is_consistent = false, + .bsize = 512, + .nblock = 1 +}; + +static int g_opened_pools; +static struct spdk_bdev *g_bdev; +static const char *g_check_version_msg; +static bool g_pmemblk_open_allow_open = true; + +static PMEMblkpool * +find_pmemblk_pool(const char *path) +{ + if (path == NULL) { + errno = EINVAL; + return NULL; + } else if (strcmp(g_pool_ok.name, path) == 0) { + return &g_pool_ok; + } else if (strcmp(g_pool_nblock_0.name, path) == 0) { + return &g_pool_nblock_0; + } else if (strcmp(g_pool_bsize_0.name, path) == 0) { + return &g_pool_bsize_0; + } else if (strcmp(g_pool_inconsistent.name, path) == 0) { + return &g_pool_inconsistent; + } + + errno = ENOENT; + return NULL; +} + +PMEMblkpool * +pmemblk_open(const char *path, size_t bsize) +{ + PMEMblkpool *pool; + + if (!g_pmemblk_open_allow_open) { + errno = EIO; + return NULL; + } + + pool = find_pmemblk_pool(path); + if (!pool) { + errno = ENOENT; + return NULL; + } + + CU_ASSERT_TRUE_FATAL(pool->is_consistent); + CU_ASSERT_FALSE(pool->is_open); + if (pool->is_open == false) { + pool->is_open = true; + g_opened_pools++; + } else { + errno = EBUSY; + pool = NULL; 
+ } + + return pool; +} +void +spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len) +{ + cb(NULL, bdev_io, true); +} + +static void +check_open_pool_fatal(PMEMblkpool *pool) +{ + SPDK_CU_ASSERT_FATAL(pool != NULL); + SPDK_CU_ASSERT_FATAL(find_pmemblk_pool(pool->name) == pool); + SPDK_CU_ASSERT_FATAL(pool->is_open == true); +} + +void +pmemblk_close(PMEMblkpool *pool) +{ + check_open_pool_fatal(pool); + pool->is_open = false; + CU_ASSERT(g_opened_pools > 0); + g_opened_pools--; +} + +size_t +pmemblk_bsize(PMEMblkpool *pool) +{ + check_open_pool_fatal(pool); + return pool->bsize; +} + +size_t +pmemblk_nblock(PMEMblkpool *pool) +{ + check_open_pool_fatal(pool); + return pool->nblock; +} + +int +pmemblk_read(PMEMblkpool *pool, void *buf, long long blockno) +{ + check_open_pool_fatal(pool); + if (blockno >= pool->nblock) { + errno = EINVAL; + return -1; + } + + memcpy(buf, &pool->buffer[blockno * pool->bsize], pool->bsize); + return 0; +} + +int +pmemblk_write(PMEMblkpool *pool, const void *buf, long long blockno) +{ + check_open_pool_fatal(pool); + if (blockno >= pool->nblock) { + errno = EINVAL; + return -1; + } + + memcpy(&pool->buffer[blockno * pool->bsize], buf, pool->bsize); + return 0; +} + +int +pmemblk_set_zero(PMEMblkpool *pool, long long blockno) +{ + check_open_pool_fatal(pool); + if (blockno >= pool->nblock) { + + errno = EINVAL; + return -1; + } + + memset(&pool->buffer[blockno * pool->bsize], 0, pool->bsize); + return 0; +} + +const char * +pmemblk_errormsg(void) +{ + return strerror(errno); +} + +const char * +pmemblk_check_version(unsigned major_required, unsigned minor_required) +{ + return g_check_version_msg; +} + +int +pmemblk_check(const char *path, size_t bsize) +{ + PMEMblkpool *pool = find_pmemblk_pool(path); + + if (!pool) { + errno = ENOENT; + return -1; + } + + if (!pool->is_consistent) { + /* errno ? */ + return 0; + } + + if (bsize != 0 && pool->bsize != bsize) { + /* errno ? 
*/ + return 0; + } + + return 1; +} + +void +spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status) +{ + bdev_io->internal.status = status; +} + +int +spdk_bdev_register(struct spdk_bdev *bdev) +{ + CU_ASSERT_PTR_NULL(g_bdev); + g_bdev = bdev; + + return 0; +} + +void +spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg) +{ +} + +void +spdk_bdev_module_finish_done(void) +{ +} + +int +spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size) +{ + bdev->blockcnt = size; + return 0; +} + +static void +ut_bdev_pmem_destruct(struct spdk_bdev *bdev) +{ + SPDK_CU_ASSERT_FATAL(g_bdev != NULL); + CU_ASSERT_EQUAL(bdev_pmem_destruct(bdev->ctxt), 0); + g_bdev = NULL; +} + +void +spdk_bdev_module_list_add(struct spdk_bdev_module *bdev_module) +{ + g_bdev_pmem_module = bdev_module; + g_bdev_module_cnt++; +} + +static int +bdev_submit_request(struct spdk_bdev *bdev, int16_t io_type, uint64_t offset_blocks, + uint64_t num_blocks, struct iovec *iovs, size_t iov_cnt) +{ + struct spdk_bdev_io bio = { 0 }; + + switch (io_type) { + case SPDK_BDEV_IO_TYPE_READ: + bio.u.bdev.iovs = iovs; + bio.u.bdev.iovcnt = iov_cnt; + bio.u.bdev.offset_blocks = offset_blocks; + bio.u.bdev.num_blocks = num_blocks; + break; + case SPDK_BDEV_IO_TYPE_WRITE: + bio.u.bdev.iovs = iovs; + bio.u.bdev.iovcnt = iov_cnt; + bio.u.bdev.offset_blocks = offset_blocks; + bio.u.bdev.num_blocks = num_blocks; + break; + case SPDK_BDEV_IO_TYPE_FLUSH: + bio.u.bdev.offset_blocks = offset_blocks; + bio.u.bdev.num_blocks = num_blocks; + break; + case SPDK_BDEV_IO_TYPE_RESET: + break; + case SPDK_BDEV_IO_TYPE_UNMAP: + bio.u.bdev.offset_blocks = offset_blocks; + bio.u.bdev.num_blocks = num_blocks; + break; + case SPDK_BDEV_IO_TYPE_WRITE_ZEROES: + bio.u.bdev.offset_blocks = offset_blocks; + bio.u.bdev.num_blocks = num_blocks; + break; + default: + CU_FAIL_FATAL("BUG:Unexpected IO type"); + break; + } + + /* + * Set status to value that shouldn't be returned + */ + bio.type = io_type; + bio.internal.status = SPDK_BDEV_IO_STATUS_PENDING; + bio.bdev = bdev; + bdev_pmem_submit_request(NULL, &bio); + return bio.internal.status; +} + + +static int +ut_pmem_blk_clean(void) +{ + free(g_pool_ok.buffer); + g_pool_ok.buffer = NULL; + + /* Unload module to free IO channel */ + g_bdev_pmem_module->module_fini(); + poll_threads(); + + free_threads(); + + return 0; +} + +static int +ut_pmem_blk_init(void) +{ + errno = 0; + + allocate_threads(1); + set_thread(0); + + g_pool_ok.buffer = calloc(g_pool_ok.nblock, g_pool_ok.bsize); + if (g_pool_ok.buffer == NULL) { + ut_pmem_blk_clean(); + return -1; + } + + return 0; +} + +static void +ut_pmem_init(void) +{ + SPDK_CU_ASSERT_FATAL(g_bdev_pmem_module != NULL); + CU_ASSERT_EQUAL(g_bdev_module_cnt, 1); + + /* Make pmemblk_check_version fail with provided error message */ + g_check_version_msg = "TEST FAIL MESSAGE"; + CU_ASSERT_NOT_EQUAL(g_bdev_pmem_module->module_init(), 0); + + /* This init must success */ + g_check_version_msg = NULL; + CU_ASSERT_EQUAL(g_bdev_pmem_module->module_init(), 0); +} + +static void +ut_pmem_open_close(void) +{ + struct spdk_bdev *bdev = NULL; + int pools_cnt; + int rc; + + pools_cnt = g_opened_pools; + + /* Try opening with NULL name */ + rc = create_pmem_disk(NULL, NULL, &bdev); + CU_ASSERT_PTR_NULL(bdev); + CU_ASSERT_EQUAL(pools_cnt, g_opened_pools); + CU_ASSERT_NOT_EQUAL(rc, 0); + + /* Open non-existent pool */ + rc = create_pmem_disk("non existent pool", NULL, &bdev); + CU_ASSERT_PTR_NULL(bdev); + 
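+	/* Every rejected create_pmem_disk() call in this test must leave the
+	 * bdev pointer NULL, keep g_opened_pools unchanged and return a
+	 * non-zero error code.
+	 */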
CU_ASSERT_EQUAL(pools_cnt, g_opened_pools); + CU_ASSERT_NOT_EQUAL(rc, 0); + + /* Open inconsistent pool */ + rc = create_pmem_disk(g_pool_inconsistent.name, NULL, &bdev); + CU_ASSERT_PTR_NULL(bdev); + CU_ASSERT_EQUAL(pools_cnt, g_opened_pools); + CU_ASSERT_NOT_EQUAL(rc, 0); + + /* Open consistent pool fail the open from unknown reason. */ + g_pmemblk_open_allow_open = false; + rc = create_pmem_disk(g_pool_inconsistent.name, NULL, &bdev); + g_pmemblk_open_allow_open = true; + CU_ASSERT_PTR_NULL(bdev); + CU_ASSERT_EQUAL(pools_cnt, g_opened_pools); + CU_ASSERT_NOT_EQUAL(rc, 0); + + /* Open pool with nblocks = 0 */ + rc = create_pmem_disk(g_pool_nblock_0.name, NULL, &bdev); + CU_ASSERT_PTR_NULL(bdev); + CU_ASSERT_EQUAL(pools_cnt, g_opened_pools); + CU_ASSERT_NOT_EQUAL(rc, 0); + + /* Open pool with bsize = 0 */ + rc = create_pmem_disk(g_pool_bsize_0.name, NULL, &bdev); + CU_ASSERT_PTR_NULL(bdev); + CU_ASSERT_EQUAL(pools_cnt, g_opened_pools); + CU_ASSERT_NOT_EQUAL(rc, 0); + + /* Open pool with NULL name */ + rc = create_pmem_disk(g_pool_ok.name, NULL, &bdev); + CU_ASSERT_PTR_NULL(bdev); + CU_ASSERT_EQUAL(pools_cnt, g_opened_pools); + CU_ASSERT_NOT_EQUAL(rc, 0); + + /* Open good pool */ + rc = create_pmem_disk(g_pool_ok.name, g_bdev_name, &bdev); + SPDK_CU_ASSERT_FATAL(bdev != NULL); + CU_ASSERT_TRUE(g_pool_ok.is_open); + CU_ASSERT_EQUAL(pools_cnt + 1, g_opened_pools); + CU_ASSERT_EQUAL(rc, 0); + + /* Now remove this bdev */ + ut_bdev_pmem_destruct(bdev); + CU_ASSERT_FALSE(g_pool_ok.is_open); + CU_ASSERT_EQUAL(pools_cnt, g_opened_pools); +} + +static void +ut_pmem_write_read(void) +{ + uint8_t *write_buf, *read_buf; + struct spdk_bdev *bdev; + int rc; + size_t unaligned_aligned_size = 100; + size_t buf_size = g_pool_ok.bsize * g_pool_ok.nblock; + size_t i; + const uint64_t nblock_offset = 10; + uint64_t offset; + size_t io_size, nblock, total_io_size, bsize; + + bsize = 4096; + struct iovec iov[] = { + { 0, 2 * bsize }, + { 0, 3 * bsize }, + { 0, 4 * bsize }, + }; + + rc = create_pmem_disk(g_pool_ok.name, g_bdev_name, &bdev); + CU_ASSERT_EQUAL(rc, 0); + + SPDK_CU_ASSERT_FATAL(g_pool_ok.nblock > 40); + + write_buf = calloc(1, buf_size); + read_buf = calloc(1, buf_size); + + SPDK_CU_ASSERT_FATAL(bdev != NULL); + SPDK_CU_ASSERT_FATAL(write_buf != NULL); + SPDK_CU_ASSERT_FATAL(read_buf != NULL); + + total_io_size = 0; + offset = nblock_offset * g_pool_ok.bsize; + for (i = 0; i < 3; i++) { + iov[i].iov_base = &write_buf[offset + total_io_size]; + total_io_size += iov[i].iov_len; + } + + for (i = 0; i < total_io_size + unaligned_aligned_size; i++) { + write_buf[offset + i] = 0x42 + i; + } + + SPDK_CU_ASSERT_FATAL(total_io_size < buf_size); + + /* + * Write outside pool. + */ + rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_WRITE, g_pool_ok.nblock, 1, &iov[0], 2); + CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_FAILED); + + /* + * Write with insufficient IOV buffers length. + */ + rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_WRITE, 0, g_pool_ok.nblock, &iov[0], 2); + CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_FAILED); + + /* + * Try to write two IOV with first one iov_len % bsize != 0. + */ + io_size = iov[0].iov_len + iov[1].iov_len; + nblock = io_size / g_pool_ok.bsize; + iov[0].iov_len += unaligned_aligned_size; + rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_WRITE, 0, nblock, &iov[0], 2); + CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_FAILED); + iov[0].iov_len -= unaligned_aligned_size; + + /* + * Try to write one IOV. 
+ */ + nblock = iov[0].iov_len / g_pool_ok.bsize; + rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_WRITE, nblock_offset, nblock, &iov[0], 1); + CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_SUCCESS); + + /* + * Try to write 2 IOV. + * Sum of IOV length is larger than IO size and last IOV is larger and iov_len % bsize != 0 + */ + offset = iov[0].iov_len / g_pool_ok.bsize; + io_size = iov[1].iov_len + iov[2].iov_len; + nblock = io_size / g_pool_ok.bsize; + iov[2].iov_len += unaligned_aligned_size; + rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_WRITE, nblock_offset + offset, nblock, + &iov[1], 2); + CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_SUCCESS); + iov[2].iov_len -= unaligned_aligned_size; + + /* + * Examine pool state: + * 1. Written area should have expected values. + * 2. Anything else should contain zeros. + */ + offset = nblock_offset * g_pool_ok.bsize + total_io_size; + rc = memcmp(&g_pool_ok.buffer[0], write_buf, offset); + CU_ASSERT_EQUAL(rc, 0); + + for (i = offset; i < buf_size; i++) { + if (g_pool_ok.buffer[i] != 0) { + CU_ASSERT_EQUAL(g_pool_ok.buffer[i], 0); + break; + } + } + + /* Setup IOV for reads */ + memset(read_buf, 0xAB, buf_size); + offset = nblock_offset * g_pool_ok.bsize; + for (i = 0; i < 3; i++) { + iov[i].iov_base = &read_buf[offset]; + offset += iov[i].iov_len; + } + + /* + * Write outside pool. + */ + rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_READ, g_pool_ok.nblock, 1, &iov[0], 2); + CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_FAILED); + + /* + * Read with insufficient IOV buffers length. + */ + rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_READ, 0, g_pool_ok.nblock, &iov[0], 2); + CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_FAILED); + + /* + * Try to read two IOV with first one iov_len % bsize != 0. + */ + io_size = iov[0].iov_len + iov[1].iov_len; + nblock = io_size / g_pool_ok.bsize; + iov[0].iov_len += unaligned_aligned_size; + rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_READ, 0, nblock, &iov[0], 2); + CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_FAILED); + iov[0].iov_len -= unaligned_aligned_size; + + /* + * Try to write one IOV. + */ + nblock = iov[0].iov_len / g_pool_ok.bsize; + rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_READ, nblock_offset, nblock, &iov[0], 1); + CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_SUCCESS); + + /* + * Try to read 2 IOV. + * Sum of IOV length is larger than IO size and last IOV is larger and iov_len % bsize != 0 + */ + offset = iov[0].iov_len / g_pool_ok.bsize; + io_size = iov[1].iov_len + iov[2].iov_len; + nblock = io_size / g_pool_ok.bsize; + iov[2].iov_len += unaligned_aligned_size; + rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_READ, nblock_offset + offset, nblock, + &iov[1], 2); + CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_SUCCESS); + iov[2].iov_len -= unaligned_aligned_size; + + + /* + * Examine what we read state: + * 1. Written area should have expected values. + * 2. Anything else should contain zeros. 
+ */ + offset = nblock_offset * g_pool_ok.bsize; + for (i = 0; i < offset; i++) { + if (read_buf[i] != 0xAB) { + CU_ASSERT_EQUAL(read_buf[i], 0xAB); + break; + } + } + + rc = memcmp(&read_buf[offset], &write_buf[offset], total_io_size); + CU_ASSERT_EQUAL(rc, 0); + + offset += total_io_size; + for (i = offset; i < buf_size; i++) { + if (read_buf[i] != 0xAB) { + CU_ASSERT_EQUAL(read_buf[i], 0xAB); + break; + } + } + + memset(g_pool_ok.buffer, 0, g_pool_ok.bsize * g_pool_ok.nblock); + free(write_buf); + free(read_buf); + + /* Now remove this bdev */ + ut_bdev_pmem_destruct(bdev); + CU_ASSERT_FALSE(g_pool_ok.is_open); + CU_ASSERT_EQUAL(g_opened_pools, 0); +} + +static void +ut_pmem_reset(void) +{ + struct spdk_bdev *bdev; + int rc; + + rc = create_pmem_disk(g_pool_ok.name, g_bdev_name, &bdev); + CU_ASSERT_EQUAL(rc, 0); + SPDK_CU_ASSERT_FATAL(bdev != NULL); + + rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_RESET, 0, 0, NULL, 0); + CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_SUCCESS); + + ut_bdev_pmem_destruct(bdev); +} + +static void +ut_pmem_unmap_write_zero(int16_t io_type) +{ + struct spdk_bdev *bdev; + size_t buff_size = g_pool_ok.nblock * g_pool_ok.bsize; + size_t i; + uint8_t *buffer; + int rc; + + CU_ASSERT(io_type == SPDK_BDEV_IO_TYPE_UNMAP || io_type == SPDK_BDEV_IO_TYPE_WRITE_ZEROES); + rc = create_pmem_disk(g_pool_ok.name, g_bdev_name, &bdev); + CU_ASSERT_EQUAL(rc, 0); + SPDK_CU_ASSERT_FATAL(bdev != NULL); + SPDK_CU_ASSERT_FATAL(g_pool_ok.nblock > 40); + + buffer = calloc(1, buff_size); + SPDK_CU_ASSERT_FATAL(buffer != NULL); + + for (i = 10 * g_pool_ok.bsize; i < 30 * g_pool_ok.bsize; i++) { + buffer[i] = 0x30 + io_type + i; + } + memcpy(g_pool_ok.buffer, buffer, buff_size); + + /* + * Block outside of pool. + */ + rc = bdev_submit_request(bdev, io_type, g_pool_ok.nblock, 1, NULL, 0); + CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_FAILED); + + rc = memcmp(buffer, g_pool_ok.buffer, buff_size); + CU_ASSERT_EQUAL(rc, 0); + + /* + * Blocks 15 to 25 + */ + memset(&buffer[15 * g_pool_ok.bsize], 0, 10 * g_pool_ok.bsize); + rc = bdev_submit_request(bdev, io_type, 15, 10, NULL, 0); + CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_SUCCESS); + + rc = memcmp(buffer, g_pool_ok.buffer, buff_size); + CU_ASSERT_EQUAL(rc, 0); + + /* + * All blocks. 
+ */ + memset(buffer, 0, buff_size); + rc = bdev_submit_request(bdev, io_type, 0, g_pool_ok.nblock, NULL, 0); + CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_SUCCESS); + + rc = memcmp(buffer, g_pool_ok.buffer, buff_size); + CU_ASSERT_EQUAL(rc, 0); + + /* Now remove this bdev */ + ut_bdev_pmem_destruct(bdev); + CU_ASSERT_FALSE(g_pool_ok.is_open); + CU_ASSERT_EQUAL(g_opened_pools, 0); + + free(buffer); +} + +static void +ut_pmem_write_zero(void) +{ + ut_pmem_unmap_write_zero(SPDK_BDEV_IO_TYPE_WRITE_ZEROES); +} + +static void +ut_pmem_unmap(void) +{ + ut_pmem_unmap_write_zero(SPDK_BDEV_IO_TYPE_UNMAP); +} + +int +main(int argc, char **argv) +{ + CU_pSuite suite = NULL; + unsigned int num_failures; + + CU_set_error_action(CUEA_ABORT); + CU_initialize_registry(); + + suite = CU_add_suite("bdev_pmem", ut_pmem_blk_init, ut_pmem_blk_clean); + + CU_ADD_TEST(suite, ut_pmem_init); + CU_ADD_TEST(suite, ut_pmem_open_close); + CU_ADD_TEST(suite, ut_pmem_write_read); + CU_ADD_TEST(suite, ut_pmem_reset); + CU_ADD_TEST(suite, ut_pmem_write_zero); + CU_ADD_TEST(suite, ut_pmem_unmap); + + CU_basic_set_mode(CU_BRM_VERBOSE); + CU_basic_run_tests(); + num_failures = CU_get_number_of_failures(); + CU_cleanup_registry(); + + return num_failures; +} diff --git a/src/spdk/test/unit/lib/bdev/raid/Makefile b/src/spdk/test/unit/lib/bdev/raid/Makefile new file mode 100644 index 000000000..0090a85ce --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/raid/Makefile @@ -0,0 +1,46 @@ +# +# BSD LICENSE +# +# Copyright (c) Intel Corporation. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..) 
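+# Directory-level unit-test Makefiles like this one only list their test
+# subdirectories in DIRS-y and recurse through spdk.subdirs.mk; the per-test
+# Makefiles underneath (e.g. bdev_raid.c/Makefile below) set TEST_FILE and
+# include spdk.unittest.mk to build the actual test binary.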
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk + +DIRS-y = bdev_raid.c + +DIRS-$(CONFIG_RAID5) += raid5.c + +.PHONY: all clean $(DIRS-y) + +all: $(DIRS-y) +clean: $(DIRS-y) + +include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk diff --git a/src/spdk/test/unit/lib/bdev/raid/bdev_raid.c/.gitignore b/src/spdk/test/unit/lib/bdev/raid/bdev_raid.c/.gitignore new file mode 100644 index 000000000..98d1a166e --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/raid/bdev_raid.c/.gitignore @@ -0,0 +1 @@ +bdev_raid_ut diff --git a/src/spdk/test/unit/lib/bdev/raid/bdev_raid.c/Makefile b/src/spdk/test/unit/lib/bdev/raid/bdev_raid.c/Makefile new file mode 100644 index 000000000..da0ab94ba --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/raid/bdev_raid.c/Makefile @@ -0,0 +1,38 @@ +# +# BSD LICENSE +# +# Copyright (c) Intel Corporation. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../../..) + +TEST_FILE = bdev_raid_ut.c + +include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk diff --git a/src/spdk/test/unit/lib/bdev/raid/bdev_raid.c/bdev_raid_ut.c b/src/spdk/test/unit/lib/bdev/raid/bdev_raid.c/bdev_raid_ut.c new file mode 100644 index 000000000..6cf8e9f69 --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/raid/bdev_raid.c/bdev_raid_ut.c @@ -0,0 +1,2258 @@ +/*- + * BSD LICENSE + * + * Copyright (c) Intel Corporation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "spdk/stdinc.h" +#include "spdk_cunit.h" +#include "spdk/env.h" +#include "spdk_internal/mock.h" +#include "bdev/raid/bdev_raid.c" +#include "bdev/raid/bdev_raid_rpc.c" +#include "bdev/raid/raid0.c" +#include "common/lib/ut_multithread.c" + +#define MAX_BASE_DRIVES 32 +#define MAX_RAIDS 2 +#define INVALID_IO_SUBMIT 0xFFFF +#define MAX_TEST_IO_RANGE (3 * 3 * 3 * (MAX_BASE_DRIVES + 5)) +#define BLOCK_CNT (1024ul * 1024ul * 1024ul * 1024ul) + +struct spdk_bdev_channel { + struct spdk_io_channel *channel; +}; + +/* Data structure to capture the output of IO for verification */ +struct io_output { + struct spdk_bdev_desc *desc; + struct spdk_io_channel *ch; + uint64_t offset_blocks; + uint64_t num_blocks; + spdk_bdev_io_completion_cb cb; + void *cb_arg; + enum spdk_bdev_io_type iotype; +}; + +struct raid_io_ranges { + uint64_t lba; + uint64_t nblocks; +}; + +/* Globals */ +int g_bdev_io_submit_status; +struct io_output *g_io_output = NULL; +uint32_t g_io_output_index; +uint32_t g_io_comp_status; +bool g_child_io_status_flag; +void *g_rpc_req; +uint32_t g_rpc_req_size; +TAILQ_HEAD(bdev, spdk_bdev); +struct bdev g_bdev_list; +TAILQ_HEAD(waitq, spdk_bdev_io_wait_entry); +struct waitq g_io_waitq; +uint32_t g_block_len; +uint32_t g_strip_size; +uint32_t g_max_io_size; +uint8_t g_max_base_drives; +uint8_t g_max_raids; +uint8_t g_ignore_io_output; +uint8_t g_rpc_err; +char *g_get_raids_output[MAX_RAIDS]; +uint32_t g_get_raids_count; +uint8_t g_json_decode_obj_err; +uint8_t g_json_decode_obj_create; +uint8_t g_config_level_create = 0; +uint8_t g_test_multi_raids; +struct raid_io_ranges g_io_ranges[MAX_TEST_IO_RANGE]; +uint32_t g_io_range_idx; +uint64_t g_lba_offset; +struct spdk_io_channel g_io_channel; + +DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module)); +DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module)); +DEFINE_STUB(spdk_bdev_register, int, (struct spdk_bdev *bdev), 0); +DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev, + enum spdk_bdev_io_type io_type), true); +DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc)); +DEFINE_STUB(spdk_bdev_flush_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, + uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb, + void *cb_arg), 0); +DEFINE_STUB(spdk_conf_next_section, struct spdk_conf_section *, (struct spdk_conf_section *sp), + NULL); +DEFINE_STUB_V(spdk_rpc_register_method, (const char *method, spdk_rpc_method_handler func, + 
uint32_t state_mask)); +DEFINE_STUB_V(spdk_rpc_register_alias_deprecated, (const char *method, const char *alias)); +DEFINE_STUB_V(spdk_jsonrpc_end_result, (struct spdk_jsonrpc_request *request, + struct spdk_json_write_ctx *w)); +DEFINE_STUB(spdk_json_decode_string, int, (const struct spdk_json_val *val, void *out), 0); +DEFINE_STUB(spdk_json_decode_uint32, int, (const struct spdk_json_val *val, void *out), 0); +DEFINE_STUB(spdk_json_decode_array, int, (const struct spdk_json_val *values, + spdk_json_decode_fn decode_func, + void *out, size_t max_size, size_t *out_size, size_t stride), 0); +DEFINE_STUB(spdk_json_write_name, int, (struct spdk_json_write_ctx *w, const char *name), 0); +DEFINE_STUB(spdk_json_write_object_begin, int, (struct spdk_json_write_ctx *w), 0); +DEFINE_STUB(spdk_json_write_named_object_begin, int, (struct spdk_json_write_ctx *w, + const char *name), 0); +DEFINE_STUB(spdk_json_write_object_end, int, (struct spdk_json_write_ctx *w), 0); +DEFINE_STUB(spdk_json_write_array_begin, int, (struct spdk_json_write_ctx *w), 0); +DEFINE_STUB(spdk_json_write_array_end, int, (struct spdk_json_write_ctx *w), 0); +DEFINE_STUB(spdk_json_write_named_array_begin, int, (struct spdk_json_write_ctx *w, + const char *name), 0); +DEFINE_STUB(spdk_json_write_bool, int, (struct spdk_json_write_ctx *w, bool val), 0); +DEFINE_STUB(spdk_json_write_null, int, (struct spdk_json_write_ctx *w), 0); +DEFINE_STUB(spdk_strerror, const char *, (int errnum), NULL); +DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch, + struct spdk_bdev_io_wait_entry *entry), 0); + +struct spdk_io_channel * +spdk_bdev_get_io_channel(struct spdk_bdev_desc *desc) +{ + g_io_channel.thread = spdk_get_thread(); + + return &g_io_channel; +} + +static void +set_test_opts(void) +{ + + g_max_base_drives = MAX_BASE_DRIVES; + g_max_raids = MAX_RAIDS; + g_block_len = 4096; + g_strip_size = 64; + g_max_io_size = 1024; + + printf("Test Options\n"); + printf("blocklen = %u, strip_size = %u, max_io_size = %u, g_max_base_drives = %u, " + "g_max_raids = %u\n", + g_block_len, g_strip_size, g_max_io_size, g_max_base_drives, g_max_raids); +} + +/* Set globals before every test run */ +static void +set_globals(void) +{ + uint32_t max_splits; + + g_bdev_io_submit_status = 0; + if (g_max_io_size < g_strip_size) { + max_splits = 2; + } else { + max_splits = (g_max_io_size / g_strip_size) + 1; + } + if (max_splits < g_max_base_drives) { + max_splits = g_max_base_drives; + } + + g_io_output = calloc(max_splits, sizeof(struct io_output)); + SPDK_CU_ASSERT_FATAL(g_io_output != NULL); + g_io_output_index = 0; + memset(g_get_raids_output, 0, sizeof(g_get_raids_output)); + g_get_raids_count = 0; + g_io_comp_status = 0; + g_ignore_io_output = 0; + g_config_level_create = 0; + g_rpc_err = 0; + g_test_multi_raids = 0; + g_child_io_status_flag = true; + TAILQ_INIT(&g_bdev_list); + TAILQ_INIT(&g_io_waitq); + g_rpc_req = NULL; + g_rpc_req_size = 0; + g_json_decode_obj_err = 0; + g_json_decode_obj_create = 0; + g_lba_offset = 0; +} + +static void +base_bdevs_cleanup(void) +{ + struct spdk_bdev *bdev; + struct spdk_bdev *bdev_next; + + if (!TAILQ_EMPTY(&g_bdev_list)) { + TAILQ_FOREACH_SAFE(bdev, &g_bdev_list, internal.link, bdev_next) { + free(bdev->name); + TAILQ_REMOVE(&g_bdev_list, bdev, internal.link); + free(bdev); + } + } +} + +static void +check_and_remove_raid_bdev(struct raid_bdev_config *raid_cfg) +{ + struct raid_bdev *raid_bdev; + struct raid_base_bdev_info *base_info; + + /* Get the raid structured 
allocated if exists */ + raid_bdev = raid_cfg->raid_bdev; + if (raid_bdev == NULL) { + return; + } + + assert(raid_bdev->base_bdev_info != NULL); + + RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) { + if (base_info->bdev) { + raid_bdev_free_base_bdev_resource(raid_bdev, base_info); + } + } + assert(raid_bdev->num_base_bdevs_discovered == 0); + raid_bdev_cleanup(raid_bdev); +} + +/* Reset globals */ +static void +reset_globals(void) +{ + if (g_io_output) { + free(g_io_output); + g_io_output = NULL; + } + g_rpc_req = NULL; + g_rpc_req_size = 0; +} + +void +spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, + uint64_t len) +{ + cb(bdev_io->internal.ch->channel, bdev_io, true); +} + +/* Store the IO completion status in global variable to verify by various tests */ +void +spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status) +{ + g_io_comp_status = ((status == SPDK_BDEV_IO_STATUS_SUCCESS) ? true : false); +} + +static void +set_io_output(struct io_output *output, + struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, + uint64_t offset_blocks, uint64_t num_blocks, + spdk_bdev_io_completion_cb cb, void *cb_arg, + enum spdk_bdev_io_type iotype) +{ + output->desc = desc; + output->ch = ch; + output->offset_blocks = offset_blocks; + output->num_blocks = num_blocks; + output->cb = cb; + output->cb_arg = cb_arg; + output->iotype = iotype; +} + +/* It will cache the split IOs for verification */ +int +spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, + struct iovec *iov, int iovcnt, + uint64_t offset_blocks, uint64_t num_blocks, + spdk_bdev_io_completion_cb cb, void *cb_arg) +{ + struct io_output *output = &g_io_output[g_io_output_index]; + struct spdk_bdev_io *child_io; + + if (g_ignore_io_output) { + return 0; + } + + if (g_max_io_size < g_strip_size) { + SPDK_CU_ASSERT_FATAL(g_io_output_index < 2); + } else { + SPDK_CU_ASSERT_FATAL(g_io_output_index < (g_max_io_size / g_strip_size) + 1); + } + if (g_bdev_io_submit_status == 0) { + set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg, + SPDK_BDEV_IO_TYPE_WRITE); + g_io_output_index++; + + child_io = calloc(1, sizeof(struct spdk_bdev_io)); + SPDK_CU_ASSERT_FATAL(child_io != NULL); + cb(child_io, g_child_io_status_flag, cb_arg); + } + + return g_bdev_io_submit_status; +} + +int +spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, + spdk_bdev_io_completion_cb cb, void *cb_arg) +{ + struct io_output *output = &g_io_output[g_io_output_index]; + struct spdk_bdev_io *child_io; + + if (g_ignore_io_output) { + return 0; + } + + if (g_bdev_io_submit_status == 0) { + set_io_output(output, desc, ch, 0, 0, cb, cb_arg, SPDK_BDEV_IO_TYPE_RESET); + g_io_output_index++; + + child_io = calloc(1, sizeof(struct spdk_bdev_io)); + SPDK_CU_ASSERT_FATAL(child_io != NULL); + cb(child_io, g_child_io_status_flag, cb_arg); + } + + return g_bdev_io_submit_status; +} + +int +spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, + uint64_t offset_blocks, uint64_t num_blocks, + spdk_bdev_io_completion_cb cb, void *cb_arg) +{ + struct io_output *output = &g_io_output[g_io_output_index]; + struct spdk_bdev_io *child_io; + + if (g_ignore_io_output) { + return 0; + } + + if (g_bdev_io_submit_status == 0) { + set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg, + SPDK_BDEV_IO_TYPE_UNMAP); + g_io_output_index++; + + child_io = calloc(1, sizeof(struct spdk_bdev_io)); + SPDK_CU_ASSERT_FATAL(child_io != NULL); + 
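+		/* Complete the child IO inline with the globally configured
+		 * status (g_child_io_status_flag), so the raid completion path
+		 * runs synchronously and the split geometry recorded in
+		 * g_io_output can be verified right after submission.
+		 */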
cb(child_io, g_child_io_status_flag, cb_arg); + } + + return g_bdev_io_submit_status; +} + +void +spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg) +{ + bdev->fn_table->destruct(bdev->ctxt); + + if (cb_fn) { + cb_fn(cb_arg, 0); + } +} + +int +spdk_bdev_open(struct spdk_bdev *bdev, bool write, spdk_bdev_remove_cb_t remove_cb, + void *remove_ctx, struct spdk_bdev_desc **_desc) +{ + *_desc = (void *)0x1; + return 0; +} + +char * +spdk_sprintf_alloc(const char *format, ...) +{ + return strdup(format); +} + +int spdk_json_write_named_uint32(struct spdk_json_write_ctx *w, const char *name, uint32_t val) +{ + struct rpc_bdev_raid_create *req = g_rpc_req; + if (strcmp(name, "strip_size_kb") == 0) { + CU_ASSERT(req->strip_size_kb == val); + } else if (strcmp(name, "blocklen_shift") == 0) { + CU_ASSERT(spdk_u32log2(g_block_len) == val); + } else if (strcmp(name, "num_base_bdevs") == 0) { + CU_ASSERT(req->base_bdevs.num_base_bdevs == val); + } else if (strcmp(name, "state") == 0) { + CU_ASSERT(val == RAID_BDEV_STATE_ONLINE); + } else if (strcmp(name, "destruct_called") == 0) { + CU_ASSERT(val == 0); + } else if (strcmp(name, "num_base_bdevs_discovered") == 0) { + CU_ASSERT(req->base_bdevs.num_base_bdevs == val); + } + return 0; +} + +int spdk_json_write_named_string(struct spdk_json_write_ctx *w, const char *name, const char *val) +{ + struct rpc_bdev_raid_create *req = g_rpc_req; + if (strcmp(name, "raid_level") == 0) { + CU_ASSERT(strcmp(val, raid_bdev_level_to_str(req->level)) == 0); + } + return 0; +} + +void +spdk_bdev_free_io(struct spdk_bdev_io *bdev_io) +{ + if (bdev_io) { + free(bdev_io); + } +} + +/* It will cache split IOs for verification */ +int +spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, + struct iovec *iov, int iovcnt, + uint64_t offset_blocks, uint64_t num_blocks, + spdk_bdev_io_completion_cb cb, void *cb_arg) +{ + struct io_output *output = &g_io_output[g_io_output_index]; + struct spdk_bdev_io *child_io; + + if (g_ignore_io_output) { + return 0; + } + + SPDK_CU_ASSERT_FATAL(g_io_output_index <= (g_max_io_size / g_strip_size) + 1); + if (g_bdev_io_submit_status == 0) { + set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg, + SPDK_BDEV_IO_TYPE_READ); + g_io_output_index++; + + child_io = calloc(1, sizeof(struct spdk_bdev_io)); + SPDK_CU_ASSERT_FATAL(child_io != NULL); + cb(child_io, g_child_io_status_flag, cb_arg); + } + + return g_bdev_io_submit_status; +} + +void +spdk_bdev_module_release_bdev(struct spdk_bdev *bdev) +{ + CU_ASSERT(bdev->internal.claim_module != NULL); + bdev->internal.claim_module = NULL; +} + +struct spdk_conf_section * +spdk_conf_first_section(struct spdk_conf *cp) +{ + if (g_config_level_create) { + return (void *) 0x1; + } + + return NULL; +} + +bool +spdk_conf_section_match_prefix(const struct spdk_conf_section *sp, const char *name_prefix) +{ + if (g_config_level_create) { + return true; + } + + return false; +} + +char * +spdk_conf_section_get_val(struct spdk_conf_section *sp, const char *key) +{ + struct rpc_bdev_raid_create *req = g_rpc_req; + + if (g_config_level_create) { + if (strcmp(key, "Name") == 0) { + return req->name; + } else if (strcmp(key, "RaidLevel") == 0) { + return (char *)raid_bdev_level_to_str(req->level); + } + } + + return NULL; +} + +int +spdk_conf_section_get_intval(struct spdk_conf_section *sp, const char *key) +{ + struct rpc_bdev_raid_create *req = g_rpc_req; + + if (g_config_level_create) { + if (strcmp(key, "StripSize") == 0) { + return 
req->strip_size_kb; + } else if (strcmp(key, "NumDevices") == 0) { + return req->base_bdevs.num_base_bdevs; + } + } + + return 0; +} + +char * +spdk_conf_section_get_nmval(struct spdk_conf_section *sp, const char *key, int idx1, int idx2) +{ + struct rpc_bdev_raid_create *req = g_rpc_req; + + if (g_config_level_create) { + if (strcmp(key, "Devices") == 0) { + if (idx2 >= g_max_base_drives) { + return NULL; + } + return req->base_bdevs.base_bdevs[idx2]; + } + } + + return NULL; +} + +int +spdk_bdev_module_claim_bdev(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, + struct spdk_bdev_module *module) +{ + if (bdev->internal.claim_module != NULL) { + return -1; + } + bdev->internal.claim_module = module; + return 0; +} + +int +spdk_json_decode_object(const struct spdk_json_val *values, + const struct spdk_json_object_decoder *decoders, size_t num_decoders, + void *out) +{ + struct rpc_bdev_raid_create *req, *_out; + size_t i; + + if (g_json_decode_obj_err) { + return -1; + } else if (g_json_decode_obj_create) { + req = g_rpc_req; + _out = out; + + _out->name = strdup(req->name); + SPDK_CU_ASSERT_FATAL(_out->name != NULL); + _out->strip_size_kb = req->strip_size_kb; + _out->level = req->level; + _out->base_bdevs.num_base_bdevs = req->base_bdevs.num_base_bdevs; + for (i = 0; i < req->base_bdevs.num_base_bdevs; i++) { + _out->base_bdevs.base_bdevs[i] = strdup(req->base_bdevs.base_bdevs[i]); + SPDK_CU_ASSERT_FATAL(_out->base_bdevs.base_bdevs[i]); + } + } else { + memcpy(out, g_rpc_req, g_rpc_req_size); + } + + return 0; +} + +struct spdk_json_write_ctx * +spdk_jsonrpc_begin_result(struct spdk_jsonrpc_request *request) +{ + return (void *)1; +} + +int +spdk_json_write_string(struct spdk_json_write_ctx *w, const char *val) +{ + if (g_test_multi_raids) { + g_get_raids_output[g_get_raids_count] = strdup(val); + SPDK_CU_ASSERT_FATAL(g_get_raids_output[g_get_raids_count] != NULL); + g_get_raids_count++; + } + + return 0; +} + +void +spdk_jsonrpc_send_error_response(struct spdk_jsonrpc_request *request, + int error_code, const char *msg) +{ + g_rpc_err = 1; +} + +void +spdk_jsonrpc_send_error_response_fmt(struct spdk_jsonrpc_request *request, + int error_code, const char *fmt, ...) 
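+/* As with spdk_jsonrpc_send_error_response() above, this stub only records
+ * that an error reply was produced; tests check g_rpc_err to verify the
+ * failure paths without a real JSON-RPC server.
+ */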
+{ + g_rpc_err = 1; +} + +struct spdk_bdev * +spdk_bdev_get_by_name(const char *bdev_name) +{ + struct spdk_bdev *bdev; + + if (!TAILQ_EMPTY(&g_bdev_list)) { + TAILQ_FOREACH(bdev, &g_bdev_list, internal.link) { + if (strcmp(bdev_name, bdev->name) == 0) { + return bdev; + } + } + } + + return NULL; +} + +static void +bdev_io_cleanup(struct spdk_bdev_io *bdev_io) +{ + if (bdev_io->u.bdev.iovs) { + if (bdev_io->u.bdev.iovs->iov_base) { + free(bdev_io->u.bdev.iovs->iov_base); + } + free(bdev_io->u.bdev.iovs); + } + free(bdev_io); +} + +static void +bdev_io_initialize(struct spdk_bdev_io *bdev_io, struct spdk_io_channel *ch, struct spdk_bdev *bdev, + uint64_t lba, uint64_t blocks, int16_t iotype) +{ + struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch); + + bdev_io->bdev = bdev; + bdev_io->u.bdev.offset_blocks = lba; + bdev_io->u.bdev.num_blocks = blocks; + bdev_io->type = iotype; + + if (bdev_io->type == SPDK_BDEV_IO_TYPE_UNMAP || bdev_io->type == SPDK_BDEV_IO_TYPE_FLUSH) { + return; + } + + bdev_io->u.bdev.iovcnt = 1; + bdev_io->u.bdev.iovs = calloc(1, sizeof(struct iovec)); + SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs != NULL); + bdev_io->u.bdev.iovs->iov_base = calloc(1, bdev_io->u.bdev.num_blocks * g_block_len); + SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs->iov_base != NULL); + bdev_io->u.bdev.iovs->iov_len = bdev_io->u.bdev.num_blocks * g_block_len; + bdev_io->internal.ch = channel; +} + +static void +verify_reset_io(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives, + struct raid_bdev_io_channel *ch_ctx, struct raid_bdev *raid_bdev, uint32_t io_status) +{ + uint8_t index = 0; + struct io_output *output; + + SPDK_CU_ASSERT_FATAL(raid_bdev != NULL); + SPDK_CU_ASSERT_FATAL(num_base_drives != 0); + SPDK_CU_ASSERT_FATAL(io_status != INVALID_IO_SUBMIT); + SPDK_CU_ASSERT_FATAL(ch_ctx->base_channel != NULL); + + CU_ASSERT(g_io_output_index == num_base_drives); + for (index = 0; index < g_io_output_index; index++) { + output = &g_io_output[index]; + CU_ASSERT(ch_ctx->base_channel[index] == output->ch); + CU_ASSERT(raid_bdev->base_bdev_info[index].desc == output->desc); + CU_ASSERT(bdev_io->type == output->iotype); + } + CU_ASSERT(g_io_comp_status == io_status); +} + +static void +verify_io(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives, + struct raid_bdev_io_channel *ch_ctx, struct raid_bdev *raid_bdev, uint32_t io_status) +{ + uint32_t strip_shift = spdk_u32log2(g_strip_size); + uint64_t start_strip = bdev_io->u.bdev.offset_blocks >> strip_shift; + uint64_t end_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) >> + strip_shift; + uint32_t splits_reqd = (end_strip - start_strip + 1); + uint32_t strip; + uint64_t pd_strip; + uint8_t pd_idx; + uint32_t offset_in_strip; + uint64_t pd_lba; + uint64_t pd_blocks; + uint32_t index = 0; + uint8_t *buf = bdev_io->u.bdev.iovs->iov_base; + struct io_output *output; + + if (io_status == INVALID_IO_SUBMIT) { + CU_ASSERT(g_io_comp_status == false); + return; + } + SPDK_CU_ASSERT_FATAL(raid_bdev != NULL); + SPDK_CU_ASSERT_FATAL(num_base_drives != 0); + + CU_ASSERT(splits_reqd == g_io_output_index); + for (strip = start_strip; strip <= end_strip; strip++, index++) { + pd_strip = strip / num_base_drives; + pd_idx = strip % num_base_drives; + if (strip == start_strip) { + offset_in_strip = bdev_io->u.bdev.offset_blocks & (g_strip_size - 1); + pd_lba = (pd_strip << strip_shift) + offset_in_strip; + if (strip == end_strip) { + pd_blocks = bdev_io->u.bdev.num_blocks; + } else { + pd_blocks = g_strip_size - offset_in_strip; 
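+			/* Worked example with the defaults from set_test_opts()
+			 * (g_strip_size = 64, 32 base drives): a request at
+			 * offset_blocks = 100 for 200 blocks spans strips 1..4,
+			 * so four splits are expected: 28 blocks to drive 1 at
+			 * pd_lba 36 (64 - (100 & 63)), full 64-block strips to
+			 * drives 2 and 3 at pd_lba 0, and the remaining 44
+			 * blocks to drive 4 at pd_lba 0.
+			 */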
+ } + } else if (strip == end_strip) { + pd_lba = pd_strip << strip_shift; + pd_blocks = ((bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) & + (g_strip_size - 1)) + 1; + } else { + pd_lba = pd_strip << raid_bdev->strip_size_shift; + pd_blocks = raid_bdev->strip_size; + } + output = &g_io_output[index]; + CU_ASSERT(pd_lba == output->offset_blocks); + CU_ASSERT(pd_blocks == output->num_blocks); + CU_ASSERT(ch_ctx->base_channel[pd_idx] == output->ch); + CU_ASSERT(raid_bdev->base_bdev_info[pd_idx].desc == output->desc); + CU_ASSERT(bdev_io->type == output->iotype); + buf += (pd_blocks << spdk_u32log2(g_block_len)); + } + CU_ASSERT(g_io_comp_status == io_status); +} + +static void +verify_io_without_payload(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives, + struct raid_bdev_io_channel *ch_ctx, struct raid_bdev *raid_bdev, + uint32_t io_status) +{ + uint32_t strip_shift = spdk_u32log2(g_strip_size); + uint64_t start_offset_in_strip = bdev_io->u.bdev.offset_blocks % g_strip_size; + uint64_t end_offset_in_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) % + g_strip_size; + uint64_t start_strip = bdev_io->u.bdev.offset_blocks >> strip_shift; + uint64_t end_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) >> + strip_shift; + uint8_t n_disks_involved; + uint64_t start_strip_disk_idx; + uint64_t end_strip_disk_idx; + uint64_t nblocks_in_start_disk; + uint64_t offset_in_start_disk; + uint8_t disk_idx; + uint64_t base_io_idx; + uint64_t sum_nblocks = 0; + struct io_output *output; + + if (io_status == INVALID_IO_SUBMIT) { + CU_ASSERT(g_io_comp_status == false); + return; + } + SPDK_CU_ASSERT_FATAL(raid_bdev != NULL); + SPDK_CU_ASSERT_FATAL(num_base_drives != 0); + SPDK_CU_ASSERT_FATAL(bdev_io->type != SPDK_BDEV_IO_TYPE_READ); + SPDK_CU_ASSERT_FATAL(bdev_io->type != SPDK_BDEV_IO_TYPE_WRITE); + + n_disks_involved = spdk_min(end_strip - start_strip + 1, num_base_drives); + CU_ASSERT(n_disks_involved == g_io_output_index); + + start_strip_disk_idx = start_strip % num_base_drives; + end_strip_disk_idx = end_strip % num_base_drives; + + offset_in_start_disk = g_io_output[0].offset_blocks; + nblocks_in_start_disk = g_io_output[0].num_blocks; + + for (base_io_idx = 0, disk_idx = start_strip_disk_idx; base_io_idx < n_disks_involved; + base_io_idx++, disk_idx++) { + uint64_t start_offset_in_disk; + uint64_t end_offset_in_disk; + + output = &g_io_output[base_io_idx]; + + /* round disk_idx */ + if (disk_idx >= num_base_drives) { + disk_idx %= num_base_drives; + } + + /* start_offset_in_disk aligned in strip check: + * The first base io has a same start_offset_in_strip with the whole raid io. + * Other base io should have aligned start_offset_in_strip which is 0. + */ + start_offset_in_disk = output->offset_blocks; + if (base_io_idx == 0) { + CU_ASSERT(start_offset_in_disk % g_strip_size == start_offset_in_strip); + } else { + CU_ASSERT(start_offset_in_disk % g_strip_size == 0); + } + + /* end_offset_in_disk aligned in strip check: + * Base io on disk at which end_strip is located, has a same end_offset_in_strip + * with the whole raid io. + * Other base io should have aligned end_offset_in_strip. + */ + end_offset_in_disk = output->offset_blocks + output->num_blocks - 1; + if (disk_idx == end_strip_disk_idx) { + CU_ASSERT(end_offset_in_disk % g_strip_size == end_offset_in_strip); + } else { + CU_ASSERT(end_offset_in_disk % g_strip_size == g_strip_size - 1); + } + + /* start_offset_in_disk compared with start_disk. + * 1. 
For disk_idx which is larger than start_strip_disk_idx: Its start_offset_in_disk + * mustn't be larger than the start offset of start_offset_in_disk; And the gap + * must be less than strip size. + * 2. For disk_idx which is less than start_strip_disk_idx, Its start_offset_in_disk + * must be larger than the start offset of start_offset_in_disk; And the gap mustn't + * be less than strip size. + */ + if (disk_idx > start_strip_disk_idx) { + CU_ASSERT(start_offset_in_disk <= offset_in_start_disk); + CU_ASSERT(offset_in_start_disk - start_offset_in_disk < g_strip_size); + } else if (disk_idx < start_strip_disk_idx) { + CU_ASSERT(start_offset_in_disk > offset_in_start_disk); + CU_ASSERT(output->offset_blocks - offset_in_start_disk <= g_strip_size); + } + + /* nblocks compared with start_disk: + * The gap between them must be within a strip size. + */ + if (output->num_blocks <= nblocks_in_start_disk) { + CU_ASSERT(nblocks_in_start_disk - output->num_blocks <= g_strip_size); + } else { + CU_ASSERT(output->num_blocks - nblocks_in_start_disk < g_strip_size); + } + + sum_nblocks += output->num_blocks; + + CU_ASSERT(ch_ctx->base_channel[disk_idx] == output->ch); + CU_ASSERT(raid_bdev->base_bdev_info[disk_idx].desc == output->desc); + CU_ASSERT(bdev_io->type == output->iotype); + } + + /* Sum of each nblocks should be same with raid bdev_io */ + CU_ASSERT(bdev_io->u.bdev.num_blocks == sum_nblocks); + + CU_ASSERT(g_io_comp_status == io_status); +} + +static void +verify_raid_config_present(const char *name, bool presence) +{ + struct raid_bdev_config *raid_cfg; + bool cfg_found; + + cfg_found = false; + + TAILQ_FOREACH(raid_cfg, &g_raid_config.raid_bdev_config_head, link) { + if (raid_cfg->name != NULL) { + if (strcmp(name, raid_cfg->name) == 0) { + cfg_found = true; + break; + } + } + } + + if (presence == true) { + CU_ASSERT(cfg_found == true); + } else { + CU_ASSERT(cfg_found == false); + } +} + +static void +verify_raid_bdev_present(const char *name, bool presence) +{ + struct raid_bdev *pbdev; + bool pbdev_found; + + pbdev_found = false; + TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { + if (strcmp(pbdev->bdev.name, name) == 0) { + pbdev_found = true; + break; + } + } + if (presence == true) { + CU_ASSERT(pbdev_found == true); + } else { + CU_ASSERT(pbdev_found == false); + } +} +static void +verify_raid_config(struct rpc_bdev_raid_create *r, bool presence) +{ + struct raid_bdev_config *raid_cfg = NULL; + uint8_t i; + int val; + + TAILQ_FOREACH(raid_cfg, &g_raid_config.raid_bdev_config_head, link) { + if (strcmp(r->name, raid_cfg->name) == 0) { + if (presence == false) { + break; + } + CU_ASSERT(raid_cfg->raid_bdev != NULL); + CU_ASSERT(raid_cfg->strip_size == r->strip_size_kb); + CU_ASSERT(raid_cfg->num_base_bdevs == r->base_bdevs.num_base_bdevs); + CU_ASSERT(raid_cfg->level == r->level); + if (raid_cfg->base_bdev != NULL) { + for (i = 0; i < raid_cfg->num_base_bdevs; i++) { + val = strcmp(raid_cfg->base_bdev[i].name, + r->base_bdevs.base_bdevs[i]); + CU_ASSERT(val == 0); + } + } + break; + } + } + + if (presence == true) { + CU_ASSERT(raid_cfg != NULL); + } else { + CU_ASSERT(raid_cfg == NULL); + } +} + +static void +verify_raid_bdev(struct rpc_bdev_raid_create *r, bool presence, uint32_t raid_state) +{ + struct raid_bdev *pbdev; + struct raid_base_bdev_info *base_info; + struct spdk_bdev *bdev = NULL; + bool pbdev_found; + uint64_t min_blockcnt = 0xFFFFFFFFFFFFFFFF; + + pbdev_found = false; + TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { + if (strcmp(pbdev->bdev.name, 
r->name) == 0) { + pbdev_found = true; + if (presence == false) { + break; + } + CU_ASSERT(pbdev->config->raid_bdev == pbdev); + CU_ASSERT(pbdev->base_bdev_info != NULL); + CU_ASSERT(pbdev->strip_size == ((r->strip_size_kb * 1024) / g_block_len)); + CU_ASSERT(pbdev->strip_size_shift == spdk_u32log2(((r->strip_size_kb * 1024) / + g_block_len))); + CU_ASSERT(pbdev->blocklen_shift == spdk_u32log2(g_block_len)); + CU_ASSERT(pbdev->state == raid_state); + CU_ASSERT(pbdev->num_base_bdevs == r->base_bdevs.num_base_bdevs); + CU_ASSERT(pbdev->num_base_bdevs_discovered == r->base_bdevs.num_base_bdevs); + CU_ASSERT(pbdev->level == r->level); + CU_ASSERT(pbdev->destruct_called == false); + CU_ASSERT(pbdev->base_bdev_info != NULL); + RAID_FOR_EACH_BASE_BDEV(pbdev, base_info) { + CU_ASSERT(base_info->bdev != NULL); + bdev = spdk_bdev_get_by_name(base_info->bdev->name); + CU_ASSERT(bdev != NULL); + CU_ASSERT(base_info->remove_scheduled == false); + + if (bdev && bdev->blockcnt < min_blockcnt) { + min_blockcnt = bdev->blockcnt; + } + } + CU_ASSERT((((min_blockcnt / (r->strip_size_kb * 1024 / g_block_len)) * + (r->strip_size_kb * 1024 / g_block_len)) * + r->base_bdevs.num_base_bdevs) == pbdev->bdev.blockcnt); + CU_ASSERT(strcmp(pbdev->bdev.product_name, "Raid Volume") == 0); + CU_ASSERT(pbdev->bdev.write_cache == 0); + CU_ASSERT(pbdev->bdev.blocklen == g_block_len); + if (pbdev->num_base_bdevs > 1) { + CU_ASSERT(pbdev->bdev.optimal_io_boundary == pbdev->strip_size); + CU_ASSERT(pbdev->bdev.split_on_optimal_io_boundary == true); + } else { + CU_ASSERT(pbdev->bdev.optimal_io_boundary == 0); + CU_ASSERT(pbdev->bdev.split_on_optimal_io_boundary == false); + } + CU_ASSERT(pbdev->bdev.ctxt == pbdev); + CU_ASSERT(pbdev->bdev.fn_table == &g_raid_bdev_fn_table); + CU_ASSERT(pbdev->bdev.module == &g_raid_if); + break; + } + } + if (presence == true) { + CU_ASSERT(pbdev_found == true); + } else { + CU_ASSERT(pbdev_found == false); + } + pbdev_found = false; + if (raid_state == RAID_BDEV_STATE_ONLINE) { + TAILQ_FOREACH(pbdev, &g_raid_bdev_configured_list, state_link) { + if (strcmp(pbdev->bdev.name, r->name) == 0) { + pbdev_found = true; + break; + } + } + } else if (raid_state == RAID_BDEV_STATE_CONFIGURING) { + TAILQ_FOREACH(pbdev, &g_raid_bdev_configuring_list, state_link) { + if (strcmp(pbdev->bdev.name, r->name) == 0) { + pbdev_found = true; + break; + } + } + } else if (raid_state == RAID_BDEV_STATE_OFFLINE) { + TAILQ_FOREACH(pbdev, &g_raid_bdev_offline_list, state_link) { + if (strcmp(pbdev->bdev.name, r->name) == 0) { + pbdev_found = true; + break; + } + } + } + if (presence == true) { + CU_ASSERT(pbdev_found == true); + } else { + CU_ASSERT(pbdev_found == false); + } +} + +static void +verify_get_raids(struct rpc_bdev_raid_create *construct_req, + uint8_t g_max_raids, + char **g_get_raids_output, uint32_t g_get_raids_count) +{ + uint8_t i, j; + bool found; + + CU_ASSERT(g_max_raids == g_get_raids_count); + if (g_max_raids == g_get_raids_count) { + for (i = 0; i < g_max_raids; i++) { + found = false; + for (j = 0; j < g_max_raids; j++) { + if (construct_req[i].name && + strcmp(construct_req[i].name, g_get_raids_output[i]) == 0) { + found = true; + break; + } + } + CU_ASSERT(found == true); + } + } +} + +static void +create_base_bdevs(uint32_t bbdev_start_idx) +{ + uint8_t i; + struct spdk_bdev *base_bdev; + char name[16]; + + for (i = 0; i < g_max_base_drives; i++, bbdev_start_idx++) { + snprintf(name, 16, "%s%u%s", "Nvme", bbdev_start_idx, "n1"); + base_bdev = calloc(1, sizeof(struct spdk_bdev)); + 
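+		/* Each fake base bdev is just a bare struct spdk_bdev named
+		 * "Nvme<idx>n1" placed on g_bdev_list; that is all the mocked
+		 * spdk_bdev_get_by_name() and claim/release stubs need.
+		 */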
SPDK_CU_ASSERT_FATAL(base_bdev != NULL); + base_bdev->name = strdup(name); + SPDK_CU_ASSERT_FATAL(base_bdev->name != NULL); + base_bdev->blocklen = g_block_len; + base_bdev->blockcnt = BLOCK_CNT; + TAILQ_INSERT_TAIL(&g_bdev_list, base_bdev, internal.link); + } +} + +static void +create_test_req(struct rpc_bdev_raid_create *r, const char *raid_name, + uint8_t bbdev_start_idx, bool create_base_bdev) +{ + uint8_t i; + char name[16]; + uint8_t bbdev_idx = bbdev_start_idx; + + r->name = strdup(raid_name); + SPDK_CU_ASSERT_FATAL(r->name != NULL); + r->strip_size_kb = (g_strip_size * g_block_len) / 1024; + r->level = RAID0; + r->base_bdevs.num_base_bdevs = g_max_base_drives; + for (i = 0; i < g_max_base_drives; i++, bbdev_idx++) { + snprintf(name, 16, "%s%u%s", "Nvme", bbdev_idx, "n1"); + r->base_bdevs.base_bdevs[i] = strdup(name); + SPDK_CU_ASSERT_FATAL(r->base_bdevs.base_bdevs[i] != NULL); + } + if (create_base_bdev == true) { + create_base_bdevs(bbdev_start_idx); + } + g_rpc_req = r; + g_rpc_req_size = sizeof(*r); +} + +static void +create_raid_bdev_create_req(struct rpc_bdev_raid_create *r, const char *raid_name, + uint8_t bbdev_start_idx, bool create_base_bdev, + uint8_t json_decode_obj_err) +{ + create_test_req(r, raid_name, bbdev_start_idx, create_base_bdev); + + g_rpc_err = 0; + g_json_decode_obj_create = 1; + g_json_decode_obj_err = json_decode_obj_err; + g_config_level_create = 0; + g_test_multi_raids = 0; +} + +static void +create_raid_bdev_create_config(struct rpc_bdev_raid_create *r, const char *raid_name, + uint8_t bbdev_start_idx, bool create_base_bdev) +{ + create_test_req(r, raid_name, bbdev_start_idx, create_base_bdev); + + g_config_level_create = 1; + g_test_multi_raids = 0; +} + +static void +free_test_req(struct rpc_bdev_raid_create *r) +{ + uint8_t i; + + free(r->name); + for (i = 0; i < r->base_bdevs.num_base_bdevs; i++) { + free(r->base_bdevs.base_bdevs[i]); + } +} + +static void +create_raid_bdev_delete_req(struct rpc_bdev_raid_delete *r, const char *raid_name, + uint8_t json_decode_obj_err) +{ + r->name = strdup(raid_name); + SPDK_CU_ASSERT_FATAL(r->name != NULL); + + g_rpc_req = r; + g_rpc_req_size = sizeof(*r); + g_rpc_err = 0; + g_json_decode_obj_create = 0; + g_json_decode_obj_err = json_decode_obj_err; + g_config_level_create = 0; + g_test_multi_raids = 0; +} + +static void +create_get_raids_req(struct rpc_bdev_raid_get_bdevs *r, const char *category, + uint8_t json_decode_obj_err) +{ + r->category = strdup(category); + SPDK_CU_ASSERT_FATAL(r->category != NULL); + + g_rpc_req = r; + g_rpc_req_size = sizeof(*r); + g_rpc_err = 0; + g_json_decode_obj_create = 0; + g_json_decode_obj_err = json_decode_obj_err; + g_config_level_create = 0; + g_test_multi_raids = 1; + g_get_raids_count = 0; +} + +static void +test_create_raid(void) +{ + struct rpc_bdev_raid_create req; + struct rpc_bdev_raid_delete delete_req; + + set_globals(); + CU_ASSERT(raid_bdev_init() == 0); + + verify_raid_config_present("raid1", false); + verify_raid_bdev_present("raid1", false); + create_raid_bdev_create_req(&req, "raid1", 0, true, 0); + rpc_bdev_raid_create(NULL, NULL); + CU_ASSERT(g_rpc_err == 0); + verify_raid_config(&req, true); + verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE); + free_test_req(&req); + + create_raid_bdev_delete_req(&delete_req, "raid1", 0); + rpc_bdev_raid_delete(NULL, NULL); + CU_ASSERT(g_rpc_err == 0); + raid_bdev_exit(); + base_bdevs_cleanup(); + reset_globals(); +} + +static void +test_delete_raid(void) +{ + struct rpc_bdev_raid_create construct_req; + struct 
rpc_bdev_raid_delete delete_req; + + set_globals(); + CU_ASSERT(raid_bdev_init() == 0); + + verify_raid_config_present("raid1", false); + verify_raid_bdev_present("raid1", false); + create_raid_bdev_create_req(&construct_req, "raid1", 0, true, 0); + rpc_bdev_raid_create(NULL, NULL); + CU_ASSERT(g_rpc_err == 0); + verify_raid_config(&construct_req, true); + verify_raid_bdev(&construct_req, true, RAID_BDEV_STATE_ONLINE); + free_test_req(&construct_req); + + create_raid_bdev_delete_req(&delete_req, "raid1", 0); + rpc_bdev_raid_delete(NULL, NULL); + CU_ASSERT(g_rpc_err == 0); + verify_raid_config_present("raid1", false); + verify_raid_bdev_present("raid1", false); + + raid_bdev_exit(); + base_bdevs_cleanup(); + reset_globals(); +} + +static void +test_create_raid_invalid_args(void) +{ + struct rpc_bdev_raid_create req; + struct rpc_bdev_raid_delete destroy_req; + struct raid_bdev_config *raid_cfg; + + set_globals(); + CU_ASSERT(raid_bdev_init() == 0); + + verify_raid_config_present("raid1", false); + verify_raid_bdev_present("raid1", false); + create_raid_bdev_create_req(&req, "raid1", 0, true, 0); + req.level = INVALID_RAID_LEVEL; + rpc_bdev_raid_create(NULL, NULL); + CU_ASSERT(g_rpc_err == 1); + free_test_req(&req); + verify_raid_config_present("raid1", false); + verify_raid_bdev_present("raid1", false); + + create_raid_bdev_create_req(&req, "raid1", 0, false, 1); + rpc_bdev_raid_create(NULL, NULL); + CU_ASSERT(g_rpc_err == 1); + free_test_req(&req); + verify_raid_config_present("raid1", false); + verify_raid_bdev_present("raid1", false); + + create_raid_bdev_create_req(&req, "raid1", 0, false, 0); + req.strip_size_kb = 1231; + rpc_bdev_raid_create(NULL, NULL); + CU_ASSERT(g_rpc_err == 1); + free_test_req(&req); + verify_raid_config_present("raid1", false); + verify_raid_bdev_present("raid1", false); + + create_raid_bdev_create_req(&req, "raid1", 0, false, 0); + rpc_bdev_raid_create(NULL, NULL); + CU_ASSERT(g_rpc_err == 0); + verify_raid_config(&req, true); + verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE); + free_test_req(&req); + + create_raid_bdev_create_req(&req, "raid1", 0, false, 0); + rpc_bdev_raid_create(NULL, NULL); + CU_ASSERT(g_rpc_err == 1); + free_test_req(&req); + + create_raid_bdev_create_req(&req, "raid2", 0, false, 0); + rpc_bdev_raid_create(NULL, NULL); + CU_ASSERT(g_rpc_err == 1); + free_test_req(&req); + verify_raid_config_present("raid2", false); + verify_raid_bdev_present("raid2", false); + + create_raid_bdev_create_req(&req, "raid2", g_max_base_drives, true, 0); + free(req.base_bdevs.base_bdevs[g_max_base_drives - 1]); + req.base_bdevs.base_bdevs[g_max_base_drives - 1] = strdup("Nvme0n1"); + SPDK_CU_ASSERT_FATAL(req.base_bdevs.base_bdevs[g_max_base_drives - 1] != NULL); + rpc_bdev_raid_create(NULL, NULL); + CU_ASSERT(g_rpc_err == 1); + free_test_req(&req); + verify_raid_config_present("raid2", false); + verify_raid_bdev_present("raid2", false); + + create_raid_bdev_create_req(&req, "raid2", g_max_base_drives, true, 0); + free(req.base_bdevs.base_bdevs[g_max_base_drives - 1]); + req.base_bdevs.base_bdevs[g_max_base_drives - 1] = strdup("Nvme100000n1"); + SPDK_CU_ASSERT_FATAL(req.base_bdevs.base_bdevs[g_max_base_drives - 1] != NULL); + rpc_bdev_raid_create(NULL, NULL); + CU_ASSERT(g_rpc_err == 0); + free_test_req(&req); + verify_raid_config_present("raid2", true); + verify_raid_bdev_present("raid2", true); + raid_cfg = raid_bdev_config_find_by_name("raid2"); + SPDK_CU_ASSERT_FATAL(raid_cfg != NULL); + check_and_remove_raid_bdev(raid_cfg); + 
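+	/* "raid2" references a base bdev name (Nvme100000n1) that was never
+	 * registered, so the create request is accepted but the raid is left
+	 * without that base device; the test releases its resources directly
+	 * (base bdev resources above, the config entry below) instead of going
+	 * through the delete RPC.
+	 */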
raid_bdev_config_cleanup(raid_cfg); + + create_raid_bdev_create_req(&req, "raid2", g_max_base_drives, false, 0); + rpc_bdev_raid_create(NULL, NULL); + CU_ASSERT(g_rpc_err == 0); + free_test_req(&req); + verify_raid_config_present("raid2", true); + verify_raid_bdev_present("raid2", true); + verify_raid_config_present("raid1", true); + verify_raid_bdev_present("raid1", true); + + create_raid_bdev_delete_req(&destroy_req, "raid1", 0); + rpc_bdev_raid_delete(NULL, NULL); + create_raid_bdev_delete_req(&destroy_req, "raid2", 0); + rpc_bdev_raid_delete(NULL, NULL); + raid_bdev_exit(); + base_bdevs_cleanup(); + reset_globals(); +} + +static void +test_delete_raid_invalid_args(void) +{ + struct rpc_bdev_raid_create construct_req; + struct rpc_bdev_raid_delete destroy_req; + + set_globals(); + CU_ASSERT(raid_bdev_init() == 0); + + verify_raid_config_present("raid1", false); + verify_raid_bdev_present("raid1", false); + create_raid_bdev_create_req(&construct_req, "raid1", 0, true, 0); + rpc_bdev_raid_create(NULL, NULL); + CU_ASSERT(g_rpc_err == 0); + verify_raid_config(&construct_req, true); + verify_raid_bdev(&construct_req, true, RAID_BDEV_STATE_ONLINE); + free_test_req(&construct_req); + + create_raid_bdev_delete_req(&destroy_req, "raid2", 0); + rpc_bdev_raid_delete(NULL, NULL); + CU_ASSERT(g_rpc_err == 1); + + create_raid_bdev_delete_req(&destroy_req, "raid1", 1); + rpc_bdev_raid_delete(NULL, NULL); + CU_ASSERT(g_rpc_err == 1); + free(destroy_req.name); + verify_raid_config_present("raid1", true); + verify_raid_bdev_present("raid1", true); + + create_raid_bdev_delete_req(&destroy_req, "raid1", 0); + rpc_bdev_raid_delete(NULL, NULL); + CU_ASSERT(g_rpc_err == 0); + verify_raid_config_present("raid1", false); + verify_raid_bdev_present("raid1", false); + + raid_bdev_exit(); + base_bdevs_cleanup(); + reset_globals(); +} + +static void +test_io_channel(void) +{ + struct rpc_bdev_raid_create req; + struct rpc_bdev_raid_delete destroy_req; + struct raid_bdev *pbdev; + struct raid_bdev_io_channel *ch_ctx; + uint8_t i; + + set_globals(); + CU_ASSERT(raid_bdev_init() == 0); + + create_raid_bdev_create_req(&req, "raid1", 0, true, 0); + verify_raid_config_present("raid1", false); + verify_raid_bdev_present("raid1", false); + rpc_bdev_raid_create(NULL, NULL); + CU_ASSERT(g_rpc_err == 0); + verify_raid_config(&req, true); + verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE); + + TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { + if (strcmp(pbdev->bdev.name, "raid1") == 0) { + break; + } + } + CU_ASSERT(pbdev != NULL); + ch_ctx = calloc(1, sizeof(struct raid_bdev_io_channel)); + SPDK_CU_ASSERT_FATAL(ch_ctx != NULL); + + CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0); + for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) { + CU_ASSERT(ch_ctx->base_channel && ch_ctx->base_channel[i] == &g_io_channel); + } + raid_bdev_destroy_cb(pbdev, ch_ctx); + CU_ASSERT(ch_ctx->base_channel == NULL); + free_test_req(&req); + + create_raid_bdev_delete_req(&destroy_req, "raid1", 0); + rpc_bdev_raid_delete(NULL, NULL); + CU_ASSERT(g_rpc_err == 0); + verify_raid_config_present("raid1", false); + verify_raid_bdev_present("raid1", false); + + free(ch_ctx); + raid_bdev_exit(); + base_bdevs_cleanup(); + reset_globals(); +} + +static void +test_write_io(void) +{ + struct rpc_bdev_raid_create req; + struct rpc_bdev_raid_delete destroy_req; + struct raid_bdev *pbdev; + struct spdk_io_channel *ch; + struct raid_bdev_io_channel *ch_ctx; + uint8_t i; + struct spdk_bdev_io *bdev_io; + uint64_t io_len; + uint64_t lba = 0; + struct 
spdk_io_channel *ch_b; + struct spdk_bdev_channel *ch_b_ctx; + + set_globals(); + CU_ASSERT(raid_bdev_init() == 0); + + create_raid_bdev_create_req(&req, "raid1", 0, true, 0); + verify_raid_config_present("raid1", false); + verify_raid_bdev_present("raid1", false); + rpc_bdev_raid_create(NULL, NULL); + CU_ASSERT(g_rpc_err == 0); + verify_raid_config(&req, true); + verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE); + TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { + if (strcmp(pbdev->bdev.name, "raid1") == 0) { + break; + } + } + CU_ASSERT(pbdev != NULL); + ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel)); + SPDK_CU_ASSERT_FATAL(ch != NULL); + + ch_b = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct spdk_bdev_channel)); + SPDK_CU_ASSERT_FATAL(ch_b != NULL); + ch_b_ctx = spdk_io_channel_get_ctx(ch_b); + ch_b_ctx->channel = ch; + + ch_ctx = spdk_io_channel_get_ctx(ch); + SPDK_CU_ASSERT_FATAL(ch_ctx != NULL); + + CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0); + for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) { + CU_ASSERT(ch_ctx->base_channel && ch_ctx->base_channel[i] == &g_io_channel); + } + + /* test 2 IO sizes based on global strip size set earlier */ + for (i = 0; i < 2; i++) { + bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io)); + SPDK_CU_ASSERT_FATAL(bdev_io != NULL); + io_len = (g_strip_size / 2) << i; + bdev_io_initialize(bdev_io, ch_b, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_WRITE); + lba += g_strip_size; + memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output)); + g_io_output_index = 0; + raid_bdev_submit_request(ch, bdev_io); + verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev, + g_child_io_status_flag); + bdev_io_cleanup(bdev_io); + } + + free_test_req(&req); + raid_bdev_destroy_cb(pbdev, ch_ctx); + CU_ASSERT(ch_ctx->base_channel == NULL); + free(ch); + free(ch_b); + create_raid_bdev_delete_req(&destroy_req, "raid1", 0); + rpc_bdev_raid_delete(NULL, NULL); + CU_ASSERT(g_rpc_err == 0); + verify_raid_config_present("raid1", false); + verify_raid_bdev_present("raid1", false); + + raid_bdev_exit(); + base_bdevs_cleanup(); + reset_globals(); +} + +static void +test_read_io(void) +{ + struct rpc_bdev_raid_create req; + struct rpc_bdev_raid_delete destroy_req; + struct raid_bdev *pbdev; + struct spdk_io_channel *ch; + struct raid_bdev_io_channel *ch_ctx; + uint8_t i; + struct spdk_bdev_io *bdev_io; + uint64_t io_len; + uint64_t lba; + struct spdk_io_channel *ch_b; + struct spdk_bdev_channel *ch_b_ctx; + + set_globals(); + CU_ASSERT(raid_bdev_init() == 0); + + verify_raid_config_present("raid1", false); + verify_raid_bdev_present("raid1", false); + create_raid_bdev_create_req(&req, "raid1", 0, true, 0); + rpc_bdev_raid_create(NULL, NULL); + CU_ASSERT(g_rpc_err == 0); + verify_raid_config(&req, true); + verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE); + TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { + if (strcmp(pbdev->bdev.name, "raid1") == 0) { + break; + } + } + CU_ASSERT(pbdev != NULL); + ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel)); + SPDK_CU_ASSERT_FATAL(ch != NULL); + + ch_b = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct spdk_bdev_channel)); + SPDK_CU_ASSERT_FATAL(ch_b != NULL); + ch_b_ctx = spdk_io_channel_get_ctx(ch_b); + ch_b_ctx->channel = ch; + + ch_ctx = spdk_io_channel_get_ctx(ch); + SPDK_CU_ASSERT_FATAL(ch_ctx != NULL); + + CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) 
== 0); + for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) { + CU_ASSERT(ch_ctx->base_channel && ch_ctx->base_channel[i] == &g_io_channel); + } + free_test_req(&req); + + /* test 2 IO sizes based on global strip size set earlier */ + lba = 0; + for (i = 0; i < 2; i++) { + bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io)); + SPDK_CU_ASSERT_FATAL(bdev_io != NULL); + io_len = (g_strip_size / 2) << i; + bdev_io_initialize(bdev_io, ch_b, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_READ); + lba += g_strip_size; + memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output)); + g_io_output_index = 0; + raid_bdev_submit_request(ch, bdev_io); + verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev, + g_child_io_status_flag); + bdev_io_cleanup(bdev_io); + } + + raid_bdev_destroy_cb(pbdev, ch_ctx); + CU_ASSERT(ch_ctx->base_channel == NULL); + free(ch); + free(ch_b); + create_raid_bdev_delete_req(&destroy_req, "raid1", 0); + rpc_bdev_raid_delete(NULL, NULL); + CU_ASSERT(g_rpc_err == 0); + verify_raid_config_present("raid1", false); + verify_raid_bdev_present("raid1", false); + + raid_bdev_exit(); + base_bdevs_cleanup(); + reset_globals(); +} + +static void +raid_bdev_io_generate_by_strips(uint64_t n_strips) +{ + uint64_t lba; + uint64_t nblocks; + uint64_t start_offset; + uint64_t end_offset; + uint64_t offsets_in_strip[3]; + uint64_t start_bdev_idx; + uint64_t start_bdev_offset; + uint64_t start_bdev_idxs[3]; + int i, j, l; + + /* 3 different situations of offset in strip */ + offsets_in_strip[0] = 0; + offsets_in_strip[1] = g_strip_size >> 1; + offsets_in_strip[2] = g_strip_size - 1; + + /* 3 different situations of start_bdev_idx */ + start_bdev_idxs[0] = 0; + start_bdev_idxs[1] = g_max_base_drives >> 1; + start_bdev_idxs[2] = g_max_base_drives - 1; + + /* consider different offset in strip */ + for (i = 0; i < 3; i++) { + start_offset = offsets_in_strip[i]; + for (j = 0; j < 3; j++) { + end_offset = offsets_in_strip[j]; + if (n_strips == 1 && start_offset > end_offset) { + continue; + } + + /* consider at which base_bdev lba is started. 
*/ + for (l = 0; l < 3; l++) { + start_bdev_idx = start_bdev_idxs[l]; + start_bdev_offset = start_bdev_idx * g_strip_size; + lba = g_lba_offset + start_bdev_offset + start_offset; + nblocks = (n_strips - 1) * g_strip_size + end_offset - start_offset + 1; + + g_io_ranges[g_io_range_idx].lba = lba; + g_io_ranges[g_io_range_idx].nblocks = nblocks; + + SPDK_CU_ASSERT_FATAL(g_io_range_idx < MAX_TEST_IO_RANGE); + g_io_range_idx++; + } + } + } +} + +static void +raid_bdev_io_generate(void) +{ + uint64_t n_strips; + uint64_t n_strips_span = g_max_base_drives; + uint64_t n_strips_times[5] = {g_max_base_drives + 1, g_max_base_drives * 2 - 1, + g_max_base_drives * 2, g_max_base_drives * 3, + g_max_base_drives * 4 + }; + uint32_t i; + + g_io_range_idx = 0; + + /* consider different number of strips from 1 to strips spanned base bdevs, + * and even to times of strips spanned base bdevs + */ + for (n_strips = 1; n_strips < n_strips_span; n_strips++) { + raid_bdev_io_generate_by_strips(n_strips); + } + + for (i = 0; i < SPDK_COUNTOF(n_strips_times); i++) { + n_strips = n_strips_times[i]; + raid_bdev_io_generate_by_strips(n_strips); + } +} + +static void +test_unmap_io(void) +{ + struct rpc_bdev_raid_create req; + struct rpc_bdev_raid_delete destroy_req; + struct raid_bdev *pbdev; + struct spdk_io_channel *ch; + struct raid_bdev_io_channel *ch_ctx; + uint8_t i; + struct spdk_bdev_io *bdev_io; + uint32_t count; + uint64_t io_len; + uint64_t lba; + + set_globals(); + CU_ASSERT(raid_bdev_init() == 0); + + verify_raid_config_present("raid1", false); + verify_raid_bdev_present("raid1", false); + create_raid_bdev_create_req(&req, "raid1", 0, true, 0); + rpc_bdev_raid_create(NULL, NULL); + CU_ASSERT(g_rpc_err == 0); + verify_raid_config(&req, true); + verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE); + TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { + if (strcmp(pbdev->bdev.name, "raid1") == 0) { + break; + } + } + CU_ASSERT(pbdev != NULL); + ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel)); + SPDK_CU_ASSERT_FATAL(ch != NULL); + ch_ctx = spdk_io_channel_get_ctx(ch); + SPDK_CU_ASSERT_FATAL(ch_ctx != NULL); + + CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0); + for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) { + SPDK_CU_ASSERT_FATAL(ch_ctx->base_channel && ch_ctx->base_channel[i] == &g_io_channel); + } + + CU_ASSERT(raid_bdev_io_type_supported(pbdev, SPDK_BDEV_IO_TYPE_UNMAP) == true); + CU_ASSERT(raid_bdev_io_type_supported(pbdev, SPDK_BDEV_IO_TYPE_FLUSH) == true); + + raid_bdev_io_generate(); + for (count = 0; count < g_io_range_idx; count++) { + bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io)); + SPDK_CU_ASSERT_FATAL(bdev_io != NULL); + io_len = g_io_ranges[count].nblocks; + lba = g_io_ranges[count].lba; + bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_UNMAP); + memset(g_io_output, 0, g_max_base_drives * sizeof(struct io_output)); + g_io_output_index = 0; + raid_bdev_submit_request(ch, bdev_io); + verify_io_without_payload(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev, + g_child_io_status_flag); + bdev_io_cleanup(bdev_io); + } + free_test_req(&req); + + raid_bdev_destroy_cb(pbdev, ch_ctx); + CU_ASSERT(ch_ctx->base_channel == NULL); + free(ch); + create_raid_bdev_delete_req(&destroy_req, "raid1", 0); + rpc_bdev_raid_delete(NULL, NULL); + CU_ASSERT(g_rpc_err == 0); + verify_raid_config_present("raid1", false); + verify_raid_bdev_present("raid1", false); + + raid_bdev_exit(); + 
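+	/* module already unloaded; now release the fake base bdevs and reset the test globals */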
base_bdevs_cleanup(); + reset_globals(); +} + +/* Test IO failures */ +static void +test_io_failure(void) +{ + struct rpc_bdev_raid_create req; + struct rpc_bdev_raid_delete destroy_req; + struct raid_bdev *pbdev; + struct spdk_io_channel *ch; + struct raid_bdev_io_channel *ch_ctx; + uint8_t i; + struct spdk_bdev_io *bdev_io; + uint32_t count; + uint64_t io_len; + uint64_t lba; + + set_globals(); + CU_ASSERT(raid_bdev_init() == 0); + + verify_raid_config_present("raid1", false); + verify_raid_bdev_present("raid1", false); + create_raid_bdev_create_req(&req, "raid1", 0, true, 0); + rpc_bdev_raid_create(NULL, NULL); + CU_ASSERT(g_rpc_err == 0); + verify_raid_config(&req, true); + verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE); + TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { + if (strcmp(pbdev->bdev.name, req.name) == 0) { + break; + } + } + CU_ASSERT(pbdev != NULL); + ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel)); + SPDK_CU_ASSERT_FATAL(ch != NULL); + ch_ctx = spdk_io_channel_get_ctx(ch); + SPDK_CU_ASSERT_FATAL(ch_ctx != NULL); + + CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0); + for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) { + CU_ASSERT(ch_ctx->base_channel && ch_ctx->base_channel[i] == &g_io_channel); + } + free_test_req(&req); + + lba = 0; + for (count = 0; count < 1; count++) { + bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io)); + SPDK_CU_ASSERT_FATAL(bdev_io != NULL); + io_len = (g_strip_size / 2) << count; + bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_INVALID); + lba += g_strip_size; + memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output)); + g_io_output_index = 0; + raid_bdev_submit_request(ch, bdev_io); + verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev, + INVALID_IO_SUBMIT); + bdev_io_cleanup(bdev_io); + } + + + lba = 0; + g_child_io_status_flag = false; + for (count = 0; count < 1; count++) { + bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io)); + SPDK_CU_ASSERT_FATAL(bdev_io != NULL); + io_len = (g_strip_size / 2) << count; + bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_WRITE); + lba += g_strip_size; + memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output)); + g_io_output_index = 0; + raid_bdev_submit_request(ch, bdev_io); + verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev, + g_child_io_status_flag); + bdev_io_cleanup(bdev_io); + } + + raid_bdev_destroy_cb(pbdev, ch_ctx); + CU_ASSERT(ch_ctx->base_channel == NULL); + free(ch); + create_raid_bdev_delete_req(&destroy_req, "raid1", 0); + rpc_bdev_raid_delete(NULL, NULL); + CU_ASSERT(g_rpc_err == 0); + verify_raid_config_present("raid1", false); + verify_raid_bdev_present("raid1", false); + + raid_bdev_exit(); + base_bdevs_cleanup(); + reset_globals(); +} + +/* Test reset IO */ +static void +test_reset_io(void) +{ + struct rpc_bdev_raid_create req; + struct rpc_bdev_raid_delete destroy_req; + struct raid_bdev *pbdev; + struct spdk_io_channel *ch; + struct raid_bdev_io_channel *ch_ctx; + uint8_t i; + struct spdk_bdev_io *bdev_io; + + set_globals(); + CU_ASSERT(raid_bdev_init() == 0); + + verify_raid_config_present("raid1", false); + verify_raid_bdev_present("raid1", false); + create_raid_bdev_create_req(&req, "raid1", 0, true, 0); + rpc_bdev_raid_create(NULL, NULL); + CU_ASSERT(g_rpc_err == 0); + verify_raid_config(&req, true); + verify_raid_bdev(&req, true, 
RAID_BDEV_STATE_ONLINE); + TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { + if (strcmp(pbdev->bdev.name, "raid1") == 0) { + break; + } + } + CU_ASSERT(pbdev != NULL); + ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel)); + SPDK_CU_ASSERT_FATAL(ch != NULL); + ch_ctx = spdk_io_channel_get_ctx(ch); + SPDK_CU_ASSERT_FATAL(ch_ctx != NULL); + + SPDK_CU_ASSERT_FATAL(raid_bdev_create_cb(pbdev, ch_ctx) == 0); + for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) { + CU_ASSERT(ch_ctx->base_channel && ch_ctx->base_channel[i] == &g_io_channel); + } + free_test_req(&req); + + g_bdev_io_submit_status = 0; + g_child_io_status_flag = true; + + CU_ASSERT(raid_bdev_io_type_supported(pbdev, SPDK_BDEV_IO_TYPE_RESET) == true); + + bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io)); + SPDK_CU_ASSERT_FATAL(bdev_io != NULL); + bdev_io_initialize(bdev_io, ch, &pbdev->bdev, 0, 1, SPDK_BDEV_IO_TYPE_RESET); + memset(g_io_output, 0, g_max_base_drives * sizeof(struct io_output)); + g_io_output_index = 0; + raid_bdev_submit_request(ch, bdev_io); + verify_reset_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev, + true); + bdev_io_cleanup(bdev_io); + + raid_bdev_destroy_cb(pbdev, ch_ctx); + CU_ASSERT(ch_ctx->base_channel == NULL); + free(ch); + create_raid_bdev_delete_req(&destroy_req, "raid1", 0); + rpc_bdev_raid_delete(NULL, NULL); + CU_ASSERT(g_rpc_err == 0); + verify_raid_config_present("raid1", false); + verify_raid_bdev_present("raid1", false); + + raid_bdev_exit(); + base_bdevs_cleanup(); + reset_globals(); +} + +/* Create multiple raids, destroy raids without IO, get_raids related tests */ +static void +test_multi_raid_no_io(void) +{ + struct rpc_bdev_raid_create *construct_req; + struct rpc_bdev_raid_delete destroy_req; + struct rpc_bdev_raid_get_bdevs get_raids_req; + uint8_t i; + char name[16]; + uint8_t bbdev_idx = 0; + + set_globals(); + construct_req = calloc(MAX_RAIDS, sizeof(struct rpc_bdev_raid_create)); + SPDK_CU_ASSERT_FATAL(construct_req != NULL); + CU_ASSERT(raid_bdev_init() == 0); + for (i = 0; i < g_max_raids; i++) { + snprintf(name, 16, "%s%u", "raid", i); + verify_raid_config_present(name, false); + verify_raid_bdev_present(name, false); + create_raid_bdev_create_req(&construct_req[i], name, bbdev_idx, true, 0); + bbdev_idx += g_max_base_drives; + rpc_bdev_raid_create(NULL, NULL); + CU_ASSERT(g_rpc_err == 0); + verify_raid_config(&construct_req[i], true); + verify_raid_bdev(&construct_req[i], true, RAID_BDEV_STATE_ONLINE); + } + + create_get_raids_req(&get_raids_req, "all", 0); + rpc_bdev_raid_get_bdevs(NULL, NULL); + CU_ASSERT(g_rpc_err == 0); + verify_get_raids(construct_req, g_max_raids, g_get_raids_output, g_get_raids_count); + for (i = 0; i < g_get_raids_count; i++) { + free(g_get_raids_output[i]); + } + + create_get_raids_req(&get_raids_req, "online", 0); + rpc_bdev_raid_get_bdevs(NULL, NULL); + CU_ASSERT(g_rpc_err == 0); + verify_get_raids(construct_req, g_max_raids, g_get_raids_output, g_get_raids_count); + for (i = 0; i < g_get_raids_count; i++) { + free(g_get_raids_output[i]); + } + + create_get_raids_req(&get_raids_req, "configuring", 0); + rpc_bdev_raid_get_bdevs(NULL, NULL); + CU_ASSERT(g_rpc_err == 0); + CU_ASSERT(g_get_raids_count == 0); + + create_get_raids_req(&get_raids_req, "offline", 0); + rpc_bdev_raid_get_bdevs(NULL, NULL); + CU_ASSERT(g_rpc_err == 0); + CU_ASSERT(g_get_raids_count == 0); + + create_get_raids_req(&get_raids_req, "invalid_category", 0); + rpc_bdev_raid_get_bdevs(NULL, NULL); + 
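+	/* an unrecognized category is expected to fail the RPC and return no raid bdevs */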
CU_ASSERT(g_rpc_err == 1); + CU_ASSERT(g_get_raids_count == 0); + + create_get_raids_req(&get_raids_req, "all", 1); + rpc_bdev_raid_get_bdevs(NULL, NULL); + CU_ASSERT(g_rpc_err == 1); + free(get_raids_req.category); + CU_ASSERT(g_get_raids_count == 0); + + create_get_raids_req(&get_raids_req, "all", 0); + rpc_bdev_raid_get_bdevs(NULL, NULL); + CU_ASSERT(g_rpc_err == 0); + CU_ASSERT(g_get_raids_count == g_max_raids); + for (i = 0; i < g_get_raids_count; i++) { + free(g_get_raids_output[i]); + } + + for (i = 0; i < g_max_raids; i++) { + SPDK_CU_ASSERT_FATAL(construct_req[i].name != NULL); + snprintf(name, 16, "%s", construct_req[i].name); + create_raid_bdev_delete_req(&destroy_req, name, 0); + rpc_bdev_raid_delete(NULL, NULL); + CU_ASSERT(g_rpc_err == 0); + verify_raid_config_present(name, false); + verify_raid_bdev_present(name, false); + } + raid_bdev_exit(); + for (i = 0; i < g_max_raids; i++) { + free_test_req(&construct_req[i]); + } + free(construct_req); + base_bdevs_cleanup(); + reset_globals(); +} + +/* Create multiple raids, fire IOs on raids */ +static void +test_multi_raid_with_io(void) +{ + struct rpc_bdev_raid_create *construct_req; + struct rpc_bdev_raid_delete destroy_req; + uint8_t i, j; + char name[16]; + uint8_t bbdev_idx = 0; + struct raid_bdev *pbdev; + struct spdk_io_channel *ch; + struct raid_bdev_io_channel *ch_ctx = NULL; + struct spdk_bdev_io *bdev_io; + uint64_t io_len; + uint64_t lba = 0; + int16_t iotype; + struct spdk_io_channel *ch_b; + struct spdk_bdev_channel *ch_b_ctx; + + set_globals(); + construct_req = calloc(g_max_raids, sizeof(struct rpc_bdev_raid_create)); + SPDK_CU_ASSERT_FATAL(construct_req != NULL); + CU_ASSERT(raid_bdev_init() == 0); + ch = calloc(g_max_raids, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel)); + SPDK_CU_ASSERT_FATAL(ch != NULL); + + ch_b = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct spdk_bdev_channel)); + SPDK_CU_ASSERT_FATAL(ch_b != NULL); + ch_b_ctx = spdk_io_channel_get_ctx(ch_b); + ch_b_ctx->channel = ch; + + for (i = 0; i < g_max_raids; i++) { + snprintf(name, 16, "%s%u", "raid", i); + verify_raid_config_present(name, false); + verify_raid_bdev_present(name, false); + create_raid_bdev_create_req(&construct_req[i], name, bbdev_idx, true, 0); + bbdev_idx += g_max_base_drives; + rpc_bdev_raid_create(NULL, NULL); + CU_ASSERT(g_rpc_err == 0); + verify_raid_config(&construct_req[i], true); + verify_raid_bdev(&construct_req[i], true, RAID_BDEV_STATE_ONLINE); + TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { + if (strcmp(pbdev->bdev.name, construct_req[i].name) == 0) { + break; + } + } + CU_ASSERT(pbdev != NULL); + ch_ctx = spdk_io_channel_get_ctx(&ch[i]); + SPDK_CU_ASSERT_FATAL(ch_ctx != NULL); + CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0); + SPDK_CU_ASSERT_FATAL(ch_ctx->base_channel != NULL); + for (j = 0; j < construct_req[i].base_bdevs.num_base_bdevs; j++) { + CU_ASSERT(ch_ctx->base_channel[j] == &g_io_channel); + } + } + + /* This will perform a write on the first raid and a read on the second. It can be + * expanded in the future to perform r/w on each raid device in the event that + * multiple raid levels are supported. + */ + for (i = 0; i < g_max_raids; i++) { + bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io)); + SPDK_CU_ASSERT_FATAL(bdev_io != NULL); + io_len = g_strip_size; + iotype = (i) ? 
SPDK_BDEV_IO_TYPE_WRITE : SPDK_BDEV_IO_TYPE_READ; + memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output)); + g_io_output_index = 0; + TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { + if (strcmp(pbdev->bdev.name, construct_req[i].name) == 0) { + break; + } + } + bdev_io_initialize(bdev_io, ch_b, &pbdev->bdev, lba, io_len, iotype); + CU_ASSERT(pbdev != NULL); + raid_bdev_submit_request(ch, bdev_io); + verify_io(bdev_io, g_max_base_drives, ch_ctx, pbdev, + g_child_io_status_flag); + bdev_io_cleanup(bdev_io); + } + + for (i = 0; i < g_max_raids; i++) { + TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { + if (strcmp(pbdev->bdev.name, construct_req[i].name) == 0) { + break; + } + } + CU_ASSERT(pbdev != NULL); + ch_ctx = spdk_io_channel_get_ctx(&ch[i]); + SPDK_CU_ASSERT_FATAL(ch_ctx != NULL); + raid_bdev_destroy_cb(pbdev, ch_ctx); + CU_ASSERT(ch_ctx->base_channel == NULL); + snprintf(name, 16, "%s", construct_req[i].name); + create_raid_bdev_delete_req(&destroy_req, name, 0); + rpc_bdev_raid_delete(NULL, NULL); + CU_ASSERT(g_rpc_err == 0); + verify_raid_config_present(name, false); + verify_raid_bdev_present(name, false); + } + raid_bdev_exit(); + for (i = 0; i < g_max_raids; i++) { + free_test_req(&construct_req[i]); + } + free(construct_req); + free(ch); + free(ch_b); + base_bdevs_cleanup(); + reset_globals(); +} + +static void +test_io_type_supported(void) +{ + CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_READ) == true); + CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_WRITE) == true); + CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_INVALID) == false); +} + +static void +test_create_raid_from_config(void) +{ + struct rpc_bdev_raid_create req; + struct spdk_bdev *bdev; + struct rpc_bdev_raid_delete destroy_req; + bool can_claim; + struct raid_bdev_config *raid_cfg; + uint8_t base_bdev_slot; + + set_globals(); + create_raid_bdev_create_config(&req, "raid1", 0, true); + CU_ASSERT(raid_bdev_init() == 0); + + verify_raid_config_present("raid1", true); + verify_raid_bdev_present("raid1", true); + + TAILQ_FOREACH(bdev, &g_bdev_list, internal.link) { + raid_bdev_examine(bdev); + } + + can_claim = raid_bdev_can_claim_bdev("Invalid", &raid_cfg, &base_bdev_slot); + CU_ASSERT(can_claim == false); + + verify_raid_config(&req, true); + verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE); + + create_raid_bdev_delete_req(&destroy_req, "raid1", 0); + rpc_bdev_raid_delete(NULL, NULL); + CU_ASSERT(g_rpc_err == 0); + verify_raid_config_present("raid1", false); + verify_raid_bdev_present("raid1", false); + + raid_bdev_exit(); + free_test_req(&req); + base_bdevs_cleanup(); + reset_globals(); +} + +static void +test_create_raid_from_config_invalid_params(void) +{ + struct rpc_bdev_raid_create req; + + set_globals(); + + create_raid_bdev_create_config(&req, "raid1", 0, true); + free(req.name); + req.name = NULL; + CU_ASSERT(raid_bdev_init() != 0); + free_test_req(&req); + verify_raid_config_present("raid1", false); + verify_raid_bdev_present("raid1", false); + + create_raid_bdev_create_config(&req, "raid1", 0, false); + req.strip_size_kb = 1234; + CU_ASSERT(raid_bdev_init() != 0); + free_test_req(&req); + verify_raid_config_present("raid1", false); + verify_raid_bdev_present("raid1", false); + + create_raid_bdev_create_config(&req, "raid1", 0, false); + req.level = INVALID_RAID_LEVEL; + CU_ASSERT(raid_bdev_init() != 0); + free_test_req(&req); + verify_raid_config_present("raid1", false); + 
verify_raid_bdev_present("raid1", false); + + create_raid_bdev_create_config(&req, "raid1", 0, false); + req.level = INVALID_RAID_LEVEL; + CU_ASSERT(raid_bdev_init() != 0); + free_test_req(&req); + verify_raid_config_present("raid1", false); + verify_raid_bdev_present("raid1", false); + + create_raid_bdev_create_config(&req, "raid1", 0, false); + req.base_bdevs.num_base_bdevs++; + CU_ASSERT(raid_bdev_init() != 0); + req.base_bdevs.num_base_bdevs--; + free_test_req(&req); + verify_raid_config_present("raid1", false); + verify_raid_bdev_present("raid1", false); + + create_raid_bdev_create_config(&req, "raid1", 0, false); + req.base_bdevs.num_base_bdevs--; + CU_ASSERT(raid_bdev_init() != 0); + req.base_bdevs.num_base_bdevs++; + free_test_req(&req); + verify_raid_config_present("raid1", false); + verify_raid_bdev_present("raid1", false); + + if (g_max_base_drives > 1) { + create_raid_bdev_create_config(&req, "raid1", 0, false); + snprintf(req.base_bdevs.base_bdevs[g_max_base_drives - 1], 15, "%s", "Nvme0n1"); + CU_ASSERT(raid_bdev_init() != 0); + free_test_req(&req); + verify_raid_config_present("raid1", false); + verify_raid_bdev_present("raid1", false); + } + + raid_bdev_exit(); + base_bdevs_cleanup(); + reset_globals(); +} + +static void +test_raid_json_dump_info(void) +{ + struct rpc_bdev_raid_create req; + struct rpc_bdev_raid_delete destroy_req; + struct raid_bdev *pbdev; + + set_globals(); + CU_ASSERT(raid_bdev_init() == 0); + + verify_raid_config_present("raid1", false); + verify_raid_bdev_present("raid1", false); + create_raid_bdev_create_req(&req, "raid1", 0, true, 0); + rpc_bdev_raid_create(NULL, NULL); + CU_ASSERT(g_rpc_err == 0); + verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE); + + TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { + if (strcmp(pbdev->bdev.name, "raid1") == 0) { + break; + } + } + CU_ASSERT(pbdev != NULL); + + CU_ASSERT(raid_bdev_dump_info_json(pbdev, NULL) == 0); + + free_test_req(&req); + + create_raid_bdev_delete_req(&destroy_req, "raid1", 0); + rpc_bdev_raid_delete(NULL, NULL); + CU_ASSERT(g_rpc_err == 0); + verify_raid_config_present("raid1", false); + verify_raid_bdev_present("raid1", false); + + raid_bdev_exit(); + base_bdevs_cleanup(); + reset_globals(); +} + +static void +test_context_size(void) +{ + CU_ASSERT(raid_bdev_get_ctx_size() == sizeof(struct raid_bdev_io)); +} + +static void +test_raid_level_conversions(void) +{ + const char *raid_str; + + CU_ASSERT(raid_bdev_parse_raid_level("abcd123") == INVALID_RAID_LEVEL); + CU_ASSERT(raid_bdev_parse_raid_level("0") == RAID0); + CU_ASSERT(raid_bdev_parse_raid_level("raid0") == RAID0); + CU_ASSERT(raid_bdev_parse_raid_level("RAID0") == RAID0); + + raid_str = raid_bdev_level_to_str(INVALID_RAID_LEVEL); + CU_ASSERT(raid_str != NULL && strlen(raid_str) == 0); + raid_str = raid_bdev_level_to_str(1234); + CU_ASSERT(raid_str != NULL && strlen(raid_str) == 0); + raid_str = raid_bdev_level_to_str(RAID0); + CU_ASSERT(raid_str != NULL && strcmp(raid_str, "raid0") == 0); +} + +int main(int argc, char **argv) +{ + CU_pSuite suite = NULL; + unsigned int num_failures; + + CU_set_error_action(CUEA_ABORT); + CU_initialize_registry(); + + suite = CU_add_suite("raid", NULL, NULL); + + CU_ADD_TEST(suite, test_create_raid); + CU_ADD_TEST(suite, test_delete_raid); + CU_ADD_TEST(suite, test_create_raid_invalid_args); + CU_ADD_TEST(suite, test_delete_raid_invalid_args); + CU_ADD_TEST(suite, test_io_channel); + CU_ADD_TEST(suite, test_reset_io); + CU_ADD_TEST(suite, test_write_io); + CU_ADD_TEST(suite, test_read_io); + 
CU_ADD_TEST(suite, test_unmap_io); + CU_ADD_TEST(suite, test_io_failure); + CU_ADD_TEST(suite, test_multi_raid_no_io); + CU_ADD_TEST(suite, test_multi_raid_with_io); + CU_ADD_TEST(suite, test_io_type_supported); + CU_ADD_TEST(suite, test_create_raid_from_config); + CU_ADD_TEST(suite, test_create_raid_from_config_invalid_params); + CU_ADD_TEST(suite, test_raid_json_dump_info); + CU_ADD_TEST(suite, test_context_size); + CU_ADD_TEST(suite, test_raid_level_conversions); + + allocate_threads(1); + set_thread(0); + + CU_basic_set_mode(CU_BRM_VERBOSE); + set_test_opts(); + CU_basic_run_tests(); + num_failures = CU_get_number_of_failures(); + CU_cleanup_registry(); + + free_threads(); + + return num_failures; +} diff --git a/src/spdk/test/unit/lib/bdev/raid/raid5.c/.gitignore b/src/spdk/test/unit/lib/bdev/raid/raid5.c/.gitignore new file mode 100644 index 000000000..946026bf5 --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/raid/raid5.c/.gitignore @@ -0,0 +1 @@ +raid5_ut diff --git a/src/spdk/test/unit/lib/bdev/raid/raid5.c/Makefile b/src/spdk/test/unit/lib/bdev/raid/raid5.c/Makefile new file mode 100644 index 000000000..ddb733333 --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/raid/raid5.c/Makefile @@ -0,0 +1,38 @@ +# +# BSD LICENSE +# +# Copyright (c) Intel Corporation. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../../..) + +TEST_FILE = raid5_ut.c + +include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk diff --git a/src/spdk/test/unit/lib/bdev/raid/raid5.c/raid5_ut.c b/src/spdk/test/unit/lib/bdev/raid/raid5.c/raid5_ut.c new file mode 100644 index 000000000..ba30f327b --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/raid/raid5.c/raid5_ut.c @@ -0,0 +1,214 @@ +/*- + * BSD LICENSE + * + * Copyright (c) Intel Corporation. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE AiRE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "spdk/stdinc.h" +#include "spdk_cunit.h" +#include "spdk/env.h" +#include "spdk_internal/mock.h" + +#include "bdev/raid/raid5.c" + +DEFINE_STUB_V(raid_bdev_module_list_add, (struct raid_bdev_module *raid_module)); +DEFINE_STUB_V(raid_bdev_io_complete, (struct raid_bdev_io *raid_io, + enum spdk_bdev_io_status status)); + +struct raid5_params { + uint8_t num_base_bdevs; + uint64_t base_bdev_blockcnt; + uint32_t base_bdev_blocklen; + uint32_t strip_size; +}; + +static struct raid5_params *g_params; +static size_t g_params_count; + +#define ARRAY_FOR_EACH(a, e) \ + for (e = a; e < a + SPDK_COUNTOF(a); e++) + +#define RAID5_PARAMS_FOR_EACH(p) \ + for (p = g_params; p < g_params + g_params_count; p++) + +static int +test_setup(void) +{ + uint8_t num_base_bdevs_values[] = { 3, 4, 5 }; + uint64_t base_bdev_blockcnt_values[] = { 1, 1024, 1024 * 1024 }; + uint32_t base_bdev_blocklen_values[] = { 512, 4096 }; + uint32_t strip_size_kb_values[] = { 1, 4, 128 }; + uint8_t *num_base_bdevs; + uint64_t *base_bdev_blockcnt; + uint32_t *base_bdev_blocklen; + uint32_t *strip_size_kb; + struct raid5_params *params; + + g_params_count = SPDK_COUNTOF(num_base_bdevs_values) * + SPDK_COUNTOF(base_bdev_blockcnt_values) * + SPDK_COUNTOF(base_bdev_blocklen_values) * + SPDK_COUNTOF(strip_size_kb_values); + g_params = calloc(g_params_count, sizeof(*g_params)); + if (!g_params) { + return -ENOMEM; + } + + params = g_params; + + ARRAY_FOR_EACH(num_base_bdevs_values, num_base_bdevs) { + ARRAY_FOR_EACH(base_bdev_blockcnt_values, base_bdev_blockcnt) { + ARRAY_FOR_EACH(base_bdev_blocklen_values, base_bdev_blocklen) { + ARRAY_FOR_EACH(strip_size_kb_values, strip_size_kb) { + params->num_base_bdevs = *num_base_bdevs; + params->base_bdev_blockcnt = *base_bdev_blockcnt; + params->base_bdev_blocklen = *base_bdev_blocklen; + params->strip_size = *strip_size_kb * 1024 / *base_bdev_blocklen; + if (params->strip_size == 0 || + params->strip_size > *base_bdev_blockcnt) { + g_params_count--; + continue; + } + params++; + } + } 
+ } + } + + return 0; +} + +static int +test_cleanup(void) +{ + free(g_params); + return 0; +} + +static struct raid_bdev * +create_raid_bdev(struct raid5_params *params) +{ + struct raid_bdev *raid_bdev; + struct raid_base_bdev_info *base_info; + + raid_bdev = calloc(1, sizeof(*raid_bdev)); + SPDK_CU_ASSERT_FATAL(raid_bdev != NULL); + + raid_bdev->module = &g_raid5_module; + raid_bdev->num_base_bdevs = params->num_base_bdevs; + raid_bdev->base_bdev_info = calloc(raid_bdev->num_base_bdevs, + sizeof(struct raid_base_bdev_info)); + SPDK_CU_ASSERT_FATAL(raid_bdev->base_bdev_info != NULL); + + RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) { + base_info->bdev = calloc(1, sizeof(*base_info->bdev)); + SPDK_CU_ASSERT_FATAL(base_info->bdev != NULL); + + base_info->bdev->blockcnt = params->base_bdev_blockcnt; + base_info->bdev->blocklen = params->base_bdev_blocklen; + } + + raid_bdev->strip_size = params->strip_size; + raid_bdev->strip_size_shift = spdk_u32log2(raid_bdev->strip_size); + raid_bdev->bdev.blocklen = params->base_bdev_blocklen; + + return raid_bdev; +} + +static void +delete_raid_bdev(struct raid_bdev *raid_bdev) +{ + struct raid_base_bdev_info *base_info; + + RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) { + free(base_info->bdev); + } + free(raid_bdev->base_bdev_info); + free(raid_bdev); +} + +static struct raid5_info * +create_raid5(struct raid5_params *params) +{ + struct raid_bdev *raid_bdev = create_raid_bdev(params); + + SPDK_CU_ASSERT_FATAL(raid5_start(raid_bdev) == 0); + + return raid_bdev->module_private; +} + +static void +delete_raid5(struct raid5_info *r5info) +{ + struct raid_bdev *raid_bdev = r5info->raid_bdev; + + raid5_stop(raid_bdev); + + delete_raid_bdev(raid_bdev); +} + +static void +test_raid5_start(void) +{ + struct raid5_params *params; + + RAID5_PARAMS_FOR_EACH(params) { + struct raid5_info *r5info; + + r5info = create_raid5(params); + + CU_ASSERT_EQUAL(r5info->stripe_blocks, params->strip_size * (params->num_base_bdevs - 1)); + CU_ASSERT_EQUAL(r5info->total_stripes, params->base_bdev_blockcnt / params->strip_size); + CU_ASSERT_EQUAL(r5info->raid_bdev->bdev.blockcnt, + (params->base_bdev_blockcnt - params->base_bdev_blockcnt % params->strip_size) * + (params->num_base_bdevs - 1)); + CU_ASSERT_EQUAL(r5info->raid_bdev->bdev.optimal_io_boundary, r5info->stripe_blocks); + + delete_raid5(r5info); + } +} + +int +main(int argc, char **argv) +{ + CU_pSuite suite = NULL; + unsigned int num_failures; + + CU_set_error_action(CUEA_ABORT); + CU_initialize_registry(); + + suite = CU_add_suite("raid5", test_setup, test_cleanup); + CU_ADD_TEST(suite, test_raid5_start); + + CU_basic_set_mode(CU_BRM_VERBOSE); + CU_basic_run_tests(); + num_failures = CU_get_number_of_failures(); + CU_cleanup_registry(); + return num_failures; +} diff --git a/src/spdk/test/unit/lib/bdev/scsi_nvme.c/.gitignore b/src/spdk/test/unit/lib/bdev/scsi_nvme.c/.gitignore new file mode 100644 index 000000000..75800527d --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/scsi_nvme.c/.gitignore @@ -0,0 +1 @@ +scsi_nvme_ut diff --git a/src/spdk/test/unit/lib/bdev/scsi_nvme.c/Makefile b/src/spdk/test/unit/lib/bdev/scsi_nvme.c/Makefile new file mode 100644 index 000000000..0dbe788db --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/scsi_nvme.c/Makefile @@ -0,0 +1,37 @@ +# +# BSD LICENSE +# +# Copyright (c) 2016 FUJITSU LIMITED, All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..) + +TEST_FILE = scsi_nvme_ut.c + +include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk diff --git a/src/spdk/test/unit/lib/bdev/scsi_nvme.c/scsi_nvme_ut.c b/src/spdk/test/unit/lib/bdev/scsi_nvme.c/scsi_nvme_ut.c new file mode 100644 index 000000000..ef27d7c09 --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/scsi_nvme.c/scsi_nvme_ut.c @@ -0,0 +1,131 @@ +/*- + * BSD LICENSE + * + * Copyright (c) 2016 FUJITSU LIMITED, All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "spdk_cunit.h" + +#include "bdev/scsi_nvme.c" + +static int +null_init(void) +{ + return 0; +} + +static int +null_clean(void) +{ + return 0; +} + +static void +scsi_nvme_translate_test(void) +{ + struct spdk_bdev_io bdev_io; + int sc, sk, asc, ascq; + + /* SPDK_NVME_SCT_GENERIC */ + bdev_io.internal.error.nvme.sct = SPDK_NVME_SCT_GENERIC; + bdev_io.internal.error.nvme.sc = SPDK_NVME_SC_ABORTED_POWER_LOSS; + spdk_scsi_nvme_translate(&bdev_io, &sc, &sk, &asc, &ascq); + CU_ASSERT_EQUAL(sc, SPDK_SCSI_STATUS_TASK_ABORTED); + CU_ASSERT_EQUAL(sk, SPDK_SCSI_SENSE_ABORTED_COMMAND); + CU_ASSERT_EQUAL(asc, SPDK_SCSI_ASC_WARNING); + CU_ASSERT_EQUAL(ascq, SPDK_SCSI_ASCQ_POWER_LOSS_EXPECTED); + + bdev_io.internal.error.nvme.sc = SPDK_NVME_SC_INVALID_NUM_SGL_DESCIRPTORS; + spdk_scsi_nvme_translate(&bdev_io, &sc, &sk, &asc, &ascq); + CU_ASSERT_EQUAL(sc, SPDK_SCSI_STATUS_CHECK_CONDITION); + CU_ASSERT_EQUAL(sk, SPDK_SCSI_SENSE_ILLEGAL_REQUEST); + CU_ASSERT_EQUAL(asc, SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE); + CU_ASSERT_EQUAL(ascq, SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + + /* SPDK_NVME_SCT_COMMAND_SPECIFIC */ + bdev_io.internal.error.nvme.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC; + bdev_io.internal.error.nvme.sc = SPDK_NVME_SC_INVALID_FORMAT; + spdk_scsi_nvme_translate(&bdev_io, &sc, &sk, &asc, &ascq); + CU_ASSERT_EQUAL(sc, SPDK_SCSI_STATUS_CHECK_CONDITION); + CU_ASSERT_EQUAL(sk, SPDK_SCSI_SENSE_ILLEGAL_REQUEST); + CU_ASSERT_EQUAL(asc, SPDK_SCSI_ASC_FORMAT_COMMAND_FAILED); + CU_ASSERT_EQUAL(ascq, SPDK_SCSI_ASCQ_FORMAT_COMMAND_FAILED); + + bdev_io.internal.error.nvme.sc = SPDK_NVME_SC_OVERLAPPING_RANGE; + spdk_scsi_nvme_translate(&bdev_io, &sc, &sk, &asc, &ascq); + CU_ASSERT_EQUAL(sc, SPDK_SCSI_STATUS_CHECK_CONDITION); + CU_ASSERT_EQUAL(sk, SPDK_SCSI_SENSE_ILLEGAL_REQUEST); + CU_ASSERT_EQUAL(asc, SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE); + CU_ASSERT_EQUAL(ascq, SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + + /* SPDK_NVME_SCT_MEDIA_ERROR */ + bdev_io.internal.error.nvme.sct = SPDK_NVME_SCT_MEDIA_ERROR; + bdev_io.internal.error.nvme.sc = SPDK_NVME_SC_GUARD_CHECK_ERROR; + spdk_scsi_nvme_translate(&bdev_io, &sc, &sk, &asc, &ascq); + CU_ASSERT_EQUAL(sc, SPDK_SCSI_STATUS_CHECK_CONDITION); + CU_ASSERT_EQUAL(sk, SPDK_SCSI_SENSE_MEDIUM_ERROR); + CU_ASSERT_EQUAL(asc, SPDK_SCSI_ASC_LOGICAL_BLOCK_GUARD_CHECK_FAILED); + CU_ASSERT_EQUAL(ascq, SPDK_SCSI_ASCQ_LOGICAL_BLOCK_GUARD_CHECK_FAILED); + + bdev_io.internal.error.nvme.sc = SPDK_NVME_SC_DEALLOCATED_OR_UNWRITTEN_BLOCK; + spdk_scsi_nvme_translate(&bdev_io, &sc, &sk, &asc, &ascq); + CU_ASSERT_EQUAL(sc, SPDK_SCSI_STATUS_CHECK_CONDITION); + CU_ASSERT_EQUAL(sk, SPDK_SCSI_SENSE_ILLEGAL_REQUEST); + CU_ASSERT_EQUAL(asc, SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE); + CU_ASSERT_EQUAL(ascq, SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + + /* SPDK_NVME_SCT_VENDOR_SPECIFIC */ + bdev_io.internal.error.nvme.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC; + bdev_io.internal.error.nvme.sc = 0xff; + spdk_scsi_nvme_translate(&bdev_io, &sc, &sk, &asc, &ascq); + CU_ASSERT_EQUAL(sc, SPDK_SCSI_STATUS_CHECK_CONDITION); + CU_ASSERT_EQUAL(sk, SPDK_SCSI_SENSE_ILLEGAL_REQUEST); + CU_ASSERT_EQUAL(asc, SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE); + CU_ASSERT_EQUAL(ascq, SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE); +} + +int +main(int argc, char **argv) +{ + CU_pSuite suite = NULL; + unsigned int num_failures; + + CU_set_error_action(CUEA_ABORT); + CU_initialize_registry(); + + suite = CU_add_suite("scsi_nvme_suite", null_init, null_clean); + + CU_ADD_TEST(suite, scsi_nvme_translate_test); + + CU_basic_set_mode(CU_BRM_VERBOSE); + 
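+	/* run the suite; the process exit status is the number of failed assertions */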
CU_basic_run_tests(); + num_failures = CU_get_number_of_failures(); + CU_cleanup_registry(); + return num_failures; +} diff --git a/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/.gitignore b/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/.gitignore new file mode 100644 index 000000000..5f2f6fdff --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/.gitignore @@ -0,0 +1 @@ +vbdev_lvol_ut diff --git a/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/Makefile b/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/Makefile new file mode 100644 index 000000000..a44f51372 --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/Makefile @@ -0,0 +1,38 @@ +# +# BSD LICENSE +# +# Copyright (c) Intel Corporation. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../../) + +TEST_FILE = vbdev_lvol_ut.c + +include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk diff --git a/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/vbdev_lvol_ut.c b/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/vbdev_lvol_ut.c new file mode 100644 index 000000000..a963bd3b7 --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/vbdev_lvol_ut.c @@ -0,0 +1,1440 @@ +/*- + * BSD LICENSE + * + * Copyright (c) Intel Corporation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "spdk_cunit.h" +#include "spdk/string.h" + +#include "bdev/lvol/vbdev_lvol.c" + +#include "unit/lib/json_mock.c" + +#define SPDK_BS_PAGE_SIZE 0x1000 + +int g_lvolerrno; +int g_lvserrno; +int g_cluster_size; +int g_registered_bdevs; +int g_num_lvols = 0; +struct spdk_lvol_store *g_lvs = NULL; +struct spdk_lvol *g_lvol = NULL; +struct lvol_store_bdev *g_lvs_bdev = NULL; +struct spdk_bdev *g_base_bdev = NULL; +struct spdk_bdev_io *g_io = NULL; +struct spdk_io_channel *g_ch = NULL; + +static struct spdk_bdev g_bdev = {}; +static struct spdk_lvol_store *g_lvol_store = NULL; +bool lvol_store_initialize_fail = false; +bool lvol_store_initialize_cb_fail = false; +bool lvol_already_opened = false; +bool g_examine_done = false; +bool g_bdev_alias_already_exists = false; +bool g_lvs_with_name_already_exists = false; + +int +spdk_bdev_alias_add(struct spdk_bdev *bdev, const char *alias) +{ + struct spdk_bdev_alias *tmp; + + CU_ASSERT(alias != NULL); + CU_ASSERT(bdev != NULL); + if (g_bdev_alias_already_exists) { + return -EEXIST; + } + + tmp = calloc(1, sizeof(*tmp)); + SPDK_CU_ASSERT_FATAL(tmp != NULL); + + tmp->alias = strdup(alias); + SPDK_CU_ASSERT_FATAL(tmp->alias != NULL); + + TAILQ_INSERT_TAIL(&bdev->aliases, tmp, tailq); + + return 0; +} + +int +spdk_bdev_alias_del(struct spdk_bdev *bdev, const char *alias) +{ + struct spdk_bdev_alias *tmp; + + CU_ASSERT(bdev != NULL); + + TAILQ_FOREACH(tmp, &bdev->aliases, tailq) { + SPDK_CU_ASSERT_FATAL(alias != NULL); + if (strncmp(alias, tmp->alias, SPDK_LVOL_NAME_MAX) == 0) { + TAILQ_REMOVE(&bdev->aliases, tmp, tailq); + free(tmp->alias); + free(tmp); + return 0; + } + } + + return -ENOENT; +} + +void +spdk_bdev_alias_del_all(struct spdk_bdev *bdev) +{ + struct spdk_bdev_alias *p, *tmp; + + TAILQ_FOREACH_SAFE(p, &bdev->aliases, tailq, tmp) { + TAILQ_REMOVE(&bdev->aliases, p, tailq); + free(p->alias); + free(p); + } +} + +void +spdk_bdev_destruct_done(struct spdk_bdev *bdev, int bdeverrno) +{ +} + +void +spdk_lvs_rename(struct spdk_lvol_store *lvs, const char *new_name, + spdk_lvs_op_complete cb_fn, void *cb_arg) +{ + if (g_lvs_with_name_already_exists) { + g_lvolerrno = -EEXIST; + } else { + snprintf(lvs->name, sizeof(lvs->name), "%s", new_name); + g_lvolerrno = 0; + } + + cb_fn(cb_arg, g_lvolerrno); +} + +void +spdk_lvol_rename(struct spdk_lvol *lvol, const char *new_name, + spdk_lvol_op_complete cb_fn, void *cb_arg) +{ + struct spdk_lvol *tmp; + + if (strncmp(lvol->name, new_name, SPDK_LVOL_NAME_MAX) == 0) { + cb_fn(cb_arg, 0); + return; + } + + TAILQ_FOREACH(tmp, &lvol->lvol_store->lvols, link) { + if (strncmp(tmp->name, new_name, SPDK_LVOL_NAME_MAX) == 0) { + SPDK_ERRLOG("Lvol %s already exists in lvol store %s\n", new_name, 
lvol->lvol_store->name); + cb_fn(cb_arg, -EEXIST); + return; + } + } + + snprintf(lvol->name, sizeof(lvol->name), "%s", new_name); + + cb_fn(cb_arg, g_lvolerrno); +} + +void +spdk_lvol_open(struct spdk_lvol *lvol, spdk_lvol_op_with_handle_complete cb_fn, void *cb_arg) +{ + cb_fn(cb_arg, lvol, g_lvolerrno); +} + +uint64_t +spdk_blob_get_num_clusters(struct spdk_blob *b) +{ + return 0; +} + +int +spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids, + size_t *count) +{ + *count = 0; + return 0; +} + +spdk_blob_id +spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid) +{ + return 0; +} + +bool g_blob_is_read_only = false; + +bool +spdk_blob_is_read_only(struct spdk_blob *blob) +{ + return g_blob_is_read_only; +} + +bool +spdk_blob_is_snapshot(struct spdk_blob *blob) +{ + return false; +} + +bool +spdk_blob_is_clone(struct spdk_blob *blob) +{ + return false; +} + +bool +spdk_blob_is_thin_provisioned(struct spdk_blob *blob) +{ + return false; +} + +static struct spdk_lvol *_lvol_create(struct spdk_lvol_store *lvs); + +void +spdk_lvs_load(struct spdk_bs_dev *dev, + spdk_lvs_op_with_handle_complete cb_fn, void *cb_arg) +{ + struct spdk_lvol_store *lvs = NULL; + int i; + int lvserrno = g_lvserrno; + + if (lvserrno != 0) { + /* On error blobstore destroys bs_dev itself, + * by puttin back io channels. + * This operation is asynchronous, and completed + * after calling the callback for lvol. */ + cb_fn(cb_arg, g_lvol_store, lvserrno); + dev->destroy(dev); + return; + } + + lvs = calloc(1, sizeof(*lvs)); + SPDK_CU_ASSERT_FATAL(lvs != NULL); + TAILQ_INIT(&lvs->lvols); + TAILQ_INIT(&lvs->pending_lvols); + spdk_uuid_generate(&lvs->uuid); + lvs->bs_dev = dev; + for (i = 0; i < g_num_lvols; i++) { + _lvol_create(lvs); + } + + cb_fn(cb_arg, lvs, lvserrno); +} + +int +spdk_bs_bdev_claim(struct spdk_bs_dev *bs_dev, struct spdk_bdev_module *module) +{ + if (lvol_already_opened == true) { + return -1; + } + + lvol_already_opened = true; + + return 0; +} + +void +spdk_bdev_unregister(struct spdk_bdev *vbdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg) +{ + int rc; + + SPDK_CU_ASSERT_FATAL(vbdev != NULL); + rc = vbdev->fn_table->destruct(vbdev->ctxt); + + SPDK_CU_ASSERT_FATAL(cb_fn != NULL); + cb_fn(cb_arg, rc); +} + +void +spdk_bdev_module_finish_done(void) +{ + return; +} + +uint64_t +spdk_bs_get_page_size(struct spdk_blob_store *bs) +{ + return SPDK_BS_PAGE_SIZE; +} + +uint64_t +spdk_bs_get_io_unit_size(struct spdk_blob_store *bs) +{ + return SPDK_BS_PAGE_SIZE; +} + +static void +bdev_blob_destroy(struct spdk_bs_dev *bs_dev) +{ + CU_ASSERT(bs_dev != NULL); + free(bs_dev); + lvol_already_opened = false; +} + +struct spdk_bs_dev * +spdk_bdev_create_bs_dev(struct spdk_bdev *bdev, spdk_bdev_remove_cb_t remove_cb, void *remove_ctx) +{ + struct spdk_bs_dev *bs_dev; + + if (lvol_already_opened == true || bdev == NULL) { + return NULL; + } + + bs_dev = calloc(1, sizeof(*bs_dev)); + SPDK_CU_ASSERT_FATAL(bs_dev != NULL); + bs_dev->destroy = bdev_blob_destroy; + + return bs_dev; +} + +void +spdk_lvs_opts_init(struct spdk_lvs_opts *opts) +{ +} + +int +spdk_lvs_init(struct spdk_bs_dev *bs_dev, struct spdk_lvs_opts *o, + spdk_lvs_op_with_handle_complete cb_fn, void *cb_arg) +{ + struct spdk_lvol_store *lvs; + int error = 0; + + if (lvol_store_initialize_fail) { + return -1; + } + + if (lvol_store_initialize_cb_fail) { + bs_dev->destroy(bs_dev); + lvs = NULL; + error = -1; + } else { + lvs = calloc(1, sizeof(*lvs)); + SPDK_CU_ASSERT_FATAL(lvs != NULL); + 
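+		/* initialize the fake lvolstore the same way the spdk_lvs_load() stub above does */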
TAILQ_INIT(&lvs->lvols); + TAILQ_INIT(&lvs->pending_lvols); + spdk_uuid_generate(&lvs->uuid); + snprintf(lvs->name, sizeof(lvs->name), "%s", o->name); + lvs->bs_dev = bs_dev; + error = 0; + } + cb_fn(cb_arg, lvs, error); + + return 0; +} + +int +spdk_lvs_unload(struct spdk_lvol_store *lvs, spdk_lvs_op_complete cb_fn, void *cb_arg) +{ + struct spdk_lvol *lvol, *tmp; + + TAILQ_FOREACH_SAFE(lvol, &lvs->lvols, link, tmp) { + TAILQ_REMOVE(&lvs->lvols, lvol, link); + free(lvol); + } + g_lvol_store = NULL; + + lvs->bs_dev->destroy(lvs->bs_dev); + free(lvs); + + if (cb_fn != NULL) { + cb_fn(cb_arg, 0); + } + + return 0; +} + +int +spdk_lvs_destroy(struct spdk_lvol_store *lvs, spdk_lvs_op_complete cb_fn, + void *cb_arg) +{ + struct spdk_lvol *lvol, *tmp; + char *alias; + + TAILQ_FOREACH_SAFE(lvol, &lvs->lvols, link, tmp) { + TAILQ_REMOVE(&lvs->lvols, lvol, link); + + alias = spdk_sprintf_alloc("%s/%s", lvs->name, lvol->name); + if (alias == NULL) { + SPDK_ERRLOG("Cannot alloc memory for alias\n"); + return -1; + } + spdk_bdev_alias_del(lvol->bdev, alias); + + free(alias); + free(lvol); + } + g_lvol_store = NULL; + + lvs->bs_dev->destroy(lvs->bs_dev); + free(lvs); + + if (cb_fn != NULL) { + cb_fn(cb_arg, 0); + } + + return 0; +} + +void +spdk_lvol_resize(struct spdk_lvol *lvol, size_t sz, spdk_lvol_op_complete cb_fn, void *cb_arg) +{ + cb_fn(cb_arg, 0); +} + +void +spdk_lvol_set_read_only(struct spdk_lvol *lvol, spdk_lvol_op_complete cb_fn, void *cb_arg) +{ + cb_fn(cb_arg, 0); +} + +int +spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size) +{ + bdev->blockcnt = size; + return 0; +} + +uint64_t +spdk_bs_get_cluster_size(struct spdk_blob_store *bs) +{ + return g_cluster_size; +} + +struct spdk_bdev * +spdk_bdev_get_by_name(const char *bdev_name) +{ + if (!strcmp(g_base_bdev->name, bdev_name)) { + return g_base_bdev; + } + + return NULL; +} + +void +spdk_lvol_close(struct spdk_lvol *lvol, spdk_lvol_op_complete cb_fn, void *cb_arg) +{ + lvol->ref_count--; + + SPDK_CU_ASSERT_FATAL(cb_fn != NULL); + cb_fn(cb_arg, 0); +} + +bool +spdk_lvol_deletable(struct spdk_lvol *lvol) +{ + return true; +} + +void +spdk_lvol_destroy(struct spdk_lvol *lvol, spdk_lvol_op_complete cb_fn, void *cb_arg) +{ + if (lvol->ref_count != 0) { + cb_fn(cb_arg, -ENODEV); + } + + TAILQ_REMOVE(&lvol->lvol_store->lvols, lvol, link); + + SPDK_CU_ASSERT_FATAL(cb_fn != NULL); + cb_fn(cb_arg, 0); + + g_lvol = NULL; + free(lvol); +} + +void +spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status) +{ + bdev_io->internal.status = status; +} + +struct spdk_io_channel *spdk_lvol_get_io_channel(struct spdk_lvol *lvol) +{ + CU_ASSERT(lvol == g_lvol); + return g_ch; +} + +void +spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len) +{ + CU_ASSERT(cb == lvol_get_buf_cb); +} + +void +spdk_blob_io_read(struct spdk_blob *blob, struct spdk_io_channel *channel, + void *payload, uint64_t offset, uint64_t length, + spdk_blob_op_complete cb_fn, void *cb_arg) +{ + CU_ASSERT(blob == NULL); + CU_ASSERT(channel == g_ch); + CU_ASSERT(offset == g_io->u.bdev.offset_blocks); + CU_ASSERT(length == g_io->u.bdev.num_blocks); + cb_fn(cb_arg, 0); +} + +void +spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel, + void *payload, uint64_t offset, uint64_t length, + spdk_blob_op_complete cb_fn, void *cb_arg) +{ + CU_ASSERT(blob == NULL); + CU_ASSERT(channel == g_ch); + CU_ASSERT(offset == g_io->u.bdev.offset_blocks); + CU_ASSERT(length == g_io->u.bdev.num_blocks); + 
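+	/* complete the faked blob write immediately with success */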
cb_fn(cb_arg, 0); +} + +void +spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel, + uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) +{ + CU_ASSERT(blob == NULL); + CU_ASSERT(channel == g_ch); + CU_ASSERT(offset == g_io->u.bdev.offset_blocks); + CU_ASSERT(length == g_io->u.bdev.num_blocks); + cb_fn(cb_arg, 0); +} + +void +spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel, + uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) +{ + CU_ASSERT(blob == NULL); + CU_ASSERT(channel == g_ch); + CU_ASSERT(offset == g_io->u.bdev.offset_blocks); + CU_ASSERT(length == g_io->u.bdev.num_blocks); + cb_fn(cb_arg, 0); +} + +void +spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel, + struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, + spdk_blob_op_complete cb_fn, void *cb_arg) +{ + CU_ASSERT(blob == NULL); + CU_ASSERT(channel == g_ch); + CU_ASSERT(offset == g_io->u.bdev.offset_blocks); + CU_ASSERT(length == g_io->u.bdev.num_blocks); + cb_fn(cb_arg, 0); +} + +void +spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel, + struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, + spdk_blob_op_complete cb_fn, void *cb_arg) +{ + CU_ASSERT(blob == NULL); + CU_ASSERT(channel == g_ch); + CU_ASSERT(offset == g_io->u.bdev.offset_blocks); + CU_ASSERT(length == g_io->u.bdev.num_blocks); + cb_fn(cb_arg, 0); +} + +void +spdk_bdev_module_list_add(struct spdk_bdev_module *bdev_module) +{ +} + +const char * +spdk_bdev_get_name(const struct spdk_bdev *bdev) +{ + return "test"; +} + +int +spdk_bdev_register(struct spdk_bdev *vbdev) +{ + TAILQ_INIT(&vbdev->aliases); + + g_registered_bdevs++; + return 0; +} + +void +spdk_bdev_module_examine_done(struct spdk_bdev_module *module) +{ + SPDK_CU_ASSERT_FATAL(g_examine_done != true); + g_examine_done = true; +} + +static struct spdk_lvol * +_lvol_create(struct spdk_lvol_store *lvs) +{ + struct spdk_lvol *lvol = calloc(1, sizeof(*lvol)); + + SPDK_CU_ASSERT_FATAL(lvol != NULL); + + lvol->lvol_store = lvs; + lvol->ref_count++; + snprintf(lvol->unique_id, sizeof(lvol->unique_id), "%s", "UNIT_TEST_UUID"); + + TAILQ_INSERT_TAIL(&lvol->lvol_store->lvols, lvol, link); + + return lvol; +} + +int +spdk_lvol_create(struct spdk_lvol_store *lvs, const char *name, size_t sz, + bool thin_provision, enum lvol_clear_method clear_method, spdk_lvol_op_with_handle_complete cb_fn, + void *cb_arg) +{ + struct spdk_lvol *lvol; + + lvol = _lvol_create(lvs); + snprintf(lvol->name, sizeof(lvol->name), "%s", name); + cb_fn(cb_arg, lvol, 0); + + return 0; +} + +void +spdk_lvol_create_snapshot(struct spdk_lvol *lvol, const char *snapshot_name, + spdk_lvol_op_with_handle_complete cb_fn, void *cb_arg) +{ + struct spdk_lvol *snap; + + snap = _lvol_create(lvol->lvol_store); + snprintf(snap->name, sizeof(snap->name), "%s", snapshot_name); + cb_fn(cb_arg, snap, 0); +} + +void +spdk_lvol_create_clone(struct spdk_lvol *lvol, const char *clone_name, + spdk_lvol_op_with_handle_complete cb_fn, void *cb_arg) +{ + struct spdk_lvol *clone; + + clone = _lvol_create(lvol->lvol_store); + snprintf(clone->name, sizeof(clone->name), "%s", clone_name); + cb_fn(cb_arg, clone, 0); +} + +static void +lvol_store_op_complete(void *cb_arg, int lvserrno) +{ + g_lvserrno = lvserrno; + return; +} + +static void +lvol_store_op_with_handle_complete(void *cb_arg, struct spdk_lvol_store *lvs, int lvserrno) +{ + g_lvserrno = lvserrno; + g_lvol_store = lvs; + return; +} + 
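+ /*
+  * The completion callbacks below follow the same pattern as the two helpers above: every
+  * stubbed lvol/lvs call completes synchronously, so a test drives an operation, lets the
+  * callback capture the status (and, where relevant, the resulting object) in a global, and
+  * then asserts on those globals, e.g.:
+  *
+  *   g_lvolerrno = -1;
+  *   rc = vbdev_lvol_create(lvs, "lvol", sz, false, LVOL_CLEAR_WITH_DEFAULT,
+  *                          vbdev_lvol_create_complete, NULL);
+  *   CU_ASSERT(rc == 0 && g_lvolerrno == 0 && g_lvol != NULL);
+  */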
+static void +vbdev_lvol_create_complete(void *cb_arg, struct spdk_lvol *lvol, int lvolerrno) +{ + g_lvolerrno = lvolerrno; + g_lvol = lvol; +} + +static void +vbdev_lvol_resize_complete(void *cb_arg, int lvolerrno) +{ + g_lvolerrno = lvolerrno; +} + +static void +vbdev_lvol_set_read_only_complete(void *cb_arg, int lvolerrno) +{ + g_lvolerrno = lvolerrno; +} + +static void +vbdev_lvol_rename_complete(void *cb_arg, int lvolerrno) +{ + g_lvolerrno = lvolerrno; +} + +static void +ut_lvs_destroy(void) +{ + int rc = 0; + int sz = 10; + struct spdk_lvol_store *lvs; + + /* Lvol store is successfully created */ + rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete, + NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_lvserrno == 0); + SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL); + CU_ASSERT(g_lvol_store->bs_dev != NULL); + + lvs = g_lvol_store; + g_lvol_store = NULL; + + spdk_uuid_generate(&lvs->uuid); + + /* Successfully create lvol, which should be unloaded with lvs later */ + g_lvolerrno = -1; + rc = vbdev_lvol_create(lvs, "lvol", sz, false, LVOL_CLEAR_WITH_DEFAULT, vbdev_lvol_create_complete, + NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_lvolerrno == 0); + SPDK_CU_ASSERT_FATAL(g_lvol != NULL); + + /* Unload lvol store */ + vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL); + CU_ASSERT(g_lvserrno == 0); + CU_ASSERT(g_lvol_store == NULL); +} + +static void +ut_lvol_init(void) +{ + struct spdk_lvol_store *lvs; + int sz = 10; + int rc; + + /* Lvol store is successfully created */ + rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete, + NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_lvserrno == 0); + SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL); + CU_ASSERT(g_lvol_store->bs_dev != NULL); + lvs = g_lvol_store; + + /* Successful lvol create */ + g_lvolerrno = -1; + rc = vbdev_lvol_create(lvs, "lvol", sz, false, LVOL_CLEAR_WITH_DEFAULT, vbdev_lvol_create_complete, + NULL); + SPDK_CU_ASSERT_FATAL(rc == 0); + CU_ASSERT(g_lvol != NULL); + CU_ASSERT(g_lvolerrno == 0); + + /* Successful lvol destroy */ + vbdev_lvol_destroy(g_lvol, lvol_store_op_complete, NULL); + CU_ASSERT(g_lvol == NULL); + + /* Destroy lvol store */ + vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL); + CU_ASSERT(g_lvserrno == 0); + CU_ASSERT(g_lvol_store == NULL); +} + +static void +ut_lvol_snapshot(void) +{ + struct spdk_lvol_store *lvs; + int sz = 10; + int rc; + struct spdk_lvol *lvol = NULL; + + /* Lvol store is successfully created */ + rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete, + NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_lvserrno == 0); + SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL); + CU_ASSERT(g_lvol_store->bs_dev != NULL); + lvs = g_lvol_store; + + /* Successful lvol create */ + g_lvolerrno = -1; + rc = vbdev_lvol_create(lvs, "lvol", sz, false, LVOL_CLEAR_WITH_DEFAULT, vbdev_lvol_create_complete, + NULL); + SPDK_CU_ASSERT_FATAL(rc == 0); + SPDK_CU_ASSERT_FATAL(g_lvol != NULL); + CU_ASSERT(g_lvolerrno == 0); + + lvol = g_lvol; + + /* Successful snap create */ + vbdev_lvol_create_snapshot(lvol, "snap", vbdev_lvol_create_complete, NULL); + SPDK_CU_ASSERT_FATAL(rc == 0); + CU_ASSERT(g_lvol != NULL); + CU_ASSERT(g_lvolerrno == 0); + + /* Successful lvol destroy */ + vbdev_lvol_destroy(g_lvol, lvol_store_op_complete, NULL); + CU_ASSERT(g_lvol == NULL); + + /* Successful snap destroy */ + g_lvol = lvol; + vbdev_lvol_destroy(g_lvol, lvol_store_op_complete, NULL); + CU_ASSERT(g_lvol == NULL); + + /* 
Destroy lvol store */ + vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL); + CU_ASSERT(g_lvserrno == 0); + CU_ASSERT(g_lvol_store == NULL); +} + +static void +ut_lvol_clone(void) +{ + struct spdk_lvol_store *lvs; + int sz = 10; + int rc; + struct spdk_lvol *lvol = NULL; + struct spdk_lvol *snap = NULL; + struct spdk_lvol *clone = NULL; + + /* Lvol store is successfully created */ + rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete, + NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_lvserrno == 0); + SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL); + CU_ASSERT(g_lvol_store->bs_dev != NULL); + lvs = g_lvol_store; + + /* Successful lvol create */ + g_lvolerrno = -1; + rc = vbdev_lvol_create(lvs, "lvol", sz, false, LVOL_CLEAR_WITH_DEFAULT, vbdev_lvol_create_complete, + NULL); + SPDK_CU_ASSERT_FATAL(rc == 0); + SPDK_CU_ASSERT_FATAL(g_lvol != NULL); + CU_ASSERT(g_lvolerrno == 0); + + lvol = g_lvol; + + /* Successful snap create */ + vbdev_lvol_create_snapshot(lvol, "snap", vbdev_lvol_create_complete, NULL); + SPDK_CU_ASSERT_FATAL(rc == 0); + SPDK_CU_ASSERT_FATAL(g_lvol != NULL); + CU_ASSERT(g_lvolerrno == 0); + + snap = g_lvol; + + /* Successful clone create */ + vbdev_lvol_create_clone(snap, "clone", vbdev_lvol_create_complete, NULL); + + SPDK_CU_ASSERT_FATAL(rc == 0); + SPDK_CU_ASSERT_FATAL(g_lvol != NULL); + CU_ASSERT(g_lvolerrno == 0); + + clone = g_lvol; + + /* Successful lvol destroy */ + g_lvol = lvol; + vbdev_lvol_destroy(g_lvol, lvol_store_op_complete, NULL); + CU_ASSERT(g_lvol == NULL); + + /* Successful clone destroy */ + g_lvol = clone; + vbdev_lvol_destroy(g_lvol, lvol_store_op_complete, NULL); + CU_ASSERT(g_lvol == NULL); + + /* Successful lvol destroy */ + g_lvol = snap; + vbdev_lvol_destroy(g_lvol, lvol_store_op_complete, NULL); + CU_ASSERT(g_lvol == NULL); + + /* Destroy lvol store */ + vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL); + CU_ASSERT(g_lvserrno == 0); + CU_ASSERT(g_lvol_store == NULL); +} + +static void +ut_lvol_hotremove(void) +{ + int rc = 0; + + lvol_store_initialize_fail = false; + lvol_store_initialize_cb_fail = false; + lvol_already_opened = false; + + /* Lvol store is successfully created */ + rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete, + NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_lvserrno == 0); + SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL); + CU_ASSERT(g_lvol_store->bs_dev != NULL); + + /* Hot remove callback with NULL - stability check */ + vbdev_lvs_hotremove_cb(NULL); + + /* Hot remove lvs on bdev removal */ + vbdev_lvs_hotremove_cb(&g_bdev); + + CU_ASSERT(g_lvol_store == NULL); + CU_ASSERT(TAILQ_EMPTY(&g_spdk_lvol_pairs)); + +} + +static void +ut_lvs_examine_check(bool success) +{ + struct lvol_store_bdev *lvs_bdev; + + /* Examine was finished regardless of result */ + CU_ASSERT(g_examine_done == true); + g_examine_done = false; + + if (success) { + SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&g_spdk_lvol_pairs)); + lvs_bdev = TAILQ_FIRST(&g_spdk_lvol_pairs); + SPDK_CU_ASSERT_FATAL(lvs_bdev != NULL); + g_lvol_store = lvs_bdev->lvs; + SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL); + CU_ASSERT(g_lvol_store->bs_dev != NULL); + } else { + SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&g_spdk_lvol_pairs)); + g_lvol_store = NULL; + } +} + +static void +ut_lvol_examine(void) +{ + /* Examine unsuccessfully - bdev already opened */ + g_lvserrno = -1; + lvol_already_opened = true; + vbdev_lvs_examine(&g_bdev); + ut_lvs_examine_check(false); + + /* Examine unsuccessfully - fail on lvol 
store */ + g_lvserrno = -1; + lvol_already_opened = false; + vbdev_lvs_examine(&g_bdev); + ut_lvs_examine_check(false); + + /* Examine successfully + * - one lvol fails to load + * - lvs is loaded with no lvols present */ + g_lvserrno = 0; + g_lvolerrno = -1; + g_num_lvols = 1; + lvol_already_opened = false; + g_registered_bdevs = 0; + vbdev_lvs_examine(&g_bdev); + ut_lvs_examine_check(true); + CU_ASSERT(g_registered_bdevs == 0); + CU_ASSERT(TAILQ_EMPTY(&g_lvol_store->lvols)); + vbdev_lvs_destruct(g_lvol_store, lvol_store_op_complete, NULL); + CU_ASSERT(g_lvserrno == 0); + CU_ASSERT(g_lvol_store == NULL); + + /* Examine successfully */ + g_lvserrno = 0; + g_lvolerrno = 0; + g_registered_bdevs = 0; + lvol_already_opened = false; + vbdev_lvs_examine(&g_bdev); + ut_lvs_examine_check(true); + CU_ASSERT(g_registered_bdevs != 0); + SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&g_lvol_store->lvols)); + vbdev_lvs_destruct(g_lvol_store, lvol_store_op_complete, NULL); + CU_ASSERT(g_lvserrno == 0); +} + +static void +ut_lvol_rename(void) +{ + struct spdk_lvol_store *lvs; + struct spdk_lvol *lvol; + struct spdk_lvol *lvol2; + int sz = 10; + int rc; + + /* Lvol store is successfully created */ + rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete, + NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_lvserrno == 0); + SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL); + CU_ASSERT(g_lvol_store->bs_dev != NULL); + lvs = g_lvol_store; + + /* Successful lvols create */ + g_lvolerrno = -1; + rc = vbdev_lvol_create(lvs, "lvol", sz, false, LVOL_CLEAR_WITH_DEFAULT, vbdev_lvol_create_complete, + NULL); + SPDK_CU_ASSERT_FATAL(rc == 0); + CU_ASSERT(g_lvol != NULL); + CU_ASSERT(g_lvolerrno == 0); + lvol = g_lvol; + + g_lvolerrno = -1; + rc = vbdev_lvol_create(lvs, "lvol2", sz, false, LVOL_CLEAR_WITH_DEFAULT, vbdev_lvol_create_complete, + NULL); + SPDK_CU_ASSERT_FATAL(rc == 0); + CU_ASSERT(g_lvol != NULL); + CU_ASSERT(g_lvolerrno == 0); + lvol2 = g_lvol; + + /* Successful rename lvol */ + vbdev_lvol_rename(lvol, "new_lvol_name", vbdev_lvol_rename_complete, NULL); + SPDK_CU_ASSERT_FATAL(g_lvolerrno == 0); + CU_ASSERT_STRING_EQUAL(lvol->name, "new_lvol_name"); + + /* Renaming lvol with name already existing */ + g_bdev_alias_already_exists = true; + vbdev_lvol_rename(lvol2, "new_lvol_name", vbdev_lvol_rename_complete, NULL); + g_bdev_alias_already_exists = false; + SPDK_CU_ASSERT_FATAL(g_lvolerrno != 0); + CU_ASSERT_STRING_NOT_EQUAL(lvol2->name, "new_lvol_name"); + + /* Renaming lvol with it's own name */ + vbdev_lvol_rename(lvol, "new_lvol_name", vbdev_lvol_rename_complete, NULL); + SPDK_CU_ASSERT_FATAL(g_lvolerrno == 0); + CU_ASSERT_STRING_EQUAL(lvol->name, "new_lvol_name"); + + /* Successful lvols destroy */ + vbdev_lvol_destroy(lvol, lvol_store_op_complete, NULL); + CU_ASSERT(g_lvol == NULL); + + vbdev_lvol_destroy(lvol2, lvol_store_op_complete, NULL); + CU_ASSERT(g_lvol == NULL); + + /* Destroy lvol store */ + vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL); + CU_ASSERT(g_lvserrno == 0); + CU_ASSERT(g_lvol_store == NULL); +} + +static void +ut_lvol_destroy(void) +{ + struct spdk_lvol_store *lvs; + struct spdk_lvol *lvol; + struct spdk_lvol *lvol2; + int sz = 10; + int rc; + + /* Lvol store is successfully created */ + rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete, + NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_lvserrno == 0); + SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL); + CU_ASSERT(g_lvol_store->bs_dev != NULL); + lvs = g_lvol_store; + + 
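+ /* This test exercises both teardown paths: the first lvol is destroyed explicitly, the
+  * second is only unregistered (as on hot remove), and the store is then unloaded rather
+  * than destroyed. */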
/* Successful lvols create */ + g_lvolerrno = -1; + rc = vbdev_lvol_create(lvs, "lvol", sz, false, LVOL_CLEAR_WITH_DEFAULT, vbdev_lvol_create_complete, + NULL); + SPDK_CU_ASSERT_FATAL(rc == 0); + CU_ASSERT(g_lvol != NULL); + CU_ASSERT(g_lvolerrno == 0); + lvol = g_lvol; + + g_lvolerrno = -1; + rc = vbdev_lvol_create(lvs, "lvol2", sz, false, LVOL_CLEAR_WITH_DEFAULT, vbdev_lvol_create_complete, + NULL); + SPDK_CU_ASSERT_FATAL(rc == 0); + CU_ASSERT(g_lvol != NULL); + CU_ASSERT(g_lvolerrno == 0); + lvol2 = g_lvol; + + /* Successful lvols destroy */ + vbdev_lvol_destroy(lvol, lvol_store_op_complete, NULL); + CU_ASSERT(g_lvol == NULL); + CU_ASSERT(g_lvolerrno == 0); + + /* Hot remove lvol bdev */ + vbdev_lvol_unregister(lvol2); + + /* Unload lvol store */ + vbdev_lvs_unload(lvs, lvol_store_op_complete, NULL); + CU_ASSERT(g_lvserrno == 0); + CU_ASSERT(g_lvol_store == NULL); +} + +static void +ut_lvol_resize(void) +{ + struct spdk_lvol_store *lvs; + struct spdk_lvol *lvol; + int sz = 10; + int rc = 0; + + /* Lvol store is successfully created */ + rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete, + NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_lvserrno == 0); + SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL); + CU_ASSERT(g_lvol_store->bs_dev != NULL); + lvs = g_lvol_store; + + /* Successful lvol create */ + g_lvolerrno = -1; + rc = vbdev_lvol_create(lvs, "lvol", sz, false, LVOL_CLEAR_WITH_DEFAULT, vbdev_lvol_create_complete, + NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_lvolerrno == 0); + SPDK_CU_ASSERT_FATAL(g_lvol != NULL); + lvol = g_lvol; + + /* Successful lvol resize */ + g_lvolerrno = -1; + vbdev_lvol_resize(lvol, 20, vbdev_lvol_resize_complete, NULL); + CU_ASSERT(g_lvolerrno == 0); + CU_ASSERT(lvol->bdev->blockcnt == 20 * g_cluster_size / lvol->bdev->blocklen); + + /* Resize with NULL lvol */ + vbdev_lvol_resize(NULL, 20, vbdev_lvol_resize_complete, NULL); + CU_ASSERT(g_lvolerrno != 0); + + /* Successful lvol destroy */ + vbdev_lvol_destroy(lvol, lvol_store_op_complete, NULL); + CU_ASSERT(g_lvol == NULL); + + /* Destroy lvol store */ + vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL); + CU_ASSERT(g_lvserrno == 0); + CU_ASSERT(g_lvol_store == NULL); +} + +static void +ut_lvol_set_read_only(void) +{ + struct spdk_lvol_store *lvs; + struct spdk_lvol *lvol; + int sz = 10; + int rc = 0; + + /* Lvol store is successfully created */ + rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete, + NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_lvserrno == 0); + SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL); + CU_ASSERT(g_lvol_store->bs_dev != NULL); + lvs = g_lvol_store; + + /* Successful lvol create */ + g_lvolerrno = -1; + rc = vbdev_lvol_create(lvs, "lvol", sz, false, LVOL_CLEAR_WITH_DEFAULT, vbdev_lvol_create_complete, + NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_lvolerrno == 0); + SPDK_CU_ASSERT_FATAL(g_lvol != NULL); + lvol = g_lvol; + + /* Successful set lvol as read only */ + g_lvolerrno = -1; + vbdev_lvol_set_read_only(lvol, vbdev_lvol_set_read_only_complete, NULL); + CU_ASSERT(g_lvolerrno == 0); + + /* Successful lvol destroy */ + vbdev_lvol_destroy(lvol, lvol_store_op_complete, NULL); + CU_ASSERT(g_lvol == NULL); + + /* Destroy lvol store */ + vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL); + CU_ASSERT(g_lvserrno == 0); + CU_ASSERT(g_lvol_store == NULL); +} + +static void +ut_lvs_unload(void) +{ + int rc = 0; + int sz = 10; + struct spdk_lvol_store *lvs; + + /* Lvol store is successfully created */ + rc = 
vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete, + NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_lvserrno == 0); + SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL); + CU_ASSERT(g_lvol_store->bs_dev != NULL); + + lvs = g_lvol_store; + g_lvol_store = NULL; + + spdk_uuid_generate(&lvs->uuid); + + /* Successfully create lvol, which should be destroyed with lvs later */ + g_lvolerrno = -1; + rc = vbdev_lvol_create(lvs, "lvol", sz, false, LVOL_CLEAR_WITH_DEFAULT, vbdev_lvol_create_complete, + NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_lvolerrno == 0); + SPDK_CU_ASSERT_FATAL(g_lvol != NULL); + + /* Unload lvol store */ + vbdev_lvs_unload(lvs, lvol_store_op_complete, NULL); + CU_ASSERT(g_lvserrno == 0); + CU_ASSERT(g_lvol_store == NULL); + CU_ASSERT(g_lvol != NULL); +} + +static void +ut_lvs_init(void) +{ + int rc = 0; + struct spdk_lvol_store *lvs; + + /* spdk_lvs_init() fails */ + lvol_store_initialize_fail = true; + + rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete, + NULL); + CU_ASSERT(rc != 0); + CU_ASSERT(g_lvserrno == 0); + CU_ASSERT(g_lvol_store == NULL); + + lvol_store_initialize_fail = false; + + /* spdk_lvs_init_cb() fails */ + lvol_store_initialize_cb_fail = true; + + rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete, + NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_lvserrno != 0); + CU_ASSERT(g_lvol_store == NULL); + + lvol_store_initialize_cb_fail = false; + + /* Lvol store is successfully created */ + rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete, + NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_lvserrno == 0); + SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL); + CU_ASSERT(g_lvol_store->bs_dev != NULL); + + lvs = g_lvol_store; + g_lvol_store = NULL; + + /* Bdev with lvol store already claimed */ + rc = vbdev_lvs_create(&g_bdev, "lvs", 0, LVS_CLEAR_WITH_UNMAP, lvol_store_op_with_handle_complete, + NULL); + CU_ASSERT(rc != 0); + CU_ASSERT(g_lvserrno == 0); + CU_ASSERT(g_lvol_store == NULL); + + /* Destruct lvol store */ + vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL); + CU_ASSERT(g_lvserrno == 0); + CU_ASSERT(g_lvol_store == NULL); +} + +static void +ut_vbdev_lvol_get_io_channel(void) +{ + struct spdk_io_channel *ch; + + g_lvol = calloc(1, sizeof(struct spdk_lvol)); + SPDK_CU_ASSERT_FATAL(g_lvol != NULL); + + ch = vbdev_lvol_get_io_channel(g_lvol); + CU_ASSERT(ch == g_ch); + + free(g_lvol); +} + +static void +ut_vbdev_lvol_io_type_supported(void) +{ + struct spdk_lvol *lvol; + bool ret; + + lvol = calloc(1, sizeof(struct spdk_lvol)); + SPDK_CU_ASSERT_FATAL(lvol != NULL); + + g_blob_is_read_only = false; + + /* Supported types */ + ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_READ); + CU_ASSERT(ret == true); + ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_WRITE); + CU_ASSERT(ret == true); + ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_RESET); + CU_ASSERT(ret == true); + ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_UNMAP); + CU_ASSERT(ret == true); + ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_WRITE_ZEROES); + CU_ASSERT(ret == true); + + /* Unsupported types */ + ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_FLUSH); + CU_ASSERT(ret == false); + ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_NVME_ADMIN); + CU_ASSERT(ret == false); + ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_NVME_IO); + CU_ASSERT(ret 
== false); + + g_blob_is_read_only = true; + + /* Supported types */ + ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_READ); + CU_ASSERT(ret == true); + ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_RESET); + CU_ASSERT(ret == true); + + /* Unsupported types */ + ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_WRITE); + CU_ASSERT(ret == false); + ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_UNMAP); + CU_ASSERT(ret == false); + ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_WRITE_ZEROES); + CU_ASSERT(ret == false); + ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_FLUSH); + CU_ASSERT(ret == false); + ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_NVME_ADMIN); + CU_ASSERT(ret == false); + ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_NVME_IO); + CU_ASSERT(ret == false); + + free(lvol); +} + +static void +ut_lvol_read_write(void) +{ + g_io = calloc(1, sizeof(struct spdk_bdev_io)); + SPDK_CU_ASSERT_FATAL(g_io != NULL); + g_base_bdev = calloc(1, sizeof(struct spdk_bdev)); + SPDK_CU_ASSERT_FATAL(g_base_bdev != NULL); + g_lvol = calloc(1, sizeof(struct spdk_lvol)); + SPDK_CU_ASSERT_FATAL(g_lvol != NULL); + + g_io->bdev = g_base_bdev; + g_io->bdev->ctxt = g_lvol; + g_io->u.bdev.offset_blocks = 20; + g_io->u.bdev.num_blocks = 20; + + lvol_read(g_ch, g_io); + CU_ASSERT(g_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); + + lvol_write(g_lvol, g_ch, g_io); + CU_ASSERT(g_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); + + free(g_io); + free(g_base_bdev); + free(g_lvol); +} + +static void +ut_vbdev_lvol_submit_request(void) +{ + struct spdk_lvol request_lvol = {}; + g_io = calloc(1, sizeof(struct spdk_bdev_io)); + SPDK_CU_ASSERT_FATAL(g_io != NULL); + g_base_bdev = calloc(1, sizeof(struct spdk_bdev)); + SPDK_CU_ASSERT_FATAL(g_base_bdev != NULL); + g_io->bdev = g_base_bdev; + + g_io->type = SPDK_BDEV_IO_TYPE_READ; + g_base_bdev->ctxt = &request_lvol; + vbdev_lvol_submit_request(g_ch, g_io); + + free(g_io); + free(g_base_bdev); +} + +static void +ut_lvs_rename(void) +{ + int rc = 0; + int sz = 10; + struct spdk_lvol_store *lvs; + + /* Lvol store is successfully created */ + rc = vbdev_lvs_create(&g_bdev, "old_lvs_name", 0, LVS_CLEAR_WITH_UNMAP, + lvol_store_op_with_handle_complete, NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_lvserrno == 0); + SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL); + CU_ASSERT(g_lvol_store->bs_dev != NULL); + + lvs = g_lvol_store; + g_lvol_store = NULL; + + g_base_bdev = calloc(1, sizeof(*g_base_bdev)); + SPDK_CU_ASSERT_FATAL(g_base_bdev != NULL); + + /* Successfully create lvol, which should be destroyed with lvs later */ + g_lvolerrno = -1; + rc = vbdev_lvol_create(lvs, "lvol", sz, false, LVOL_CLEAR_WITH_DEFAULT, vbdev_lvol_create_complete, + NULL); + CU_ASSERT(rc == 0); + CU_ASSERT(g_lvolerrno == 0); + SPDK_CU_ASSERT_FATAL(g_lvol != NULL); + + /* Trying to rename lvs with lvols created */ + vbdev_lvs_rename(lvs, "new_lvs_name", lvol_store_op_complete, NULL); + CU_ASSERT(g_lvserrno == 0); + CU_ASSERT_STRING_EQUAL(lvs->name, "new_lvs_name"); + CU_ASSERT_STRING_EQUAL(TAILQ_FIRST(&g_lvol->bdev->aliases)->alias, "new_lvs_name/lvol"); + + /* Trying to rename lvs with name already used by another lvs */ + /* This is a bdev_lvol test, so g_lvs_with_name_already_exists simulates + * an existing lvs named 'another_new_lvs_name'; that name itself is never compared */ + g_lvs_with_name_already_exists = true; + vbdev_lvs_rename(lvs, "another_new_lvs_name", lvol_store_op_complete, NULL); +
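+ /* With g_lvs_with_name_already_exists set, the rename stub is expected to report a name
+  * collision, so the vbdev layer should fail with -EEXIST and leave both the lvs name and
+  * the lvol bdev alias unchanged. */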
CU_ASSERT(g_lvserrno == -EEXIST); + CU_ASSERT_STRING_EQUAL(lvs->name, "new_lvs_name"); + CU_ASSERT_STRING_EQUAL(TAILQ_FIRST(&g_lvol->bdev->aliases)->alias, "new_lvs_name/lvol"); + g_lvs_with_name_already_exists = false; + + /* Unload lvol store */ + g_lvol_store = lvs; + vbdev_lvs_destruct(g_lvol_store, lvol_store_op_complete, NULL); + CU_ASSERT(g_lvserrno == 0); + CU_ASSERT(g_lvol_store == NULL); + + free(g_base_bdev->name); + free(g_base_bdev); +} + +int main(int argc, char **argv) +{ + CU_pSuite suite = NULL; + unsigned int num_failures; + + CU_set_error_action(CUEA_ABORT); + CU_initialize_registry(); + + suite = CU_add_suite("lvol", NULL, NULL); + + CU_ADD_TEST(suite, ut_lvs_init); + CU_ADD_TEST(suite, ut_lvol_init); + CU_ADD_TEST(suite, ut_lvol_snapshot); + CU_ADD_TEST(suite, ut_lvol_clone); + CU_ADD_TEST(suite, ut_lvs_destroy); + CU_ADD_TEST(suite, ut_lvs_unload); + CU_ADD_TEST(suite, ut_lvol_resize); + CU_ADD_TEST(suite, ut_lvol_set_read_only); + CU_ADD_TEST(suite, ut_lvol_hotremove); + CU_ADD_TEST(suite, ut_vbdev_lvol_get_io_channel); + CU_ADD_TEST(suite, ut_vbdev_lvol_io_type_supported); + CU_ADD_TEST(suite, ut_lvol_read_write); + CU_ADD_TEST(suite, ut_vbdev_lvol_submit_request); + CU_ADD_TEST(suite, ut_lvol_examine); + CU_ADD_TEST(suite, ut_lvol_rename); + CU_ADD_TEST(suite, ut_lvol_destroy); + CU_ADD_TEST(suite, ut_lvs_rename); + + CU_basic_set_mode(CU_BRM_VERBOSE); + CU_basic_run_tests(); + num_failures = CU_get_number_of_failures(); + CU_cleanup_registry(); + return num_failures; +} diff --git a/src/spdk/test/unit/lib/bdev/vbdev_zone_block.c/.gitignore b/src/spdk/test/unit/lib/bdev/vbdev_zone_block.c/.gitignore new file mode 100644 index 000000000..a1d7547aa --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/vbdev_zone_block.c/.gitignore @@ -0,0 +1 @@ +vbdev_zone_block_ut diff --git a/src/spdk/test/unit/lib/bdev/vbdev_zone_block.c/Makefile b/src/spdk/test/unit/lib/bdev/vbdev_zone_block.c/Makefile new file mode 100644 index 000000000..81a9575d5 --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/vbdev_zone_block.c/Makefile @@ -0,0 +1,38 @@ +# +# BSD LICENSE +# +# Copyright (c) Intel Corporation. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..) + +TEST_FILE = vbdev_zone_block_ut.c + +include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk diff --git a/src/spdk/test/unit/lib/bdev/vbdev_zone_block.c/vbdev_zone_block_ut.c b/src/spdk/test/unit/lib/bdev/vbdev_zone_block.c/vbdev_zone_block_ut.c new file mode 100644 index 000000000..d0ee553e3 --- /dev/null +++ b/src/spdk/test/unit/lib/bdev/vbdev_zone_block.c/vbdev_zone_block_ut.c @@ -0,0 +1,1502 @@ +/*- + * BSD LICENSE + * + * Copyright (c) Intel Corporation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "spdk/stdinc.h" +#include "spdk_cunit.h" +#include "spdk/env.h" +#include "spdk_internal/mock.h" +#include "spdk/thread.h" +#include "common/lib/test_env.c" +#include "bdev/zone_block/vbdev_zone_block.c" +#include "bdev/zone_block/vbdev_zone_block_rpc.c" + +#define BLOCK_CNT (1024ul * 1024ul * 1024ul * 1024ul) +#define BLOCK_SIZE 4096 + +/* Globals */ +uint64_t g_block_cnt; +struct io_output *g_io_output = NULL; +uint32_t g_max_io_size; +uint32_t g_io_output_index; +uint32_t g_io_comp_status; +uint8_t g_rpc_err; +uint8_t g_json_decode_obj_construct; +static TAILQ_HEAD(, spdk_bdev) g_bdev_list = TAILQ_HEAD_INITIALIZER(g_bdev_list); +void *g_rpc_req = NULL; +static struct spdk_thread *g_thread; + +struct io_output { + struct spdk_bdev_desc *desc; + struct spdk_io_channel *ch; + uint64_t offset_blocks; + uint64_t num_blocks; + spdk_bdev_io_completion_cb cb; + void *cb_arg; + enum spdk_bdev_io_type iotype; +}; + +DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module)); +DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc)); +DEFINE_STUB(spdk_json_decode_string, int, (const struct spdk_json_val *val, void *out), 0); +DEFINE_STUB(spdk_json_decode_uint64, int, (const struct spdk_json_val *val, void *out), 0); +DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module)); +DEFINE_STUB(spdk_json_write_name, int, (struct spdk_json_write_ctx *w, const char *name), 0); +DEFINE_STUB(spdk_json_write_object_begin, int, (struct spdk_json_write_ctx *w), 0); +DEFINE_STUB(spdk_json_write_named_string, int, (struct spdk_json_write_ctx *w, + const char *name, const char *val), 0); +DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev, + enum spdk_bdev_io_type io_type), true); +DEFINE_STUB(spdk_json_write_bool, int, (struct spdk_json_write_ctx *w, bool val), 0); +DEFINE_STUB(spdk_json_write_named_object_begin, int, (struct spdk_json_write_ctx *w, + const char *name), 0); +DEFINE_STUB(spdk_json_write_object_end, int, (struct spdk_json_write_ctx *w), 0); +DEFINE_STUB_V(spdk_rpc_register_method, (const char *method, spdk_rpc_method_handler func, + uint32_t state_mask)); +DEFINE_STUB_V(spdk_jsonrpc_end_result, (struct spdk_jsonrpc_request *request, + struct spdk_json_write_ctx *w)); +DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc), + (void *)0); + +static void +set_test_opts(void) +{ + g_max_io_size = 1024; +} + +static void +init_test_globals(uint64_t block_cnt) +{ + g_io_output = calloc(g_max_io_size, sizeof(struct io_output)); + SPDK_CU_ASSERT_FATAL(g_io_output != NULL); + g_io_output_index = 0; + g_block_cnt = block_cnt; +} + +static void +free_test_globals(void) +{ + free(g_io_output); + g_io_output = NULL; +} + +void +spdk_bdev_free_io(struct spdk_bdev_io *bdev_io) +{ + free(bdev_io); +} + +int +spdk_bdev_open(struct spdk_bdev *bdev, bool write, spdk_bdev_remove_cb_t remove_cb, + void *remove_ctx, struct spdk_bdev_desc **_desc) +{ + *_desc = (void *)bdev; + return 0; +} + +struct spdk_bdev * +spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc) +{ + return (void *)desc; +} + +int +spdk_bdev_register(struct spdk_bdev *bdev) +{ + CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(bdev->name)); + TAILQ_INSERT_TAIL(&g_bdev_list, bdev, internal.link); + + return 0; +} + +void +spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg) +{ + CU_ASSERT_EQUAL(spdk_bdev_get_by_name(bdev->name), bdev); + TAILQ_REMOVE(&g_bdev_list, bdev, internal.link); + + 
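+ /* Mimic the real unregister path closely enough for these tests: invoke the vbdev's
+  * destruct callback and then signal completion to the caller. */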
bdev->fn_table->destruct(bdev->ctxt); + + if (cb_fn) { + cb_fn(cb_arg, 0); + } +} + +int spdk_json_write_named_uint64(struct spdk_json_write_ctx *w, const char *name, uint64_t val) +{ + struct rpc_construct_zone_block *req = g_rpc_req; + if (strcmp(name, "zone_capacity") == 0) { + CU_ASSERT(req->zone_capacity == val); + } else if (strcmp(name, "optimal_open_zones") == 0) { + CU_ASSERT(req->optimal_open_zones == val); + } + + return 0; +} + +const char * +spdk_bdev_get_name(const struct spdk_bdev *bdev) +{ + return bdev->name; +} + +bool +spdk_bdev_is_zoned(const struct spdk_bdev *bdev) +{ + return bdev->zoned; +} + +int +spdk_json_write_string(struct spdk_json_write_ctx *w, const char *val) +{ + return 0; +} + +int +spdk_bdev_module_claim_bdev(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, + struct spdk_bdev_module *module) +{ + if (bdev->internal.claim_module != NULL) { + return -1; + } + bdev->internal.claim_module = module; + return 0; +} + +void +spdk_bdev_module_release_bdev(struct spdk_bdev *bdev) +{ + CU_ASSERT(bdev->internal.claim_module != NULL); + bdev->internal.claim_module = NULL; +} + +void +spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status) +{ + g_io_comp_status = ((status == SPDK_BDEV_IO_STATUS_SUCCESS) ? true : false); +} + +int +spdk_json_decode_object(const struct spdk_json_val *values, + const struct spdk_json_object_decoder *decoders, size_t num_decoders, + void *out) +{ + struct rpc_construct_zone_block *construct, *_construct; + struct rpc_delete_zone_block *delete, *_delete; + + if (g_json_decode_obj_construct) { + construct = g_rpc_req; + _construct = out; + + _construct->name = strdup(construct->name); + SPDK_CU_ASSERT_FATAL(_construct->name != NULL); + _construct->base_bdev = strdup(construct->base_bdev); + SPDK_CU_ASSERT_FATAL(_construct->base_bdev != NULL); + _construct->zone_capacity = construct->zone_capacity; + _construct->optimal_open_zones = construct->optimal_open_zones; + } else { + delete = g_rpc_req; + _delete = out; + + _delete->name = strdup(delete->name); + SPDK_CU_ASSERT_FATAL(_delete->name != NULL); + } + + return 0; +} + +struct spdk_json_write_ctx * +spdk_jsonrpc_begin_result(struct spdk_jsonrpc_request *request) +{ + return (void *)1; +} + +static struct spdk_bdev * +create_nvme_bdev(void) +{ + struct spdk_bdev *base_bdev; + char *name = "Nvme0n1"; + base_bdev = calloc(1, sizeof(struct spdk_bdev)); + SPDK_CU_ASSERT_FATAL(base_bdev != NULL); + base_bdev->name = strdup(name); + SPDK_CU_ASSERT_FATAL(base_bdev->name != NULL); + base_bdev->blocklen = BLOCK_SIZE; + base_bdev->blockcnt = g_block_cnt; + base_bdev->write_unit_size = 1; + TAILQ_INSERT_TAIL(&g_bdev_list, base_bdev, internal.link); + + return base_bdev; +} + +static void +base_bdevs_cleanup(void) +{ + struct spdk_bdev *bdev; + struct spdk_bdev *bdev_next; + + if (!TAILQ_EMPTY(&g_bdev_list)) { + TAILQ_FOREACH_SAFE(bdev, &g_bdev_list, internal.link, bdev_next) { + free(bdev->name); + TAILQ_REMOVE(&g_bdev_list, bdev, internal.link); + free(bdev); + } + } +} + +struct spdk_bdev * +spdk_bdev_get_by_name(const char *bdev_name) +{ + struct spdk_bdev *bdev; + + if (!TAILQ_EMPTY(&g_bdev_list)) { + TAILQ_FOREACH(bdev, &g_bdev_list, internal.link) { + if (strcmp(bdev_name, bdev->name) == 0) { + return bdev; + } + } + } + + return NULL; +} + +void +spdk_jsonrpc_send_error_response(struct spdk_jsonrpc_request *request, + int error_code, const char *msg) +{ + g_rpc_err = 1; +} + +void +spdk_jsonrpc_send_error_response_fmt(struct spdk_jsonrpc_request *request, + int 
error_code, const char *fmt, ...) +{ + g_rpc_err = 1; +} + +static void +set_io_output(struct io_output *output, + struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, + uint64_t offset_blocks, uint64_t num_blocks, + spdk_bdev_io_completion_cb cb, void *cb_arg, + enum spdk_bdev_io_type iotype) +{ + output->desc = desc; + output->ch = ch; + output->offset_blocks = offset_blocks; + output->num_blocks = num_blocks; + output->cb = cb; + output->cb_arg = cb_arg; + output->iotype = iotype; +} + +int +spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, + uint64_t offset_blocks, uint64_t num_blocks, + spdk_bdev_io_completion_cb cb, void *cb_arg) +{ + struct io_output *output = &g_io_output[g_io_output_index]; + struct spdk_bdev_io *child_io; + + set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg, + SPDK_BDEV_IO_TYPE_UNMAP); + g_io_output_index++; + + child_io = calloc(1, sizeof(struct spdk_bdev_io)); + SPDK_CU_ASSERT_FATAL(child_io != NULL); + cb(child_io, true, cb_arg); + + return 0; +} + +int +spdk_bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, + struct iovec *iov, int iovcnt, void *md, + uint64_t offset_blocks, uint64_t num_blocks, + spdk_bdev_io_completion_cb cb, void *cb_arg) +{ + struct io_output *output = &g_io_output[g_io_output_index]; + struct spdk_bdev_io *child_io; + + SPDK_CU_ASSERT_FATAL(g_io_output_index < g_max_io_size); + + set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg, + SPDK_BDEV_IO_TYPE_WRITE); + g_io_output_index++; + + child_io = calloc(1, sizeof(struct spdk_bdev_io)); + SPDK_CU_ASSERT_FATAL(child_io != NULL); + child_io->internal.desc = desc; + child_io->type = SPDK_BDEV_IO_TYPE_WRITE; + child_io->u.bdev.iovs = iov; + child_io->u.bdev.iovcnt = iovcnt; + child_io->u.bdev.md_buf = md; + child_io->u.bdev.num_blocks = num_blocks; + child_io->u.bdev.offset_blocks = offset_blocks; + cb(child_io, true, cb_arg); + + return 0; +} + + +int +spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, + struct iovec *iov, int iovcnt, + uint64_t offset_blocks, uint64_t num_blocks, + spdk_bdev_io_completion_cb cb, void *cb_arg) +{ + + return spdk_bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks, num_blocks, + cb, cb_arg); +} + +int +spdk_bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, + struct iovec *iov, int iovcnt, void *md, + uint64_t offset_blocks, uint64_t num_blocks, + spdk_bdev_io_completion_cb cb, void *cb_arg) +{ + struct io_output *output = &g_io_output[g_io_output_index]; + struct spdk_bdev_io *child_io; + + SPDK_CU_ASSERT_FATAL(g_io_output_index < g_max_io_size); + set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg, + SPDK_BDEV_IO_TYPE_READ); + g_io_output_index++; + + child_io = calloc(1, sizeof(struct spdk_bdev_io)); + SPDK_CU_ASSERT_FATAL(child_io != NULL); + cb(child_io, true, cb_arg); + + return 0; +} + +int +spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, + struct iovec *iov, int iovcnt, + uint64_t offset_blocks, uint64_t num_blocks, + spdk_bdev_io_completion_cb cb, void *cb_arg) +{ + + return spdk_bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks, num_blocks, + cb, cb_arg); +} + +static void +verify_config_present(const char *name, bool presence) +{ + struct bdev_zone_block_config *cfg; + bool cfg_found; + + cfg_found = false; + + TAILQ_FOREACH(cfg, &g_bdev_configs, link) { + if (cfg->vbdev_name != NULL) { + if 
(strcmp(name, cfg->vbdev_name) == 0) { + cfg_found = true; + break; + } + } + } + + if (presence == true) { + CU_ASSERT(cfg_found == true); + } else { + CU_ASSERT(cfg_found == false); + } +} + +static void +verify_bdev_present(const char *name, bool presence) +{ + struct bdev_zone_block *bdev; + bool bdev_found = false; + + TAILQ_FOREACH(bdev, &g_bdev_nodes, link) { + if (strcmp(bdev->bdev.name, name) == 0) { + bdev_found = true; + break; + } + } + if (presence == true) { + CU_ASSERT(bdev_found == true); + } else { + CU_ASSERT(bdev_found == false); + } +} + +static void +initialize_create_req(const char *vbdev_name, const char *base_name, + uint64_t zone_capacity, uint64_t optimal_open_zones, bool create_base_bdev) +{ + struct rpc_construct_zone_block *r; + + r = g_rpc_req = calloc(1, sizeof(struct rpc_construct_zone_block)); + SPDK_CU_ASSERT_FATAL(r != NULL); + + r->name = strdup(vbdev_name); + SPDK_CU_ASSERT_FATAL(r->name != NULL); + r->base_bdev = strdup(base_name); + SPDK_CU_ASSERT_FATAL(r->base_bdev != NULL); + r->zone_capacity = zone_capacity; + r->optimal_open_zones = optimal_open_zones; + + if (create_base_bdev == true) { + create_nvme_bdev(); + } + g_rpc_err = 0; + g_json_decode_obj_construct = 1; +} + +static void +free_create_req(void) +{ + struct rpc_construct_zone_block *r = g_rpc_req; + + free(r->name); + free(r->base_bdev); + free(r); + g_rpc_req = NULL; +} + +static void +initialize_delete_req(const char *vbdev_name) +{ + struct rpc_delete_zone_block *r; + + r = g_rpc_req = calloc(1, sizeof(struct rpc_delete_zone_block)); + SPDK_CU_ASSERT_FATAL(r != NULL); + r->name = strdup(vbdev_name); + SPDK_CU_ASSERT_FATAL(r->name != NULL); + + g_rpc_err = 0; + g_json_decode_obj_construct = 0; +} + +static void +free_delete_req(void) +{ + struct rpc_delete_zone_block *r = g_rpc_req; + + free(r->name); + free(r); + g_rpc_req = NULL; +} + +static void +verify_zone_config(bool presence) +{ + struct rpc_construct_zone_block *r = g_rpc_req; + struct bdev_zone_block_config *cfg = NULL; + + TAILQ_FOREACH(cfg, &g_bdev_configs, link) { + if (strcmp(r->name, cfg->vbdev_name) == 0) { + if (presence == false) { + break; + } + CU_ASSERT(strcmp(r->base_bdev, cfg->bdev_name) == 0); + CU_ASSERT(r->zone_capacity == cfg->zone_capacity); + CU_ASSERT(spdk_max(r->optimal_open_zones, 1) == cfg->optimal_open_zones); + break; + } + } + + if (presence) { + CU_ASSERT(cfg != NULL); + } else { + CU_ASSERT(cfg == NULL); + } +} + +static void +verify_zone_bdev(bool presence) +{ + struct rpc_construct_zone_block *r = g_rpc_req; + struct block_zone *zone; + struct bdev_zone_block *bdev; + bool bdev_found = false; + uint32_t i; + uint64_t expected_num_zones; + uint64_t expected_optimal_open_zones; + + TAILQ_FOREACH(bdev, &g_bdev_nodes, link) { + if (strcmp(bdev->bdev.name, r->name) == 0) { + bdev_found = true; + if (presence == false) { + break; + } + + expected_optimal_open_zones = spdk_max(r->optimal_open_zones, 1); + expected_num_zones = g_block_cnt / spdk_align64pow2(r->zone_capacity) / expected_optimal_open_zones; + expected_num_zones *= expected_optimal_open_zones; + + CU_ASSERT(bdev->num_zones == expected_num_zones); + CU_ASSERT(bdev->bdev.zoned == true); + CU_ASSERT(bdev->bdev.blockcnt == expected_num_zones * spdk_align64pow2(r->zone_capacity)); + CU_ASSERT(bdev->bdev.blocklen == BLOCK_SIZE); + CU_ASSERT(bdev->bdev.ctxt == bdev); + CU_ASSERT(bdev->bdev.fn_table == &zone_block_fn_table); + CU_ASSERT(bdev->bdev.module == &bdev_zoned_if); + CU_ASSERT(bdev->bdev.write_unit_size == 1); + 
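+ /* The emulated zone size is the requested capacity rounded up to a power of two
+  * (spdk_align64pow2), and the zone count is truncated to a multiple of
+  * optimal_open_zones. For example, a capacity of 0x1800 blocks would give a zone_size
+  * of 0x2000 blocks (hypothetical value; the tests in this file use power-of-two
+  * capacities, so capacity and zone_size coincide). */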
CU_ASSERT(bdev->bdev.zone_size == spdk_align64pow2(r->zone_capacity)); + CU_ASSERT(bdev->bdev.optimal_open_zones == expected_optimal_open_zones); + CU_ASSERT(bdev->bdev.max_open_zones == 0); + + for (i = 0; i < bdev->num_zones; i++) { + zone = &bdev->zones[i]; + CU_ASSERT(zone->zone_info.state == SPDK_BDEV_ZONE_STATE_FULL); + CU_ASSERT(zone->zone_info.capacity == r->zone_capacity); + } + break; + } + } + + if (presence == true) { + CU_ASSERT(bdev_found == true); + } else { + CU_ASSERT(bdev_found == false); + } +} + +static void +send_create_vbdev(char *vdev_name, char *name, uint64_t zone_capacity, uint64_t optimal_open_zones, + bool create_bdev, bool success) +{ + initialize_create_req(vdev_name, name, zone_capacity, optimal_open_zones, create_bdev); + rpc_zone_block_create(NULL, NULL); + CU_ASSERT(g_rpc_err != success); + verify_zone_config(success); + verify_zone_bdev(success); + free_create_req(); +} + +static void +send_delete_vbdev(char *name, bool success) +{ + initialize_delete_req(name); + rpc_zone_block_delete(NULL, NULL); + verify_config_present(name, false); + verify_bdev_present(name, false); + CU_ASSERT(g_rpc_err != success); + free_delete_req(); +} + +static void +test_cleanup(void) +{ + CU_ASSERT(spdk_thread_is_idle(g_thread)); + zone_block_finish(); + base_bdevs_cleanup(); + free_test_globals(); +} + +static void +test_zone_block_create(void) +{ + struct spdk_bdev *bdev; + char *name = "Nvme0n1"; + size_t num_zones = 16; + size_t zone_capacity = BLOCK_CNT / num_zones; + + init_test_globals(BLOCK_CNT); + CU_ASSERT(zone_block_init() == 0); + + /* Create zoned virtual device before nvme device */ + verify_config_present("zone_dev1", false); + verify_bdev_present("zone_dev1", false); + initialize_create_req("zone_dev1", name, zone_capacity, 1, false); + rpc_zone_block_create(NULL, NULL); + CU_ASSERT(g_rpc_err == 0); + verify_zone_config(true); + verify_zone_bdev(false); + bdev = create_nvme_bdev(); + zone_block_examine(bdev); + verify_zone_bdev(true); + free_create_req(); + + /* Delete bdev */ + send_delete_vbdev("zone_dev1", true); + + /* Create zoned virtual device and verify its correctness */ + verify_config_present("zone_dev1", false); + send_create_vbdev("zone_dev1", name, zone_capacity, 1, false, true); + send_delete_vbdev("zone_dev1", true); + + while (spdk_thread_poll(g_thread, 0, 0) > 0) {} + test_cleanup(); +} + +static void +test_zone_block_create_invalid(void) +{ + char *name = "Nvme0n1"; + size_t num_zones = 8; + size_t zone_capacity = BLOCK_CNT / num_zones; + + init_test_globals(BLOCK_CNT); + CU_ASSERT(zone_block_init() == 0); + + /* Create zoned virtual device and verify its correctness */ + verify_config_present("zone_dev1", false); + verify_bdev_present("zone_dev1", false); + send_create_vbdev("zone_dev1", name, zone_capacity, 1, true, true); + + /* Try to create another zoned virtual device on the same bdev */ + send_create_vbdev("zone_dev2", name, zone_capacity, 1, false, false); + + /* Try to create zoned virtual device on the zoned bdev */ + send_create_vbdev("zone_dev2", "zone_dev1", zone_capacity, 1, false, false); + + /* Unclaim the base bdev */ + send_delete_vbdev("zone_dev1", true); + + /* Try to create zoned virtual device with 0 zone size */ + send_create_vbdev("zone_dev1", name, 0, 1, false, false); + + /* Try to create zoned virtual device with 0 optimal number of zones */ + send_create_vbdev("zone_dev1", name, zone_capacity, 0, false, false); + + while (spdk_thread_poll(g_thread, 0, 0) > 0) {} + test_cleanup(); +} + +static void 
+bdev_io_zone_info_initialize(struct spdk_bdev_io *bdev_io, struct spdk_bdev *bdev, + uint64_t zone_id, uint32_t num_zones) +{ + bdev_io->bdev = bdev; + bdev_io->type = SPDK_BDEV_IO_TYPE_GET_ZONE_INFO; + + bdev_io->u.zone_mgmt.zone_id = zone_id; + + bdev_io->u.zone_mgmt.num_zones = num_zones; + if (num_zones) { + bdev_io->u.zone_mgmt.buf = calloc(num_zones, sizeof(struct spdk_bdev_zone_info)); + SPDK_CU_ASSERT_FATAL(bdev_io->u.zone_mgmt.buf != NULL); + } +} + +static void +bdev_io_zone_initialize(struct spdk_bdev_io *bdev_io, struct spdk_bdev *bdev, + uint64_t zone_id, uint32_t num_zones, uint8_t zone_action) +{ + bdev_io->bdev = bdev; + bdev_io->type = SPDK_BDEV_IO_TYPE_ZONE_MANAGEMENT; + + bdev_io->u.zone_mgmt.zone_action = zone_action; + bdev_io->u.zone_mgmt.zone_id = zone_id; +} + +static void +bdev_io_zone_cleanup(struct spdk_bdev_io *bdev_io) +{ + free(bdev_io->u.zone_mgmt.buf); + free(bdev_io); +} + +static void +bdev_io_initialize(struct spdk_bdev_io *bdev_io, struct spdk_bdev *bdev, + uint64_t lba, uint64_t blocks, int16_t iotype) +{ + bdev_io->bdev = bdev; + bdev_io->u.bdev.offset_blocks = lba; + bdev_io->u.bdev.num_blocks = blocks; + bdev_io->type = iotype; + + if (bdev_io->type == SPDK_BDEV_IO_TYPE_UNMAP || bdev_io->type == SPDK_BDEV_IO_TYPE_FLUSH) { + return; + } + + bdev_io->u.bdev.iovcnt = 1; + bdev_io->u.bdev.iovs = &bdev_io->iov; + bdev_io->u.bdev.iovs->iov_base = calloc(1, bdev_io->u.bdev.num_blocks * BLOCK_SIZE); + SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs->iov_base != NULL); + bdev_io->u.bdev.iovs->iov_len = bdev_io->u.bdev.num_blocks * BLOCK_SIZE; +} + +static void +bdev_io_cleanup(struct spdk_bdev_io *bdev_io) +{ + free(bdev_io->iov.iov_base); + free(bdev_io); +} + +static struct bdev_zone_block * +create_and_get_vbdev(char *vdev_name, char *name, uint64_t num_zones, uint64_t optimal_open_zones, + bool create_bdev) +{ + size_t zone_size = g_block_cnt / num_zones; + struct bdev_zone_block *bdev = NULL; + + send_create_vbdev(vdev_name, name, zone_size, optimal_open_zones, create_bdev, true); + + TAILQ_FOREACH(bdev, &g_bdev_nodes, link) { + if (strcmp(bdev->bdev.name, vdev_name) == 0) { + break; + } + } + + SPDK_CU_ASSERT_FATAL(bdev != NULL); + return bdev; +} + +static void +test_supported_io_types(void) +{ + struct bdev_zone_block *bdev; + char *name = "Nvme0n1"; + uint32_t num_zones = 8; + + init_test_globals(BLOCK_CNT); + CU_ASSERT(zone_block_init() == 0); + + /* Create zone dev */ + bdev = create_and_get_vbdev("zone_dev1", name, num_zones, 1, true); + + CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_ZONE_MANAGEMENT) == true); + CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_ZONE_APPEND) == true); + CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_READ) == true); + CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE) == true); + + CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_NVME_ADMIN) == false); + CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_NVME_IO) == false); + CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_NVME_IO_MD) == false); + CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_UNMAP) == false); + CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_FLUSH) == false); + CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_RESET) == false); + CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE_ZEROES) == false); + CU_ASSERT(zone_block_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_ZCOPY) == false); + + 
send_delete_vbdev("zone_dev1", true); + while (spdk_thread_poll(g_thread, 0, 0) > 0) {} + test_cleanup(); +} + +static void +send_zone_info(struct bdev_zone_block *bdev, struct spdk_io_channel *ch, uint64_t zone_id, + uint64_t wp, + enum spdk_bdev_zone_state state, uint32_t output_index, bool success) +{ + struct spdk_bdev_io *bdev_io; + struct spdk_bdev_zone_info *info; + + bdev_io = calloc(1, sizeof(struct spdk_bdev_io)); + SPDK_CU_ASSERT_FATAL(bdev_io != NULL); + bdev_io_zone_info_initialize(bdev_io, &bdev->bdev, zone_id, 1); + memset(g_io_output, 0, (g_max_io_size * sizeof(struct io_output))); + g_io_output_index = output_index; + + g_io_comp_status = !success; + zone_block_submit_request(ch, bdev_io); + CU_ASSERT(g_io_comp_status == success); + + if (success) { + info = (struct spdk_bdev_zone_info *)bdev_io->u.zone_mgmt.buf; + CU_ASSERT(info->zone_id == zone_id); + CU_ASSERT(info->capacity == bdev->zone_capacity); + CU_ASSERT(info->write_pointer == wp); + CU_ASSERT(info->state == state); + } + + bdev_io_zone_cleanup(bdev_io); +} + +static void +test_get_zone_info(void) +{ + struct spdk_io_channel *ch; + struct bdev_zone_block *bdev; + struct spdk_bdev_io *bdev_io; + char *name = "Nvme0n1"; + uint32_t num_zones = 8, i; + struct spdk_bdev_zone_info *info; + + init_test_globals(BLOCK_CNT); + CU_ASSERT(zone_block_init() == 0); + + /* Create zone dev */ + bdev = create_and_get_vbdev("zone_dev1", name, num_zones, 1, true); + + ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct zone_block_io_channel)); + SPDK_CU_ASSERT_FATAL(ch != NULL); + + /* Get info about each zone */ + for (i = 0; i < num_zones; i++) { + send_zone_info(bdev, ch, i * bdev->bdev.zone_size, + i * bdev->bdev.zone_size + bdev->zone_capacity, SPDK_BDEV_ZONE_STATE_FULL, 0, true); + } + + /* Send info asking for 0 zones */ + bdev_io = calloc(1, sizeof(struct spdk_bdev_io)); + SPDK_CU_ASSERT_FATAL(bdev_io != NULL); + bdev_io_zone_info_initialize(bdev_io, &bdev->bdev, 0, 0); + memset(g_io_output, 0, (g_max_io_size * sizeof(struct io_output))); + g_io_output_index = 0; + zone_block_submit_request(ch, bdev_io); + CU_ASSERT(g_io_comp_status); + bdev_io_zone_cleanup(bdev_io); + + /* Send info asking for all zones */ + bdev_io = calloc(1, sizeof(struct spdk_bdev_io)); + SPDK_CU_ASSERT_FATAL(bdev_io != NULL); + bdev_io_zone_info_initialize(bdev_io, &bdev->bdev, 0, num_zones); + memset(g_io_output, 0, (g_max_io_size * sizeof(struct io_output))); + g_io_output_index = 0; + zone_block_submit_request(ch, bdev_io); + CU_ASSERT(g_io_comp_status); + + for (i = 0; i < num_zones; i++) { + info = &(((struct spdk_bdev_zone_info *)bdev_io->u.zone_mgmt.buf)[i]); + CU_ASSERT(info->zone_id == i * bdev->bdev.zone_size); + CU_ASSERT(info->capacity == bdev->zone_capacity); + CU_ASSERT(info->write_pointer == i * bdev->bdev.zone_size + bdev->zone_capacity); + CU_ASSERT(info->state == SPDK_BDEV_ZONE_STATE_FULL); + } + bdev_io_zone_cleanup(bdev_io); + + /* Send info asking for too many zones */ + bdev_io = calloc(1, sizeof(struct spdk_bdev_io)); + SPDK_CU_ASSERT_FATAL(bdev_io != NULL); + bdev_io_zone_info_initialize(bdev_io, &bdev->bdev, 0, num_zones + 1); + memset(g_io_output, 0, (g_max_io_size * sizeof(struct io_output))); + g_io_output_index = 0; + zone_block_submit_request(ch, bdev_io); + CU_ASSERT(!g_io_comp_status); + bdev_io_zone_cleanup(bdev_io); + + /* Send info with misaligned start LBA */ + send_zone_info(bdev, ch, 1, 0, SPDK_BDEV_ZONE_STATE_FULL, 0, false); + + /* Send info with too high LBA */ + send_zone_info(bdev, ch, num_zones * 
bdev->bdev.zone_size, 0, SPDK_BDEV_ZONE_STATE_FULL, 0, + false); + + /* Delete zone dev */ + send_delete_vbdev("zone_dev1", true); + + while (spdk_thread_poll(g_thread, 0, 0) > 0) {} + free(ch); + + test_cleanup(); +} + +static void +send_zone_management(struct bdev_zone_block *bdev, struct spdk_io_channel *ch, uint64_t zone_id, + uint32_t output_index, enum spdk_bdev_zone_action action, bool success) +{ + struct spdk_bdev_io *bdev_io; + + bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct zone_block_io)); + SPDK_CU_ASSERT_FATAL(bdev_io != NULL); + bdev_io_zone_initialize(bdev_io, &bdev->bdev, zone_id, 1, action); + memset(g_io_output, 0, (g_max_io_size * sizeof(struct io_output))); + g_io_output_index = output_index; + + g_io_comp_status = !success; + zone_block_submit_request(ch, bdev_io); + + CU_ASSERT(g_io_comp_status == success); + bdev_io_zone_cleanup(bdev_io); +} + +static void +send_reset_zone(struct bdev_zone_block *bdev, struct spdk_io_channel *ch, uint64_t zone_id, + uint32_t output_index, bool success) +{ + send_zone_management(bdev, ch, zone_id, output_index, SPDK_BDEV_ZONE_RESET, success); +} + +static void +send_open_zone(struct bdev_zone_block *bdev, struct spdk_io_channel *ch, uint64_t zone_id, + uint32_t output_index, bool success) +{ + send_zone_management(bdev, ch, zone_id, output_index, SPDK_BDEV_ZONE_OPEN, success); +} + +static void +send_close_zone(struct bdev_zone_block *bdev, struct spdk_io_channel *ch, uint64_t zone_id, + uint32_t output_index, bool success) +{ + send_zone_management(bdev, ch, zone_id, output_index, SPDK_BDEV_ZONE_CLOSE, success); +} + +static void +send_finish_zone(struct bdev_zone_block *bdev, struct spdk_io_channel *ch, uint64_t zone_id, + uint32_t output_index, bool success) +{ + send_zone_management(bdev, ch, zone_id, output_index, SPDK_BDEV_ZONE_FINISH, success); +} + +static void +test_reset_zone(void) +{ + struct spdk_io_channel *ch; + struct bdev_zone_block *bdev; + char *name = "Nvme0n1"; + uint32_t num_zones = 16; + uint64_t zone_id; + uint32_t output_index = 0; + + init_test_globals(BLOCK_CNT); + CU_ASSERT(zone_block_init() == 0); + + /* Create zone dev */ + bdev = create_and_get_vbdev("zone_dev1", name, num_zones, 1, true); + + ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct zone_block_io_channel)); + SPDK_CU_ASSERT_FATAL(ch != NULL); + + /* Send reset to zone 0 */ + zone_id = 0; + send_reset_zone(bdev, ch, zone_id, output_index, true); + send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_EMPTY, output_index, true); + + /* Send reset to last zone */ + zone_id = (num_zones - 1) * bdev->bdev.zone_size; + send_reset_zone(bdev, ch, zone_id, output_index, true); + send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_EMPTY, output_index, true); + + /* Send reset with misaligned LBA */ + zone_id = 1; + send_reset_zone(bdev, ch, zone_id, output_index, false); + + /* Send reset to non-existing zone */ + zone_id = num_zones * bdev->bdev.zone_size; + send_reset_zone(bdev, ch, zone_id, output_index, false); + + /* Send reset to already resetted zone */ + zone_id = 0; + send_reset_zone(bdev, ch, zone_id, output_index, true); + send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_EMPTY, output_index, true); + + /* Delete zone dev */ + send_delete_vbdev("zone_dev1", true); + + while (spdk_thread_poll(g_thread, 0, 0) > 0) {} + free(ch); + + test_cleanup(); +} + +static void +send_write_zone(struct bdev_zone_block *bdev, struct spdk_io_channel *ch, uint64_t lba, + uint64_t blocks, uint32_t 
+{
+	struct spdk_bdev_io *bdev_io;
+
+	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct zone_block_io));
+	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+	bdev_io_initialize(bdev_io, &bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_WRITE);
+	memset(g_io_output, 0, (g_max_io_size * sizeof(struct io_output)));
+	g_io_output_index = output_index;
+
+	g_io_comp_status = !success;
+	zone_block_submit_request(ch, bdev_io);
+
+	CU_ASSERT(g_io_comp_status == success);
+	bdev_io_cleanup(bdev_io);
+}
+
+static void
+send_read_zone(struct bdev_zone_block *bdev, struct spdk_io_channel *ch, uint64_t lba,
+	       uint64_t blocks, uint32_t output_index, bool success)
+{
+	struct spdk_bdev_io *bdev_io;
+
+	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct zone_block_io));
+	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+	bdev_io_initialize(bdev_io, &bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_READ);
+	memset(g_io_output, 0, (g_max_io_size * sizeof(struct io_output)));
+	g_io_output_index = output_index;
+
+	g_io_comp_status = !success;
+	zone_block_submit_request(ch, bdev_io);
+
+	CU_ASSERT(g_io_comp_status == success);
+	bdev_io_cleanup(bdev_io);
+}
+
+static void
+send_append_zone(struct bdev_zone_block *bdev, struct spdk_io_channel *ch, uint64_t lba,
+		 uint64_t blocks, uint32_t output_index, bool success, uint64_t wp)
+{
+	struct spdk_bdev_io *bdev_io;
+
+	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct zone_block_io));
+	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+	bdev_io_initialize(bdev_io, &bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_ZONE_APPEND);
+	memset(g_io_output, 0, (g_max_io_size * sizeof(struct io_output)));
+	g_io_output_index = output_index;
+
+	g_io_comp_status = !success;
+	zone_block_submit_request(ch, bdev_io);
+
+	CU_ASSERT(g_io_comp_status == success);
+	if (success) {
+		CU_ASSERT(bdev_io->u.bdev.offset_blocks == wp);
+	}
+	bdev_io_cleanup(bdev_io);
+}
+
+static void
+test_open_zone(void)
+{
+	struct spdk_io_channel *ch;
+	struct bdev_zone_block *bdev;
+	char *name = "Nvme0n1";
+	uint32_t num_zones = 16;
+	uint64_t zone_id;
+	uint32_t output_index = 0, i;
+
+	init_test_globals(BLOCK_CNT);
+	CU_ASSERT(zone_block_init() == 0);
+
+	/* Create zone dev */
+	bdev = create_and_get_vbdev("zone_dev1", name, num_zones, 1, true);
+
+	ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct zone_block_io_channel));
+	SPDK_CU_ASSERT_FATAL(ch != NULL);
+
+	/* Try to open full zone */
+	zone_id = 0;
+	send_open_zone(bdev, ch, zone_id, output_index, false);
+
+	/* Open all zones */
+	for (i = 0; i < num_zones; i++) {
+		zone_id = i * bdev->bdev.zone_size;
+		send_reset_zone(bdev, ch, zone_id, output_index, true);
+		send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_EMPTY, output_index, true);
+	}
+	for (i = 0; i < num_zones; i++) {
+		zone_id = i * bdev->bdev.zone_size;
+		send_open_zone(bdev, ch, zone_id, output_index, true);
+		send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_OPEN, output_index, true);
+	}
+
+	/* Reset one of the zones and open it again */
+	zone_id = 0;
+	send_reset_zone(bdev, ch, zone_id, output_index, true);
+	send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_EMPTY, output_index, true);
+	send_open_zone(bdev, ch, zone_id, output_index, true);
+	send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_OPEN, output_index, true);
+
+	/* Send open with misaligned LBA */
+	zone_id = 0;
+	send_reset_zone(bdev, ch, zone_id, output_index, true);
+	zone_id = 1;
+	send_open_zone(bdev, ch, zone_id, output_index, false);
+
+	/* Send open to non-existing zone */
+	zone_id = num_zones * bdev->bdev.zone_size;
+	send_open_zone(bdev, ch, zone_id, output_index, false);
+
+	/* Send open to already opened zone */
+	zone_id = bdev->bdev.zone_size;
+	send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_OPEN, output_index, true);
+	send_open_zone(bdev, ch, zone_id, output_index, true);
+	send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_OPEN, output_index, true);
+
+	/* Delete zone dev */
+	send_delete_vbdev("zone_dev1", true);
+
+	while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
+	free(ch);
+
+	test_cleanup();
+}
+
+static void
+test_zone_write(void)
+{
+	struct spdk_io_channel *ch;
+	struct bdev_zone_block *bdev;
+	char *name = "Nvme0n1";
+	uint32_t num_zones = 20;
+	uint64_t zone_id, lba, block_len;
+	uint32_t output_index = 0, i;
+
+	init_test_globals(20 * 1024ul);
+	CU_ASSERT(zone_block_init() == 0);
+
+	/* Create zone dev */
+	bdev = create_and_get_vbdev("zone_dev1", name, num_zones, 1, true);
+
+	ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct zone_block_io_channel));
+	SPDK_CU_ASSERT_FATAL(ch != NULL);
+
+	/* Write to full zone */
+	lba = 0;
+	send_write_zone(bdev, ch, lba, 1, output_index, false);
+
+	/* Write out of device range */
+	lba = g_block_cnt;
+	send_write_zone(bdev, ch, lba, 1, output_index, false);
+
+	/* Write 1 sector to zone 0 */
+	lba = 0;
+	send_reset_zone(bdev, ch, lba, output_index, true);
+	send_write_zone(bdev, ch, lba, 1, output_index, true);
+	send_zone_info(bdev, ch, lba, 1, SPDK_BDEV_ZONE_STATE_OPEN, output_index, true);
+
+	/* Write to another zone */
+	lba = bdev->bdev.zone_size;
+	send_reset_zone(bdev, ch, lba, output_index, true);
+	send_write_zone(bdev, ch, lba, 5, output_index, true);
+	send_zone_info(bdev, ch, lba, lba + 5, SPDK_BDEV_ZONE_STATE_OPEN, output_index, true);
+
+	/* Fill zone 0 and verify zone state change */
+	block_len = 15;
+	send_write_zone(bdev, ch, 1, block_len, output_index, true);
+	block_len = 16;
+	for (i = block_len; i < bdev->bdev.zone_size; i += block_len) {
+		send_write_zone(bdev, ch, i, block_len, output_index, true);
+	}
+	send_zone_info(bdev, ch, 0, bdev->bdev.zone_size, SPDK_BDEV_ZONE_STATE_FULL, output_index,
+		       true);
+
+	/* Write to wrong write pointer */
+	lba = bdev->bdev.zone_size;
+	send_write_zone(bdev, ch, lba + 7, 1, output_index, false);
+	/* Write to already written sectors */
+	send_write_zone(bdev, ch, lba, 1, output_index, false);
+
+	/* Write to two zones at once */
+	for (i = 0; i < num_zones; i++) {
+		zone_id = i * bdev->bdev.zone_size;
+		send_reset_zone(bdev, ch, zone_id, output_index, true);
+		send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_EMPTY, output_index, true);
+	}
+	block_len = 16;
+	for (i = 0; i < bdev->bdev.zone_size - block_len; i += block_len) {
+		send_write_zone(bdev, ch, i, block_len, output_index, true);
+	}
+	send_write_zone(bdev, ch, bdev->bdev.zone_size - block_len, 32, output_index, false);
+
+	/* Delete zone dev */
+	send_delete_vbdev("zone_dev1", true);
+
+	while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
+	free(ch);
+
+	test_cleanup();
+}
+
+static void
+test_zone_read(void)
+{
+	struct spdk_io_channel *ch;
+	struct bdev_zone_block *bdev;
+	char *name = "Nvme0n1";
+	uint32_t num_zones = 20;
+	uint64_t lba, block_len;
+	uint32_t output_index = 0;
+
+	init_test_globals(20 * 1024ul);
+	CU_ASSERT(zone_block_init() == 0);
+
+	/* Create zone dev */
+	bdev = create_and_get_vbdev("zone_dev1", name, num_zones, 1, true);
+
+	ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct zone_block_io_channel));
+	SPDK_CU_ASSERT_FATAL(ch != NULL);
+
+	/* Read out of device range */
+	block_len = 16;
+	lba = g_block_cnt - block_len / 2;
+	send_read_zone(bdev, ch, lba, block_len, output_index, false);
+
+	block_len = 1;
+	lba = g_block_cnt;
+	send_read_zone(bdev, ch, lba, block_len, output_index, false);
+
+	/* Read from full zone */
+	lba = 0;
+	send_read_zone(bdev, ch, lba, 1, output_index, true);
+
+	/* Read from empty zone */
+	send_reset_zone(bdev, ch, lba, output_index, true);
+	send_read_zone(bdev, ch, lba, 1, output_index, true);
+
+	/* Read written sectors from open zone */
+	send_write_zone(bdev, ch, lba, 1, output_index, true);
+	send_read_zone(bdev, ch, lba, 1, output_index, true);
+
+	/* Read partially written sectors from open zone */
+	send_read_zone(bdev, ch, lba, 2, output_index, true);
+
+	/* Read unwritten sectors from open zone */
+	lba = 2;
+	send_read_zone(bdev, ch, lba, 1, output_index, true);
+
+	/* Read from two zones at once */
+	block_len = 16;
+	lba = bdev->bdev.zone_size - block_len / 2;
+	send_read_zone(bdev, ch, lba, block_len, output_index, false);
+
+	/* Delete zone dev */
+	send_delete_vbdev("zone_dev1", true);
+
+	while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
+	free(ch);
+	test_cleanup();
+}
+
+static void
+test_close_zone(void)
+{
+	struct spdk_io_channel *ch;
+	struct bdev_zone_block *bdev;
+	char *name = "Nvme0n1";
+	uint32_t num_zones = 20;
+	uint64_t zone_id;
+	uint32_t output_index = 0;
+
+	init_test_globals(20 * 1024ul);
+	CU_ASSERT(zone_block_init() == 0);
+
+	/* Create zone dev */
+	bdev = create_and_get_vbdev("zone_dev1", name, num_zones, 1, true);
+
+	ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct zone_block_io_channel));
+	SPDK_CU_ASSERT_FATAL(ch != NULL);
+
+	/* Try to close a full zone */
+	zone_id = 0;
+	send_close_zone(bdev, ch, zone_id, output_index, false);
+
+	/* Try to close an empty zone */
+	send_reset_zone(bdev, ch, zone_id, output_index, true);
+	send_close_zone(bdev, ch, zone_id, output_index, false);
+
+	/* Close an open zone */
+	send_open_zone(bdev, ch, zone_id, output_index, true);
+	send_close_zone(bdev, ch, zone_id, output_index, true);
+	send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_CLOSED, output_index, true);
+
+	/* Close a closed zone */
+	send_close_zone(bdev, ch, zone_id, output_index, true);
+	send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_CLOSED, output_index, true);
+
+	/* Send close to last zone */
+	zone_id = (num_zones - 1) * bdev->bdev.zone_size;
+	send_reset_zone(bdev, ch, zone_id, output_index, true);
+	send_open_zone(bdev, ch, zone_id, output_index, true);
+	send_close_zone(bdev, ch, zone_id, output_index, true);
+	send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_CLOSED, output_index, true);
+
+	/* Send close with misaligned LBA */
+	zone_id = 1;
+	send_close_zone(bdev, ch, zone_id, output_index, false);
+
+	/* Send close to non-existing zone */
+	zone_id = num_zones * bdev->bdev.zone_size;
+	send_close_zone(bdev, ch, zone_id, output_index, false);
+
+	/* Delete zone dev */
+	send_delete_vbdev("zone_dev1", true);
+
+	while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
+	free(ch);
+	test_cleanup();
+}
+
+static void
+test_finish_zone(void)
+{
+	struct spdk_io_channel *ch;
+	struct bdev_zone_block *bdev;
+	char *name = "Nvme0n1";
+	uint32_t num_zones = 20;
+	uint64_t zone_id, wp;
+	uint32_t output_index = 0;
+
+	init_test_globals(20 * 1024ul);
+	CU_ASSERT(zone_block_init() == 0);
+
+	/* Create zone dev */
+	bdev = create_and_get_vbdev("zone_dev1", name, num_zones, 1, true);
+
+	ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct zone_block_io_channel));
+	SPDK_CU_ASSERT_FATAL(ch != NULL);
+
+	/* Reset an unused zone */
+	send_reset_zone(bdev, ch, bdev->bdev.zone_size, output_index, true);
+
+	/* Finish a full zone */
+	zone_id = 0;
+	wp = bdev->bdev.zone_size;
+	send_finish_zone(bdev, ch, zone_id, output_index, true);
+	send_zone_info(bdev, ch, zone_id, wp, SPDK_BDEV_ZONE_STATE_FULL, output_index, true);
+
+	/* Finish an empty zone */
+	send_reset_zone(bdev, ch, zone_id, output_index, true);
+	send_finish_zone(bdev, ch, zone_id, output_index, true);
+	send_zone_info(bdev, ch, zone_id, wp, SPDK_BDEV_ZONE_STATE_FULL, output_index, true);
+
+	/* Finish an open zone */
+	send_reset_zone(bdev, ch, zone_id, output_index, true);
+	send_write_zone(bdev, ch, zone_id, 1, output_index, true);
+	send_finish_zone(bdev, ch, zone_id, output_index, true);
+	send_zone_info(bdev, ch, zone_id, wp, SPDK_BDEV_ZONE_STATE_FULL, output_index, true);
+
+	/* Send finish with misaligned LBA */
+	zone_id = 1;
+	send_finish_zone(bdev, ch, zone_id, output_index, false);
+
+	/* Send finish to non-existing zone */
+	zone_id = num_zones * bdev->bdev.zone_size;
+	send_finish_zone(bdev, ch, zone_id, output_index, false);
+
+	/* Make sure unused zone wasn't written to */
+	zone_id = bdev->bdev.zone_size;
+	send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_EMPTY, output_index, true);
+
+	/* Delete zone dev */
+	send_delete_vbdev("zone_dev1", true);
+
+	while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
+	free(ch);
+
+	test_cleanup();
+}
+
+static void
+test_append_zone(void)
+{
+	struct spdk_io_channel *ch;
+	struct bdev_zone_block *bdev;
+	char *name = "Nvme0n1";
+	uint32_t num_zones = 20;
+	uint64_t zone_id, block_len, i;
+	uint32_t output_index = 0;
+
+	init_test_globals(20 * 1024ul);
+	CU_ASSERT(zone_block_init() == 0);
+
+	/* Create zone dev */
+	bdev = create_and_get_vbdev("zone_dev1", name, num_zones, 1, true);
+
+	ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct zone_block_io_channel));
+	SPDK_CU_ASSERT_FATAL(ch != NULL);
+
+	/* Append to full zone */
+	zone_id = 0;
+	send_append_zone(bdev, ch, zone_id, 1, output_index, false, 0);
+
+	/* Append out of device range */
+	zone_id = g_block_cnt;
+	send_append_zone(bdev, ch, zone_id, 1, output_index, false, 0);
+
+	/* Append 1 sector to zone 0 */
+	zone_id = 0;
+	send_reset_zone(bdev, ch, zone_id, output_index, true);
+	send_append_zone(bdev, ch, zone_id, 1, output_index, true, zone_id);
+	send_zone_info(bdev, ch, zone_id, 1, SPDK_BDEV_ZONE_STATE_OPEN, output_index, true);
+
+	/* Append to another zone */
+	zone_id = bdev->bdev.zone_size;
+	send_reset_zone(bdev, ch, zone_id, output_index, true);
+	send_append_zone(bdev, ch, zone_id, 5, output_index, true, zone_id);
+	send_zone_info(bdev, ch, zone_id, zone_id + 5, SPDK_BDEV_ZONE_STATE_OPEN, output_index, true);
+
+	/* Fill zone 0 and verify zone state change */
+	zone_id = 0;
+	block_len = 15;
+	send_append_zone(bdev, ch, zone_id, block_len, output_index, true, 1);
+	block_len++;
+	for (i = block_len; i < bdev->zone_capacity; i += block_len) {
+		send_append_zone(bdev, ch, zone_id, block_len, output_index, true, i);
+	}
+	send_zone_info(bdev, ch, zone_id, bdev->bdev.zone_size, SPDK_BDEV_ZONE_STATE_FULL, output_index,
+		       true);
+
+	/* Append to two zones at once */
+	for (i = 0; i < num_zones; i++) {
+		zone_id = i * bdev->bdev.zone_size;
+		send_reset_zone(bdev, ch, zone_id, output_index, true);
+		send_zone_info(bdev, ch, zone_id, zone_id, SPDK_BDEV_ZONE_STATE_EMPTY, output_index, true);
+	}
+
+	zone_id = 0;
+	block_len = 16;
+	for (i = 0; i < bdev->zone_capacity - block_len; i += block_len) {
+		send_append_zone(bdev, ch, zone_id, block_len, output_index, true, zone_id + i);
+	}
+	send_append_zone(bdev, ch, zone_id, 32, output_index, false, 0);
+	/* Delete zone dev */
+	send_delete_vbdev("zone_dev1", true);
+
+	while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
+	free(ch);
+
+	test_cleanup();
+}
+
+int main(int argc, char **argv)
+{
+	CU_pSuite suite = NULL;
+	unsigned int num_failures;
+
+	CU_set_error_action(CUEA_ABORT);
+	CU_initialize_registry();
+
+	suite = CU_add_suite("zone_block", NULL, NULL);
+
+	CU_ADD_TEST(suite, test_zone_block_create);
+	CU_ADD_TEST(suite, test_zone_block_create_invalid);
+	CU_ADD_TEST(suite, test_get_zone_info);
+	CU_ADD_TEST(suite, test_supported_io_types);
+	CU_ADD_TEST(suite, test_reset_zone);
+	CU_ADD_TEST(suite, test_open_zone);
+	CU_ADD_TEST(suite, test_zone_write);
+	CU_ADD_TEST(suite, test_zone_read);
+	CU_ADD_TEST(suite, test_close_zone);
+	CU_ADD_TEST(suite, test_finish_zone);
+	CU_ADD_TEST(suite, test_append_zone);
+
+	g_thread = spdk_thread_create("test", NULL);
+	spdk_set_thread(g_thread);
+
+	CU_basic_set_mode(CU_BRM_VERBOSE);
+	set_test_opts();
+	CU_basic_run_tests();
+	num_failures = CU_get_number_of_failures();
+
+	spdk_thread_exit(g_thread);
+	while (!spdk_thread_is_exited(g_thread)) {
+		spdk_thread_poll(g_thread, 0, 0);
+	}
+	spdk_thread_destroy(g_thread);
+
+	CU_cleanup_registry();
+
+	return num_failures;
+}