diff options
author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-07 18:45:59 +0000 |
---|---|---|
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-07 18:45:59 +0000 |
commit | 19fcec84d8d7d21e796c7624e521b60d28ee21ed (patch) | |
tree | 42d26aa27d1e3f7c0b8bd3fd14e7d7082f5008dc /src/spdk/test/unit/lib/blob | |
parent | Initial commit. (diff) | |
download | ceph-upstream/16.2.11+ds.tar.xz ceph-upstream/16.2.11+ds.zip |
Adding upstream version 16.2.11+ds.upstream/16.2.11+dsupstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/test/unit/lib/blob')
-rw-r--r-- | src/spdk/test/unit/lib/blob/Makefile | 49 | ||||
-rw-r--r-- | src/spdk/test/unit/lib/blob/blob.c/.gitignore | 1 | ||||
-rw-r--r-- | src/spdk/test/unit/lib/blob/blob.c/Makefile | 38 | ||||
-rw-r--r-- | src/spdk/test/unit/lib/blob/blob.c/blob_ut.c | 6693 | ||||
-rw-r--r-- | src/spdk/test/unit/lib/blob/bs_dev_common.c | 395 | ||||
-rw-r--r-- | src/spdk/test/unit/lib/blob/bs_scheduler.c | 87 |
6 files changed, 7263 insertions, 0 deletions
diff --git a/src/spdk/test/unit/lib/blob/Makefile b/src/spdk/test/unit/lib/blob/Makefile new file mode 100644 index 000000000..a039a423e --- /dev/null +++ b/src/spdk/test/unit/lib/blob/Makefile @@ -0,0 +1,49 @@ +# +# BSD LICENSE +# +# Copyright (c) Intel Corporation. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..) 
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk + +CUNIT_VERSION = $(shell echo "\#include <CUnit/CUnit.h>" | $(CC) -E -dM - | sed -n -e 's/.*VERSION "\([0-9\.\-]*\).*/\1/p') +ifeq ($(CUNIT_VERSION),2.1-3) +DIRS-y = blob.c +else +$(warning "blob_ut.c compilation skipped, only CUnit version 2.1-3 is supported") +endif + +.PHONY: all clean $(DIRS-y) + +all: $(DIRS-y) +clean: $(DIRS-y) + +include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk diff --git a/src/spdk/test/unit/lib/blob/blob.c/.gitignore b/src/spdk/test/unit/lib/blob/blob.c/.gitignore new file mode 100644 index 000000000..553f54655 --- /dev/null +++ b/src/spdk/test/unit/lib/blob/blob.c/.gitignore @@ -0,0 +1 @@ +blob_ut diff --git a/src/spdk/test/unit/lib/blob/blob.c/Makefile b/src/spdk/test/unit/lib/blob/blob.c/Makefile new file mode 100644 index 000000000..fc449a5c8 --- /dev/null +++ b/src/spdk/test/unit/lib/blob/blob.c/Makefile @@ -0,0 +1,38 @@ +# +# BSD LICENSE +# +# Copyright (c) Intel Corporation. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..) + +TEST_FILE = blob_ut.c + +include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk diff --git a/src/spdk/test/unit/lib/blob/blob.c/blob_ut.c b/src/spdk/test/unit/lib/blob/blob.c/blob_ut.c new file mode 100644 index 000000000..6e51842e3 --- /dev/null +++ b/src/spdk/test/unit/lib/blob/blob.c/blob_ut.c @@ -0,0 +1,6693 @@ +/*- + * BSD LICENSE + * + * Copyright (c) Intel Corporation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "spdk/stdinc.h" + +#include "spdk_cunit.h" +#include "spdk/blob.h" +#include "spdk/string.h" +#include "spdk_internal/thread.h" + +#include "common/lib/ut_multithread.c" +#include "../bs_dev_common.c" +#include "blob/blobstore.c" +#include "blob/request.c" +#include "blob/zeroes.c" +#include "blob/blob_bs_dev.c" + +struct spdk_blob_store *g_bs; +spdk_blob_id g_blobid; +struct spdk_blob *g_blob; +int g_bserrno; +struct spdk_xattr_names *g_names; +int g_done; +char *g_xattr_names[] = {"first", "second", "third"}; +char *g_xattr_values[] = {"one", "two", "three"}; +uint64_t g_ctx = 1729; +bool g_use_extent_table = false; + +struct spdk_bs_super_block_ver1 { + uint8_t signature[8]; + uint32_t version; + uint32_t length; + uint32_t clean; /* If there was a clean shutdown, this is 1. 
*/ + spdk_blob_id super_blob; + + uint32_t cluster_size; /* In bytes */ + + uint32_t used_page_mask_start; /* Offset from beginning of disk, in pages */ + uint32_t used_page_mask_len; /* Count, in pages */ + + uint32_t used_cluster_mask_start; /* Offset from beginning of disk, in pages */ + uint32_t used_cluster_mask_len; /* Count, in pages */ + + uint32_t md_start; /* Offset from beginning of disk, in pages */ + uint32_t md_len; /* Count, in pages */ + + uint8_t reserved[4036]; + uint32_t crc; +} __attribute__((packed)); +SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block_ver1) == 0x1000, "Invalid super block size"); + +static struct spdk_blob *ut_blob_create_and_open(struct spdk_blob_store *bs, + struct spdk_blob_opts *blob_opts); +static void ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob); +static void suite_blob_setup(void); +static void suite_blob_cleanup(void); + +static void +_get_xattr_value(void *arg, const char *name, + const void **value, size_t *value_len) +{ + uint64_t i; + + SPDK_CU_ASSERT_FATAL(value_len != NULL); + SPDK_CU_ASSERT_FATAL(value != NULL); + CU_ASSERT(arg == &g_ctx); + + for (i = 0; i < sizeof(g_xattr_names); i++) { + if (!strcmp(name, g_xattr_names[i])) { + *value_len = strlen(g_xattr_values[i]); + *value = g_xattr_values[i]; + break; + } + } +} + +static void +_get_xattr_value_null(void *arg, const char *name, + const void **value, size_t *value_len) +{ + SPDK_CU_ASSERT_FATAL(value_len != NULL); + SPDK_CU_ASSERT_FATAL(value != NULL); + CU_ASSERT(arg == NULL); + + *value_len = 0; + *value = NULL; +} + +static int +_get_snapshots_count(struct spdk_blob_store *bs) +{ + struct spdk_blob_list *snapshot = NULL; + int count = 0; + + TAILQ_FOREACH(snapshot, &bs->snapshots, link) { + count += 1; + } + + return count; +} + +static void +ut_spdk_blob_opts_init(struct spdk_blob_opts *opts) +{ + spdk_blob_opts_init(opts); + opts->use_extent_table = g_use_extent_table; +} + +static void +bs_op_complete(void *cb_arg, 
int bserrno) +{ + g_bserrno = bserrno; +} + +static void +bs_op_with_handle_complete(void *cb_arg, struct spdk_blob_store *bs, + int bserrno) +{ + g_bs = bs; + g_bserrno = bserrno; +} + +static void +blob_op_complete(void *cb_arg, int bserrno) +{ + g_bserrno = bserrno; +} + +static void +blob_op_with_id_complete(void *cb_arg, spdk_blob_id blobid, int bserrno) +{ + g_blobid = blobid; + g_bserrno = bserrno; +} + +static void +blob_op_with_handle_complete(void *cb_arg, struct spdk_blob *blb, int bserrno) +{ + g_blob = blb; + g_bserrno = bserrno; +} + +static void +ut_bs_reload(struct spdk_blob_store **bs, struct spdk_bs_opts *opts) +{ + struct spdk_bs_dev *dev; + + /* Unload the blob store */ + spdk_bs_unload(*bs, bs_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + dev = init_dev(); + /* Load an existing blob store */ + spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_bs != NULL); + *bs = g_bs; + + g_bserrno = -1; +} + +static void +ut_bs_dirty_load(struct spdk_blob_store **bs, struct spdk_bs_opts *opts) +{ + struct spdk_bs_dev *dev; + + /* Dirty shutdown */ + bs_free(*bs); + + dev = init_dev(); + /* Load an existing blob store */ + spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_bs != NULL); + *bs = g_bs; + + g_bserrno = -1; +} + +static void +blob_init(void) +{ + struct spdk_blob_store *bs; + struct spdk_bs_dev *dev; + + dev = init_dev(); + + /* should fail for an unsupported blocklen */ + dev->blocklen = 500; + spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == -EINVAL); + + dev = init_dev(); + spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_bs != NULL); + bs = g_bs; + + spdk_bs_unload(bs, bs_op_complete, NULL); + poll_threads(); + 
CU_ASSERT(g_bserrno == 0); + g_bs = NULL; +} + +static void +blob_super(void) +{ + struct spdk_blob_store *bs = g_bs; + spdk_blob_id blobid; + struct spdk_blob_opts blob_opts; + + /* Get the super blob without having set one */ + spdk_bs_get_super(bs, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == -ENOENT); + CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID); + + /* Create a blob */ + ut_spdk_blob_opts_init(&blob_opts); + spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + blobid = g_blobid; + + /* Set the blob as the super blob */ + spdk_bs_set_super(bs, blobid, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + /* Get the super blob */ + spdk_bs_get_super(bs, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(blobid == g_blobid); +} + +static void +blob_open(void) +{ + struct spdk_blob_store *bs = g_bs; + struct spdk_blob *blob; + struct spdk_blob_opts blob_opts; + spdk_blob_id blobid, blobid2; + + ut_spdk_blob_opts_init(&blob_opts); + spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + blobid = g_blobid; + + spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blob != NULL); + blob = g_blob; + + blobid2 = spdk_blob_get_id(blob); + CU_ASSERT(blobid == blobid2); + + /* Try to open file again. It should return success. */ + spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(blob == g_blob); + + spdk_blob_close(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + /* + * Close the file a second time, releasing the second reference. This + * should succeed. 
+ */ + blob = g_blob; + spdk_blob_close(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + /* + * Try to open file again. It should succeed. This tests the case + * where the file is opened, closed, then re-opened again. + */ + spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blob != NULL); + blob = g_blob; + + ut_blob_close_and_delete(bs, blob); +} + +static void +blob_create(void) +{ + struct spdk_blob_store *bs = g_bs; + struct spdk_blob *blob; + struct spdk_blob_opts opts; + spdk_blob_id blobid; + + /* Create blob with 10 clusters */ + + ut_spdk_blob_opts_init(&opts); + opts.num_clusters = 10; + + spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + blobid = g_blobid; + + spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + blob = g_blob; + CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); + + spdk_blob_close(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + /* Create blob with 0 clusters */ + + ut_spdk_blob_opts_init(&opts); + opts.num_clusters = 0; + + spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + blobid = g_blobid; + + spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + blob = g_blob; + CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0); + + spdk_blob_close(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + /* Create blob with default options (opts == NULL) */ + + spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL); + poll_threads(); + 
CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + blobid = g_blobid; + + spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + blob = g_blob; + CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0); + + spdk_blob_close(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + /* Try to create blob with size larger than blobstore */ + + ut_spdk_blob_opts_init(&opts); + opts.num_clusters = bs->total_clusters + 1; + + spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == -ENOSPC); +} + +static void +blob_create_fail(void) +{ + struct spdk_blob_store *bs = g_bs; + struct spdk_blob_opts opts; + spdk_blob_id blobid; + uint32_t used_blobids_count = spdk_bit_array_count_set(bs->used_blobids); + uint32_t used_md_pages_count = spdk_bit_array_count_set(bs->used_md_pages); + + /* NULL callback */ + ut_spdk_blob_opts_init(&opts); + opts.xattrs.names = g_xattr_names; + opts.xattrs.get_value = NULL; + opts.xattrs.count = 1; + opts.xattrs.ctx = &g_ctx; + + blobid = spdk_bit_array_find_first_clear(bs->used_md_pages, 0); + spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == -EINVAL); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count); + CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count); + + spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == -ENOENT); + SPDK_CU_ASSERT_FATAL(g_blob == NULL); + + ut_bs_reload(&bs, NULL); + CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count); + CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count); + + spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL); + poll_threads(); + 
CU_ASSERT(g_blob == NULL); + CU_ASSERT(g_bserrno == -ENOENT); +} + +static void +blob_create_internal(void) +{ + struct spdk_blob_store *bs = g_bs; + struct spdk_blob *blob; + struct spdk_blob_opts opts; + struct spdk_blob_xattr_opts internal_xattrs; + const void *value; + size_t value_len; + spdk_blob_id blobid; + int rc; + + /* Create blob with custom xattrs */ + + ut_spdk_blob_opts_init(&opts); + blob_xattrs_init(&internal_xattrs); + internal_xattrs.count = 3; + internal_xattrs.names = g_xattr_names; + internal_xattrs.get_value = _get_xattr_value; + internal_xattrs.ctx = &g_ctx; + + bs_create_blob(bs, &opts, &internal_xattrs, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + blobid = g_blobid; + + spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + blob = g_blob; + + rc = blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len, true); + CU_ASSERT(rc == 0); + SPDK_CU_ASSERT_FATAL(value != NULL); + CU_ASSERT(value_len == strlen(g_xattr_values[0])); + CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len); + + rc = blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len, true); + CU_ASSERT(rc == 0); + SPDK_CU_ASSERT_FATAL(value != NULL); + CU_ASSERT(value_len == strlen(g_xattr_values[1])); + CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len); + + rc = blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len, true); + CU_ASSERT(rc == 0); + SPDK_CU_ASSERT_FATAL(value != NULL); + CU_ASSERT(value_len == strlen(g_xattr_values[2])); + CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len); + + rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len); + CU_ASSERT(rc != 0); + + rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len); + CU_ASSERT(rc != 0); + + rc = 
spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len); + CU_ASSERT(rc != 0); + + spdk_blob_close(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + /* Create blob with NULL internal options */ + + bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + blobid = g_blobid; + + spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + CU_ASSERT(TAILQ_FIRST(&g_blob->xattrs_internal) == NULL); + + blob = g_blob; + + spdk_blob_close(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); +} + +static void +blob_thin_provision(void) +{ + struct spdk_blob_store *bs; + struct spdk_bs_dev *dev; + struct spdk_blob *blob; + struct spdk_blob_opts opts; + struct spdk_bs_opts bs_opts; + spdk_blob_id blobid; + + dev = init_dev(); + spdk_bs_opts_init(&bs_opts); + snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE"); + + /* Initialize a new blob store */ + spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_bs != NULL); + + bs = g_bs; + + /* Create blob with thin provisioning enabled */ + + ut_spdk_blob_opts_init(&opts); + opts.thin_provision = true; + opts.num_clusters = 10; + + blob = ut_blob_create_and_open(bs, &opts); + blobid = spdk_blob_get_id(blob); + CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV); + + spdk_blob_close(blob, blob_op_complete, NULL); + CU_ASSERT(g_bserrno == 0); + + /* Do not shut down cleanly. This makes sure that when we load again + * and try to recover a valid used_cluster map, that blobstore will + * ignore clusters with index 0 since these are unallocated clusters. 
+ */ + ut_bs_dirty_load(&bs, &bs_opts); + + spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + blob = g_blob; + CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV); + + ut_blob_close_and_delete(bs, blob); + + spdk_bs_unload(bs, bs_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + g_bs = NULL; +} + +static void +blob_snapshot(void) +{ + struct spdk_blob_store *bs = g_bs; + struct spdk_blob *blob; + struct spdk_blob *snapshot, *snapshot2; + struct spdk_blob_bs_dev *blob_bs_dev; + struct spdk_blob_opts opts; + struct spdk_blob_xattr_opts xattrs; + spdk_blob_id blobid; + spdk_blob_id snapshotid; + spdk_blob_id snapshotid2; + const void *value; + size_t value_len; + int rc; + spdk_blob_id ids[2]; + size_t count; + + /* Create blob with 10 clusters */ + ut_spdk_blob_opts_init(&opts); + opts.num_clusters = 10; + + blob = ut_blob_create_and_open(bs, &opts); + blobid = spdk_blob_get_id(blob); + CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); + + /* Create snapshot from blob */ + CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0); + spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1); + snapshotid = g_blobid; + + spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + snapshot = g_blob; + CU_ASSERT(snapshot->data_ro == true); + CU_ASSERT(snapshot->md_ro == true); + CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10); + + CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); + CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV); + CU_ASSERT(spdk_mem_all_zero(blob->active.clusters, + blob->active.num_clusters * sizeof(blob->active.clusters[0]))); + + /* Try to create snapshot from clone with 
xattrs */ + xattrs.names = g_xattr_names; + xattrs.get_value = _get_xattr_value; + xattrs.count = 3; + xattrs.ctx = &g_ctx; + spdk_bs_create_snapshot(bs, blobid, &xattrs, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2); + snapshotid2 = g_blobid; + + spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + snapshot2 = g_blob; + CU_ASSERT(snapshot2->data_ro == true); + CU_ASSERT(snapshot2->md_ro == true); + CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 10); + + /* Confirm that blob is backed by snapshot2 and snapshot2 is backed by snapshot */ + CU_ASSERT(snapshot->back_bs_dev == NULL); + SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL); + SPDK_CU_ASSERT_FATAL(snapshot2->back_bs_dev != NULL); + + blob_bs_dev = (struct spdk_blob_bs_dev *)blob->back_bs_dev; + CU_ASSERT(blob_bs_dev->blob == snapshot2); + + blob_bs_dev = (struct spdk_blob_bs_dev *)snapshot2->back_bs_dev; + CU_ASSERT(blob_bs_dev->blob == snapshot); + + rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[0], &value, &value_len); + CU_ASSERT(rc == 0); + SPDK_CU_ASSERT_FATAL(value != NULL); + CU_ASSERT(value_len == strlen(g_xattr_values[0])); + CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len); + + rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[1], &value, &value_len); + CU_ASSERT(rc == 0); + SPDK_CU_ASSERT_FATAL(value != NULL); + CU_ASSERT(value_len == strlen(g_xattr_values[1])); + CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len); + + rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[2], &value, &value_len); + CU_ASSERT(rc == 0); + SPDK_CU_ASSERT_FATAL(value != NULL); + CU_ASSERT(value_len == strlen(g_xattr_values[2])); + CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len); + + /* Confirm that blob is clone of snapshot2, and 
snapshot2 is clone of snapshot */ + count = 2; + CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0); + CU_ASSERT(count == 1); + CU_ASSERT(ids[0] == blobid); + + count = 2; + CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0); + CU_ASSERT(count == 1); + CU_ASSERT(ids[0] == snapshotid2); + + /* Try to create snapshot from snapshot */ + spdk_bs_create_snapshot(bs, snapshotid, NULL, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == -EINVAL); + CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID); + CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2); + + /* Delete blob and confirm that it is no longer on snapshot2 clone list */ + ut_blob_close_and_delete(bs, blob); + count = 2; + CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0); + CU_ASSERT(count == 0); + + /* Delete snapshot2 and confirm that it is no longer on snapshot clone list */ + ut_blob_close_and_delete(bs, snapshot2); + CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1); + count = 2; + CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0); + CU_ASSERT(count == 0); + + ut_blob_close_and_delete(bs, snapshot); + CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0); +} + +static void +blob_snapshot_freeze_io(void) +{ + struct spdk_io_channel *channel; + struct spdk_bs_channel *bs_channel; + struct spdk_blob_store *bs = g_bs; + struct spdk_blob *blob; + struct spdk_blob_opts opts; + spdk_blob_id blobid; + uint32_t num_of_pages = 10; + uint8_t payload_read[num_of_pages * SPDK_BS_PAGE_SIZE]; + uint8_t payload_write[num_of_pages * SPDK_BS_PAGE_SIZE]; + uint8_t payload_zero[num_of_pages * SPDK_BS_PAGE_SIZE]; + + memset(payload_write, 0xE5, sizeof(payload_write)); + memset(payload_read, 0x00, sizeof(payload_read)); + memset(payload_zero, 0x00, sizeof(payload_zero)); + + /* Test freeze I/O during snapshot */ + channel = spdk_bs_alloc_io_channel(bs); + bs_channel = spdk_io_channel_get_ctx(channel); + + /* Create blob with 10 clusters */ + 
ut_spdk_blob_opts_init(&opts); + opts.num_clusters = 10; + opts.thin_provision = false; + + blob = ut_blob_create_and_open(bs, &opts); + blobid = spdk_blob_get_id(blob); + CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); + + spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); + + /* This is implementation specific. + * Flag 'frozen_io' is set in _spdk_bs_snapshot_freeze_cpl callback. + * Four async I/O operations happen before that. */ + poll_thread_times(0, 3); + + CU_ASSERT(TAILQ_EMPTY(&bs_channel->queued_io)); + + /* Blob I/O should be frozen here */ + CU_ASSERT(blob->frozen_refcnt == 1); + + /* Write to the blob */ + spdk_blob_io_write(blob, channel, payload_write, 0, num_of_pages, blob_op_complete, NULL); + + /* Verify that I/O is queued */ + CU_ASSERT(!TAILQ_EMPTY(&bs_channel->queued_io)); + /* Verify that payload is not written to disk */ + CU_ASSERT(memcmp(payload_zero, &g_dev_buffer[blob->active.clusters[0]*SPDK_BS_PAGE_SIZE], + SPDK_BS_PAGE_SIZE) == 0); + + /* Finish all operations including spdk_bs_create_snapshot */ + poll_threads(); + + /* Verify snapshot */ + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + + /* Verify that blob has unset frozen_io */ + CU_ASSERT(blob->frozen_refcnt == 0); + + /* Verify that postponed I/O completed successfully by comparing payload */ + spdk_blob_io_read(blob, channel, payload_read, 0, num_of_pages, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(memcmp(payload_write, payload_read, num_of_pages * SPDK_BS_PAGE_SIZE) == 0); + + spdk_bs_free_io_channel(channel); + poll_threads(); + + ut_blob_close_and_delete(bs, blob); +} + +static void +blob_clone(void) +{ + struct spdk_blob_store *bs = g_bs; + struct spdk_blob_opts opts; + struct spdk_blob *blob, *snapshot, *clone; + spdk_blob_id blobid, cloneid, snapshotid; + struct spdk_blob_xattr_opts xattrs; + const void *value; + size_t value_len; + int rc; + + /* Create blob with 10 clusters */ 
+ + ut_spdk_blob_opts_init(&opts); + opts.num_clusters = 10; + + blob = ut_blob_create_and_open(bs, &opts); + blobid = spdk_blob_get_id(blob); + CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); + + /* Create snapshot */ + spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + snapshotid = g_blobid; + + spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + snapshot = g_blob; + CU_ASSERT(snapshot->data_ro == true); + CU_ASSERT(snapshot->md_ro == true); + CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10); + + spdk_blob_close(snapshot, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + /* Create clone from snapshot with xattrs */ + xattrs.names = g_xattr_names; + xattrs.get_value = _get_xattr_value; + xattrs.count = 3; + xattrs.ctx = &g_ctx; + + spdk_bs_create_clone(bs, snapshotid, &xattrs, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + cloneid = g_blobid; + + spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + clone = g_blob; + CU_ASSERT(clone->data_ro == false); + CU_ASSERT(clone->md_ro == false); + CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10); + + rc = spdk_blob_get_xattr_value(clone, g_xattr_names[0], &value, &value_len); + CU_ASSERT(rc == 0); + SPDK_CU_ASSERT_FATAL(value != NULL); + CU_ASSERT(value_len == strlen(g_xattr_values[0])); + CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len); + + rc = spdk_blob_get_xattr_value(clone, g_xattr_names[1], &value, &value_len); + CU_ASSERT(rc == 0); + SPDK_CU_ASSERT_FATAL(value != NULL); + CU_ASSERT(value_len == strlen(g_xattr_values[1])); + CU_ASSERT_NSTRING_EQUAL((char 
*)value, g_xattr_values[1], value_len);

	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);

	spdk_blob_close(clone, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to create a clone from a blob that is not read-only (must fail with -EINVAL) */
	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);

	/* Mark blob as read-only and persist that flag */
	spdk_blob_set_read_only(blob);
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create clone from the now read-only blob; this time it must succeed */
	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	cloneid = g_blobid;

	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	clone = g_blob;
	/* A clone is writable even when its origin is read-only */
	CU_ASSERT(clone->data_ro == false);
	CU_ASSERT(clone->md_ro == false);
	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);

	ut_blob_close_and_delete(bs, clone);
	ut_blob_close_and_delete(bs, blob);
}

/*
 * Common body for blob_inflate(): exercise spdk_bs_inflate_blob() when
 * decouple_parent == false, and spdk_bs_blob_decouple_parent() when
 * decouple_parent == true, on a thin-provisioned blob both before and
 * after it gains a parent snapshot.
 */
static void
_blob_inflate(bool decouple_parent)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	spdk_blob_id blobid, snapshotid;
	struct spdk_io_channel *channel;
	uint64_t free_clusters;

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create thin-provisioned blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;
	opts.thin_provision = true;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);

	/* 1) Blob with no parent */
	if (decouple_parent) {
		/* Decoupling the parent of a blob that has no parent should fail */
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno != 0);
	} else {
		/* Inflating a thin blob with no parent should make it thick */
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == false);
	}

	/* Taking a snapshot turns the blob back into a thin clone of the snapshot */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	free_clusters = spdk_bs_free_cluster_count(bs);

	/* 2) Blob with parent */
	if (!decouple_parent) {
		/* Do full blob inflation */
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		/* all 10 clusters should be allocated */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 10);
	} else {
		/* Decouple parent of blob */
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		/* when only the parent is removed, none of the clusters should be allocated */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters);
	}

	/* Now, it should be possible to delete the snapshot */
	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == decouple_parent);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}

/* Run the inflate scenario in both modes: full inflate and decouple-parent */
static void
blob_inflate(void)
{
	_blob_inflate(false);
	_blob_inflate(true);
}

/* Create a blob, delete it, and verify it can no longer be opened */
static void
blob_delete(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts blob_opts;
	spdk_blob_id blobid;

	/* Create a blob and then delete it. */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid > 0);
	blobid = g_blobid;

	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to open the deleted blob - must fail with -ENOENT */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
}

/* Verify grow/shrink semantics of spdk_blob_resize() and its failure modes */
static void
blob_resize_test(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	uint64_t free_clusters;

	free_clusters = spdk_bs_free_cluster_count(bs);

	blob = ut_blob_create_and_open(bs, NULL);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	/* Confirm that resize fails if blob is marked read-only. */
	blob->md_ro = true;
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);
	blob->md_ro = false;

	/* The blob started at 0 clusters. Resize it to be 5. */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));

	/* Shrink the blob to 3 clusters. This will not actually release
	 * the old clusters until the blob is synced.
	 */
	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Verify there are still 5 clusters in use */
	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Now there are only 3 clusters in use */
	CU_ASSERT((free_clusters - 3) == spdk_bs_free_cluster_count(bs));

	/* Resize the blob to be 10 clusters. Growth takes effect immediately. */
	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT((free_clusters - 10) == spdk_bs_free_cluster_count(bs));

	/* Try to resize the blob to a size larger than the blobstore. */
	spdk_blob_resize(blob, bs->total_clusters + 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOSPC);

	ut_blob_close_and_delete(bs, blob);
}

/*
 * Verify that the read-only flag set via spdk_blob_set_read_only() takes
 * effect only after a metadata sync, and survives close/reopen and a full
 * blobstore reload.
 */
static void
blob_read_only(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob *blob;
	struct spdk_bs_opts opts;
	spdk_blob_id blobid;
	int rc;

	dev = init_dev();
	spdk_bs_opts_init(&opts);
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");

	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	blob = ut_blob_create_and_open(bs, NULL);
	blobid = spdk_blob_get_id(blob);

	rc = spdk_blob_set_read_only(blob);
	CU_ASSERT(rc == 0);

	/* The flag is not applied until the metadata is synced */
	CU_ASSERT(blob->data_ro == false);
	CU_ASSERT(blob->md_ro == false);

	spdk_blob_sync_md(blob, bs_op_complete, NULL);
	poll_threads();

	CU_ASSERT(blob->data_ro == true);
	CU_ASSERT(blob->md_ro == true);
	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Reopen: read-only state must persist across close/open */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(blob->data_ro == true);
	CU_ASSERT(blob->md_ro == true);
	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Reload the whole blobstore: read-only state must persist on disk */
	ut_bs_reload(&bs, &opts);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(blob->data_ro == true);
	CU_ASSERT(blob->md_ro == true);
	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);

	ut_blob_close_and_delete(bs, blob);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}

/* Smoke test: allocate and free a blobstore I/O channel */
static void
channel_ops(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_io_channel *channel;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

/* Verify spdk_blob_io_write() error paths (zero size, read-only, out of range) */
static void
blob_write(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint64_t pages_per_cluster;
	uint8_t payload[10 * 4096];

	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	/* Write to a blob with 0 size */
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Resize the blob */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Confirm that write fails if blob is marked read-only.
	 */
	blob->data_ro = true;
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);
	blob->data_ro = false;

	/* Write to the blob */
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write starting beyond the end */
	spdk_blob_io_write(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
			   NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Write starting at a valid location but going off the end */
	spdk_blob_io_write(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

/* Mirror of blob_write(): verify spdk_blob_io_read() error and success paths */
static void
blob_read(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint64_t pages_per_cluster;
	uint8_t payload[10 * 4096];

	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	/* Read from a blob with 0 size */
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Resize the blob */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Confirm that read passes if blob is marked read-only.
	 */
	blob->data_ro = true;
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob->data_ro = false;

	/* Read from the blob */
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read starting beyond the end */
	spdk_blob_io_read(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
			  NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Read starting at a valid location but going off the end */
	spdk_blob_io_read(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
			  blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

/* Write a pattern, read it back, and verify the payloads match */
static void
blob_rw_verify(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint8_t payload_read[10 * 4096];
	uint8_t payload_write[10 * 4096];

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	spdk_blob_resize(blob, 32, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* NOTE(review): only the first 4 of the 10 transferred pages are compared
	 * here - presumably intentional, but worth confirming. */
	CU_ASSERT(memcmp(payload_write, payload_read, 4 * 4096) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

/*
 * Scattered write followed by a differently-scattered read; also verifies
 * I/O that crosses a cluster boundary by relocating the second cluster.
 */
static void
blob_rw_verify_iov(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	uint8_t payload_read[10 * 4096];
	uint8_t payload_write[10 * 4096];
	struct iovec iov_read[3];
	struct iovec iov_write[3];
	void *buf;

	channel =
spdk_bs_alloc_io_channel(bs); + CU_ASSERT(channel != NULL); + + blob = ut_blob_create_and_open(bs, NULL); + + spdk_blob_resize(blob, 2, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + /* + * Manually adjust the offset of the blob's second cluster. This allows + * us to make sure that the readv/write code correctly accounts for I/O + * that cross cluster boundaries. Start by asserting that the allocated + * clusters are where we expect before modifying the second cluster. + */ + CU_ASSERT(blob->active.clusters[0] == 1 * 256); + CU_ASSERT(blob->active.clusters[1] == 2 * 256); + blob->active.clusters[1] = 3 * 256; + + memset(payload_write, 0xE5, sizeof(payload_write)); + iov_write[0].iov_base = payload_write; + iov_write[0].iov_len = 1 * 4096; + iov_write[1].iov_base = payload_write + 1 * 4096; + iov_write[1].iov_len = 5 * 4096; + iov_write[2].iov_base = payload_write + 6 * 4096; + iov_write[2].iov_len = 4 * 4096; + /* + * Choose a page offset just before the cluster boundary. The first 6 pages of payload + * will get written to the first cluster, the last 4 to the second cluster. + */ + spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + memset(payload_read, 0xAA, sizeof(payload_read)); + iov_read[0].iov_base = payload_read; + iov_read[0].iov_len = 3 * 4096; + iov_read[1].iov_base = payload_read + 3 * 4096; + iov_read[1].iov_len = 4 * 4096; + iov_read[2].iov_base = payload_read + 7 * 4096; + iov_read[2].iov_len = 3 * 4096; + spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); + + buf = calloc(1, 256 * 4096); + SPDK_CU_ASSERT_FATAL(buf != NULL); + /* Check that cluster 2 on "disk" was not modified. 
*/ + CU_ASSERT(memcmp(buf, &g_dev_buffer[512 * 4096], 256 * 4096) == 0); + free(buf); + + spdk_blob_close(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + spdk_bs_free_io_channel(channel); + poll_threads(); +} + +static uint32_t +bs_channel_get_req_count(struct spdk_io_channel *_channel) +{ + struct spdk_bs_channel *channel = spdk_io_channel_get_ctx(_channel); + struct spdk_bs_request_set *set; + uint32_t count = 0; + + TAILQ_FOREACH(set, &channel->reqs, link) { + count++; + } + + return count; +} + +static void +blob_rw_verify_iov_nomem(void) +{ + struct spdk_blob_store *bs = g_bs; + struct spdk_blob *blob = g_blob; + struct spdk_io_channel *channel; + uint8_t payload_write[10 * 4096]; + struct iovec iov_write[3]; + uint32_t req_count; + + channel = spdk_bs_alloc_io_channel(bs); + CU_ASSERT(channel != NULL); + + spdk_blob_resize(blob, 2, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + /* + * Choose a page offset just before the cluster boundary. The first 6 pages of payload + * will get written to the first cluster, the last 4 to the second cluster. 
+ */ + iov_write[0].iov_base = payload_write; + iov_write[0].iov_len = 1 * 4096; + iov_write[1].iov_base = payload_write + 1 * 4096; + iov_write[1].iov_len = 5 * 4096; + iov_write[2].iov_base = payload_write + 6 * 4096; + iov_write[2].iov_len = 4 * 4096; + MOCK_SET(calloc, NULL); + req_count = bs_channel_get_req_count(channel); + spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno = -ENOMEM); + CU_ASSERT(req_count == bs_channel_get_req_count(channel)); + MOCK_CLEAR(calloc); + + spdk_bs_free_io_channel(channel); + poll_threads(); +} + +static void +blob_rw_iov_read_only(void) +{ + struct spdk_blob_store *bs = g_bs; + struct spdk_blob *blob = g_blob; + struct spdk_io_channel *channel; + uint8_t payload_read[4096]; + uint8_t payload_write[4096]; + struct iovec iov_read; + struct iovec iov_write; + + channel = spdk_bs_alloc_io_channel(bs); + CU_ASSERT(channel != NULL); + + spdk_blob_resize(blob, 2, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + /* Verify that writev failed if read_only flag is set. */ + blob->data_ro = true; + iov_write.iov_base = payload_write; + iov_write.iov_len = sizeof(payload_write); + spdk_blob_io_writev(blob, channel, &iov_write, 1, 0, 1, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == -EPERM); + + /* Verify that reads pass if data_ro flag is set. 
	 */
	iov_read.iov_base = payload_read;
	iov_read.iov_len = sizeof(payload_read);
	spdk_blob_io_readv(blob, channel, &iov_read, 1, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

/*
 * Helper: read 'length' pages one page at a time so the blobstore never has
 * to split the operation internally. Stops at the first error and passes the
 * final g_bserrno to cb_fn.
 */
static void
_blob_io_read_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
		       uint8_t *payload, uint64_t offset, uint64_t length,
		       spdk_blob_op_complete cb_fn, void *cb_arg)
{
	uint64_t i;
	uint8_t *buf;
	uint64_t page_size = spdk_bs_get_page_size(blob->bs);

	/* To be sure the operation is NOT split, read one page at a time */
	buf = payload;
	for (i = 0; i < length; i++) {
		spdk_blob_io_read(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
		poll_threads();
		if (g_bserrno != 0) {
			/* Pass the error code up */
			break;
		}
		buf += page_size;
	}

	cb_fn(cb_arg, g_bserrno);
}

/* Helper: write counterpart of _blob_io_read_no_split(), one page per I/O */
static void
_blob_io_write_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
			uint8_t *payload, uint64_t offset, uint64_t length,
			spdk_blob_op_complete cb_fn, void *cb_arg)
{
	uint64_t i;
	uint8_t *buf;
	uint64_t page_size = spdk_bs_get_page_size(blob->bs);

	/* To be sure the operation is NOT split, write one page at a time */
	buf = payload;
	for (i = 0; i < length; i++) {
		spdk_blob_io_write(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
		poll_threads();
		if (g_bserrno != 0) {
			/* Pass the error code up */
			break;
		}
		buf += page_size;
	}

	cb_fn(cb_arg, g_bserrno);
}

/*
 * Verify that large (multi-cluster) contiguous reads and writes produce the
 * same data as the equivalent sequence of single-page I/Os, i.e. internal
 * splitting of an operation is transparent.
 */
static void
blob_operation_split_rw(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	uint64_t cluster_size;

	uint64_t payload_size;
	uint8_t *payload_read;
	uint8_t *payload_write;
	uint8_t *payload_pattern;

	uint64_t page_size;
	uint64_t pages_per_cluster;
	uint64_t pages_per_payload;

	uint64_t i;

	cluster_size = spdk_bs_get_cluster_size(bs);
	page_size = spdk_bs_get_page_size(bs);
	pages_per_cluster = cluster_size / page_size;
	pages_per_payload = pages_per_cluster * 5;
	payload_size = cluster_size * 5;

	payload_read = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_read != NULL);

	payload_write = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_write != NULL);

	payload_pattern = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);

	/* Prepare pattern to write: each page starts with its 1-based index */
	memset(payload_pattern, 0xFF, payload_size);
	for (i = 0; i < pages_per_payload; i++) {
		*((uint64_t *)(payload_pattern + page_size * i)) = (i + 1);
	}

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = false;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Initial read should return zeroed payload */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));

	/* Fill whole blob except last page */
	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload - 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write last page with a pattern */
	spdk_blob_io_write(blob, channel, payload_pattern, pages_per_payload - 1, 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read whole blob and check consistency */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
	/* Last page was written from the start of the pattern buffer */
	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);

	/* Fill whole blob except first page */
	spdk_blob_io_write(blob, channel, payload_pattern, 1, pages_per_payload - 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write first page with a pattern */
	spdk_blob_io_write(blob, channel, payload_pattern, 0, 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read whole blob and check consistency */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);


	/* Fill whole blob with a pattern (5 clusters) */

	/* 1. Read test: write page-by-page, read with one split operation. */
	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
				blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);

	/* 2. Write test.
	 */
	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xFF, payload_size);
	_blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	g_blob = NULL;
	g_blobid = 0;

	free(payload_read);
	free(payload_write);
	free(payload_pattern);

	ut_blob_close_and_delete(bs, blob);
}

/*
 * Same as blob_operation_split_rw(), but the large operations are issued as
 * iovecs (readv/writev) whose segment boundaries deliberately do not line up
 * with cluster boundaries.
 */
static void
blob_operation_split_rw_iov(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	uint64_t cluster_size;

	uint64_t payload_size;
	uint8_t *payload_read;
	uint8_t *payload_write;
	uint8_t *payload_pattern;

	uint64_t page_size;
	uint64_t pages_per_cluster;
	uint64_t pages_per_payload;

	struct iovec iov_read[2];
	struct iovec iov_write[2];

	uint64_t i, j;

	cluster_size = spdk_bs_get_cluster_size(bs);
	page_size = spdk_bs_get_page_size(bs);
	pages_per_cluster = cluster_size / page_size;
	pages_per_payload = pages_per_cluster * 5;
	payload_size = cluster_size * 5;

	payload_read = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_read != NULL);

	payload_write = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_write != NULL);

	payload_pattern = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);

	/* Prepare pattern to write: fill every word of page i with i + 1 */
	for (i = 0; i < pages_per_payload; i++) {
		for (j = 0; j < page_size / sizeof(uint64_t); j++) {
			uint64_t *tmp;

			tmp = (uint64_t *)payload_pattern;
			tmp += ((page_size * i) / sizeof(uint64_t)) + j;
			*tmp = i + 1;
		}
	}

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = false;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Initial read should return zeroed payload */
	memset(payload_read, 0xFF, payload_size);
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = cluster_size * 3;
	iov_read[1].iov_base = payload_read + cluster_size * 3;
	iov_read[1].iov_len = cluster_size * 2;
	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));

	/* First of iovs fills whole blob except last page and second of iovs writes last page
	 * with a pattern. */
	iov_write[0].iov_base = payload_pattern;
	iov_write[0].iov_len = payload_size - page_size;
	iov_write[1].iov_base = payload_pattern;
	iov_write[1].iov_len = page_size;
	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read whole blob and check consistency */
	memset(payload_read, 0xFF, payload_size);
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = cluster_size * 2;
	iov_read[1].iov_base = payload_read + cluster_size * 2;
	iov_read[1].iov_len = cluster_size * 3;
	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);

	/* First of iovs fills only first page and second of iovs writes whole blob except
	 * first page with a pattern.
	 */
	iov_write[0].iov_base = payload_pattern;
	iov_write[0].iov_len = page_size;
	iov_write[1].iov_base = payload_pattern;
	iov_write[1].iov_len = payload_size - page_size;
	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read whole blob and check consistency */
	memset(payload_read, 0xFF, payload_size);
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = cluster_size * 4;
	iov_read[1].iov_base = payload_read + cluster_size * 4;
	iov_read[1].iov_len = cluster_size;
	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);


	/* Fill whole blob with a pattern (5 clusters) */

	/* 1. Read test: write page-by-page, read back with one split readv. */
	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
				blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xFF, payload_size);
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = cluster_size;
	iov_read[1].iov_base = payload_read + cluster_size;
	iov_read[1].iov_len = cluster_size * 4;
	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);

	/* 2. Write test.
	 */
	/* payload_read currently holds the pattern from the previous readv,
	 * so it is reused here as the writev source buffer. */
	iov_write[0].iov_base = payload_read;
	iov_write[0].iov_len = cluster_size * 2;
	iov_write[1].iov_base = payload_read + cluster_size * 2;
	iov_write[1].iov_len = cluster_size * 3;
	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xFF, payload_size);
	_blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	g_blob = NULL;
	g_blobid = 0;

	free(payload_read);
	free(payload_write);
	free(payload_pattern);

	ut_blob_close_and_delete(bs, blob);
}

/*
 * Verify that shrinking a blob to 0 clusters unmaps only the clusters that
 * were still marked as allocated in its cluster map.
 */
static void
blob_unmap(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	uint8_t payload[4096];
	int i;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);

	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload, 0, sizeof(payload));
	payload[0] = 0xFF;

	/*
	 * Set first byte of every cluster to 0xFF.
	 * First cluster on device is reserved so let's start from cluster number 1
	 */
	for (i = 1; i < 11; i++) {
		g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] = 0xFF;
	}

	/* Confirm the markers are visible through blob reads */
	for (i = 0; i < 10; i++) {
		payload[0] = 0;
		spdk_blob_io_read(blob, channel, &payload, i * SPDK_BLOB_OPTS_CLUSTER_SZ / 4096, 1,
				  blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(payload[0] == 0xFF);
	}

	/* Mark some clusters as unallocated */
	blob->active.clusters[1] = 0;
	blob->active.clusters[2] = 0;
	blob->active.clusters[3] = 0;
	blob->active.clusters[6] = 0;
	blob->active.clusters[8] = 0;

	/* Unmap clusters by resizing to 0 */
	spdk_blob_resize(blob, 0, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Confirm that only 'allocated' clusters were unmapped; the device
	 * clusters whose map entries were zeroed above must keep their 0xFF. */
	for (i = 1; i < 11; i++) {
		switch (i) {
		case 2:
		case 3:
		case 4:
		case 7:
		case 9:
			CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0xFF);
			break;
		default:
			CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0);
			break;
		}
	}

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}

/* Iterate over the blobstore: empty store, then a store with one blob */
static void
blob_iter(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	spdk_blob_id blobid;
	struct spdk_blob_opts blob_opts;

	/* Iteration over an empty store reports -ENOENT */
	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_blob == NULL);
	CU_ASSERT(g_bserrno == -ENOENT);

	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_blob != NULL);
	CU_ASSERT(g_bserrno == 0);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_id(blob) == blobid);

	/* There is only one blob, so the next iteration ends with -ENOENT */
	spdk_bs_iter_next(bs, blob, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_blob == NULL);
	CU_ASSERT(g_bserrno == -ENOENT);
}

/*
 * Exercise the public xattr API (set/get/remove/names) including md_ro
 * enforcement, plus the internal xattr API and the SPDK_BLOB_INTERNAL_XATTR
 * flag, and verify xattrs persist across a blobstore reload.
 */
static void
blob_xattr(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	spdk_blob_id blobid = spdk_blob_get_id(blob);
	uint64_t length;
	int rc;
	const char *name1, *name2;
	const void *value;
	size_t value_len;
	struct spdk_xattr_names *names;

	/* Test that set_xattr fails if md_ro flag is set. */
	blob->md_ro = true;
	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
	CU_ASSERT(rc == -EPERM);

	blob->md_ro = false;
	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 2345;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	/* Overwrite "length" xattr. */
	length = 3456;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	/* get_xattr should still work even if md_ro flag is set.
	 */
	value = NULL;
	blob->md_ro = true;
	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(*(uint64_t *)value == length);
	CU_ASSERT(value_len == 8);
	blob->md_ro = false;

	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
	CU_ASSERT(rc == -ENOENT);

	/* Enumerate the names: exactly "name" and "length", in either order */
	names = NULL;
	rc = spdk_blob_get_xattr_names(blob, &names);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(names != NULL);
	CU_ASSERT(spdk_xattr_names_get_count(names) == 2);
	name1 = spdk_xattr_names_get_name(names, 0);
	SPDK_CU_ASSERT_FATAL(name1 != NULL);
	CU_ASSERT(!strcmp(name1, "name") || !strcmp(name1, "length"));
	name2 = spdk_xattr_names_get_name(names, 1);
	SPDK_CU_ASSERT_FATAL(name2 != NULL);
	CU_ASSERT(!strcmp(name2, "name") || !strcmp(name2, "length"));
	CU_ASSERT(strcmp(name1, name2));
	spdk_xattr_names_free(names);

	/* Confirm that remove_xattr fails if md_ro is set to true. */
	blob->md_ro = true;
	rc = spdk_blob_remove_xattr(blob, "name");
	CU_ASSERT(rc == -EPERM);

	blob->md_ro = false;
	rc = spdk_blob_remove_xattr(blob, "name");
	CU_ASSERT(rc == 0);

	rc = spdk_blob_remove_xattr(blob, "foobar");
	CU_ASSERT(rc == -ENOENT);

	/* Set internal xattr */
	length = 7898;
	rc = blob_set_xattr(blob, "internal", &length, sizeof(length), true);
	CU_ASSERT(rc == 0);
	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(*(uint64_t *)value == length);
	/* try to get public xattr with same name - internal ones are hidden */
	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
	CU_ASSERT(rc != 0);
	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, false);
	CU_ASSERT(rc != 0);
	/* Check if SPDK_BLOB_INTERNAL_XATTR is set */
	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) ==
		  SPDK_BLOB_INTERNAL_XATTR);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();

	/* Check if xattrs are
persisted */ + ut_bs_reload(&bs, NULL); + + spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + blob = g_blob; + + rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true); + CU_ASSERT(rc == 0); + CU_ASSERT(*(uint64_t *)value == length); + + /* try to get internal xattr trough public call */ + rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len); + CU_ASSERT(rc != 0); + + rc = blob_remove_xattr(blob, "internal", true); + CU_ASSERT(rc == 0); + + CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 0); +} + +static void +bs_load(void) +{ + struct spdk_blob_store *bs; + struct spdk_bs_dev *dev; + spdk_blob_id blobid; + struct spdk_blob *blob; + struct spdk_bs_super_block *super_block; + uint64_t length; + int rc; + const void *value; + size_t value_len; + struct spdk_bs_opts opts; + struct spdk_blob_opts blob_opts; + + dev = init_dev(); + spdk_bs_opts_init(&opts); + snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); + + /* Initialize a new blob store */ + spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_bs != NULL); + bs = g_bs; + + /* Try to open a blobid that does not exist */ + spdk_bs_open_blob(bs, 0, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == -ENOENT); + CU_ASSERT(g_blob == NULL); + + /* Create a blob */ + blob = ut_blob_create_and_open(bs, NULL); + blobid = spdk_blob_get_id(blob); + + /* Try again to open valid blob but without the upper bit set */ + spdk_bs_open_blob(bs, blobid & 0xFFFFFFFF, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == -ENOENT); + CU_ASSERT(g_blob == NULL); + + /* Set some xattrs */ + rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); + CU_ASSERT(rc == 0); + + length = 2345; + rc = spdk_blob_set_xattr(blob, 
"length", &length, sizeof(length)); + CU_ASSERT(rc == 0); + + /* Resize the blob */ + spdk_blob_resize(blob, 10, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + spdk_blob_close(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + blob = NULL; + g_blob = NULL; + g_blobid = SPDK_BLOBID_INVALID; + + /* Unload the blob store */ + spdk_bs_unload(bs, bs_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + g_bs = NULL; + g_blob = NULL; + g_blobid = 0; + + super_block = (struct spdk_bs_super_block *)g_dev_buffer; + CU_ASSERT(super_block->clean == 1); + + /* Load should fail for device with an unsupported blocklen */ + dev = init_dev(); + dev->blocklen = SPDK_BS_PAGE_SIZE * 2; + spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == -EINVAL); + + /* Load should when max_md_ops is set to zero */ + dev = init_dev(); + spdk_bs_opts_init(&opts); + opts.max_md_ops = 0; + spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == -EINVAL); + + /* Load should when max_channel_ops is set to zero */ + dev = init_dev(); + spdk_bs_opts_init(&opts); + opts.max_channel_ops = 0; + spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == -EINVAL); + + /* Load an existing blob store */ + dev = init_dev(); + spdk_bs_opts_init(&opts); + snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); + spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_bs != NULL); + bs = g_bs; + + super_block = (struct spdk_bs_super_block *)g_dev_buffer; + CU_ASSERT(super_block->clean == 1); + CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen); + + spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blob != NULL); + blob = g_blob; + 
+ /* Verify that blobstore is marked dirty after first metadata sync */ + spdk_blob_sync_md(blob, blob_op_complete, NULL); + CU_ASSERT(super_block->clean == 1); + + /* Get the xattrs */ + value = NULL; + rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); + CU_ASSERT(rc == 0); + SPDK_CU_ASSERT_FATAL(value != NULL); + CU_ASSERT(*(uint64_t *)value == length); + CU_ASSERT(value_len == 8); + + rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len); + CU_ASSERT(rc == -ENOENT); + + CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); + + spdk_blob_close(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + blob = NULL; + g_blob = NULL; + + spdk_bs_unload(bs, bs_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + g_bs = NULL; + + /* Load should fail: bdev size < saved size */ + dev = init_dev(); + dev->blockcnt /= 2; + + spdk_bs_opts_init(&opts); + snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); + spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); + poll_threads(); + + CU_ASSERT(g_bserrno == -EILSEQ); + + /* Load should succeed: bdev size > saved size */ + dev = init_dev(); + dev->blockcnt *= 4; + + spdk_bs_opts_init(&opts); + snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); + spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_bs != NULL); + bs = g_bs; + + CU_ASSERT(g_bserrno == 0); + spdk_bs_unload(bs, bs_op_complete, NULL); + poll_threads(); + + + /* Test compatibility mode */ + + dev = init_dev(); + super_block->size = 0; + super_block->crc = blob_md_page_calc_crc(super_block); + + spdk_bs_opts_init(&opts); + snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); + spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_bs != NULL); + bs = g_bs; + + /* Create a blob */ + 
ut_spdk_blob_opts_init(&blob_opts); + spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + + /* Blobstore should update number of blocks in super_block */ + CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen); + CU_ASSERT(super_block->clean == 0); + + spdk_bs_unload(bs, bs_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(super_block->clean == 1); + g_bs = NULL; + +} + +static void +bs_load_pending_removal(void) +{ + struct spdk_blob_store *bs = g_bs; + struct spdk_blob_opts opts; + struct spdk_blob *blob, *snapshot; + spdk_blob_id blobid, snapshotid; + const void *value; + size_t value_len; + int rc; + + /* Create blob */ + ut_spdk_blob_opts_init(&opts); + opts.num_clusters = 10; + + blob = ut_blob_create_and_open(bs, &opts); + blobid = spdk_blob_get_id(blob); + + /* Create snapshot */ + spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + snapshotid = g_blobid; + + spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + snapshot = g_blob; + + /* Set SNAPSHOT_PENDING_REMOVAL xattr */ + snapshot->md_ro = false; + rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true); + CU_ASSERT(rc == 0); + snapshot->md_ro = true; + + spdk_blob_close(snapshot, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + spdk_blob_close(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + /* Reload blobstore */ + ut_bs_reload(&bs, NULL); + + /* Snapshot should not be removed as blob is still pointing to it */ + spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + 
SPDK_CU_ASSERT_FATAL(g_blob != NULL); + snapshot = g_blob; + + /* SNAPSHOT_PENDING_REMOVAL xattr should be removed during load */ + rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len); + CU_ASSERT(rc != 0); + + /* Set SNAPSHOT_PENDING_REMOVAL xattr again */ + snapshot->md_ro = false; + rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true); + CU_ASSERT(rc == 0); + snapshot->md_ro = true; + + spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + blob = g_blob; + + /* Remove parent_id from blob by removing BLOB_SNAPSHOT xattr */ + blob_remove_xattr(blob, BLOB_SNAPSHOT, true); + + spdk_blob_sync_md(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + spdk_blob_close(snapshot, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + spdk_blob_close(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + /* Reload blobstore */ + ut_bs_reload(&bs, NULL); + + /* Snapshot should be removed as blob is not pointing to it anymore */ + spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno != 0); +} + +static void +bs_load_custom_cluster_size(void) +{ + struct spdk_blob_store *bs; + struct spdk_bs_dev *dev; + struct spdk_bs_super_block *super_block; + struct spdk_bs_opts opts; + uint32_t custom_cluster_size = 4194304; /* 4MiB */ + uint32_t cluster_sz; + uint64_t total_clusters; + + dev = init_dev(); + spdk_bs_opts_init(&opts); + opts.cluster_sz = custom_cluster_size; + snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); + + /* Initialize a new blob store */ + spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_bs != NULL); + bs = g_bs; + cluster_sz = bs->cluster_sz; + 
total_clusters = bs->total_clusters; + + /* Unload the blob store */ + spdk_bs_unload(bs, bs_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + g_bs = NULL; + g_blob = NULL; + g_blobid = 0; + + super_block = (struct spdk_bs_super_block *)g_dev_buffer; + CU_ASSERT(super_block->clean == 1); + + /* Load an existing blob store */ + dev = init_dev(); + spdk_bs_opts_init(&opts); + snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); + spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_bs != NULL); + bs = g_bs; + /* Compare cluster size and number to one after initialization */ + CU_ASSERT(cluster_sz == bs->cluster_sz); + CU_ASSERT(total_clusters == bs->total_clusters); + + super_block = (struct spdk_bs_super_block *)g_dev_buffer; + CU_ASSERT(super_block->clean == 1); + CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen); + + spdk_bs_unload(bs, bs_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(super_block->clean == 1); + g_bs = NULL; +} + +static void +bs_type(void) +{ + struct spdk_blob_store *bs; + struct spdk_bs_dev *dev; + struct spdk_bs_opts opts; + + dev = init_dev(); + spdk_bs_opts_init(&opts); + snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); + + /* Initialize a new blob store */ + spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_bs != NULL); + bs = g_bs; + + /* Unload the blob store */ + spdk_bs_unload(bs, bs_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + g_bs = NULL; + g_blob = NULL; + g_blobid = 0; + + /* Load non existing blobstore type */ + dev = init_dev(); + snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING"); + spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno != 0); + + /* Load with empty blobstore type */ 
+ dev = init_dev(); + memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); + spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_bs != NULL); + bs = g_bs; + + spdk_bs_unload(bs, bs_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + g_bs = NULL; + + /* Initialize a new blob store with empty bstype */ + dev = init_dev(); + memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); + spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_bs != NULL); + bs = g_bs; + + spdk_bs_unload(bs, bs_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + g_bs = NULL; + + /* Load non existing blobstore type */ + dev = init_dev(); + snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING"); + spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno != 0); + + /* Load with empty blobstore type */ + dev = init_dev(); + memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); + spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_bs != NULL); + bs = g_bs; + + spdk_bs_unload(bs, bs_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + g_bs = NULL; +} + +static void +bs_super_block(void) +{ + struct spdk_blob_store *bs; + struct spdk_bs_dev *dev; + struct spdk_bs_super_block *super_block; + struct spdk_bs_opts opts; + struct spdk_bs_super_block_ver1 super_block_v1; + + dev = init_dev(); + spdk_bs_opts_init(&opts); + snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); + + /* Initialize a new blob store */ + spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_bs != NULL); + bs = g_bs; + + /* Unload the blob store */ + spdk_bs_unload(bs, bs_op_complete, 
NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + g_bs = NULL; + g_blob = NULL; + g_blobid = 0; + + /* Load an existing blob store with version newer than supported */ + super_block = (struct spdk_bs_super_block *)g_dev_buffer; + super_block->version++; + + dev = init_dev(); + memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); + spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno != 0); + + /* Create a new blob store with super block version 1 */ + dev = init_dev(); + super_block_v1.version = 1; + memcpy(super_block_v1.signature, "SPDKBLOB", sizeof(super_block_v1.signature)); + super_block_v1.length = 0x1000; + super_block_v1.clean = 1; + super_block_v1.super_blob = 0xFFFFFFFFFFFFFFFF; + super_block_v1.cluster_size = 0x100000; + super_block_v1.used_page_mask_start = 0x01; + super_block_v1.used_page_mask_len = 0x01; + super_block_v1.used_cluster_mask_start = 0x02; + super_block_v1.used_cluster_mask_len = 0x01; + super_block_v1.md_start = 0x03; + super_block_v1.md_len = 0x40; + memset(super_block_v1.reserved, 0, 4036); + super_block_v1.crc = blob_md_page_calc_crc(&super_block_v1); + memcpy(g_dev_buffer, &super_block_v1, sizeof(struct spdk_bs_super_block_ver1)); + + memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); + spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_bs != NULL); + bs = g_bs; + + spdk_bs_unload(bs, bs_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + g_bs = NULL; +} + +/* + * Create a blobstore and then unload it. + */ +static void +bs_unload(void) +{ + struct spdk_blob_store *bs = g_bs; + struct spdk_blob *blob; + + /* Create a blob and open it. 
*/ + blob = ut_blob_create_and_open(bs, NULL); + + /* Try to unload blobstore, should fail with open blob */ + g_bserrno = -1; + spdk_bs_unload(bs, bs_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == -EBUSY); + SPDK_CU_ASSERT_FATAL(g_bs != NULL); + + /* Close the blob, then successfully unload blobstore */ + g_bserrno = -1; + spdk_blob_close(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); +} + +/* + * Create a blobstore with a cluster size different than the default, and ensure it is + * persisted. + */ +static void +bs_cluster_sz(void) +{ + struct spdk_blob_store *bs; + struct spdk_bs_dev *dev; + struct spdk_bs_opts opts; + uint32_t cluster_sz; + + /* Set cluster size to zero */ + dev = init_dev(); + spdk_bs_opts_init(&opts); + opts.cluster_sz = 0; + + /* Initialize a new blob store */ + spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == -EINVAL); + SPDK_CU_ASSERT_FATAL(g_bs == NULL); + + /* + * Set cluster size to blobstore page size, + * to work it is required to be at least twice the blobstore page size. + */ + dev = init_dev(); + spdk_bs_opts_init(&opts); + opts.cluster_sz = SPDK_BS_PAGE_SIZE; + + /* Initialize a new blob store */ + spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == -ENOMEM); + SPDK_CU_ASSERT_FATAL(g_bs == NULL); + + /* + * Set cluster size to lower than page size, + * to work it is required to be at least twice the blobstore page size. 
+ */ + dev = init_dev(); + spdk_bs_opts_init(&opts); + opts.cluster_sz = SPDK_BS_PAGE_SIZE - 1; + + /* Initialize a new blob store */ + spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == -EINVAL); + SPDK_CU_ASSERT_FATAL(g_bs == NULL); + + /* Set cluster size to twice the default */ + dev = init_dev(); + spdk_bs_opts_init(&opts); + opts.cluster_sz *= 2; + cluster_sz = opts.cluster_sz; + + /* Initialize a new blob store */ + spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_bs != NULL); + bs = g_bs; + + CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); + + ut_bs_reload(&bs, &opts); + + CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); + + spdk_bs_unload(bs, bs_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + g_bs = NULL; +} + +/* + * Create a blobstore, reload it and ensure total usable cluster count + * stays the same. + */ +static void +bs_usable_clusters(void) +{ + struct spdk_blob_store *bs = g_bs; + struct spdk_blob *blob; + uint32_t clusters; + int i; + + + clusters = spdk_bs_total_data_cluster_count(bs); + + ut_bs_reload(&bs, NULL); + + CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters); + + /* Create and resize blobs to make sure that useable cluster count won't change */ + for (i = 0; i < 4; i++) { + g_bserrno = -1; + g_blobid = SPDK_BLOBID_INVALID; + blob = ut_blob_create_and_open(bs, NULL); + + spdk_blob_resize(blob, 10, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + g_bserrno = -1; + spdk_blob_close(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters); + } + + /* Reload the blob store to make sure that nothing changed */ + ut_bs_reload(&bs, NULL); + + CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters); +} + +/* + * Test resizing of the metadata blob. 
This requires creating enough blobs + * so that one cluster is not enough to fit the metadata for those blobs. + * To induce this condition to happen more quickly, we reduce the cluster + * size to 16KB, which means only 4 4KB blob metadata pages can fit. + */ +static void +bs_resize_md(void) +{ + struct spdk_blob_store *bs; + const int CLUSTER_PAGE_COUNT = 4; + const int NUM_BLOBS = CLUSTER_PAGE_COUNT * 4; + struct spdk_bs_dev *dev; + struct spdk_bs_opts opts; + struct spdk_blob *blob; + struct spdk_blob_opts blob_opts; + uint32_t cluster_sz; + spdk_blob_id blobids[NUM_BLOBS]; + int i; + + + dev = init_dev(); + spdk_bs_opts_init(&opts); + opts.cluster_sz = CLUSTER_PAGE_COUNT * 4096; + cluster_sz = opts.cluster_sz; + + /* Initialize a new blob store */ + spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_bs != NULL); + bs = g_bs; + + CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); + + ut_spdk_blob_opts_init(&blob_opts); + + for (i = 0; i < NUM_BLOBS; i++) { + g_bserrno = -1; + g_blobid = SPDK_BLOBID_INVALID; + spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + blobids[i] = g_blobid; + } + + ut_bs_reload(&bs, &opts); + + CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); + + for (i = 0; i < NUM_BLOBS; i++) { + g_bserrno = -1; + g_blob = NULL; + spdk_bs_open_blob(bs, blobids[i], blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blob != NULL); + blob = g_blob; + g_bserrno = -1; + spdk_blob_close(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + } + + spdk_bs_unload(bs, bs_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + g_bs = NULL; +} + +static void +bs_destroy(void) +{ + struct spdk_blob_store *bs; + struct spdk_bs_dev *dev; + + /* Initialize a new blob store */ + 
dev = init_dev(); + spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_bs != NULL); + bs = g_bs; + + /* Destroy the blob store */ + g_bserrno = -1; + spdk_bs_destroy(bs, bs_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + /* Loading an non-existent blob store should fail. */ + g_bs = NULL; + dev = init_dev(); + + g_bserrno = 0; + spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno != 0); +} + +/* Try to hit all of the corner cases associated with serializing + * a blob to disk + */ +static void +blob_serialize_test(void) +{ + struct spdk_bs_dev *dev; + struct spdk_bs_opts opts; + struct spdk_blob_store *bs; + spdk_blob_id blobid[2]; + struct spdk_blob *blob[2]; + uint64_t i; + char *value; + int rc; + + dev = init_dev(); + + /* Initialize a new blobstore with very small clusters */ + spdk_bs_opts_init(&opts); + opts.cluster_sz = dev->blocklen * 8; + spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_bs != NULL); + bs = g_bs; + + /* Create and open two blobs */ + for (i = 0; i < 2; i++) { + blob[i] = ut_blob_create_and_open(bs, NULL); + blobid[i] = spdk_blob_get_id(blob[i]); + + /* Set a fairly large xattr on both blobs to eat up + * metadata space + */ + value = calloc(dev->blocklen - 64, sizeof(char)); + SPDK_CU_ASSERT_FATAL(value != NULL); + memset(value, i, dev->blocklen / 2); + rc = spdk_blob_set_xattr(blob[i], "name", value, dev->blocklen - 64); + CU_ASSERT(rc == 0); + free(value); + } + + /* Resize the blobs, alternating 1 cluster at a time. + * This thwarts run length encoding and will cause spill + * over of the extents. 
+ */ + for (i = 0; i < 6; i++) { + spdk_blob_resize(blob[i % 2], (i / 2) + 1, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + } + + for (i = 0; i < 2; i++) { + spdk_blob_sync_md(blob[i], blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + } + + /* Close the blobs */ + for (i = 0; i < 2; i++) { + spdk_blob_close(blob[i], blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + } + + ut_bs_reload(&bs, &opts); + + for (i = 0; i < 2; i++) { + blob[i] = NULL; + + spdk_bs_open_blob(bs, blobid[i], blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blob != NULL); + blob[i] = g_blob; + + CU_ASSERT(spdk_blob_get_num_clusters(blob[i]) == 3); + + spdk_blob_close(blob[i], blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + } + + spdk_bs_unload(bs, bs_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + g_bs = NULL; +} + +static void +blob_crc(void) +{ + struct spdk_blob_store *bs = g_bs; + struct spdk_blob *blob; + spdk_blob_id blobid; + uint32_t page_num; + int index; + struct spdk_blob_md_page *page; + + blob = ut_blob_create_and_open(bs, NULL); + blobid = spdk_blob_get_id(blob); + + spdk_blob_close(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + page_num = bs_blobid_to_page(blobid); + index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num); + page = (struct spdk_blob_md_page *)&g_dev_buffer[index]; + page->crc = 0; + + spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == -EINVAL); + CU_ASSERT(g_blob == NULL); + g_bserrno = 0; + + spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == -EINVAL); +} + +static void +super_block_crc(void) +{ + struct spdk_blob_store *bs; + struct spdk_bs_dev *dev; + struct spdk_bs_super_block *super_block; + + dev = init_dev(); + spdk_bs_init(dev, NULL, 
bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_bs != NULL); + bs = g_bs; + + spdk_bs_unload(bs, bs_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + g_bs = NULL; + + super_block = (struct spdk_bs_super_block *)g_dev_buffer; + super_block->crc = 0; + dev = init_dev(); + + /* Load an existing blob store */ + g_bserrno = 0; + spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == -EILSEQ); +} + +/* For blob dirty shutdown test case we do the following sub-test cases: + * 1 Initialize new blob store and create 1 super blob with some xattrs, then we + * dirty shutdown and reload the blob store and verify the xattrs. + * 2 Resize the blob from 10 clusters to 20 clusters and then dirty shutdown, + * reload the blob store and verify the clusters number. + * 3 Create the second blob and then dirty shutdown, reload the blob store + * and verify the second blob. + * 4 Delete the second blob and then dirty shutdown, reload the blob store + * and verify the second blob is invalid. + * 5 Create the second blob again and also create the third blob, modify the + * md of second blob which makes the md invalid, and then dirty shutdown, + * reload the blob store verify the second blob, it should invalid and also + * verify the third blob, it should correct. 
+ */ +static void +blob_dirty_shutdown(void) +{ + int rc; + int index; + struct spdk_blob_store *bs = g_bs; + spdk_blob_id blobid1, blobid2, blobid3; + struct spdk_blob *blob = g_blob; + uint64_t length; + uint64_t free_clusters; + const void *value; + size_t value_len; + uint32_t page_num; + struct spdk_blob_md_page *page; + struct spdk_blob_opts blob_opts; + + /* Create first blob */ + blobid1 = spdk_blob_get_id(blob); + + /* Set some xattrs */ + rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); + CU_ASSERT(rc == 0); + + length = 2345; + rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); + CU_ASSERT(rc == 0); + + /* Put xattr that fits exactly single page. + * This results in adding additional pages to MD. + * First is flags and smaller xattr, second the large xattr, + * third are just the extents. + */ + size_t xattr_length = 4072 - sizeof(struct spdk_blob_md_descriptor_xattr) - + strlen("large_xattr"); + char *xattr = calloc(xattr_length, sizeof(char)); + SPDK_CU_ASSERT_FATAL(xattr != NULL); + rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); + free(xattr); + SPDK_CU_ASSERT_FATAL(rc == 0); + + /* Resize the blob */ + spdk_blob_resize(blob, 10, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + /* Set the blob as the super blob */ + spdk_bs_set_super(bs, blobid1, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + free_clusters = spdk_bs_free_cluster_count(bs); + + spdk_blob_close(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + blob = NULL; + g_blob = NULL; + g_blobid = SPDK_BLOBID_INVALID; + + ut_bs_dirty_load(&bs, NULL); + + /* Get the super blob */ + spdk_bs_get_super(bs, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(blobid1 == g_blobid); + + spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blob != 
NULL); + blob = g_blob; + + CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); + + /* Get the xattrs */ + value = NULL; + rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); + CU_ASSERT(rc == 0); + SPDK_CU_ASSERT_FATAL(value != NULL); + CU_ASSERT(*(uint64_t *)value == length); + CU_ASSERT(value_len == 8); + CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); + + /* Resize the blob */ + spdk_blob_resize(blob, 20, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + free_clusters = spdk_bs_free_cluster_count(bs); + + spdk_blob_close(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + blob = NULL; + g_blob = NULL; + g_blobid = SPDK_BLOBID_INVALID; + + ut_bs_dirty_load(&bs, NULL); + + spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blob != NULL); + blob = g_blob; + CU_ASSERT(spdk_blob_get_num_clusters(blob) == 20); + CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); + + spdk_blob_close(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + blob = NULL; + g_blob = NULL; + g_blobid = SPDK_BLOBID_INVALID; + + /* Create second blob */ + blob = ut_blob_create_and_open(bs, NULL); + blobid2 = spdk_blob_get_id(blob); + + /* Set some xattrs */ + rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1); + CU_ASSERT(rc == 0); + + length = 5432; + rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); + CU_ASSERT(rc == 0); + + /* Resize the blob */ + spdk_blob_resize(blob, 10, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + free_clusters = spdk_bs_free_cluster_count(bs); + + spdk_blob_close(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + blob = NULL; + g_blob = NULL; + g_blobid = SPDK_BLOBID_INVALID; + + ut_bs_dirty_load(&bs, NULL); + + spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); + 
poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blob != NULL); + blob = g_blob; + + /* Get the xattrs */ + value = NULL; + rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); + CU_ASSERT(rc == 0); + SPDK_CU_ASSERT_FATAL(value != NULL); + CU_ASSERT(*(uint64_t *)value == length); + CU_ASSERT(value_len == 8); + CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); + CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); + + ut_blob_close_and_delete(bs, blob); + + free_clusters = spdk_bs_free_cluster_count(bs); + + ut_bs_dirty_load(&bs, NULL); + + spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno != 0); + CU_ASSERT(g_blob == NULL); + + spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blob != NULL); + blob = g_blob; + CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); + spdk_blob_close(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + ut_bs_reload(&bs, NULL); + + /* Create second blob */ + ut_spdk_blob_opts_init(&blob_opts); + spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + blobid2 = g_blobid; + + /* Create third blob */ + spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + blobid3 = g_blobid; + + spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blob != NULL); + blob = g_blob; + + /* Set some xattrs for second blob */ + rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1); + CU_ASSERT(rc == 0); + + length = 5432; + rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); + CU_ASSERT(rc == 0); + + spdk_blob_close(blob, 
blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + blob = NULL; + g_blob = NULL; + g_blobid = SPDK_BLOBID_INVALID; + + spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blob != NULL); + blob = g_blob; + + /* Set some xattrs for third blob */ + rc = spdk_blob_set_xattr(blob, "name", "log2.txt", strlen("log2.txt") + 1); + CU_ASSERT(rc == 0); + + length = 5432; + rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); + CU_ASSERT(rc == 0); + + spdk_blob_close(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + blob = NULL; + g_blob = NULL; + g_blobid = SPDK_BLOBID_INVALID; + + /* Mark second blob as invalid */ + page_num = bs_blobid_to_page(blobid2); + + index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num); + page = (struct spdk_blob_md_page *)&g_dev_buffer[index]; + page->sequence_num = 1; + page->crc = blob_md_page_calc_crc(page); + + free_clusters = spdk_bs_free_cluster_count(bs); + + ut_bs_dirty_load(&bs, NULL); + + spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno != 0); + CU_ASSERT(g_blob == NULL); + + spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blob != NULL); + blob = g_blob; + + CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); +} + +static void +blob_flags(void) +{ + struct spdk_blob_store *bs = g_bs; + spdk_blob_id blobid_invalid, blobid_data_ro, blobid_md_ro; + struct spdk_blob *blob_invalid, *blob_data_ro, *blob_md_ro; + struct spdk_blob_opts blob_opts; + int rc; + + /* Create three blobs - one each for testing invalid, data_ro and md_ro flags. 
*/ + blob_invalid = ut_blob_create_and_open(bs, NULL); + blobid_invalid = spdk_blob_get_id(blob_invalid); + + blob_data_ro = ut_blob_create_and_open(bs, NULL); + blobid_data_ro = spdk_blob_get_id(blob_data_ro); + + ut_spdk_blob_opts_init(&blob_opts); + blob_opts.clear_method = BLOB_CLEAR_WITH_WRITE_ZEROES; + blob_md_ro = ut_blob_create_and_open(bs, &blob_opts); + blobid_md_ro = spdk_blob_get_id(blob_md_ro); + CU_ASSERT((blob_md_ro->md_ro_flags & SPDK_BLOB_MD_RO_FLAGS_MASK) == BLOB_CLEAR_WITH_WRITE_ZEROES); + + /* Change the size of blob_data_ro to check if flags are serialized + * when blob has non zero number of extents */ + spdk_blob_resize(blob_data_ro, 10, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + /* Set the xattr to check if flags are serialized + * when blob has non zero number of xattrs */ + rc = spdk_blob_set_xattr(blob_md_ro, "name", "log.txt", strlen("log.txt") + 1); + CU_ASSERT(rc == 0); + + blob_invalid->invalid_flags = (1ULL << 63); + blob_invalid->state = SPDK_BLOB_STATE_DIRTY; + blob_data_ro->data_ro_flags = (1ULL << 62); + blob_data_ro->state = SPDK_BLOB_STATE_DIRTY; + blob_md_ro->md_ro_flags = (1ULL << 61); + blob_md_ro->state = SPDK_BLOB_STATE_DIRTY; + + g_bserrno = -1; + spdk_blob_sync_md(blob_invalid, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + g_bserrno = -1; + spdk_blob_sync_md(blob_data_ro, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + g_bserrno = -1; + spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + g_bserrno = -1; + spdk_blob_close(blob_invalid, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + blob_invalid = NULL; + g_bserrno = -1; + spdk_blob_close(blob_data_ro, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + blob_data_ro = NULL; + g_bserrno = -1; + spdk_blob_close(blob_md_ro, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno 
== 0); + blob_md_ro = NULL; + + g_blob = NULL; + g_blobid = SPDK_BLOBID_INVALID; + + ut_bs_reload(&bs, NULL); + + g_blob = NULL; + g_bserrno = 0; + spdk_bs_open_blob(bs, blobid_invalid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno != 0); + CU_ASSERT(g_blob == NULL); + + g_blob = NULL; + g_bserrno = -1; + spdk_bs_open_blob(bs, blobid_data_ro, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + blob_data_ro = g_blob; + /* If an unknown data_ro flag was found, the blob should be marked both data and md read-only. */ + CU_ASSERT(blob_data_ro->data_ro == true); + CU_ASSERT(blob_data_ro->md_ro == true); + CU_ASSERT(spdk_blob_get_num_clusters(blob_data_ro) == 10); + + g_blob = NULL; + g_bserrno = -1; + spdk_bs_open_blob(bs, blobid_md_ro, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + blob_md_ro = g_blob; + CU_ASSERT(blob_md_ro->data_ro == false); + CU_ASSERT(blob_md_ro->md_ro == true); + + g_bserrno = -1; + spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + ut_blob_close_and_delete(bs, blob_data_ro); + ut_blob_close_and_delete(bs, blob_md_ro); +} + +static void +bs_version(void) +{ + struct spdk_bs_super_block *super; + struct spdk_blob_store *bs = g_bs; + struct spdk_bs_dev *dev; + struct spdk_blob *blob; + struct spdk_blob_opts blob_opts; + spdk_blob_id blobid; + + /* Unload the blob store */ + spdk_bs_unload(bs, bs_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + g_bs = NULL; + + /* + * Change the bs version on disk. This will allow us to + * test that the version does not get modified automatically + * when loading and unloading the blobstore. 
+ */ + super = (struct spdk_bs_super_block *)&g_dev_buffer[0]; + CU_ASSERT(super->version == SPDK_BS_VERSION); + CU_ASSERT(super->clean == 1); + super->version = 2; + /* + * Version 2 metadata does not have a used blobid mask, so clear + * those fields in the super block and zero the corresponding + * region on "disk". We will use this to ensure blob IDs are + * correctly reconstructed. + */ + memset(&g_dev_buffer[super->used_blobid_mask_start * SPDK_BS_PAGE_SIZE], 0, + super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE); + super->used_blobid_mask_start = 0; + super->used_blobid_mask_len = 0; + super->crc = blob_md_page_calc_crc(super); + + /* Load an existing blob store */ + dev = init_dev(); + spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_bs != NULL); + CU_ASSERT(super->clean == 1); + bs = g_bs; + + /* + * Create a blob - just to make sure that when we unload it + * results in writing the super block (since metadata pages + * were allocated. 
+ */ + ut_spdk_blob_opts_init(&blob_opts); + spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + blobid = g_blobid; + + /* Unload the blob store */ + spdk_bs_unload(bs, bs_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + g_bs = NULL; + CU_ASSERT(super->version == 2); + CU_ASSERT(super->used_blobid_mask_start == 0); + CU_ASSERT(super->used_blobid_mask_len == 0); + + dev = init_dev(); + spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_bs != NULL); + bs = g_bs; + + g_blob = NULL; + spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blob != NULL); + blob = g_blob; + + ut_blob_close_and_delete(bs, blob); + + CU_ASSERT(super->version == 2); + CU_ASSERT(super->used_blobid_mask_start == 0); + CU_ASSERT(super->used_blobid_mask_len == 0); +} + +static void +blob_set_xattrs_test(void) +{ + struct spdk_blob_store *bs = g_bs; + struct spdk_blob *blob; + struct spdk_blob_opts opts; + const void *value; + size_t value_len; + char *xattr; + size_t xattr_length; + int rc; + + /* Create blob with extra attributes */ + ut_spdk_blob_opts_init(&opts); + + opts.xattrs.names = g_xattr_names; + opts.xattrs.get_value = _get_xattr_value; + opts.xattrs.count = 3; + opts.xattrs.ctx = &g_ctx; + + blob = ut_blob_create_and_open(bs, &opts); + + /* Get the xattrs */ + value = NULL; + + rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len); + CU_ASSERT(rc == 0); + SPDK_CU_ASSERT_FATAL(value != NULL); + CU_ASSERT(value_len == strlen(g_xattr_values[0])); + CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len); + + rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len); + CU_ASSERT(rc == 0); + SPDK_CU_ASSERT_FATAL(value != NULL); + CU_ASSERT(value_len == 
strlen(g_xattr_values[1])); + CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len); + + rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len); + CU_ASSERT(rc == 0); + SPDK_CU_ASSERT_FATAL(value != NULL); + CU_ASSERT(value_len == strlen(g_xattr_values[2])); + CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len); + + /* Try to get non existing attribute */ + + rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len); + CU_ASSERT(rc == -ENOENT); + + /* Try xattr exceeding maximum length of descriptor in single page */ + xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) - + strlen("large_xattr") + 1; + xattr = calloc(xattr_length, sizeof(char)); + SPDK_CU_ASSERT_FATAL(xattr != NULL); + rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); + free(xattr); + SPDK_CU_ASSERT_FATAL(rc == -ENOMEM); + + spdk_blob_close(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + blob = NULL; + g_blob = NULL; + g_blobid = SPDK_BLOBID_INVALID; + + /* NULL callback */ + ut_spdk_blob_opts_init(&opts); + opts.xattrs.names = g_xattr_names; + opts.xattrs.get_value = NULL; + opts.xattrs.count = 1; + opts.xattrs.ctx = &g_ctx; + + spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == -EINVAL); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + + /* NULL values */ + ut_spdk_blob_opts_init(&opts); + opts.xattrs.names = g_xattr_names; + opts.xattrs.get_value = _get_xattr_value_null; + opts.xattrs.count = 1; + opts.xattrs.ctx = NULL; + + spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == -EINVAL); +} + +static void +blob_thin_prov_alloc(void) +{ + struct spdk_blob_store *bs = g_bs; + struct spdk_blob *blob; + struct spdk_blob_opts opts; + spdk_blob_id blobid; + uint64_t free_clusters; + + free_clusters = spdk_bs_free_cluster_count(bs); + + /* Set blob 
as thin provisioned */ + ut_spdk_blob_opts_init(&opts); + opts.thin_provision = true; + + blob = ut_blob_create_and_open(bs, &opts); + blobid = spdk_blob_get_id(blob); + CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); + + CU_ASSERT(blob->active.num_clusters == 0); + CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0); + + /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */ + spdk_blob_resize(blob, 5, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); + CU_ASSERT(blob->active.num_clusters == 5); + CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); + + /* Grow it to 1TB - still unallocated */ + spdk_blob_resize(blob, 262144, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); + CU_ASSERT(blob->active.num_clusters == 262144); + CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144); + + spdk_blob_sync_md(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + /* Sync must not change anything */ + CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); + CU_ASSERT(blob->active.num_clusters == 262144); + CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144); + /* Since clusters are not allocated, + * number of metadata pages is expected to be minimal. 
+ */ + CU_ASSERT(blob->active.num_pages == 1); + + /* Shrink the blob to 3 clusters - still unallocated */ + spdk_blob_resize(blob, 3, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); + CU_ASSERT(blob->active.num_clusters == 3); + CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3); + + spdk_blob_sync_md(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + /* Sync must not change anything */ + CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); + CU_ASSERT(blob->active.num_clusters == 3); + CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3); + + spdk_blob_close(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + ut_bs_reload(&bs, NULL); + + spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + blob = g_blob; + + /* Check that clusters allocation and size is still the same */ + CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); + CU_ASSERT(blob->active.num_clusters == 3); + + ut_blob_close_and_delete(bs, blob); +} + +static void +blob_insert_cluster_msg_test(void) +{ + struct spdk_blob_store *bs = g_bs; + struct spdk_blob *blob; + struct spdk_blob_opts opts; + spdk_blob_id blobid; + uint64_t free_clusters; + uint64_t new_cluster = 0; + uint32_t cluster_num = 3; + uint32_t extent_page = 0; + + free_clusters = spdk_bs_free_cluster_count(bs); + + /* Set blob as thin provisioned */ + ut_spdk_blob_opts_init(&opts); + opts.thin_provision = true; + opts.num_clusters = 4; + + blob = ut_blob_create_and_open(bs, &opts); + blobid = spdk_blob_get_id(blob); + CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); + + CU_ASSERT(blob->active.num_clusters == 4); + CU_ASSERT(spdk_blob_get_num_clusters(blob) == 4); + CU_ASSERT(blob->active.clusters[cluster_num] == 0); + + /* Specify cluster_num to allocate and new_cluster will 
be returned to insert on md_thread. + * This is to simulate behaviour when cluster is allocated after blob creation. + * Such as _spdk_bs_allocate_and_copy_cluster(). */ + bs_allocate_cluster(blob, cluster_num, &new_cluster, &extent_page, false); + CU_ASSERT(blob->active.clusters[cluster_num] == 0); + + blob_insert_cluster_on_md_thread(blob, cluster_num, new_cluster, extent_page, + blob_op_complete, NULL); + poll_threads(); + + CU_ASSERT(blob->active.clusters[cluster_num] != 0); + + spdk_blob_close(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + ut_bs_reload(&bs, NULL); + + spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + blob = g_blob; + + CU_ASSERT(blob->active.clusters[cluster_num] != 0); + + ut_blob_close_and_delete(bs, blob); +} + +static void +blob_thin_prov_rw(void) +{ + static const uint8_t zero[10 * 4096] = { 0 }; + struct spdk_blob_store *bs = g_bs; + struct spdk_blob *blob; + struct spdk_io_channel *channel, *channel_thread1; + struct spdk_blob_opts opts; + uint64_t free_clusters; + uint64_t page_size; + uint8_t payload_read[10 * 4096]; + uint8_t payload_write[10 * 4096]; + uint64_t write_bytes; + uint64_t read_bytes; + + free_clusters = spdk_bs_free_cluster_count(bs); + page_size = spdk_bs_get_page_size(bs); + + channel = spdk_bs_alloc_io_channel(bs); + CU_ASSERT(channel != NULL); + + ut_spdk_blob_opts_init(&opts); + opts.thin_provision = true; + + blob = ut_blob_create_and_open(bs, &opts); + CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); + + CU_ASSERT(blob->active.num_clusters == 0); + + /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. 
*/ + spdk_blob_resize(blob, 5, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); + CU_ASSERT(blob->active.num_clusters == 5); + + spdk_blob_sync_md(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + /* Sync must not change anything */ + CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); + CU_ASSERT(blob->active.num_clusters == 5); + + /* Payload should be all zeros from unallocated clusters */ + memset(payload_read, 0xFF, sizeof(payload_read)); + spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); + + write_bytes = g_dev_write_bytes; + read_bytes = g_dev_read_bytes; + + /* Perform write on thread 1. That will allocate cluster on thread 0 via send_msg */ + set_thread(1); + channel_thread1 = spdk_bs_alloc_io_channel(bs); + CU_ASSERT(channel_thread1 != NULL); + memset(payload_write, 0xE5, sizeof(payload_write)); + spdk_blob_io_write(blob, channel_thread1, payload_write, 4, 10, blob_op_complete, NULL); + CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs)); + /* Perform write on thread 0. That will try to allocate cluster, + * but fail due to another thread issuing the cluster allocation first. 
*/ + set_thread(0); + memset(payload_write, 0xE5, sizeof(payload_write)); + spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL); + CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs)); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs)); + /* For thin-provisioned blob we need to write 20 pages plus one page metadata and + * read 0 bytes */ + if (g_use_extent_table) { + /* Add one more page for EXTENT_PAGE write */ + CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 22); + } else { + CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 21); + } + CU_ASSERT(g_dev_read_bytes - read_bytes == 0); + + spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); + + ut_blob_close_and_delete(bs, blob); + CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); + + set_thread(1); + spdk_bs_free_io_channel(channel_thread1); + set_thread(0); + spdk_bs_free_io_channel(channel); + poll_threads(); + g_blob = NULL; + g_blobid = 0; +} + +static void +blob_thin_prov_rle(void) +{ + static const uint8_t zero[10 * 4096] = { 0 }; + struct spdk_blob_store *bs = g_bs; + struct spdk_blob *blob; + struct spdk_io_channel *channel; + struct spdk_blob_opts opts; + spdk_blob_id blobid; + uint64_t free_clusters; + uint64_t page_size; + uint8_t payload_read[10 * 4096]; + uint8_t payload_write[10 * 4096]; + uint64_t write_bytes; + uint64_t read_bytes; + uint64_t io_unit; + + free_clusters = spdk_bs_free_cluster_count(bs); + page_size = spdk_bs_get_page_size(bs); + + ut_spdk_blob_opts_init(&opts); + opts.thin_provision = true; + opts.num_clusters = 5; + + blob = ut_blob_create_and_open(bs, &opts); + blobid = spdk_blob_get_id(blob); + CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); + + channel = spdk_bs_alloc_io_channel(bs); + CU_ASSERT(channel != 
NULL); + + /* Target specifically second cluster in a blob as first allocation */ + io_unit = bs_cluster_to_page(bs, 1) * bs_io_unit_per_page(bs); + + /* Payload should be all zeros from unallocated clusters */ + memset(payload_read, 0xFF, sizeof(payload_read)); + spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); + + write_bytes = g_dev_write_bytes; + read_bytes = g_dev_read_bytes; + + /* Issue write to second cluster in a blob */ + memset(payload_write, 0xE5, sizeof(payload_write)); + spdk_blob_io_write(blob, channel, payload_write, io_unit, 10, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs)); + /* For thin-provisioned blob we need to write 10 pages plus one page metadata and + * read 0 bytes */ + if (g_use_extent_table) { + /* Add one more page for EXTENT_PAGE write */ + CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12); + } else { + CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11); + } + CU_ASSERT(g_dev_read_bytes - read_bytes == 0); + + spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); + + spdk_bs_free_io_channel(channel); + poll_threads(); + + spdk_blob_close(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + ut_bs_reload(&bs, NULL); + + spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + blob = g_blob; + + channel = spdk_bs_alloc_io_channel(bs); + CU_ASSERT(channel != NULL); + + /* Read second cluster after blob reload to confirm data written */ + spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL); + 
poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); + + spdk_bs_free_io_channel(channel); + poll_threads(); + + ut_blob_close_and_delete(bs, blob); +} + +static void +blob_thin_prov_rw_iov(void) +{ + static const uint8_t zero[10 * 4096] = { 0 }; + struct spdk_blob_store *bs = g_bs; + struct spdk_blob *blob; + struct spdk_io_channel *channel; + struct spdk_blob_opts opts; + uint64_t free_clusters; + uint8_t payload_read[10 * 4096]; + uint8_t payload_write[10 * 4096]; + struct iovec iov_read[3]; + struct iovec iov_write[3]; + + free_clusters = spdk_bs_free_cluster_count(bs); + + channel = spdk_bs_alloc_io_channel(bs); + CU_ASSERT(channel != NULL); + + ut_spdk_blob_opts_init(&opts); + opts.thin_provision = true; + + blob = ut_blob_create_and_open(bs, &opts); + CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); + + CU_ASSERT(blob->active.num_clusters == 0); + + /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. 
*/ + spdk_blob_resize(blob, 5, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); + CU_ASSERT(blob->active.num_clusters == 5); + + spdk_blob_sync_md(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + /* Sync must not change anything */ + CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); + CU_ASSERT(blob->active.num_clusters == 5); + + /* Payload should be all zeros from unallocated clusters */ + memset(payload_read, 0xAA, sizeof(payload_read)); + iov_read[0].iov_base = payload_read; + iov_read[0].iov_len = 3 * 4096; + iov_read[1].iov_base = payload_read + 3 * 4096; + iov_read[1].iov_len = 4 * 4096; + iov_read[2].iov_base = payload_read + 7 * 4096; + iov_read[2].iov_len = 3 * 4096; + spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); + + memset(payload_write, 0xE5, sizeof(payload_write)); + iov_write[0].iov_base = payload_write; + iov_write[0].iov_len = 1 * 4096; + iov_write[1].iov_base = payload_write + 1 * 4096; + iov_write[1].iov_len = 5 * 4096; + iov_write[2].iov_base = payload_write + 6 * 4096; + iov_write[2].iov_len = 4 * 4096; + + spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + memset(payload_read, 0xAA, sizeof(payload_read)); + iov_read[0].iov_base = payload_read; + iov_read[0].iov_len = 3 * 4096; + iov_read[1].iov_base = payload_read + 3 * 4096; + iov_read[1].iov_len = 4 * 4096; + iov_read[2].iov_base = payload_read + 7 * 4096; + iov_read[2].iov_len = 3 * 4096; + spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); + + spdk_bs_free_io_channel(channel); + poll_threads(); + + 
ut_blob_close_and_delete(bs, blob); +} + +struct iter_ctx { + int current_iter; + spdk_blob_id blobid[4]; +}; + +static void +test_iter(void *arg, struct spdk_blob *blob, int bserrno) +{ + struct iter_ctx *iter_ctx = arg; + spdk_blob_id blobid; + + CU_ASSERT(bserrno == 0); + blobid = spdk_blob_get_id(blob); + CU_ASSERT(blobid == iter_ctx->blobid[iter_ctx->current_iter++]); +} + +static void +bs_load_iter_test(void) +{ + struct spdk_blob_store *bs; + struct spdk_bs_dev *dev; + struct iter_ctx iter_ctx = { 0 }; + struct spdk_blob *blob; + int i, rc; + struct spdk_bs_opts opts; + + dev = init_dev(); + spdk_bs_opts_init(&opts); + snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); + + /* Initialize a new blob store */ + spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_bs != NULL); + bs = g_bs; + + for (i = 0; i < 4; i++) { + blob = ut_blob_create_and_open(bs, NULL); + iter_ctx.blobid[i] = spdk_blob_get_id(blob); + + /* Just save the blobid as an xattr for testing purposes. */ + rc = spdk_blob_set_xattr(blob, "blobid", &iter_ctx.blobid[i], sizeof(spdk_blob_id)); + CU_ASSERT(rc == 0); + + /* Resize the blob */ + spdk_blob_resize(blob, i, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + spdk_blob_close(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + } + + g_bserrno = -1; + spdk_bs_unload(bs, bs_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + dev = init_dev(); + spdk_bs_opts_init(&opts); + snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); + opts.iter_cb_fn = test_iter; + opts.iter_cb_arg = &iter_ctx; + + /* Test blob iteration during load after a clean shutdown. 
*/ + spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_bs != NULL); + bs = g_bs; + + /* Dirty shutdown */ + bs_free(bs); + + dev = init_dev(); + spdk_bs_opts_init(&opts); + snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); + opts.iter_cb_fn = test_iter; + iter_ctx.current_iter = 0; + opts.iter_cb_arg = &iter_ctx; + + /* Test blob iteration during load after a dirty shutdown. */ + spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_bs != NULL); + bs = g_bs; + + spdk_bs_unload(bs, bs_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + g_bs = NULL; +} + +static void +blob_snapshot_rw(void) +{ + static const uint8_t zero[10 * 4096] = { 0 }; + struct spdk_blob_store *bs = g_bs; + struct spdk_blob *blob, *snapshot; + struct spdk_io_channel *channel; + struct spdk_blob_opts opts; + spdk_blob_id blobid, snapshotid; + uint64_t free_clusters; + uint64_t cluster_size; + uint64_t page_size; + uint8_t payload_read[10 * 4096]; + uint8_t payload_write[10 * 4096]; + uint64_t write_bytes; + uint64_t read_bytes; + + free_clusters = spdk_bs_free_cluster_count(bs); + cluster_size = spdk_bs_get_cluster_size(bs); + page_size = spdk_bs_get_page_size(bs); + + channel = spdk_bs_alloc_io_channel(bs); + CU_ASSERT(channel != NULL); + + ut_spdk_blob_opts_init(&opts); + opts.thin_provision = true; + opts.num_clusters = 5; + + blob = ut_blob_create_and_open(bs, &opts); + blobid = spdk_blob_get_id(blob); + CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); + + CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); + + memset(payload_read, 0xFF, sizeof(payload_read)); + spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); + + memset(payload_write, 0xE5, 
sizeof(payload_write)); + spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); + + /* Create snapshot from blob */ + spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + snapshotid = g_blobid; + + spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + snapshot = g_blob; + CU_ASSERT(snapshot->data_ro == true); + CU_ASSERT(snapshot->md_ro == true); + + CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5); + + write_bytes = g_dev_write_bytes; + read_bytes = g_dev_read_bytes; + + memset(payload_write, 0xAA, sizeof(payload_write)); + spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); + + /* For a clone we need to allocate and copy one cluster, update one page of metadata + * and then write 10 pages of payload. 
+ */ + if (g_use_extent_table) { + /* Add one more page for EXTENT_PAGE write */ + CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12 + cluster_size); + } else { + CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11 + cluster_size); + } + CU_ASSERT(g_dev_read_bytes - read_bytes == cluster_size); + + spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); + + /* Data on snapshot should not change after write to clone */ + memset(payload_write, 0xE5, sizeof(payload_write)); + spdk_blob_io_read(snapshot, channel, payload_read, 4, 10, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); + + ut_blob_close_and_delete(bs, blob); + ut_blob_close_and_delete(bs, snapshot); + + spdk_bs_free_io_channel(channel); + poll_threads(); + g_blob = NULL; + g_blobid = 0; +} + +static void +blob_snapshot_rw_iov(void) +{ + static const uint8_t zero[10 * 4096] = { 0 }; + struct spdk_blob_store *bs = g_bs; + struct spdk_blob *blob, *snapshot; + struct spdk_io_channel *channel; + struct spdk_blob_opts opts; + spdk_blob_id blobid, snapshotid; + uint64_t free_clusters; + uint8_t payload_read[10 * 4096]; + uint8_t payload_write[10 * 4096]; + struct iovec iov_read[3]; + struct iovec iov_write[3]; + + free_clusters = spdk_bs_free_cluster_count(bs); + + channel = spdk_bs_alloc_io_channel(bs); + CU_ASSERT(channel != NULL); + + ut_spdk_blob_opts_init(&opts); + opts.thin_provision = true; + opts.num_clusters = 5; + + blob = ut_blob_create_and_open(bs, &opts); + blobid = spdk_blob_get_id(blob); + CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); + + CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); + + /* Create snapshot from blob */ + spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + 
CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + snapshotid = g_blobid; + + spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + snapshot = g_blob; + CU_ASSERT(snapshot->data_ro == true); + CU_ASSERT(snapshot->md_ro == true); + CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5); + + /* Payload should be all zeros from unallocated clusters */ + memset(payload_read, 0xAA, sizeof(payload_read)); + iov_read[0].iov_base = payload_read; + iov_read[0].iov_len = 3 * 4096; + iov_read[1].iov_base = payload_read + 3 * 4096; + iov_read[1].iov_len = 4 * 4096; + iov_read[2].iov_base = payload_read + 7 * 4096; + iov_read[2].iov_len = 3 * 4096; + spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); + + memset(payload_write, 0xE5, sizeof(payload_write)); + iov_write[0].iov_base = payload_write; + iov_write[0].iov_len = 1 * 4096; + iov_write[1].iov_base = payload_write + 1 * 4096; + iov_write[1].iov_len = 5 * 4096; + iov_write[2].iov_base = payload_write + 6 * 4096; + iov_write[2].iov_len = 4 * 4096; + + spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + memset(payload_read, 0xAA, sizeof(payload_read)); + iov_read[0].iov_base = payload_read; + iov_read[0].iov_len = 3 * 4096; + iov_read[1].iov_base = payload_read + 3 * 4096; + iov_read[1].iov_len = 4 * 4096; + iov_read[2].iov_base = payload_read + 7 * 4096; + iov_read[2].iov_len = 3 * 4096; + spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); + + spdk_bs_free_io_channel(channel); + poll_threads(); + + ut_blob_close_and_delete(bs, blob); + ut_blob_close_and_delete(bs, snapshot); 
+} + +/** + * Inflate / decouple parent rw unit tests. + * + * -------------- + * original blob: 0 1 2 3 4 + * ,---------+---------+---------+---------+---------. + * snapshot |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx| - | + * +---------+---------+---------+---------+---------+ + * snapshot2 | - |yyyyyyyyy| - |yyyyyyyyy| - | + * +---------+---------+---------+---------+---------+ + * blob | - |zzzzzzzzz| - | - | - | + * '---------+---------+---------+---------+---------' + * . . . . . . + * -------- . . . . . . + * inflate: . . . . . . + * ,---------+---------+---------+---------+---------. + * blob |xxxxxxxxx|zzzzzzzzz|xxxxxxxxx|yyyyyyyyy|000000000| + * '---------+---------+---------+---------+---------' + * + * NOTE: needs to allocate 4 clusters, thin provisioning removed, dependency + * on snapshot2 and snapshot removed . . . + * . . . . . . + * ---------------- . . . . . . + * decouple parent: . . . . . . + * ,---------+---------+---------+---------+---------. + * snapshot |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx| - | + * +---------+---------+---------+---------+---------+ + * blob | - |zzzzzzzzz| - |yyyyyyyyy| - | + * '---------+---------+---------+---------+---------' + * + * NOTE: needs to allocate 1 cluster, 3 clusters unallocated, dependency + * on snapshot2 removed and on snapshot still exists. Snapshot2 + * should remain a clone of snapshot. 
+ */ +static void +_blob_inflate_rw(bool decouple_parent) +{ + struct spdk_blob_store *bs = g_bs; + struct spdk_blob *blob, *snapshot, *snapshot2; + struct spdk_io_channel *channel; + struct spdk_blob_opts opts; + spdk_blob_id blobid, snapshotid, snapshot2id; + uint64_t free_clusters; + uint64_t cluster_size; + + uint64_t payload_size; + uint8_t *payload_read; + uint8_t *payload_write; + uint8_t *payload_clone; + + uint64_t pages_per_cluster; + uint64_t pages_per_payload; + + int i; + spdk_blob_id ids[2]; + size_t count; + + free_clusters = spdk_bs_free_cluster_count(bs); + cluster_size = spdk_bs_get_cluster_size(bs); + pages_per_cluster = cluster_size / spdk_bs_get_page_size(bs); + pages_per_payload = pages_per_cluster * 5; + + payload_size = cluster_size * 5; + + payload_read = malloc(payload_size); + SPDK_CU_ASSERT_FATAL(payload_read != NULL); + + payload_write = malloc(payload_size); + SPDK_CU_ASSERT_FATAL(payload_write != NULL); + + payload_clone = malloc(payload_size); + SPDK_CU_ASSERT_FATAL(payload_clone != NULL); + + channel = spdk_bs_alloc_io_channel(bs); + SPDK_CU_ASSERT_FATAL(channel != NULL); + + /* Create blob */ + ut_spdk_blob_opts_init(&opts); + opts.thin_provision = true; + opts.num_clusters = 5; + + blob = ut_blob_create_and_open(bs, &opts); + blobid = spdk_blob_get_id(blob); + CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); + + CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); + + /* 1) Initial read should return zeroed payload */ + memset(payload_read, 0xFF, payload_size); + spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, + blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size)); + + /* Fill whole blob with a pattern, except last cluster (to be sure it + * isn't allocated) */ + memset(payload_write, 0xE5, payload_size - cluster_size); + spdk_blob_io_write(blob, channel, payload_write, 0, pages_per_payload - + pages_per_cluster, 
blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); + + /* 2) Create snapshot from blob (first level) */ + spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + snapshotid = g_blobid; + + spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + snapshot = g_blob; + CU_ASSERT(snapshot->data_ro == true); + CU_ASSERT(snapshot->md_ro == true); + + CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5); + + /* Write every second cluster with a pattern. + * + * Last cluster shouldn't be written, to be sure that snapshot nor clone + * doesn't allocate it. + * + * payload_clone stores expected result on "blob" read at the time and + * is used only to check data consistency on clone before and after + * inflation. Initially we fill it with a backing snapshots pattern + * used before. 
+ */ + memset(payload_clone, 0xE5, payload_size - cluster_size); + memset(payload_clone + payload_size - cluster_size, 0x00, cluster_size); + memset(payload_write, 0xAA, payload_size); + for (i = 1; i < 5; i += 2) { + spdk_blob_io_write(blob, channel, payload_write, i * pages_per_cluster, + pages_per_cluster, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + /* Update expected result */ + memcpy(payload_clone + (cluster_size * i), payload_write, + cluster_size); + } + CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); + + /* Check data consistency on clone */ + memset(payload_read, 0xFF, payload_size); + spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, + blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0); + + /* 3) Create second levels snapshot from blob */ + spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + snapshot2id = g_blobid; + + spdk_bs_open_blob(bs, snapshot2id, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + snapshot2 = g_blob; + CU_ASSERT(snapshot2->data_ro == true); + CU_ASSERT(snapshot2->md_ro == true); + + CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 5); + + CU_ASSERT(snapshot2->parent_id == snapshotid); + + /* Write one cluster on the top level blob. 
This cluster (1) covers + * already allocated cluster in the snapshot2, so shouldn't be inflated + * at all */ + spdk_blob_io_write(blob, channel, payload_write, pages_per_cluster, + pages_per_cluster, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + /* Update expected result */ + memcpy(payload_clone + cluster_size, payload_write, cluster_size); + + /* Check data consistency on clone */ + memset(payload_read, 0xFF, payload_size); + spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, + blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0); + + + /* Close all blobs */ + spdk_blob_close(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + spdk_blob_close(snapshot2, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + spdk_blob_close(snapshot, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + /* Check snapshot-clone relations */ + count = 2; + CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0); + CU_ASSERT(count == 1); + CU_ASSERT(ids[0] == snapshot2id); + + count = 2; + CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0); + CU_ASSERT(count == 1); + CU_ASSERT(ids[0] == blobid); + + CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshot2id); + + free_clusters = spdk_bs_free_cluster_count(bs); + if (!decouple_parent) { + /* Do full blob inflation */ + spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + /* All clusters should be inflated (except one already allocated + * in a top level blob) */ + CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 4); + + /* Check if relation tree updated correctly */ + count = 2; + CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0); + + /* snapshotid have one clone */ + CU_ASSERT(count == 1); + 
CU_ASSERT(ids[0] == snapshot2id); + + /* snapshot2id have no clones */ + count = 2; + CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0); + CU_ASSERT(count == 0); + + CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); + } else { + /* Decouple parent of blob */ + spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + /* Only one cluster from a parent should be inflated (second one + * is covered by a cluster written on a top level blob, and + * already allocated) */ + CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 1); + + /* Check if relation tree updated correctly */ + count = 2; + CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0); + + /* snapshotid have two clones now */ + CU_ASSERT(count == 2); + CU_ASSERT(ids[0] == blobid || ids[1] == blobid); + CU_ASSERT(ids[0] == snapshot2id || ids[1] == snapshot2id); + + /* snapshot2id have no clones */ + count = 2; + CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0); + CU_ASSERT(count == 0); + + CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); + } + + /* Try to delete snapshot2 (should pass) */ + spdk_bs_delete_blob(bs, snapshot2id, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + /* Try to delete base snapshot */ + spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + /* Reopen blob after snapshot deletion */ + spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + blob = g_blob; + + CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); + + /* Check data consistency on inflated blob */ + memset(payload_read, 0xFF, payload_size); + spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, + blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); 
+ CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0); + + spdk_bs_free_io_channel(channel); + poll_threads(); + + free(payload_read); + free(payload_write); + free(payload_clone); + + ut_blob_close_and_delete(bs, blob); +} + +static void +blob_inflate_rw(void) +{ + _blob_inflate_rw(false); + _blob_inflate_rw(true); +} + +/** + * Snapshot-clones relation test + * + * snapshot + * | + * +-----+-----+ + * | | + * blob(ro) snapshot2 + * | | + * clone2 clone + */ +static void +blob_relations(void) +{ + struct spdk_blob_store *bs; + struct spdk_bs_dev *dev; + struct spdk_bs_opts bs_opts; + struct spdk_blob_opts opts; + struct spdk_blob *blob, *snapshot, *snapshot2, *clone, *clone2; + spdk_blob_id blobid, cloneid, snapshotid, cloneid2, snapshotid2; + int rc; + size_t count; + spdk_blob_id ids[10] = {}; + + dev = init_dev(); + spdk_bs_opts_init(&bs_opts); + snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE"); + + spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_bs != NULL); + bs = g_bs; + + /* 1. Create blob with 10 clusters */ + + ut_spdk_blob_opts_init(&opts); + opts.num_clusters = 10; + + blob = ut_blob_create_and_open(bs, &opts); + blobid = spdk_blob_get_id(blob); + + CU_ASSERT(!spdk_blob_is_read_only(blob)); + CU_ASSERT(!spdk_blob_is_snapshot(blob)); + CU_ASSERT(!spdk_blob_is_clone(blob)); + CU_ASSERT(!spdk_blob_is_thin_provisioned(blob)); + + /* blob should not have underlying snapshot nor clones */ + CU_ASSERT(blob->parent_id == SPDK_BLOBID_INVALID); + CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); + count = SPDK_COUNTOF(ids); + rc = spdk_blob_get_clones(bs, blobid, ids, &count); + CU_ASSERT(rc == 0); + CU_ASSERT(count == 0); + + + /* 2. 
Create snapshot */ + + spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + snapshotid = g_blobid; + + spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + snapshot = g_blob; + + CU_ASSERT(spdk_blob_is_read_only(snapshot)); + CU_ASSERT(spdk_blob_is_snapshot(snapshot)); + CU_ASSERT(!spdk_blob_is_clone(snapshot)); + CU_ASSERT(snapshot->parent_id == SPDK_BLOBID_INVALID); + CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID); + + /* Check if original blob is converted to the clone of snapshot */ + CU_ASSERT(!spdk_blob_is_read_only(blob)); + CU_ASSERT(!spdk_blob_is_snapshot(blob)); + CU_ASSERT(spdk_blob_is_clone(blob)); + CU_ASSERT(spdk_blob_is_thin_provisioned(blob)); + CU_ASSERT(blob->parent_id == snapshotid); + CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); + + count = SPDK_COUNTOF(ids); + rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); + CU_ASSERT(rc == 0); + CU_ASSERT(count == 1); + CU_ASSERT(ids[0] == blobid); + + + /* 3. 
Create clone from snapshot */ + + spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + cloneid = g_blobid; + + spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + clone = g_blob; + + CU_ASSERT(!spdk_blob_is_read_only(clone)); + CU_ASSERT(!spdk_blob_is_snapshot(clone)); + CU_ASSERT(spdk_blob_is_clone(clone)); + CU_ASSERT(spdk_blob_is_thin_provisioned(clone)); + CU_ASSERT(clone->parent_id == snapshotid); + CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid); + + count = SPDK_COUNTOF(ids); + rc = spdk_blob_get_clones(bs, cloneid, ids, &count); + CU_ASSERT(rc == 0); + CU_ASSERT(count == 0); + + /* Check if clone is on the snapshot's list */ + count = SPDK_COUNTOF(ids); + rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); + CU_ASSERT(rc == 0); + CU_ASSERT(ids[0] == blobid || ids[1] == blobid); + CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid); + + + /* 4. 
Create snapshot of the clone */ + + spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + snapshotid2 = g_blobid; + + spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + snapshot2 = g_blob; + + CU_ASSERT(spdk_blob_is_read_only(snapshot2)); + CU_ASSERT(spdk_blob_is_snapshot(snapshot2)); + CU_ASSERT(spdk_blob_is_clone(snapshot2)); + CU_ASSERT(snapshot2->parent_id == snapshotid); + CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid); + + /* Check if clone is converted to the clone of snapshot2 and snapshot2 + * is a child of snapshot */ + CU_ASSERT(!spdk_blob_is_read_only(clone)); + CU_ASSERT(!spdk_blob_is_snapshot(clone)); + CU_ASSERT(spdk_blob_is_clone(clone)); + CU_ASSERT(spdk_blob_is_thin_provisioned(clone)); + CU_ASSERT(clone->parent_id == snapshotid2); + CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2); + + count = SPDK_COUNTOF(ids); + rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); + CU_ASSERT(rc == 0); + CU_ASSERT(count == 1); + CU_ASSERT(ids[0] == cloneid); + + + /* 5. 
Try to create clone from read only blob */ + + /* Mark blob as read only */ + spdk_blob_set_read_only(blob); + spdk_blob_sync_md(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + /* Check if previously created blob is read only clone */ + CU_ASSERT(spdk_blob_is_read_only(blob)); + CU_ASSERT(!spdk_blob_is_snapshot(blob)); + CU_ASSERT(spdk_blob_is_clone(blob)); + CU_ASSERT(spdk_blob_is_thin_provisioned(blob)); + + /* Create clone from read only blob */ + spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + cloneid2 = g_blobid; + + spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + clone2 = g_blob; + + CU_ASSERT(!spdk_blob_is_read_only(clone2)); + CU_ASSERT(!spdk_blob_is_snapshot(clone2)); + CU_ASSERT(spdk_blob_is_clone(clone2)); + CU_ASSERT(spdk_blob_is_thin_provisioned(clone2)); + + CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); + + count = SPDK_COUNTOF(ids); + rc = spdk_blob_get_clones(bs, blobid, ids, &count); + CU_ASSERT(rc == 0); + + CU_ASSERT(count == 1); + CU_ASSERT(ids[0] == cloneid2); + + /* Close blobs */ + + spdk_blob_close(clone2, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + spdk_blob_close(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + spdk_blob_close(clone, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + spdk_blob_close(snapshot, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + spdk_blob_close(snapshot2, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + /* Try to delete snapshot with more than 1 clone */ + spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno != 0); + + ut_bs_reload(&bs, 
&bs_opts); + + /* NULL ids array should return number of clones in count */ + count = SPDK_COUNTOF(ids); + rc = spdk_blob_get_clones(bs, snapshotid, NULL, &count); + CU_ASSERT(rc == -ENOMEM); + CU_ASSERT(count == 2); + + /* incorrect array size */ + count = 1; + rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); + CU_ASSERT(rc == -ENOMEM); + CU_ASSERT(count == 2); + + + /* Verify structure of loaded blob store */ + + /* snapshot */ + CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID); + + count = SPDK_COUNTOF(ids); + rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); + CU_ASSERT(rc == 0); + CU_ASSERT(count == 2); + CU_ASSERT(ids[0] == blobid || ids[1] == blobid); + CU_ASSERT(ids[0] == snapshotid2 || ids[1] == snapshotid2); + + /* blob */ + CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); + count = SPDK_COUNTOF(ids); + rc = spdk_blob_get_clones(bs, blobid, ids, &count); + CU_ASSERT(rc == 0); + CU_ASSERT(count == 1); + CU_ASSERT(ids[0] == cloneid2); + + /* clone */ + CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2); + count = SPDK_COUNTOF(ids); + rc = spdk_blob_get_clones(bs, cloneid, ids, &count); + CU_ASSERT(rc == 0); + CU_ASSERT(count == 0); + + /* snapshot2 */ + CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid); + count = SPDK_COUNTOF(ids); + rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); + CU_ASSERT(rc == 0); + CU_ASSERT(count == 1); + CU_ASSERT(ids[0] == cloneid); + + /* clone2 */ + CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); + count = SPDK_COUNTOF(ids); + rc = spdk_blob_get_clones(bs, cloneid2, ids, &count); + CU_ASSERT(rc == 0); + CU_ASSERT(count == 0); + + /* Try to delete blob that user should not be able to remove */ + + spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno != 0); + + /* Remove all blobs */ + + spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL); + 
poll_threads(); + CU_ASSERT(g_bserrno == 0); + + spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + spdk_bs_unload(bs, bs_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + g_bs = NULL; +} + +/** + * Snapshot-clones relation test 2 + * + * snapshot1 + * | + * snapshot2 + * | + * +-----+-----+ + * | | + * blob(ro) snapshot3 + * | | + * | snapshot4 + * | | | + * clone2 clone clone3 + */ +static void +blob_relations2(void) +{ + struct spdk_blob_store *bs; + struct spdk_bs_dev *dev; + struct spdk_bs_opts bs_opts; + struct spdk_blob_opts opts; + struct spdk_blob *blob, *snapshot1, *snapshot2, *snapshot3, *snapshot4, *clone, *clone2; + spdk_blob_id blobid, snapshotid1, snapshotid2, snapshotid3, snapshotid4, cloneid, cloneid2, + cloneid3; + int rc; + size_t count; + spdk_blob_id ids[10] = {}; + + dev = init_dev(); + spdk_bs_opts_init(&bs_opts); + snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE"); + + spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_bs != NULL); + bs = g_bs; + + /* 1. Create blob with 10 clusters */ + + ut_spdk_blob_opts_init(&opts); + opts.num_clusters = 10; + + blob = ut_blob_create_and_open(bs, &opts); + blobid = spdk_blob_get_id(blob); + + /* 2. 
Create snapshot1 */ + + spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + snapshotid1 = g_blobid; + + spdk_bs_open_blob(bs, snapshotid1, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + snapshot1 = g_blob; + + CU_ASSERT(snapshot1->parent_id == SPDK_BLOBID_INVALID); + CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid1) == SPDK_BLOBID_INVALID); + + CU_ASSERT(blob->parent_id == snapshotid1); + CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1); + + /* Check if blob is the clone of snapshot1 */ + CU_ASSERT(blob->parent_id == snapshotid1); + CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1); + + count = SPDK_COUNTOF(ids); + rc = spdk_blob_get_clones(bs, snapshotid1, ids, &count); + CU_ASSERT(rc == 0); + CU_ASSERT(count == 1); + CU_ASSERT(ids[0] == blobid); + + /* 3. Create another snapshot */ + + spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + snapshotid2 = g_blobid; + + spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + snapshot2 = g_blob; + + CU_ASSERT(spdk_blob_is_clone(snapshot2)); + CU_ASSERT(snapshot2->parent_id == snapshotid1); + CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid1); + + /* Check if snapshot2 is the clone of snapshot1 and blob + * is a child of snapshot2 */ + CU_ASSERT(blob->parent_id == snapshotid2); + CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2); + + count = SPDK_COUNTOF(ids); + rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); + CU_ASSERT(rc == 0); + CU_ASSERT(count == 1); + CU_ASSERT(ids[0] == blobid); + + /* 4. 
Create clone from snapshot */ + + spdk_bs_create_clone(bs, snapshotid2, NULL, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + cloneid = g_blobid; + + spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + clone = g_blob; + + CU_ASSERT(clone->parent_id == snapshotid2); + CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2); + + /* Check if clone is on the snapshot's list */ + count = SPDK_COUNTOF(ids); + rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); + CU_ASSERT(rc == 0); + CU_ASSERT(count == 2); + CU_ASSERT(ids[0] == blobid || ids[1] == blobid); + CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid); + + /* 5. Create snapshot of the clone */ + + spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + snapshotid3 = g_blobid; + + spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + snapshot3 = g_blob; + + CU_ASSERT(snapshot3->parent_id == snapshotid2); + CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2); + + /* Check if clone is converted to the clone of snapshot3 and snapshot3 + * is a child of snapshot2 */ + CU_ASSERT(clone->parent_id == snapshotid3); + CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3); + + count = SPDK_COUNTOF(ids); + rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count); + CU_ASSERT(rc == 0); + CU_ASSERT(count == 1); + CU_ASSERT(ids[0] == cloneid); + + /* 6. 
Create another snapshot of the clone */ + + spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + snapshotid4 = g_blobid; + + spdk_bs_open_blob(bs, snapshotid4, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + snapshot4 = g_blob; + + CU_ASSERT(snapshot4->parent_id == snapshotid3); + CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid4) == snapshotid3); + + /* Check if clone is converted to the clone of snapshot4 and snapshot4 + * is a child of snapshot3 */ + CU_ASSERT(clone->parent_id == snapshotid4); + CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid4); + + count = SPDK_COUNTOF(ids); + rc = spdk_blob_get_clones(bs, snapshotid4, ids, &count); + CU_ASSERT(rc == 0); + CU_ASSERT(count == 1); + CU_ASSERT(ids[0] == cloneid); + + /* 7. Remove snapshot 4 */ + + ut_blob_close_and_delete(bs, snapshot4); + + /* Check if relations are back to state from before creating snapshot 4 */ + CU_ASSERT(clone->parent_id == snapshotid3); + CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3); + + count = SPDK_COUNTOF(ids); + rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count); + CU_ASSERT(rc == 0); + CU_ASSERT(count == 1); + CU_ASSERT(ids[0] == cloneid); + + /* 8. Create second clone of snapshot 3 and try to remove snapshot 3 */ + + spdk_bs_create_clone(bs, snapshotid3, NULL, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + cloneid3 = g_blobid; + + spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno != 0); + + /* 9. 
Open snapshot 3 again and try to remove it while clone 3 is closed */ + + spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + snapshot3 = g_blob; + + spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno != 0); + + spdk_blob_close(snapshot3, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + spdk_bs_delete_blob(bs, cloneid3, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + /* 10. Remove snapshot 1 */ + + ut_blob_close_and_delete(bs, snapshot1); + + /* Check if relations are back to state from before creating snapshot 4 (before step 6) */ + CU_ASSERT(snapshot2->parent_id == SPDK_BLOBID_INVALID); + CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID); + + count = SPDK_COUNTOF(ids); + rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); + CU_ASSERT(rc == 0); + CU_ASSERT(count == 2); + CU_ASSERT(ids[0] == blobid || ids[1] == blobid); + CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3); + + /* 11. 
Try to create clone from read only blob */ + + /* Mark blob as read only */ + spdk_blob_set_read_only(blob); + spdk_blob_sync_md(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + /* Create clone from read only blob */ + spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + cloneid2 = g_blobid; + + spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + clone2 = g_blob; + + CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); + + count = SPDK_COUNTOF(ids); + rc = spdk_blob_get_clones(bs, blobid, ids, &count); + CU_ASSERT(rc == 0); + CU_ASSERT(count == 1); + CU_ASSERT(ids[0] == cloneid2); + + /* Close blobs */ + + spdk_blob_close(clone2, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + spdk_blob_close(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + spdk_blob_close(clone, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + spdk_blob_close(snapshot2, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + spdk_blob_close(snapshot3, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + ut_bs_reload(&bs, &bs_opts); + + /* Verify structure of loaded blob store */ + + /* snapshot2 */ + CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID); + + count = SPDK_COUNTOF(ids); + rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); + CU_ASSERT(rc == 0); + CU_ASSERT(count == 2); + CU_ASSERT(ids[0] == blobid || ids[1] == blobid); + CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3); + + /* blob */ + CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2); + count = SPDK_COUNTOF(ids); + rc = spdk_blob_get_clones(bs, blobid, ids, &count); + CU_ASSERT(rc == 
0); + CU_ASSERT(count == 1); + CU_ASSERT(ids[0] == cloneid2); + + /* clone */ + CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3); + count = SPDK_COUNTOF(ids); + rc = spdk_blob_get_clones(bs, cloneid, ids, &count); + CU_ASSERT(rc == 0); + CU_ASSERT(count == 0); + + /* snapshot3 */ + CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2); + count = SPDK_COUNTOF(ids); + rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count); + CU_ASSERT(rc == 0); + CU_ASSERT(count == 1); + CU_ASSERT(ids[0] == cloneid); + + /* clone2 */ + CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); + count = SPDK_COUNTOF(ids); + rc = spdk_blob_get_clones(bs, cloneid2, ids, &count); + CU_ASSERT(rc == 0); + CU_ASSERT(count == 0); + + /* Try to delete all blobs in the worse possible order */ + + spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno != 0); + + spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno != 0); + + spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + spdk_bs_unload(bs, bs_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + g_bs = NULL; +} + +static void +blobstore_clean_power_failure(void) +{ + struct spdk_blob_store *bs; + struct spdk_blob *blob; + struct spdk_power_failure_thresholds thresholds = {}; + bool clean = false; + struct spdk_bs_super_block *super = (struct spdk_bs_super_block *)&g_dev_buffer[0]; + struct 
spdk_bs_super_block super_copy = {}; + + thresholds.general_threshold = 1; + while (!clean) { + /* Create bs and blob */ + suite_blob_setup(); + SPDK_CU_ASSERT_FATAL(g_bs != NULL); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + bs = g_bs; + blob = g_blob; + + /* Super block should not change for rest of the UT, + * save it and compare later. */ + memcpy(&super_copy, super, sizeof(struct spdk_bs_super_block)); + SPDK_CU_ASSERT_FATAL(super->clean == 0); + SPDK_CU_ASSERT_FATAL(bs->clean == 0); + + /* Force bs/super block in a clean state. + * Along with marking blob dirty, to cause blob persist. */ + blob->state = SPDK_BLOB_STATE_DIRTY; + bs->clean = 1; + super->clean = 1; + super->crc = blob_md_page_calc_crc(super); + + g_bserrno = -1; + dev_set_power_failure_thresholds(thresholds); + spdk_blob_sync_md(blob, blob_op_complete, NULL); + poll_threads(); + dev_reset_power_failure_event(); + + if (g_bserrno == 0) { + /* After successful md sync, both bs and super block + * should be marked as not clean. */ + SPDK_CU_ASSERT_FATAL(bs->clean == 0); + SPDK_CU_ASSERT_FATAL(super->clean == 0); + clean = true; + } + + /* Depending on the point of failure, super block was either updated or not. */ + super_copy.clean = super->clean; + super_copy.crc = blob_md_page_calc_crc(&super_copy); + /* Compare that the values in super block remained unchanged. 
*/ + SPDK_CU_ASSERT_FATAL(!memcmp(&super_copy, super, sizeof(struct spdk_bs_super_block))); + + /* Delete blob and unload bs */ + suite_blob_cleanup(); + + thresholds.general_threshold++; + } +} + +static void +blob_delete_snapshot_power_failure(void) +{ + struct spdk_bs_dev *dev; + struct spdk_blob_store *bs; + struct spdk_blob_opts opts; + struct spdk_blob *blob, *snapshot; + struct spdk_power_failure_thresholds thresholds = {}; + spdk_blob_id blobid, snapshotid; + const void *value; + size_t value_len; + size_t count; + spdk_blob_id ids[3] = {}; + int rc; + bool deleted = false; + int delete_snapshot_bserrno = -1; + + thresholds.general_threshold = 1; + while (!deleted) { + dev = init_dev(); + + spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_bs != NULL); + bs = g_bs; + + /* Create blob */ + ut_spdk_blob_opts_init(&opts); + opts.num_clusters = 10; + + spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + blobid = g_blobid; + + /* Create snapshot */ + spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + snapshotid = g_blobid; + SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1)); + SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11)); + + dev_set_power_failure_thresholds(thresholds); + + spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); + poll_threads(); + delete_snapshot_bserrno = g_bserrno; + + /* Do not shut down cleanly. Assumption is that after snapshot deletion + * reports success, changes to both blobs should already persisted. 
*/ + dev_reset_power_failure_event(); + ut_bs_dirty_load(&bs, NULL); + + SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1)); + SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11)); + + spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + blob = g_blob; + SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true); + + spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); + poll_threads(); + + if (g_bserrno == 0) { + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + snapshot = g_blob; + CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); + count = SPDK_COUNTOF(ids); + rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); + CU_ASSERT(rc == 0); + CU_ASSERT(count == 1); + CU_ASSERT(ids[0] == blobid); + rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len); + CU_ASSERT(rc != 0); + SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false); + + spdk_blob_close(snapshot, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + } else { + CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); + /* Snapshot might have been left in unrecoverable state, so it does not open. + * Yet delete might perform further changes to the clone after that. + * This UT should test until snapshot is deleted and delete call succeeds. 
*/ + if (delete_snapshot_bserrno == 0) { + deleted = true; + } + } + + spdk_blob_close(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + spdk_bs_unload(bs, bs_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + thresholds.general_threshold++; + } +} + +static void +blob_create_snapshot_power_failure(void) +{ + struct spdk_blob_store *bs = g_bs; + struct spdk_bs_dev *dev; + struct spdk_blob_opts opts; + struct spdk_blob *blob, *snapshot; + struct spdk_power_failure_thresholds thresholds = {}; + spdk_blob_id blobid, snapshotid; + const void *value; + size_t value_len; + size_t count; + spdk_blob_id ids[3] = {}; + int rc; + bool created = false; + int create_snapshot_bserrno = -1; + + thresholds.general_threshold = 1; + while (!created) { + dev = init_dev(); + + spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_bs != NULL); + bs = g_bs; + + /* Create blob */ + ut_spdk_blob_opts_init(&opts); + opts.num_clusters = 10; + + spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + blobid = g_blobid; + SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1)); + SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11)); + + dev_set_power_failure_thresholds(thresholds); + + /* Create snapshot */ + spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); + poll_threads(); + create_snapshot_bserrno = g_bserrno; + snapshotid = g_blobid; + SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1)); + SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11)); + + /* Do not shut down cleanly. Assumption is that after create snapshot + * reports success, both blobs should be power-fail safe. 
*/ + dev_reset_power_failure_event(); + ut_bs_dirty_load(&bs, NULL); + + SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1)); + SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11)); + + spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + blob = g_blob; + + if (snapshotid != SPDK_BLOBID_INVALID) { + spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); + poll_threads(); + } + + if ((snapshotid != SPDK_BLOBID_INVALID) && (g_bserrno == 0)) { + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + snapshot = g_blob; + SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true); + SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false); + CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); + count = SPDK_COUNTOF(ids); + rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); + CU_ASSERT(rc == 0); + CU_ASSERT(count == 1); + CU_ASSERT(ids[0] == blobid); + rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_IN_PROGRESS, &value, &value_len); + CU_ASSERT(rc != 0); + + spdk_blob_close(snapshot, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + if (create_snapshot_bserrno == 0) { + created = true; + } + } else { + CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); + SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == false); + } + + spdk_blob_close(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + spdk_bs_unload(bs, bs_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + thresholds.general_threshold++; + } +} + +static void +test_io_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) +{ + uint8_t payload_ff[64 * 512]; + uint8_t payload_aa[64 * 512]; + uint8_t payload_00[64 * 512]; + uint8_t *cluster0, *cluster1; + + memset(payload_ff, 0xFF, sizeof(payload_ff)); + 
memset(payload_aa, 0xAA, sizeof(payload_aa)); + memset(payload_00, 0x00, sizeof(payload_00)); + + /* Try to perform I/O with io unit = 512 */ + spdk_blob_io_write(blob, channel, payload_ff, 0, 1, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + /* If thin provisioned is set cluster should be allocated now */ + SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0); + cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; + + /* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character. + * Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). */ + /* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ + CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0); + + /* Verify write with offset on first page */ + spdk_blob_io_write(blob, channel, payload_ff, 2, 1, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + /* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ + CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0); + + /* Verify write with offset on first page */ + spdk_blob_io_write(blob, channel, payload_ff, 4, 4, blob_op_complete, NULL); + poll_threads(); + + /* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */ + CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0); + CU_ASSERT(memcmp(cluster0 + 8 
* 512, payload_00, 24 * 512) == 0); + + /* Verify write with offset on second page */ + spdk_blob_io_write(blob, channel, payload_ff, 8, 4, blob_op_complete, NULL); + poll_threads(); + + /* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */ + CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0); + CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); + + /* Verify write across multiple pages */ + spdk_blob_io_write(blob, channel, payload_aa, 4, 8, blob_op_complete, NULL); + poll_threads(); + + /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */ + CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); + CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); + + /* Verify write across multiple clusters */ + spdk_blob_io_write(blob, channel, payload_ff, 28, 8, blob_op_complete, NULL); + poll_threads(); + + SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); + cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; + + /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] + * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ + CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); + CU_ASSERT(memcmp(cluster0 + 28 
* 512, payload_ff, 4 * 512) == 0); + + CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); + CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0); + + /* Verify write to second cluster */ + spdk_blob_io_write(blob, channel, payload_ff, 32 + 12, 2, blob_op_complete, NULL); + poll_threads(); + + SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); + cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; + + /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] + * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */ + CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); + CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0); + + CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); + CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0); + CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0); + CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0); +} + +static void +test_io_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) +{ + uint8_t payload_read[64 * 512]; + uint8_t payload_ff[64 * 512]; + uint8_t payload_aa[64 * 512]; + uint8_t payload_00[64 * 512]; + + memset(payload_ff, 0xFF, sizeof(payload_ff)); + memset(payload_aa, 0xAA, sizeof(payload_aa)); + memset(payload_00, 0x00, sizeof(payload_00)); + + /* Read only first io unit */ + /* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] + * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] + * payload_read: F000 0000 | 0000 0000 ... 
*/ + memset(payload_read, 0x00, sizeof(payload_read)); + spdk_blob_io_read(blob, channel, payload_read, 0, 1, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); + CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0); + + /* Read four io_units starting from offset = 2 + * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ] + * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] + * payload_read: F0AA 0000 | 0000 0000 ... */ + + memset(payload_read, 0x00, sizeof(payload_read)); + spdk_blob_io_read(blob, channel, payload_read, 2, 4, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); + CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); + CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0); + CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0); + CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); + + /* Read eight io_units across multiple pages + * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] + * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] + * payload_read: AAAA AAAA | 0000 0000 ... */ + memset(payload_read, 0x00, sizeof(payload_read)); + spdk_blob_io_read(blob, channel, payload_read, 4, 8, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0); + CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); + + /* Read eight io_units across multiple clusters + * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ] + * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] + * payload_read: FFFF FFFF | 0000 0000 ... 
*/ + memset(payload_read, 0x00, sizeof(payload_read)); + spdk_blob_io_read(blob, channel, payload_read, 28, 8, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0); + CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); + + /* Read four io_units from second cluster + * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] + * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ] + * payload_read: 00FF 0000 | 0000 0000 ... */ + memset(payload_read, 0x00, sizeof(payload_read)); + spdk_blob_io_read(blob, channel, payload_read, 32 + 10, 4, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0); + CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0); + CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); + + /* Read second cluster + * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] + * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] + * payload_read: FFFF 0000 | 0000 FF00 ... 
*/ + memset(payload_read, 0x00, sizeof(payload_read)); + spdk_blob_io_read(blob, channel, payload_read, 32, 32, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0); + CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0); + CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0); + CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0); + + /* Read whole two clusters + * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] + * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */ + memset(payload_read, 0x00, sizeof(payload_read)); + spdk_blob_io_read(blob, channel, payload_read, 0, 64, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); + CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); + CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0); + CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0); + CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0); + CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0); + + CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0); + CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0); + CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0); + CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0); +} + + +static void +test_io_unmap(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) +{ + uint8_t payload_ff[64 * 512]; + uint8_t payload_aa[64 * 512]; + uint8_t payload_00[64 * 512]; + uint8_t *cluster0, *cluster1; + + memset(payload_ff, 0xFF, sizeof(payload_ff)); + memset(payload_aa, 0xAA, sizeof(payload_aa)); + memset(payload_00, 0x00, sizeof(payload_00)); + + cluster0 = &g_dev_buffer[blob->active.clusters[0] * 
dev->blocklen]; + cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; + + /* Unmap */ + spdk_blob_io_unmap(blob, channel, 0, 64, blob_op_complete, NULL); + poll_threads(); + + CU_ASSERT(g_bserrno == 0); + + CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0); + CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0); +} + +static void +test_io_zeroes(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) +{ + uint8_t payload_ff[64 * 512]; + uint8_t payload_aa[64 * 512]; + uint8_t payload_00[64 * 512]; + uint8_t *cluster0, *cluster1; + + memset(payload_ff, 0xFF, sizeof(payload_ff)); + memset(payload_aa, 0xAA, sizeof(payload_aa)); + memset(payload_00, 0x00, sizeof(payload_00)); + + cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; + cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; + + /* Write zeroes */ + spdk_blob_io_write_zeroes(blob, channel, 0, 64, blob_op_complete, NULL); + poll_threads(); + + CU_ASSERT(g_bserrno == 0); + + CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0); + CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0); +} + + +static void +test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) +{ + uint8_t payload_ff[64 * 512]; + uint8_t payload_aa[64 * 512]; + uint8_t payload_00[64 * 512]; + uint8_t *cluster0, *cluster1; + struct iovec iov[4]; + + memset(payload_ff, 0xFF, sizeof(payload_ff)); + memset(payload_aa, 0xAA, sizeof(payload_aa)); + memset(payload_00, 0x00, sizeof(payload_00)); + + /* Try to perform I/O with io unit = 512 */ + iov[0].iov_base = payload_ff; + iov[0].iov_len = 1 * 512; + spdk_blob_io_writev(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + /* If thin provisioned is set cluster should be allocated now */ + SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0); + cluster0 = 
&g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; + + /* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character. + * Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). */ + /* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ + CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0); + + /* Verify write with offset on first page */ + iov[0].iov_base = payload_ff; + iov[0].iov_len = 1 * 512; + spdk_blob_io_writev(blob, channel, iov, 1, 2, 1, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + /* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ + CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0); + + /* Verify write with offset on first page */ + iov[0].iov_base = payload_ff; + iov[0].iov_len = 4 * 512; + spdk_blob_io_writev(blob, channel, iov, 1, 4, 4, blob_op_complete, NULL); + poll_threads(); + + /* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */ + CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0); + CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0); + + /* Verify write with offset on second page */ + iov[0].iov_base = payload_ff; + iov[0].iov_len = 4 * 512; + spdk_blob_io_writev(blob, channel, iov, 1, 8, 4, blob_op_complete, NULL); + poll_threads(); + + /* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 
0000 | 0000 0000 ] */ + CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0); + CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); + + /* Verify write across multiple pages */ + iov[0].iov_base = payload_aa; + iov[0].iov_len = 8 * 512; + spdk_blob_io_writev(blob, channel, iov, 1, 4, 8, blob_op_complete, NULL); + poll_threads(); + + /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */ + CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); + CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); + + /* Verify write across multiple clusters */ + + iov[0].iov_base = payload_ff; + iov[0].iov_len = 8 * 512; + spdk_blob_io_writev(blob, channel, iov, 1, 28, 8, blob_op_complete, NULL); + poll_threads(); + + SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); + cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; + + /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] + * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ + CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); + CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 16 * 512) == 0); + CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0); + + 
CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); + CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0); + + /* Verify write to second cluster */ + + iov[0].iov_base = payload_ff; + iov[0].iov_len = 2 * 512; + spdk_blob_io_writev(blob, channel, iov, 1, 32 + 12, 2, blob_op_complete, NULL); + poll_threads(); + + SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); + cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; + + /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] + * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */ + CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); + CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); + CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0); + + CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); + CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0); + CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0); + CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0); +} + +static void +test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) +{ + uint8_t payload_read[64 * 512]; + uint8_t payload_ff[64 * 512]; + uint8_t payload_aa[64 * 512]; + uint8_t payload_00[64 * 512]; + struct iovec iov[4]; + + memset(payload_ff, 0xFF, sizeof(payload_ff)); + memset(payload_aa, 0xAA, sizeof(payload_aa)); + memset(payload_00, 0x00, sizeof(payload_00)); + + /* Read only first io unit */ + /* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] + * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] + * payload_read: F000 0000 | 0000 0000 ... 
*/ + memset(payload_read, 0x00, sizeof(payload_read)); + iov[0].iov_base = payload_read; + iov[0].iov_len = 1 * 512; + spdk_blob_io_readv(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL); + poll_threads(); + + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); + CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0); + + /* Read four io_units starting from offset = 2 + * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ] + * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] + * payload_read: F0AA 0000 | 0000 0000 ... */ + + memset(payload_read, 0x00, sizeof(payload_read)); + iov[0].iov_base = payload_read; + iov[0].iov_len = 4 * 512; + spdk_blob_io_readv(blob, channel, iov, 1, 2, 4, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); + CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); + CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0); + CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0); + CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); + + /* Read eight io_units across multiple pages + * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] + * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] + * payload_read: AAAA AAAA | 0000 0000 ... 
*/ + memset(payload_read, 0x00, sizeof(payload_read)); + iov[0].iov_base = payload_read; + iov[0].iov_len = 4 * 512; + iov[1].iov_base = payload_read + 4 * 512; + iov[1].iov_len = 4 * 512; + spdk_blob_io_readv(blob, channel, iov, 2, 4, 8, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0); + CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); + + /* Read eight io_units across multiple clusters + * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ] + * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] + * payload_read: FFFF FFFF | 0000 0000 ... */ + memset(payload_read, 0x00, sizeof(payload_read)); + iov[0].iov_base = payload_read; + iov[0].iov_len = 2 * 512; + iov[1].iov_base = payload_read + 2 * 512; + iov[1].iov_len = 2 * 512; + iov[2].iov_base = payload_read + 4 * 512; + iov[2].iov_len = 2 * 512; + iov[3].iov_base = payload_read + 6 * 512; + iov[3].iov_len = 2 * 512; + spdk_blob_io_readv(blob, channel, iov, 4, 28, 8, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0); + CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); + + /* Read four io_units from second cluster + * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] + * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ] + * payload_read: 00FF 0000 | 0000 0000 ... 
*/ + memset(payload_read, 0x00, sizeof(payload_read)); + iov[0].iov_base = payload_read; + iov[0].iov_len = 1 * 512; + iov[1].iov_base = payload_read + 1 * 512; + iov[1].iov_len = 3 * 512; + spdk_blob_io_readv(blob, channel, iov, 2, 32 + 10, 4, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0); + CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0); + CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); + + /* Read second cluster + * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] + * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] + * payload_read: FFFF 0000 | 0000 FF00 ... */ + memset(payload_read, 0x00, sizeof(payload_read)); + iov[0].iov_base = payload_read; + iov[0].iov_len = 1 * 512; + iov[1].iov_base = payload_read + 1 * 512; + iov[1].iov_len = 2 * 512; + iov[2].iov_base = payload_read + 3 * 512; + iov[2].iov_len = 4 * 512; + iov[3].iov_base = payload_read + 7 * 512; + iov[3].iov_len = 25 * 512; + spdk_blob_io_readv(blob, channel, iov, 4, 32, 32, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0); + CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0); + CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0); + CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0); + + /* Read whole two clusters + * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] + * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */ + memset(payload_read, 0x00, sizeof(payload_read)); + iov[0].iov_base = payload_read; + iov[0].iov_len = 1 * 512; + iov[1].iov_base = payload_read + 1 * 512; + iov[1].iov_len = 8 * 512; + iov[2].iov_base = payload_read + 9 * 512; + iov[2].iov_len = 16 * 512; + iov[3].iov_base = payload_read + 25 * 512; + iov[3].iov_len = 39 * 512; + 
spdk_blob_io_readv(blob, channel, iov, 4, 0, 64, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); + CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); + CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0); + CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0); + CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0); + CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0); + + CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0); + CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0); + CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0); + CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0); +} + +static void +blob_io_unit(void) +{ + struct spdk_bs_opts bsopts; + struct spdk_blob_opts opts; + struct spdk_blob_store *bs; + struct spdk_bs_dev *dev; + struct spdk_blob *blob, *snapshot, *clone; + spdk_blob_id blobid; + struct spdk_io_channel *channel; + + /* Create dev with 512 bytes io unit size */ + + spdk_bs_opts_init(&bsopts); + bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4; /* 8 * 4 = 32 io_unit */ + snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE"); + + /* Try to initialize a new blob store with unsupported io_unit */ + dev = init_dev(); + dev->blocklen = 512; + dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen; + + /* Initialize a new blob store */ + spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_bs != NULL); + bs = g_bs; + + CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512); + channel = spdk_bs_alloc_io_channel(bs); + + /* Create thick provisioned blob */ + ut_spdk_blob_opts_init(&opts); + opts.thin_provision = false; + opts.num_clusters = 32; + + blob = ut_blob_create_and_open(bs, &opts); + blobid = 
spdk_blob_get_id(blob); + + test_io_write(dev, blob, channel); + test_io_read(dev, blob, channel); + test_io_zeroes(dev, blob, channel); + + test_iov_write(dev, blob, channel); + test_iov_read(dev, blob, channel); + + test_io_unmap(dev, blob, channel); + + spdk_blob_close(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + blob = NULL; + g_blob = NULL; + + /* Create thin provisioned blob */ + + ut_spdk_blob_opts_init(&opts); + opts.thin_provision = true; + opts.num_clusters = 32; + + blob = ut_blob_create_and_open(bs, &opts); + blobid = spdk_blob_get_id(blob); + + test_io_write(dev, blob, channel); + test_io_read(dev, blob, channel); + + test_io_zeroes(dev, blob, channel); + + test_iov_write(dev, blob, channel); + test_iov_read(dev, blob, channel); + + /* Create snapshot */ + + spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + blobid = g_blobid; + + spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blob != NULL); + snapshot = g_blob; + + spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + blobid = g_blobid; + + spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blob != NULL); + clone = g_blob; + + test_io_read(dev, blob, channel); + test_io_read(dev, snapshot, channel); + test_io_read(dev, clone, channel); + + test_iov_read(dev, blob, channel); + test_iov_read(dev, snapshot, channel); + test_iov_read(dev, clone, channel); + + /* Inflate clone */ + + spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL); + poll_threads(); + + CU_ASSERT(g_bserrno == 0); + + test_io_read(dev, clone, channel); + + test_io_unmap(dev, clone, channel); + + 
test_iov_write(dev, clone, channel); + test_iov_read(dev, clone, channel); + + spdk_blob_close(blob, blob_op_complete, NULL); + spdk_blob_close(snapshot, blob_op_complete, NULL); + spdk_blob_close(clone, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + blob = NULL; + g_blob = NULL; + + spdk_bs_free_io_channel(channel); + poll_threads(); + + /* Unload the blob store */ + spdk_bs_unload(bs, bs_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + g_bs = NULL; + g_blob = NULL; + g_blobid = 0; +} + +static void +blob_io_unit_compatiblity(void) +{ + struct spdk_bs_opts bsopts; + struct spdk_blob_store *bs; + struct spdk_bs_dev *dev; + struct spdk_bs_super_block *super; + + /* Create dev with 512 bytes io unit size */ + + spdk_bs_opts_init(&bsopts); + bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4; /* 8 * 4 = 32 io_unit */ + snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE"); + + /* Try to initialize a new blob store with unsupported io_unit */ + dev = init_dev(); + dev->blocklen = 512; + dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen; + + /* Initialize a new blob store */ + spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_bs != NULL); + bs = g_bs; + + CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512); + + /* Unload the blob store */ + spdk_bs_unload(bs, bs_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + /* Modify super block to behave like older version. 
+ * Check if loaded io unit size equals SPDK_BS_PAGE_SIZE */ + super = (struct spdk_bs_super_block *)&g_dev_buffer[0]; + super->io_unit_size = 0; + super->crc = blob_md_page_calc_crc(super); + + dev = init_dev(); + dev->blocklen = 512; + dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen; + + spdk_bs_load(dev, &bsopts, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_bs != NULL); + bs = g_bs; + + CU_ASSERT(spdk_bs_get_io_unit_size(bs) == SPDK_BS_PAGE_SIZE); + + /* Unload the blob store */ + spdk_bs_unload(bs, bs_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + g_bs = NULL; + g_blob = NULL; + g_blobid = 0; +} + +static void +blob_simultaneous_operations(void) +{ + struct spdk_blob_store *bs = g_bs; + struct spdk_blob_opts opts; + struct spdk_blob *blob, *snapshot; + spdk_blob_id blobid, snapshotid; + struct spdk_io_channel *channel; + + channel = spdk_bs_alloc_io_channel(bs); + SPDK_CU_ASSERT_FATAL(channel != NULL); + + ut_spdk_blob_opts_init(&opts); + opts.num_clusters = 10; + + blob = ut_blob_create_and_open(bs, &opts); + blobid = spdk_blob_get_id(blob); + + /* Create snapshot and try to remove blob in the same time: + * - snapshot should be created successfully + * - delete operation should fail w -EBUSY */ + CU_ASSERT(blob->locked_operation_in_progress == false); + spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); + CU_ASSERT(blob->locked_operation_in_progress == true); + spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); + CU_ASSERT(blob->locked_operation_in_progress == true); + /* Deletion failure */ + CU_ASSERT(g_bserrno == -EBUSY); + poll_threads(); + CU_ASSERT(blob->locked_operation_in_progress == false); + /* Snapshot creation success */ + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + + snapshotid = g_blobid; + + spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno 
== 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + snapshot = g_blob; + + /* Inflate blob and try to remove blob in the same time: + * - blob should be inflated successfully + * - delete operation should fail w -EBUSY */ + CU_ASSERT(blob->locked_operation_in_progress == false); + spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL); + CU_ASSERT(blob->locked_operation_in_progress == true); + spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); + CU_ASSERT(blob->locked_operation_in_progress == true); + /* Deletion failure */ + CU_ASSERT(g_bserrno == -EBUSY); + poll_threads(); + CU_ASSERT(blob->locked_operation_in_progress == false); + /* Inflation success */ + CU_ASSERT(g_bserrno == 0); + + /* Clone snapshot and try to remove snapshot in the same time: + * - snapshot should be cloned successfully + * - delete operation should fail w -EBUSY */ + CU_ASSERT(blob->locked_operation_in_progress == false); + spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL); + spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); + /* Deletion failure */ + CU_ASSERT(g_bserrno == -EBUSY); + poll_threads(); + CU_ASSERT(blob->locked_operation_in_progress == false); + /* Clone created */ + CU_ASSERT(g_bserrno == 0); + + /* Resize blob and try to remove blob in the same time: + * - blob should be resized successfully + * - delete operation should fail w -EBUSY */ + CU_ASSERT(blob->locked_operation_in_progress == false); + spdk_blob_resize(blob, 50, blob_op_complete, NULL); + CU_ASSERT(blob->locked_operation_in_progress == true); + spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); + CU_ASSERT(blob->locked_operation_in_progress == true); + /* Deletion failure */ + CU_ASSERT(g_bserrno == -EBUSY); + poll_threads(); + CU_ASSERT(blob->locked_operation_in_progress == false); + /* Blob resized successfully */ + CU_ASSERT(g_bserrno == 0); + + /* Issue two consecutive blob syncs, neither should fail. 
+ * Force sync to actually occur by marking blob dirty each time. + * Execution of sync should not be enough to complete the operation, + * since disk I/O is required to complete it. */ + g_bserrno = -1; + + blob->state = SPDK_BLOB_STATE_DIRTY; + spdk_blob_sync_md(blob, blob_op_complete, NULL); + SPDK_CU_ASSERT_FATAL(g_bserrno == -1); + + blob->state = SPDK_BLOB_STATE_DIRTY; + spdk_blob_sync_md(blob, blob_op_complete, NULL); + SPDK_CU_ASSERT_FATAL(g_bserrno == -1); + + uint32_t completions = 0; + while (completions < 2) { + SPDK_CU_ASSERT_FATAL(poll_thread_times(0, 1)); + if (g_bserrno == 0) { + g_bserrno = -1; + completions++; + } + /* Never should the g_bserrno be other than -1. + * It would mean that either of syncs failed. */ + SPDK_CU_ASSERT_FATAL(g_bserrno == -1); + } + + spdk_bs_free_io_channel(channel); + poll_threads(); + + ut_blob_close_and_delete(bs, snapshot); + ut_blob_close_and_delete(bs, blob); +} + +static void +blob_persist_test(void) +{ + struct spdk_blob_store *bs = g_bs; + struct spdk_blob_opts opts; + struct spdk_blob *blob; + spdk_blob_id blobid; + struct spdk_io_channel *channel; + char *xattr; + size_t xattr_length; + int rc; + uint32_t page_count_clear, page_count_xattr; + uint64_t poller_iterations; + bool run_poller; + + channel = spdk_bs_alloc_io_channel(bs); + SPDK_CU_ASSERT_FATAL(channel != NULL); + + ut_spdk_blob_opts_init(&opts); + opts.num_clusters = 10; + + blob = ut_blob_create_and_open(bs, &opts); + blobid = spdk_blob_get_id(blob); + + /* Save the amount of md pages used after creation of a blob. + * This should be consistent after removing xattr. */ + page_count_clear = spdk_bit_array_count_set(bs->used_md_pages); + SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear); + SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear); + + /* Add xattr with maximum length of descriptor to exceed single metadata page. 
*/ + xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) - + strlen("large_xattr"); + xattr = calloc(xattr_length, sizeof(char)); + SPDK_CU_ASSERT_FATAL(xattr != NULL); + + rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); + SPDK_CU_ASSERT_FATAL(rc == 0); + spdk_blob_sync_md(blob, blob_op_complete, NULL); + poll_threads(); + SPDK_CU_ASSERT_FATAL(g_bserrno == 0); + + /* Save the amount of md pages used after adding the large xattr */ + page_count_xattr = spdk_bit_array_count_set(bs->used_md_pages); + SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr); + SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr); + + /* Add xattr to a blob and sync it. While sync is occuring, remove the xattr and sync again. + * Interrupt the first sync after increasing number of poller iterations, until it succeeds. + * Expectation is that after second sync completes no xattr is saved in metadata. */ + poller_iterations = 1; + run_poller = true; + while (run_poller) { + rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); + SPDK_CU_ASSERT_FATAL(rc == 0); + g_bserrno = -1; + spdk_blob_sync_md(blob, blob_op_complete, NULL); + poll_thread_times(0, poller_iterations); + if (g_bserrno == 0) { + /* Poller iteration count was high enough for first sync to complete. + * Verify that blob takes up enough of md_pages to store the xattr. 
*/ + SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr); + SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr); + SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_xattr); + run_poller = false; + } + rc = spdk_blob_remove_xattr(blob, "large_xattr"); + SPDK_CU_ASSERT_FATAL(rc == 0); + spdk_blob_sync_md(blob, blob_op_complete, NULL); + poll_threads(); + SPDK_CU_ASSERT_FATAL(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear); + SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear); + SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_clear); + + /* Reload bs and re-open blob to verify that xattr was not persisted. */ + spdk_blob_close(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + + ut_bs_reload(&bs, NULL); + + spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + SPDK_CU_ASSERT_FATAL(g_blob != NULL); + blob = g_blob; + + rc = spdk_blob_get_xattr_value(blob, "large_xattr", (const void **)&xattr, &xattr_length); + SPDK_CU_ASSERT_FATAL(rc == -ENOENT); + + poller_iterations++; + /* Stop at high iteration count to prevent infinite loop. + * This value should be enough for first md sync to complete in any case. 
*/ + SPDK_CU_ASSERT_FATAL(poller_iterations < 50); + } + + free(xattr); + + ut_blob_close_and_delete(bs, blob); + + spdk_bs_free_io_channel(channel); + poll_threads(); +} + +static void +suite_bs_setup(void) +{ + struct spdk_bs_dev *dev; + + dev = init_dev(); + memset(g_dev_buffer, 0, DEV_BUFFER_SIZE); + spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_bs != NULL); +} + +static void +suite_bs_cleanup(void) +{ + spdk_bs_unload(g_bs, bs_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + g_bs = NULL; + memset(g_dev_buffer, 0, DEV_BUFFER_SIZE); +} + +static struct spdk_blob * +ut_blob_create_and_open(struct spdk_blob_store *bs, struct spdk_blob_opts *blob_opts) +{ + struct spdk_blob *blob; + struct spdk_blob_opts create_blob_opts; + spdk_blob_id blobid; + + if (blob_opts == NULL) { + ut_spdk_blob_opts_init(&create_blob_opts); + blob_opts = &create_blob_opts; + } + + spdk_bs_create_blob_ext(bs, blob_opts, blob_op_with_id_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); + blobid = g_blobid; + g_blobid = -1; + + spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + CU_ASSERT(g_blob != NULL); + blob = g_blob; + + g_blob = NULL; + g_bserrno = -1; + + return blob; +} + +static void +ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob) +{ + spdk_blob_id blobid = spdk_blob_get_id(blob); + + spdk_blob_close(blob, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + g_blob = NULL; + + spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); + poll_threads(); + CU_ASSERT(g_bserrno == 0); + g_bserrno = -1; +} + +static void +suite_blob_setup(void) +{ + suite_bs_setup(); + CU_ASSERT(g_bs != NULL); + + g_blob = ut_blob_create_and_open(g_bs, NULL); + CU_ASSERT(g_blob != NULL); +} + +static void +suite_blob_cleanup(void) +{ + 
ut_blob_close_and_delete(g_bs, g_blob); + CU_ASSERT(g_blob == NULL); + + suite_bs_cleanup(); + CU_ASSERT(g_bs == NULL); +} + +int main(int argc, char **argv) +{ + CU_pSuite suite, suite_bs, suite_blob; + unsigned int num_failures; + + CU_set_error_action(CUEA_ABORT); + CU_initialize_registry(); + + suite = CU_add_suite("blob", NULL, NULL); + suite_bs = CU_add_suite_with_setup_and_teardown("blob_bs", NULL, NULL, + suite_bs_setup, suite_bs_cleanup); + suite_blob = CU_add_suite_with_setup_and_teardown("blob_blob", NULL, NULL, + suite_blob_setup, suite_blob_cleanup); + + CU_ADD_TEST(suite, blob_init); + CU_ADD_TEST(suite_bs, blob_open); + CU_ADD_TEST(suite_bs, blob_create); + CU_ADD_TEST(suite_bs, blob_create_fail); + CU_ADD_TEST(suite_bs, blob_create_internal); + CU_ADD_TEST(suite, blob_thin_provision); + CU_ADD_TEST(suite_bs, blob_snapshot); + CU_ADD_TEST(suite_bs, blob_clone); + CU_ADD_TEST(suite_bs, blob_inflate); + CU_ADD_TEST(suite_bs, blob_delete); + CU_ADD_TEST(suite_bs, blob_resize_test); + CU_ADD_TEST(suite, blob_read_only); + CU_ADD_TEST(suite_bs, channel_ops); + CU_ADD_TEST(suite_bs, blob_super); + CU_ADD_TEST(suite_blob, blob_write); + CU_ADD_TEST(suite_blob, blob_read); + CU_ADD_TEST(suite_blob, blob_rw_verify); + CU_ADD_TEST(suite_bs, blob_rw_verify_iov); + CU_ADD_TEST(suite_blob, blob_rw_verify_iov_nomem); + CU_ADD_TEST(suite_blob, blob_rw_iov_read_only); + CU_ADD_TEST(suite_bs, blob_unmap); + CU_ADD_TEST(suite_bs, blob_iter); + CU_ADD_TEST(suite_blob, blob_xattr); + CU_ADD_TEST(suite, bs_load); + CU_ADD_TEST(suite_bs, bs_load_pending_removal); + CU_ADD_TEST(suite, bs_load_custom_cluster_size); + CU_ADD_TEST(suite_bs, bs_unload); + CU_ADD_TEST(suite, bs_cluster_sz); + CU_ADD_TEST(suite_bs, bs_usable_clusters); + CU_ADD_TEST(suite, bs_resize_md); + CU_ADD_TEST(suite, bs_destroy); + CU_ADD_TEST(suite, bs_type); + CU_ADD_TEST(suite, bs_super_block); + CU_ADD_TEST(suite, blob_serialize_test); + CU_ADD_TEST(suite_bs, blob_crc); + CU_ADD_TEST(suite, 
super_block_crc); + CU_ADD_TEST(suite_blob, blob_dirty_shutdown); + CU_ADD_TEST(suite_bs, blob_flags); + CU_ADD_TEST(suite_bs, bs_version); + CU_ADD_TEST(suite_bs, blob_set_xattrs_test); + CU_ADD_TEST(suite_bs, blob_thin_prov_alloc); + CU_ADD_TEST(suite_bs, blob_insert_cluster_msg_test); + CU_ADD_TEST(suite_bs, blob_thin_prov_rw); + CU_ADD_TEST(suite_bs, blob_thin_prov_rle); + CU_ADD_TEST(suite_bs, blob_thin_prov_rw_iov); + CU_ADD_TEST(suite, bs_load_iter_test); + CU_ADD_TEST(suite_bs, blob_snapshot_rw); + CU_ADD_TEST(suite_bs, blob_snapshot_rw_iov); + CU_ADD_TEST(suite, blob_relations); + CU_ADD_TEST(suite, blob_relations2); + CU_ADD_TEST(suite, blobstore_clean_power_failure); + CU_ADD_TEST(suite, blob_delete_snapshot_power_failure); + CU_ADD_TEST(suite, blob_create_snapshot_power_failure); + CU_ADD_TEST(suite_bs, blob_inflate_rw); + CU_ADD_TEST(suite_bs, blob_snapshot_freeze_io); + CU_ADD_TEST(suite_bs, blob_operation_split_rw); + CU_ADD_TEST(suite_bs, blob_operation_split_rw_iov); + CU_ADD_TEST(suite, blob_io_unit); + CU_ADD_TEST(suite, blob_io_unit_compatiblity); + CU_ADD_TEST(suite_bs, blob_simultaneous_operations); + CU_ADD_TEST(suite_bs, blob_persist_test); + + allocate_threads(2); + set_thread(0); + + g_dev_buffer = calloc(1, DEV_BUFFER_SIZE); + + CU_basic_set_mode(CU_BRM_VERBOSE); + g_use_extent_table = false; + CU_basic_run_tests(); + num_failures = CU_get_number_of_failures(); + g_use_extent_table = true; + CU_basic_run_tests(); + num_failures += CU_get_number_of_failures(); + CU_cleanup_registry(); + + free(g_dev_buffer); + + free_threads(); + + return num_failures; +} diff --git a/src/spdk/test/unit/lib/blob/bs_dev_common.c b/src/spdk/test/unit/lib/blob/bs_dev_common.c new file mode 100644 index 000000000..4e94fef8b --- /dev/null +++ b/src/spdk/test/unit/lib/blob/bs_dev_common.c @@ -0,0 +1,395 @@ +/*- + * BSD LICENSE + * + * Copyright (c) Intel Corporation. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "spdk/thread.h" +#include "bs_scheduler.c" + + +#define DEV_BUFFER_SIZE (64 * 1024 * 1024) +#define DEV_BUFFER_BLOCKLEN (4096) +#define DEV_BUFFER_BLOCKCNT (DEV_BUFFER_SIZE / DEV_BUFFER_BLOCKLEN) +uint8_t *g_dev_buffer; +uint64_t g_dev_write_bytes; +uint64_t g_dev_read_bytes; + +struct spdk_power_failure_counters { + uint64_t general_counter; + uint64_t read_counter; + uint64_t write_counter; + uint64_t unmap_counter; + uint64_t write_zero_counter; + uint64_t flush_counter; +}; + +static struct spdk_power_failure_counters g_power_failure_counters = {}; + +struct spdk_power_failure_thresholds { + uint64_t general_threshold; + uint64_t read_threshold; + uint64_t write_threshold; + uint64_t unmap_threshold; + uint64_t write_zero_threshold; + uint64_t flush_threshold; +}; + +static struct spdk_power_failure_thresholds g_power_failure_thresholds = {}; + +static uint64_t g_power_failure_rc; + +void dev_reset_power_failure_event(void); +void dev_reset_power_failure_counters(void); +void dev_set_power_failure_thresholds(struct spdk_power_failure_thresholds thresholds); + +void +dev_reset_power_failure_event(void) +{ + memset(&g_power_failure_counters, 0, sizeof(g_power_failure_counters)); + memset(&g_power_failure_thresholds, 0, sizeof(g_power_failure_thresholds)); + g_power_failure_rc = 0; +} + +void +dev_reset_power_failure_counters(void) +{ + memset(&g_power_failure_counters, 0, sizeof(g_power_failure_counters)); + g_power_failure_rc = 0; +} + +/** + * Set power failure event. Power failure will occur after given number + * of IO operations. It may occure after number of particular operations + * (read, write, unmap, write zero or flush) or after given number of + * any IO operations (general_treshold). Value 0 means that the treshold + * is disabled. Any other value is the number of operation starting from + * which power failure event will happen. 
+ */ +void +dev_set_power_failure_thresholds(struct spdk_power_failure_thresholds thresholds) +{ + g_power_failure_thresholds = thresholds; +} + +/* Define here for UT only. */ +struct spdk_io_channel g_io_channel; + +static struct spdk_io_channel * +dev_create_channel(struct spdk_bs_dev *dev) +{ + return &g_io_channel; +} + +static void +dev_destroy_channel(struct spdk_bs_dev *dev, struct spdk_io_channel *channel) +{ +} + +static void +dev_destroy(struct spdk_bs_dev *dev) +{ + free(dev); +} + + +static void +dev_complete_cb(void *arg) +{ + struct spdk_bs_dev_cb_args *cb_args = arg; + + cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, g_power_failure_rc); +} + +static void +dev_complete(void *arg) +{ + _bs_send_msg(dev_complete_cb, arg, NULL); +} + +static void +dev_read(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, void *payload, + uint64_t lba, uint32_t lba_count, + struct spdk_bs_dev_cb_args *cb_args) +{ + uint64_t offset, length; + + if (g_power_failure_thresholds.read_threshold != 0) { + g_power_failure_counters.read_counter++; + } + + if (g_power_failure_thresholds.general_threshold != 0) { + g_power_failure_counters.general_counter++; + } + + if ((g_power_failure_thresholds.read_threshold == 0 || + g_power_failure_counters.read_counter < g_power_failure_thresholds.read_threshold) && + (g_power_failure_thresholds.general_threshold == 0 || + g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) { + offset = lba * dev->blocklen; + length = lba_count * dev->blocklen; + SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE); + + memcpy(payload, &g_dev_buffer[offset], length); + g_dev_read_bytes += length; + } else { + g_power_failure_rc = -EIO; + } + + spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args); +} + +static void +dev_write(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, void *payload, + uint64_t lba, uint32_t lba_count, + struct spdk_bs_dev_cb_args *cb_args) +{ + uint64_t offset, 
length; + + if (g_power_failure_thresholds.write_threshold != 0) { + g_power_failure_counters.write_counter++; + } + + if (g_power_failure_thresholds.general_threshold != 0) { + g_power_failure_counters.general_counter++; + } + + if ((g_power_failure_thresholds.write_threshold == 0 || + g_power_failure_counters.write_counter < g_power_failure_thresholds.write_threshold) && + (g_power_failure_thresholds.general_threshold == 0 || + g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) { + offset = lba * dev->blocklen; + length = lba_count * dev->blocklen; + SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE); + + memcpy(&g_dev_buffer[offset], payload, length); + g_dev_write_bytes += length; + } else { + g_power_failure_rc = -EIO; + } + + spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args); +} + +static void +__check_iov(struct iovec *iov, int iovcnt, uint64_t length) +{ + int i; + + for (i = 0; i < iovcnt; i++) { + length -= iov[i].iov_len; + } + + CU_ASSERT(length == 0); +} + +static void +dev_readv(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, + struct iovec *iov, int iovcnt, + uint64_t lba, uint32_t lba_count, + struct spdk_bs_dev_cb_args *cb_args) +{ + uint64_t offset, length; + int i; + + if (g_power_failure_thresholds.read_threshold != 0) { + g_power_failure_counters.read_counter++; + } + + if (g_power_failure_thresholds.general_threshold != 0) { + g_power_failure_counters.general_counter++; + } + + if ((g_power_failure_thresholds.read_threshold == 0 || + g_power_failure_counters.read_counter < g_power_failure_thresholds.read_threshold) && + (g_power_failure_thresholds.general_threshold == 0 || + g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) { + offset = lba * dev->blocklen; + length = lba_count * dev->blocklen; + SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE); + __check_iov(iov, iovcnt, length); + + for (i = 0; i < iovcnt; i++) { + 
memcpy(iov[i].iov_base, &g_dev_buffer[offset], iov[i].iov_len); + offset += iov[i].iov_len; + } + + g_dev_read_bytes += length; + } else { + g_power_failure_rc = -EIO; + } + + spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args); +} + +static void +dev_writev(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, + struct iovec *iov, int iovcnt, + uint64_t lba, uint32_t lba_count, + struct spdk_bs_dev_cb_args *cb_args) +{ + uint64_t offset, length; + int i; + + if (g_power_failure_thresholds.write_threshold != 0) { + g_power_failure_counters.write_counter++; + } + + if (g_power_failure_thresholds.general_threshold != 0) { + g_power_failure_counters.general_counter++; + } + + if ((g_power_failure_thresholds.write_threshold == 0 || + g_power_failure_counters.write_counter < g_power_failure_thresholds.write_threshold) && + (g_power_failure_thresholds.general_threshold == 0 || + g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) { + offset = lba * dev->blocklen; + length = lba_count * dev->blocklen; + SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE); + __check_iov(iov, iovcnt, length); + + for (i = 0; i < iovcnt; i++) { + memcpy(&g_dev_buffer[offset], iov[i].iov_base, iov[i].iov_len); + offset += iov[i].iov_len; + } + + g_dev_write_bytes += length; + } else { + g_power_failure_rc = -EIO; + } + + spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args); +} + +static void +dev_flush(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, + struct spdk_bs_dev_cb_args *cb_args) +{ + if (g_power_failure_thresholds.flush_threshold != 0) { + g_power_failure_counters.flush_counter++; + } + + if (g_power_failure_thresholds.general_threshold != 0) { + g_power_failure_counters.general_counter++; + } + + if ((g_power_failure_thresholds.flush_threshold != 0 && + g_power_failure_counters.flush_counter >= g_power_failure_thresholds.flush_threshold) || + (g_power_failure_thresholds.general_threshold != 0 && + 
g_power_failure_counters.general_counter >= g_power_failure_thresholds.general_threshold)) { + g_power_failure_rc = -EIO; + } + + spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args); +} + +static void +dev_unmap(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, + uint64_t lba, uint32_t lba_count, + struct spdk_bs_dev_cb_args *cb_args) +{ + uint64_t offset, length; + + if (g_power_failure_thresholds.unmap_threshold != 0) { + g_power_failure_counters.unmap_counter++; + } + + if (g_power_failure_thresholds.general_threshold != 0) { + g_power_failure_counters.general_counter++; + } + + if ((g_power_failure_thresholds.unmap_threshold == 0 || + g_power_failure_counters.unmap_counter < g_power_failure_thresholds.unmap_threshold) && + (g_power_failure_thresholds.general_threshold == 0 || + g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) { + offset = lba * dev->blocklen; + length = lba_count * dev->blocklen; + SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE); + memset(&g_dev_buffer[offset], 0, length); + } else { + g_power_failure_rc = -EIO; + } + + spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args); +} + +static void +dev_write_zeroes(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, + uint64_t lba, uint32_t lba_count, + struct spdk_bs_dev_cb_args *cb_args) +{ + uint64_t offset, length; + + if (g_power_failure_thresholds.write_zero_threshold != 0) { + g_power_failure_counters.write_zero_counter++; + } + + if (g_power_failure_thresholds.general_threshold != 0) { + g_power_failure_counters.general_counter++; + } + + if ((g_power_failure_thresholds.write_zero_threshold == 0 || + g_power_failure_counters.write_zero_counter < g_power_failure_thresholds.write_zero_threshold) && + (g_power_failure_thresholds.general_threshold == 0 || + g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) { + offset = lba * dev->blocklen; + length = lba_count * 
dev->blocklen; + SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE); + memset(&g_dev_buffer[offset], 0, length); + g_dev_write_bytes += length; + } else { + g_power_failure_rc = -EIO; + } + + spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args); +} + +static struct spdk_bs_dev * +init_dev(void) +{ + struct spdk_bs_dev *dev = calloc(1, sizeof(*dev)); + + SPDK_CU_ASSERT_FATAL(dev != NULL); + + dev->create_channel = dev_create_channel; + dev->destroy_channel = dev_destroy_channel; + dev->destroy = dev_destroy; + dev->read = dev_read; + dev->write = dev_write; + dev->readv = dev_readv; + dev->writev = dev_writev; + dev->flush = dev_flush; + dev->unmap = dev_unmap; + dev->write_zeroes = dev_write_zeroes; + dev->blockcnt = DEV_BUFFER_BLOCKCNT; + dev->blocklen = DEV_BUFFER_BLOCKLEN; + + return dev; +} diff --git a/src/spdk/test/unit/lib/blob/bs_scheduler.c b/src/spdk/test/unit/lib/blob/bs_scheduler.c new file mode 100644 index 000000000..4b58fa007 --- /dev/null +++ b/src/spdk/test/unit/lib/blob/bs_scheduler.c @@ -0,0 +1,87 @@ +/*- + * BSD LICENSE + * + * Copyright (c) Intel Corporation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +bool g_scheduler_delay = false; + +struct scheduled_ops { + spdk_msg_fn fn; + void *ctx; + + TAILQ_ENTRY(scheduled_ops) ops_queue; +}; + +static TAILQ_HEAD(, scheduled_ops) g_scheduled_ops = TAILQ_HEAD_INITIALIZER(g_scheduled_ops); + +void _bs_flush_scheduler(uint32_t); + +static void +_bs_send_msg(spdk_msg_fn fn, void *ctx, void *thread_ctx) +{ + if (g_scheduler_delay) { + struct scheduled_ops *ops = calloc(1, sizeof(*ops)); + + SPDK_CU_ASSERT_FATAL(ops != NULL); + ops->fn = fn; + ops->ctx = ctx; + TAILQ_INSERT_TAIL(&g_scheduled_ops, ops, ops_queue); + + } else { + fn(ctx); + } +} + +static void +_bs_flush_scheduler_single(void) +{ + struct scheduled_ops *op; + TAILQ_HEAD(, scheduled_ops) ops; + TAILQ_INIT(&ops); + + TAILQ_SWAP(&g_scheduled_ops, &ops, scheduled_ops, ops_queue); + + while (!TAILQ_EMPTY(&ops)) { + op = TAILQ_FIRST(&ops); + TAILQ_REMOVE(&ops, op, ops_queue); + + op->fn(op->ctx); + free(op); + } +} + +void +_bs_flush_scheduler(uint32_t n) +{ + while (n--) { + _bs_flush_scheduler_single(); + } +} |