Diffstat (limited to 'fs/btrfs/tests')
-rw-r--r--  fs/btrfs/tests/btrfs-tests.c            304
-rw-r--r--  fs/btrfs/tests/btrfs-tests.h             57
-rw-r--r--  fs/btrfs/tests/extent-buffer-tests.c    223
-rw-r--r--  fs/btrfs/tests/extent-io-tests.c        812
-rw-r--r--  fs/btrfs/tests/extent-map-tests.c      1047
-rw-r--r--  fs/btrfs/tests/free-space-tests.c      1063
-rw-r--r--  fs/btrfs/tests/free-space-tree-tests.c  589
-rw-r--r--  fs/btrfs/tests/inode-tests.c           1108
-rw-r--r--  fs/btrfs/tests/qgroup-tests.c           559
9 files changed, 5762 insertions(+), 0 deletions(-)
diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
new file mode 100644
index 0000000000..ca09cf9afc
--- /dev/null
+++ b/fs/btrfs/tests/btrfs-tests.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2013 Fusion IO. All rights reserved.
+ */
+
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/pseudo_fs.h>
+#include <linux/magic.h>
+#include "btrfs-tests.h"
+#include "../ctree.h"
+#include "../free-space-cache.h"
+#include "../free-space-tree.h"
+#include "../transaction.h"
+#include "../volumes.h"
+#include "../disk-io.h"
+#include "../qgroup.h"
+#include "../block-group.h"
+#include "../fs.h"
+
+static struct vfsmount *test_mnt = NULL;
+
+const char *test_error[] = {
+ [TEST_ALLOC_FS_INFO] = "cannot allocate fs_info",
+ [TEST_ALLOC_ROOT] = "cannot allocate root",
+ [TEST_ALLOC_EXTENT_BUFFER] = "cannot allocate extent buffer",
+ [TEST_ALLOC_PATH] = "cannot allocate path",
+ [TEST_ALLOC_INODE] = "cannot allocate inode",
+ [TEST_ALLOC_BLOCK_GROUP] = "cannot allocate block group",
+ [TEST_ALLOC_EXTENT_MAP] = "cannot allocate extent map",
+};
+
+static const struct super_operations btrfs_test_super_ops = {
+ .alloc_inode = btrfs_alloc_inode,
+ .destroy_inode = btrfs_test_destroy_inode,
+};
+
+static int btrfs_test_init_fs_context(struct fs_context *fc)
+{
+ struct pseudo_fs_context *ctx = init_pseudo(fc, BTRFS_TEST_MAGIC);
+ if (!ctx)
+ return -ENOMEM;
+ ctx->ops = &btrfs_test_super_ops;
+ return 0;
+}
+
+static struct file_system_type test_type = {
+ .name = "btrfs_test_fs",
+ .init_fs_context = btrfs_test_init_fs_context,
+ .kill_sb = kill_anon_super,
+};
+
+struct inode *btrfs_new_test_inode(void)
+{
+ struct inode *inode;
+
+ inode = new_inode(test_mnt->mnt_sb);
+ if (!inode)
+ return NULL;
+
+ inode->i_mode = S_IFREG;
+ inode->i_ino = BTRFS_FIRST_FREE_OBJECTID;
+ BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
+ BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
+ BTRFS_I(inode)->location.offset = 0;
+ inode_init_owner(&nop_mnt_idmap, inode, NULL, S_IFREG);
+
+ return inode;
+}
+
+static int btrfs_init_test_fs(void)
+{
+ int ret;
+
+ ret = register_filesystem(&test_type);
+ if (ret) {
+ printk(KERN_ERR "btrfs: cannot register test file system\n");
+ return ret;
+ }
+
+ test_mnt = kern_mount(&test_type);
+ if (IS_ERR(test_mnt)) {
+ printk(KERN_ERR "btrfs: cannot mount test file system\n");
+ unregister_filesystem(&test_type);
+ return PTR_ERR(test_mnt);
+ }
+ return 0;
+}
+
+static void btrfs_destroy_test_fs(void)
+{
+ kern_unmount(test_mnt);
+ unregister_filesystem(&test_type);
+}
+
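+/*
+ * Allocate a bare btrfs_device for tests and link it into the dummy
+ * fs_devices list; btrfs_free_dummy_fs_info() later walks that list and
+ * frees each device via btrfs_free_dummy_device().
+ */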
+struct btrfs_device *btrfs_alloc_dummy_device(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_device *dev;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ extent_io_tree_init(NULL, &dev->alloc_state, 0);
+ INIT_LIST_HEAD(&dev->dev_list);
+ list_add(&dev->dev_list, &fs_info->fs_devices->devices);
+
+ return dev;
+}
+
+static void btrfs_free_dummy_device(struct btrfs_device *dev)
+{
+ extent_io_tree_release(&dev->alloc_state);
+ kfree(dev);
+}
+
+struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize)
+{
+ struct btrfs_fs_info *fs_info = kzalloc(sizeof(struct btrfs_fs_info),
+ GFP_KERNEL);
+
+ if (!fs_info)
+ return fs_info;
+ fs_info->fs_devices = kzalloc(sizeof(struct btrfs_fs_devices),
+ GFP_KERNEL);
+ if (!fs_info->fs_devices) {
+ kfree(fs_info);
+ return NULL;
+ }
+ INIT_LIST_HEAD(&fs_info->fs_devices->devices);
+
+ fs_info->super_copy = kzalloc(sizeof(struct btrfs_super_block),
+ GFP_KERNEL);
+ if (!fs_info->super_copy) {
+ kfree(fs_info->fs_devices);
+ kfree(fs_info);
+ return NULL;
+ }
+
+ btrfs_init_fs_info(fs_info);
+
+ fs_info->nodesize = nodesize;
+ fs_info->sectorsize = sectorsize;
+ fs_info->sectorsize_bits = ilog2(sectorsize);
+ set_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
+
+ test_mnt->mnt_sb->s_fs_info = fs_info;
+
+ return fs_info;
+}
+
+void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info)
+{
+ struct radix_tree_iter iter;
+ void **slot;
+ struct btrfs_device *dev, *tmp;
+
+ if (!fs_info)
+ return;
+
+ if (WARN_ON(!test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO,
+ &fs_info->fs_state)))
+ return;
+
+ test_mnt->mnt_sb->s_fs_info = NULL;
+
+ spin_lock(&fs_info->buffer_lock);
+ radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter, 0) {
+ struct extent_buffer *eb;
+
+ eb = radix_tree_deref_slot_protected(slot, &fs_info->buffer_lock);
+ if (!eb)
+ continue;
+ /* Shouldn't happen, but that kind of thinking creates CVEs */
+ if (radix_tree_exception(eb)) {
+ if (radix_tree_deref_retry(eb))
+ slot = radix_tree_iter_retry(&iter);
+ continue;
+ }
+ slot = radix_tree_iter_resume(slot, &iter);
+ spin_unlock(&fs_info->buffer_lock);
+ free_extent_buffer_stale(eb);
+ spin_lock(&fs_info->buffer_lock);
+ }
+ spin_unlock(&fs_info->buffer_lock);
+
+ btrfs_mapping_tree_free(&fs_info->mapping_tree);
+ list_for_each_entry_safe(dev, tmp, &fs_info->fs_devices->devices,
+ dev_list) {
+ btrfs_free_dummy_device(dev);
+ }
+ btrfs_free_qgroup_config(fs_info);
+ btrfs_free_fs_roots(fs_info);
+ kfree(fs_info->super_copy);
+ btrfs_check_leaked_roots(fs_info);
+ btrfs_extent_buffer_leak_debug_check(fs_info);
+ kfree(fs_info->fs_devices);
+ kfree(fs_info);
+}
+
+void btrfs_free_dummy_root(struct btrfs_root *root)
+{
+ if (IS_ERR_OR_NULL(root))
+ return;
+ /* Will be freed by btrfs_free_fs_roots */
+ if (WARN_ON(test_bit(BTRFS_ROOT_IN_RADIX, &root->state)))
+ return;
+ btrfs_global_root_delete(root);
+ btrfs_put_root(root);
+}
+
+struct btrfs_block_group *
+btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info,
+ unsigned long length)
+{
+ struct btrfs_block_group *cache;
+
+ cache = kzalloc(sizeof(*cache), GFP_KERNEL);
+ if (!cache)
+ return NULL;
+ cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
+ GFP_KERNEL);
+ if (!cache->free_space_ctl) {
+ kfree(cache);
+ return NULL;
+ }
+
+ cache->start = 0;
+ cache->length = length;
+ cache->full_stripe_len = fs_info->sectorsize;
+ cache->fs_info = fs_info;
+
+ INIT_LIST_HEAD(&cache->list);
+ INIT_LIST_HEAD(&cache->cluster_list);
+ INIT_LIST_HEAD(&cache->bg_list);
+ btrfs_init_free_space_ctl(cache, cache->free_space_ctl);
+ mutex_init(&cache->free_space_lock);
+
+ return cache;
+}
+
+void btrfs_free_dummy_block_group(struct btrfs_block_group *cache)
+{
+ if (!cache)
+ return;
+ btrfs_remove_free_space_cache(cache);
+ kfree(cache->free_space_ctl);
+ kfree(cache);
+}
+
+void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info)
+{
+ memset(trans, 0, sizeof(*trans));
+ trans->transid = 1;
+ trans->type = __TRANS_DUMMY;
+ trans->fs_info = fs_info;
+}
+
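+/*
+ * Entry point for the self tests: iterate over the supported sector sizes
+ * (only PAGE_SIZE here) and every power-of-two nodesize up to
+ * BTRFS_MAX_METADATA_BLOCKSIZE, running each test suite against every
+ * combination.
+ */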
+int btrfs_run_sanity_tests(void)
+{
+ int ret, i;
+ u32 sectorsize, nodesize;
+ u32 test_sectorsize[] = {
+ PAGE_SIZE,
+ };
+ ret = btrfs_init_test_fs();
+ if (ret)
+ return ret;
+ for (i = 0; i < ARRAY_SIZE(test_sectorsize); i++) {
+ sectorsize = test_sectorsize[i];
+ for (nodesize = sectorsize;
+ nodesize <= BTRFS_MAX_METADATA_BLOCKSIZE;
+ nodesize <<= 1) {
+ pr_info("BTRFS: selftest: sectorsize: %u nodesize: %u\n",
+ sectorsize, nodesize);
+ ret = btrfs_test_free_space_cache(sectorsize, nodesize);
+ if (ret)
+ goto out;
+ ret = btrfs_test_extent_buffer_operations(sectorsize,
+ nodesize);
+ if (ret)
+ goto out;
+ ret = btrfs_test_extent_io(sectorsize, nodesize);
+ if (ret)
+ goto out;
+ ret = btrfs_test_inodes(sectorsize, nodesize);
+ if (ret)
+ goto out;
+ ret = btrfs_test_qgroups(sectorsize, nodesize);
+ if (ret)
+ goto out;
+ ret = btrfs_test_free_space_tree(sectorsize, nodesize);
+ if (ret)
+ goto out;
+ }
+ }
+ ret = btrfs_test_extent_map();
+
+out:
+ btrfs_destroy_test_fs();
+ return ret;
+}
diff --git a/fs/btrfs/tests/btrfs-tests.h b/fs/btrfs/tests/btrfs-tests.h
new file mode 100644
index 0000000000..7a2d7ffbe3
--- /dev/null
+++ b/fs/btrfs/tests/btrfs-tests.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2013 Fusion IO. All rights reserved.
+ */
+
+#ifndef BTRFS_TESTS_H
+#define BTRFS_TESTS_H
+
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+int btrfs_run_sanity_tests(void);
+
+#define test_msg(fmt, ...) pr_info("BTRFS: selftest: " fmt "\n", ##__VA_ARGS__)
+#define test_err(fmt, ...) pr_err("BTRFS: selftest: %s:%d " fmt "\n", \
+ __FILE__, __LINE__, ##__VA_ARGS__)
+
+#define test_std_err(index) test_err("%s", test_error[index])
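+
+/*
+ * For example, test_std_err(TEST_ALLOC_INODE) expands to
+ * test_err("%s", test_error[TEST_ALLOC_INODE]) and prints
+ * "BTRFS: selftest: <file>:<line> cannot allocate inode".
+ */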
+
+enum {
+ TEST_ALLOC_FS_INFO,
+ TEST_ALLOC_ROOT,
+ TEST_ALLOC_EXTENT_BUFFER,
+ TEST_ALLOC_PATH,
+ TEST_ALLOC_INODE,
+ TEST_ALLOC_BLOCK_GROUP,
+ TEST_ALLOC_EXTENT_MAP,
+};
+
+extern const char *test_error[];
+
+struct btrfs_root;
+struct btrfs_trans_handle;
+
+int btrfs_test_extent_buffer_operations(u32 sectorsize, u32 nodesize);
+int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize);
+int btrfs_test_extent_io(u32 sectorsize, u32 nodesize);
+int btrfs_test_inodes(u32 sectorsize, u32 nodesize);
+int btrfs_test_qgroups(u32 sectorsize, u32 nodesize);
+int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize);
+int btrfs_test_extent_map(void);
+struct inode *btrfs_new_test_inode(void);
+struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize);
+void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info);
+void btrfs_free_dummy_root(struct btrfs_root *root);
+struct btrfs_block_group *
+btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info, unsigned long length);
+void btrfs_free_dummy_block_group(struct btrfs_block_group *cache);
+void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info);
+struct btrfs_device *btrfs_alloc_dummy_device(struct btrfs_fs_info *fs_info);
+#else
+static inline int btrfs_run_sanity_tests(void)
+{
+ return 0;
+}
+#endif
+
+#endif
diff --git a/fs/btrfs/tests/extent-buffer-tests.c b/fs/btrfs/tests/extent-buffer-tests.c
new file mode 100644
index 0000000000..6a43a64ba5
--- /dev/null
+++ b/fs/btrfs/tests/extent-buffer-tests.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2013 Fusion IO. All rights reserved.
+ */
+
+#include <linux/slab.h>
+#include "btrfs-tests.h"
+#include "../ctree.h"
+#include "../extent_io.h"
+#include "../disk-io.h"
+#include "../accessors.h"
+
+static int test_btrfs_split_item(u32 sectorsize, u32 nodesize)
+{
+ struct btrfs_fs_info *fs_info;
+ struct btrfs_path *path = NULL;
+ struct btrfs_root *root = NULL;
+ struct extent_buffer *eb;
+ char *value = "mary had a little lamb";
+ char *split1 = "mary had a little";
+ char *split2 = " lamb";
+ char *split3 = "mary";
+ char *split4 = " had a little";
+ char buf[32];
+ struct btrfs_key key;
+ u32 value_len = strlen(value);
+ int ret = 0;
+
+ test_msg("running btrfs_split_item tests");
+
+ fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
+ if (!fs_info) {
+ test_std_err(TEST_ALLOC_FS_INFO);
+ return -ENOMEM;
+ }
+
+ root = btrfs_alloc_dummy_root(fs_info);
+ if (IS_ERR(root)) {
+ test_std_err(TEST_ALLOC_ROOT);
+ ret = PTR_ERR(root);
+ goto out;
+ }
+
+ path = btrfs_alloc_path();
+ if (!path) {
+ test_std_err(TEST_ALLOC_PATH);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ eb = alloc_dummy_extent_buffer(fs_info, nodesize);
+ path->nodes[0] = eb;
+ if (!eb) {
+ test_std_err(TEST_ALLOC_EXTENT_BUFFER);
+ ret = -ENOMEM;
+ goto out;
+ }
+ path->slots[0] = 0;
+
+ key.objectid = 0;
+ key.type = BTRFS_EXTENT_CSUM_KEY;
+ key.offset = 0;
+
+ /*
+ * Passing a NULL trans handle is fine here, we have a dummy root eb
+ * and the tree is a single node (level 0).
+ */
+ btrfs_setup_item_for_insert(NULL, root, path, &key, value_len);
+ write_extent_buffer(eb, value, btrfs_item_ptr_offset(eb, 0),
+ value_len);
+
+ key.offset = 3;
+
+ /*
+ * Passing NULL trans here should be safe because we have plenty of
+ * space in this leaf to split the item without having to split the
+ * leaf.
+ */
+ ret = btrfs_split_item(NULL, root, path, &key, 17);
+ if (ret) {
+ test_err("split item failed %d", ret);
+ goto out;
+ }
+
+ /*
+ * Read the first slot; it should have the original key and contain only
+ * 'mary had a little'.
+ */
+ btrfs_item_key_to_cpu(eb, &key, 0);
+ if (key.objectid != 0 || key.type != BTRFS_EXTENT_CSUM_KEY ||
+ key.offset != 0) {
+ test_err("invalid key at slot 0");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (btrfs_item_size(eb, 0) != strlen(split1)) {
+ test_err("invalid len in the first split");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ read_extent_buffer(eb, buf, btrfs_item_ptr_offset(eb, 0),
+ strlen(split1));
+ if (memcmp(buf, split1, strlen(split1))) {
+ test_err(
+"data in the buffer doesn't match what it should in the first split have='%.*s' want '%s'",
+ (int)strlen(split1), buf, split1);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ btrfs_item_key_to_cpu(eb, &key, 1);
+ if (key.objectid != 0 || key.type != BTRFS_EXTENT_CSUM_KEY ||
+ key.offset != 3) {
+ test_err("invalid key at slot 1");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (btrfs_item_size(eb, 1) != strlen(split2)) {
+ test_err("invalid len in the second split");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ read_extent_buffer(eb, buf, btrfs_item_ptr_offset(eb, 1),
+ strlen(split2));
+ if (memcmp(buf, split2, strlen(split2))) {
+ test_err(
+ "data in the buffer doesn't match what it should in the second split");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ key.offset = 1;
+ /* Do it again so we test memmoving the other items in the leaf */
+ ret = btrfs_split_item(NULL, root, path, &key, 4);
+ if (ret) {
+ test_err("second split item failed %d", ret);
+ goto out;
+ }
+
+ btrfs_item_key_to_cpu(eb, &key, 0);
+ if (key.objectid != 0 || key.type != BTRFS_EXTENT_CSUM_KEY ||
+ key.offset != 0) {
+ test_err("invalid key at slot 0");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (btrfs_item_size(eb, 0) != strlen(split3)) {
+ test_err("invalid len in the first split");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ read_extent_buffer(eb, buf, btrfs_item_ptr_offset(eb, 0),
+ strlen(split3));
+ if (memcmp(buf, split3, strlen(split3))) {
+ test_err(
+ "data in the buffer doesn't match what it should in the third split");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ btrfs_item_key_to_cpu(eb, &key, 1);
+ if (key.objectid != 0 || key.type != BTRFS_EXTENT_CSUM_KEY ||
+ key.offset != 1) {
+ test_err("invalid key at slot 1");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (btrfs_item_size(eb, 1) != strlen(split4)) {
+ test_err("invalid len in the second split");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ read_extent_buffer(eb, buf, btrfs_item_ptr_offset(eb, 1),
+ strlen(split4));
+ if (memcmp(buf, split4, strlen(split4))) {
+ test_err(
+ "data in the buffer doesn't match what it should in the fourth split");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ btrfs_item_key_to_cpu(eb, &key, 2);
+ if (key.objectid != 0 || key.type != BTRFS_EXTENT_CSUM_KEY ||
+ key.offset != 3) {
+ test_err("invalid key at slot 2");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (btrfs_item_size(eb, 2) != strlen(split2)) {
+ test_err("invalid len in the second split");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ read_extent_buffer(eb, buf, btrfs_item_ptr_offset(eb, 2),
+ strlen(split2));
+ if (memcmp(buf, split2, strlen(split2))) {
+ test_err(
+ "data in the buffer doesn't match what it should in the last chunk");
+ ret = -EINVAL;
+ goto out;
+ }
+out:
+ btrfs_free_path(path);
+ btrfs_free_dummy_root(root);
+ btrfs_free_dummy_fs_info(fs_info);
+ return ret;
+}
+
+int btrfs_test_extent_buffer_operations(u32 sectorsize, u32 nodesize)
+{
+ test_msg("running extent buffer operation tests");
+ return test_btrfs_split_item(sectorsize, nodesize);
+}
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
new file mode 100644
index 0000000000..1cc86af97d
--- /dev/null
+++ b/fs/btrfs/tests/extent-io-tests.c
@@ -0,0 +1,812 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2013 Fusion IO. All rights reserved.
+ */
+
+#include <linux/pagemap.h>
+#include <linux/pagevec.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/sizes.h>
+#include "btrfs-tests.h"
+#include "../ctree.h"
+#include "../extent_io.h"
+#include "../btrfs_inode.h"
+
+#define PROCESS_UNLOCK (1 << 0)
+#define PROCESS_RELEASE (1 << 1)
+#define PROCESS_TEST_LOCKED (1 << 2)
+
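+/*
+ * Walk the page cache over [start, end]: depending on @flags, verify the
+ * locked state, unlock and/or release each folio. Returns the number of
+ * folios that failed the PROCESS_TEST_LOCKED check, i.e. were found
+ * unlocked when they were expected to be locked.
+ */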
+static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
+ unsigned long flags)
+{
+ int ret;
+ struct folio_batch fbatch;
+ unsigned long index = start >> PAGE_SHIFT;
+ unsigned long end_index = end >> PAGE_SHIFT;
+ int i;
+ int count = 0;
+ int loops = 0;
+
+ folio_batch_init(&fbatch);
+
+ while (index <= end_index) {
+ ret = filemap_get_folios_contig(inode->i_mapping, &index,
+ end_index, &fbatch);
+ for (i = 0; i < ret; i++) {
+ struct folio *folio = fbatch.folios[i];
+
+ if (flags & PROCESS_TEST_LOCKED &&
+ !folio_test_locked(folio))
+ count++;
+ if (flags & PROCESS_UNLOCK && folio_test_locked(folio))
+ folio_unlock(folio);
+ if (flags & PROCESS_RELEASE)
+ folio_put(folio);
+ }
+ folio_batch_release(&fbatch);
+ cond_resched();
+ loops++;
+ if (loops > 100000) {
+ printk(KERN_ERR
+ "stuck in a loop, start %llu, end %llu, ret %d\n",
+ start, end, ret);
+ break;
+ }
+ }
+
+ return count;
+}
+
+#define STATE_FLAG_STR_LEN 256
+
+#define PRINT_ONE_FLAG(state, dest, cur, name) \
+({ \
+ if (state->state & EXTENT_##name) \
+ cur += scnprintf(dest + cur, STATE_FLAG_STR_LEN - cur, \
+ "%s" #name, cur == 0 ? "" : "|"); \
+})
+
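+/*
+ * Render the state flags as a '|'-separated string; e.g. a state with
+ * EXTENT_DIRTY and EXTENT_DELALLOC set yields "DIRTY|DELALLOC".
+ */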
+static void extent_flag_to_str(const struct extent_state *state, char *dest)
+{
+ int cur = 0;
+
+ dest[0] = 0;
+ PRINT_ONE_FLAG(state, dest, cur, DIRTY);
+ PRINT_ONE_FLAG(state, dest, cur, UPTODATE);
+ PRINT_ONE_FLAG(state, dest, cur, LOCKED);
+ PRINT_ONE_FLAG(state, dest, cur, NEW);
+ PRINT_ONE_FLAG(state, dest, cur, DELALLOC);
+ PRINT_ONE_FLAG(state, dest, cur, DEFRAG);
+ PRINT_ONE_FLAG(state, dest, cur, BOUNDARY);
+ PRINT_ONE_FLAG(state, dest, cur, NODATASUM);
+ PRINT_ONE_FLAG(state, dest, cur, CLEAR_META_RESV);
+ PRINT_ONE_FLAG(state, dest, cur, NEED_WAIT);
+ PRINT_ONE_FLAG(state, dest, cur, NORESERVE);
+ PRINT_ONE_FLAG(state, dest, cur, QGROUP_RESERVED);
+ PRINT_ONE_FLAG(state, dest, cur, CLEAR_DATA_RESV);
+}
+
+static void dump_extent_io_tree(const struct extent_io_tree *tree)
+{
+ struct rb_node *node;
+ char flags_str[STATE_FLAG_STR_LEN];
+
+ node = rb_first(&tree->state);
+ test_msg("io tree content:");
+ while (node) {
+ struct extent_state *state;
+
+ state = rb_entry(node, struct extent_state, rb_node);
+ extent_flag_to_str(state, flags_str);
+ test_msg(" start=%llu len=%llu flags=%s", state->start,
+ state->end + 1 - state->start, flags_str);
+ node = rb_next(node);
+ }
+}
+
+static int test_find_delalloc(u32 sectorsize)
+{
+ struct inode *inode;
+ struct extent_io_tree *tmp;
+ struct page *page;
+ struct page *locked_page = NULL;
+ unsigned long index = 0;
+ /* In this test we need at least 2 file extents at its maximum size */
+ u64 max_bytes = BTRFS_MAX_EXTENT_SIZE;
+ u64 total_dirty = 2 * max_bytes;
+ u64 start, end, test_start;
+ bool found;
+ int ret = -EINVAL;
+
+ test_msg("running find delalloc tests");
+
+ inode = btrfs_new_test_inode();
+ if (!inode) {
+ test_std_err(TEST_ALLOC_INODE);
+ return -ENOMEM;
+ }
+ tmp = &BTRFS_I(inode)->io_tree;
+
+ /*
+ * Passing NULL as we don't have an fs_info; tracepoints are not used at
+ * this point anyway.
+ */
+ extent_io_tree_init(NULL, tmp, IO_TREE_SELFTEST);
+
+ /*
+ * First go through and create and mark all of our pages dirty, we pin
+ * everything to make sure our pages don't get evicted and screw up our
+ * test.
+ */
+ for (index = 0; index < (total_dirty >> PAGE_SHIFT); index++) {
+ page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
+ if (!page) {
+ test_err("failed to allocate test page");
+ ret = -ENOMEM;
+ goto out;
+ }
+ SetPageDirty(page);
+ if (index) {
+ unlock_page(page);
+ } else {
+ get_page(page);
+ locked_page = page;
+ }
+ }
+
+ /*
+ * Test this scenario
+ * |--- delalloc ---|
+ * |--- search ---|
+ */
+ set_extent_bit(tmp, 0, sectorsize - 1, EXTENT_DELALLOC, NULL);
+ start = 0;
+ end = start + PAGE_SIZE - 1;
+ found = find_lock_delalloc_range(inode, locked_page, &start,
+ &end);
+ if (!found) {
+ test_err("should have found at least one delalloc");
+ goto out_bits;
+ }
+ if (start != 0 || end != (sectorsize - 1)) {
+ test_err("expected start 0 end %u, got start %llu end %llu",
+ sectorsize - 1, start, end);
+ goto out_bits;
+ }
+ unlock_extent(tmp, start, end, NULL);
+ unlock_page(locked_page);
+ put_page(locked_page);
+
+ /*
+ * Test this scenario
+ *
+ * |--- delalloc ---|
+ * |--- search ---|
+ */
+ test_start = SZ_64M;
+ locked_page = find_lock_page(inode->i_mapping,
+ test_start >> PAGE_SHIFT);
+ if (!locked_page) {
+ test_err("couldn't find the locked page");
+ goto out_bits;
+ }
+ set_extent_bit(tmp, sectorsize, max_bytes - 1, EXTENT_DELALLOC, NULL);
+ start = test_start;
+ end = start + PAGE_SIZE - 1;
+ found = find_lock_delalloc_range(inode, locked_page, &start,
+ &end);
+ if (!found) {
+ test_err("couldn't find delalloc in our range");
+ goto out_bits;
+ }
+ if (start != test_start || end != max_bytes - 1) {
+ test_err("expected start %llu end %llu, got start %llu, end %llu",
+ test_start, max_bytes - 1, start, end);
+ goto out_bits;
+ }
+ if (process_page_range(inode, start, end,
+ PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) {
+ test_err("there were unlocked pages in the range");
+ goto out_bits;
+ }
+ unlock_extent(tmp, start, end, NULL);
+ /* locked_page was unlocked above */
+ put_page(locked_page);
+
+ /*
+ * Test this scenario
+ * |--- delalloc ---|
+ * |--- search ---|
+ */
+ test_start = max_bytes + sectorsize;
+ locked_page = find_lock_page(inode->i_mapping, test_start >>
+ PAGE_SHIFT);
+ if (!locked_page) {
+ test_err("couldn't find the locked page");
+ goto out_bits;
+ }
+ start = test_start;
+ end = start + PAGE_SIZE - 1;
+ found = find_lock_delalloc_range(inode, locked_page, &start,
+ &end);
+ if (found) {
+ test_err("found range when we shouldn't have");
+ goto out_bits;
+ }
+ if (end != test_start + PAGE_SIZE - 1) {
+ test_err("did not return the proper end offset");
+ goto out_bits;
+ }
+
+ /*
+ * Test this scenario
+ * [------- delalloc -------|
+ * [max_bytes]|-- search--|
+ *
+ * We are re-using our test_start from above since it works out well.
+ */
+ set_extent_bit(tmp, max_bytes, total_dirty - 1, EXTENT_DELALLOC, NULL);
+ start = test_start;
+ end = start + PAGE_SIZE - 1;
+ found = find_lock_delalloc_range(inode, locked_page, &start,
+ &end);
+ if (!found) {
+ test_err("didn't find our range");
+ goto out_bits;
+ }
+ if (start != test_start || end != total_dirty - 1) {
+ test_err("expected start %llu end %llu, got start %llu end %llu",
+ test_start, total_dirty - 1, start, end);
+ goto out_bits;
+ }
+ if (process_page_range(inode, start, end,
+ PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) {
+ test_err("pages in range were not all locked");
+ goto out_bits;
+ }
+ unlock_extent(tmp, start, end, NULL);
+
+ /*
+ * Now test the case where we run into a page that is no longer dirty in
+ * the range we want to find.
+ */
+ page = find_get_page(inode->i_mapping,
+ (max_bytes + SZ_1M) >> PAGE_SHIFT);
+ if (!page) {
+ test_err("couldn't find our page");
+ goto out_bits;
+ }
+ ClearPageDirty(page);
+ put_page(page);
+
+ /* We unlocked it in the previous test */
+ lock_page(locked_page);
+ start = test_start;
+ end = start + PAGE_SIZE - 1;
+ /*
+ * Currently if we fail to find dirty pages in the delalloc range we
+ * will adjust max_bytes down to PAGE_SIZE and then re-search. If
+ * this changes at any point in the future we will need to fix this
+ * test's expected behavior.
+ */
+ found = find_lock_delalloc_range(inode, locked_page, &start,
+ &end);
+ if (!found) {
+ test_err("didn't find our range");
+ goto out_bits;
+ }
+ if (start != test_start || end != test_start + PAGE_SIZE - 1) {
+ test_err("expected start %llu end %llu, got start %llu end %llu",
+ test_start, test_start + PAGE_SIZE - 1, start, end);
+ goto out_bits;
+ }
+ if (process_page_range(inode, start, end, PROCESS_TEST_LOCKED |
+ PROCESS_UNLOCK)) {
+ test_err("pages in range were not all locked");
+ goto out_bits;
+ }
+ ret = 0;
+out_bits:
+ if (ret)
+ dump_extent_io_tree(tmp);
+ clear_extent_bits(tmp, 0, total_dirty - 1, (unsigned)-1);
+out:
+ if (locked_page)
+ put_page(locked_page);
+ process_page_range(inode, 0, total_dirty - 1,
+ PROCESS_UNLOCK | PROCESS_RELEASE);
+ iput(inode);
+ return ret;
+}
+
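+/*
+ * Compare every bit of the reference bitmap against the extent buffer,
+ * exercising extent_buffer_test_bit() through both its byte-0-relative
+ * form and its byte + bit-offset form.
+ */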
+static int check_eb_bitmap(unsigned long *bitmap, struct extent_buffer *eb)
+{
+ unsigned long i;
+
+ for (i = 0; i < eb->len * BITS_PER_BYTE; i++) {
+ int bit, bit1;
+
+ bit = !!test_bit(i, bitmap);
+ bit1 = !!extent_buffer_test_bit(eb, 0, i);
+ if (bit1 != bit) {
+ u8 has;
+ u8 expect;
+
+ read_extent_buffer(eb, &has, i / BITS_PER_BYTE, 1);
+ expect = bitmap_get_value8(bitmap, ALIGN(i, BITS_PER_BYTE));
+
+ test_err(
+ "bits do not match, start byte 0 bit %lu, byte %lu has 0x%02x expect 0x%02x",
+ i, i / BITS_PER_BYTE, has, expect);
+ return -EINVAL;
+ }
+
+ bit1 = !!extent_buffer_test_bit(eb, i / BITS_PER_BYTE,
+ i % BITS_PER_BYTE);
+ if (bit1 != bit) {
+ u8 has;
+ u8 expect;
+
+ read_extent_buffer(eb, &has, i / BITS_PER_BYTE, 1);
+ expect = bitmap_get_value8(bitmap, ALIGN(i, BITS_PER_BYTE));
+
+ test_err(
+ "bits do not match, start byte %lu bit %lu, byte %lu has 0x%02x expect 0x%02x",
+ i / BITS_PER_BYTE, i % BITS_PER_BYTE,
+ i / BITS_PER_BYTE, has, expect);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
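+/*
+ * Apply the same set operation to the reference bitmap and to the extent
+ * buffer, then verify both still agree bit for bit.
+ */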
+static int test_bitmap_set(const char *name, unsigned long *bitmap,
+ struct extent_buffer *eb,
+ unsigned long byte_start, unsigned long bit_start,
+ unsigned long bit_len)
+{
+ int ret;
+
+ bitmap_set(bitmap, byte_start * BITS_PER_BYTE + bit_start, bit_len);
+ extent_buffer_bitmap_set(eb, byte_start, bit_start, bit_len);
+ ret = check_eb_bitmap(bitmap, eb);
+ if (ret < 0)
+ test_err("%s test failed", name);
+ return ret;
+}
+
+static int test_bitmap_clear(const char *name, unsigned long *bitmap,
+ struct extent_buffer *eb,
+ unsigned long byte_start, unsigned long bit_start,
+ unsigned long bit_len)
+{
+ int ret;
+
+ bitmap_clear(bitmap, byte_start * BITS_PER_BYTE + bit_start, bit_len);
+ extent_buffer_bitmap_clear(eb, byte_start, bit_start, bit_len);
+ ret = check_eb_bitmap(bitmap, eb);
+ if (ret < 0)
+ test_err("%s test failed", name);
+ return ret;
+}
+
+static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb)
+{
+ unsigned long i, j;
+ unsigned long byte_len = eb->len;
+ u32 x;
+ int ret;
+
+ ret = test_bitmap_clear("clear all run 1", bitmap, eb, 0, 0,
+ byte_len * BITS_PER_BYTE);
+ if (ret < 0)
+ return ret;
+
+ ret = test_bitmap_set("set all", bitmap, eb, 0, 0, byte_len * BITS_PER_BYTE);
+ if (ret < 0)
+ return ret;
+
+ ret = test_bitmap_clear("clear all run 2", bitmap, eb, 0, 0,
+ byte_len * BITS_PER_BYTE);
+ if (ret < 0)
+ return ret;
+
+ ret = test_bitmap_set("same byte set", bitmap, eb, 0, 2, 4);
+ if (ret < 0)
+ return ret;
+
+ ret = test_bitmap_clear("same byte partial clear", bitmap, eb, 0, 4, 1);
+ if (ret < 0)
+ return ret;
+
+ ret = test_bitmap_set("cross byte set", bitmap, eb, 2, 4, 8);
+ if (ret < 0)
+ return ret;
+
+ ret = test_bitmap_set("cross multi byte set", bitmap, eb, 4, 4, 24);
+ if (ret < 0)
+ return ret;
+
+ ret = test_bitmap_clear("cross byte clear", bitmap, eb, 2, 6, 4);
+ if (ret < 0)
+ return ret;
+
+ ret = test_bitmap_clear("cross multi byte clear", bitmap, eb, 4, 6, 20);
+ if (ret < 0)
+ return ret;
+
+ /* Straddling pages test */
+ if (byte_len > PAGE_SIZE) {
+ ret = test_bitmap_set("cross page set", bitmap, eb,
+ PAGE_SIZE - sizeof(long) / 2, 0,
+ sizeof(long) * BITS_PER_BYTE);
+ if (ret < 0)
+ return ret;
+
+ ret = test_bitmap_set("cross page set all", bitmap, eb, 0, 0,
+ byte_len * BITS_PER_BYTE);
+ if (ret < 0)
+ return ret;
+
+ ret = test_bitmap_clear("cross page clear", bitmap, eb,
+ PAGE_SIZE - sizeof(long) / 2, 0,
+ sizeof(long) * BITS_PER_BYTE);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * Generate a wonky pseudo-random bit pattern for the sake of not using
+ * something repetitive that could miss some hypothetical off-by-n bug.
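+ * The update below is a linear congruential generator,
+ * x = (1664525 * x + 1013904223) mod 2^32; 0x19660d and 0x3c6ef35f are
+ * the classic Numerical Recipes constants.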
+ */
+ x = 0;
+ ret = test_bitmap_clear("clear all run 3", bitmap, eb, 0, 0,
+ byte_len * BITS_PER_BYTE);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < byte_len * BITS_PER_BYTE / 32; i++) {
+ x = (0x19660dULL * (u64)x + 0x3c6ef35fULL) & 0xffffffffU;
+ for (j = 0; j < 32; j++) {
+ if (x & (1U << j)) {
+ bitmap_set(bitmap, i * 32 + j, 1);
+ extent_buffer_bitmap_set(eb, 0, i * 32 + j, 1);
+ }
+ }
+ }
+
+ ret = check_eb_bitmap(bitmap, eb);
+ if (ret) {
+ test_err("random bit pattern failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int test_eb_bitmaps(u32 sectorsize, u32 nodesize)
+{
+ struct btrfs_fs_info *fs_info;
+ unsigned long *bitmap = NULL;
+ struct extent_buffer *eb = NULL;
+ int ret;
+
+ test_msg("running extent buffer bitmap tests");
+
+ fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
+ if (!fs_info) {
+ test_std_err(TEST_ALLOC_FS_INFO);
+ return -ENOMEM;
+ }
+
+ bitmap = kmalloc(nodesize, GFP_KERNEL);
+ if (!bitmap) {
+ test_err("couldn't allocate test bitmap");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ eb = __alloc_dummy_extent_buffer(fs_info, 0, nodesize);
+ if (!eb) {
+ test_std_err(TEST_ALLOC_ROOT);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = __test_eb_bitmaps(bitmap, eb);
+ if (ret)
+ goto out;
+
+ free_extent_buffer(eb);
+
+ /*
+ * Test again for case where the tree block is sectorsize aligned but
+ * not nodesize aligned.
+ */
+ eb = __alloc_dummy_extent_buffer(fs_info, sectorsize, nodesize);
+ if (!eb) {
+ test_std_err(TEST_ALLOC_ROOT);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = __test_eb_bitmaps(bitmap, eb);
+out:
+ free_extent_buffer(eb);
+ kfree(bitmap);
+ btrfs_free_dummy_fs_info(fs_info);
+ return ret;
+}
+
+static int test_find_first_clear_extent_bit(void)
+{
+ struct extent_io_tree tree;
+ u64 start, end;
+ int ret = -EINVAL;
+
+ test_msg("running find_first_clear_extent_bit test");
+
+ extent_io_tree_init(NULL, &tree, IO_TREE_SELFTEST);
+
+ /* Test correct handling of empty tree */
+ find_first_clear_extent_bit(&tree, 0, &start, &end, CHUNK_TRIMMED);
+ if (start != 0 || end != -1) {
+ test_err(
+ "error getting a range from completely empty tree: start %llu end %llu",
+ start, end);
+ goto out;
+ }
+ /*
+ * Set 1M-4M as alloc/discard; 32M-64M is added further below, leaving
+ * a hole between 4M and 32M.
+ */
+ set_extent_bit(&tree, SZ_1M, SZ_4M - 1,
+ CHUNK_TRIMMED | CHUNK_ALLOCATED, NULL);
+
+ find_first_clear_extent_bit(&tree, SZ_512K, &start, &end,
+ CHUNK_TRIMMED | CHUNK_ALLOCATED);
+
+ if (start != 0 || end != SZ_1M - 1) {
+ test_err("error finding beginning range: start %llu end %llu",
+ start, end);
+ goto out;
+ }
+
+ /* Now add 32M-64M so that we have a hole between 4M-32M */
+ set_extent_bit(&tree, SZ_32M, SZ_64M - 1,
+ CHUNK_TRIMMED | CHUNK_ALLOCATED, NULL);
+
+ /*
+ * Request the first hole starting at 12M; we should get 4M-32M
+ */
+ find_first_clear_extent_bit(&tree, 12 * SZ_1M, &start, &end,
+ CHUNK_TRIMMED | CHUNK_ALLOCATED);
+
+ if (start != SZ_4M || end != SZ_32M - 1) {
+ test_err("error finding trimmed range: start %llu end %llu",
+ start, end);
+ goto out;
+ }
+
+ /*
+ * Search in the middle of the allocated range; we should get the next
+ * available one, which happens to be unallocated -> 4M-32M
+ */
+ find_first_clear_extent_bit(&tree, SZ_2M, &start, &end,
+ CHUNK_TRIMMED | CHUNK_ALLOCATED);
+
+ if (start != SZ_4M || end != SZ_32M - 1) {
+ test_err("error finding next unalloc range: start %llu end %llu",
+ start, end);
+ goto out;
+ }
+
+ /*
+ * Set 64M-72M with CHUNK_ALLOC flag, then search for CHUNK_TRIMMED flag
+ * being unset in this range, we should get the entry in range 64M-72M
+ */
+ set_extent_bit(&tree, SZ_64M, SZ_64M + SZ_8M - 1, CHUNK_ALLOCATED, NULL);
+ find_first_clear_extent_bit(&tree, SZ_64M + SZ_1M, &start, &end,
+ CHUNK_TRIMMED);
+
+ if (start != SZ_64M || end != SZ_64M + SZ_8M - 1) {
+ test_err("error finding exact range: start %llu end %llu",
+ start, end);
+ goto out;
+ }
+
+ /*
+ * Search in the middle of a set range whose immediate neighbour does
+ * not have the bits set, so the neighbour itself must be returned.
+ */
+ find_first_clear_extent_bit(&tree, SZ_64M - SZ_8M, &start, &end,
+ CHUNK_TRIMMED);
+
+ if (start != SZ_64M || end != SZ_64M + SZ_8M - 1) {
+ test_err("error finding next alloc range: start %llu end %llu",
+ start, end);
+ goto out;
+ }
+
+ /*
+ * Search beyond any known range; it shall return the area after the
+ * last known range, with end == -1
+ */
+ find_first_clear_extent_bit(&tree, -1, &start, &end, CHUNK_TRIMMED);
+ if (start != SZ_64M + SZ_8M || end != -1) {
+ test_err(
+ "error handling beyond end of range search: start %llu end %llu",
+ start, end);
+ goto out;
+ }
+
+ ret = 0;
+out:
+ if (ret)
+ dump_extent_io_tree(&tree);
+ clear_extent_bits(&tree, 0, (u64)-1, CHUNK_TRIMMED | CHUNK_ALLOCATED);
+
+ return ret;
+}
+
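+/*
+ * On a mismatch, report the first byte where the extent buffer pages and
+ * the reference memory buffer differ.
+ */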
+static void dump_eb_and_memory_contents(struct extent_buffer *eb, void *memory,
+ const char *test_name)
+{
+ for (int i = 0; i < eb->len; i++) {
+ struct page *page = eb->pages[i >> PAGE_SHIFT];
+ void *addr = page_address(page) + offset_in_page(i);
+
+ if (memcmp(addr, memory + i, 1) != 0) {
+ test_err("%s failed", test_name);
+ test_err("eb and memory diffs at byte %u, eb has 0x%02x memory has 0x%02x",
+ i, *(u8 *)addr, *(u8 *)(memory + i));
+ return;
+ }
+ }
+}
+
+static int verify_eb_and_memory(struct extent_buffer *eb, void *memory,
+ const char *test_name)
+{
+ for (int i = 0; i < (eb->len >> PAGE_SHIFT); i++) {
+ void *eb_addr = page_address(eb->pages[i]);
+
+ if (memcmp(memory + (i << PAGE_SHIFT), eb_addr, PAGE_SIZE) != 0) {
+ dump_eb_and_memory_contents(eb, memory, test_name);
+ return -EUCLEAN;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Initialize both the memory buffer and the extent buffer with the same
+ * randomly generated data.
+ */
+static void init_eb_and_memory(struct extent_buffer *eb, void *memory)
+{
+ get_random_bytes(memory, eb->len);
+ write_extent_buffer(eb, memory, 0, eb->len);
+}
+
+static int test_eb_mem_ops(u32 sectorsize, u32 nodesize)
+{
+ struct btrfs_fs_info *fs_info;
+ struct extent_buffer *eb = NULL;
+ void *memory = NULL;
+ int ret;
+
+ test_msg("running extent buffer memory operation tests");
+
+ fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
+ if (!fs_info) {
+ test_std_err(TEST_ALLOC_FS_INFO);
+ return -ENOMEM;
+ }
+
+ memory = kvzalloc(nodesize, GFP_KERNEL);
+ if (!memory) {
+ test_err("failed to allocate memory");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ eb = __alloc_dummy_extent_buffer(fs_info, SZ_1M, nodesize);
+ if (!eb) {
+ test_std_err(TEST_ALLOC_EXTENT_BUFFER);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ init_eb_and_memory(eb, memory);
+ ret = verify_eb_and_memory(eb, memory, "full eb write");
+ if (ret < 0)
+ goto out;
+
+ memcpy(memory, memory + 16, 16);
+ memcpy_extent_buffer(eb, 0, 16, 16);
+ ret = verify_eb_and_memory(eb, memory, "same page non-overlapping memcpy 1");
+ if (ret < 0)
+ goto out;
+
+ memcpy(memory, memory + 2048, 16);
+ memcpy_extent_buffer(eb, 0, 2048, 16);
+ ret = verify_eb_and_memory(eb, memory, "same page non-overlapping memcpy 2");
+ if (ret < 0)
+ goto out;
+ memcpy(memory, memory + 2048, 2048);
+ memcpy_extent_buffer(eb, 0, 2048, 2048);
+ ret = verify_eb_and_memory(eb, memory, "same page non-overlapping memcpy 3");
+ if (ret < 0)
+ goto out;
+
+ memmove(memory + 512, memory + 256, 512);
+ memmove_extent_buffer(eb, 512, 256, 512);
+ ret = verify_eb_and_memory(eb, memory, "same page overlapping memcpy 1");
+ if (ret < 0)
+ goto out;
+
+ memmove(memory + 2048, memory + 512, 2048);
+ memmove_extent_buffer(eb, 2048, 512, 2048);
+ ret = verify_eb_and_memory(eb, memory, "same page overlapping memcpy 2");
+ if (ret < 0)
+ goto out;
+ memmove(memory + 512, memory + 2048, 2048);
+ memmove_extent_buffer(eb, 512, 2048, 2048);
+ ret = verify_eb_and_memory(eb, memory, "same page overlapping memcpy 3");
+ if (ret < 0)
+ goto out;
+
+ if (nodesize > PAGE_SIZE) {
+ memcpy(memory, memory + 4096 - 128, 256);
+ memcpy_extent_buffer(eb, 0, 4096 - 128, 256);
+ ret = verify_eb_and_memory(eb, memory, "cross page non-overlapping memcpy 1");
+ if (ret < 0)
+ goto out;
+
+ memcpy(memory + 4096 - 128, memory + 4096 + 128, 256);
+ memcpy_extent_buffer(eb, 4096 - 128, 4096 + 128, 256);
+ ret = verify_eb_and_memory(eb, memory, "cross page non-overlapping memcpy 2");
+ if (ret < 0)
+ goto out;
+
+ memmove(memory + 4096 - 128, memory + 4096 - 64, 256);
+ memmove_extent_buffer(eb, 4096 - 128, 4096 - 64, 256);
+ ret = verify_eb_and_memory(eb, memory, "cross page overlapping memcpy 1");
+ if (ret < 0)
+ goto out;
+
+ memmove(memory + 4096 - 64, memory + 4096 - 128, 256);
+ memmove_extent_buffer(eb, 4096 - 64, 4096 - 128, 256);
+ ret = verify_eb_and_memory(eb, memory, "cross page overlapping memcpy 2");
+ if (ret < 0)
+ goto out;
+ }
+out:
+ free_extent_buffer(eb);
+ kvfree(memory);
+ btrfs_free_dummy_fs_info(fs_info);
+ return ret;
+}
+
+int btrfs_test_extent_io(u32 sectorsize, u32 nodesize)
+{
+ int ret;
+
+ test_msg("running extent I/O tests");
+
+ ret = test_find_delalloc(sectorsize);
+ if (ret)
+ goto out;
+
+ ret = test_find_first_clear_extent_bit();
+ if (ret)
+ goto out;
+
+ ret = test_eb_bitmaps(sectorsize, nodesize);
+ if (ret)
+ goto out;
+
+ ret = test_eb_mem_ops(sectorsize, nodesize);
+out:
+ return ret;
+}
diff --git a/fs/btrfs/tests/extent-map-tests.c b/fs/btrfs/tests/extent-map-tests.c
new file mode 100644
index 0000000000..29bdd08b24
--- /dev/null
+++ b/fs/btrfs/tests/extent-map-tests.c
@@ -0,0 +1,1047 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Oracle. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include "btrfs-tests.h"
+#include "../ctree.h"
+#include "../btrfs_inode.h"
+#include "../volumes.h"
+#include "../disk-io.h"
+#include "../block-group.h"
+
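+/*
+ * Drain every extent_map left in the tree after a test case; with
+ * CONFIG_BTRFS_DEBUG also report any em whose refcount indicates a leak.
+ */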
+static void free_extent_map_tree(struct extent_map_tree *em_tree)
+{
+ struct extent_map *em;
+ struct rb_node *node;
+
+ write_lock(&em_tree->lock);
+ while (!RB_EMPTY_ROOT(&em_tree->map.rb_root)) {
+ node = rb_first_cached(&em_tree->map);
+ em = rb_entry(node, struct extent_map, rb_node);
+ remove_extent_mapping(em_tree, em);
+
+#ifdef CONFIG_BTRFS_DEBUG
+ if (refcount_read(&em->refs) != 1) {
+ test_err(
+"em leak: em (start 0x%llx len 0x%llx block_start 0x%llx block_len 0x%llx) refs %d",
+ em->start, em->len, em->block_start,
+ em->block_len, refcount_read(&em->refs));
+
+ refcount_set(&em->refs, 1);
+ }
+#endif
+ free_extent_map(em);
+ }
+ write_unlock(&em_tree->lock);
+}
+
+/*
+ * Test scenario:
+ *
+ * Suppose that no extent map has been loaded into memory yet, there is a file
+ * extent [0, 16K), followed by another file extent [16K, 20K), two dio reads
+ * are entering btrfs_get_extent() concurrently, t1 is reading [8K, 16K), t2 is
+ * reading [0, 8K)
+ *
+ * t1 t2
+ * btrfs_get_extent() btrfs_get_extent()
+ * -> lookup_extent_mapping() ->lookup_extent_mapping()
+ * -> add_extent_mapping(0, 16K)
+ * -> return em
+ * ->add_extent_mapping(0, 16K)
+ * -> #handle -EEXIST
+ */
+static int test_case_1(struct btrfs_fs_info *fs_info,
+ struct extent_map_tree *em_tree)
+{
+ struct extent_map *em;
+ u64 start = 0;
+ u64 len = SZ_8K;
+ int ret;
+
+ em = alloc_extent_map();
+ if (!em) {
+ test_std_err(TEST_ALLOC_EXTENT_MAP);
+ return -ENOMEM;
+ }
+
+ /* Add [0, 16K) */
+ em->start = 0;
+ em->len = SZ_16K;
+ em->block_start = 0;
+ em->block_len = SZ_16K;
+ write_lock(&em_tree->lock);
+ ret = add_extent_mapping(em_tree, em, 0);
+ write_unlock(&em_tree->lock);
+ if (ret < 0) {
+ test_err("cannot add extent range [0, 16K)");
+ goto out;
+ }
+ free_extent_map(em);
+
+ /* Add [16K, 20K) following [0, 16K) */
+ em = alloc_extent_map();
+ if (!em) {
+ test_std_err(TEST_ALLOC_EXTENT_MAP);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ em->start = SZ_16K;
+ em->len = SZ_4K;
+ em->block_start = SZ_32K; /* avoid merging */
+ em->block_len = SZ_4K;
+ write_lock(&em_tree->lock);
+ ret = add_extent_mapping(em_tree, em, 0);
+ write_unlock(&em_tree->lock);
+ if (ret < 0) {
+ test_err("cannot add extent range [16K, 20K)");
+ goto out;
+ }
+ free_extent_map(em);
+
+ em = alloc_extent_map();
+ if (!em) {
+ test_std_err(TEST_ALLOC_EXTENT_MAP);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Add [0, 8K), should return [0, 16K) instead. */
+ em->start = start;
+ em->len = len;
+ em->block_start = start;
+ em->block_len = len;
+ write_lock(&em_tree->lock);
+ ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len);
+ write_unlock(&em_tree->lock);
+ if (ret) {
+ test_err("case1 [%llu %llu]: ret %d", start, start + len, ret);
+ goto out;
+ }
+ if (em &&
+ (em->start != 0 || extent_map_end(em) != SZ_16K ||
+ em->block_start != 0 || em->block_len != SZ_16K)) {
+ test_err(
+"case1 [%llu %llu]: ret %d return a wrong em (start %llu len %llu block_start %llu block_len %llu",
+ start, start + len, ret, em->start, em->len,
+ em->block_start, em->block_len);
+ ret = -EINVAL;
+ }
+ free_extent_map(em);
+out:
+ free_extent_map_tree(em_tree);
+
+ return ret;
+}
+
+/*
+ * Test scenario:
+ *
+ * Reading an inline extent ends up with -EEXIST, i.e. read an inline
+ * extent, discard the page cache, and read it again.
+ */
+static int test_case_2(struct btrfs_fs_info *fs_info,
+ struct extent_map_tree *em_tree)
+{
+ struct extent_map *em;
+ int ret;
+
+ em = alloc_extent_map();
+ if (!em) {
+ test_std_err(TEST_ALLOC_EXTENT_MAP);
+ return -ENOMEM;
+ }
+
+ /* Add [0, 1K) */
+ em->start = 0;
+ em->len = SZ_1K;
+ em->block_start = EXTENT_MAP_INLINE;
+ em->block_len = (u64)-1;
+ write_lock(&em_tree->lock);
+ ret = add_extent_mapping(em_tree, em, 0);
+ write_unlock(&em_tree->lock);
+ if (ret < 0) {
+ test_err("cannot add extent range [0, 1K)");
+ goto out;
+ }
+ free_extent_map(em);
+
+ /* Add [4K, 8K) following [0, 1K) */
+ em = alloc_extent_map();
+ if (!em) {
+ test_std_err(TEST_ALLOC_EXTENT_MAP);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ em->start = SZ_4K;
+ em->len = SZ_4K;
+ em->block_start = SZ_4K;
+ em->block_len = SZ_4K;
+ write_lock(&em_tree->lock);
+ ret = add_extent_mapping(em_tree, em, 0);
+ write_unlock(&em_tree->lock);
+ if (ret < 0) {
+ test_err("cannot add extent range [4K, 8K)");
+ goto out;
+ }
+ free_extent_map(em);
+
+ em = alloc_extent_map();
+ if (!em) {
+ test_std_err(TEST_ALLOC_EXTENT_MAP);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Add [0, 1K) */
+ em->start = 0;
+ em->len = SZ_1K;
+ em->block_start = EXTENT_MAP_INLINE;
+ em->block_len = (u64)-1;
+ write_lock(&em_tree->lock);
+ ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len);
+ write_unlock(&em_tree->lock);
+ if (ret) {
+ test_err("case2 [0 1K]: ret %d", ret);
+ goto out;
+ }
+ if (em &&
+ (em->start != 0 || extent_map_end(em) != SZ_1K ||
+ em->block_start != EXTENT_MAP_INLINE || em->block_len != (u64)-1)) {
+ test_err(
+"case2 [0 1K]: ret %d return a wrong em (start %llu len %llu block_start %llu block_len %llu",
+ ret, em->start, em->len, em->block_start,
+ em->block_len);
+ ret = -EINVAL;
+ }
+ free_extent_map(em);
+out:
+ free_extent_map_tree(em_tree);
+
+ return ret;
+}
+
+static int __test_case_3(struct btrfs_fs_info *fs_info,
+ struct extent_map_tree *em_tree, u64 start)
+{
+ struct extent_map *em;
+ u64 len = SZ_4K;
+ int ret;
+
+ em = alloc_extent_map();
+ if (!em) {
+ test_std_err(TEST_ALLOC_EXTENT_MAP);
+ return -ENOMEM;
+ }
+
+ /* Add [4K, 8K) */
+ em->start = SZ_4K;
+ em->len = SZ_4K;
+ em->block_start = SZ_4K;
+ em->block_len = SZ_4K;
+ write_lock(&em_tree->lock);
+ ret = add_extent_mapping(em_tree, em, 0);
+ write_unlock(&em_tree->lock);
+ if (ret < 0) {
+ test_err("cannot add extent range [4K, 8K)");
+ goto out;
+ }
+ free_extent_map(em);
+
+ em = alloc_extent_map();
+ if (!em) {
+ test_std_err(TEST_ALLOC_EXTENT_MAP);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Add [0, 16K) */
+ em->start = 0;
+ em->len = SZ_16K;
+ em->block_start = 0;
+ em->block_len = SZ_16K;
+ write_lock(&em_tree->lock);
+ ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
+ write_unlock(&em_tree->lock);
+ if (ret) {
+ test_err("case3 [0x%llx 0x%llx): ret %d",
+ start, start + len, ret);
+ goto out;
+ }
+ /*
+ * Since bytes within em are contiguous, em->block_start is identical to
+ * em->start.
+ */
+ if (em &&
+ (start < em->start || start + len > extent_map_end(em) ||
+ em->start != em->block_start || em->len != em->block_len)) {
+ test_err(
+"case3 [0x%llx 0x%llx): ret %d em (start 0x%llx len 0x%llx block_start 0x%llx block_len 0x%llx)",
+ start, start + len, ret, em->start, em->len,
+ em->block_start, em->block_len);
+ ret = -EINVAL;
+ }
+ free_extent_map(em);
+out:
+ free_extent_map_tree(em_tree);
+
+ return ret;
+}
+
+/*
+ * Test scenario:
+ *
+ * Suppose that no extent map has been loaded into memory yet.
+ * There is a file extent [0, 16K), two jobs are running concurrently
+ * against it, t1 is buffered writing to [4K, 8K) and t2 is doing dio
+ * read from [0, 4K) or [8K, 12K) or [12K, 16K).
+ *
+ * t1 goes ahead of t2 and adds em [4K, 8K) into tree.
+ *
+ * t1 t2
+ * cow_file_range() btrfs_get_extent()
+ * -> lookup_extent_mapping()
+ * -> add_extent_mapping()
+ * -> add_extent_mapping()
+ */
+static int test_case_3(struct btrfs_fs_info *fs_info,
+ struct extent_map_tree *em_tree)
+{
+ int ret;
+
+ ret = __test_case_3(fs_info, em_tree, 0);
+ if (ret)
+ return ret;
+ ret = __test_case_3(fs_info, em_tree, SZ_8K);
+ if (ret)
+ return ret;
+ ret = __test_case_3(fs_info, em_tree, (12 * SZ_1K));
+
+ return ret;
+}
+
+static int __test_case_4(struct btrfs_fs_info *fs_info,
+ struct extent_map_tree *em_tree, u64 start)
+{
+ struct extent_map *em;
+ u64 len = SZ_4K;
+ int ret;
+
+ em = alloc_extent_map();
+ if (!em) {
+ test_std_err(TEST_ALLOC_EXTENT_MAP);
+ return -ENOMEM;
+ }
+
+ /* Add [0K, 8K) */
+ em->start = 0;
+ em->len = SZ_8K;
+ em->block_start = 0;
+ em->block_len = SZ_8K;
+ write_lock(&em_tree->lock);
+ ret = add_extent_mapping(em_tree, em, 0);
+ write_unlock(&em_tree->lock);
+ if (ret < 0) {
+ test_err("cannot add extent range [0, 8K)");
+ goto out;
+ }
+ free_extent_map(em);
+
+ em = alloc_extent_map();
+ if (!em) {
+ test_std_err(TEST_ALLOC_EXTENT_MAP);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Add [8K, 32K) */
+ em->start = SZ_8K;
+ em->len = 24 * SZ_1K;
+ em->block_start = SZ_16K; /* avoid merging */
+ em->block_len = 24 * SZ_1K;
+ write_lock(&em_tree->lock);
+ ret = add_extent_mapping(em_tree, em, 0);
+ write_unlock(&em_tree->lock);
+ if (ret < 0) {
+ test_err("cannot add extent range [8K, 32K)");
+ goto out;
+ }
+ free_extent_map(em);
+
+ em = alloc_extent_map();
+ if (!em) {
+ test_std_err(TEST_ALLOC_EXTENT_MAP);
+ ret = -ENOMEM;
+ goto out;
+ }
+ /* Add [0K, 32K) */
+ em->start = 0;
+ em->len = SZ_32K;
+ em->block_start = 0;
+ em->block_len = SZ_32K;
+ write_lock(&em_tree->lock);
+ ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
+ write_unlock(&em_tree->lock);
+ if (ret) {
+ test_err("case4 [0x%llx 0x%llx): ret %d",
+ start, start + len, ret);
+ goto out;
+ }
+ if (em && (start < em->start || start + len > extent_map_end(em))) {
+ test_err(
+"case4 [0x%llx 0x%llx): ret %d, added wrong em (start 0x%llx len 0x%llx block_start 0x%llx block_len 0x%llx)",
+ start, start + len, ret, em->start, em->len, em->block_start,
+ em->block_len);
+ ret = -EINVAL;
+ }
+ free_extent_map(em);
+out:
+ free_extent_map_tree(em_tree);
+
+ return ret;
+}
+
+/*
+ * Test scenario:
+ *
+ * Suppose that no extent map has been loaded into memory yet.
+ * There is a file extent [0, 32K), two jobs are running concurrently
+ * against it, t1 is doing dio write to [8K, 32K) and t2 is doing dio
+ * read from [0, 4K) or [4K, 8K).
+ *
+ * t1 goes ahead of t2 and splits em [0, 32K) to em [0K, 8K) and [8K 32K).
+ *
+ * t1 t2
+ * btrfs_get_blocks_direct() btrfs_get_blocks_direct()
+ * -> btrfs_get_extent() -> btrfs_get_extent()
+ * -> lookup_extent_mapping()
+ * -> add_extent_mapping() -> lookup_extent_mapping()
+ * # load [0, 32K)
+ * -> btrfs_new_extent_direct()
+ * -> btrfs_drop_extent_cache()
+ * # split [0, 32K)
+ * -> add_extent_mapping()
+ * # add [8K, 32K)
+ * -> add_extent_mapping()
+ * # handle -EEXIST when adding
+ * # [0, 32K)
+ */
+static int test_case_4(struct btrfs_fs_info *fs_info,
+ struct extent_map_tree *em_tree)
+{
+ int ret;
+
+ ret = __test_case_4(fs_info, em_tree, 0);
+ if (ret)
+ return ret;
+ ret = __test_case_4(fs_info, em_tree, SZ_4K);
+
+ return ret;
+}
+
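+/*
+ * Add a compressed extent map covering [start, start + len) that points at
+ * block_start. EXTENT_FLAG_COMPRESSED keeps neighbouring ems from being
+ * merged, which the drop-range tests below rely on.
+ */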
+static int add_compressed_extent(struct extent_map_tree *em_tree,
+ u64 start, u64 len, u64 block_start)
+{
+ struct extent_map *em;
+ int ret;
+
+ em = alloc_extent_map();
+ if (!em) {
+ test_std_err(TEST_ALLOC_EXTENT_MAP);
+ return -ENOMEM;
+ }
+
+ em->start = start;
+ em->len = len;
+ em->block_start = block_start;
+ em->block_len = SZ_4K;
+ set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
+ write_lock(&em_tree->lock);
+ ret = add_extent_mapping(em_tree, em, 0);
+ write_unlock(&em_tree->lock);
+ free_extent_map(em);
+ if (ret < 0) {
+ test_err("cannot add extent map [%llu, %llu)", start, start + len);
+ return ret;
+ }
+
+ return 0;
+}
+
+struct extent_range {
+ u64 start;
+ u64 len;
+};
+
+/* The valid states of the tree after every drop, as described below. */
+static struct extent_range valid_ranges[][7] = {
+ {
+ { .start = 0, .len = SZ_8K }, /* [0, 8K) */
+ { .start = SZ_4K * 3, .len = SZ_4K * 3}, /* [12k, 24k) */
+ { .start = SZ_4K * 6, .len = SZ_4K * 3}, /* [24k, 36k) */
+ { .start = SZ_32K + SZ_4K, .len = SZ_4K}, /* [36k, 40k) */
+ { .start = SZ_4K * 10, .len = SZ_4K * 6}, /* [40k, 64k) */
+ },
+ {
+ { .start = 0, .len = SZ_8K }, /* [0, 8K) */
+ { .start = SZ_4K * 5, .len = SZ_4K}, /* [20k, 24k) */
+ { .start = SZ_4K * 6, .len = SZ_4K * 3}, /* [24k, 36k) */
+ { .start = SZ_32K + SZ_4K, .len = SZ_4K}, /* [36k, 40k) */
+ { .start = SZ_4K * 10, .len = SZ_4K * 6}, /* [40k, 64k) */
+ },
+ {
+ { .start = 0, .len = SZ_8K }, /* [0, 8K) */
+ { .start = SZ_4K * 5, .len = SZ_4K}, /* [20k, 24k) */
+ { .start = SZ_4K * 6, .len = SZ_4K}, /* [24k, 28k) */
+ { .start = SZ_32K, .len = SZ_4K}, /* [32k, 36k) */
+ { .start = SZ_32K + SZ_4K, .len = SZ_4K}, /* [36k, 40k) */
+ { .start = SZ_4K * 10, .len = SZ_4K * 6}, /* [40k, 64k) */
+ },
+ {
+ { .start = 0, .len = SZ_8K}, /* [0, 8K) */
+ { .start = SZ_4K * 5, .len = SZ_4K}, /* [20k, 24k) */
+ { .start = SZ_4K * 6, .len = SZ_4K}, /* [24k, 28k) */
+ }
+};
+
+static int validate_range(struct extent_map_tree *em_tree, int index)
+{
+ struct rb_node *n;
+ int i;
+
+ for (i = 0, n = rb_first_cached(&em_tree->map);
+ valid_ranges[index][i].len && n;
+ i++, n = rb_next(n)) {
+ struct extent_map *entry = rb_entry(n, struct extent_map, rb_node);
+
+ if (entry->start != valid_ranges[index][i].start) {
+ test_err("mapping has start %llu expected %llu",
+ entry->start, valid_ranges[index][i].start);
+ return -EINVAL;
+ }
+
+ if (entry->len != valid_ranges[index][i].len) {
+ test_err("mapping has len %llu expected %llu",
+ entry->len, valid_ranges[index][i].len);
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * We exited because we don't have any more entries in the extent_map
+ * but we still expect more valid entries.
+ */
+ if (valid_ranges[index][i].len) {
+ test_err("missing an entry");
+ return -EINVAL;
+ }
+
+ /* We exited the loop but still have entries in the extent map. */
+ if (n) {
+ test_err("we have a left over entry in the extent map we didn't expect");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Test scenario:
+ *
+ * Test the various edge cases of btrfs_drop_extent_map_range, create the
+ * following ranges
+ *
+ * [0, 12k)[12k, 24k)[24k, 36k)[36k, 40k)[40k,64k)
+ *
+ * And then we'll drop:
+ *
+ * [8k, 12k) - test the single front split
+ * [12k, 20k) - test the single back split
+ * [28k, 32k) - test the double split
+ * [32k, 64k) - test whole em dropping
+ *
+ * They'll have the EXTENT_FLAG_COMPRESSED flag set to keep the em tree from
+ * merging the ems.
+ */
+static int test_case_5(void)
+{
+ struct extent_map_tree *em_tree;
+ struct inode *inode;
+ u64 start, end;
+ int ret;
+
+ test_msg("Running btrfs_drop_extent_map_range tests");
+
+ inode = btrfs_new_test_inode();
+ if (!inode) {
+ test_std_err(TEST_ALLOC_INODE);
+ return -ENOMEM;
+ }
+
+ em_tree = &BTRFS_I(inode)->extent_tree;
+
+ /* [0, 12k) */
+ ret = add_compressed_extent(em_tree, 0, SZ_4K * 3, 0);
+ if (ret) {
+ test_err("cannot add extent range [0, 12K)");
+ goto out;
+ }
+
+ /* [12k, 24k) */
+ ret = add_compressed_extent(em_tree, SZ_4K * 3, SZ_4K * 3, SZ_4K);
+ if (ret) {
+ test_err("cannot add extent range [12k, 24k)");
+ goto out;
+ }
+
+ /* [24k, 36k) */
+ ret = add_compressed_extent(em_tree, SZ_4K * 6, SZ_4K * 3, SZ_8K);
+ if (ret) {
+ test_err("cannot add extent range [12k, 24k)");
+ goto out;
+ }
+
+ /* [36k, 40k) */
+ ret = add_compressed_extent(em_tree, SZ_32K + SZ_4K, SZ_4K, SZ_4K * 3);
+ if (ret) {
+ test_err("cannot add extent range [12k, 24k)");
+ goto out;
+ }
+
+ /* [40k, 64k) */
+ ret = add_compressed_extent(em_tree, SZ_4K * 10, SZ_4K * 6, SZ_16K);
+ if (ret) {
+ test_err("cannot add extent range [12k, 24k)");
+ goto out;
+ }
+
+ /* Drop [8k, 12k) */
+ start = SZ_8K;
+ end = (3 * SZ_4K) - 1;
+ btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, false);
+ ret = validate_range(&BTRFS_I(inode)->extent_tree, 0);
+ if (ret)
+ goto out;
+
+ /* Drop [12k, 20k) */
+ start = SZ_4K * 3;
+ end = SZ_16K + SZ_4K - 1;
+ btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, false);
+ ret = validate_range(&BTRFS_I(inode)->extent_tree, 1);
+ if (ret)
+ goto out;
+
+ /* Drop [28k, 32k) */
+ start = SZ_32K - SZ_4K;
+ end = SZ_32K - 1;
+ btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, false);
+ ret = validate_range(&BTRFS_I(inode)->extent_tree, 2);
+ if (ret)
+ goto out;
+
+ /* Drop [32k, 64k) */
+ start = SZ_32K;
+ end = SZ_64K - 1;
+ btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, false);
+ ret = validate_range(&BTRFS_I(inode)->extent_tree, 3);
+ if (ret)
+ goto out;
+out:
+ iput(inode);
+ return ret;
+}
+
+/*
+ * Test the btrfs_add_extent_mapping helper which will attempt to create an em
+ * for areas between two existing ems. Validate it doesn't do this when there
+ * are two unmerged ems side by side.
+ */
+static int test_case_6(struct btrfs_fs_info *fs_info, struct extent_map_tree *em_tree)
+{
+ struct extent_map *em = NULL;
+ int ret;
+
+ ret = add_compressed_extent(em_tree, 0, SZ_4K, 0);
+ if (ret)
+ goto out;
+
+ ret = add_compressed_extent(em_tree, SZ_4K, SZ_4K, 0);
+ if (ret)
+ goto out;
+
+ em = alloc_extent_map();
+ if (!em) {
+ test_std_err(TEST_ALLOC_EXTENT_MAP);
+ return -ENOMEM;
+ }
+
+ em->start = SZ_4K;
+ em->len = SZ_4K;
+ em->block_start = SZ_16K;
+ em->block_len = SZ_16K;
+ write_lock(&em_tree->lock);
+ ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, 0, SZ_8K);
+ write_unlock(&em_tree->lock);
+
+ if (ret != 0) {
+ test_err("got an error when adding our em: %d", ret);
+ goto out;
+ }
+
+ ret = -EINVAL;
+ if (em->start != 0) {
+ test_err("unexpected em->start at %llu, wanted 0", em->start);
+ goto out;
+ }
+ if (em->len != SZ_4K) {
+ test_err("unexpected em->len %llu, expected 4K", em->len);
+ goto out;
+ }
+ ret = 0;
+out:
+ free_extent_map(em);
+ free_extent_map_tree(em_tree);
+ return ret;
+}
+
+/*
+ * Regression test for btrfs_drop_extent_map_range. Calling with skip_pinned ==
+ * true would mess up the start/end calculations and subsequent splits would be
+ * incorrect.
+ */
+static int test_case_7(void)
+{
+ struct extent_map_tree *em_tree;
+ struct extent_map *em;
+ struct inode *inode;
+ int ret;
+
+ test_msg("Running btrfs_drop_extent_cache with pinned");
+
+ inode = btrfs_new_test_inode();
+ if (!inode) {
+ test_std_err(TEST_ALLOC_INODE);
+ return -ENOMEM;
+ }
+
+ em_tree = &BTRFS_I(inode)->extent_tree;
+
+ em = alloc_extent_map();
+ if (!em) {
+ test_std_err(TEST_ALLOC_EXTENT_MAP);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* [0, 16K), pinned */
+ em->start = 0;
+ em->len = SZ_16K;
+ em->block_start = 0;
+ em->block_len = SZ_4K;
+ set_bit(EXTENT_FLAG_PINNED, &em->flags);
+ write_lock(&em_tree->lock);
+ ret = add_extent_mapping(em_tree, em, 0);
+ write_unlock(&em_tree->lock);
+ if (ret < 0) {
+ test_err("couldn't add extent map");
+ goto out;
+ }
+ free_extent_map(em);
+
+ em = alloc_extent_map();
+ if (!em) {
+ test_std_err(TEST_ALLOC_EXTENT_MAP);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* [32K, 48K), not pinned */
+ em->start = SZ_32K;
+ em->len = SZ_16K;
+ em->block_start = SZ_32K;
+ em->block_len = SZ_16K;
+ write_lock(&em_tree->lock);
+ ret = add_extent_mapping(em_tree, em, 0);
+ write_unlock(&em_tree->lock);
+ if (ret < 0) {
+ test_err("couldn't add extent map");
+ goto out;
+ }
+ free_extent_map(em);
+
+ /*
+ * Drop [0, 36K). This should skip the pinned [0, 16K) extent and then
+ * split the [32K, 48K) extent.
+ */
+ btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (36 * SZ_1K) - 1, true);
+
+ /* Make sure our extent maps look sane. */
+ ret = -EINVAL;
+
+ read_lock(&em_tree->lock);
+ em = lookup_extent_mapping(em_tree, 0, SZ_16K);
+ read_unlock(&em_tree->lock);
+ if (!em) {
+ test_err("didn't find an em at 0 as expected");
+ goto out;
+ }
+
+ if (em->start != 0) {
+ test_err("em->start is %llu, expected 0", em->start);
+ goto out;
+ }
+
+ if (em->len != SZ_16K) {
+ test_err("em->len is %llu, expected 16K", em->len);
+ goto out;
+ }
+
+ free_extent_map(em);
+
+ read_lock(&em_tree->lock);
+ em = lookup_extent_mapping(em_tree, SZ_16K, SZ_16K);
+ read_unlock(&em_tree->lock);
+ if (em) {
+ test_err("found an em when we weren't expecting one");
+ goto out;
+ }
+
+ read_lock(&em_tree->lock);
+ em = lookup_extent_mapping(em_tree, SZ_32K, SZ_16K);
+ read_unlock(&em_tree->lock);
+ if (!em) {
+ test_err("didn't find an em at 32K as expected");
+ goto out;
+ }
+
+ if (em->start != (36 * SZ_1K)) {
+ test_err("em->start is %llu, expected 36K", em->start);
+ goto out;
+ }
+
+ if (em->len != (12 * SZ_1K)) {
+ test_err("em->len is %llu, expected 12K", em->len);
+ goto out;
+ }
+
+ free_extent_map(em);
+
+ read_lock(&em_tree->lock);
+ em = lookup_extent_mapping(em_tree, 48 * SZ_1K, (u64)-1);
+ read_unlock(&em_tree->lock);
+ if (em) {
+ test_err("found an unexpected em above 48K");
+ goto out;
+ }
+
+ ret = 0;
+out:
+ free_extent_map(em);
+ iput(inode);
+ return ret;
+}
+
+struct rmap_test_vector {
+ u64 raid_type;
+ u64 physical_start;
+ u64 data_stripe_size;
+ u64 num_data_stripes;
+ u64 num_stripes;
+ /* Assume we won't have more than 5 physical stripes */
+ u64 data_stripe_phys_start[5];
+ bool expected_mapped_addr;
+ /* Physical to logical addresses */
+ u64 mapped_logical[5];
+};
+
+static int test_rmap_block(struct btrfs_fs_info *fs_info,
+ struct rmap_test_vector *test)
+{
+ struct extent_map *em;
+ struct map_lookup *map = NULL;
+ u64 *logical = NULL;
+ int i, out_ndaddrs, out_stripe_len;
+ int ret;
+
+ em = alloc_extent_map();
+ if (!em) {
+ test_std_err(TEST_ALLOC_EXTENT_MAP);
+ return -ENOMEM;
+ }
+
+ map = kmalloc(map_lookup_size(test->num_stripes), GFP_KERNEL);
+ if (!map) {
+ kfree(em);
+ test_std_err(TEST_ALLOC_EXTENT_MAP);
+ return -ENOMEM;
+ }
+
+ set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
+ /* Start at 4GiB logical address */
+ em->start = SZ_4G;
+ em->len = test->data_stripe_size * test->num_data_stripes;
+ em->block_len = em->len;
+ em->orig_block_len = test->data_stripe_size;
+ em->map_lookup = map;
+
+ map->num_stripes = test->num_stripes;
+ map->type = test->raid_type;
+
+ for (i = 0; i < map->num_stripes; i++) {
+ struct btrfs_device *dev = btrfs_alloc_dummy_device(fs_info);
+
+ if (IS_ERR(dev)) {
+ test_err("cannot allocate device");
+ ret = PTR_ERR(dev);
+ goto out;
+ }
+ map->stripes[i].dev = dev;
+ map->stripes[i].physical = test->data_stripe_phys_start[i];
+ }
+
+ write_lock(&fs_info->mapping_tree.lock);
+ ret = add_extent_mapping(&fs_info->mapping_tree, em, 0);
+ write_unlock(&fs_info->mapping_tree.lock);
+ if (ret) {
+ test_err("error adding block group mapping to mapping tree");
+ goto out_free;
+ }
+
+ ret = btrfs_rmap_block(fs_info, em->start, btrfs_sb_offset(1),
+ &logical, &out_ndaddrs, &out_stripe_len);
+ if (ret || (out_ndaddrs == 0 && test->expected_mapped_addr)) {
+ test_err("didn't rmap anything but expected %d",
+ test->expected_mapped_addr);
+ goto out;
+ }
+
+ if (out_stripe_len != BTRFS_STRIPE_LEN) {
+ test_err("calculated stripe length doesn't match");
+ goto out;
+ }
+
+ if (out_ndaddrs != test->expected_mapped_addr) {
+ for (i = 0; i < out_ndaddrs; i++)
+ test_msg("mapped %llu", logical[i]);
+ test_err("unexpected number of mapped addresses: %d", out_ndaddrs);
+ goto out;
+ }
+
+ for (i = 0; i < out_ndaddrs; i++) {
+ if (logical[i] != test->mapped_logical[i]) {
+ test_err("unexpected logical address mapped");
+ goto out;
+ }
+ }
+
+ ret = 0;
+out:
+ write_lock(&fs_info->mapping_tree.lock);
+ remove_extent_mapping(&fs_info->mapping_tree, em);
+ write_unlock(&fs_info->mapping_tree.lock);
+ /* For us */
+ free_extent_map(em);
+out_free:
+ /* For the tree */
+ free_extent_map(em);
+ kfree(logical);
+ return ret;
+}
+
+int btrfs_test_extent_map(void)
+{
+ struct btrfs_fs_info *fs_info = NULL;
+ struct extent_map_tree *em_tree;
+ int ret = 0, i;
+ struct rmap_test_vector rmap_tests[] = {
+ {
+ /*
+ * Test that a chunk with 2 data stripes, one of which
+ * intersects the physical address of the super block,
+ * is correctly recognised.
+ */
+ .raid_type = BTRFS_BLOCK_GROUP_RAID1,
+ .physical_start = SZ_64M - SZ_4M,
+ .data_stripe_size = SZ_256M,
+ .num_data_stripes = 2,
+ .num_stripes = 2,
+ .data_stripe_phys_start =
+ {SZ_64M - SZ_4M, SZ_64M - SZ_4M + SZ_256M},
+ .expected_mapped_addr = true,
+ .mapped_logical = {SZ_4G + SZ_4M}
+ },
+ {
+ /*
+ * Test that out-of-range physical addresses are
+ * ignored
+ */
+
+ /* SINGLE chunk type */
+ .raid_type = 0,
+ .physical_start = SZ_4G,
+ .data_stripe_size = SZ_256M,
+ .num_data_stripes = 1,
+ .num_stripes = 1,
+ .data_stripe_phys_start = {SZ_256M},
+ .expected_mapped_addr = false,
+ .mapped_logical = {0}
+ }
+ };
+
+ test_msg("running extent_map tests");
+
+ /*
+ * Note: the fs_info is not set up completely, we only need
+ * fs_info::fsid for the tracepoint.
+ */
+ fs_info = btrfs_alloc_dummy_fs_info(PAGE_SIZE, PAGE_SIZE);
+ if (!fs_info) {
+ test_std_err(TEST_ALLOC_FS_INFO);
+ return -ENOMEM;
+ }
+
+ em_tree = kzalloc(sizeof(*em_tree), GFP_KERNEL);
+ if (!em_tree) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ extent_map_tree_init(em_tree);
+
+ ret = test_case_1(fs_info, em_tree);
+ if (ret)
+ goto out;
+ ret = test_case_2(fs_info, em_tree);
+ if (ret)
+ goto out;
+ ret = test_case_3(fs_info, em_tree);
+ if (ret)
+ goto out;
+ ret = test_case_4(fs_info, em_tree);
+ if (ret)
+ goto out;
+ ret = test_case_5();
+ if (ret)
+ goto out;
+ ret = test_case_6(fs_info, em_tree);
+ if (ret)
+ goto out;
+ ret = test_case_7();
+ if (ret)
+ goto out;
+
+ test_msg("running rmap tests");
+ for (i = 0; i < ARRAY_SIZE(rmap_tests); i++) {
+ ret = test_rmap_block(fs_info, &rmap_tests[i]);
+ if (ret)
+ goto out;
+ }
+
+out:
+ kfree(em_tree);
+ btrfs_free_dummy_fs_info(fs_info);
+
+ return ret;
+}
diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
new file mode 100644
index 0000000000..ebf68fcd21
--- /dev/null
+++ b/fs/btrfs/tests/free-space-tests.c
@@ -0,0 +1,1063 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2013 Fusion IO. All rights reserved.
+ */
+
+#include <linux/slab.h>
+#include "btrfs-tests.h"
+#include "../ctree.h"
+#include "../disk-io.h"
+#include "../free-space-cache.h"
+#include "../block-group.h"
+
+#define BITS_PER_BITMAP (PAGE_SIZE * 8UL)
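+/*
+ * Worked example: with 4KiB pages this is 4096 * 8 = 32768 bits, so with
+ * a 4KiB sectorsize a single bitmap covers 32768 * 4K = 128MiB of space.
+ */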
+
+/*
+ * This test just does basic sanity checking, making sure we can add an extent
+ * entry and remove space from either end and the middle, and make sure we can
+ * remove space that covers adjacent extent entries.
+ */
+static int test_extents(struct btrfs_block_group *cache)
+{
+ int ret = 0;
+
+ test_msg("running extent only tests");
+
+ /* First just make sure we can remove an entire entry */
+ ret = btrfs_add_free_space(cache, 0, SZ_4M);
+ if (ret) {
+ test_err("error adding initial extents %d", ret);
+ return ret;
+ }
+
+ ret = btrfs_remove_free_space(cache, 0, SZ_4M);
+ if (ret) {
+ test_err("error removing extent %d", ret);
+ return ret;
+ }
+
+ if (test_check_exists(cache, 0, SZ_4M)) {
+ test_err("full remove left some lingering space");
+ return -1;
+ }
+
+ /* Ok edge and middle cases now */
+ ret = btrfs_add_free_space(cache, 0, SZ_4M);
+ if (ret) {
+ test_err("error adding half extent %d", ret);
+ return ret;
+ }
+
+ ret = btrfs_remove_free_space(cache, 3 * SZ_1M, SZ_1M);
+ if (ret) {
+ test_err("error removing tail end %d", ret);
+ return ret;
+ }
+
+ ret = btrfs_remove_free_space(cache, 0, SZ_1M);
+ if (ret) {
+ test_err("error removing front end %d", ret);
+ return ret;
+ }
+
+ ret = btrfs_remove_free_space(cache, SZ_2M, 4096);
+ if (ret) {
+ test_err("error removing middle piece %d", ret);
+ return ret;
+ }
+
+ if (test_check_exists(cache, 0, SZ_1M)) {
+ test_err("still have space at the front");
+ return -1;
+ }
+
+ if (test_check_exists(cache, SZ_2M, 4096)) {
+ test_err("still have space in the middle");
+ return -1;
+ }
+
+ if (test_check_exists(cache, 3 * SZ_1M, SZ_1M)) {
+ test_err("still have space at the end");
+ return -1;
+ }
+
+ /* Cleanup */
+ btrfs_remove_free_space_cache(cache);
+
+ return 0;
+}
+
+static int test_bitmaps(struct btrfs_block_group *cache, u32 sectorsize)
+{
+ u64 next_bitmap_offset;
+ int ret;
+
+ test_msg("running bitmap only tests");
+
+ ret = test_add_free_space_entry(cache, 0, SZ_4M, 1);
+ if (ret) {
+ test_err("couldn't create a bitmap entry %d", ret);
+ return ret;
+ }
+
+ ret = btrfs_remove_free_space(cache, 0, SZ_4M);
+ if (ret) {
+ test_err("error removing bitmap full range %d", ret);
+ return ret;
+ }
+
+ if (test_check_exists(cache, 0, SZ_4M)) {
+ test_err("left some space in bitmap");
+ return -1;
+ }
+
+ ret = test_add_free_space_entry(cache, 0, SZ_4M, 1);
+ if (ret) {
+ test_err("couldn't add to our bitmap entry %d", ret);
+ return ret;
+ }
+
+ ret = btrfs_remove_free_space(cache, SZ_1M, SZ_2M);
+ if (ret) {
+ test_err("couldn't remove middle chunk %d", ret);
+ return ret;
+ }
+
+ /*
+ * The first bitmap we have starts at offset 0 so the next one is just
+ * at the end of the first bitmap.
+ */
+ next_bitmap_offset = (u64)(BITS_PER_BITMAP * sectorsize);
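+ /*
+ * For example, with 4KiB pages and a 4KiB sectorsize this is
+ * 32768 * 4096 = 128MiB, so the straddling entry added below spans
+ * [126MiB, 130MiB).
+ */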
+
+ /* Test a bit straddling two bitmaps */
+ ret = test_add_free_space_entry(cache, next_bitmap_offset - SZ_2M,
+ SZ_4M, 1);
+ if (ret) {
+ test_err("couldn't add space that straddles two bitmaps %d",
+ ret);
+ return ret;
+ }
+
+ ret = btrfs_remove_free_space(cache, next_bitmap_offset - SZ_1M, SZ_2M);
+ if (ret) {
+ test_err("couldn't remove overlapping space %d", ret);
+ return ret;
+ }
+
+ if (test_check_exists(cache, next_bitmap_offset - SZ_1M, SZ_2M)) {
+ test_err("left some space when removing overlapping");
+ return -1;
+ }
+
+ btrfs_remove_free_space_cache(cache);
+
+ return 0;
+}
+
+/* This is the high grade jackassery */
+static int test_bitmaps_and_extents(struct btrfs_block_group *cache,
+ u32 sectorsize)
+{
+ u64 bitmap_offset = (u64)(BITS_PER_BITMAP * sectorsize);
+ int ret;
+
+ test_msg("running bitmap and extent tests");
+
+ /*
+ * First let's do something simple, an extent at the same offset as the
+ * bitmap, but the free space completely in the extent and then
+ * completely in the bitmap.
+ */
+ ret = test_add_free_space_entry(cache, SZ_4M, SZ_1M, 1);
+ if (ret) {
+ test_err("couldn't create bitmap entry %d", ret);
+ return ret;
+ }
+
+ ret = test_add_free_space_entry(cache, 0, SZ_1M, 0);
+ if (ret) {
+ test_err("couldn't add extent entry %d", ret);
+ return ret;
+ }
+
+ ret = btrfs_remove_free_space(cache, 0, SZ_1M);
+ if (ret) {
+ test_err("couldn't remove extent entry %d", ret);
+ return ret;
+ }
+
+ if (test_check_exists(cache, 0, SZ_1M)) {
+ test_err("left remnants after our remove");
+ return -1;
+ }
+
+ /* Now to add back the extent entry and remove from the bitmap */
+ ret = test_add_free_space_entry(cache, 0, SZ_1M, 0);
+ if (ret) {
+ test_err("couldn't re-add extent entry %d", ret);
+ return ret;
+ }
+
+ ret = btrfs_remove_free_space(cache, SZ_4M, SZ_1M);
+ if (ret) {
+ test_err("couldn't remove from bitmap %d", ret);
+ return ret;
+ }
+
+ if (test_check_exists(cache, SZ_4M, SZ_1M)) {
+ test_err("left remnants in the bitmap");
+ return -1;
+ }
+
+ /*
+ * Ok so a little more evil, extent entry and bitmap at the same offset,
+ * removing an overlapping chunk.
+ */
+ ret = test_add_free_space_entry(cache, SZ_1M, SZ_4M, 1);
+ if (ret) {
+ test_err("couldn't add to a bitmap %d", ret);
+ return ret;
+ }
+
+ ret = btrfs_remove_free_space(cache, SZ_512K, 3 * SZ_1M);
+ if (ret) {
+ test_err("couldn't remove overlapping space %d", ret);
+ return ret;
+ }
+
+ if (test_check_exists(cache, SZ_512K, 3 * SZ_1M)) {
+ test_err("left over pieces after removing overlapping");
+ return -1;
+ }
+
+ btrfs_remove_free_space_cache(cache);
+
+ /* Now with the extent entry offset into the bitmap */
+ ret = test_add_free_space_entry(cache, SZ_4M, SZ_4M, 1);
+ if (ret) {
+ test_err("couldn't add space to the bitmap %d", ret);
+ return ret;
+ }
+
+ ret = test_add_free_space_entry(cache, SZ_2M, SZ_2M, 0);
+ if (ret) {
+ test_err("couldn't add extent to the cache %d", ret);
+ return ret;
+ }
+
+ ret = btrfs_remove_free_space(cache, 3 * SZ_1M, SZ_4M);
+ if (ret) {
+ test_err("problem removing overlapping space %d", ret);
+ return ret;
+ }
+
+ if (test_check_exists(cache, 3 * SZ_1M, SZ_4M)) {
+ test_err("left something behind when removing space");
+ return -1;
+ }
+
+ /*
+ * This has blown up in the past, the extent entry starts before the
+ * bitmap entry, but we're trying to remove an offset that falls
+ * completely within the bitmap range and is in both the extent entry
+ * and the bitmap entry, looks like this
+ *
+ * [ extent ]
+ * [ bitmap ]
+ * [ del ]
+ */
+ btrfs_remove_free_space_cache(cache);
+ ret = test_add_free_space_entry(cache, bitmap_offset + SZ_4M, SZ_4M, 1);
+ if (ret) {
+ test_err("couldn't add bitmap %d", ret);
+ return ret;
+ }
+
+ ret = test_add_free_space_entry(cache, bitmap_offset - SZ_1M,
+ 5 * SZ_1M, 0);
+ if (ret) {
+ test_err("couldn't add extent entry %d", ret);
+ return ret;
+ }
+
+ ret = btrfs_remove_free_space(cache, bitmap_offset + SZ_1M, 5 * SZ_1M);
+ if (ret) {
+ test_err("failed to free our space %d", ret);
+ return ret;
+ }
+
+ if (test_check_exists(cache, bitmap_offset + SZ_1M, 5 * SZ_1M)) {
+ test_err("left stuff over");
+ return -1;
+ }
+
+ btrfs_remove_free_space_cache(cache);
+
+ /*
+ * This blew up before, we have part of the free space in a bitmap and
+ * then the entirety of the rest of the space in an extent. This used
+ * to return -EAGAIN back from btrfs_remove_extent, make sure this
+ * doesn't happen.
+ */
+ ret = test_add_free_space_entry(cache, SZ_1M, SZ_2M, 1);
+ if (ret) {
+ test_err("couldn't add bitmap entry %d", ret);
+ return ret;
+ }
+
+ ret = test_add_free_space_entry(cache, 3 * SZ_1M, SZ_1M, 0);
+ if (ret) {
+ test_err("couldn't add extent entry %d", ret);
+ return ret;
+ }
+
+ ret = btrfs_remove_free_space(cache, SZ_1M, 3 * SZ_1M);
+ if (ret) {
+ test_err("error removing bitmap and extent overlapping %d", ret);
+ return ret;
+ }
+
+ btrfs_remove_free_space_cache(cache);
+ return 0;
+}
+
+/* Used by test_steal_space_from_bitmap_to_extent(). */
+static bool test_use_bitmap(struct btrfs_free_space_ctl *ctl,
+ struct btrfs_free_space *info)
+{
+ return ctl->free_extents > 0;
+}
+
+/* Used by test_steal_space_from_bitmap_to_extent(). */
+static int
+check_num_extents_and_bitmaps(const struct btrfs_block_group *cache,
+ const int num_extents,
+ const int num_bitmaps)
+{
+ if (cache->free_space_ctl->free_extents != num_extents) {
+ test_err(
+ "incorrect # of extent entries in the cache: %d, expected %d",
+ cache->free_space_ctl->free_extents, num_extents);
+ return -EINVAL;
+ }
+ if (cache->free_space_ctl->total_bitmaps != num_bitmaps) {
+ test_err(
+ "incorrect # of extent entries in the cache: %d, expected %d",
+ cache->free_space_ctl->total_bitmaps, num_bitmaps);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Used by test_steal_space_from_bitmap_to_extent(). */
+static int check_cache_empty(struct btrfs_block_group *cache)
+{
+ u64 offset;
+ u64 max_extent_size;
+
+ /*
+ * Now let's confirm that there's absolutely no free space left to
+ * allocate.
+ */
+ if (cache->free_space_ctl->free_space != 0) {
+ test_err("cache free space is not 0");
+ return -EINVAL;
+ }
+
+ /* And any allocation request, no matter how small, should fail now. */
+ offset = btrfs_find_space_for_alloc(cache, 0, 4096, 0,
+ &max_extent_size);
+ if (offset != 0) {
+ test_err("space allocation did not fail, returned offset: %llu",
+ offset);
+ return -EINVAL;
+ }
+
+ /* And no extent nor bitmap entries in the cache anymore. */
+ return check_num_extents_and_bitmaps(cache, 0, 0);
+}
+
+/*
+ * Before we were able to steal free space from a bitmap entry to an extent
+ * entry, we could end up with 2 entries representing a contiguous free space.
+ * One would be an extent entry and the other a bitmap entry. Since in order
+ * to allocate space to a caller we use only 1 entry, we couldn't return that
+ * whole range to the caller if it was requested. This forced the caller to
+ * either assume ENOSPC or perform several smaller space allocations, which
+ * wasn't optimal as they could be spread all over the block group while under
+ * concurrency (extra overhead and fragmentation).
+ *
+ * This stealing approach is beneficial, since we always prefer to allocate
+ * from extent entries, both for clustered and non-clustered allocation
+ * requests.
+ */
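+/*
+ * Illustrative before/after picture, using the offsets of the first
+ * scenario exercised below:
+ *
+ *   without stealing: extent [128M - 256K, 128M) + bitmap [128M, 128M + 768K)
+ *   with stealing:    extent [128M - 256K, 128M + 768K)
+ *
+ * so a single 1M allocation can be satisfied from one entry.
+ */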
+static int
+test_steal_space_from_bitmap_to_extent(struct btrfs_block_group *cache,
+ u32 sectorsize)
+{
+ int ret;
+ u64 offset;
+ u64 max_extent_size;
+ const struct btrfs_free_space_op test_free_space_ops = {
+ .use_bitmap = test_use_bitmap,
+ };
+ const struct btrfs_free_space_op *orig_free_space_ops;
+
+ test_msg("running space stealing from bitmap to extent tests");
+
+ /*
+ * For this test, we want to ensure we end up with an extent entry
+ * immediately adjacent to a bitmap entry, where the bitmap starts
+ * at an offset where the extent entry ends. We keep adding and
+ * removing free space to reach into this state, but to get there
+ * we need to reach a point where marking new free space doesn't
+ * result in adding new extent entries or merging the new space
+ * with existing extent entries - the space ends up being marked
+ * in an existing bitmap that covers the new free space range.
+ *
+ * To get there, we need to reach the threshold set at
+ * cache->free_space_ctl->extents_thresh, which currently is
+ * 256 extents on an x86_64 system at least, and a few other
+ * conditions (check free_space_cache.c). Instead of making the
+ * test much longer and complicated, use a "use_bitmap" operation
+ * that forces use of bitmaps as soon as we have at least 1
+ * extent entry.
+ */
+ orig_free_space_ops = cache->free_space_ctl->op;
+ cache->free_space_ctl->op = &test_free_space_ops;
+
+ /*
+ * Extent entry covering free space range [128Mb - 256Kb, 128Mb - 128Kb[
+ */
+ ret = test_add_free_space_entry(cache, SZ_128M - SZ_256K, SZ_128K, 0);
+ if (ret) {
+ test_err("couldn't add extent entry %d", ret);
+ return ret;
+ }
+
+ /* Bitmap entry covering free space range [128Mb + 512Kb, 256Mb[ */
+ ret = test_add_free_space_entry(cache, SZ_128M + SZ_512K,
+ SZ_128M - SZ_512K, 1);
+ if (ret) {
+ test_err("couldn't add bitmap entry %d", ret);
+ return ret;
+ }
+
+ ret = check_num_extents_and_bitmaps(cache, 2, 1);
+ if (ret)
+ return ret;
+
+ /*
+ * Now make only the first 256Kb of the bitmap marked as free, so that
+ * we end up with only the following ranges marked as free space:
+ *
+ * [128Mb - 256Kb, 128Mb - 128Kb[
+ * [128Mb + 512Kb, 128Mb + 768Kb[
+ */
+ ret = btrfs_remove_free_space(cache,
+ SZ_128M + 768 * SZ_1K,
+ SZ_128M - 768 * SZ_1K);
+ if (ret) {
+ test_err("failed to free part of bitmap space %d", ret);
+ return ret;
+ }
+
+ /* Confirm that only those 2 ranges are marked as free. */
+ if (!test_check_exists(cache, SZ_128M - SZ_256K, SZ_128K)) {
+ test_err("free space range missing");
+ return -ENOENT;
+ }
+ if (!test_check_exists(cache, SZ_128M + SZ_512K, SZ_256K)) {
+ test_err("free space range missing");
+ return -ENOENT;
+ }
+
+ /*
+ * Confirm that the bitmap range [128Mb + 768Kb, 256Mb[ isn't marked
+ * as free anymore.
+ */
+ if (test_check_exists(cache, SZ_128M + 768 * SZ_1K,
+ SZ_128M - 768 * SZ_1K)) {
+ test_err("bitmap region not removed from space cache");
+ return -EINVAL;
+ }
+
+ /*
+ * Confirm that the region [128Mb + 256Kb, 128Mb + 512Kb[, which is
+ * covered by the bitmap, isn't marked as free.
+ */
+ if (test_check_exists(cache, SZ_128M + SZ_256K, SZ_256K)) {
+ test_err("invalid bitmap region marked as free");
+ return -EINVAL;
+ }
+
+ /*
+ * Confirm that the region [128Mb, 128Mb + 256Kb[, which is covered
+ * by the bitmap too, isn't marked as free either.
+ */
+ if (test_check_exists(cache, SZ_128M, SZ_256K)) {
+ test_err("invalid bitmap region marked as free");
+ return -EINVAL;
+ }
+
+ /*
+ * Now let's mark the region [128Mb, 128Mb + 512Kb[ as free too, but
+ * let's make sure the free space cache marks it as free in the bitmap,
+ * and doesn't insert a new extent entry to represent this region.
+ */
+ ret = btrfs_add_free_space(cache, SZ_128M, SZ_512K);
+ if (ret) {
+ test_err("error adding free space: %d", ret);
+ return ret;
+ }
+ /* Confirm the region is marked as free. */
+ if (!test_check_exists(cache, SZ_128M, SZ_512K)) {
+ test_err("bitmap region not marked as free");
+ return -ENOENT;
+ }
+
+ /*
+ * Confirm that no new extent entries or bitmap entries were added to
+ * the cache after adding that free space region.
+ */
+ ret = check_num_extents_and_bitmaps(cache, 2, 1);
+ if (ret)
+ return ret;
+
+ /*
+ * Now let's add a small free space region to the right of the previous
+ * one, which is not contiguous with it and is part of the bitmap too.
+ * The goal is to test that the bitmap entry space stealing doesn't
+ * steal this space region.
+ */
+ ret = btrfs_add_free_space(cache, SZ_128M + SZ_16M, sectorsize);
+ if (ret) {
+ test_err("error adding free space: %d", ret);
+ return ret;
+ }
+
+ /*
+ * Confirm that no new extent entries or bitmap entries were added to
+ * the cache after adding that free space region.
+ */
+ ret = check_num_extents_and_bitmaps(cache, 2, 1);
+ if (ret)
+ return ret;
+
+ /*
+ * Now mark the region [128Mb - 128Kb, 128Mb[ as free too. This will
+ * expand the range covered by the existing extent entry that represents
+ * the free space [128Mb - 256Kb, 128Mb - 128Kb[.
+ */
+ ret = btrfs_add_free_space(cache, SZ_128M - SZ_128K, SZ_128K);
+ if (ret) {
+ test_err("error adding free space: %d", ret);
+ return ret;
+ }
+ /* Confirm the region is marked as free. */
+ if (!test_check_exists(cache, SZ_128M - SZ_128K, SZ_128K)) {
+ test_err("extent region not marked as free");
+ return -ENOENT;
+ }
+
+ /*
+ * Confirm that our extent entry didn't steal all free space from the
+ * bitmap, because of the small 4Kb free space region.
+ */
+ ret = check_num_extents_and_bitmaps(cache, 2, 1);
+ if (ret)
+ return ret;
+
+ /*
+ * So now we have the range [128Mb - 256Kb, 128Mb + 768Kb[ as free
+ * space. Without stealing bitmap free space into extent entry space,
+ * we would have all this free space represented by 2 entries in the
+ * cache:
+ *
+ * extent entry covering range: [128Mb - 256Kb, 128Mb[
+ * bitmap entry covering range: [128Mb, 128Mb + 768Kb[
+ *
+ * Attempting to allocate the whole free space (1Mb) would fail, because
+ * we can't allocate from multiple entries.
+ * With the bitmap free space stealing, we get a single extent entry
+ * that represents the 1Mb free space, and therefore we're able to
+ * allocate the whole free space at once.
+ */
+ if (!test_check_exists(cache, SZ_128M - SZ_256K, SZ_1M)) {
+ test_err("expected region not marked as free");
+ return -ENOENT;
+ }
+
+ if (cache->free_space_ctl->free_space != (SZ_1M + sectorsize)) {
+ test_err("cache free space is not 1Mb + %u", sectorsize);
+ return -EINVAL;
+ }
+
+ offset = btrfs_find_space_for_alloc(cache,
+ 0, SZ_1M, 0,
+ &max_extent_size);
+ if (offset != (SZ_128M - SZ_256K)) {
+ test_err(
+ "failed to allocate 1Mb from space cache, returned offset is: %llu",
+ offset);
+ return -EINVAL;
+ }
+
+ /*
+ * All that remains is a sectorsize free space region in a bitmap.
+ * Confirm.
+ */
+ ret = check_num_extents_and_bitmaps(cache, 1, 1);
+ if (ret)
+ return ret;
+
+ if (cache->free_space_ctl->free_space != sectorsize) {
+ test_err("cache free space is not %u", sectorsize);
+ return -EINVAL;
+ }
+
+ offset = btrfs_find_space_for_alloc(cache,
+ 0, sectorsize, 0,
+ &max_extent_size);
+ if (offset != (SZ_128M + SZ_16M)) {
+ test_err("failed to allocate %u, returned offset : %llu",
+ sectorsize, offset);
+ return -EINVAL;
+ }
+
+ ret = check_cache_empty(cache);
+ if (ret)
+ return ret;
+
+ btrfs_remove_free_space_cache(cache);
+
+ /*
+ * Now test a similar scenario, but where our extent entry is located
+ * to the right of the bitmap entry, so that we can check that stealing
+ * space from a bitmap to the front of an extent entry works.
+ */
+
+ /*
+ * Extent entry covering free space range [128Mb + 128Kb, 128Mb + 256Kb[
+ */
+ ret = test_add_free_space_entry(cache, SZ_128M + SZ_128K, SZ_128K, 0);
+ if (ret) {
+ test_err("couldn't add extent entry %d", ret);
+ return ret;
+ }
+
+ /* Bitmap entry covering free space range [0, 128Mb - 512Kb[ */
+ ret = test_add_free_space_entry(cache, 0, SZ_128M - SZ_512K, 1);
+ if (ret) {
+ test_err("couldn't add bitmap entry %d", ret);
+ return ret;
+ }
+
+ ret = check_num_extents_and_bitmaps(cache, 2, 1);
+ if (ret)
+ return ret;
+
+ /*
+ * Now make only the last 256Kb of the bitmap marked as free, so that
+ * we end up with only the following ranges marked as free space:
+ *
+ * [128Mb + 128Kb, 128Mb + 256Kb[
+ * [128Mb - 768Kb, 128Mb - 512Kb[
+ */
+ ret = btrfs_remove_free_space(cache, 0, SZ_128M - 768 * SZ_1K);
+ if (ret) {
+ test_err("failed to free part of bitmap space %d", ret);
+ return ret;
+ }
+
+ /* Confirm that only those 2 ranges are marked as free. */
+ if (!test_check_exists(cache, SZ_128M + SZ_128K, SZ_128K)) {
+ test_err("free space range missing");
+ return -ENOENT;
+ }
+ if (!test_check_exists(cache, SZ_128M - 768 * SZ_1K, SZ_256K)) {
+ test_err("free space range missing");
+ return -ENOENT;
+ }
+
+ /*
+ * Confirm that the bitmap range [0, 128Mb - 768Kb[ isn't marked
+ * as free anymore.
+ */
+ if (test_check_exists(cache, 0, SZ_128M - 768 * SZ_1K)) {
+ test_err("bitmap region not removed from space cache");
+ return -EINVAL;
+ }
+
+ /*
+ * Confirm that the region [128Mb - 512Kb, 128Mb[, which is
+ * covered by the bitmap, isn't marked as free.
+ */
+ if (test_check_exists(cache, SZ_128M - SZ_512K, SZ_512K)) {
+ test_err("invalid bitmap region marked as free");
+ return -EINVAL;
+ }
+
+ /*
+ * Now let's mark the region [128Mb - 512Kb, 128Mb[ as free too, but
+ * let's make sure the free space cache marks it as free in the bitmap,
+ * and doesn't insert a new extent entry to represent this region.
+ */
+ ret = btrfs_add_free_space(cache, SZ_128M - SZ_512K, SZ_512K);
+ if (ret) {
+ test_err("error adding free space: %d", ret);
+ return ret;
+ }
+ /* Confirm the region is marked as free. */
+ if (!test_check_exists(cache, SZ_128M - SZ_512K, SZ_512K)) {
+ test_err("bitmap region not marked as free");
+ return -ENOENT;
+ }
+
+ /*
+ * Confirm that no new extent entries or bitmap entries were added to
+ * the cache after adding that free space region.
+ */
+ ret = check_num_extents_and_bitmaps(cache, 2, 1);
+ if (ret)
+ return ret;
+
+ /*
+ * Now let's add a small free space region to the left of the previous
+ * one, which is not contiguous with it and is part of the bitmap too.
+ * The goal is to test that the bitmap entry space stealing doesn't
+ * steal this space region.
+ */
+ ret = btrfs_add_free_space(cache, SZ_32M, 2 * sectorsize);
+ if (ret) {
+ test_err("error adding free space: %d", ret);
+ return ret;
+ }
+
+ /*
+ * Now mark the region [128Mb, 128Mb + 128Kb[ as free too. This will
+ * expand the range covered by the existing extent entry that represents
+ * the free space [128Mb + 128Kb, 128Mb + 256Kb[.
+ */
+ ret = btrfs_add_free_space(cache, SZ_128M, SZ_128K);
+ if (ret) {
+ test_err("error adding free space: %d", ret);
+ return ret;
+ }
+ /* Confirm the region is marked as free. */
+ if (!test_check_exists(cache, SZ_128M, SZ_128K)) {
+ test_err("extent region not marked as free");
+ return -ENOENT;
+ }
+
+ /*
+ * Confirm that our extent entry didn't steal all free space from the
+ * bitmap, because of the small 2 * sectorsize free space region.
+ */
+ ret = check_num_extents_and_bitmaps(cache, 2, 1);
+ if (ret)
+ return ret;
+
+ /*
+ * So now we have the range [128Mb - 768Kb, 128Mb + 256Kb[ as free
+ * space. Without stealing bitmap free space into extent entry space,
+ * we would have all this free space represented by 2 entries in the
+ * cache:
+ *
+ * extent entry covering range: [128Mb, 128Mb + 256Kb[
+ * bitmap entry covering range: [128Mb - 768Kb, 128Mb[
+ *
+ * Attempting to allocate the whole free space (1Mb) would fail, because
+ * we can't allocate from multiple entries.
+ * With the bitmap free space stealing, we get a single extent entry
+ * that represents the 1Mb free space, and therefore we're able to
+ * allocate the whole free space at once.
+ */
+ if (!test_check_exists(cache, SZ_128M - 768 * SZ_1K, SZ_1M)) {
+ test_err("expected region not marked as free");
+ return -ENOENT;
+ }
+
+ if (cache->free_space_ctl->free_space != (SZ_1M + 2 * sectorsize)) {
+ test_err("cache free space is not 1Mb + %u", 2 * sectorsize);
+ return -EINVAL;
+ }
+
+ offset = btrfs_find_space_for_alloc(cache, 0, SZ_1M, 0,
+ &max_extent_size);
+ if (offset != (SZ_128M - 768 * SZ_1K)) {
+ test_err(
+ "failed to allocate 1Mb from space cache, returned offset is: %llu",
+ offset);
+ return -EINVAL;
+ }
+
+ /*
+ * All that remains is 2 * sectorsize free space region
+ * in a bitmap. Confirm.
+ */
+ ret = check_num_extents_and_bitmaps(cache, 1, 1);
+ if (ret)
+ return ret;
+
+ if (cache->free_space_ctl->free_space != 2 * sectorsize) {
+ test_err("cache free space is not %u", 2 * sectorsize);
+ return -EINVAL;
+ }
+
+ offset = btrfs_find_space_for_alloc(cache,
+ 0, 2 * sectorsize, 0,
+ &max_extent_size);
+ if (offset != SZ_32M) {
+ test_err("failed to allocate %u, offset: %llu",
+ 2 * sectorsize, offset);
+ return -EINVAL;
+ }
+
+ ret = check_cache_empty(cache);
+ if (ret)
+ return ret;
+
+ cache->free_space_ctl->op = orig_free_space_ops;
+ btrfs_remove_free_space_cache(cache);
+
+ return 0;
+}
+
+static bool bytes_index_use_bitmap(struct btrfs_free_space_ctl *ctl,
+ struct btrfs_free_space *info)
+{
+ return true;
+}
+
+static int test_bytes_index(struct btrfs_block_group *cache, u32 sectorsize)
+{
+ const struct btrfs_free_space_op test_free_space_ops = {
+ .use_bitmap = bytes_index_use_bitmap,
+ };
+ const struct btrfs_free_space_op *orig_free_space_ops;
+ struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
+ struct btrfs_free_space *entry;
+ struct rb_node *node;
+ u64 offset, max_extent_size, bytes;
+ int ret, i;
+
+ test_msg("running bytes index tests");
+
+ /* First just validate that it does everything in order. */
+ offset = 0;
+ for (i = 0; i < 10; i++) {
+ bytes = (i + 1) * SZ_1M;
+ ret = test_add_free_space_entry(cache, offset, bytes, 0);
+ if (ret) {
+ test_err("couldn't add extent entry %d\n", ret);
+ return ret;
+ }
+ offset += bytes + sectorsize;
+ }
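+ /*
+ * The bytes index keeps entries sorted by size, largest first, so the
+ * walk below is expected to visit 10M, 9M, ..., 1M in that order.
+ */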
+
+ for (node = rb_first_cached(&ctl->free_space_bytes), i = 9; node;
+ node = rb_next(node), i--) {
+ entry = rb_entry(node, struct btrfs_free_space, bytes_index);
+ bytes = (i + 1) * SZ_1M;
+ if (entry->bytes != bytes) {
+ test_err("invalid bytes index order, found %llu expected %llu",
+ entry->bytes, bytes);
+ return -EINVAL;
+ }
+ }
+
+ /* Now validate bitmaps do the correct thing. */
+ btrfs_remove_free_space_cache(cache);
+ for (i = 0; i < 2; i++) {
+ offset = i * BITS_PER_BITMAP * sectorsize;
+ bytes = (i + 1) * SZ_1M;
+ ret = test_add_free_space_entry(cache, offset, bytes, 1);
+ if (ret) {
+ test_err("couldn't add bitmap entry");
+ return ret;
+ }
+ }
+
+ for (node = rb_first_cached(&ctl->free_space_bytes), i = 1; node;
+ node = rb_next(node), i--) {
+ entry = rb_entry(node, struct btrfs_free_space, bytes_index);
+ bytes = (i + 1) * SZ_1M;
+ if (entry->bytes != bytes) {
+ test_err("invalid bytes index order, found %llu expected %llu",
+ entry->bytes, bytes);
+ return -EINVAL;
+ }
+ }
+
+ /* Now validate bitmaps with different ->max_extent_size. */
+ btrfs_remove_free_space_cache(cache);
+ orig_free_space_ops = cache->free_space_ctl->op;
+ cache->free_space_ctl->op = &test_free_space_ops;
+
+ ret = test_add_free_space_entry(cache, 0, sectorsize, 1);
+ if (ret) {
+ test_err("couldn't add bitmap entry");
+ return ret;
+ }
+
+ offset = BITS_PER_BITMAP * sectorsize;
+ ret = test_add_free_space_entry(cache, offset, sectorsize, 1);
+ if (ret) {
+ test_err("couldn't add bitmap_entry");
+ return ret;
+ }
+
+ /*
+ * Now set a bunch of sectorsize extents in the first entry so its
+ * ->bytes is large.
+ */
+ for (i = 2; i < 20; i += 2) {
+ offset = sectorsize * i;
+ ret = btrfs_add_free_space(cache, offset, sectorsize);
+ if (ret) {
+ test_err("error populating sparse bitmap %d", ret);
+ return ret;
+ }
+ }
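+ /*
+ * The loop above set 9 more sectorsize bits (i = 2, 4, ..., 18) on top
+ * of the initial one, so the first bitmap's ->bytes is now
+ * 10 * sectorsize, which the bytes index check further below relies on.
+ */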
+
+ /*
+ * Now set a contiguous extent in the second bitmap so its
+ * ->max_extent_size is larger than the first bitmap's.
+ */
+ offset = (BITS_PER_BITMAP * sectorsize) + sectorsize;
+ ret = btrfs_add_free_space(cache, offset, sectorsize);
+ if (ret) {
+ test_err("error adding contiguous extent %d", ret);
+ return ret;
+ }
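+ /*
+ * The second bitmap now has bits set at offsets 0 and sectorsize within
+ * it, i.e. a contiguous 2 * sectorsize run, which is the
+ * max_extent_size the failed allocation below is expected to report.
+ */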
+
+ /*
+ * Since we don't set ->max_extent_size unless we search, everything
+ * should be indexed on ->bytes.
+ */
+ entry = rb_entry(rb_first_cached(&ctl->free_space_bytes),
+ struct btrfs_free_space, bytes_index);
+ if (entry->bytes != (10 * sectorsize)) {
+ test_err("error, wrong entry in the first slot in bytes_index");
+ return -EINVAL;
+ }
+
+ max_extent_size = 0;
+ offset = btrfs_find_space_for_alloc(cache, cache->start, sectorsize * 3,
+ 0, &max_extent_size);
+ if (offset != 0) {
+ test_err("found space to alloc even though we don't have enough space");
+ return -EINVAL;
+ }
+
+ if (max_extent_size != (2 * sectorsize)) {
+ test_err("got the wrong max_extent size %llu expected %llu",
+ max_extent_size, (unsigned long long)(2 * sectorsize));
+ return -EINVAL;
+ }
+
+ /*
+ * The search should have re-arranged the bytes index to use the
+ * ->max_extent_size, validate it's now what we expect it to be.
+ */
+ entry = rb_entry(rb_first_cached(&ctl->free_space_bytes),
+ struct btrfs_free_space, bytes_index);
+ if (entry->bytes != (2 * sectorsize)) {
+ test_err("error, the bytes index wasn't recalculated properly");
+ return -EINVAL;
+ }
+
+ /* Add another sectorsize to re-arrange the tree back to ->bytes. */
+ offset = (BITS_PER_BITMAP * sectorsize) - sectorsize;
+ ret = btrfs_add_free_space(cache, offset, sectorsize);
+ if (ret) {
+ test_err("error adding extent to the sparse entry %d", ret);
+ return ret;
+ }
+
+ entry = rb_entry(rb_first_cached(&ctl->free_space_bytes),
+ struct btrfs_free_space, bytes_index);
+ if (entry->bytes != (11 * sectorsize)) {
+ test_err("error, wrong entry in the first slot in bytes_index");
+ return -EINVAL;
+ }
+
+ /*
+ * Now make sure we find our correct entry after searching that will
+ * result in a re-arranging of the tree.
+ */
+ max_extent_size = 0;
+ offset = btrfs_find_space_for_alloc(cache, cache->start, sectorsize * 2,
+ 0, &max_extent_size);
+ if (offset != (BITS_PER_BITMAP * sectorsize)) {
+ test_err("error, found %llu instead of %llu for our alloc",
+ offset,
+ (unsigned long long)(BITS_PER_BITMAP * sectorsize));
+ return -EINVAL;
+ }
+
+ cache->free_space_ctl->op = orig_free_space_ops;
+ btrfs_remove_free_space_cache(cache);
+ return 0;
+}
+
+int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize)
+{
+ struct btrfs_fs_info *fs_info;
+ struct btrfs_block_group *cache;
+ struct btrfs_root *root = NULL;
+ int ret = -ENOMEM;
+
+ test_msg("running btrfs free space cache tests");
+ fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
+ if (!fs_info) {
+ test_std_err(TEST_ALLOC_FS_INFO);
+ return -ENOMEM;
+ }
+
+ /*
+ * For ppc64 (with a 64k page size), bytes per bitmap might be
+ * larger than 1G. To make the bitmap test work on ppc64, allocate
+ * a dummy block group whose size crosses a bitmap boundary.
+ */
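+ /*
+ * For example, with 64K pages and a 4K sectorsize one bitmap covers
+ * 65536 * 8 * 4096 bytes = 2GiB.
+ */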
+ cache = btrfs_alloc_dummy_block_group(fs_info,
+ BITS_PER_BITMAP * sectorsize + PAGE_SIZE);
+ if (!cache) {
+ test_std_err(TEST_ALLOC_BLOCK_GROUP);
+ btrfs_free_dummy_fs_info(fs_info);
+ return -ENOMEM;
+ }
+
+ root = btrfs_alloc_dummy_root(fs_info);
+ if (IS_ERR(root)) {
+ test_std_err(TEST_ALLOC_ROOT);
+ ret = PTR_ERR(root);
+ goto out;
+ }
+
+ root->root_key.objectid = BTRFS_EXTENT_TREE_OBJECTID;
+ root->root_key.type = BTRFS_ROOT_ITEM_KEY;
+ root->root_key.offset = 0;
+ btrfs_global_root_insert(root);
+
+ ret = test_extents(cache);
+ if (ret)
+ goto out;
+ ret = test_bitmaps(cache, sectorsize);
+ if (ret)
+ goto out;
+ ret = test_bitmaps_and_extents(cache, sectorsize);
+ if (ret)
+ goto out;
+
+ ret = test_steal_space_from_bitmap_to_extent(cache, sectorsize);
+ if (ret)
+ goto out;
+ ret = test_bytes_index(cache, sectorsize);
+out:
+ btrfs_free_dummy_block_group(cache);
+ btrfs_free_dummy_root(root);
+ btrfs_free_dummy_fs_info(fs_info);
+ return ret;
+}
diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c
new file mode 100644
index 0000000000..b61972046f
--- /dev/null
+++ b/fs/btrfs/tests/free-space-tree-tests.c
@@ -0,0 +1,589 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015 Facebook. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include "btrfs-tests.h"
+#include "../ctree.h"
+#include "../disk-io.h"
+#include "../free-space-tree.h"
+#include "../transaction.h"
+#include "../block-group.h"
+#include "../accessors.h"
+
+struct free_space_extent {
+ u64 start;
+ u64 length;
+};
+
+static int __check_free_space_extents(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+ struct btrfs_block_group *cache,
+ struct btrfs_path *path,
+ const struct free_space_extent * const extents,
+ unsigned int num_extents)
+{
+ struct btrfs_free_space_info *info;
+ struct btrfs_key key;
+ int prev_bit = 0, bit;
+ u64 extent_start = 0, offset, end;
+ u32 flags, extent_count;
+ unsigned int i;
+ int ret;
+
+ info = search_free_space_info(trans, cache, path, 0);
+ if (IS_ERR(info)) {
+ test_err("could not find free space info");
+ ret = PTR_ERR(info);
+ goto out;
+ }
+ flags = btrfs_free_space_flags(path->nodes[0], info);
+ extent_count = btrfs_free_space_extent_count(path->nodes[0], info);
+
+ if (extent_count != num_extents) {
+ test_err("extent count is wrong");
+ ret = -EINVAL;
+ goto out;
+ }
+ if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
+ if (path->slots[0] != 0)
+ goto invalid;
+ end = cache->start + cache->length;
+ i = 0;
+ while (++path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
+ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+ if (key.type != BTRFS_FREE_SPACE_BITMAP_KEY)
+ goto invalid;
+ offset = key.objectid;
+ while (offset < key.objectid + key.offset) {
+ bit = free_space_test_bit(cache, path, offset);
+ if (prev_bit == 0 && bit == 1) {
+ extent_start = offset;
+ } else if (prev_bit == 1 && bit == 0) {
+ if (i >= num_extents ||
+ extent_start != extents[i].start ||
+ offset - extent_start != extents[i].length)
+ goto invalid;
+ i++;
+ }
+ prev_bit = bit;
+ offset += fs_info->sectorsize;
+ }
+ }
+ if (prev_bit == 1) {
+ if (i >= num_extents ||
+ extent_start != extents[i].start ||
+ end - extent_start != extents[i].length)
+ goto invalid;
+ i++;
+ }
+ if (i != num_extents)
+ goto invalid;
+ } else {
+ if (btrfs_header_nritems(path->nodes[0]) != num_extents + 1 ||
+ path->slots[0] != 0)
+ goto invalid;
+ for (i = 0; i < num_extents; i++) {
+ path->slots[0]++;
+ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+ if (key.type != BTRFS_FREE_SPACE_EXTENT_KEY ||
+ key.objectid != extents[i].start ||
+ key.offset != extents[i].length)
+ goto invalid;
+ }
+ }
+
+ ret = 0;
+out:
+ btrfs_release_path(path);
+ return ret;
+invalid:
+ test_err("free space tree is invalid");
+ ret = -EINVAL;
+ goto out;
+}
+
+static int check_free_space_extents(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+ struct btrfs_block_group *cache,
+ struct btrfs_path *path,
+ const struct free_space_extent * const extents,
+ unsigned int num_extents)
+{
+ struct btrfs_free_space_info *info;
+ u32 flags;
+ int ret;
+
+ info = search_free_space_info(trans, cache, path, 0);
+ if (IS_ERR(info)) {
+ test_err("could not find free space info");
+ btrfs_release_path(path);
+ return PTR_ERR(info);
+ }
+ flags = btrfs_free_space_flags(path->nodes[0], info);
+ btrfs_release_path(path);
+
+ ret = __check_free_space_extents(trans, fs_info, cache, path, extents,
+ num_extents);
+ if (ret)
+ return ret;
+
+ /* Flip it to the other format and check that for good measure. */
+ if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
+ ret = convert_free_space_to_extents(trans, cache, path);
+ if (ret) {
+ test_err("could not convert to extents");
+ return ret;
+ }
+ } else {
+ ret = convert_free_space_to_bitmaps(trans, cache, path);
+ if (ret) {
+ test_err("could not convert to bitmaps");
+ return ret;
+ }
+ }
+ return __check_free_space_extents(trans, fs_info, cache, path, extents,
+ num_extents);
+}
+
+static int test_empty_block_group(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+ struct btrfs_block_group *cache,
+ struct btrfs_path *path,
+ u32 alignment)
+{
+ const struct free_space_extent extents[] = {
+ {cache->start, cache->length},
+ };
+
+ return check_free_space_extents(trans, fs_info, cache, path,
+ extents, ARRAY_SIZE(extents));
+}
+
+static int test_remove_all(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+ struct btrfs_block_group *cache,
+ struct btrfs_path *path,
+ u32 alignment)
+{
+ const struct free_space_extent extents[] = {};
+ int ret;
+
+ ret = __remove_from_free_space_tree(trans, cache, path,
+ cache->start,
+ cache->length);
+ if (ret) {
+ test_err("could not remove free space");
+ return ret;
+ }
+
+ return check_free_space_extents(trans, fs_info, cache, path,
+ extents, ARRAY_SIZE(extents));
+}
+
+static int test_remove_beginning(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+ struct btrfs_block_group *cache,
+ struct btrfs_path *path,
+ u32 alignment)
+{
+ const struct free_space_extent extents[] = {
+ {cache->start + alignment, cache->length - alignment},
+ };
+ int ret;
+
+ ret = __remove_from_free_space_tree(trans, cache, path,
+ cache->start, alignment);
+ if (ret) {
+ test_err("could not remove free space");
+ return ret;
+ }
+
+ return check_free_space_extents(trans, fs_info, cache, path,
+ extents, ARRAY_SIZE(extents));
+}
+
+static int test_remove_end(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+ struct btrfs_block_group *cache,
+ struct btrfs_path *path,
+ u32 alignment)
+{
+ const struct free_space_extent extents[] = {
+ {cache->start, cache->length - alignment},
+ };
+ int ret;
+
+ ret = __remove_from_free_space_tree(trans, cache, path,
+ cache->start + cache->length - alignment,
+ alignment);
+ if (ret) {
+ test_err("could not remove free space");
+ return ret;
+ }
+
+ return check_free_space_extents(trans, fs_info, cache, path,
+ extents, ARRAY_SIZE(extents));
+}
+
+static int test_remove_middle(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+ struct btrfs_block_group *cache,
+ struct btrfs_path *path,
+ u32 alignment)
+{
+ const struct free_space_extent extents[] = {
+ {cache->start, alignment},
+ {cache->start + 2 * alignment, cache->length - 2 * alignment},
+ };
+ int ret;
+
+ ret = __remove_from_free_space_tree(trans, cache, path,
+ cache->start + alignment,
+ alignment);
+ if (ret) {
+ test_err("could not remove free space");
+ return ret;
+ }
+
+ return check_free_space_extents(trans, fs_info, cache, path,
+ extents, ARRAY_SIZE(extents));
+}
+
+static int test_merge_left(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+ struct btrfs_block_group *cache,
+ struct btrfs_path *path,
+ u32 alignment)
+{
+ const struct free_space_extent extents[] = {
+ {cache->start, 2 * alignment},
+ };
+ int ret;
+
+ ret = __remove_from_free_space_tree(trans, cache, path,
+ cache->start, cache->length);
+ if (ret) {
+ test_err("could not remove free space");
+ return ret;
+ }
+
+ ret = __add_to_free_space_tree(trans, cache, path, cache->start,
+ alignment);
+ if (ret) {
+ test_err("could not add free space");
+ return ret;
+ }
+
+ ret = __add_to_free_space_tree(trans, cache, path,
+ cache->start + alignment,
+ alignment);
+ if (ret) {
+ test_err("could not add free space");
+ return ret;
+ }
+
+ return check_free_space_extents(trans, fs_info, cache, path,
+ extents, ARRAY_SIZE(extents));
+}
+
+static int test_merge_right(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+ struct btrfs_block_group *cache,
+ struct btrfs_path *path,
+ u32 alignment)
+{
+ const struct free_space_extent extents[] = {
+ {cache->start + alignment, 2 * alignment},
+ };
+ int ret;
+
+ ret = __remove_from_free_space_tree(trans, cache, path,
+ cache->start, cache->length);
+ if (ret) {
+ test_err("could not remove free space");
+ return ret;
+ }
+
+ ret = __add_to_free_space_tree(trans, cache, path,
+ cache->start + 2 * alignment,
+ alignment);
+ if (ret) {
+ test_err("could not add free space");
+ return ret;
+ }
+
+ ret = __add_to_free_space_tree(trans, cache, path,
+ cache->start + alignment,
+ alignment);
+ if (ret) {
+ test_err("could not add free space");
+ return ret;
+ }
+
+ return check_free_space_extents(trans, fs_info, cache, path,
+ extents, ARRAY_SIZE(extents));
+}
+
+static int test_merge_both(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+ struct btrfs_block_group *cache,
+ struct btrfs_path *path,
+ u32 alignment)
+{
+ const struct free_space_extent extents[] = {
+ {cache->start, 3 * alignment},
+ };
+ int ret;
+
+ ret = __remove_from_free_space_tree(trans, cache, path,
+ cache->start, cache->length);
+ if (ret) {
+ test_err("could not remove free space");
+ return ret;
+ }
+
+ ret = __add_to_free_space_tree(trans, cache, path, cache->start,
+ alignment);
+ if (ret) {
+ test_err("could not add free space");
+ return ret;
+ }
+
+ ret = __add_to_free_space_tree(trans, cache, path,
+ cache->start + 2 * alignment, alignment);
+ if (ret) {
+ test_err("could not add free space");
+ return ret;
+ }
+
+ ret = __add_to_free_space_tree(trans, cache, path,
+ cache->start + alignment, alignment);
+ if (ret) {
+ test_err("could not add free space");
+ return ret;
+ }
+
+ return check_free_space_extents(trans, fs_info, cache, path,
+ extents, ARRAY_SIZE(extents));
+}
+
+static int test_merge_none(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+ struct btrfs_block_group *cache,
+ struct btrfs_path *path,
+ u32 alignment)
+{
+ const struct free_space_extent extents[] = {
+ {cache->start, alignment},
+ {cache->start + 2 * alignment, alignment},
+ {cache->start + 4 * alignment, alignment},
+ };
+ int ret;
+
+ ret = __remove_from_free_space_tree(trans, cache, path,
+ cache->start, cache->length);
+ if (ret) {
+ test_err("could not remove free space");
+ return ret;
+ }
+
+ ret = __add_to_free_space_tree(trans, cache, path, cache->start,
+ alignment);
+ if (ret) {
+ test_err("could not add free space");
+ return ret;
+ }
+
+ ret = __add_to_free_space_tree(trans, cache, path,
+ cache->start + 4 * alignment, alignment);
+ if (ret) {
+ test_err("could not add free space");
+ return ret;
+ }
+
+ ret = __add_to_free_space_tree(trans, cache, path,
+ cache->start + 2 * alignment, alignment);
+ if (ret) {
+ test_err("could not add free space");
+ return ret;
+ }
+
+ return check_free_space_extents(trans, fs_info, cache, path,
+ extents, ARRAY_SIZE(extents));
+}
+
+typedef int (*test_func_t)(struct btrfs_trans_handle *,
+ struct btrfs_fs_info *,
+ struct btrfs_block_group *,
+ struct btrfs_path *,
+ u32 alignment);
+
+static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize,
+ u32 nodesize, u32 alignment)
+{
+ struct btrfs_fs_info *fs_info;
+ struct btrfs_root *root = NULL;
+ struct btrfs_block_group *cache = NULL;
+ struct btrfs_trans_handle trans;
+ struct btrfs_path *path = NULL;
+ int ret;
+
+ fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
+ if (!fs_info) {
+ test_std_err(TEST_ALLOC_FS_INFO);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ root = btrfs_alloc_dummy_root(fs_info);
+ if (IS_ERR(root)) {
+ test_std_err(TEST_ALLOC_ROOT);
+ ret = PTR_ERR(root);
+ goto out;
+ }
+
+ btrfs_set_super_compat_ro_flags(root->fs_info->super_copy,
+ BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE);
+ root->root_key.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID;
+ root->root_key.type = BTRFS_ROOT_ITEM_KEY;
+ root->root_key.offset = 0;
+ btrfs_global_root_insert(root);
+ root->fs_info->tree_root = root;
+
+ root->node = alloc_test_extent_buffer(root->fs_info, nodesize);
+ if (IS_ERR(root->node)) {
+ test_std_err(TEST_ALLOC_EXTENT_BUFFER);
+ ret = PTR_ERR(root->node);
+ goto out;
+ }
+ btrfs_set_header_level(root->node, 0);
+ btrfs_set_header_nritems(root->node, 0);
+ root->alloc_bytenr += 2 * nodesize;
+
+ cache = btrfs_alloc_dummy_block_group(fs_info, 8 * alignment);
+ if (!cache) {
+ test_std_err(TEST_ALLOC_BLOCK_GROUP);
+ ret = -ENOMEM;
+ goto out;
+ }
+ cache->bitmap_low_thresh = 0;
+ cache->bitmap_high_thresh = (u32)-1;
+ set_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &cache->runtime_flags);
+ cache->fs_info = root->fs_info;
+
+ btrfs_init_dummy_trans(&trans, root->fs_info);
+
+ path = btrfs_alloc_path();
+ if (!path) {
+ test_std_err(TEST_ALLOC_PATH);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = add_block_group_free_space(&trans, cache);
+ if (ret) {
+ test_err("could not add block group free space");
+ goto out;
+ }
+
+ if (bitmaps) {
+ ret = convert_free_space_to_bitmaps(&trans, cache, path);
+ if (ret) {
+ test_err("could not convert block group to bitmaps");
+ goto out;
+ }
+ }
+
+ ret = test_func(&trans, root->fs_info, cache, path, alignment);
+ if (ret)
+ goto out;
+
+ ret = remove_block_group_free_space(&trans, cache);
+ if (ret) {
+ test_err("could not remove block group free space");
+ goto out;
+ }
+
+ if (btrfs_header_nritems(root->node) != 0) {
+ test_err("free space tree has leftover items");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = 0;
+out:
+ btrfs_free_path(path);
+ btrfs_free_dummy_block_group(cache);
+ btrfs_free_dummy_root(root);
+ btrfs_free_dummy_fs_info(fs_info);
+ return ret;
+}
+
+static int run_test_both_formats(test_func_t test_func, u32 sectorsize,
+ u32 nodesize, u32 alignment)
+{
+ int test_ret = 0;
+ int ret;
+
+ ret = run_test(test_func, 0, sectorsize, nodesize, alignment);
+ if (ret) {
+ test_err(
+ "%ps failed with extents, sectorsize=%u, nodesize=%u, alignment=%u",
+ test_func, sectorsize, nodesize, alignment);
+ test_ret = ret;
+ }
+
+ ret = run_test(test_func, 1, sectorsize, nodesize, alignment);
+ if (ret) {
+ test_err(
+ "%ps failed with bitmaps, sectorsize=%u, nodesize=%u, alignment=%u",
+ test_func, sectorsize, nodesize, alignment);
+ test_ret = ret;
+ }
+
+ return test_ret;
+}
+
+int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize)
+{
+ test_func_t tests[] = {
+ test_empty_block_group,
+ test_remove_all,
+ test_remove_beginning,
+ test_remove_end,
+ test_remove_middle,
+ test_merge_left,
+ test_merge_right,
+ test_merge_both,
+ test_merge_none,
+ };
+ u32 bitmap_alignment;
+ int test_ret = 0;
+ int i;
+
+ /*
+ * Align some operations to a page to flush out bugs in the extent
+ * buffer bitmap handling of highmem.
+ */
+ bitmap_alignment = BTRFS_FREE_SPACE_BITMAP_BITS * PAGE_SIZE;
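+ /*
+ * As a worked example, assuming the usual 256-byte free space bitmap
+ * items (2048 bits), this comes to 2048 * 4K = 8MiB with 4KiB pages.
+ */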
+
+ test_msg("running free space tree tests");
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ int ret;
+
+ ret = run_test_both_formats(tests[i], sectorsize, nodesize,
+ sectorsize);
+ if (ret)
+ test_ret = ret;
+
+ ret = run_test_both_formats(tests[i], sectorsize, nodesize,
+ bitmap_alignment);
+ if (ret)
+ test_ret = ret;
+ }
+
+ return test_ret;
+}
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
new file mode 100644
index 0000000000..492d69d2fa
--- /dev/null
+++ b/fs/btrfs/tests/inode-tests.c
@@ -0,0 +1,1108 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2013 Fusion IO. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include "btrfs-tests.h"
+#include "../ctree.h"
+#include "../btrfs_inode.h"
+#include "../disk-io.h"
+#include "../extent_io.h"
+#include "../volumes.h"
+#include "../compression.h"
+#include "../accessors.h"
+
+static void insert_extent(struct btrfs_root *root, u64 start, u64 len,
+ u64 ram_bytes, u64 offset, u64 disk_bytenr,
+ u64 disk_len, u32 type, u8 compression, int slot)
+{
+ struct btrfs_path path;
+ struct btrfs_file_extent_item *fi;
+ struct extent_buffer *leaf = root->node;
+ struct btrfs_key key;
+ u32 value_len = sizeof(struct btrfs_file_extent_item);
+
+ if (type == BTRFS_FILE_EXTENT_INLINE)
+ value_len += len;
+ memset(&path, 0, sizeof(path));
+
+ path.nodes[0] = leaf;
+ path.slots[0] = slot;
+
+ key.objectid = BTRFS_FIRST_FREE_OBJECTID;
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = start;
+
+ /*
+ * Passing a NULL trans handle is fine here, we have a dummy root eb
+ * and the tree is a single node (level 0).
+ */
+ btrfs_setup_item_for_insert(NULL, root, &path, &key, value_len);
+ fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
+ btrfs_set_file_extent_generation(leaf, fi, 1);
+ btrfs_set_file_extent_type(leaf, fi, type);
+ btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
+ btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_len);
+ btrfs_set_file_extent_offset(leaf, fi, offset);
+ btrfs_set_file_extent_num_bytes(leaf, fi, len);
+ btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
+ btrfs_set_file_extent_compression(leaf, fi, compression);
+ btrfs_set_file_extent_encryption(leaf, fi, 0);
+ btrfs_set_file_extent_other_encoding(leaf, fi, 0);
+}
+
+static void insert_inode_item_key(struct btrfs_root *root)
+{
+ struct btrfs_path path;
+ struct extent_buffer *leaf = root->node;
+ struct btrfs_key key;
+ u32 value_len = 0;
+
+ memset(&path, 0, sizeof(path));
+
+ path.nodes[0] = leaf;
+ path.slots[0] = 0;
+
+ key.objectid = BTRFS_INODE_ITEM_KEY;
+ key.type = BTRFS_INODE_ITEM_KEY;
+ key.offset = 0;
+
+ /*
+ * Passing a NULL trans handle is fine here, we have a dummy root eb
+ * and the tree is a single node (level 0).
+ */
+ btrfs_setup_item_for_insert(NULL, root, &path, &key, value_len);
+}
+
+/*
+ * Build the most complicated map of extents the earth has ever seen. We want
+ * this so we can test all of the corner cases of btrfs_get_extent. Here is a
+ * diagram of how the extents will look; though this layout may not be possible
+ * in practice, we still want to make sure everything acts normally (the last
+ * number is not inclusive).
+ *
+ * [0 - 6][ 6 - 4096 ][ 4096 - 4100][4100 - 8195][8195 - 12291]
+ * [inline][hole but no extent][ hole ][ regular ][regular1 split]
+ *
+ * [12291 - 16387][16387 - 24579][24579 - 28675][ 28675 - 32771][32771 - 36867 ]
+ * [ hole ][regular1 split][ prealloc ][ prealloc1 ][prealloc1 written]
+ *
+ * [36867 - 45059][45059 - 53251][53251 - 57347][57347 - 61443][61443- 69635]
+ * [ prealloc1 ][ compressed ][ compressed1 ][ regular ][ compressed1]
+ *
+ * [69635-73731][ 73731 - 86019 ][86019-90115]
+ * [ regular ][ hole but no extent][ regular ]
+ */
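+/*
+ * Note: the byte offsets in the diagram above assume a 4KiB sectorsize.
+ * setup_file_extents() itself is written in terms of sectorsize, so the
+ * actual offsets scale with the sectorsize under test.
+ */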
+static void setup_file_extents(struct btrfs_root *root, u32 sectorsize)
+{
+ int slot = 0;
+ u64 disk_bytenr = SZ_1M;
+ u64 offset = 0;
+
+ /*
+ * Tree-checker has strict limits on inline extents that they can only
+ * exist at file offset 0, thus we can only have one inline file extent
+ * at most.
+ */
+ insert_extent(root, offset, 6, 6, 0, 0, 0, BTRFS_FILE_EXTENT_INLINE, 0,
+ slot);
+ slot++;
+ offset = sectorsize;
+
+ /* Now another hole */
+ insert_extent(root, offset, 4, 4, 0, 0, 0, BTRFS_FILE_EXTENT_REG, 0,
+ slot);
+ slot++;
+ offset += 4;
+
+ /* Now for a regular extent */
+ insert_extent(root, offset, sectorsize - 1, sectorsize - 1, 0,
+ disk_bytenr, sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot);
+ slot++;
+ disk_bytenr += sectorsize;
+ offset += sectorsize - 1;
+
+ /*
+ * Now for 3 extents that were split from a hole punch so we test
+ * offsets properly.
+ */
+ insert_extent(root, offset, sectorsize, 4 * sectorsize, 0, disk_bytenr,
+ 4 * sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot);
+ slot++;
+ offset += sectorsize;
+ insert_extent(root, offset, sectorsize, sectorsize, 0, 0, 0,
+ BTRFS_FILE_EXTENT_REG, 0, slot);
+ slot++;
+ offset += sectorsize;
+ insert_extent(root, offset, 2 * sectorsize, 4 * sectorsize,
+ 2 * sectorsize, disk_bytenr, 4 * sectorsize,
+ BTRFS_FILE_EXTENT_REG, 0, slot);
+ slot++;
+ offset += 2 * sectorsize;
+ disk_bytenr += 4 * sectorsize;
+
+ /* Now for an unwritten prealloc extent */
+ insert_extent(root, offset, sectorsize, sectorsize, 0, disk_bytenr,
+ sectorsize, BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
+ slot++;
+ offset += sectorsize;
+
+ /*
+ * We want to jack up disk_bytenr a little more so the em stuff doesn't
+ * merge our records.
+ */
+ disk_bytenr += 2 * sectorsize;
+
+ /*
+ * Now for a partially written prealloc extent, basically the same as
+ * the hole punch example above. Note that ram_bytes never changes when
+ * you mark extents written.
+ */
+ insert_extent(root, offset, sectorsize, 4 * sectorsize, 0, disk_bytenr,
+ 4 * sectorsize, BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
+ slot++;
+ offset += sectorsize;
+ insert_extent(root, offset, sectorsize, 4 * sectorsize, sectorsize,
+ disk_bytenr, 4 * sectorsize, BTRFS_FILE_EXTENT_REG, 0,
+ slot);
+ slot++;
+ offset += sectorsize;
+ insert_extent(root, offset, 2 * sectorsize, 4 * sectorsize,
+ 2 * sectorsize, disk_bytenr, 4 * sectorsize,
+ BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
+ slot++;
+ offset += 2 * sectorsize;
+ disk_bytenr += 4 * sectorsize;
+
+ /* Now a normal compressed extent */
+ insert_extent(root, offset, 2 * sectorsize, 2 * sectorsize, 0,
+ disk_bytenr, sectorsize, BTRFS_FILE_EXTENT_REG,
+ BTRFS_COMPRESS_ZLIB, slot);
+ slot++;
+ offset += 2 * sectorsize;
+ /* No merges */
+ disk_bytenr += 2 * sectorsize;
+
+ /* Now a split compressed extent */
+ insert_extent(root, offset, sectorsize, 4 * sectorsize, 0, disk_bytenr,
+ sectorsize, BTRFS_FILE_EXTENT_REG,
+ BTRFS_COMPRESS_ZLIB, slot);
+ slot++;
+ offset += sectorsize;
+ insert_extent(root, offset, sectorsize, sectorsize, 0,
+ disk_bytenr + sectorsize, sectorsize,
+ BTRFS_FILE_EXTENT_REG, 0, slot);
+ slot++;
+ offset += sectorsize;
+ insert_extent(root, offset, 2 * sectorsize, 4 * sectorsize,
+ 2 * sectorsize, disk_bytenr, sectorsize,
+ BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot);
+ slot++;
+ offset += 2 * sectorsize;
+ disk_bytenr += 2 * sectorsize;
+
+ /* Now extents that have a hole but no hole extent */
+ insert_extent(root, offset, sectorsize, sectorsize, 0, disk_bytenr,
+ sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot);
+ slot++;
+ offset += 4 * sectorsize;
+ disk_bytenr += sectorsize;
+ insert_extent(root, offset, sectorsize, sectorsize, 0, disk_bytenr,
+ sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot);
+}
+
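+/*
+ * Expected em->flags masks for the checks below: compressed_only and
+ * prealloc_only get their bits set in btrfs_test_inodes(), while
+ * vacancy_only is left at zero since the synthesized holes in these tests
+ * carry no flags.
+ */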
+static unsigned long prealloc_only = 0;
+static unsigned long compressed_only = 0;
+static unsigned long vacancy_only = 0;
+
+static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
+{
+ struct btrfs_fs_info *fs_info = NULL;
+ struct inode *inode = NULL;
+ struct btrfs_root *root = NULL;
+ struct extent_map *em = NULL;
+ u64 orig_start;
+ u64 disk_bytenr;
+ u64 offset;
+ int ret = -ENOMEM;
+
+ test_msg("running btrfs_get_extent tests");
+
+ inode = btrfs_new_test_inode();
+ if (!inode) {
+ test_std_err(TEST_ALLOC_INODE);
+ return ret;
+ }
+
+ fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
+ if (!fs_info) {
+ test_std_err(TEST_ALLOC_FS_INFO);
+ goto out;
+ }
+
+ root = btrfs_alloc_dummy_root(fs_info);
+ if (IS_ERR(root)) {
+ test_std_err(TEST_ALLOC_ROOT);
+ goto out;
+ }
+
+ root->node = alloc_dummy_extent_buffer(fs_info, nodesize);
+ if (!root->node) {
+ test_std_err(TEST_ALLOC_ROOT);
+ goto out;
+ }
+
+ btrfs_set_header_nritems(root->node, 0);
+ btrfs_set_header_level(root->node, 0);
+ ret = -EINVAL;
+
+ /* First with no extents */
+ BTRFS_I(inode)->root = root;
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, 0, sectorsize);
+ if (IS_ERR(em)) {
+ em = NULL;
+ test_err("got an error when we shouldn't have");
+ goto out;
+ }
+ if (em->block_start != EXTENT_MAP_HOLE) {
+ test_err("expected a hole, got %llu", em->block_start);
+ goto out;
+ }
+ free_extent_map(em);
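+	/*
+	 * Drop the cached extent map from the lookup above so the lookups
+	 * below are served from the file extent items inserted by
+	 * setup_file_extents().
+	 */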
+ btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);
+
+ /*
+ * All of the magic numbers are based on the mapping setup in
+ * setup_file_extents, so if you change anything there you need to
+ * update the comment and update the expected values below.
+ */
+ setup_file_extents(root, sectorsize);
+
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, 0, (u64)-1);
+ if (IS_ERR(em)) {
+ test_err("got an error when we shouldn't have");
+ goto out;
+ }
+ if (em->block_start != EXTENT_MAP_INLINE) {
+ test_err("expected an inline, got %llu", em->block_start);
+ goto out;
+ }
+
+ /*
+ * For inline extent, we always round up the em to sectorsize, as
+ * they are either:
+ *
+ * a) a hidden hole
+ * The range will be zeroed at inline extent read time.
+ *
+ * b) a file extent with unaligned bytenr
+ * Tree checker will reject it.
+ */
+ if (em->start != 0 || em->len != sectorsize) {
+ test_err(
+ "unexpected extent wanted start 0 len %u, got start %llu len %llu",
+ sectorsize, em->start, em->len);
+ goto out;
+ }
+ if (em->flags != 0) {
+ test_err("unexpected flags set, want 0 have %lu", em->flags);
+ goto out;
+ }
+ /*
+ * We don't test anything else for inline since it doesn't get set
+ * unless we have a page for it to write into. Maybe we should change
+ * this?
+ */
+ offset = em->start + em->len;
+ free_extent_map(em);
+
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize);
+ if (IS_ERR(em)) {
+ test_err("got an error when we shouldn't have");
+ goto out;
+ }
+ if (em->block_start != EXTENT_MAP_HOLE) {
+ test_err("expected a hole, got %llu", em->block_start);
+ goto out;
+ }
+ if (em->start != offset || em->len != 4) {
+ test_err(
+ "unexpected extent wanted start %llu len 4, got start %llu len %llu",
+ offset, em->start, em->len);
+ goto out;
+ }
+ if (em->flags != 0) {
+ test_err("unexpected flags set, want 0 have %lu", em->flags);
+ goto out;
+ }
+ offset = em->start + em->len;
+ free_extent_map(em);
+
+ /* Regular extent */
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize);
+ if (IS_ERR(em)) {
+ test_err("got an error when we shouldn't have");
+ goto out;
+ }
+ if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
+ test_err("expected a real extent, got %llu", em->block_start);
+ goto out;
+ }
+ if (em->start != offset || em->len != sectorsize - 1) {
+ test_err(
+ "unexpected extent wanted start %llu len 4095, got start %llu len %llu",
+ offset, em->start, em->len);
+ goto out;
+ }
+ if (em->flags != 0) {
+ test_err("unexpected flags set, want 0 have %lu", em->flags);
+ goto out;
+ }
+ if (em->orig_start != em->start) {
+ test_err("wrong orig offset, want %llu, have %llu", em->start,
+ em->orig_start);
+ goto out;
+ }
+ offset = em->start + em->len;
+ free_extent_map(em);
+
+ /* The next 3 are split extents */
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize);
+ if (IS_ERR(em)) {
+ test_err("got an error when we shouldn't have");
+ goto out;
+ }
+ if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
+ test_err("expected a real extent, got %llu", em->block_start);
+ goto out;
+ }
+ if (em->start != offset || em->len != sectorsize) {
+ test_err(
+ "unexpected extent start %llu len %u, got start %llu len %llu",
+ offset, sectorsize, em->start, em->len);
+ goto out;
+ }
+ if (em->flags != 0) {
+ test_err("unexpected flags set, want 0 have %lu", em->flags);
+ goto out;
+ }
+ if (em->orig_start != em->start) {
+ test_err("wrong orig offset, want %llu, have %llu", em->start,
+ em->orig_start);
+ goto out;
+ }
+ disk_bytenr = em->block_start;
+ orig_start = em->start;
+ offset = em->start + em->len;
+ free_extent_map(em);
+
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize);
+ if (IS_ERR(em)) {
+ test_err("got an error when we shouldn't have");
+ goto out;
+ }
+ if (em->block_start != EXTENT_MAP_HOLE) {
+ test_err("expected a hole, got %llu", em->block_start);
+ goto out;
+ }
+ if (em->start != offset || em->len != sectorsize) {
+ test_err(
+ "unexpected extent wanted start %llu len %u, got start %llu len %llu",
+ offset, sectorsize, em->start, em->len);
+ goto out;
+ }
+ if (em->flags != 0) {
+ test_err("unexpected flags set, want 0 have %lu", em->flags);
+ goto out;
+ }
+ offset = em->start + em->len;
+ free_extent_map(em);
+
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize);
+ if (IS_ERR(em)) {
+ test_err("got an error when we shouldn't have");
+ goto out;
+ }
+ if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
+ test_err("expected a real extent, got %llu", em->block_start);
+ goto out;
+ }
+ if (em->start != offset || em->len != 2 * sectorsize) {
+ test_err(
+ "unexpected extent wanted start %llu len %u, got start %llu len %llu",
+ offset, 2 * sectorsize, em->start, em->len);
+ goto out;
+ }
+ if (em->flags != 0) {
+ test_err("unexpected flags set, want 0 have %lu", em->flags);
+ goto out;
+ }
+ if (em->orig_start != orig_start) {
+ test_err("wrong orig offset, want %llu, have %llu",
+ orig_start, em->orig_start);
+ goto out;
+ }
+ disk_bytenr += (em->start - orig_start);
+ if (em->block_start != disk_bytenr) {
+ test_err("wrong block start, want %llu, have %llu",
+ disk_bytenr, em->block_start);
+ goto out;
+ }
+ offset = em->start + em->len;
+ free_extent_map(em);
+
+ /* Prealloc extent */
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize);
+ if (IS_ERR(em)) {
+ test_err("got an error when we shouldn't have");
+ goto out;
+ }
+ if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
+ test_err("expected a real extent, got %llu", em->block_start);
+ goto out;
+ }
+ if (em->start != offset || em->len != sectorsize) {
+ test_err(
+ "unexpected extent wanted start %llu len %u, got start %llu len %llu",
+ offset, sectorsize, em->start, em->len);
+ goto out;
+ }
+ if (em->flags != prealloc_only) {
+ test_err("unexpected flags set, want %lu have %lu",
+ prealloc_only, em->flags);
+ goto out;
+ }
+ if (em->orig_start != em->start) {
+ test_err("wrong orig offset, want %llu, have %llu", em->start,
+ em->orig_start);
+ goto out;
+ }
+ offset = em->start + em->len;
+ free_extent_map(em);
+
+ /* The next 3 are a half written prealloc extent */
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize);
+ if (IS_ERR(em)) {
+ test_err("got an error when we shouldn't have");
+ goto out;
+ }
+ if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
+ test_err("expected a real extent, got %llu", em->block_start);
+ goto out;
+ }
+ if (em->start != offset || em->len != sectorsize) {
+ test_err(
+ "unexpected extent wanted start %llu len %u, got start %llu len %llu",
+ offset, sectorsize, em->start, em->len);
+ goto out;
+ }
+ if (em->flags != prealloc_only) {
+ test_err("unexpected flags set, want %lu have %lu",
+ prealloc_only, em->flags);
+ goto out;
+ }
+ if (em->orig_start != em->start) {
+ test_err("wrong orig offset, want %llu, have %llu", em->start,
+ em->orig_start);
+ goto out;
+ }
+ disk_bytenr = em->block_start;
+ orig_start = em->start;
+ offset = em->start + em->len;
+ free_extent_map(em);
+
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize);
+ if (IS_ERR(em)) {
+ test_err("got an error when we shouldn't have");
+ goto out;
+ }
+	if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
+ test_err("expected a real extent, got %llu", em->block_start);
+ goto out;
+ }
+ if (em->start != offset || em->len != sectorsize) {
+ test_err(
+ "unexpected extent wanted start %llu len %u, got start %llu len %llu",
+ offset, sectorsize, em->start, em->len);
+ goto out;
+ }
+ if (em->flags != 0) {
+ test_err("unexpected flags set, want 0 have %lu", em->flags);
+ goto out;
+ }
+ if (em->orig_start != orig_start) {
+ test_err("unexpected orig offset, wanted %llu, have %llu",
+ orig_start, em->orig_start);
+ goto out;
+ }
+ if (em->block_start != (disk_bytenr + (em->start - em->orig_start))) {
+ test_err("unexpected block start, wanted %llu, have %llu",
+ disk_bytenr + (em->start - em->orig_start),
+ em->block_start);
+ goto out;
+ }
+ offset = em->start + em->len;
+ free_extent_map(em);
+
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize);
+ if (IS_ERR(em)) {
+ test_err("got an error when we shouldn't have");
+ goto out;
+ }
+ if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
+ test_err("expected a real extent, got %llu", em->block_start);
+ goto out;
+ }
+ if (em->start != offset || em->len != 2 * sectorsize) {
+ test_err(
+ "unexpected extent wanted start %llu len %u, got start %llu len %llu",
+ offset, 2 * sectorsize, em->start, em->len);
+ goto out;
+ }
+ if (em->flags != prealloc_only) {
+ test_err("unexpected flags set, want %lu have %lu",
+ prealloc_only, em->flags);
+ goto out;
+ }
+ if (em->orig_start != orig_start) {
+ test_err("wrong orig offset, want %llu, have %llu", orig_start,
+ em->orig_start);
+ goto out;
+ }
+ if (em->block_start != (disk_bytenr + (em->start - em->orig_start))) {
+ test_err("unexpected block start, wanted %llu, have %llu",
+ disk_bytenr + (em->start - em->orig_start),
+ em->block_start);
+ goto out;
+ }
+ offset = em->start + em->len;
+ free_extent_map(em);
+
+ /* Now for the compressed extent */
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize);
+ if (IS_ERR(em)) {
+ test_err("got an error when we shouldn't have");
+ goto out;
+ }
+ if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
+ test_err("expected a real extent, got %llu", em->block_start);
+ goto out;
+ }
+ if (em->start != offset || em->len != 2 * sectorsize) {
+ test_err(
+ "unexpected extent wanted start %llu len %u, got start %llu len %llu",
+ offset, 2 * sectorsize, em->start, em->len);
+ goto out;
+ }
+ if (em->flags != compressed_only) {
+ test_err("unexpected flags set, want %lu have %lu",
+ compressed_only, em->flags);
+ goto out;
+ }
+ if (em->orig_start != em->start) {
+ test_err("wrong orig offset, want %llu, have %llu",
+ em->start, em->orig_start);
+ goto out;
+ }
+ if (em->compress_type != BTRFS_COMPRESS_ZLIB) {
+ test_err("unexpected compress type, wanted %d, got %d",
+ BTRFS_COMPRESS_ZLIB, em->compress_type);
+ goto out;
+ }
+ offset = em->start + em->len;
+ free_extent_map(em);
+
+ /* Split compressed extent */
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize);
+ if (IS_ERR(em)) {
+ test_err("got an error when we shouldn't have");
+ goto out;
+ }
+ if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
+ test_err("expected a real extent, got %llu", em->block_start);
+ goto out;
+ }
+ if (em->start != offset || em->len != sectorsize) {
+ test_err(
+ "unexpected extent wanted start %llu len %u, got start %llu len %llu",
+ offset, sectorsize, em->start, em->len);
+ goto out;
+ }
+ if (em->flags != compressed_only) {
+ test_err("unexpected flags set, want %lu have %lu",
+ compressed_only, em->flags);
+ goto out;
+ }
+ if (em->orig_start != em->start) {
+ test_err("wrong orig offset, want %llu, have %llu",
+ em->start, em->orig_start);
+ goto out;
+ }
+ if (em->compress_type != BTRFS_COMPRESS_ZLIB) {
+ test_err("unexpected compress type, wanted %d, got %d",
+ BTRFS_COMPRESS_ZLIB, em->compress_type);
+ goto out;
+ }
+ disk_bytenr = em->block_start;
+ orig_start = em->start;
+ offset = em->start + em->len;
+ free_extent_map(em);
+
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize);
+ if (IS_ERR(em)) {
+ test_err("got an error when we shouldn't have");
+ goto out;
+ }
+ if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
+ test_err("expected a real extent, got %llu", em->block_start);
+ goto out;
+ }
+ if (em->start != offset || em->len != sectorsize) {
+ test_err(
+ "unexpected extent wanted start %llu len %u, got start %llu len %llu",
+ offset, sectorsize, em->start, em->len);
+ goto out;
+ }
+ if (em->flags != 0) {
+ test_err("unexpected flags set, want 0 have %lu", em->flags);
+ goto out;
+ }
+ if (em->orig_start != em->start) {
+ test_err("wrong orig offset, want %llu, have %llu", em->start,
+ em->orig_start);
+ goto out;
+ }
+ offset = em->start + em->len;
+ free_extent_map(em);
+
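+	/*
+	 * Every split of a compressed extent still points at the start of the
+	 * on-disk compressed data, so this third split must have the same
+	 * block_start we saved from the first one.
+	 */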
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize);
+ if (IS_ERR(em)) {
+ test_err("got an error when we shouldn't have");
+ goto out;
+ }
+ if (em->block_start != disk_bytenr) {
+ test_err("block start does not match, want %llu got %llu",
+ disk_bytenr, em->block_start);
+ goto out;
+ }
+ if (em->start != offset || em->len != 2 * sectorsize) {
+ test_err(
+ "unexpected extent wanted start %llu len %u, got start %llu len %llu",
+ offset, 2 * sectorsize, em->start, em->len);
+ goto out;
+ }
+ if (em->flags != compressed_only) {
+ test_err("unexpected flags set, want %lu have %lu",
+ compressed_only, em->flags);
+ goto out;
+ }
+ if (em->orig_start != orig_start) {
+ test_err("wrong orig offset, want %llu, have %llu",
+			 orig_start, em->orig_start);
+ goto out;
+ }
+ if (em->compress_type != BTRFS_COMPRESS_ZLIB) {
+ test_err("unexpected compress type, wanted %d, got %d",
+ BTRFS_COMPRESS_ZLIB, em->compress_type);
+ goto out;
+ }
+ offset = em->start + em->len;
+ free_extent_map(em);
+
+ /* A hole between regular extents but no hole extent */
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset + 6, sectorsize);
+ if (IS_ERR(em)) {
+ test_err("got an error when we shouldn't have");
+ goto out;
+ }
+ if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
+ test_err("expected a real extent, got %llu", em->block_start);
+ goto out;
+ }
+ if (em->start != offset || em->len != sectorsize) {
+ test_err(
+ "unexpected extent wanted start %llu len %u, got start %llu len %llu",
+ offset, sectorsize, em->start, em->len);
+ goto out;
+ }
+ if (em->flags != 0) {
+ test_err("unexpected flags set, want 0 have %lu", em->flags);
+ goto out;
+ }
+ if (em->orig_start != em->start) {
+ test_err("wrong orig offset, want %llu, have %llu", em->start,
+ em->orig_start);
+ goto out;
+ }
+ offset = em->start + em->len;
+ free_extent_map(em);
+
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, SZ_4M);
+ if (IS_ERR(em)) {
+ test_err("got an error when we shouldn't have");
+ goto out;
+ }
+ if (em->block_start != EXTENT_MAP_HOLE) {
+ test_err("expected a hole extent, got %llu", em->block_start);
+ goto out;
+ }
+ /*
+	 * Currently we just return the length that we requested rather than
+	 * the length of the actual hole. If this changes we'll have to update
+	 * this test.
+ */
+ if (em->start != offset || em->len != 3 * sectorsize) {
+ test_err(
+ "unexpected extent wanted start %llu len %u, got start %llu len %llu",
+ offset, 3 * sectorsize, em->start, em->len);
+ goto out;
+ }
+ if (em->flags != vacancy_only) {
+ test_err("unexpected flags set, want %lu have %lu",
+ vacancy_only, em->flags);
+ goto out;
+ }
+ if (em->orig_start != em->start) {
+ test_err("wrong orig offset, want %llu, have %llu", em->start,
+ em->orig_start);
+ goto out;
+ }
+ offset = em->start + em->len;
+ free_extent_map(em);
+
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize);
+ if (IS_ERR(em)) {
+ test_err("got an error when we shouldn't have");
+ goto out;
+ }
+ if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
+ test_err("expected a real extent, got %llu", em->block_start);
+ goto out;
+ }
+ if (em->start != offset || em->len != sectorsize) {
+ test_err(
+ "unexpected extent wanted start %llu len %u, got start %llu len %llu",
+ offset, sectorsize, em->start, em->len);
+ goto out;
+ }
+ if (em->flags != 0) {
+ test_err("unexpected flags set, want 0 have %lu", em->flags);
+ goto out;
+ }
+ if (em->orig_start != em->start) {
+ test_err("wrong orig offset, want %llu, have %llu", em->start,
+ em->orig_start);
+ goto out;
+ }
+ ret = 0;
+out:
+ if (!IS_ERR(em))
+ free_extent_map(em);
+ iput(inode);
+ btrfs_free_dummy_root(root);
+ btrfs_free_dummy_fs_info(fs_info);
+ return ret;
+}
+
+static int test_hole_first(u32 sectorsize, u32 nodesize)
+{
+ struct btrfs_fs_info *fs_info = NULL;
+ struct inode *inode = NULL;
+ struct btrfs_root *root = NULL;
+ struct extent_map *em = NULL;
+ int ret = -ENOMEM;
+
+ test_msg("running hole first btrfs_get_extent test");
+
+ inode = btrfs_new_test_inode();
+ if (!inode) {
+ test_std_err(TEST_ALLOC_INODE);
+ return ret;
+ }
+
+ fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
+ if (!fs_info) {
+ test_std_err(TEST_ALLOC_FS_INFO);
+ goto out;
+ }
+
+ root = btrfs_alloc_dummy_root(fs_info);
+ if (IS_ERR(root)) {
+ test_std_err(TEST_ALLOC_ROOT);
+ goto out;
+ }
+
+ root->node = alloc_dummy_extent_buffer(fs_info, nodesize);
+ if (!root->node) {
+ test_std_err(TEST_ALLOC_ROOT);
+ goto out;
+ }
+
+ btrfs_set_header_nritems(root->node, 0);
+ btrfs_set_header_level(root->node, 0);
+ BTRFS_I(inode)->root = root;
+ ret = -EINVAL;
+
+ /*
+ * Need a blank inode item here just so we don't confuse
+ * btrfs_get_extent.
+ */
+ insert_inode_item_key(root);
+ insert_extent(root, sectorsize, sectorsize, sectorsize, 0, sectorsize,
+ sectorsize, BTRFS_FILE_EXTENT_REG, 0, 1);
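+
+	/*
+	 * The only file extent item is at [sectorsize, 2 * sectorsize), so
+	 * the first lookup below must synthesize a hole for [0, sectorsize).
+	 */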
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, 0, 2 * sectorsize);
+ if (IS_ERR(em)) {
+ test_err("got an error when we shouldn't have");
+ goto out;
+ }
+ if (em->block_start != EXTENT_MAP_HOLE) {
+ test_err("expected a hole, got %llu", em->block_start);
+ goto out;
+ }
+ if (em->start != 0 || em->len != sectorsize) {
+ test_err(
+ "unexpected extent wanted start 0 len %u, got start %llu len %llu",
+ sectorsize, em->start, em->len);
+ goto out;
+ }
+ if (em->flags != vacancy_only) {
+ test_err("wrong flags, wanted %lu, have %lu", vacancy_only,
+ em->flags);
+ goto out;
+ }
+ free_extent_map(em);
+
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, sectorsize, 2 * sectorsize);
+ if (IS_ERR(em)) {
+ test_err("got an error when we shouldn't have");
+ goto out;
+ }
+ if (em->block_start != sectorsize) {
+ test_err("expected a real extent, got %llu", em->block_start);
+ goto out;
+ }
+ if (em->start != sectorsize || em->len != sectorsize) {
+ test_err(
+ "unexpected extent wanted start %u len %u, got start %llu len %llu",
+ sectorsize, sectorsize, em->start, em->len);
+ goto out;
+ }
+ if (em->flags != 0) {
+ test_err("unexpected flags set, wanted 0 got %lu",
+ em->flags);
+ goto out;
+ }
+ ret = 0;
+out:
+ if (!IS_ERR(em))
+ free_extent_map(em);
+ iput(inode);
+ btrfs_free_dummy_root(root);
+ btrfs_free_dummy_fs_info(fs_info);
+ return ret;
+}
+
+static int test_extent_accounting(u32 sectorsize, u32 nodesize)
+{
+ struct btrfs_fs_info *fs_info = NULL;
+ struct inode *inode = NULL;
+ struct btrfs_root *root = NULL;
+ int ret = -ENOMEM;
+
+ test_msg("running outstanding_extents tests");
+
+ inode = btrfs_new_test_inode();
+ if (!inode) {
+ test_std_err(TEST_ALLOC_INODE);
+ return ret;
+ }
+
+ fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
+ if (!fs_info) {
+ test_std_err(TEST_ALLOC_FS_INFO);
+ goto out;
+ }
+
+ root = btrfs_alloc_dummy_root(fs_info);
+ if (IS_ERR(root)) {
+ test_std_err(TEST_ALLOC_ROOT);
+ goto out;
+ }
+
+ BTRFS_I(inode)->root = root;
+
+ /* [BTRFS_MAX_EXTENT_SIZE] */
+ ret = btrfs_set_extent_delalloc(BTRFS_I(inode), 0,
+ BTRFS_MAX_EXTENT_SIZE - 1, 0, NULL);
+ if (ret) {
+ test_err("btrfs_set_extent_delalloc returned %d", ret);
+ goto out;
+ }
+ if (BTRFS_I(inode)->outstanding_extents != 1) {
+ ret = -EINVAL;
+ test_err("miscount, wanted 1, got %u",
+ BTRFS_I(inode)->outstanding_extents);
+ goto out;
+ }
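+
+	/*
+	 * A note on the expected counts below: delalloc ranges are accounted
+	 * as outstanding extents in units of BTRFS_MAX_EXTENT_SIZE, i.e. a
+	 * contiguous range of len bytes counts as
+	 * DIV_ROUND_UP(len, BTRFS_MAX_EXTENT_SIZE) extents.
+	 */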
+
+ /* [BTRFS_MAX_EXTENT_SIZE][sectorsize] */
+ ret = btrfs_set_extent_delalloc(BTRFS_I(inode), BTRFS_MAX_EXTENT_SIZE,
+ BTRFS_MAX_EXTENT_SIZE + sectorsize - 1,
+ 0, NULL);
+ if (ret) {
+ test_err("btrfs_set_extent_delalloc returned %d", ret);
+ goto out;
+ }
+ if (BTRFS_I(inode)->outstanding_extents != 2) {
+ ret = -EINVAL;
+ test_err("miscount, wanted 2, got %u",
+ BTRFS_I(inode)->outstanding_extents);
+ goto out;
+ }
+
+ /* [BTRFS_MAX_EXTENT_SIZE/2][sectorsize HOLE][the rest] */
+ ret = clear_extent_bit(&BTRFS_I(inode)->io_tree,
+ BTRFS_MAX_EXTENT_SIZE >> 1,
+ (BTRFS_MAX_EXTENT_SIZE >> 1) + sectorsize - 1,
+ EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
+ EXTENT_UPTODATE, NULL);
+ if (ret) {
+ test_err("clear_extent_bit returned %d", ret);
+ goto out;
+ }
+ if (BTRFS_I(inode)->outstanding_extents != 2) {
+ ret = -EINVAL;
+ test_err("miscount, wanted 2, got %u",
+ BTRFS_I(inode)->outstanding_extents);
+ goto out;
+ }
+
+ /* [BTRFS_MAX_EXTENT_SIZE][sectorsize] */
+ ret = btrfs_set_extent_delalloc(BTRFS_I(inode), BTRFS_MAX_EXTENT_SIZE >> 1,
+ (BTRFS_MAX_EXTENT_SIZE >> 1)
+ + sectorsize - 1,
+ 0, NULL);
+ if (ret) {
+ test_err("btrfs_set_extent_delalloc returned %d", ret);
+ goto out;
+ }
+ if (BTRFS_I(inode)->outstanding_extents != 2) {
+ ret = -EINVAL;
+ test_err("miscount, wanted 2, got %u",
+ BTRFS_I(inode)->outstanding_extents);
+ goto out;
+ }
+
+ /*
+ * [BTRFS_MAX_EXTENT_SIZE+sectorsize][sectorsize HOLE][BTRFS_MAX_EXTENT_SIZE+sectorsize]
+ */
+ ret = btrfs_set_extent_delalloc(BTRFS_I(inode),
+ BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize,
+ (BTRFS_MAX_EXTENT_SIZE << 1) + 3 * sectorsize - 1,
+ 0, NULL);
+ if (ret) {
+ test_err("btrfs_set_extent_delalloc returned %d", ret);
+ goto out;
+ }
+ if (BTRFS_I(inode)->outstanding_extents != 4) {
+ ret = -EINVAL;
+ test_err("miscount, wanted 4, got %u",
+ BTRFS_I(inode)->outstanding_extents);
+ goto out;
+ }
+
+ /*
+ * [BTRFS_MAX_EXTENT_SIZE+sectorsize][sectorsize][BTRFS_MAX_EXTENT_SIZE+sectorsize]
+ */
+ ret = btrfs_set_extent_delalloc(BTRFS_I(inode),
+ BTRFS_MAX_EXTENT_SIZE + sectorsize,
+ BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, 0, NULL);
+ if (ret) {
+ test_err("btrfs_set_extent_delalloc returned %d", ret);
+ goto out;
+ }
+ if (BTRFS_I(inode)->outstanding_extents != 3) {
+ ret = -EINVAL;
+ test_err("miscount, wanted 3, got %u",
+ BTRFS_I(inode)->outstanding_extents);
+ goto out;
+ }
+
+	/* [BTRFS_MAX_EXTENT_SIZE+sectorsize][sectorsize HOLE][BTRFS_MAX_EXTENT_SIZE+sectorsize] */
+ ret = clear_extent_bit(&BTRFS_I(inode)->io_tree,
+ BTRFS_MAX_EXTENT_SIZE + sectorsize,
+ BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1,
+ EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
+ EXTENT_UPTODATE, NULL);
+ if (ret) {
+ test_err("clear_extent_bit returned %d", ret);
+ goto out;
+ }
+ if (BTRFS_I(inode)->outstanding_extents != 4) {
+ ret = -EINVAL;
+ test_err("miscount, wanted 4, got %u",
+ BTRFS_I(inode)->outstanding_extents);
+ goto out;
+ }
+
+ /*
+ * Refill the hole again just for good measure, because I thought it
+ * might fail and I'd rather satisfy my paranoia at this point.
+ */
+ ret = btrfs_set_extent_delalloc(BTRFS_I(inode),
+ BTRFS_MAX_EXTENT_SIZE + sectorsize,
+ BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, 0, NULL);
+ if (ret) {
+ test_err("btrfs_set_extent_delalloc returned %d", ret);
+ goto out;
+ }
+ if (BTRFS_I(inode)->outstanding_extents != 3) {
+ ret = -EINVAL;
+ test_err("miscount, wanted 3, got %u",
+ BTRFS_I(inode)->outstanding_extents);
+ goto out;
+ }
+
+ /* Empty */
+ ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
+ EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
+ EXTENT_UPTODATE, NULL);
+ if (ret) {
+ test_err("clear_extent_bit returned %d", ret);
+ goto out;
+ }
+ if (BTRFS_I(inode)->outstanding_extents) {
+ ret = -EINVAL;
+ test_err("miscount, wanted 0, got %u",
+ BTRFS_I(inode)->outstanding_extents);
+ goto out;
+ }
+ ret = 0;
+out:
+ if (ret)
+ clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
+ EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
+ EXTENT_UPTODATE, NULL);
+ iput(inode);
+ btrfs_free_dummy_root(root);
+ btrfs_free_dummy_fs_info(fs_info);
+ return ret;
+}
+
+int btrfs_test_inodes(u32 sectorsize, u32 nodesize)
+{
+ int ret;
+
+ test_msg("running inode tests");
+
+ set_bit(EXTENT_FLAG_COMPRESSED, &compressed_only);
+ set_bit(EXTENT_FLAG_PREALLOC, &prealloc_only);
+
+ ret = test_btrfs_get_extent(sectorsize, nodesize);
+ if (ret)
+ return ret;
+ ret = test_hole_first(sectorsize, nodesize);
+ if (ret)
+ return ret;
+ return test_extent_accounting(sectorsize, nodesize);
+}
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
new file mode 100644
index 0000000000..3fc8dc3fd9
--- /dev/null
+++ b/fs/btrfs/tests/qgroup-tests.c
@@ -0,0 +1,559 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2013 Facebook. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include "btrfs-tests.h"
+#include "../ctree.h"
+#include "../transaction.h"
+#include "../disk-io.h"
+#include "../qgroup.h"
+#include "../backref.h"
+#include "../fs.h"
+#include "../accessors.h"
+
+static int insert_normal_tree_ref(struct btrfs_root *root, u64 bytenr,
+ u64 num_bytes, u64 parent, u64 root_objectid)
+{
+ struct btrfs_trans_handle trans;
+ struct btrfs_extent_item *item;
+ struct btrfs_extent_inline_ref *iref;
+ struct btrfs_tree_block_info *block_info;
+ struct btrfs_path *path;
+ struct extent_buffer *leaf;
+ struct btrfs_key ins;
+ u32 size = sizeof(*item) + sizeof(*iref) + sizeof(*block_info);
+ int ret;
+
+ btrfs_init_dummy_trans(&trans, NULL);
+
+ ins.objectid = bytenr;
+ ins.type = BTRFS_EXTENT_ITEM_KEY;
+ ins.offset = num_bytes;
+
+ path = btrfs_alloc_path();
+ if (!path) {
+		test_std_err(TEST_ALLOC_PATH);
+ return -ENOMEM;
+ }
+
+ ret = btrfs_insert_empty_item(&trans, root, path, &ins, size);
+ if (ret) {
+ test_err("couldn't insert ref %d", ret);
+ btrfs_free_path(path);
+ return ret;
+ }
+
+ leaf = path->nodes[0];
+ item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
+ btrfs_set_extent_refs(leaf, item, 1);
+ btrfs_set_extent_generation(leaf, item, 1);
+ btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_TREE_BLOCK);
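+	/*
+	 * A tree block's extent item is laid out as a btrfs_extent_item
+	 * followed by a btrfs_tree_block_info followed by the inline refs,
+	 * hence the pointer math stepping past each struct below.
+	 */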
+ block_info = (struct btrfs_tree_block_info *)(item + 1);
+ btrfs_set_tree_block_level(leaf, block_info, 0);
+ iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
+ if (parent > 0) {
+ btrfs_set_extent_inline_ref_type(leaf, iref,
+ BTRFS_SHARED_BLOCK_REF_KEY);
+ btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
+ } else {
+ btrfs_set_extent_inline_ref_type(leaf, iref, BTRFS_TREE_BLOCK_REF_KEY);
+ btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
+ }
+ btrfs_free_path(path);
+ return 0;
+}
+
+static int add_tree_ref(struct btrfs_root *root, u64 bytenr, u64 num_bytes,
+ u64 parent, u64 root_objectid)
+{
+ struct btrfs_trans_handle trans;
+ struct btrfs_extent_item *item;
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ u64 refs;
+ int ret;
+
+ btrfs_init_dummy_trans(&trans, NULL);
+
+ key.objectid = bytenr;
+ key.type = BTRFS_EXTENT_ITEM_KEY;
+ key.offset = num_bytes;
+
+ path = btrfs_alloc_path();
+ if (!path) {
+		test_std_err(TEST_ALLOC_PATH);
+ return -ENOMEM;
+ }
+
+ ret = btrfs_search_slot(&trans, root, &key, path, 0, 1);
+ if (ret) {
+ test_err("couldn't find extent ref");
+ btrfs_free_path(path);
+ return ret;
+ }
+
+ item = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_extent_item);
+ refs = btrfs_extent_refs(path->nodes[0], item);
+ btrfs_set_extent_refs(path->nodes[0], item, refs + 1);
+ btrfs_release_path(path);
+
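+	/*
+	 * Backref keys: a shared block ref is keyed on the parent block's
+	 * bytenr, a tree block ref on the owning root's objectid.
+	 */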
+ key.objectid = bytenr;
+ if (parent) {
+ key.type = BTRFS_SHARED_BLOCK_REF_KEY;
+ key.offset = parent;
+ } else {
+ key.type = BTRFS_TREE_BLOCK_REF_KEY;
+ key.offset = root_objectid;
+ }
+
+ ret = btrfs_insert_empty_item(&trans, root, path, &key, 0);
+ if (ret)
+ test_err("failed to insert backref");
+ btrfs_free_path(path);
+ return ret;
+}
+
+static int remove_extent_item(struct btrfs_root *root, u64 bytenr,
+ u64 num_bytes)
+{
+ struct btrfs_trans_handle trans;
+ struct btrfs_key key;
+ struct btrfs_path *path;
+ int ret;
+
+ btrfs_init_dummy_trans(&trans, NULL);
+
+ key.objectid = bytenr;
+ key.type = BTRFS_EXTENT_ITEM_KEY;
+ key.offset = num_bytes;
+
+ path = btrfs_alloc_path();
+ if (!path) {
+		test_std_err(TEST_ALLOC_PATH);
+ return -ENOMEM;
+ }
+
+ ret = btrfs_search_slot(&trans, root, &key, path, -1, 1);
+ if (ret) {
+ test_err("didn't find our key %d", ret);
+ btrfs_free_path(path);
+ return ret;
+ }
+ btrfs_del_item(&trans, root, path);
+ btrfs_free_path(path);
+ return 0;
+}
+
+static int remove_extent_ref(struct btrfs_root *root, u64 bytenr,
+ u64 num_bytes, u64 parent, u64 root_objectid)
+{
+ struct btrfs_trans_handle trans;
+ struct btrfs_extent_item *item;
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ u64 refs;
+ int ret;
+
+ btrfs_init_dummy_trans(&trans, NULL);
+
+ key.objectid = bytenr;
+ key.type = BTRFS_EXTENT_ITEM_KEY;
+ key.offset = num_bytes;
+
+ path = btrfs_alloc_path();
+ if (!path) {
+		test_std_err(TEST_ALLOC_PATH);
+ return -ENOMEM;
+ }
+
+ ret = btrfs_search_slot(&trans, root, &key, path, 0, 1);
+ if (ret) {
+ test_err("couldn't find extent ref");
+ btrfs_free_path(path);
+ return ret;
+ }
+
+ item = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_extent_item);
+ refs = btrfs_extent_refs(path->nodes[0], item);
+ btrfs_set_extent_refs(path->nodes[0], item, refs - 1);
+ btrfs_release_path(path);
+
+ key.objectid = bytenr;
+ if (parent) {
+ key.type = BTRFS_SHARED_BLOCK_REF_KEY;
+ key.offset = parent;
+ } else {
+ key.type = BTRFS_TREE_BLOCK_REF_KEY;
+ key.offset = root_objectid;
+ }
+
+ ret = btrfs_search_slot(&trans, root, &key, path, -1, 1);
+ if (ret) {
+ test_err("couldn't find backref %d", ret);
+ btrfs_free_path(path);
+ return ret;
+ }
+ btrfs_del_item(&trans, root, path);
+ btrfs_free_path(path);
+ return ret;
+}
+
+static int test_no_shared_qgroup(struct btrfs_root *root,
+ u32 sectorsize, u32 nodesize)
+{
+ struct btrfs_backref_walk_ctx ctx = { 0 };
+ struct btrfs_trans_handle trans;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct ulist *old_roots = NULL;
+ struct ulist *new_roots = NULL;
+ int ret;
+
+ btrfs_init_dummy_trans(&trans, fs_info);
+
+ test_msg("running qgroup add/remove tests");
+ ret = btrfs_create_qgroup(&trans, BTRFS_FS_TREE_OBJECTID);
+ if (ret) {
+ test_err("couldn't create a qgroup %d", ret);
+ return ret;
+ }
+
+ ctx.bytenr = nodesize;
+ ctx.trans = &trans;
+ ctx.fs_info = fs_info;
+
+ /*
+ * Since the test trans doesn't have the complicated delayed refs,
+	 * we can only test quota by calling btrfs_qgroup_account_extent()
+	 * directly.
+ */
+ ret = btrfs_find_all_roots(&ctx, false);
+ if (ret) {
+ test_err("couldn't find old roots: %d", ret);
+ return ret;
+ }
+ old_roots = ctx.roots;
+ ctx.roots = NULL;
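+
+	/*
+	 * The accounting protocol used throughout: snapshot the roots
+	 * referencing the bytenr before the change (old_roots), apply the
+	 * change, snapshot again (new_roots), then hand both lists to
+	 * btrfs_qgroup_account_extent().
+	 */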
+
+ ret = insert_normal_tree_ref(root, nodesize, nodesize, 0,
+ BTRFS_FS_TREE_OBJECTID);
+ if (ret) {
+ ulist_free(old_roots);
+ return ret;
+ }
+
+ ret = btrfs_find_all_roots(&ctx, false);
+ if (ret) {
+ ulist_free(old_roots);
+ test_err("couldn't find old roots: %d", ret);
+ return ret;
+ }
+ new_roots = ctx.roots;
+ ctx.roots = NULL;
+
+ ret = btrfs_qgroup_account_extent(&trans, nodesize, nodesize, old_roots,
+ new_roots);
+ if (ret) {
+ test_err("couldn't account space for a qgroup %d", ret);
+ return ret;
+ }
+
+ /* btrfs_qgroup_account_extent() always frees the ulists passed to it. */
+ old_roots = NULL;
+ new_roots = NULL;
+
+ if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID,
+ nodesize, nodesize)) {
+ test_err("qgroup counts didn't match expected values");
+ return -EINVAL;
+ }
+
+ ret = btrfs_find_all_roots(&ctx, false);
+ if (ret) {
+ test_err("couldn't find old roots: %d", ret);
+ return ret;
+ }
+ old_roots = ctx.roots;
+ ctx.roots = NULL;
+
+ ret = remove_extent_item(root, nodesize, nodesize);
+ if (ret) {
+ ulist_free(old_roots);
+ return -EINVAL;
+ }
+
+ ret = btrfs_find_all_roots(&ctx, false);
+ if (ret) {
+ ulist_free(old_roots);
+ test_err("couldn't find old roots: %d", ret);
+ return ret;
+ }
+ new_roots = ctx.roots;
+ ctx.roots = NULL;
+
+ ret = btrfs_qgroup_account_extent(&trans, nodesize, nodesize, old_roots,
+ new_roots);
+ if (ret) {
+ test_err("couldn't account space for a qgroup %d", ret);
+ return -EINVAL;
+ }
+
+ if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID, 0, 0)) {
+ test_err("qgroup counts didn't match expected values");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Add a ref for two different roots to make sure the shared value comes out
+ * right, then remove one of the roots and make sure the exclusive count is
+ * adjusted properly.
+ */
+static int test_multiple_refs(struct btrfs_root *root,
+ u32 sectorsize, u32 nodesize)
+{
+ struct btrfs_backref_walk_ctx ctx = { 0 };
+ struct btrfs_trans_handle trans;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct ulist *old_roots = NULL;
+ struct ulist *new_roots = NULL;
+ int ret;
+
+ btrfs_init_dummy_trans(&trans, fs_info);
+
+ test_msg("running qgroup multiple refs test");
+
+ /*
+ * We have BTRFS_FS_TREE_OBJECTID created already from the
+ * previous test.
+ */
+ ret = btrfs_create_qgroup(&trans, BTRFS_FIRST_FREE_OBJECTID);
+ if (ret) {
+ test_err("couldn't create a qgroup %d", ret);
+ return ret;
+ }
+
+ ctx.bytenr = nodesize;
+ ctx.trans = &trans;
+ ctx.fs_info = fs_info;
+
+ ret = btrfs_find_all_roots(&ctx, false);
+ if (ret) {
+ test_err("couldn't find old roots: %d", ret);
+ return ret;
+ }
+ old_roots = ctx.roots;
+ ctx.roots = NULL;
+
+ ret = insert_normal_tree_ref(root, nodesize, nodesize, 0,
+ BTRFS_FS_TREE_OBJECTID);
+ if (ret) {
+ ulist_free(old_roots);
+ return ret;
+ }
+
+ ret = btrfs_find_all_roots(&ctx, false);
+ if (ret) {
+ ulist_free(old_roots);
+ test_err("couldn't find old roots: %d", ret);
+ return ret;
+ }
+ new_roots = ctx.roots;
+ ctx.roots = NULL;
+
+ ret = btrfs_qgroup_account_extent(&trans, nodesize, nodesize, old_roots,
+ new_roots);
+ if (ret) {
+ test_err("couldn't account space for a qgroup %d", ret);
+ return ret;
+ }
+
+ if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID,
+ nodesize, nodesize)) {
+ test_err("qgroup counts didn't match expected values");
+ return -EINVAL;
+ }
+
+ ret = btrfs_find_all_roots(&ctx, false);
+ if (ret) {
+ test_err("couldn't find old roots: %d", ret);
+ return ret;
+ }
+ old_roots = ctx.roots;
+ ctx.roots = NULL;
+
+ ret = add_tree_ref(root, nodesize, nodesize, 0,
+ BTRFS_FIRST_FREE_OBJECTID);
+ if (ret) {
+ ulist_free(old_roots);
+ return ret;
+ }
+
+ ret = btrfs_find_all_roots(&ctx, false);
+ if (ret) {
+ ulist_free(old_roots);
+ test_err("couldn't find old roots: %d", ret);
+ return ret;
+ }
+ new_roots = ctx.roots;
+ ctx.roots = NULL;
+
+ ret = btrfs_qgroup_account_extent(&trans, nodesize, nodesize, old_roots,
+ new_roots);
+ if (ret) {
+ test_err("couldn't account space for a qgroup %d", ret);
+ return ret;
+ }
+
+ if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID,
+ nodesize, 0)) {
+ test_err("qgroup counts didn't match expected values");
+ return -EINVAL;
+ }
+
+ if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FIRST_FREE_OBJECTID,
+ nodesize, 0)) {
+ test_err("qgroup counts didn't match expected values");
+ return -EINVAL;
+ }
+
+ ret = btrfs_find_all_roots(&ctx, false);
+ if (ret) {
+ test_err("couldn't find old roots: %d", ret);
+ return ret;
+ }
+ old_roots = ctx.roots;
+ ctx.roots = NULL;
+
+ ret = remove_extent_ref(root, nodesize, nodesize, 0,
+ BTRFS_FIRST_FREE_OBJECTID);
+ if (ret) {
+ ulist_free(old_roots);
+ return ret;
+ }
+
+ ret = btrfs_find_all_roots(&ctx, false);
+ if (ret) {
+ ulist_free(old_roots);
+ test_err("couldn't find old roots: %d", ret);
+ return ret;
+ }
+ new_roots = ctx.roots;
+ ctx.roots = NULL;
+
+ ret = btrfs_qgroup_account_extent(&trans, nodesize, nodesize, old_roots,
+ new_roots);
+ if (ret) {
+ test_err("couldn't account space for a qgroup %d", ret);
+ return ret;
+ }
+
+ if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FIRST_FREE_OBJECTID,
+ 0, 0)) {
+ test_err("qgroup counts didn't match expected values");
+ return -EINVAL;
+ }
+
+ if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID,
+ nodesize, nodesize)) {
+ test_err("qgroup counts didn't match expected values");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
+{
+ struct btrfs_fs_info *fs_info = NULL;
+ struct btrfs_root *root;
+ struct btrfs_root *tmp_root;
+ int ret = 0;
+
+ fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
+ if (!fs_info) {
+ test_std_err(TEST_ALLOC_FS_INFO);
+ return -ENOMEM;
+ }
+
+ root = btrfs_alloc_dummy_root(fs_info);
+ if (IS_ERR(root)) {
+ test_std_err(TEST_ALLOC_ROOT);
+ ret = PTR_ERR(root);
+ goto out;
+ }
+
+ /* We are using this root as our extent root */
+ root->root_key.objectid = BTRFS_EXTENT_TREE_OBJECTID;
+ root->root_key.type = BTRFS_ROOT_ITEM_KEY;
+ root->root_key.offset = 0;
+ btrfs_global_root_insert(root);
+
+ /*
+ * Some of the paths we test assume we have a filled out fs_info, so we
+ * just need to add the root in there so we don't panic.
+ */
+ root->fs_info->tree_root = root;
+ root->fs_info->quota_root = root;
+ set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
+
+ /*
+ * Can't use bytenr 0, some things freak out
+ * *cough*backref walking code*cough*
+ */
+ root->node = alloc_test_extent_buffer(root->fs_info, nodesize);
+ if (IS_ERR(root->node)) {
+ test_err("couldn't allocate dummy buffer");
+ ret = PTR_ERR(root->node);
+ goto out;
+ }
+ btrfs_set_header_level(root->node, 0);
+ btrfs_set_header_nritems(root->node, 0);
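+	/*
+	 * The dummy root node sits at bytenr nodesize; dummy tree block
+	 * allocations are handed out from alloc_bytenr, so advance it past
+	 * the root node to avoid overlaps.
+	 */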
+ root->alloc_bytenr += 2 * nodesize;
+
+ tmp_root = btrfs_alloc_dummy_root(fs_info);
+ if (IS_ERR(tmp_root)) {
+ test_std_err(TEST_ALLOC_ROOT);
+ ret = PTR_ERR(tmp_root);
+ goto out;
+ }
+
+ tmp_root->root_key.objectid = BTRFS_FS_TREE_OBJECTID;
+ root->fs_info->fs_root = tmp_root;
+ ret = btrfs_insert_fs_root(root->fs_info, tmp_root);
+ if (ret) {
+ test_err("couldn't insert fs root %d", ret);
+ goto out;
+ }
+ btrfs_put_root(tmp_root);
+
+ tmp_root = btrfs_alloc_dummy_root(fs_info);
+ if (IS_ERR(tmp_root)) {
+ test_std_err(TEST_ALLOC_ROOT);
+ ret = PTR_ERR(tmp_root);
+ goto out;
+ }
+
+ tmp_root->root_key.objectid = BTRFS_FIRST_FREE_OBJECTID;
+ ret = btrfs_insert_fs_root(root->fs_info, tmp_root);
+ if (ret) {
+ test_err("couldn't insert fs root %d", ret);
+ goto out;
+ }
+ btrfs_put_root(tmp_root);
+
+ test_msg("running qgroup tests");
+ ret = test_no_shared_qgroup(root, sectorsize, nodesize);
+ if (ret)
+ goto out;
+ ret = test_multiple_refs(root, sectorsize, nodesize);
+out:
+ btrfs_free_dummy_root(root);
+ btrfs_free_dummy_fs_info(fs_info);
+ return ret;
+}