author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit    ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree      b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/md/bcache
parent    Initial commit. (diff)
Adding upstream version 6.6.15. (upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/md/bcache')
-rw-r--r--  drivers/md/bcache/Kconfig          |   38
-rw-r--r--  drivers/md/bcache/Makefile         |    7
-rw-r--r--  drivers/md/bcache/alloc.c          |  736
-rw-r--r--  drivers/md/bcache/bcache.h         | 1049
-rw-r--r--  drivers/md/bcache/bcache_ondisk.h  |  446
-rw-r--r--  drivers/md/bcache/bset.c           | 1390
-rw-r--r--  drivers/md/bcache/bset.h           |  593
-rw-r--r--  drivers/md/bcache/btree.c          | 2821
-rw-r--r--  drivers/md/bcache/btree.h          |  417
-rw-r--r--  drivers/md/bcache/closure.c        |  207
-rw-r--r--  drivers/md/bcache/closure.h        |  378
-rw-r--r--  drivers/md/bcache/debug.c          |  263
-rw-r--r--  drivers/md/bcache/debug.h          |   35
-rw-r--r--  drivers/md/bcache/extents.c        |  630
-rw-r--r--  drivers/md/bcache/extents.h        |   15
-rw-r--r--  drivers/md/bcache/features.c       |   75
-rw-r--r--  drivers/md/bcache/features.h       |  113
-rw-r--r--  drivers/md/bcache/io.c             |  175
-rw-r--r--  drivers/md/bcache/journal.c        | 1000
-rw-r--r--  drivers/md/bcache/journal.h        |  188
-rw-r--r--  drivers/md/bcache/movinggc.c       |  252
-rw-r--r--  drivers/md/bcache/request.c        | 1345
-rw-r--r--  drivers/md/bcache/request.h        |   47
-rw-r--r--  drivers/md/bcache/stats.c          |  234
-rw-r--r--  drivers/md/bcache/stats.h          |   62
-rw-r--r--  drivers/md/bcache/super.c          | 2934
-rw-r--r--  drivers/md/bcache/sysfs.c          | 1203
-rw-r--r--  drivers/md/bcache/sysfs.h          |  142
-rw-r--r--  drivers/md/bcache/trace.c          |   53
-rw-r--r--  drivers/md/bcache/util.c           |  287
-rw-r--r--  drivers/md/bcache/util.h           |  562
-rw-r--r--  drivers/md/bcache/writeback.c      | 1100
-rw-r--r--  drivers/md/bcache/writeback.h      |  155
33 files changed, 18952 insertions, 0 deletions
diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
new file mode 100644
index 0000000000..529c9d04e9
--- /dev/null
+++ b/drivers/md/bcache/Kconfig
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config BCACHE
+ tristate "Block device as cache"
+ select BLOCK_HOLDER_DEPRECATED if SYSFS
+ select CRC64
+ help
+ Allows a block device to be used as cache for other devices; uses
+ a btree for indexing and the layout is optimized for SSDs.
+
+ See Documentation/admin-guide/bcache.rst for details.
+
+config BCACHE_DEBUG
+ bool "Bcache debugging"
+ depends on BCACHE
+ help
+ Don't select this option unless you're a developer
+
+ Enables extra debugging tools, allows expensive runtime checks to be
+ turned on.
+
+config BCACHE_CLOSURES_DEBUG
+ bool "Debug closures"
+ depends on BCACHE
+ select DEBUG_FS
+ help
+ Keeps all active closures in a linked list and provides a debugfs
+ interface to list them, which makes it possible to see asynchronous
+ operations that get stuck.
+
+config BCACHE_ASYNC_REGISTRATION
+ bool "Asynchronous device registration"
+ depends on BCACHE
+ help
+ Add a sysfs file /sys/fs/bcache/register_async. Writing a device
+ path into this file returns immediately, and the real
+ registration work is handled by a kernel work queue in an
+ asynchronous way.
diff --git a/drivers/md/bcache/Makefile b/drivers/md/bcache/Makefile
new file mode 100644
index 0000000000..5b87e59676
--- /dev/null
+++ b/drivers/md/bcache/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_BCACHE) += bcache.o
+
+bcache-y := alloc.o bset.o btree.o closure.o debug.o extents.o\
+ io.o journal.o movinggc.o request.o stats.o super.o sysfs.o trace.o\
+ util.o writeback.o features.o
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
new file mode 100644
index 0000000000..ce13c272c3
--- /dev/null
+++ b/drivers/md/bcache/alloc.c
@@ -0,0 +1,736 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Primary bucket allocation code
+ *
+ * Copyright 2012 Google, Inc.
+ *
+ * Allocation in bcache is done in terms of buckets:
+ *
+ * Each bucket has an associated 8 bit gen; this gen corresponds to the gen in
+ * btree pointers - they must match for the pointer to be considered valid.
+ *
+ * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
+ * bucket simply by incrementing its gen.
+ *
+ * The gens (along with the priorities; it's really the gens that are important,
+ * but the code is named as if it's the priorities) are written in an arbitrary list
+ * of buckets on disk, with a pointer to them in the journal header.
+ *
+ * When we invalidate a bucket, we have to write its new gen to disk and wait
+ * for that write to complete before we use it - otherwise after a crash we
+ * could have pointers that appeared to be good but pointed to data that had
+ * been overwritten.
+ *
+ * Since the gens and priorities are all stored contiguously on disk, we can
+ * batch this up: We fill up the free_inc list with freshly invalidated buckets,
+ * call prio_write(), and when prio_write() finishes we pull buckets off the
+ * free_inc list and optionally discard them.
+ *
+ * free_inc isn't the only freelist - if it was, we'd often have to sleep while
+ * priorities and gens were being written before we could allocate. c->free is a
+ * smaller freelist, and buckets on that list are always ready to be used.
+ *
+ * If we've got discards enabled, that happens when a bucket moves from the
+ * free_inc list to the free list.
+ *
+ * There is another freelist, because sometimes we have buckets that we know
+ * have nothing pointing into them - these we can reuse without waiting for
+ * priorities to be rewritten. These come from freed btree nodes and buckets
+ * that garbage collection discovered no longer had valid keys pointing into
+ * them (because they were overwritten). That's the unused list - buckets on the
+ * unused list move to the free list, optionally being discarded in the process.
+ *
+ * It's also important to ensure that gens don't wrap around - with respect to
+ * either the oldest gen in the btree or the gen on disk. This is quite
+ * difficult to do in practice, but we explicitly guard against it anyways - if
+ * a bucket is in danger of wrapping around we simply skip invalidating it that
+ * time around, and we garbage collect or rewrite the priorities sooner than we
+ * would have otherwise.
+ *
+ * bch_bucket_alloc() allocates a single bucket from a specific cache.
+ *
+ * bch_bucket_alloc_set() allocates one bucket from different caches
+ * out of a cache set.
+ *
+ * free_some_buckets() drives all the processes described above. It's called
+ * from bch_bucket_alloc() and a few other places that need to make sure free
+ * buckets are ready.
+ *
+ * invalidate_buckets_(lru|fifo)() find buckets that are available to be
+ * invalidated, and then invalidate them and stick them on the free_inc list -
+ * in either lru or fifo order.
+ */
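+
+/*
+ * Rough sketch of the gen check described above; the real test is
+ * ptr_stale() in bcache.h, this is just the idea:
+ *
+ *	bool ptr_ok = PTR_GEN(k, i) == PTR_BUCKET(c, k, i)->gen;
+ *
+ * Reusing a bucket is then just bch_inc_gen(ca, b), which makes every
+ * pointer that still carries the old gen stale.
+ */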
+
+#include "bcache.h"
+#include "btree.h"
+
+#include <linux/blkdev.h>
+#include <linux/kthread.h>
+#include <linux/random.h>
+#include <trace/events/bcache.h>
+
+#define MAX_OPEN_BUCKETS 128
+
+/* Bucket heap / gen */
+
+uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
+{
+ uint8_t ret = ++b->gen;
+
+ ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b));
+ WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX);
+
+ return ret;
+}
+
+void bch_rescale_priorities(struct cache_set *c, int sectors)
+{
+ struct cache *ca;
+ struct bucket *b;
+ unsigned long next = c->nbuckets * c->cache->sb.bucket_size / 1024;
+ int r;
+
+ atomic_sub(sectors, &c->rescale);
+
+ do {
+ r = atomic_read(&c->rescale);
+
+ if (r >= 0)
+ return;
+ } while (atomic_cmpxchg(&c->rescale, r, r + next) != r);
+
+ mutex_lock(&c->bucket_lock);
+
+ c->min_prio = USHRT_MAX;
+
+ ca = c->cache;
+ for_each_bucket(b, ca)
+ if (b->prio &&
+ b->prio != BTREE_PRIO &&
+ !atomic_read(&b->pin)) {
+ b->prio--;
+ c->min_prio = min(c->min_prio, b->prio);
+ }
+
+ mutex_unlock(&c->bucket_lock);
+}
+
+/*
+ * Background allocation thread: scans for buckets to be invalidated,
+ * invalidates them, rewrites prios/gens (marking them as invalidated on disk),
+ * then optionally issues discard commands to the newly free buckets, then puts
+ * them on the various freelists.
+ */
+
+static inline bool can_inc_bucket_gen(struct bucket *b)
+{
+ return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX;
+}
+
+bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)
+{
+ BUG_ON(!ca->set->gc_mark_valid);
+
+ return (!GC_MARK(b) ||
+ GC_MARK(b) == GC_MARK_RECLAIMABLE) &&
+ !atomic_read(&b->pin) &&
+ can_inc_bucket_gen(b);
+}
+
+void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
+{
+ lockdep_assert_held(&ca->set->bucket_lock);
+ BUG_ON(GC_MARK(b) && GC_MARK(b) != GC_MARK_RECLAIMABLE);
+
+ if (GC_SECTORS_USED(b))
+ trace_bcache_invalidate(ca, b - ca->buckets);
+
+ bch_inc_gen(ca, b);
+ b->prio = INITIAL_PRIO;
+ atomic_inc(&b->pin);
+}
+
+static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
+{
+ __bch_invalidate_one_bucket(ca, b);
+
+ fifo_push(&ca->free_inc, b - ca->buckets);
+}
+
+/*
+ * Determines what order we're going to reuse buckets, smallest bucket_prio()
+ * first: we also take into account the number of sectors of live data in that
+ * bucket, and in order for that multiply to make sense we have to scale the bucket priorities.
+ *
+ * Thus, we scale the bucket priorities so that the bucket with the smallest
+ * prio is worth 1/8th of what INITIAL_PRIO is worth.
+ */
+
+#define bucket_prio(b) \
+({ \
+ unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8; \
+ \
+ (b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b); \
+})
+
+#define bucket_max_cmp(l, r) (bucket_prio(l) < bucket_prio(r))
+#define bucket_min_cmp(l, r) (bucket_prio(l) > bucket_prio(r))
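+
+/*
+ * Worked example of the scaling above, assuming INITIAL_PRIO == 32768 and
+ * ca->set->min_prio == 0 (illustrative values only):
+ *
+ *	min_prio addend = (32768 - 0) / 8 = 4096
+ *	coldest bucket  -> (0 - 0 + 4096) * GC_SECTORS_USED(b)
+ *	hottest bucket  -> (32768 - 0 + 4096) * GC_SECTORS_USED(b)
+ *
+ * so even the least recently used bucket keeps 1/8th of INITIAL_PRIO worth
+ * of weight, and the live-sector count multiplies both.
+ */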
+
+static void invalidate_buckets_lru(struct cache *ca)
+{
+ struct bucket *b;
+ ssize_t i;
+
+ ca->heap.used = 0;
+
+ for_each_bucket(b, ca) {
+ if (!bch_can_invalidate_bucket(ca, b))
+ continue;
+
+ if (!heap_full(&ca->heap))
+ heap_add(&ca->heap, b, bucket_max_cmp);
+ else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
+ ca->heap.data[0] = b;
+ heap_sift(&ca->heap, 0, bucket_max_cmp);
+ }
+ }
+
+ for (i = ca->heap.used / 2 - 1; i >= 0; --i)
+ heap_sift(&ca->heap, i, bucket_min_cmp);
+
+ while (!fifo_full(&ca->free_inc)) {
+ if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
+ /*
+ * We don't want to be calling invalidate_buckets()
+ * multiple times when it can't do anything
+ */
+ ca->invalidate_needs_gc = 1;
+ wake_up_gc(ca->set);
+ return;
+ }
+
+ bch_invalidate_one_bucket(ca, b);
+ }
+}
+
+static void invalidate_buckets_fifo(struct cache *ca)
+{
+ struct bucket *b;
+ size_t checked = 0;
+
+ while (!fifo_full(&ca->free_inc)) {
+ if (ca->fifo_last_bucket < ca->sb.first_bucket ||
+ ca->fifo_last_bucket >= ca->sb.nbuckets)
+ ca->fifo_last_bucket = ca->sb.first_bucket;
+
+ b = ca->buckets + ca->fifo_last_bucket++;
+
+ if (bch_can_invalidate_bucket(ca, b))
+ bch_invalidate_one_bucket(ca, b);
+
+ if (++checked >= ca->sb.nbuckets) {
+ ca->invalidate_needs_gc = 1;
+ wake_up_gc(ca->set);
+ return;
+ }
+ }
+}
+
+static void invalidate_buckets_random(struct cache *ca)
+{
+ struct bucket *b;
+ size_t checked = 0;
+
+ while (!fifo_full(&ca->free_inc)) {
+ size_t n;
+
+ get_random_bytes(&n, sizeof(n));
+
+ n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
+ n += ca->sb.first_bucket;
+
+ b = ca->buckets + n;
+
+ if (bch_can_invalidate_bucket(ca, b))
+ bch_invalidate_one_bucket(ca, b);
+
+ if (++checked >= ca->sb.nbuckets / 2) {
+ ca->invalidate_needs_gc = 1;
+ wake_up_gc(ca->set);
+ return;
+ }
+ }
+}
+
+static void invalidate_buckets(struct cache *ca)
+{
+ BUG_ON(ca->invalidate_needs_gc);
+
+ switch (CACHE_REPLACEMENT(&ca->sb)) {
+ case CACHE_REPLACEMENT_LRU:
+ invalidate_buckets_lru(ca);
+ break;
+ case CACHE_REPLACEMENT_FIFO:
+ invalidate_buckets_fifo(ca);
+ break;
+ case CACHE_REPLACEMENT_RANDOM:
+ invalidate_buckets_random(ca);
+ break;
+ }
+}
+
+#define allocator_wait(ca, cond) \
+do { \
+ while (1) { \
+ set_current_state(TASK_INTERRUPTIBLE); \
+ if (cond) \
+ break; \
+ \
+ mutex_unlock(&(ca)->set->bucket_lock); \
+ if (kthread_should_stop() || \
+ test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)) { \
+ set_current_state(TASK_RUNNING); \
+ goto out; \
+ } \
+ \
+ schedule(); \
+ mutex_lock(&(ca)->set->bucket_lock); \
+ } \
+ __set_current_state(TASK_RUNNING); \
+} while (0)
+
+static int bch_allocator_push(struct cache *ca, long bucket)
+{
+ unsigned int i;
+
+ /* Prios/gens are actually the most important reserve */
+ if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
+ return true;
+
+ for (i = 0; i < RESERVE_NR; i++)
+ if (fifo_push(&ca->free[i], bucket))
+ return true;
+
+ return false;
+}
+
+static int bch_allocator_thread(void *arg)
+{
+ struct cache *ca = arg;
+
+ mutex_lock(&ca->set->bucket_lock);
+
+ while (1) {
+ /*
+ * First, we pull buckets off of the unused and free_inc lists,
+ * possibly issue discards to them, then we add the bucket to
+ * the free list:
+ */
+ while (1) {
+ long bucket;
+
+ if (!fifo_pop(&ca->free_inc, bucket))
+ break;
+
+ if (ca->discard) {
+ mutex_unlock(&ca->set->bucket_lock);
+ blkdev_issue_discard(ca->bdev,
+ bucket_to_sector(ca->set, bucket),
+ ca->sb.bucket_size, GFP_KERNEL);
+ mutex_lock(&ca->set->bucket_lock);
+ }
+
+ allocator_wait(ca, bch_allocator_push(ca, bucket));
+ wake_up(&ca->set->btree_cache_wait);
+ wake_up(&ca->set->bucket_wait);
+ }
+
+ /*
+ * We've run out of free buckets, we need to find some buckets
+ * we can invalidate. First, invalidate them in memory and add
+ * them to the free_inc list:
+ */
+
+retry_invalidate:
+ allocator_wait(ca, ca->set->gc_mark_valid &&
+ !ca->invalidate_needs_gc);
+ invalidate_buckets(ca);
+
+ /*
+ * Now, we write their new gens to disk so we can start writing
+ * new stuff to them:
+ */
+ allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
+ if (CACHE_SYNC(&ca->sb)) {
+ /*
+ * This could deadlock if an allocation with a btree
+ * node locked ever blocked - having the btree node
+ * locked would block garbage collection, but here we're
+ * waiting on garbage collection before we invalidate
+ * and free anything.
+ *
+ * But this should be safe since the btree code always
+ * uses btree_check_reserve() before allocating now, and
+ * if it fails it blocks without btree nodes locked.
+ */
+ if (!fifo_full(&ca->free_inc))
+ goto retry_invalidate;
+
+ if (bch_prio_write(ca, false) < 0) {
+ ca->invalidate_needs_gc = 1;
+ wake_up_gc(ca->set);
+ }
+ }
+ }
+out:
+ wait_for_kthread_stop();
+ return 0;
+}
+
+/* Allocation */
+
+long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait)
+{
+ DEFINE_WAIT(w);
+ struct bucket *b;
+ long r;
+
+
+ /* No allocation if CACHE_SET_IO_DISABLE bit is set */
+ if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)))
+ return -1;
+
+ /* fastpath */
+ if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
+ fifo_pop(&ca->free[reserve], r))
+ goto out;
+
+ if (!wait) {
+ trace_bcache_alloc_fail(ca, reserve);
+ return -1;
+ }
+
+ do {
+ prepare_to_wait(&ca->set->bucket_wait, &w,
+ TASK_UNINTERRUPTIBLE);
+
+ mutex_unlock(&ca->set->bucket_lock);
+ schedule();
+ mutex_lock(&ca->set->bucket_lock);
+ } while (!fifo_pop(&ca->free[RESERVE_NONE], r) &&
+ !fifo_pop(&ca->free[reserve], r));
+
+ finish_wait(&ca->set->bucket_wait, &w);
+out:
+ if (ca->alloc_thread)
+ wake_up_process(ca->alloc_thread);
+
+ trace_bcache_alloc(ca, reserve);
+
+ if (expensive_debug_checks(ca->set)) {
+ size_t iter;
+ long i;
+ unsigned int j;
+
+ for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
+ BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);
+
+ for (j = 0; j < RESERVE_NR; j++)
+ fifo_for_each(i, &ca->free[j], iter)
+ BUG_ON(i == r);
+ fifo_for_each(i, &ca->free_inc, iter)
+ BUG_ON(i == r);
+ }
+
+ b = ca->buckets + r;
+
+ BUG_ON(atomic_read(&b->pin) != 1);
+
+ SET_GC_SECTORS_USED(b, ca->sb.bucket_size);
+
+ if (reserve <= RESERVE_PRIO) {
+ SET_GC_MARK(b, GC_MARK_METADATA);
+ SET_GC_MOVE(b, 0);
+ b->prio = BTREE_PRIO;
+ } else {
+ SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+ SET_GC_MOVE(b, 0);
+ b->prio = INITIAL_PRIO;
+ }
+
+ if (ca->set->avail_nbuckets > 0) {
+ ca->set->avail_nbuckets--;
+ bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
+ }
+
+ return r;
+}
+
+void __bch_bucket_free(struct cache *ca, struct bucket *b)
+{
+ SET_GC_MARK(b, 0);
+ SET_GC_SECTORS_USED(b, 0);
+
+ if (ca->set->avail_nbuckets < ca->set->nbuckets) {
+ ca->set->avail_nbuckets++;
+ bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
+ }
+}
+
+void bch_bucket_free(struct cache_set *c, struct bkey *k)
+{
+ unsigned int i;
+
+ for (i = 0; i < KEY_PTRS(k); i++)
+ __bch_bucket_free(c->cache, PTR_BUCKET(c, k, i));
+}
+
+int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
+ struct bkey *k, bool wait)
+{
+ struct cache *ca;
+ long b;
+
+ /* No allocation if CACHE_SET_IO_DISABLE bit is set */
+ if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
+ return -1;
+
+ lockdep_assert_held(&c->bucket_lock);
+
+ bkey_init(k);
+
+ ca = c->cache;
+ b = bch_bucket_alloc(ca, reserve, wait);
+ if (b == -1)
+ goto err;
+
+ k->ptr[0] = MAKE_PTR(ca->buckets[b].gen,
+ bucket_to_sector(c, b),
+ ca->sb.nr_this_dev);
+
+ SET_KEY_PTRS(k, 1);
+
+ return 0;
+err:
+ bch_bucket_free(c, k);
+ bkey_put(c, k);
+ return -1;
+}
+
+int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
+ struct bkey *k, bool wait)
+{
+ int ret;
+
+ mutex_lock(&c->bucket_lock);
+ ret = __bch_bucket_alloc_set(c, reserve, k, wait);
+ mutex_unlock(&c->bucket_lock);
+ return ret;
+}
+
+/* Sector allocator */
+
+struct open_bucket {
+ struct list_head list;
+ unsigned int last_write_point;
+ unsigned int sectors_free;
+ BKEY_PADDED(key);
+};
+
+/*
+ * We keep multiple buckets open for writes, and try to segregate different
+ * write streams for better cache utilization: first we try to segregate flash
+ * only volume write streams from cached devices, secondly we look for a bucket
+ * where the last write to it was sequential with the current write, and
+ * failing that we look for a bucket that was last used by the same task.
+ *
+ * The idea is that if you've got multiple tasks pulling data into the cache at the
+ * same time, you'll get better cache utilization if you try to segregate their
+ * data and preserve locality.
+ *
+ * For example, dirty sectors of a flash only volume are not reclaimable; if
+ * they are mixed into the same buckets as dirty sectors of a cached device,
+ * those buckets stay marked as dirty and won't be reclaimed, even after the
+ * cached device's dirty data has been written back to the backing device.
+ *
+ * And say you're starting Firefox at the same time you're copying a
+ * bunch of files. Firefox will likely end up being fairly hot and stay in the
+ * cache awhile, but the data you copied might not be; if you wrote all that
+ * data to the same buckets it'd get invalidated at the same time.
+ *
+ * Both of those tasks will be doing fairly random IO so we can't rely on
+ * detecting sequential IO to segregate their data, but going off of the task
+ * should be a sane heuristic.
+ */
+static struct open_bucket *pick_data_bucket(struct cache_set *c,
+ const struct bkey *search,
+ unsigned int write_point,
+ struct bkey *alloc)
+{
+ struct open_bucket *ret, *ret_task = NULL;
+
+ list_for_each_entry_reverse(ret, &c->data_buckets, list)
+ if (UUID_FLASH_ONLY(&c->uuids[KEY_INODE(&ret->key)]) !=
+ UUID_FLASH_ONLY(&c->uuids[KEY_INODE(search)]))
+ continue;
+ else if (!bkey_cmp(&ret->key, search))
+ goto found;
+ else if (ret->last_write_point == write_point)
+ ret_task = ret;
+
+ ret = ret_task ?: list_first_entry(&c->data_buckets,
+ struct open_bucket, list);
+found:
+ if (!ret->sectors_free && KEY_PTRS(alloc)) {
+ ret->sectors_free = c->cache->sb.bucket_size;
+ bkey_copy(&ret->key, alloc);
+ bkey_init(alloc);
+ }
+
+ if (!ret->sectors_free)
+ ret = NULL;
+
+ return ret;
+}
+
+/*
+ * Allocates some space in the cache to write to, sets k to point to the newly
+ * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the
+ * end of the newly allocated space).
+ *
+ * May allocate fewer sectors than @sectors; KEY_SIZE(k) indicates how many
+ * sectors were actually allocated.
+ *
+ * If s->writeback is true, will not fail.
+ */
+bool bch_alloc_sectors(struct cache_set *c,
+ struct bkey *k,
+ unsigned int sectors,
+ unsigned int write_point,
+ unsigned int write_prio,
+ bool wait)
+{
+ struct open_bucket *b;
+ BKEY_PADDED(key) alloc;
+ unsigned int i;
+
+ /*
+ * We might have to allocate a new bucket, which we can't do with a
+ * spinlock held. So if we have to allocate, we drop the lock, allocate
+ * and then retry. KEY_PTRS() indicates whether alloc points to
+ * allocated bucket(s).
+ */
+
+ bkey_init(&alloc.key);
+ spin_lock(&c->data_bucket_lock);
+
+ while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
+ unsigned int watermark = write_prio
+ ? RESERVE_MOVINGGC
+ : RESERVE_NONE;
+
+ spin_unlock(&c->data_bucket_lock);
+
+ if (bch_bucket_alloc_set(c, watermark, &alloc.key, wait))
+ return false;
+
+ spin_lock(&c->data_bucket_lock);
+ }
+
+ /*
+ * If we had to allocate, we might race and not need to allocate the
+ * second time we call pick_data_bucket(). If we allocated a bucket but
+ * didn't use it, drop the refcount bch_bucket_alloc_set() took:
+ */
+ if (KEY_PTRS(&alloc.key))
+ bkey_put(c, &alloc.key);
+
+ for (i = 0; i < KEY_PTRS(&b->key); i++)
+ EBUG_ON(ptr_stale(c, &b->key, i));
+
+ /* Set up the pointer to the space we're allocating: */
+
+ for (i = 0; i < KEY_PTRS(&b->key); i++)
+ k->ptr[i] = b->key.ptr[i];
+
+ sectors = min(sectors, b->sectors_free);
+
+ SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
+ SET_KEY_SIZE(k, sectors);
+ SET_KEY_PTRS(k, KEY_PTRS(&b->key));
+
+ /*
+ * Move b to the end of the lru, and keep track of what this bucket was
+ * last used for:
+ */
+ list_move_tail(&b->list, &c->data_buckets);
+ bkey_copy_key(&b->key, k);
+ b->last_write_point = write_point;
+
+ b->sectors_free -= sectors;
+
+ for (i = 0; i < KEY_PTRS(&b->key); i++) {
+ SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);
+
+ atomic_long_add(sectors,
+ &c->cache->sectors_written);
+ }
+
+ if (b->sectors_free < c->cache->sb.block_size)
+ b->sectors_free = 0;
+
+ /*
+ * k takes refcounts on the buckets it points to until it's inserted
+ * into the btree, but if we're done with this bucket we just transfer
+ * get_data_bucket()'s refcount.
+ */
+ if (b->sectors_free)
+ for (i = 0; i < KEY_PTRS(&b->key); i++)
+ atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);
+
+ spin_unlock(&c->data_bucket_lock);
+ return true;
+}
+
+/* Init */
+
+void bch_open_buckets_free(struct cache_set *c)
+{
+ struct open_bucket *b;
+
+ while (!list_empty(&c->data_buckets)) {
+ b = list_first_entry(&c->data_buckets,
+ struct open_bucket, list);
+ list_del(&b->list);
+ kfree(b);
+ }
+}
+
+int bch_open_buckets_alloc(struct cache_set *c)
+{
+ int i;
+
+ spin_lock_init(&c->data_bucket_lock);
+
+ for (i = 0; i < MAX_OPEN_BUCKETS; i++) {
+ struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
+
+ if (!b)
+ return -ENOMEM;
+
+ list_add(&b->list, &c->data_buckets);
+ }
+
+ return 0;
+}
+
+int bch_cache_allocator_start(struct cache *ca)
+{
+ struct task_struct *k = kthread_run(bch_allocator_thread,
+ ca, "bcache_allocator");
+ if (IS_ERR(k))
+ return PTR_ERR(k);
+
+ ca->alloc_thread = k;
+ return 0;
+}
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
new file mode 100644
index 0000000000..83eb7f27db
--- /dev/null
+++ b/drivers/md/bcache/bcache.h
@@ -0,0 +1,1049 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHE_H
+#define _BCACHE_H
+
+/*
+ * SOME HIGH LEVEL CODE DOCUMENTATION:
+ *
+ * Bcache mostly works with cache sets, cache devices, and backing devices.
+ *
+ * Support for multiple cache devices hasn't quite been finished off yet, but
+ * it's about 95% plumbed through. A cache set and its cache devices are sort of
+ * like an md raid array and its component devices. Most of the code doesn't care
+ * about individual cache devices, the main abstraction is the cache set.
+ *
+ * Multiple cache devices are intended to give us the ability to mirror dirty
+ * cached data and metadata, without mirroring clean cached data.
+ *
+ * Backing devices are different, in that they have a lifetime independent of a
+ * cache set. When you register a newly formatted backing device it'll come up
+ * in passthrough mode, and then you can attach and detach a backing device from
+ * a cache set at runtime - while it's mounted and in use. Detaching implicitly
+ * invalidates any cached data for that backing device.
+ *
+ * A cache set can have multiple (many) backing devices attached to it.
+ *
+ * There's also flash only volumes - this is the reason for the distinction
+ * between struct cached_dev and struct bcache_device. A flash only volume
+ * works much like a bcache device that has a backing device, except the
+ * "cached" data is always dirty. The end result is that we get thin
+ * provisioning with very little additional code.
+ *
+ * Flash only volumes work but they're not production ready because the moving
+ * garbage collector needs more work. More on that later.
+ *
+ * BUCKETS/ALLOCATION:
+ *
+ * Bcache is primarily designed for caching, which means that in normal
+ * operation all of our available space will be allocated. Thus, we need an
+ * efficient way of deleting things from the cache so we can write new things to
+ * it.
+ *
+ * To do this, we first divide the cache device up into buckets. A bucket is the
+ * unit of allocation; they're typically around 1 mb - anywhere from 128k to 2M+
+ * works efficiently.
+ *
+ * Each bucket has a 16 bit priority, and an 8 bit generation associated with
+ * it. The gens and priorities for all the buckets are stored contiguously and
+ * packed on disk (in a linked list of buckets - aside from the superblock, all
+ * of bcache's metadata is stored in buckets).
+ *
+ * The priority is used to implement an LRU. We reset a bucket's priority when
+ * we allocate it or on a cache hit, and every so often we decrement the priority
+ * of each bucket. It could be used to implement something more sophisticated,
+ * if anyone ever gets around to it.
+ *
+ * The generation is used for invalidating buckets. Each pointer also has an 8
+ * bit generation embedded in it; for a pointer to be considered valid, its gen
+ * must match the gen of the bucket it points into. Thus, to reuse a bucket all
+ * we have to do is increment its gen (and write its new gen to disk; we batch
+ * this up).
+ *
+ * Bcache is entirely COW - we never write twice to a bucket, even buckets that
+ * contain metadata (including btree nodes).
+ *
+ * THE BTREE:
+ *
+ * Bcache is in large part designed around the btree.
+ *
+ * At a high level, the btree is just an index of key -> ptr tuples.
+ *
+ * Keys represent extents, and thus have a size field. Keys also have a variable
+ * number of pointers attached to them (potentially zero, which is handy for
+ * invalidating the cache).
+ *
+ * The key itself is an inode:offset pair. The inode number corresponds to a
+ * backing device or a flash only volume. The offset is the ending offset of the
+ * extent within the inode - not the starting offset; this makes lookups
+ * slightly more convenient.
+ *
+ * Pointers contain the cache device id, the offset on that device, and an 8 bit
+ * generation number. More on the gen later.
+ *
+ * Index lookups are not fully abstracted - cache lookups in particular are
+ * still somewhat mixed in with the btree code, but things are headed in that
+ * direction.
+ *
+ * Updates are fairly well abstracted, though. There are two different ways of
+ * updating the btree; insert and replace.
+ *
+ * BTREE_INSERT will just take a list of keys and insert them into the btree -
+ * overwriting (possibly only partially) any extents they overlap with. This is
+ * used to update the index after a write.
+ *
+ * BTREE_REPLACE is really cmpxchg(); it inserts a key into the btree iff it is
+ * overwriting a key that matches another given key. This is used for inserting
+ * data into the cache after a cache miss, and for background writeback, and for
+ * the moving garbage collector.
+ *
+ * There is no "delete" operation; deleting things from the index is
+ * accomplished either by invalidating pointers (by incrementing a bucket's
+ * gen) or by inserting a key with 0 pointers - which will overwrite anything
+ * previously present at that location in the index.
+ *
+ * This means that there are always stale/invalid keys in the btree. They're
+ * filtered out by the code that iterates through a btree node, and removed when
+ * a btree node is rewritten.
+ *
+ * BTREE NODES:
+ *
+ * Our unit of allocation is a bucket, and we can't arbitrarily allocate and
+ * free smaller than a bucket - so, that's how big our btree nodes are.
+ *
+ * (If buckets are really big we'll only use part of the bucket for a btree node
+ * - no less than 1/4th - but a bucket still contains no more than a single
+ * btree node. I'd actually like to change this, but for now we rely on the
+ * bucket's gen for deleting btree nodes when we rewrite/split a node.)
+ *
+ * Anyways, btree nodes are big - big enough to be inefficient with a textbook
+ * btree implementation.
+ *
+ * The way this is solved is that btree nodes are internally log structured; we
+ * can append new keys to an existing btree node without rewriting it. This
+ * means each set of keys we write is sorted, but the node is not.
+ *
+ * We maintain this log structure in memory - keeping 1Mb of keys sorted would
+ * be expensive, and we have to distinguish between the keys we have written and
+ * the keys we haven't. So to do a lookup in a btree node, we have to search
+ * each sorted set. But we do merge written sets together lazily, so the cost of
+ * these extra searches is quite low (normally most of the keys in a btree node
+ * will be in one big set, and then there'll be one or two sets that are much
+ * smaller).
+ *
+ * This log structure makes bcache's btree more of a hybrid between a
+ * conventional btree and a compacting data structure, with some of the
+ * advantages of both.
+ *
+ * GARBAGE COLLECTION:
+ *
+ * We can't just invalidate any bucket - it might contain dirty data or
+ * metadata. If it once contained dirty data, other writes might overwrite it
+ * later, leaving no valid pointers into that bucket in the index.
+ *
+ * Thus, the primary purpose of garbage collection is to find buckets to reuse.
+ * It also counts how much valid data each bucket currently contains, so that
+ * allocation can reuse buckets sooner when they've been mostly overwritten.
+ *
+ * It also does some things that are really internal to the btree
+ * implementation. If a btree node contains pointers that are stale by more than
+ * some threshold, it rewrites the btree node to avoid the bucket's generation
+ * wrapping around. It also merges adjacent btree nodes if they're empty enough.
+ *
+ * THE JOURNAL:
+ *
+ * Bcache's journal is not necessary for consistency; we always strictly
+ * order metadata writes so that the btree and everything else is consistent on
+ * disk in the event of an unclean shutdown, and in fact bcache had writeback
+ * caching (with recovery from unclean shutdown) before journalling was
+ * implemented.
+ *
+ * Rather, the journal is purely a performance optimization; we can't complete a
+ * write until we've updated the index on disk, otherwise the cache would be
+ * inconsistent in the event of an unclean shutdown. This means that without the
+ * journal, on random write workloads we constantly have to update all the leaf
+ * nodes in the btree, and those writes will be mostly empty (appending at most
+ * a few keys each) - highly inefficient in terms of amount of metadata writes,
+ * and it puts more strain on the various btree resorting/compacting code.
+ *
+ * The journal is just a log of keys we've inserted; on startup we just reinsert
+ * all the keys in the open journal entries. That means that when we're updating
+ * a node in the btree, we can wait until a 4k block of keys fills up before
+ * writing them out.
+ *
+ * For simplicity, we only journal updates to leaf nodes; updates to parent
+ * nodes are rare enough (since our leaf nodes are huge) that it wasn't worth
+ * the complexity to deal with journalling them (in particular, journal replay)
+ * - updates to non leaf nodes just happen synchronously (see btree_split()).
+ */
+
+#define pr_fmt(fmt) "bcache: %s() " fmt, __func__
+
+#include <linux/bio.h>
+#include <linux/kobject.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
+#include <linux/refcount.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/kthread.h>
+
+#include "bcache_ondisk.h"
+#include "bset.h"
+#include "util.h"
+#include "closure.h"
+
+struct bucket {
+ atomic_t pin;
+ uint16_t prio;
+ uint8_t gen;
+ uint8_t last_gc; /* Most out of date gen in the btree */
+ uint16_t gc_mark; /* Bitfield used by GC. See below for field */
+};
+
+/*
+ * I'd use bitfields for these, but I don't trust the compiler not to screw me
+ * as multiple threads touch struct bucket without locking
+ */
+
+BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
+#define GC_MARK_RECLAIMABLE 1
+#define GC_MARK_DIRTY 2
+#define GC_MARK_METADATA 3
+#define GC_SECTORS_USED_SIZE 13
+#define MAX_GC_SECTORS_USED (~(~0ULL << GC_SECTORS_USED_SIZE))
+BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
+BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
+
+#include "journal.h"
+#include "stats.h"
+struct search;
+struct btree;
+struct keybuf;
+
+struct keybuf_key {
+ struct rb_node node;
+ BKEY_PADDED(key);
+ void *private;
+};
+
+struct keybuf {
+ struct bkey last_scanned;
+ spinlock_t lock;
+
+ /*
+ * Beginning and end of range in rb tree - so that we can skip taking
+ * lock and checking the rb tree when we need to check for overlapping
+ * keys.
+ */
+ struct bkey start;
+ struct bkey end;
+
+ struct rb_root keys;
+
+#define KEYBUF_NR 500
+ DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, KEYBUF_NR);
+};
+
+struct bcache_device {
+ struct closure cl;
+
+ struct kobject kobj;
+
+ struct cache_set *c;
+ unsigned int id;
+#define BCACHEDEVNAME_SIZE 12
+ char name[BCACHEDEVNAME_SIZE];
+
+ struct gendisk *disk;
+
+ unsigned long flags;
+#define BCACHE_DEV_CLOSING 0
+#define BCACHE_DEV_DETACHING 1
+#define BCACHE_DEV_UNLINK_DONE 2
+#define BCACHE_DEV_WB_RUNNING 3
+#define BCACHE_DEV_RATE_DW_RUNNING 4
+ int nr_stripes;
+#define BCH_MIN_STRIPE_SZ ((4 << 20) >> SECTOR_SHIFT)
+ unsigned int stripe_size;
+ atomic_t *stripe_sectors_dirty;
+ unsigned long *full_dirty_stripes;
+
+ struct bio_set bio_split;
+
+ unsigned int data_csum:1;
+
+ int (*cache_miss)(struct btree *b, struct search *s,
+ struct bio *bio, unsigned int sectors);
+ int (*ioctl)(struct bcache_device *d, blk_mode_t mode,
+ unsigned int cmd, unsigned long arg);
+};
+
+struct io {
+ /* Used to track sequential IO so it can be skipped */
+ struct hlist_node hash;
+ struct list_head lru;
+
+ unsigned long jiffies;
+ unsigned int sequential;
+ sector_t last;
+};
+
+enum stop_on_failure {
+ BCH_CACHED_DEV_STOP_AUTO = 0,
+ BCH_CACHED_DEV_STOP_ALWAYS,
+ BCH_CACHED_DEV_STOP_MODE_MAX,
+};
+
+struct cached_dev {
+ struct list_head list;
+ struct bcache_device disk;
+ struct block_device *bdev;
+
+ struct cache_sb sb;
+ struct cache_sb_disk *sb_disk;
+ struct bio sb_bio;
+ struct bio_vec sb_bv[1];
+ struct closure sb_write;
+ struct semaphore sb_write_mutex;
+
+ /* Refcount on the cache set. Always nonzero when we're caching. */
+ refcount_t count;
+ struct work_struct detach;
+
+ /*
+ * Device might not be running if it's dirty and the cache set hasn't
+ * showed up yet.
+ */
+ atomic_t running;
+
+ /*
+ * Writes take a shared lock from start to finish; scanning for dirty
+ * data to refill the rb tree requires an exclusive lock.
+ */
+ struct rw_semaphore writeback_lock;
+
+ /*
+ * Nonzero, and writeback has a refcount (d->count), iff there is dirty
+ * data in the cache. Protected by writeback_lock; must have a
+ * shared lock to set and an exclusive lock to clear.
+ */
+ atomic_t has_dirty;
+
+#define BCH_CACHE_READA_ALL 0
+#define BCH_CACHE_READA_META_ONLY 1
+ unsigned int cache_readahead_policy;
+ struct bch_ratelimit writeback_rate;
+ struct delayed_work writeback_rate_update;
+
+ /* Limit number of writeback bios in flight */
+ struct semaphore in_flight;
+ struct task_struct *writeback_thread;
+ struct workqueue_struct *writeback_write_wq;
+
+ struct keybuf writeback_keys;
+
+ struct task_struct *status_update_thread;
+ /*
+ * Order the write-half of writeback operations strongly in dispatch
+ * order. (Maintain LBA order; don't allow reads completing out of
+ * order to re-order the writes...)
+ */
+ struct closure_waitlist writeback_ordering_wait;
+ atomic_t writeback_sequence_next;
+
+ /* For tracking sequential IO */
+#define RECENT_IO_BITS 7
+#define RECENT_IO (1 << RECENT_IO_BITS)
+ struct io io[RECENT_IO];
+ struct hlist_head io_hash[RECENT_IO + 1];
+ struct list_head io_lru;
+ spinlock_t io_lock;
+
+ struct cache_accounting accounting;
+
+ /* The rest of this all shows up in sysfs */
+ unsigned int sequential_cutoff;
+
+ unsigned int io_disable:1;
+ unsigned int verify:1;
+ unsigned int bypass_torture_test:1;
+
+ unsigned int partial_stripes_expensive:1;
+ unsigned int writeback_metadata:1;
+ unsigned int writeback_running:1;
+ unsigned int writeback_consider_fragment:1;
+ unsigned char writeback_percent;
+ unsigned int writeback_delay;
+
+ uint64_t writeback_rate_target;
+ int64_t writeback_rate_proportional;
+ int64_t writeback_rate_integral;
+ int64_t writeback_rate_integral_scaled;
+ int32_t writeback_rate_change;
+
+ unsigned int writeback_rate_update_seconds;
+ unsigned int writeback_rate_i_term_inverse;
+ unsigned int writeback_rate_p_term_inverse;
+ unsigned int writeback_rate_fp_term_low;
+ unsigned int writeback_rate_fp_term_mid;
+ unsigned int writeback_rate_fp_term_high;
+ unsigned int writeback_rate_minimum;
+
+ enum stop_on_failure stop_when_cache_set_failed;
+#define DEFAULT_CACHED_DEV_ERROR_LIMIT 64
+ atomic_t io_errors;
+ unsigned int error_limit;
+ unsigned int offline_seconds;
+
+ /*
+ * Retry to update writeback_rate if contention happens for
+ * down_read(dc->writeback_lock) in update_writeback_rate()
+ */
+#define BCH_WBRATE_UPDATE_MAX_SKIPS 15
+ unsigned int rate_update_retry;
+};
+
+enum alloc_reserve {
+ RESERVE_BTREE,
+ RESERVE_PRIO,
+ RESERVE_MOVINGGC,
+ RESERVE_NONE,
+ RESERVE_NR,
+};
+
+struct cache {
+ struct cache_set *set;
+ struct cache_sb sb;
+ struct cache_sb_disk *sb_disk;
+ struct bio sb_bio;
+ struct bio_vec sb_bv[1];
+
+ struct kobject kobj;
+ struct block_device *bdev;
+
+ struct task_struct *alloc_thread;
+
+ struct closure prio;
+ struct prio_set *disk_buckets;
+
+ /*
+ * When allocating new buckets, prio_write() gets first dibs - since we
+ * may not be able to allocate at all without writing priorities and gens.
+ * prio_last_buckets[] contains the last buckets we wrote priorities to
+ * (so gc can mark them as metadata), prio_buckets[] contains the
+ * buckets allocated for the next prio write.
+ */
+ uint64_t *prio_buckets;
+ uint64_t *prio_last_buckets;
+
+ /*
+ * free: Buckets that are ready to be used
+ *
+ * free_inc: Incoming buckets - these are buckets that currently have
+ * cached data in them, and we can't reuse them until after we write
+ * their new gen to disk. After prio_write() finishes writing the new
+ * gens/prios, they'll be moved to the free list (and possibly discarded
+ * in the process)
+ */
+ DECLARE_FIFO(long, free)[RESERVE_NR];
+ DECLARE_FIFO(long, free_inc);
+
+ size_t fifo_last_bucket;
+
+ /* Allocation stuff: */
+ struct bucket *buckets;
+
+ DECLARE_HEAP(struct bucket *, heap);
+
+ /*
+ * If nonzero, we know we aren't going to find any buckets to invalidate
+ * until a gc finishes - otherwise we could pointlessly burn a ton of
+ * cpu
+ */
+ unsigned int invalidate_needs_gc;
+
+ bool discard; /* Get rid of? */
+
+ struct journal_device journal;
+
+ /* The rest of this all shows up in sysfs */
+#define IO_ERROR_SHIFT 20
+ atomic_t io_errors;
+ atomic_t io_count;
+
+ atomic_long_t meta_sectors_written;
+ atomic_long_t btree_sectors_written;
+ atomic_long_t sectors_written;
+};
+
+struct gc_stat {
+ size_t nodes;
+ size_t nodes_pre;
+ size_t key_bytes;
+
+ size_t nkeys;
+ uint64_t data; /* sectors */
+ unsigned int in_use; /* percent */
+};
+
+/*
+ * Flag bits, for how the cache set is shutting down, and what phase it's at:
+ *
+ * CACHE_SET_UNREGISTERING means we're not just shutting down, we're detaching
+ * all the backing devices first (their cached data gets invalidated, and they
+ * won't automatically reattach).
+ *
+ * CACHE_SET_STOPPING always gets set first when we're closing down a cache set;
+ * we'll continue to run normally for awhile with CACHE_SET_STOPPING set (i.e.
+ * flushing dirty data).
+ *
+ * CACHE_SET_RUNNING means all cache devices have been registered and journal
+ * replay is complete.
+ *
+ * CACHE_SET_IO_DISABLE is set when bcache is stopping the whole cache set; all
+ * external and internal I/O should be denied when this flag is set.
+ *
+ */
+#define CACHE_SET_UNREGISTERING 0
+#define CACHE_SET_STOPPING 1
+#define CACHE_SET_RUNNING 2
+#define CACHE_SET_IO_DISABLE 3
+
+struct cache_set {
+ struct closure cl;
+
+ struct list_head list;
+ struct kobject kobj;
+ struct kobject internal;
+ struct dentry *debug;
+ struct cache_accounting accounting;
+
+ unsigned long flags;
+ atomic_t idle_counter;
+ atomic_t at_max_writeback_rate;
+
+ struct cache *cache;
+
+ struct bcache_device **devices;
+ unsigned int devices_max_used;
+ atomic_t attached_dev_nr;
+ struct list_head cached_devs;
+ uint64_t cached_dev_sectors;
+ atomic_long_t flash_dev_dirty_sectors;
+ struct closure caching;
+
+ struct closure sb_write;
+ struct semaphore sb_write_mutex;
+
+ mempool_t search;
+ mempool_t bio_meta;
+ struct bio_set bio_split;
+
+ /* For the btree cache */
+ struct shrinker shrink;
+
+ /* For the btree cache and anything allocation related */
+ struct mutex bucket_lock;
+
+ /* log2(bucket_size), in sectors */
+ unsigned short bucket_bits;
+
+ /* log2(block_size), in sectors */
+ unsigned short block_bits;
+
+ /*
+ * Default number of pages for a new btree node - may be less than a
+ * full bucket
+ */
+ unsigned int btree_pages;
+
+ /*
+ * Lists of struct btrees; lru is the list for structs that have memory
+ * allocated for actual btree node, freed is for structs that do not.
+ *
+ * We never free a struct btree, except on shutdown - we just put it on
+ * the btree_cache_freed list and reuse it later. This simplifies the
+ * code, and it doesn't cost us much memory as the memory usage is
+ * dominated by buffers that hold the actual btree node data and those
+ * can be freed - and the number of struct btrees allocated is
+ * effectively bounded.
+ *
+ * btree_cache_freeable effectively is a small cache - we use it because
+ * high order page allocations can be rather expensive, and it's quite
+ * common to delete and allocate btree nodes in quick succession. It
+ * should never grow past ~2-3 nodes in practice.
+ */
+ struct list_head btree_cache;
+ struct list_head btree_cache_freeable;
+ struct list_head btree_cache_freed;
+
+ /* Number of elements in btree_cache + btree_cache_freeable lists */
+ unsigned int btree_cache_used;
+
+ /*
+ * If we need to allocate memory for a new btree node and that
+ * allocation fails, we can cannibalize another node in the btree cache
+ * to satisfy the allocation - lock to guarantee only one thread does
+ * this at a time:
+ */
+ wait_queue_head_t btree_cache_wait;
+ struct task_struct *btree_cache_alloc_lock;
+ spinlock_t btree_cannibalize_lock;
+
+ /*
+ * When we free a btree node, we increment the gen of the bucket the
+ * node is in - but we can't rewrite the prios and gens until we
+ * finished whatever it is we were doing, otherwise after a crash the
+ * btree node would be freed but for say a split, we might not have the
+ * pointers to the new nodes inserted into the btree yet.
+ *
+ * This is a refcount that blocks prio_write() until the new keys are
+ * written.
+ */
+ atomic_t prio_blocked;
+ wait_queue_head_t bucket_wait;
+
+ /*
+ * For any bio we don't skip we subtract the number of sectors from
+ * rescale; when it hits 0 we rescale all the bucket priorities.
+ */
+ atomic_t rescale;
+ /*
+ * used by GC to identify whether any front side I/O is in flight
+ */
+ atomic_t search_inflight;
+ /*
+ * When we invalidate buckets, we use both the priority and the amount
+ * of good data to determine which buckets to reuse first - to weight
+ * those together consistently we keep track of the smallest nonzero
+ * priority of any bucket.
+ */
+ uint16_t min_prio;
+
+ /*
+ * max(gen - last_gc) for all buckets. When it gets too big we have to
+ * gc to keep gens from wrapping around.
+ */
+ uint8_t need_gc;
+ struct gc_stat gc_stats;
+ size_t nbuckets;
+ size_t avail_nbuckets;
+
+ struct task_struct *gc_thread;
+ /* Where in the btree gc currently is */
+ struct bkey gc_done;
+
+ /*
+ * For automatic garbage collection after writeback completes, this
+ * variable is used as bit fields:
+ * - 0000 0001b (BCH_ENABLE_AUTO_GC): enable gc after writeback
+ * - 0000 0010b (BCH_DO_AUTO_GC): do gc after writeback
+ * This is an optimization for write requests that follow a completed
+ * writeback, where the read hit rate would otherwise drop because clean
+ * data in the cache has been discarded. Unless the user explicitly sets
+ * it via sysfs, it won't be enabled.
+ */
+#define BCH_ENABLE_AUTO_GC 1
+#define BCH_DO_AUTO_GC 2
+ uint8_t gc_after_writeback;
+
+ /*
+ * The allocation code needs gc_mark in struct bucket to be correct, but
+ * it's not while a gc is in progress. Protected by bucket_lock.
+ */
+ int gc_mark_valid;
+
+ /* Counts how many sectors bio_insert has added to the cache */
+ atomic_t sectors_to_gc;
+ wait_queue_head_t gc_wait;
+
+ struct keybuf moving_gc_keys;
+ /* Number of moving GC bios in flight */
+ struct semaphore moving_in_flight;
+
+ struct workqueue_struct *moving_gc_wq;
+
+ struct btree *root;
+
+#ifdef CONFIG_BCACHE_DEBUG
+ struct btree *verify_data;
+ struct bset *verify_ondisk;
+ struct mutex verify_lock;
+#endif
+
+ uint8_t set_uuid[16];
+ unsigned int nr_uuids;
+ struct uuid_entry *uuids;
+ BKEY_PADDED(uuid_bucket);
+ struct closure uuid_write;
+ struct semaphore uuid_write_mutex;
+
+ /*
+ * A btree node on disk could have too many bsets for an iterator to fit
+ * on the stack - so we have to dynamically allocate them.
+ * bch_cache_set_alloc() will make sure the pool can allocate iterators
+ * with enough room to host
+ * (sb.bucket_size / sb.block_size)
+ * btree_iter_sets, which is more than the static MAX_BSETS.
+ */
+ mempool_t fill_iter;
+
+ struct bset_sort_state sort;
+
+ /* List of buckets we're currently writing data to */
+ struct list_head data_buckets;
+ spinlock_t data_bucket_lock;
+
+ struct journal journal;
+
+#define CONGESTED_MAX 1024
+ unsigned int congested_last_us;
+ atomic_t congested;
+
+ /* The rest of this all shows up in sysfs */
+ unsigned int congested_read_threshold_us;
+ unsigned int congested_write_threshold_us;
+
+ struct time_stats btree_gc_time;
+ struct time_stats btree_split_time;
+ struct time_stats btree_read_time;
+
+ atomic_long_t cache_read_races;
+ atomic_long_t writeback_keys_done;
+ atomic_long_t writeback_keys_failed;
+
+ atomic_long_t reclaim;
+ atomic_long_t reclaimed_journal_buckets;
+ atomic_long_t flush_write;
+
+ enum {
+ ON_ERROR_UNREGISTER,
+ ON_ERROR_PANIC,
+ } on_error;
+#define DEFAULT_IO_ERROR_LIMIT 8
+ unsigned int error_limit;
+ unsigned int error_decay;
+
+ unsigned short journal_delay_ms;
+ bool expensive_debug_checks;
+ unsigned int verify:1;
+ unsigned int key_merging_disabled:1;
+ unsigned int gc_always_rewrite:1;
+ unsigned int shrinker_disabled:1;
+ unsigned int copy_gc_enabled:1;
+ unsigned int idle_max_writeback_rate_enabled:1;
+
+#define BUCKET_HASH_BITS 12
+ struct hlist_head bucket_hash[1 << BUCKET_HASH_BITS];
+};
+
+struct bbio {
+ unsigned int submit_time_us;
+ union {
+ struct bkey key;
+ uint64_t _pad[3];
+ /*
+ * We only need pad = 3 here because we only ever carry around a
+ * single pointer - i.e. the pointer we're doing io to/from.
+ */
+ };
+ struct bio bio;
+};
+
+#define BTREE_PRIO USHRT_MAX
+#define INITIAL_PRIO 32768U
+
+#define btree_bytes(c) ((c)->btree_pages * PAGE_SIZE)
+#define btree_blocks(b) \
+ ((unsigned int) (KEY_SIZE(&b->key) >> (b)->c->block_bits))
+
+#define btree_default_blocks(c) \
+ ((unsigned int) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
+
+#define bucket_bytes(ca) ((ca)->sb.bucket_size << 9)
+#define block_bytes(ca) ((ca)->sb.block_size << 9)
+
+static inline unsigned int meta_bucket_pages(struct cache_sb *sb)
+{
+ unsigned int n, max_pages;
+
+ max_pages = min_t(unsigned int,
+ __rounddown_pow_of_two(USHRT_MAX) / PAGE_SECTORS,
+ MAX_ORDER_NR_PAGES);
+
+ n = sb->bucket_size / PAGE_SECTORS;
+ if (n > max_pages)
+ n = max_pages;
+
+ return n;
+}
+
+static inline unsigned int meta_bucket_bytes(struct cache_sb *sb)
+{
+ return meta_bucket_pages(sb) << PAGE_SHIFT;
+}
+
+#define prios_per_bucket(ca) \
+ ((meta_bucket_bytes(&(ca)->sb) - sizeof(struct prio_set)) / \
+ sizeof(struct bucket_disk))
+
+#define prio_buckets(ca) \
+ DIV_ROUND_UP((size_t) (ca)->sb.nbuckets, prios_per_bucket(ca))
+
+static inline size_t sector_to_bucket(struct cache_set *c, sector_t s)
+{
+ return s >> c->bucket_bits;
+}
+
+static inline sector_t bucket_to_sector(struct cache_set *c, size_t b)
+{
+ return ((sector_t) b) << c->bucket_bits;
+}
+
+static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
+{
+ return s & (c->cache->sb.bucket_size - 1);
+}
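+
+/*
+ * Illustrative numbers: with a 1 MB bucket (2048 sectors, bucket_bits == 11),
+ * sector_to_bucket() maps sector 10240 to bucket 5, bucket_to_sector() maps
+ * bucket 5 back to sector 10240, and bucket_remainder() of sector 10300 is 60.
+ */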
+
+static inline size_t PTR_BUCKET_NR(struct cache_set *c,
+ const struct bkey *k,
+ unsigned int ptr)
+{
+ return sector_to_bucket(c, PTR_OFFSET(k, ptr));
+}
+
+static inline struct bucket *PTR_BUCKET(struct cache_set *c,
+ const struct bkey *k,
+ unsigned int ptr)
+{
+ return c->cache->buckets + PTR_BUCKET_NR(c, k, ptr);
+}
+
+static inline uint8_t gen_after(uint8_t a, uint8_t b)
+{
+ uint8_t r = a - b;
+
+ return r > 128U ? 0 : r;
+}
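+
+/*
+ * gen_after() is wraparound-safe 8 bit arithmetic. Illustrative values:
+ *
+ *	gen_after(2, 250)  -> (uint8_t)(2 - 250) == 8, a is 8 gens ahead
+ *	gen_after(250, 2)  -> 248 > 128, so the result is clamped to 0
+ *
+ * A result of 0 means "a is not newer than b".
+ */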
+
+static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
+ unsigned int i)
+{
+ return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
+}
+
+static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
+ unsigned int i)
+{
+ return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && c->cache;
+}
+
+/* Btree key macros */
+
+/*
+ * This is used for various on disk data structures - cache_sb, prio_set, bset,
+ * jset: The checksum is _always_ the first 8 bytes of these structs
+ */
+#define csum_set(i) \
+ bch_crc64(((void *) (i)) + sizeof(uint64_t), \
+ ((void *) bset_bkey_last(i)) - \
+ (((void *) (i)) + sizeof(uint64_t)))
+
+/* Error handling macros */
+
+#define btree_bug(b, ...) \
+do { \
+ if (bch_cache_set_error((b)->c, __VA_ARGS__)) \
+ dump_stack(); \
+} while (0)
+
+#define cache_bug(c, ...) \
+do { \
+ if (bch_cache_set_error(c, __VA_ARGS__)) \
+ dump_stack(); \
+} while (0)
+
+#define btree_bug_on(cond, b, ...) \
+do { \
+ if (cond) \
+ btree_bug(b, __VA_ARGS__); \
+} while (0)
+
+#define cache_bug_on(cond, c, ...) \
+do { \
+ if (cond) \
+ cache_bug(c, __VA_ARGS__); \
+} while (0)
+
+#define cache_set_err_on(cond, c, ...) \
+do { \
+ if (cond) \
+ bch_cache_set_error(c, __VA_ARGS__); \
+} while (0)
+
+/* Looping macros */
+
+#define for_each_bucket(b, ca) \
+ for (b = (ca)->buckets + (ca)->sb.first_bucket; \
+ b < (ca)->buckets + (ca)->sb.nbuckets; b++)
+
+static inline void cached_dev_put(struct cached_dev *dc)
+{
+ if (refcount_dec_and_test(&dc->count))
+ schedule_work(&dc->detach);
+}
+
+static inline bool cached_dev_get(struct cached_dev *dc)
+{
+ if (!refcount_inc_not_zero(&dc->count))
+ return false;
+
+ /* Paired with the mb in cached_dev_attach */
+ smp_mb__after_atomic();
+ return true;
+}
+
+/*
+ * bucket_gc_gen() returns the difference between the bucket's current gen and
+ * the oldest gen of any pointer into that bucket in the btree (last_gc).
+ */
+
+static inline uint8_t bucket_gc_gen(struct bucket *b)
+{
+ return b->gen - b->last_gc;
+}
+
+#define BUCKET_GC_GEN_MAX 96U
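+
+/*
+ * Example (illustrative values): if the most out of date pointer into a
+ * bucket still in the btree has gen 10 (b->last_gc == 10) and the bucket has
+ * since been reused until b->gen == 106, bucket_gc_gen() returns
+ * 96 == BUCKET_GC_GEN_MAX, and can_inc_bucket_gen() in alloc.c will refuse
+ * to invalidate the bucket again until gc runs and advances last_gc.
+ */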
+
+#define kobj_attribute_write(n, fn) \
+ static struct kobj_attribute ksysfs_##n = __ATTR(n, 0200, NULL, fn)
+
+#define kobj_attribute_rw(n, show, store) \
+ static struct kobj_attribute ksysfs_##n = \
+ __ATTR(n, 0600, show, store)
+
+static inline void wake_up_allocators(struct cache_set *c)
+{
+ struct cache *ca = c->cache;
+
+ wake_up_process(ca->alloc_thread);
+}
+
+static inline void closure_bio_submit(struct cache_set *c,
+ struct bio *bio,
+ struct closure *cl)
+{
+ closure_get(cl);
+ if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags))) {
+ bio->bi_status = BLK_STS_IOERR;
+ bio_endio(bio);
+ return;
+ }
+ submit_bio_noacct(bio);
+}
+
+/*
+ * Prevent the kthread from exiting directly, and make sure it is still
+ * alive when kthread_stop() is called to stop it. If a kthread might be
+ * stopped by the CACHE_SET_IO_DISABLE bit being set, wait_for_kthread_stop()
+ * is necessary before the kthread returns.
+ */
+static inline void wait_for_kthread_stop(void)
+{
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ }
+}
+
+/* Forward declarations */
+
+void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio);
+void bch_count_io_errors(struct cache *ca, blk_status_t error,
+ int is_read, const char *m);
+void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
+ blk_status_t error, const char *m);
+void bch_bbio_endio(struct cache_set *c, struct bio *bio,
+ blk_status_t error, const char *m);
+void bch_bbio_free(struct bio *bio, struct cache_set *c);
+struct bio *bch_bbio_alloc(struct cache_set *c);
+
+void __bch_submit_bbio(struct bio *bio, struct cache_set *c);
+void bch_submit_bbio(struct bio *bio, struct cache_set *c,
+ struct bkey *k, unsigned int ptr);
+
+uint8_t bch_inc_gen(struct cache *ca, struct bucket *b);
+void bch_rescale_priorities(struct cache_set *c, int sectors);
+
+bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b);
+void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b);
+
+void __bch_bucket_free(struct cache *ca, struct bucket *b);
+void bch_bucket_free(struct cache_set *c, struct bkey *k);
+
+long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait);
+int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
+ struct bkey *k, bool wait);
+int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
+ struct bkey *k, bool wait);
+bool bch_alloc_sectors(struct cache_set *c, struct bkey *k,
+ unsigned int sectors, unsigned int write_point,
+ unsigned int write_prio, bool wait);
+bool bch_cached_dev_error(struct cached_dev *dc);
+
+__printf(2, 3)
+bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...);
+
+int bch_prio_write(struct cache *ca, bool wait);
+void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);
+
+extern struct workqueue_struct *bcache_wq;
+extern struct workqueue_struct *bch_journal_wq;
+extern struct workqueue_struct *bch_flush_wq;
+extern struct mutex bch_register_lock;
+extern struct list_head bch_cache_sets;
+
+extern const struct kobj_type bch_cached_dev_ktype;
+extern const struct kobj_type bch_flash_dev_ktype;
+extern const struct kobj_type bch_cache_set_ktype;
+extern const struct kobj_type bch_cache_set_internal_ktype;
+extern const struct kobj_type bch_cache_ktype;
+
+void bch_cached_dev_release(struct kobject *kobj);
+void bch_flash_dev_release(struct kobject *kobj);
+void bch_cache_set_release(struct kobject *kobj);
+void bch_cache_release(struct kobject *kobj);
+
+int bch_uuid_write(struct cache_set *c);
+void bcache_write_super(struct cache_set *c);
+
+int bch_flash_dev_create(struct cache_set *c, uint64_t size);
+
+int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
+ uint8_t *set_uuid);
+void bch_cached_dev_detach(struct cached_dev *dc);
+int bch_cached_dev_run(struct cached_dev *dc);
+void bcache_device_stop(struct bcache_device *d);
+
+void bch_cache_set_unregister(struct cache_set *c);
+void bch_cache_set_stop(struct cache_set *c);
+
+struct cache_set *bch_cache_set_alloc(struct cache_sb *sb);
+void bch_btree_cache_free(struct cache_set *c);
+int bch_btree_cache_alloc(struct cache_set *c);
+void bch_moving_init_cache_set(struct cache_set *c);
+int bch_open_buckets_alloc(struct cache_set *c);
+void bch_open_buckets_free(struct cache_set *c);
+
+int bch_cache_allocator_start(struct cache *ca);
+
+void bch_debug_exit(void);
+void bch_debug_init(void);
+void bch_request_exit(void);
+int bch_request_init(void);
+void bch_btree_exit(void);
+int bch_btree_init(void);
+
+#endif /* _BCACHE_H */
diff --git a/drivers/md/bcache/bcache_ondisk.h b/drivers/md/bcache/bcache_ondisk.h
new file mode 100644
index 0000000000..6620a7f8ff
--- /dev/null
+++ b/drivers/md/bcache/bcache_ondisk.h
@@ -0,0 +1,446 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _LINUX_BCACHE_H
+#define _LINUX_BCACHE_H
+
+/*
+ * Bcache on disk data structures
+ */
+
+#include <linux/types.h>
+
+#define BITMASK(name, type, field, offset, size) \
+static inline __u64 name(const type *k) \
+{ return (k->field >> offset) & ~(~0ULL << size); } \
+ \
+static inline void SET_##name(type *k, __u64 v) \
+{ \
+ k->field &= ~(~(~0ULL << size) << offset); \
+ k->field |= (v & ~(~0ULL << size)) << offset; \
+}
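+
+/*
+ * For example, BITMASK(CACHE_SYNC, struct cache_sb, flags, 0, 1) further
+ * below generates CACHE_SYNC(sb), which reads bit 0 of sb->flags, and
+ * SET_CACHE_SYNC(sb, v), which stores the low bit of v into bit 0 of
+ * sb->flags.
+ */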
+
+/* Btree keys - all units are in sectors */
+
+struct bkey {
+ __u64 high;
+ __u64 low;
+ __u64 ptr[];
+};
+
+#define KEY_FIELD(name, field, offset, size) \
+ BITMASK(name, struct bkey, field, offset, size)
+
+#define PTR_FIELD(name, offset, size) \
+static inline __u64 name(const struct bkey *k, unsigned int i) \
+{ return (k->ptr[i] >> offset) & ~(~0ULL << size); } \
+ \
+static inline void SET_##name(struct bkey *k, unsigned int i, __u64 v) \
+{ \
+ k->ptr[i] &= ~(~(~0ULL << size) << offset); \
+ k->ptr[i] |= (v & ~(~0ULL << size)) << offset; \
+}
+
+#define KEY_SIZE_BITS 16
+#define KEY_MAX_U64S 8
+
+KEY_FIELD(KEY_PTRS, high, 60, 3)
+KEY_FIELD(__PAD0, high, 58, 2)
+KEY_FIELD(KEY_CSUM, high, 56, 2)
+KEY_FIELD(__PAD1, high, 55, 1)
+KEY_FIELD(KEY_DIRTY, high, 36, 1)
+
+KEY_FIELD(KEY_SIZE, high, 20, KEY_SIZE_BITS)
+KEY_FIELD(KEY_INODE, high, 0, 20)
+
+/* Next time I change the on disk format, KEY_OFFSET() won't be 64 bits */
+
+static inline __u64 KEY_OFFSET(const struct bkey *k)
+{
+ return k->low;
+}
+
+static inline void SET_KEY_OFFSET(struct bkey *k, __u64 v)
+{
+ k->low = v;
+}
+
+/*
+ * The high bit being set is a relic from when we used it to do binary
+ * searches - it told you where a key started. It's not used anymore,
+ * and can probably be safely dropped.
+ */
+#define KEY(inode, offset, size) \
+((struct bkey) { \
+ .high = (1ULL << 63) | ((__u64) (size) << 20) | (inode), \
+ .low = (offset) \
+})
+
+#define ZERO_KEY KEY(0, 0, 0)
+
+#define MAX_KEY_INODE (~(~0 << 20))
+#define MAX_KEY_OFFSET (~0ULL >> 1)
+#define MAX_KEY KEY(MAX_KEY_INODE, MAX_KEY_OFFSET, 0)
+
+#define KEY_START(k) (KEY_OFFSET(k) - KEY_SIZE(k))
+#define START_KEY(k) KEY(KEY_INODE(k), KEY_START(k), 0)
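+
+/*
+ * A bkey describes the extent that *ends* at KEY_OFFSET() and is KEY_SIZE()
+ * sectors long: e.g. KEY(1, 100, 20) covers sectors 80..99 of inode 1, so
+ * KEY_START() is 80 and START_KEY() is the zero-size key at that boundary.
+ */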
+
+#define PTR_DEV_BITS 12
+
+PTR_FIELD(PTR_DEV, 51, PTR_DEV_BITS)
+PTR_FIELD(PTR_OFFSET, 8, 43)
+PTR_FIELD(PTR_GEN, 0, 8)
+
+#define PTR_CHECK_DEV ((1 << PTR_DEV_BITS) - 1)
+
+#define MAKE_PTR(gen, offset, dev) \
+ ((((__u64) dev) << 51) | ((__u64) offset) << 8 | gen)
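+
+/*
+ * Each k->ptr[i] packs three fields: PTR_GEN() in bits 0..7, PTR_OFFSET()
+ * in bits 8..50 and PTR_DEV() in bits 51..62; MAKE_PTR() assembles a
+ * pointer value from the same three fields.
+ */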
+
+/* Bkey utility code */
+
+static inline unsigned long bkey_u64s(const struct bkey *k)
+{
+ return (sizeof(struct bkey) / sizeof(__u64)) + KEY_PTRS(k);
+}
+
+static inline unsigned long bkey_bytes(const struct bkey *k)
+{
+ return bkey_u64s(k) * sizeof(__u64);
+}
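+
+/*
+ * For example, a bkey with a single pointer (KEY_PTRS() == 1) is 3 u64s:
+ * the two header words plus one ptr, so bkey_bytes() returns 24.
+ */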
+
+#define bkey_copy(_dest, _src) unsafe_memcpy(_dest, _src, bkey_bytes(_src), \
+ /* bkey is always padded */)
+
+static inline void bkey_copy_key(struct bkey *dest, const struct bkey *src)
+{
+ SET_KEY_INODE(dest, KEY_INODE(src));
+ SET_KEY_OFFSET(dest, KEY_OFFSET(src));
+}
+
+static inline struct bkey *bkey_next(const struct bkey *k)
+{
+ __u64 *d = (void *) k;
+
+ return (struct bkey *) (d + bkey_u64s(k));
+}
+
+static inline struct bkey *bkey_idx(const struct bkey *k, unsigned int nr_keys)
+{
+ __u64 *d = (void *) k;
+
+ return (struct bkey *) (d + nr_keys);
+}
+
+/* Enough for a key with 6 pointers */
+#define BKEY_PAD 8
+
+#define BKEY_PADDED(key) \
+ union { struct bkey key; __u64 key ## _pad[BKEY_PAD]; }
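+
+/*
+ * BKEY_PADDED(key) wraps the member 'key' in an anonymous union backed by
+ * BKEY_PAD (8) u64s of storage, enough for a bkey with up to 6 pointers;
+ * struct jset below uses it for uuid_bucket and btree_root.
+ */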
+
+/* Superblock */
+
+/* Version 0: Cache device
+ * Version 1: Backing device
+ * Version 2: Seed pointer into btree node checksum
+ * Version 3: Cache device with new UUID format
+ * Version 4: Backing device with data offset
+ */
+#define BCACHE_SB_VERSION_CDEV 0
+#define BCACHE_SB_VERSION_BDEV 1
+#define BCACHE_SB_VERSION_CDEV_WITH_UUID 3
+#define BCACHE_SB_VERSION_BDEV_WITH_OFFSET 4
+#define BCACHE_SB_VERSION_CDEV_WITH_FEATURES 5
+#define BCACHE_SB_VERSION_BDEV_WITH_FEATURES 6
+#define BCACHE_SB_MAX_VERSION 6
+
+#define SB_SECTOR 8
+#define SB_OFFSET (SB_SECTOR << SECTOR_SHIFT)
+#define SB_SIZE 4096
+#define SB_LABEL_SIZE 32
+#define SB_JOURNAL_BUCKETS 256U
+/* SB_JOURNAL_BUCKETS must be divisible by BITS_PER_LONG */
+#define MAX_CACHES_PER_SET 8
+
+#define BDEV_DATA_START_DEFAULT 16 /* sectors */
+
+struct cache_sb_disk {
+ __le64 csum;
+ __le64 offset; /* sector where this sb was written */
+ __le64 version;
+
+ __u8 magic[16];
+
+ __u8 uuid[16];
+ union {
+ __u8 set_uuid[16];
+ __le64 set_magic;
+ };
+ __u8 label[SB_LABEL_SIZE];
+
+ __le64 flags;
+ __le64 seq;
+
+ __le64 feature_compat;
+ __le64 feature_incompat;
+ __le64 feature_ro_compat;
+
+ __le64 pad[5];
+
+ union {
+ struct {
+ /* Cache devices */
+ __le64 nbuckets; /* device size */
+
+ __le16 block_size; /* sectors */
+ __le16 bucket_size; /* sectors */
+
+ __le16 nr_in_set;
+ __le16 nr_this_dev;
+ };
+ struct {
+ /* Backing devices */
+ __le64 data_offset;
+
+ /*
+ * block_size from the cache device section is still used by
+ * backing devices, so don't add anything here until we fix
+ * things to not need it for backing devices anymore
+ */
+ };
+ };
+
+ __le32 last_mount; /* time overflow in y2106 */
+
+ __le16 first_bucket;
+ union {
+ __le16 njournal_buckets;
+ __le16 keys;
+ };
+ __le64 d[SB_JOURNAL_BUCKETS]; /* journal buckets */
+ __le16 obso_bucket_size_hi; /* obsoleted */
+};
+
+/*
+ * This is the in-memory bcache super block.
+ * NOTE: cache_sb does NOT map exactly to cache_sb_disk; the member
+ * sizes, ordering and even the whole struct size may differ
+ * from cache_sb_disk.
+ */
+struct cache_sb {
+ __u64 offset; /* sector where this sb was written */
+ __u64 version;
+
+ __u8 magic[16];
+
+ __u8 uuid[16];
+ union {
+ __u8 set_uuid[16];
+ __u64 set_magic;
+ };
+ __u8 label[SB_LABEL_SIZE];
+
+ __u64 flags;
+ __u64 seq;
+
+ __u64 feature_compat;
+ __u64 feature_incompat;
+ __u64 feature_ro_compat;
+
+ union {
+ struct {
+ /* Cache devices */
+ __u64 nbuckets; /* device size */
+
+ __u16 block_size; /* sectors */
+ __u16 nr_in_set;
+ __u16 nr_this_dev;
+ __u32 bucket_size; /* sectors */
+ };
+ struct {
+ /* Backing devices */
+ __u64 data_offset;
+
+ /*
+ * block_size from the cache device section is still used by
+ * backing devices, so don't add anything here until we fix
+ * things to not need it for backing devices anymore
+ */
+ };
+ };
+
+ __u32 last_mount; /* time overflow in y2106 */
+
+ __u16 first_bucket;
+ union {
+ __u16 njournal_buckets;
+ __u16 keys;
+ };
+ __u64 d[SB_JOURNAL_BUCKETS]; /* journal buckets */
+};
+
+static inline _Bool SB_IS_BDEV(const struct cache_sb *sb)
+{
+ return sb->version == BCACHE_SB_VERSION_BDEV
+ || sb->version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET
+ || sb->version == BCACHE_SB_VERSION_BDEV_WITH_FEATURES;
+}
+
+BITMASK(CACHE_SYNC, struct cache_sb, flags, 0, 1);
+BITMASK(CACHE_DISCARD, struct cache_sb, flags, 1, 1);
+BITMASK(CACHE_REPLACEMENT, struct cache_sb, flags, 2, 3);
+#define CACHE_REPLACEMENT_LRU 0U
+#define CACHE_REPLACEMENT_FIFO 1U
+#define CACHE_REPLACEMENT_RANDOM 2U
+
+BITMASK(BDEV_CACHE_MODE, struct cache_sb, flags, 0, 4);
+#define CACHE_MODE_WRITETHROUGH 0U
+#define CACHE_MODE_WRITEBACK 1U
+#define CACHE_MODE_WRITEAROUND 2U
+#define CACHE_MODE_NONE 3U
+BITMASK(BDEV_STATE, struct cache_sb, flags, 61, 2);
+#define BDEV_STATE_NONE 0U
+#define BDEV_STATE_CLEAN 1U
+#define BDEV_STATE_DIRTY 2U
+#define BDEV_STATE_STALE 3U
+
+/*
+ * Magic numbers
+ *
+ * The various other data structures have their own magic numbers, which are
+ * xored with the first part of the cache set's UUID
+ */
+
+#define JSET_MAGIC 0x245235c1a3625032ULL
+#define PSET_MAGIC 0x6750e15f87337f91ULL
+#define BSET_MAGIC 0x90135c78b99e07f5ULL
+
+static inline __u64 jset_magic(struct cache_sb *sb)
+{
+ return sb->set_magic ^ JSET_MAGIC;
+}
+
+static inline __u64 pset_magic(struct cache_sb *sb)
+{
+ return sb->set_magic ^ PSET_MAGIC;
+}
+
+static inline __u64 bset_magic(struct cache_sb *sb)
+{
+ return sb->set_magic ^ BSET_MAGIC;
+}
+
+/*
+ * Journal
+ *
+ * On disk format for a journal entry:
+ * seq is monotonically increasing; every journal entry has its own unique
+ * sequence number.
+ *
+ * last_seq is the oldest journal entry that still has keys the btree hasn't
+ * flushed to disk yet.
+ *
+ * version is for on disk format changes.
+ */
+
+#define BCACHE_JSET_VERSION_UUIDv1 1
+#define BCACHE_JSET_VERSION_UUID 1 /* Always latest UUID format */
+#define BCACHE_JSET_VERSION 1
+
+struct jset {
+ __u64 csum;
+ __u64 magic;
+ __u64 seq;
+ __u32 version;
+ __u32 keys;
+
+ __u64 last_seq;
+
+ BKEY_PADDED(uuid_bucket);
+ BKEY_PADDED(btree_root);
+ __u16 btree_level;
+ __u16 pad[3];
+
+ __u64 prio_bucket[MAX_CACHES_PER_SET];
+
+ union {
+ DECLARE_FLEX_ARRAY(struct bkey, start);
+ DECLARE_FLEX_ARRAY(__u64, d);
+ };
+};
+
+/* Bucket prios/gens */
+
+struct prio_set {
+ __u64 csum;
+ __u64 magic;
+ __u64 seq;
+ __u32 version;
+ __u32 pad;
+
+ __u64 next_bucket;
+
+ struct bucket_disk {
+ __u16 prio;
+ __u8 gen;
+ } __attribute((packed)) data[];
+};
+
+/* UUIDS - per backing device/flash only volume metadata */
+
+struct uuid_entry {
+ union {
+ struct {
+ __u8 uuid[16];
+ __u8 label[32];
+ __u32 first_reg; /* time overflow in y2106 */
+ __u32 last_reg;
+ __u32 invalidated;
+
+ __u32 flags;
+ /* Size of flash only volumes */
+ __u64 sectors;
+ };
+
+ __u8 pad[128];
+ };
+};
+
+BITMASK(UUID_FLASH_ONLY, struct uuid_entry, flags, 0, 1);
+
+/* Btree nodes */
+
+/* Version 1: Seed pointer into btree node checksum
+ */
+#define BCACHE_BSET_CSUM 1
+#define BCACHE_BSET_VERSION 1
+
+/*
+ * Btree nodes
+ *
+ * On disk a btree node is a list/log of these; within each set the keys are
+ * sorted
+ */
+struct bset {
+ __u64 csum;
+ __u64 magic;
+ __u64 seq;
+ __u32 version;
+ __u32 keys;
+
+ union {
+ DECLARE_FLEX_ARRAY(struct bkey, start);
+ DECLARE_FLEX_ARRAY(__u64, d);
+ };
+};
+
+/* OBSOLETE */
+
+/* UUIDS - per backing device/flash only volume metadata */
+
+struct uuid_entry_v0 {
+ __u8 uuid[16];
+ __u8 label[32];
+ __u32 first_reg;
+ __u32 last_reg;
+ __u32 invalidated;
+ __u32 pad;
+};
+
+#endif /* _LINUX_BCACHE_H */
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
new file mode 100644
index 0000000000..2bba4d6aaa
--- /dev/null
+++ b/drivers/md/bcache/bset.c
@@ -0,0 +1,1390 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Code for working with individual keys, and sorted sets of keys within a
+ * btree node
+ *
+ * Copyright 2012 Google, Inc.
+ */
+
+#define pr_fmt(fmt) "bcache: %s() " fmt, __func__
+
+#include "util.h"
+#include "bset.h"
+
+#include <linux/console.h>
+#include <linux/sched/clock.h>
+#include <linux/random.h>
+#include <linux/prefetch.h>
+
+#ifdef CONFIG_BCACHE_DEBUG
+
+void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set)
+{
+ struct bkey *k, *next;
+
+ for (k = i->start; k < bset_bkey_last(i); k = next) {
+ next = bkey_next(k);
+
+ pr_err("block %u key %u/%u: ", set,
+ (unsigned int) ((u64 *) k - i->d), i->keys);
+
+ if (b->ops->key_dump)
+ b->ops->key_dump(b, k);
+ else
+ pr_cont("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));
+
+ if (next < bset_bkey_last(i) &&
+ bkey_cmp(k, b->ops->is_extents ?
+ &START_KEY(next) : next) > 0)
+ pr_err("Key skipped backwards\n");
+ }
+}
+
+void bch_dump_bucket(struct btree_keys *b)
+{
+ unsigned int i;
+
+ console_lock();
+ for (i = 0; i <= b->nsets; i++)
+ bch_dump_bset(b, b->set[i].data,
+ bset_sector_offset(b, b->set[i].data));
+ console_unlock();
+}
+
+int __bch_count_data(struct btree_keys *b)
+{
+ unsigned int ret = 0;
+ struct btree_iter iter;
+ struct bkey *k;
+
+ if (b->ops->is_extents)
+ for_each_key(b, k, &iter)
+ ret += KEY_SIZE(k);
+ return ret;
+}
+
+void __bch_check_keys(struct btree_keys *b, const char *fmt, ...)
+{
+ va_list args;
+ struct bkey *k, *p = NULL;
+ struct btree_iter iter;
+ const char *err;
+
+ for_each_key(b, k, &iter) {
+ if (b->ops->is_extents) {
+ err = "Keys out of order";
+ if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
+ goto bug;
+
+ if (bch_ptr_invalid(b, k))
+ continue;
+
+ err = "Overlapping keys";
+ if (p && bkey_cmp(p, &START_KEY(k)) > 0)
+ goto bug;
+ } else {
+ if (bch_ptr_bad(b, k))
+ continue;
+
+ err = "Duplicate keys";
+ if (p && !bkey_cmp(p, k))
+ goto bug;
+ }
+ p = k;
+ }
+#if 0
+ err = "Key larger than btree node key";
+ if (p && bkey_cmp(p, &b->key) > 0)
+ goto bug;
+#endif
+ return;
+bug:
+ bch_dump_bucket(b);
+
+ va_start(args, fmt);
+ vprintk(fmt, args);
+ va_end(args);
+
+ panic("bch_check_keys error: %s:\n", err);
+}
+
+static void bch_btree_iter_next_check(struct btree_iter *iter)
+{
+ struct bkey *k = iter->data->k, *next = bkey_next(k);
+
+ if (next < iter->data->end &&
+ bkey_cmp(k, iter->b->ops->is_extents ?
+ &START_KEY(next) : next) > 0) {
+ bch_dump_bucket(iter->b);
+ panic("Key skipped backwards\n");
+ }
+}
+
+#else
+
+static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}
+
+#endif
+
+/* Keylists */
+
+int __bch_keylist_realloc(struct keylist *l, unsigned int u64s)
+{
+ size_t oldsize = bch_keylist_nkeys(l);
+ size_t newsize = oldsize + u64s;
+ uint64_t *old_keys = l->keys_p == l->inline_keys ? NULL : l->keys_p;
+ uint64_t *new_keys;
+
+ newsize = roundup_pow_of_two(newsize);
+
+ if (newsize <= KEYLIST_INLINE ||
+ roundup_pow_of_two(oldsize) == newsize)
+ return 0;
+
+ new_keys = krealloc(old_keys, sizeof(uint64_t) * newsize, GFP_NOIO);
+
+ if (!new_keys)
+ return -ENOMEM;
+
+ if (!old_keys)
+ memcpy(new_keys, l->inline_keys, sizeof(uint64_t) * oldsize);
+
+ l->keys_p = new_keys;
+ l->top_p = new_keys + oldsize;
+
+ return 0;
+}
+
+/* Pop the top key of keylist by pointing l->top to its previous key */
+struct bkey *bch_keylist_pop(struct keylist *l)
+{
+ struct bkey *k = l->keys;
+
+ if (k == l->top)
+ return NULL;
+
+ while (bkey_next(k) != l->top)
+ k = bkey_next(k);
+
+ return l->top = k;
+}
+
+/* Pop the bottom key of keylist and update l->top_p */
+void bch_keylist_pop_front(struct keylist *l)
+{
+ l->top_p -= bkey_u64s(l->keys);
+
+ memmove(l->keys,
+ bkey_next(l->keys),
+ bch_keylist_bytes(l));
+}
+
+/* Key/pointer manipulation */
+
+void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
+ unsigned int i)
+{
+ BUG_ON(i > KEY_PTRS(src));
+
+ /* Only copy the header, key, and one pointer. */
+ memcpy(dest, src, 2 * sizeof(uint64_t));
+ dest->ptr[0] = src->ptr[i];
+ SET_KEY_PTRS(dest, 1);
+ /* We didn't copy the checksum so clear that bit. */
+ SET_KEY_CSUM(dest, 0);
+}
+
+bool __bch_cut_front(const struct bkey *where, struct bkey *k)
+{
+ unsigned int i, len = 0;
+
+ if (bkey_cmp(where, &START_KEY(k)) <= 0)
+ return false;
+
+ if (bkey_cmp(where, k) < 0)
+ len = KEY_OFFSET(k) - KEY_OFFSET(where);
+ else
+ bkey_copy_key(k, where);
+
+ for (i = 0; i < KEY_PTRS(k); i++)
+ SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + KEY_SIZE(k) - len);
+
+ BUG_ON(len > KEY_SIZE(k));
+ SET_KEY_SIZE(k, len);
+ return true;
+}
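+
+/*
+ * For example, cutting KEY(1, 100, 20) at KEY(1, 90, 0) leaves a key that
+ * still ends at offset 100 but has KEY_SIZE() == 10, with each PTR_OFFSET()
+ * advanced by the 10 sectors that were dropped from the front.
+ */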
+
+bool __bch_cut_back(const struct bkey *where, struct bkey *k)
+{
+ unsigned int len = 0;
+
+ if (bkey_cmp(where, k) >= 0)
+ return false;
+
+ BUG_ON(KEY_INODE(where) != KEY_INODE(k));
+
+ if (bkey_cmp(where, &START_KEY(k)) > 0)
+ len = KEY_OFFSET(where) - KEY_START(k);
+
+ bkey_copy_key(k, where);
+
+ BUG_ON(len > KEY_SIZE(k));
+ SET_KEY_SIZE(k, len);
+ return true;
+}
+
+/* Auxiliary search trees */
+
+/* 32 bits total: */
+#define BKEY_MID_BITS 3
+#define BKEY_EXPONENT_BITS 7
+#define BKEY_MANTISSA_BITS (32 - BKEY_MID_BITS - BKEY_EXPONENT_BITS)
+#define BKEY_MANTISSA_MASK ((1 << BKEY_MANTISSA_BITS) - 1)
+
+struct bkey_float {
+ unsigned int exponent:BKEY_EXPONENT_BITS;
+ unsigned int m:BKEY_MID_BITS;
+ unsigned int mantissa:BKEY_MANTISSA_BITS;
+} __packed;
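+
+/*
+ * With BKEY_MID_BITS == 3 and BKEY_EXPONENT_BITS == 7 the mantissa gets
+ * 32 - 3 - 7 == 22 bits, so each struct bkey_float packs into 4 bytes.
+ */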
+
+/*
+ * BSET_CACHELINE was originally intended to match the hardware cacheline size -
+ * it used to be 64, but I realized the lookup code would touch slightly less
+ * memory if it was 128.
+ *
+ * It defines the number of bytes (in struct bset) per struct bkey_float in
+ * the auxiliary search tree - when we're done searching the bkey_float tree we
+ * have this many bytes left that we do a linear search over.
+ *
+ * Since (after level 5) every level of the bset_tree is on a new cacheline,
+ * we're touching one fewer cacheline in the bset tree in exchange for one more
+ * cacheline in the linear search - but the linear search might stop before it
+ * gets to the second cacheline.
+ */
+
+#define BSET_CACHELINE 128
+
+/* Space required for the btree node keys */
+static inline size_t btree_keys_bytes(struct btree_keys *b)
+{
+ return PAGE_SIZE << b->page_order;
+}
+
+static inline size_t btree_keys_cachelines(struct btree_keys *b)
+{
+ return btree_keys_bytes(b) / BSET_CACHELINE;
+}
+
+/* Space required for the auxiliary search trees */
+static inline size_t bset_tree_bytes(struct btree_keys *b)
+{
+ return btree_keys_cachelines(b) * sizeof(struct bkey_float);
+}
+
+/* Space required for the prev pointers */
+static inline size_t bset_prev_bytes(struct btree_keys *b)
+{
+ return btree_keys_cachelines(b) * sizeof(uint8_t);
+}
+
+/* Memory allocation */
+
+void bch_btree_keys_free(struct btree_keys *b)
+{
+ struct bset_tree *t = b->set;
+
+ if (bset_prev_bytes(b) < PAGE_SIZE)
+ kfree(t->prev);
+ else
+ free_pages((unsigned long) t->prev,
+ get_order(bset_prev_bytes(b)));
+
+ if (bset_tree_bytes(b) < PAGE_SIZE)
+ kfree(t->tree);
+ else
+ free_pages((unsigned long) t->tree,
+ get_order(bset_tree_bytes(b)));
+
+ free_pages((unsigned long) t->data, b->page_order);
+
+ t->prev = NULL;
+ t->tree = NULL;
+ t->data = NULL;
+}
+
+int bch_btree_keys_alloc(struct btree_keys *b,
+ unsigned int page_order,
+ gfp_t gfp)
+{
+ struct bset_tree *t = b->set;
+
+ BUG_ON(t->data);
+
+ b->page_order = page_order;
+
+ t->data = (void *) __get_free_pages(__GFP_COMP|gfp, b->page_order);
+ if (!t->data)
+ goto err;
+
+ t->tree = bset_tree_bytes(b) < PAGE_SIZE
+ ? kmalloc(bset_tree_bytes(b), gfp)
+ : (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b)));
+ if (!t->tree)
+ goto err;
+
+ t->prev = bset_prev_bytes(b) < PAGE_SIZE
+ ? kmalloc(bset_prev_bytes(b), gfp)
+ : (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b)));
+ if (!t->prev)
+ goto err;
+
+ return 0;
+err:
+ bch_btree_keys_free(b);
+ return -ENOMEM;
+}
+
+void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
+ bool *expensive_debug_checks)
+{
+ b->ops = ops;
+ b->expensive_debug_checks = expensive_debug_checks;
+ b->nsets = 0;
+ b->last_set_unwritten = 0;
+
+ /*
+	 * struct btree_keys is embedded in struct btree, and struct
+	 * bset_tree is embedded in struct btree_keys. They are all
+	 * initialized to 0 by kzalloc() in mca_bucket_alloc(), and
+	 * b->set[0].data is allocated in bch_btree_keys_alloc(), so we
+	 * don't have to initialize b->set[].size and b->set[].data here
+	 * any more.
+ */
+}
+
+/* Binary tree stuff for auxiliary search trees */
+
+/*
+ * Return the array index that follows j in an in-order traversal
+ * of a binary tree stored in a linear array.
+ */
+static unsigned int inorder_next(unsigned int j, unsigned int size)
+{
+ if (j * 2 + 1 < size) {
+ j = j * 2 + 1;
+
+ while (j * 2 < size)
+ j *= 2;
+ } else
+ j >>= ffz(j) + 1;
+
+ return j;
+}
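+
+/*
+ * For instance, with size == 7 the tree occupies array indices 1..6 and its
+ * in-order sequence is 4, 2, 5, 1, 6, 3; inorder_next(0, 7) returns 4 (the
+ * first node in-order) and inorder_next(1, 7) returns 6.
+ */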
+
+/*
+ * Return the array index that precedes j in an in-order traversal
+ * of a binary tree stored in a linear array.
+ */
+static unsigned int inorder_prev(unsigned int j, unsigned int size)
+{
+ if (j * 2 < size) {
+ j = j * 2;
+
+ while (j * 2 + 1 < size)
+ j = j * 2 + 1;
+ } else
+ j >>= ffs(j);
+
+ return j;
+}
+
+/*
+ * I have no idea why this code works... and I'm the one who wrote it
+ *
+ * However, I do know what it does:
+ * Given a binary tree constructed in an array (i.e. how you normally implement
+ * a heap), it converts a node in the tree - referenced by array index - to the
+ * index it would have if you did an inorder traversal.
+ *
+ * Also tested for every j, for sizes up to somewhere around 6 million.
+ *
+ * The binary tree starts at array index 1, not 0
+ * extra is a function of size:
+ * extra = (size - rounddown_pow_of_two(size - 1)) << 1;
+ */
+static unsigned int __to_inorder(unsigned int j,
+ unsigned int size,
+ unsigned int extra)
+{
+ unsigned int b = fls(j);
+ unsigned int shift = fls(size - 1) - b;
+
+ j ^= 1U << (b - 1);
+ j <<= 1;
+ j |= 1;
+ j <<= shift;
+
+ if (j > extra)
+ j -= (j - extra) >> 1;
+
+ return j;
+}
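+
+/*
+ * Worked example: for size == 7, extra == (7 - 4) << 1 == 6, and
+ * __to_inorder() maps tree indices 1..6 to in-order (cacheline) indices
+ * 4, 2, 6, 1, 3, 5 respectively; __inorder_to_tree() below is its inverse.
+ */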
+
+/*
+ * Return the cacheline index in bset_tree->data, where j is an index
+ * into the linear array which stores the auxiliary binary tree.
+ */
+static unsigned int to_inorder(unsigned int j, struct bset_tree *t)
+{
+ return __to_inorder(j, t->size, t->extra);
+}
+
+static unsigned int __inorder_to_tree(unsigned int j,
+ unsigned int size,
+ unsigned int extra)
+{
+ unsigned int shift;
+
+ if (j > extra)
+ j += j - extra;
+
+ shift = ffs(j);
+
+ j >>= shift;
+ j |= roundup_pow_of_two(size) >> shift;
+
+ return j;
+}
+
+/*
+ * Return an index into the linear array which stores the auxiliary binary
+ * tree; j is the cacheline index of t->data.
+ */
+static unsigned int inorder_to_tree(unsigned int j, struct bset_tree *t)
+{
+ return __inorder_to_tree(j, t->size, t->extra);
+}
+
+#if 0
+void inorder_test(void)
+{
+ unsigned long done = 0;
+ ktime_t start = ktime_get();
+
+ for (unsigned int size = 2;
+ size < 65536000;
+ size++) {
+ unsigned int extra =
+ (size - rounddown_pow_of_two(size - 1)) << 1;
+ unsigned int i = 1, j = rounddown_pow_of_two(size - 1);
+
+ if (!(size % 4096))
+ pr_notice("loop %u, %llu per us\n", size,
+ done / ktime_us_delta(ktime_get(), start));
+
+ while (1) {
+ if (__inorder_to_tree(i, size, extra) != j)
+ panic("size %10u j %10u i %10u", size, j, i);
+
+ if (__to_inorder(j, size, extra) != i)
+ panic("size %10u j %10u i %10u", size, j, i);
+
+ if (j == rounddown_pow_of_two(size) - 1)
+ break;
+
+ BUG_ON(inorder_prev(inorder_next(j, size), size) != j);
+
+ j = inorder_next(j, size);
+ i++;
+ }
+
+ done += size - 1;
+ }
+}
+#endif
+
+/*
+ * Cacheline/offset <-> bkey pointer arithmetic:
+ *
+ * t->tree is a binary search tree in an array; each node corresponds to a key
+ * in one cacheline of t->data (BSET_CACHELINE bytes).
+ *
+ * This means we don't have to store the full index of the key that a node in
+ * the binary tree points to; to_inorder() gives us the cacheline, and then
+ * bkey_float->m gives us the offset within that cacheline, in units of 8 bytes.
+ *
+ * cacheline_to_bkey() and friends abstract out all the pointer arithmetic to
+ * make this work.
+ *
+ * To construct the bfloat for an arbitrary key we need to know what the key
+ * immediately preceding it is: we have to check if the two keys differ in the
+ * bits we're going to store in bkey_float->mantissa. t->prev[j] stores the size
+ * of the previous key so we can walk backwards to it from t->tree[j]'s key.
+ */
+
+static struct bkey *cacheline_to_bkey(struct bset_tree *t,
+ unsigned int cacheline,
+ unsigned int offset)
+{
+ return ((void *) t->data) + cacheline * BSET_CACHELINE + offset * 8;
+}
+
+static unsigned int bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
+{
+ return ((void *) k - (void *) t->data) / BSET_CACHELINE;
+}
+
+static unsigned int bkey_to_cacheline_offset(struct bset_tree *t,
+ unsigned int cacheline,
+ struct bkey *k)
+{
+ return (u64 *) k - (u64 *) cacheline_to_bkey(t, cacheline, 0);
+}
+
+static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned int j)
+{
+ return cacheline_to_bkey(t, to_inorder(j, t), t->tree[j].m);
+}
+
+static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned int j)
+{
+ return (void *) (((uint64_t *) tree_to_bkey(t, j)) - t->prev[j]);
+}
+
+/*
+ * For the write set - the one we're currently inserting keys into - we don't
+ * maintain a full search tree, we just keep a simple lookup table in t->prev.
+ */
+static struct bkey *table_to_bkey(struct bset_tree *t, unsigned int cacheline)
+{
+ return cacheline_to_bkey(t, cacheline, t->prev[cacheline]);
+}
+
+static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
+{
+ low >>= shift;
+ low |= (high << 1) << (63U - shift);
+ return low;
+}
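+
+/*
+ * shrd128() returns the low 64 bits of the 128-bit value (high:low) shifted
+ * right by 'shift', for 0 <= shift <= 63; splitting the left shift as
+ * (high << 1) << (63 - shift) avoids an undefined shift by 64 when shift is 0.
+ */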
+
+/*
+ * Calculate mantissa value for struct bkey_float.
+ * If the most significant bit of f->exponent is not set, then
+ * - f->exponent >> 6 is 0
+ * - p[0] points to bkey->low
+ * - p[-1] borrows bits from KEY_INODE() of bkey->high
+ * If the most significant bit of f->exponent is set, then
+ * - f->exponent >> 6 is 1
+ * - p[0] points to bits from KEY_INODE() of bkey->high
+ * - p[-1] points to other bits from KEY_INODE() of
+ * bkey->high too.
+ * See make_bfloat() to check when the most significant bit of
+ * f->exponent is set or not.
+ */
+static inline unsigned int bfloat_mantissa(const struct bkey *k,
+ struct bkey_float *f)
+{
+ const uint64_t *p = &k->low - (f->exponent >> 6);
+
+ return shrd128(p[-1], p[0], f->exponent & 63) & BKEY_MANTISSA_MASK;
+}
+
+static void make_bfloat(struct bset_tree *t, unsigned int j)
+{
+ struct bkey_float *f = &t->tree[j];
+ struct bkey *m = tree_to_bkey(t, j);
+ struct bkey *p = tree_to_prev_bkey(t, j);
+
+ struct bkey *l = is_power_of_2(j)
+ ? t->data->start
+ : tree_to_prev_bkey(t, j >> ffs(j));
+
+ struct bkey *r = is_power_of_2(j + 1)
+ ? bset_bkey_idx(t->data, t->data->keys - bkey_u64s(&t->end))
+ : tree_to_bkey(t, j >> (ffz(j) + 1));
+
+ BUG_ON(m < l || m > r);
+ BUG_ON(bkey_next(p) != m);
+
+ /*
+	 * If l and r have different KEY_INODE values (different backing
+	 * devices), f->exponent records how many least significant bits
+	 * differ between the KEY_INODE values and sets the most significant
+	 * bit to 1 (by +64).
+	 * If l and r have the same KEY_INODE value, f->exponent records
+	 * how many of the least significant bits of bkey->low differ.
+	 * See bfloat_mantissa() for how the most significant bit of
+	 * f->exponent is used to calculate the bfloat mantissa value.
+ */
+ if (KEY_INODE(l) != KEY_INODE(r))
+ f->exponent = fls64(KEY_INODE(r) ^ KEY_INODE(l)) + 64;
+ else
+ f->exponent = fls64(r->low ^ l->low);
+
+ f->exponent = max_t(int, f->exponent - BKEY_MANTISSA_BITS, 0);
+
+ /*
+ * Setting f->exponent = 127 flags this node as failed, and causes the
+ * lookup code to fall back to comparing against the original key.
+ */
+
+ if (bfloat_mantissa(m, f) != bfloat_mantissa(p, f))
+ f->mantissa = bfloat_mantissa(m, f) - 1;
+ else
+ f->exponent = 127;
+}
+
+static void bset_alloc_tree(struct btree_keys *b, struct bset_tree *t)
+{
+ if (t != b->set) {
+ unsigned int j = roundup(t[-1].size,
+ 64 / sizeof(struct bkey_float));
+
+ t->tree = t[-1].tree + j;
+ t->prev = t[-1].prev + j;
+ }
+
+ while (t < b->set + MAX_BSETS)
+ t++->size = 0;
+}
+
+static void bch_bset_build_unwritten_tree(struct btree_keys *b)
+{
+ struct bset_tree *t = bset_tree_last(b);
+
+ BUG_ON(b->last_set_unwritten);
+ b->last_set_unwritten = 1;
+
+ bset_alloc_tree(b, t);
+
+ if (t->tree != b->set->tree + btree_keys_cachelines(b)) {
+ t->prev[0] = bkey_to_cacheline_offset(t, 0, t->data->start);
+ t->size = 1;
+ }
+}
+
+void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic)
+{
+ if (i != b->set->data) {
+ b->set[++b->nsets].data = i;
+ i->seq = b->set->data->seq;
+ } else
+ get_random_bytes(&i->seq, sizeof(uint64_t));
+
+ i->magic = magic;
+ i->version = 0;
+ i->keys = 0;
+
+ bch_bset_build_unwritten_tree(b);
+}
+
+/*
+ * Build the auxiliary binary tree 'struct bset_tree *t'; this tree is used to
+ * accelerate bkey searches in a btree node (pointed to by bset_tree->data in
+ * memory). After searching the auxiliary tree by calling bset_search_tree(),
+ * a struct bset_search_iter is returned which indicates the range [l, r] in
+ * bset_tree->data where the bkey being searched for might be. A subsequent
+ * linear comparison then does the exact search; see __bch_bset_search() for how
+ * the auxiliary tree is used.
+ */
+void bch_bset_build_written_tree(struct btree_keys *b)
+{
+ struct bset_tree *t = bset_tree_last(b);
+ struct bkey *prev = NULL, *k = t->data->start;
+ unsigned int j, cacheline = 1;
+
+ b->last_set_unwritten = 0;
+
+ bset_alloc_tree(b, t);
+
+ t->size = min_t(unsigned int,
+ bkey_to_cacheline(t, bset_bkey_last(t->data)),
+ b->set->tree + btree_keys_cachelines(b) - t->tree);
+
+ if (t->size < 2) {
+ t->size = 0;
+ return;
+ }
+
+ t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;
+
+ /* First we figure out where the first key in each cacheline is */
+ for (j = inorder_next(0, t->size);
+ j;
+ j = inorder_next(j, t->size)) {
+ while (bkey_to_cacheline(t, k) < cacheline) {
+ prev = k;
+ k = bkey_next(k);
+ }
+
+ t->prev[j] = bkey_u64s(prev);
+ t->tree[j].m = bkey_to_cacheline_offset(t, cacheline++, k);
+ }
+
+ while (bkey_next(k) != bset_bkey_last(t->data))
+ k = bkey_next(k);
+
+ t->end = *k;
+
+ /* Then we build the tree */
+ for (j = inorder_next(0, t->size);
+ j;
+ j = inorder_next(j, t->size))
+ make_bfloat(t, j);
+}
+
+/* Insert */
+
+void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k)
+{
+ struct bset_tree *t;
+ unsigned int inorder, j = 1;
+
+ for (t = b->set; t <= bset_tree_last(b); t++)
+ if (k < bset_bkey_last(t->data))
+ goto found_set;
+
+ BUG();
+found_set:
+ if (!t->size || !bset_written(b, t))
+ return;
+
+ inorder = bkey_to_cacheline(t, k);
+
+ if (k == t->data->start)
+ goto fix_left;
+
+ if (bkey_next(k) == bset_bkey_last(t->data)) {
+ t->end = *k;
+ goto fix_right;
+ }
+
+ j = inorder_to_tree(inorder, t);
+
+ if (j &&
+ j < t->size &&
+ k == tree_to_bkey(t, j))
+fix_left: do {
+ make_bfloat(t, j);
+ j = j * 2;
+ } while (j < t->size);
+
+ j = inorder_to_tree(inorder + 1, t);
+
+ if (j &&
+ j < t->size &&
+ k == tree_to_prev_bkey(t, j))
+fix_right: do {
+ make_bfloat(t, j);
+ j = j * 2 + 1;
+ } while (j < t->size);
+}
+
+static void bch_bset_fix_lookup_table(struct btree_keys *b,
+ struct bset_tree *t,
+ struct bkey *k)
+{
+ unsigned int shift = bkey_u64s(k);
+ unsigned int j = bkey_to_cacheline(t, k);
+
+ /* We're getting called from btree_split() or btree_gc, just bail out */
+ if (!t->size)
+ return;
+
+ /*
+ * k is the key we just inserted; we need to find the entry in the
+ * lookup table for the first key that is strictly greater than k:
+ * it's either k's cacheline or the next one
+ */
+ while (j < t->size &&
+ table_to_bkey(t, j) <= k)
+ j++;
+
+ /*
+ * Adjust all the lookup table entries, and find a new key for any that
+ * have gotten too big
+ */
+ for (; j < t->size; j++) {
+ t->prev[j] += shift;
+
+ if (t->prev[j] > 7) {
+ k = table_to_bkey(t, j - 1);
+
+ while (k < cacheline_to_bkey(t, j, 0))
+ k = bkey_next(k);
+
+ t->prev[j] = bkey_to_cacheline_offset(t, j, k);
+ }
+ }
+
+ if (t->size == b->set->tree + btree_keys_cachelines(b) - t->tree)
+ return;
+
+ /* Possibly add a new entry to the end of the lookup table */
+
+ for (k = table_to_bkey(t, t->size - 1);
+ k != bset_bkey_last(t->data);
+ k = bkey_next(k))
+ if (t->size == bkey_to_cacheline(t, k)) {
+ t->prev[t->size] =
+ bkey_to_cacheline_offset(t, t->size, k);
+ t->size++;
+ }
+}
+
+/*
+ * Tries to merge l and r: l should be lower than r
+ * Returns true if we were able to merge. If we did merge, l will be the merged
+ * key, r will be untouched.
+ */
+bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r)
+{
+ if (!b->ops->key_merge)
+ return false;
+
+ /*
+ * Generic header checks
+ * Assumes left and right are in order
+ * Left and right must be exactly aligned
+ */
+ if (!bch_bkey_equal_header(l, r) ||
+ bkey_cmp(l, &START_KEY(r)))
+ return false;
+
+ return b->ops->key_merge(b, l, r);
+}
+
+void bch_bset_insert(struct btree_keys *b, struct bkey *where,
+ struct bkey *insert)
+{
+ struct bset_tree *t = bset_tree_last(b);
+
+ BUG_ON(!b->last_set_unwritten);
+ BUG_ON(bset_byte_offset(b, t->data) +
+ __set_bytes(t->data, t->data->keys + bkey_u64s(insert)) >
+ PAGE_SIZE << b->page_order);
+
+ memmove((uint64_t *) where + bkey_u64s(insert),
+ where,
+ (void *) bset_bkey_last(t->data) - (void *) where);
+
+ t->data->keys += bkey_u64s(insert);
+ bkey_copy(where, insert);
+ bch_bset_fix_lookup_table(b, t, where);
+}
+
+unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
+ struct bkey *replace_key)
+{
+ unsigned int status = BTREE_INSERT_STATUS_NO_INSERT;
+ struct bset *i = bset_tree_last(b)->data;
+ struct bkey *m, *prev = NULL;
+ struct btree_iter iter;
+ struct bkey preceding_key_on_stack = ZERO_KEY;
+ struct bkey *preceding_key_p = &preceding_key_on_stack;
+
+ BUG_ON(b->ops->is_extents && !KEY_SIZE(k));
+
+ /*
+	 * If k has a preceding key, preceding_key_p will be set to the
+	 * address of k's preceding key; otherwise preceding_key_p will be
+	 * set to NULL inside preceding_key().
+ */
+ if (b->ops->is_extents)
+ preceding_key(&START_KEY(k), &preceding_key_p);
+ else
+ preceding_key(k, &preceding_key_p);
+
+ m = bch_btree_iter_init(b, &iter, preceding_key_p);
+
+ if (b->ops->insert_fixup(b, k, &iter, replace_key))
+ return status;
+
+ status = BTREE_INSERT_STATUS_INSERT;
+
+ while (m != bset_bkey_last(i) &&
+ bkey_cmp(k, b->ops->is_extents ? &START_KEY(m) : m) > 0) {
+ prev = m;
+ m = bkey_next(m);
+ }
+
+ /* prev is in the tree, if we merge we're done */
+ status = BTREE_INSERT_STATUS_BACK_MERGE;
+ if (prev &&
+ bch_bkey_try_merge(b, prev, k))
+ goto merged;
+#if 0
+ status = BTREE_INSERT_STATUS_OVERWROTE;
+ if (m != bset_bkey_last(i) &&
+ KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
+ goto copy;
+#endif
+ status = BTREE_INSERT_STATUS_FRONT_MERGE;
+ if (m != bset_bkey_last(i) &&
+ bch_bkey_try_merge(b, k, m))
+ goto copy;
+
+ bch_bset_insert(b, m, k);
+copy: bkey_copy(m, k);
+merged:
+ return status;
+}
+
+/* Lookup */
+
+struct bset_search_iter {
+ struct bkey *l, *r;
+};
+
+static struct bset_search_iter bset_search_write_set(struct bset_tree *t,
+ const struct bkey *search)
+{
+ unsigned int li = 0, ri = t->size;
+
+ while (li + 1 != ri) {
+ unsigned int m = (li + ri) >> 1;
+
+ if (bkey_cmp(table_to_bkey(t, m), search) > 0)
+ ri = m;
+ else
+ li = m;
+ }
+
+ return (struct bset_search_iter) {
+ table_to_bkey(t, li),
+ ri < t->size ? table_to_bkey(t, ri) : bset_bkey_last(t->data)
+ };
+}
+
+static struct bset_search_iter bset_search_tree(struct bset_tree *t,
+ const struct bkey *search)
+{
+ struct bkey *l, *r;
+ struct bkey_float *f;
+ unsigned int inorder, j, n = 1;
+
+ do {
+ unsigned int p = n << 4;
+
+ if (p < t->size)
+ prefetch(&t->tree[p]);
+
+ j = n;
+ f = &t->tree[j];
+
+ if (likely(f->exponent != 127)) {
+ if (f->mantissa >= bfloat_mantissa(search, f))
+ n = j * 2;
+ else
+ n = j * 2 + 1;
+ } else {
+ if (bkey_cmp(tree_to_bkey(t, j), search) > 0)
+ n = j * 2;
+ else
+ n = j * 2 + 1;
+ }
+ } while (n < t->size);
+
+ inorder = to_inorder(j, t);
+
+ /*
+ * n would have been the node we recursed to - the low bit tells us if
+ * we recursed left or recursed right.
+ */
+ if (n & 1) {
+ l = cacheline_to_bkey(t, inorder, f->m);
+
+ if (++inorder != t->size) {
+ f = &t->tree[inorder_next(j, t->size)];
+ r = cacheline_to_bkey(t, inorder, f->m);
+ } else
+ r = bset_bkey_last(t->data);
+ } else {
+ r = cacheline_to_bkey(t, inorder, f->m);
+
+ if (--inorder) {
+ f = &t->tree[inorder_prev(j, t->size)];
+ l = cacheline_to_bkey(t, inorder, f->m);
+ } else
+ l = t->data->start;
+ }
+
+ return (struct bset_search_iter) {l, r};
+}
+
+struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
+ const struct bkey *search)
+{
+ struct bset_search_iter i;
+
+ /*
+	 * First, we search for a cacheline, then we do a linear search
+	 * within that cacheline.
+	 *
+	 * To search for the cacheline, there are three different possibilities:
+ * * The set is too small to have a search tree, so we just do a linear
+ * search over the whole set.
+ * * The set is the one we're currently inserting into; keeping a full
+ * auxiliary search tree up to date would be too expensive, so we
+ * use a much simpler lookup table to do a binary search -
+ * bset_search_write_set().
+ * * Or we use the auxiliary search tree we constructed earlier -
+ * bset_search_tree()
+ */
+
+ if (unlikely(!t->size)) {
+ i.l = t->data->start;
+ i.r = bset_bkey_last(t->data);
+ } else if (bset_written(b, t)) {
+ /*
+ * Each node in the auxiliary search tree covers a certain range
+ * of bits, and keys above and below the set it covers might
+ * differ outside those bits - so we have to special case the
+ * start and end - handle that here:
+ */
+
+ if (unlikely(bkey_cmp(search, &t->end) >= 0))
+ return bset_bkey_last(t->data);
+
+ if (unlikely(bkey_cmp(search, t->data->start) < 0))
+ return t->data->start;
+
+ i = bset_search_tree(t, search);
+ } else {
+ BUG_ON(!b->nsets &&
+ t->size < bkey_to_cacheline(t, bset_bkey_last(t->data)));
+
+ i = bset_search_write_set(t, search);
+ }
+
+ if (btree_keys_expensive_checks(b)) {
+ BUG_ON(bset_written(b, t) &&
+ i.l != t->data->start &&
+ bkey_cmp(tree_to_prev_bkey(t,
+ inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
+ search) > 0);
+
+ BUG_ON(i.r != bset_bkey_last(t->data) &&
+ bkey_cmp(i.r, search) <= 0);
+ }
+
+ while (likely(i.l != i.r) &&
+ bkey_cmp(i.l, search) <= 0)
+ i.l = bkey_next(i.l);
+
+ return i.l;
+}
+
+/* Btree iterator */
+
+typedef bool (btree_iter_cmp_fn)(struct btree_iter_set,
+ struct btree_iter_set);
+
+static inline bool btree_iter_cmp(struct btree_iter_set l,
+ struct btree_iter_set r)
+{
+ return bkey_cmp(l.k, r.k) > 0;
+}
+
+static inline bool btree_iter_end(struct btree_iter *iter)
+{
+ return !iter->used;
+}
+
+void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
+ struct bkey *end)
+{
+ if (k != end)
+ BUG_ON(!heap_add(iter,
+ ((struct btree_iter_set) { k, end }),
+ btree_iter_cmp));
+}
+
+static struct bkey *__bch_btree_iter_init(struct btree_keys *b,
+ struct btree_iter *iter,
+ struct bkey *search,
+ struct bset_tree *start)
+{
+ struct bkey *ret = NULL;
+
+ iter->size = ARRAY_SIZE(iter->data);
+ iter->used = 0;
+
+#ifdef CONFIG_BCACHE_DEBUG
+ iter->b = b;
+#endif
+
+ for (; start <= bset_tree_last(b); start++) {
+ ret = bch_bset_search(b, start, search);
+ bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
+ }
+
+ return ret;
+}
+
+struct bkey *bch_btree_iter_init(struct btree_keys *b,
+ struct btree_iter *iter,
+ struct bkey *search)
+{
+ return __bch_btree_iter_init(b, iter, search, b->set);
+}
+
+static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
+ btree_iter_cmp_fn *cmp)
+{
+ struct btree_iter_set b __maybe_unused;
+ struct bkey *ret = NULL;
+
+ if (!btree_iter_end(iter)) {
+ bch_btree_iter_next_check(iter);
+
+ ret = iter->data->k;
+ iter->data->k = bkey_next(iter->data->k);
+
+ if (iter->data->k > iter->data->end) {
+ WARN_ONCE(1, "bset was corrupt!\n");
+ iter->data->k = iter->data->end;
+ }
+
+ if (iter->data->k == iter->data->end)
+ heap_pop(iter, b, cmp);
+ else
+ heap_sift(iter, 0, cmp);
+ }
+
+ return ret;
+}
+
+struct bkey *bch_btree_iter_next(struct btree_iter *iter)
+{
+ return __bch_btree_iter_next(iter, btree_iter_cmp);
+
+}
+
+struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
+ struct btree_keys *b, ptr_filter_fn fn)
+{
+ struct bkey *ret;
+
+ do {
+ ret = bch_btree_iter_next(iter);
+ } while (ret && fn(b, ret));
+
+ return ret;
+}
+
+/* Mergesort */
+
+void bch_bset_sort_state_free(struct bset_sort_state *state)
+{
+ mempool_exit(&state->pool);
+}
+
+int bch_bset_sort_state_init(struct bset_sort_state *state,
+ unsigned int page_order)
+{
+ spin_lock_init(&state->time.lock);
+
+ state->page_order = page_order;
+ state->crit_factor = int_sqrt(1 << page_order);
+
+ return mempool_init_page_pool(&state->pool, 1, page_order);
+}
+
+static void btree_mergesort(struct btree_keys *b, struct bset *out,
+ struct btree_iter *iter,
+ bool fixup, bool remove_stale)
+{
+ int i;
+ struct bkey *k, *last = NULL;
+ BKEY_PADDED(k) tmp;
+ bool (*bad)(struct btree_keys *, const struct bkey *) = remove_stale
+ ? bch_ptr_bad
+ : bch_ptr_invalid;
+
+ /* Heapify the iterator, using our comparison function */
+ for (i = iter->used / 2 - 1; i >= 0; --i)
+ heap_sift(iter, i, b->ops->sort_cmp);
+
+ while (!btree_iter_end(iter)) {
+ if (b->ops->sort_fixup && fixup)
+ k = b->ops->sort_fixup(iter, &tmp.k);
+ else
+ k = NULL;
+
+ if (!k)
+ k = __bch_btree_iter_next(iter, b->ops->sort_cmp);
+
+ if (bad(b, k))
+ continue;
+
+ if (!last) {
+ last = out->start;
+ bkey_copy(last, k);
+ } else if (!bch_bkey_try_merge(b, last, k)) {
+ last = bkey_next(last);
+ bkey_copy(last, k);
+ }
+ }
+
+ out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0;
+
+ pr_debug("sorted %i keys\n", out->keys);
+}
+
+static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
+ unsigned int start, unsigned int order, bool fixup,
+ struct bset_sort_state *state)
+{
+ uint64_t start_time;
+ bool used_mempool = false;
+ struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOWAIT,
+ order);
+ if (!out) {
+ struct page *outp;
+
+ BUG_ON(order > state->page_order);
+
+ outp = mempool_alloc(&state->pool, GFP_NOIO);
+ out = page_address(outp);
+ used_mempool = true;
+ order = state->page_order;
+ }
+
+ start_time = local_clock();
+
+ btree_mergesort(b, out, iter, fixup, false);
+ b->nsets = start;
+
+ if (!start && order == b->page_order) {
+ /*
+		 * Our temporary buffer is the same size as the btree node's
+		 * buffer, so we can just swap buffers instead of doing a big
+		 * memcpy().
+		 *
+		 * Don't worry even if 'out' is allocated from the mempool, it
+		 * can still be swapped here, because state->pool is a page
+		 * mempool created by mempool_init_page_pool(), which allocates
+		 * pages by alloc_pages() under the hood.
+ */
+
+ out->magic = b->set->data->magic;
+ out->seq = b->set->data->seq;
+ out->version = b->set->data->version;
+ swap(out, b->set->data);
+ } else {
+ b->set[start].data->keys = out->keys;
+ memcpy(b->set[start].data->start, out->start,
+ (void *) bset_bkey_last(out) - (void *) out->start);
+ }
+
+ if (used_mempool)
+ mempool_free(virt_to_page(out), &state->pool);
+ else
+ free_pages((unsigned long) out, order);
+
+ bch_bset_build_written_tree(b);
+
+ if (!start)
+ bch_time_stats_update(&state->time, start_time);
+}
+
+void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
+ struct bset_sort_state *state)
+{
+ size_t order = b->page_order, keys = 0;
+ struct btree_iter iter;
+ int oldsize = bch_count_data(b);
+
+ __bch_btree_iter_init(b, &iter, NULL, &b->set[start]);
+
+ if (start) {
+ unsigned int i;
+
+ for (i = start; i <= b->nsets; i++)
+ keys += b->set[i].data->keys;
+
+ order = get_order(__set_bytes(b->set->data, keys));
+ }
+
+ __btree_sort(b, &iter, start, order, false, state);
+
+ EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);
+}
+
+void bch_btree_sort_and_fix_extents(struct btree_keys *b,
+ struct btree_iter *iter,
+ struct bset_sort_state *state)
+{
+ __btree_sort(b, iter, 0, b->page_order, true, state);
+}
+
+void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
+ struct bset_sort_state *state)
+{
+ uint64_t start_time = local_clock();
+ struct btree_iter iter;
+
+ bch_btree_iter_init(b, &iter, NULL);
+
+ btree_mergesort(b, new->set->data, &iter, false, true);
+
+ bch_time_stats_update(&state->time, start_time);
+
+ new->set->size = 0; // XXX: why?
+}
+
+#define SORT_CRIT (4096 / sizeof(uint64_t))
+
+void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state)
+{
+ unsigned int crit = SORT_CRIT;
+ int i;
+
+ /* Don't sort if nothing to do */
+ if (!b->nsets)
+ goto out;
+
+ for (i = b->nsets - 1; i >= 0; --i) {
+ crit *= state->crit_factor;
+
+ if (b->set[i].data->keys < crit) {
+ bch_btree_sort_partial(b, i, state);
+ return;
+ }
+ }
+
+ /* Sort if we'd overflow */
+ if (b->nsets + 1 == MAX_BSETS) {
+ bch_btree_sort(b, state);
+ return;
+ }
+
+out:
+ bch_bset_build_written_tree(b);
+}
+
+void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *stats)
+{
+ unsigned int i;
+
+ for (i = 0; i <= b->nsets; i++) {
+ struct bset_tree *t = &b->set[i];
+ size_t bytes = t->data->keys * sizeof(uint64_t);
+ size_t j;
+
+ if (bset_written(b, t)) {
+ stats->sets_written++;
+ stats->bytes_written += bytes;
+
+ stats->floats += t->size - 1;
+
+ for (j = 1; j < t->size; j++)
+ if (t->tree[j].exponent == 127)
+ stats->failed++;
+ } else {
+ stats->sets_unwritten++;
+ stats->bytes_unwritten += bytes;
+ }
+ }
+}
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
new file mode 100644
index 0000000000..d795c84246
--- /dev/null
+++ b/drivers/md/bcache/bset.h
@@ -0,0 +1,593 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHE_BSET_H
+#define _BCACHE_BSET_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "bcache_ondisk.h"
+#include "util.h" /* for time_stats */
+
+/*
+ * BKEYS:
+ *
+ * A bkey contains a key, a size field, a variable number of pointers, and some
+ * ancillary flag bits.
+ *
+ * We use two different functions for validating bkeys, bch_ptr_invalid and
+ * bch_ptr_bad().
+ *
+ * bch_ptr_invalid() primarily filters out keys and pointers that would be
+ * invalid due to some sort of bug, whereas bch_ptr_bad() filters out keys and
+ * pointer that occur in normal practice but don't point to real data.
+ *
+ * The one exception to the rule that ptr_invalid() filters out invalid keys is
+ * that it also filters out keys of size 0 - these are keys that have been
+ * completely overwritten. It'd be safe to delete these in memory while leaving
+ * them on disk, just unnecessary work - so we filter them out when resorting
+ * instead.
+ *
+ * We can't filter out stale keys when we're resorting, because garbage
+ * collection needs to find them to ensure bucket gens don't wrap around -
+ * unless we're rewriting the btree node those stale keys still exist on disk.
+ *
+ * We also implement functions here for removing some number of sectors from the
+ * front or the back of a bkey - this is mainly used for fixing overlapping
+ * extents, by removing the overlapping sectors from the older key.
+ *
+ * BSETS:
+ *
+ * A bset is an array of bkeys laid out contiguously in memory in sorted order,
+ * along with a header. A btree node is made up of a number of these, written at
+ * different times.
+ *
+ * There could be many of them on disk, but we never allow there to be more than
+ * 4 in memory - we lazily resort as needed.
+ *
+ * We implement code here for creating and maintaining auxiliary search trees
+ * (described below) for searching an individual bset, and on top of that we
+ * implement a btree iterator.
+ *
+ * BTREE ITERATOR:
+ *
+ * Most of the code in bcache doesn't care about an individual bset - it needs
+ * to search entire btree nodes and iterate over them in sorted order.
+ *
+ * The btree iterator code serves both functions; it iterates through the keys
+ * in a btree node in sorted order, starting from either keys after a specific
+ * point (if you pass it a search key) or the start of the btree node.
+ *
+ * AUXILIARY SEARCH TREES:
+ *
+ * Since keys are variable length, we can't use a binary search on a bset - we
+ * wouldn't be able to find the start of the next key. But binary searches are
+ * slow anyways, due to terrible cache behaviour; bcache originally used binary
+ * searches and that code topped out at under 50k lookups/second.
+ *
+ * So we need to construct some sort of lookup table. Since we only insert keys
+ * into the last (unwritten) set, most of the keys within a given btree node are
+ * usually in sets that are mostly constant. We use two different types of
+ * lookup tables to take advantage of this.
+ *
+ * Both lookup tables share in common that they don't index every key in the
+ * set; they index one key every BSET_CACHELINE bytes, and then a linear search
+ * is used for the rest.
+ *
+ * For sets that have been written to disk and are no longer being inserted
+ * into, we construct a binary search tree in an array - traversing a binary
+ * search tree in an array gives excellent locality of reference and is very
+ * fast, since both children of any node are adjacent to each other in memory
+ * (and their grandchildren, and great grandchildren...) - this means
+ * prefetching can be used to great effect.
+ *
+ * It's quite useful performance wise to keep these nodes small - not just
+ * because they're more likely to be in L2, but also because we can prefetch
+ * more nodes on a single cacheline and thus prefetch more iterations in advance
+ * when traversing this tree.
+ *
+ * Nodes in the auxiliary search tree must contain both a key to compare against
+ * (we don't want to fetch the key from the set, that would defeat the purpose),
+ * and a pointer to the key. We use a few tricks to compress both of these.
+ *
+ * To compress the pointer, we take advantage of the fact that one node in the
+ * search tree corresponds to precisely BSET_CACHELINE bytes in the set. We have
+ * a function (to_inorder()) that takes the index of a node in a binary tree and
+ * returns what its index would be in an inorder traversal, so we only have to
+ * store the low bits of the offset.
+ *
+ * The key is 84 bits (KEY_DEV + key->key, the offset on the device). To
+ * compress that, we take advantage of the fact that when we're traversing the
+ * search tree at every iteration we know that both our search key and the key
+ * we're looking for lie within some range - bounded by our previous
+ * comparisons. (We special case the start of a search so that this is true even
+ * at the root of the tree).
+ *
+ * So if we know the key we're looking for is between a and b, and a and b
+ * don't differ above bit 50, we don't need to check anything higher than
+ * bit 50.
+ *
+ * We don't usually need the rest of the bits, either; we only need enough bits
+ * to partition the key range we're currently checking. Consider key n - the
+ * key our auxiliary search tree node corresponds to, and key p, the key
+ * immediately preceding n. The lowest bit we need to store in the auxiliary
+ * search tree is the highest bit that differs between n and p.
+ *
+ * Note that this could be bit 0 - we might sometimes need all 80 bits to do the
+ * comparison. But we'd really like our nodes in the auxiliary search tree to be
+ * of fixed size.
+ *
+ * The solution is to make them fixed size, and when we're constructing a node
+ * check if p and n differed in the bits we needed them to. If they don't we
+ * flag that node, and when doing lookups we fallback to comparing against the
+ * real key. As long as this doesn't happen to often (and it seems to reliably
+ * happen a bit less than 1% of the time), we win - even on failures, that key
+ * is then more likely to be in cache than if we were doing binary searches all
+ * the way, since we're touching so much less memory.
+ *
+ * The keys in the auxiliary search tree are stored in (software) floating
+ * point, with an exponent and a mantissa. The exponent needs to be big enough
+ * to address all the bits in the original key, but the number of bits in the
+ * mantissa is somewhat arbitrary; more bits just gets us fewer failures.
+ *
+ * We need 7 bits for the exponent and 3 bits for the key's offset (since keys
+ * are 8 byte aligned); using 22 bits for the mantissa means a node is 4 bytes.
+ * We need one node per 128 bytes in the btree node, which means the auxiliary
+ * search trees take up 3% as much memory as the btree itself.
+ *
+ * Constructing these auxiliary search trees is moderately expensive, and we
+ * don't want to be constantly rebuilding the search tree for the last set
+ * whenever we insert another key into it. For the unwritten set, we use a much
+ * simpler lookup table - it's just a flat array, so index i in the lookup table
+ * corresponds to the i-th range of BSET_CACHELINE bytes in the set. Indexing
+ * within each byte range works the same as with the auxiliary search trees.
+ *
+ * These are much easier to keep up to date when we insert a key - we do it
+ * somewhat lazily; when we shift a key up we usually just increment the pointer
+ * to it, only when it would overflow do we go to the trouble of finding the
+ * first key in that range of bytes again.
+ */
+
+struct btree_keys;
+struct btree_iter;
+struct btree_iter_set;
+struct bkey_float;
+
+#define MAX_BSETS 4U
+
+struct bset_tree {
+ /*
+ * We construct a binary tree in an array as if the array
+ * started at 1, so that things line up on the same cachelines
+ * better: see comments in bset.c at cacheline_to_bkey() for
+ * details
+ */
+
+ /* size of the binary tree and prev array */
+ unsigned int size;
+
+ /* function of size - precalculated for to_inorder() */
+ unsigned int extra;
+
+ /* copy of the last key in the set */
+ struct bkey end;
+ struct bkey_float *tree;
+
+ /*
+ * The nodes in the bset tree point to specific keys - this
+ * array holds the sizes of the previous key.
+ *
+ * Conceptually it's a member of struct bkey_float, but we want
+ * to keep bkey_float to 4 bytes and prev isn't used in the fast
+ * path.
+ */
+ uint8_t *prev;
+
+ /* The actual btree node, with pointers to each sorted set */
+ struct bset *data;
+};
+
+struct btree_keys_ops {
+ bool (*sort_cmp)(struct btree_iter_set l,
+ struct btree_iter_set r);
+ struct bkey *(*sort_fixup)(struct btree_iter *iter,
+ struct bkey *tmp);
+ bool (*insert_fixup)(struct btree_keys *b,
+ struct bkey *insert,
+ struct btree_iter *iter,
+ struct bkey *replace_key);
+ bool (*key_invalid)(struct btree_keys *bk,
+ const struct bkey *k);
+ bool (*key_bad)(struct btree_keys *bk,
+ const struct bkey *k);
+ bool (*key_merge)(struct btree_keys *bk,
+ struct bkey *l, struct bkey *r);
+ void (*key_to_text)(char *buf,
+ size_t size,
+ const struct bkey *k);
+ void (*key_dump)(struct btree_keys *keys,
+ const struct bkey *k);
+
+ /*
+ * Only used for deciding whether to use START_KEY(k) or just the key
+ * itself in a couple places
+ */
+ bool is_extents;
+};
+
+struct btree_keys {
+ const struct btree_keys_ops *ops;
+ uint8_t page_order;
+ uint8_t nsets;
+ unsigned int last_set_unwritten:1;
+ bool *expensive_debug_checks;
+
+ /*
+ * Sets of sorted keys - the real btree node - plus a binary search tree
+ *
+ * set[0] is special; set[0]->tree, set[0]->prev and set[0]->data point
+ * to the memory we have allocated for this btree node. Additionally,
+ * set[0]->data points to the entire btree node as it exists on disk.
+ */
+ struct bset_tree set[MAX_BSETS];
+};
+
+static inline struct bset_tree *bset_tree_last(struct btree_keys *b)
+{
+ return b->set + b->nsets;
+}
+
+static inline bool bset_written(struct btree_keys *b, struct bset_tree *t)
+{
+ return t <= b->set + b->nsets - b->last_set_unwritten;
+}
+
+static inline bool bkey_written(struct btree_keys *b, struct bkey *k)
+{
+ return !b->last_set_unwritten || k < b->set[b->nsets].data->start;
+}
+
+static inline unsigned int bset_byte_offset(struct btree_keys *b,
+ struct bset *i)
+{
+ return ((size_t) i) - ((size_t) b->set->data);
+}
+
+static inline unsigned int bset_sector_offset(struct btree_keys *b,
+ struct bset *i)
+{
+ return bset_byte_offset(b, i) >> 9;
+}
+
+#define __set_bytes(i, k) (sizeof(*(i)) + (k) * sizeof(uint64_t))
+#define set_bytes(i) __set_bytes(i, i->keys)
+
+#define __set_blocks(i, k, block_bytes) \
+ DIV_ROUND_UP(__set_bytes(i, k), block_bytes)
+#define set_blocks(i, block_bytes) \
+ __set_blocks(i, (i)->keys, block_bytes)
+
+static inline size_t bch_btree_keys_u64s_remaining(struct btree_keys *b)
+{
+ struct bset_tree *t = bset_tree_last(b);
+
+ BUG_ON((PAGE_SIZE << b->page_order) <
+ (bset_byte_offset(b, t->data) + set_bytes(t->data)));
+
+ if (!b->last_set_unwritten)
+ return 0;
+
+ return ((PAGE_SIZE << b->page_order) -
+ (bset_byte_offset(b, t->data) + set_bytes(t->data))) /
+ sizeof(u64);
+}
+
+static inline struct bset *bset_next_set(struct btree_keys *b,
+ unsigned int block_bytes)
+{
+ struct bset *i = bset_tree_last(b)->data;
+
+ return ((void *) i) + roundup(set_bytes(i), block_bytes);
+}
+
+void bch_btree_keys_free(struct btree_keys *b);
+int bch_btree_keys_alloc(struct btree_keys *b, unsigned int page_order,
+ gfp_t gfp);
+void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
+ bool *expensive_debug_checks);
+
+void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic);
+void bch_bset_build_written_tree(struct btree_keys *b);
+void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k);
+bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r);
+void bch_bset_insert(struct btree_keys *b, struct bkey *where,
+ struct bkey *insert);
+unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
+ struct bkey *replace_key);
+
+enum {
+ BTREE_INSERT_STATUS_NO_INSERT = 0,
+ BTREE_INSERT_STATUS_INSERT,
+ BTREE_INSERT_STATUS_BACK_MERGE,
+ BTREE_INSERT_STATUS_OVERWROTE,
+ BTREE_INSERT_STATUS_FRONT_MERGE,
+};
+
+/* Btree key iteration */
+
+struct btree_iter {
+ size_t size, used;
+#ifdef CONFIG_BCACHE_DEBUG
+ struct btree_keys *b;
+#endif
+ struct btree_iter_set {
+ struct bkey *k, *end;
+ } data[MAX_BSETS];
+};
+
+typedef bool (*ptr_filter_fn)(struct btree_keys *b, const struct bkey *k);
+
+struct bkey *bch_btree_iter_next(struct btree_iter *iter);
+struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
+ struct btree_keys *b,
+ ptr_filter_fn fn);
+
+void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
+ struct bkey *end);
+struct bkey *bch_btree_iter_init(struct btree_keys *b,
+ struct btree_iter *iter,
+ struct bkey *search);
+
+struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
+ const struct bkey *search);
+
+/*
+ * Returns the first key that is strictly greater than search
+ */
+static inline struct bkey *bch_bset_search(struct btree_keys *b,
+ struct bset_tree *t,
+ const struct bkey *search)
+{
+ return search ? __bch_bset_search(b, t, search) : t->data->start;
+}
+
+#define for_each_key_filter(b, k, iter, filter) \
+ for (bch_btree_iter_init((b), (iter), NULL); \
+ ((k) = bch_btree_iter_next_filter((iter), (b), filter));)
+
+#define for_each_key(b, k, iter) \
+ for (bch_btree_iter_init((b), (iter), NULL); \
+ ((k) = bch_btree_iter_next(iter));)
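
struct btree_iter above is essentially a multi-way merge: each data[] entry is a cursor into one sorted set, and bch_btree_iter_next() repeatedly hands back the smallest remaining key. A toy user-space sketch of the same idea over plain integer arrays (bcache keeps the cursors in a small heap; this sketch just scans them linearly, and all toy_* names are hypothetical):

/* Illustrative only: linear-scan multi-way merge over sorted arrays. */
#include <stdio.h>

struct toy_set {
	const int *k, *end;	/* cursor and end of one sorted set */
};

static int toy_next(struct toy_set *d, int n)
{
	int best = -1, i;

	for (i = 0; i < n; i++)
		if (d[i].k < d[i].end &&
		    (best < 0 || *d[i].k < *d[best].k))
			best = i;

	return best < 0 ? -1 : *d[best].k++;
}

int main(void)
{
	const int a[] = { 1, 4, 9 }, b[] = { 2, 3, 10 };
	struct toy_set sets[] = {
		{ a, a + 3 },
		{ b, b + 3 },
	};
	int v;

	/* prints the union of both sets in order: 1 2 3 4 9 10 */
	while ((v = toy_next(sets, 2)) >= 0)
		printf("%d ", v);
	printf("\n");
	return 0;
}
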
+
+/* Sorting */
+
+struct bset_sort_state {
+ mempool_t pool;
+
+ unsigned int page_order;
+ unsigned int crit_factor;
+
+ struct time_stats time;
+};
+
+void bch_bset_sort_state_free(struct bset_sort_state *state);
+int bch_bset_sort_state_init(struct bset_sort_state *state,
+ unsigned int page_order);
+void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state);
+void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
+ struct bset_sort_state *state);
+void bch_btree_sort_and_fix_extents(struct btree_keys *b,
+ struct btree_iter *iter,
+ struct bset_sort_state *state);
+void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
+ struct bset_sort_state *state);
+
+static inline void bch_btree_sort(struct btree_keys *b,
+ struct bset_sort_state *state)
+{
+ bch_btree_sort_partial(b, 0, state);
+}
+
+struct bset_stats {
+ size_t sets_written, sets_unwritten;
+ size_t bytes_written, bytes_unwritten;
+ size_t floats, failed;
+};
+
+void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *state);
+
+/* Bkey utility code */
+
+#define bset_bkey_last(i) bkey_idx((struct bkey *) (i)->d, \
+ (unsigned int)(i)->keys)
+
+static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned int idx)
+{
+ return bkey_idx(i->start, idx);
+}
+
+static inline void bkey_init(struct bkey *k)
+{
+ *k = ZERO_KEY;
+}
+
+static __always_inline int64_t bkey_cmp(const struct bkey *l,
+ const struct bkey *r)
+{
+ return unlikely(KEY_INODE(l) != KEY_INODE(r))
+ ? (int64_t) KEY_INODE(l) - (int64_t) KEY_INODE(r)
+ : (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);
+}
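
bkey_cmp() orders keys by inode first and only falls back to the offset when the inodes match, so all of one device's extents sort together. A toy sketch of the same rule on plain fields (toy_* names are hypothetical):

/* Illustrative only: same ordering rule as bkey_cmp(), on plain fields. */
#include <stdio.h>
#include <stdint.h>

struct toy_key {
	uint64_t inode, offset;
};

static int64_t toy_cmp(struct toy_key l, struct toy_key r)
{
	return l.inode != r.inode
		? (int64_t) l.inode - (int64_t) r.inode
		: (int64_t) l.offset - (int64_t) r.offset;
}

int main(void)
{
	struct toy_key a = { .inode = 1, .offset = 4096 };
	struct toy_key b = { .inode = 2, .offset = 0 };

	/* prints -1: a sorts before b because inode 1 < inode 2 */
	printf("%lld\n", (long long) toy_cmp(a, b));
	return 0;
}
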
+
+void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
+ unsigned int i);
+bool __bch_cut_front(const struct bkey *where, struct bkey *k);
+bool __bch_cut_back(const struct bkey *where, struct bkey *k);
+
+static inline bool bch_cut_front(const struct bkey *where, struct bkey *k)
+{
+ BUG_ON(bkey_cmp(where, k) > 0);
+ return __bch_cut_front(where, k);
+}
+
+static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
+{
+ BUG_ON(bkey_cmp(where, &START_KEY(k)) < 0);
+ return __bch_cut_back(where, k);
+}
+
+/*
+ * '*preceding_key_p' points to a memory object used to store the key
+ * preceding k. If the preceding key does not exist, '*preceding_key_p' is
+ * set to NULL, so the caller of preceding_key() needs to take care of the
+ * memory that '*preceding_key_p' pointed to before calling preceding_key().
+ * Currently the only caller of preceding_key() is bch_btree_insert_key(),
+ * which points it at an on-stack variable, so releasing the memory is
+ * handled by the stack frame itself.
+ */
+static inline void preceding_key(struct bkey *k, struct bkey **preceding_key_p)
+{
+ if (KEY_INODE(k) || KEY_OFFSET(k)) {
+ (**preceding_key_p) = KEY(KEY_INODE(k), KEY_OFFSET(k), 0);
+ if (!(*preceding_key_p)->low)
+ (*preceding_key_p)->high--;
+ (*preceding_key_p)->low--;
+ } else {
+ (*preceding_key_p) = NULL;
+ }
+}
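
The decrement in preceding_key() treats the key's (high, low) pair as one wide integer, borrowing from high when low is zero. A stand-alone sketch with an assumed starting value of 2^64 (high = 1, low = 0):

/* Illustrative only: the borrow-on-decrement used by preceding_key(). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t high = 1, low = 0;	/* assumed starting value: 2^64 */

	if (!low)			/* same order of operations as above */
		high--;
	low--;

	/* prints: high=0 low=18446744073709551615, i.e. 2^64 - 1 */
	printf("high=%llu low=%llu\n",
	       (unsigned long long) high, (unsigned long long) low);
	return 0;
}
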
+
+static inline bool bch_ptr_invalid(struct btree_keys *b, const struct bkey *k)
+{
+ return b->ops->key_invalid(b, k);
+}
+
+static inline bool bch_ptr_bad(struct btree_keys *b, const struct bkey *k)
+{
+ return b->ops->key_bad(b, k);
+}
+
+static inline void bch_bkey_to_text(struct btree_keys *b, char *buf,
+ size_t size, const struct bkey *k)
+{
+ return b->ops->key_to_text(buf, size, k);
+}
+
+static inline bool bch_bkey_equal_header(const struct bkey *l,
+ const struct bkey *r)
+{
+ return (KEY_DIRTY(l) == KEY_DIRTY(r) &&
+ KEY_PTRS(l) == KEY_PTRS(r) &&
+ KEY_CSUM(l) == KEY_CSUM(r));
+}
+
+/* Keylists */
+
+struct keylist {
+ union {
+ struct bkey *keys;
+ uint64_t *keys_p;
+ };
+ union {
+ struct bkey *top;
+ uint64_t *top_p;
+ };
+
+ /* Enough room for btree_split's keys without realloc */
+#define KEYLIST_INLINE 16
+ uint64_t inline_keys[KEYLIST_INLINE];
+};
+
+static inline void bch_keylist_init(struct keylist *l)
+{
+ l->top_p = l->keys_p = l->inline_keys;
+}
+
+static inline void bch_keylist_init_single(struct keylist *l, struct bkey *k)
+{
+ l->keys = k;
+ l->top = bkey_next(k);
+}
+
+static inline void bch_keylist_push(struct keylist *l)
+{
+ l->top = bkey_next(l->top);
+}
+
+static inline void bch_keylist_add(struct keylist *l, struct bkey *k)
+{
+ bkey_copy(l->top, k);
+ bch_keylist_push(l);
+}
+
+static inline bool bch_keylist_empty(struct keylist *l)
+{
+ return l->top == l->keys;
+}
+
+static inline void bch_keylist_reset(struct keylist *l)
+{
+ l->top = l->keys;
+}
+
+static inline void bch_keylist_free(struct keylist *l)
+{
+ if (l->keys_p != l->inline_keys)
+ kfree(l->keys_p);
+}
+
+static inline size_t bch_keylist_nkeys(struct keylist *l)
+{
+ return l->top_p - l->keys_p;
+}
+
+static inline size_t bch_keylist_bytes(struct keylist *l)
+{
+ return bch_keylist_nkeys(l) * sizeof(uint64_t);
+}
+
+struct bkey *bch_keylist_pop(struct keylist *l);
+void bch_keylist_pop_front(struct keylist *l);
+int __bch_keylist_realloc(struct keylist *l, unsigned int u64s);
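
A keylist is a growable buffer that starts out in its inline storage and only spills to the heap when __bch_keylist_realloc() has to grow it. A toy user-space sketch of the same shape with plain uint64_t payloads instead of bkeys (toy_* names are hypothetical; as in bcache, reserving space is the caller's job):

/* Illustrative only: inline-buffer-or-heap pattern behind struct keylist. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TOY_INLINE_U64S 16

struct toy_keylist {
	uint64_t *keys, *top;
	uint64_t inline_keys[TOY_INLINE_U64S];
};

static void toy_init(struct toy_keylist *l)
{
	l->keys = l->top = l->inline_keys;
}

static void toy_add(struct toy_keylist *l, uint64_t v)
{
	*l->top++ = v;
}

static void toy_free(struct toy_keylist *l)
{
	if (l->keys != l->inline_keys)	/* only free what was heap-allocated */
		free(l->keys);
}

int main(void)
{
	struct toy_keylist l;

	toy_init(&l);
	toy_add(&l, 42);

	/* prints: nkeys=1 empty=0 */
	printf("nkeys=%zu empty=%d\n",
	       (size_t) (l.top - l.keys), l.top == l.keys);
	toy_free(&l);
	return 0;
}
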
+
+/* Debug stuff */
+
+#ifdef CONFIG_BCACHE_DEBUG
+
+int __bch_count_data(struct btree_keys *b);
+void __printf(2, 3) __bch_check_keys(struct btree_keys *b,
+ const char *fmt,
+ ...);
+void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set);
+void bch_dump_bucket(struct btree_keys *b);
+
+#else
+
+static inline int __bch_count_data(struct btree_keys *b) { return -1; }
+static inline void __printf(2, 3)
+ __bch_check_keys(struct btree_keys *b, const char *fmt, ...) {}
+static inline void bch_dump_bucket(struct btree_keys *b) {}
+void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set);
+
+#endif
+
+static inline bool btree_keys_expensive_checks(struct btree_keys *b)
+{
+#ifdef CONFIG_BCACHE_DEBUG
+ return *b->expensive_debug_checks;
+#else
+ return false;
+#endif
+}
+
+static inline int bch_count_data(struct btree_keys *b)
+{
+ return btree_keys_expensive_checks(b) ? __bch_count_data(b) : -1;
+}
+
+#define bch_check_keys(b, ...) \
+do { \
+ if (btree_keys_expensive_checks(b)) \
+ __bch_check_keys(b, __VA_ARGS__); \
+} while (0)
+
+#endif
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
new file mode 100644
index 0000000000..b709c2fde7
--- /dev/null
+++ b/drivers/md/bcache/btree.c
@@ -0,0 +1,2821 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
+ *
+ * Uses a block device as cache for other block devices; optimized for SSDs.
+ * All allocation is done in buckets, which should match the erase block size
+ * of the device.
+ *
+ * Buckets containing cached data are kept on a heap sorted by priority;
+ * bucket priority is increased on cache hit, and periodically all the buckets
+ * on the heap have their priority scaled down. This currently is just used as
+ * an LRU but in the future should allow for more intelligent heuristics.
+ *
+ * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
+ * counter. Garbage collection is used to remove stale pointers.
+ *
+ * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
+ * as keys are inserted we only sort the pages that have not yet been written.
+ * When garbage collection is run, we resort the entire node.
+ *
+ * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
+ */
+
+#include "bcache.h"
+#include "btree.h"
+#include "debug.h"
+#include "extents.h"
+
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/hash.h>
+#include <linux/kthread.h>
+#include <linux/prefetch.h>
+#include <linux/random.h>
+#include <linux/rcupdate.h>
+#include <linux/sched/clock.h>
+#include <linux/rculist.h>
+#include <linux/delay.h>
+#include <trace/events/bcache.h>
+
+/*
+ * Todo:
+ * register_bcache: Return errors out to userspace correctly
+ *
+ * Writeback: don't undirty key until after a cache flush
+ *
+ * Create an iterator for key pointers
+ *
+ * On btree write error, mark bucket such that it won't be freed from the cache
+ *
+ * Journalling:
+ * Check for bad keys in replay
+ * Propagate barriers
+ * Refcount journal entries in journal_replay
+ *
+ * Garbage collection:
+ * Finish incremental gc
+ * Gc should free old UUIDs, data for invalid UUIDs
+ *
+ * Provide a way to list backing device UUIDs we have data cached for, and
+ * probably how long it's been since we've seen them, and a way to invalidate
+ * dirty data for devices that will never be attached again
+ *
+ * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
+ * that based on that and how much dirty data we have we can keep writeback
+ * from being starved
+ *
+ * Add a tracepoint or somesuch to watch for writeback starvation
+ *
+ * When btree depth > 1 and splitting an interior node, we have to make sure
+ * alloc_bucket() cannot fail. This should be true but is not completely
+ * obvious.
+ *
+ * Plugging?
+ *
+ * If data write is less than hard sector size of ssd, round up offset in open
+ * bucket to the next whole sector
+ *
+ * Superblock needs to be fleshed out for multiple cache devices
+ *
+ * Add a sysfs tunable for the number of writeback IOs in flight
+ *
+ * Add a sysfs tunable for the number of open data buckets
+ *
+ * IO tracking: Can we track when one process is doing io on behalf of another?
+ * IO tracking: Don't use just an average, weigh more recent stuff higher
+ *
+ * Test module load/unload
+ */
+
+#define MAX_NEED_GC 64
+#define MAX_SAVE_PRIO 72
+#define MAX_GC_TIMES 100
+#define MIN_GC_NODES 100
+#define GC_SLEEP_MS 100
+
+#define PTR_DIRTY_BIT (((uint64_t) 1 << 36))
+
+#define PTR_HASH(c, k) \
+ (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
+
+static struct workqueue_struct *btree_io_wq;
+
+#define insert_lock(s, b) ((b)->level <= (s)->lock)
+
+
+static inline struct bset *write_block(struct btree *b)
+{
+ return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c->cache);
+}
+
+static void bch_btree_init_next(struct btree *b)
+{
+ /* If not a leaf node, always sort */
+ if (b->level && b->keys.nsets)
+ bch_btree_sort(&b->keys, &b->c->sort);
+ else
+ bch_btree_sort_lazy(&b->keys, &b->c->sort);
+
+ if (b->written < btree_blocks(b))
+ bch_bset_init_next(&b->keys, write_block(b),
+ bset_magic(&b->c->cache->sb));
+}
+
+/* Btree key manipulation */
+
+void bkey_put(struct cache_set *c, struct bkey *k)
+{
+ unsigned int i;
+
+ for (i = 0; i < KEY_PTRS(k); i++)
+ if (ptr_available(c, k, i))
+ atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
+}
+
+/* Btree IO */
+
+static uint64_t btree_csum_set(struct btree *b, struct bset *i)
+{
+ uint64_t crc = b->key.ptr[0];
+ void *data = (void *) i + 8, *end = bset_bkey_last(i);
+
+ crc = crc64_be(crc, data, end - data);
+ return crc ^ 0xffffffffffffffffULL;
+}
+
+void bch_btree_node_read_done(struct btree *b)
+{
+ const char *err = "bad btree header";
+ struct bset *i = btree_bset_first(b);
+ struct btree_iter *iter;
+
+	/*
+	 * c->fill_iter can allocate an iterator with more memory space
+	 * than the static MAX_BSETS.
+	 * See the comment around cache_set->fill_iter.
+	 */
+ iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
+ iter->size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size;
+ iter->used = 0;
+
+#ifdef CONFIG_BCACHE_DEBUG
+ iter->b = &b->keys;
+#endif
+
+ if (!i->seq)
+ goto err;
+
+ for (;
+ b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
+ i = write_block(b)) {
+ err = "unsupported bset version";
+ if (i->version > BCACHE_BSET_VERSION)
+ goto err;
+
+ err = "bad btree header";
+ if (b->written + set_blocks(i, block_bytes(b->c->cache)) >
+ btree_blocks(b))
+ goto err;
+
+ err = "bad magic";
+ if (i->magic != bset_magic(&b->c->cache->sb))
+ goto err;
+
+ err = "bad checksum";
+ switch (i->version) {
+ case 0:
+ if (i->csum != csum_set(i))
+ goto err;
+ break;
+ case BCACHE_BSET_VERSION:
+ if (i->csum != btree_csum_set(b, i))
+ goto err;
+ break;
+ }
+
+ err = "empty set";
+ if (i != b->keys.set[0].data && !i->keys)
+ goto err;
+
+ bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
+
+ b->written += set_blocks(i, block_bytes(b->c->cache));
+ }
+
+ err = "corrupted btree";
+ for (i = write_block(b);
+ bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
+ i = ((void *) i) + block_bytes(b->c->cache))
+ if (i->seq == b->keys.set[0].data->seq)
+ goto err;
+
+ bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
+
+ i = b->keys.set[0].data;
+ err = "short btree key";
+ if (b->keys.set[0].size &&
+ bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
+ goto err;
+
+ if (b->written < btree_blocks(b))
+ bch_bset_init_next(&b->keys, write_block(b),
+ bset_magic(&b->c->cache->sb));
+out:
+ mempool_free(iter, &b->c->fill_iter);
+ return;
+err:
+ set_btree_node_io_error(b);
+ bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
+ err, PTR_BUCKET_NR(b->c, &b->key, 0),
+ bset_block_offset(b, i), i->keys);
+ goto out;
+}
+
+static void btree_node_read_endio(struct bio *bio)
+{
+ struct closure *cl = bio->bi_private;
+
+ closure_put(cl);
+}
+
+static void bch_btree_node_read(struct btree *b)
+{
+ uint64_t start_time = local_clock();
+ struct closure cl;
+ struct bio *bio;
+
+ trace_bcache_btree_read(b);
+
+ closure_init_stack(&cl);
+
+ bio = bch_bbio_alloc(b->c);
+ bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
+ bio->bi_end_io = btree_node_read_endio;
+ bio->bi_private = &cl;
+ bio->bi_opf = REQ_OP_READ | REQ_META;
+
+ bch_bio_map(bio, b->keys.set[0].data);
+
+ bch_submit_bbio(bio, b->c, &b->key, 0);
+ closure_sync(&cl);
+
+ if (bio->bi_status)
+ set_btree_node_io_error(b);
+
+ bch_bbio_free(bio, b->c);
+
+ if (btree_node_io_error(b))
+ goto err;
+
+ bch_btree_node_read_done(b);
+ bch_time_stats_update(&b->c->btree_read_time, start_time);
+
+ return;
+err:
+ bch_cache_set_error(b->c, "io error reading bucket %zu",
+ PTR_BUCKET_NR(b->c, &b->key, 0));
+}
+
+static void btree_complete_write(struct btree *b, struct btree_write *w)
+{
+ if (w->prio_blocked &&
+ !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
+ wake_up_allocators(b->c);
+
+ if (w->journal) {
+ atomic_dec_bug(w->journal);
+ __closure_wake_up(&b->c->journal.wait);
+ }
+
+ w->prio_blocked = 0;
+ w->journal = NULL;
+}
+
+static void btree_node_write_unlock(struct closure *cl)
+{
+ struct btree *b = container_of(cl, struct btree, io);
+
+ up(&b->io_mutex);
+}
+
+static void __btree_node_write_done(struct closure *cl)
+{
+ struct btree *b = container_of(cl, struct btree, io);
+ struct btree_write *w = btree_prev_write(b);
+
+ bch_bbio_free(b->bio, b->c);
+ b->bio = NULL;
+ btree_complete_write(b, w);
+
+ if (btree_node_dirty(b))
+ queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
+
+ closure_return_with_destructor(cl, btree_node_write_unlock);
+}
+
+static void btree_node_write_done(struct closure *cl)
+{
+ struct btree *b = container_of(cl, struct btree, io);
+
+ bio_free_pages(b->bio);
+ __btree_node_write_done(cl);
+}
+
+static void btree_node_write_endio(struct bio *bio)
+{
+ struct closure *cl = bio->bi_private;
+ struct btree *b = container_of(cl, struct btree, io);
+
+ if (bio->bi_status)
+ set_btree_node_io_error(b);
+
+ bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
+ closure_put(cl);
+}
+
+static void do_btree_node_write(struct btree *b)
+{
+ struct closure *cl = &b->io;
+ struct bset *i = btree_bset_last(b);
+ BKEY_PADDED(key) k;
+
+ i->version = BCACHE_BSET_VERSION;
+ i->csum = btree_csum_set(b, i);
+
+ BUG_ON(b->bio);
+ b->bio = bch_bbio_alloc(b->c);
+
+ b->bio->bi_end_io = btree_node_write_endio;
+ b->bio->bi_private = cl;
+ b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c->cache));
+ b->bio->bi_opf = REQ_OP_WRITE | REQ_META | REQ_FUA;
+ bch_bio_map(b->bio, i);
+
+ /*
+ * If we're appending to a leaf node, we don't technically need FUA -
+ * this write just needs to be persisted before the next journal write,
+ * which will be marked FLUSH|FUA.
+ *
+ * Similarly if we're writing a new btree root - the pointer is going to
+ * be in the next journal entry.
+ *
+ * But if we're writing a new btree node (that isn't a root) or
+ * appending to a non leaf btree node, we need either FUA or a flush
+ * when we write the parent with the new pointer. FUA is cheaper than a
+ * flush, and writes appending to leaf nodes aren't blocking anything so
+ * just make all btree node writes FUA to keep things sane.
+ */
+
+ bkey_copy(&k.key, &b->key);
+ SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
+ bset_sector_offset(&b->keys, i));
+
+ if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
+ struct bio_vec *bv;
+ void *addr = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
+ struct bvec_iter_all iter_all;
+
+ bio_for_each_segment_all(bv, b->bio, iter_all) {
+ memcpy(page_address(bv->bv_page), addr, PAGE_SIZE);
+ addr += PAGE_SIZE;
+ }
+
+ bch_submit_bbio(b->bio, b->c, &k.key, 0);
+
+ continue_at(cl, btree_node_write_done, NULL);
+ } else {
+ /*
+ * No problem for multipage bvec since the bio is
+ * just allocated
+ */
+ b->bio->bi_vcnt = 0;
+ bch_bio_map(b->bio, i);
+
+ bch_submit_bbio(b->bio, b->c, &k.key, 0);
+
+ closure_sync(cl);
+ continue_at_nobarrier(cl, __btree_node_write_done, NULL);
+ }
+}
+
+void __bch_btree_node_write(struct btree *b, struct closure *parent)
+{
+ struct bset *i = btree_bset_last(b);
+
+ lockdep_assert_held(&b->write_lock);
+
+ trace_bcache_btree_write(b);
+
+ BUG_ON(current->bio_list);
+ BUG_ON(b->written >= btree_blocks(b));
+ BUG_ON(b->written && !i->keys);
+ BUG_ON(btree_bset_first(b)->seq != i->seq);
+ bch_check_keys(&b->keys, "writing");
+
+ cancel_delayed_work(&b->work);
+
+ /* If caller isn't waiting for write, parent refcount is cache set */
+ down(&b->io_mutex);
+ closure_init(&b->io, parent ?: &b->c->cl);
+
+ clear_bit(BTREE_NODE_dirty, &b->flags);
+ change_bit(BTREE_NODE_write_idx, &b->flags);
+
+ do_btree_node_write(b);
+
+ atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->cache->sb.block_size,
+ &b->c->cache->btree_sectors_written);
+
+ b->written += set_blocks(i, block_bytes(b->c->cache));
+}
+
+void bch_btree_node_write(struct btree *b, struct closure *parent)
+{
+ unsigned int nsets = b->keys.nsets;
+
+ lockdep_assert_held(&b->lock);
+
+ __bch_btree_node_write(b, parent);
+
+ /*
+ * do verify if there was more than one set initially (i.e. we did a
+ * sort) and we sorted down to a single set:
+ */
+ if (nsets && !b->keys.nsets)
+ bch_btree_verify(b);
+
+ bch_btree_init_next(b);
+}
+
+static void bch_btree_node_write_sync(struct btree *b)
+{
+ struct closure cl;
+
+ closure_init_stack(&cl);
+
+ mutex_lock(&b->write_lock);
+ bch_btree_node_write(b, &cl);
+ mutex_unlock(&b->write_lock);
+
+ closure_sync(&cl);
+}
+
+static void btree_node_write_work(struct work_struct *w)
+{
+ struct btree *b = container_of(to_delayed_work(w), struct btree, work);
+
+ mutex_lock(&b->write_lock);
+ if (btree_node_dirty(b))
+ __bch_btree_node_write(b, NULL);
+ mutex_unlock(&b->write_lock);
+}
+
+static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
+{
+ struct bset *i = btree_bset_last(b);
+ struct btree_write *w = btree_current_write(b);
+
+ lockdep_assert_held(&b->write_lock);
+
+ BUG_ON(!b->written);
+ BUG_ON(!i->keys);
+
+ if (!btree_node_dirty(b))
+ queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
+
+ set_btree_node_dirty(b);
+
+ /*
+ * w->journal is always the oldest journal pin of all bkeys
+ * in the leaf node, to make sure the oldest jset seq won't
+ * be increased before this btree node is flushed.
+ */
+ if (journal_ref) {
+ if (w->journal &&
+ journal_pin_cmp(b->c, w->journal, journal_ref)) {
+ atomic_dec_bug(w->journal);
+ w->journal = NULL;
+ }
+
+ if (!w->journal) {
+ w->journal = journal_ref;
+ atomic_inc(w->journal);
+ }
+ }
+
+ /* Force write if set is too big */
+ if (set_bytes(i) > PAGE_SIZE - 48 &&
+ !current->bio_list)
+ bch_btree_node_write(b, NULL);
+}
+
+/*
+ * Btree in memory cache - allocation/freeing
+ * mca -> memory cache
+ */
+
+#define mca_reserve(c) (((!IS_ERR_OR_NULL(c->root) && c->root->level) \
+ ? c->root->level : 1) * 8 + 16)
+#define mca_can_free(c) \
+ max_t(int, 0, c->btree_cache_used - mca_reserve(c))
+
+static void mca_data_free(struct btree *b)
+{
+ BUG_ON(b->io_mutex.count != 1);
+
+ bch_btree_keys_free(&b->keys);
+
+ b->c->btree_cache_used--;
+ list_move(&b->list, &b->c->btree_cache_freed);
+}
+
+static void mca_bucket_free(struct btree *b)
+{
+ BUG_ON(btree_node_dirty(b));
+
+ b->key.ptr[0] = 0;
+ hlist_del_init_rcu(&b->hash);
+ list_move(&b->list, &b->c->btree_cache_freeable);
+}
+
+static unsigned int btree_order(struct bkey *k)
+{
+ return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
+}
+
+static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
+{
+ if (!bch_btree_keys_alloc(&b->keys,
+ max_t(unsigned int,
+ ilog2(b->c->btree_pages),
+ btree_order(k)),
+ gfp)) {
+ b->c->btree_cache_used++;
+ list_move(&b->list, &b->c->btree_cache);
+ } else {
+ list_move(&b->list, &b->c->btree_cache_freed);
+ }
+}
+
+#define cmp_int(l, r) ((l > r) - (l < r))
+
+#ifdef CONFIG_PROVE_LOCKING
+static int btree_lock_cmp_fn(const struct lockdep_map *_a,
+ const struct lockdep_map *_b)
+{
+ const struct btree *a = container_of(_a, struct btree, lock.dep_map);
+ const struct btree *b = container_of(_b, struct btree, lock.dep_map);
+
+ return -cmp_int(a->level, b->level) ?: bkey_cmp(&a->key, &b->key);
+}
+
+static void btree_lock_print_fn(const struct lockdep_map *map)
+{
+ const struct btree *b = container_of(map, struct btree, lock.dep_map);
+
+ printk(KERN_CONT " l=%u %llu:%llu", b->level,
+ KEY_INODE(&b->key), KEY_OFFSET(&b->key));
+}
+#endif
+
+static struct btree *mca_bucket_alloc(struct cache_set *c,
+ struct bkey *k, gfp_t gfp)
+{
+ /*
+ * kzalloc() is necessary here for initialization,
+ * see code comments in bch_btree_keys_init().
+ */
+ struct btree *b = kzalloc(sizeof(struct btree), gfp);
+
+ if (!b)
+ return NULL;
+
+ init_rwsem(&b->lock);
+ lock_set_cmp_fn(&b->lock, btree_lock_cmp_fn, btree_lock_print_fn);
+ mutex_init(&b->write_lock);
+ lockdep_set_novalidate_class(&b->write_lock);
+ INIT_LIST_HEAD(&b->list);
+ INIT_DELAYED_WORK(&b->work, btree_node_write_work);
+ b->c = c;
+ sema_init(&b->io_mutex, 1);
+
+ mca_data_alloc(b, k, gfp);
+ return b;
+}
+
+static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
+{
+ struct closure cl;
+
+ closure_init_stack(&cl);
+ lockdep_assert_held(&b->c->bucket_lock);
+
+ if (!down_write_trylock(&b->lock))
+ return -ENOMEM;
+
+ BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);
+
+ if (b->keys.page_order < min_order)
+ goto out_unlock;
+
+ if (!flush) {
+ if (btree_node_dirty(b))
+ goto out_unlock;
+
+ if (down_trylock(&b->io_mutex))
+ goto out_unlock;
+ up(&b->io_mutex);
+ }
+
+retry:
+	/*
+	 * BTREE_NODE_dirty might be cleared in btree_flush_write() by
+	 * __bch_btree_node_write(). To avoid an extra flush, acquire
+	 * b->write_lock before checking the BTREE_NODE_dirty bit.
+	 */
+ mutex_lock(&b->write_lock);
+ /*
+ * If this btree node is selected in btree_flush_write() by journal
+ * code, delay and retry until the node is flushed by journal code
+ * and BTREE_NODE_journal_flush bit cleared by btree_flush_write().
+ */
+ if (btree_node_journal_flush(b)) {
+ pr_debug("bnode %p is flushing by journal, retry\n", b);
+ mutex_unlock(&b->write_lock);
+ udelay(1);
+ goto retry;
+ }
+
+ if (btree_node_dirty(b))
+ __bch_btree_node_write(b, &cl);
+ mutex_unlock(&b->write_lock);
+
+ closure_sync(&cl);
+
+ /* wait for any in flight btree write */
+ down(&b->io_mutex);
+ up(&b->io_mutex);
+
+ return 0;
+out_unlock:
+ rw_unlock(true, b);
+ return -ENOMEM;
+}
+
+static unsigned long bch_mca_scan(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ struct cache_set *c = container_of(shrink, struct cache_set, shrink);
+ struct btree *b, *t;
+ unsigned long i, nr = sc->nr_to_scan;
+ unsigned long freed = 0;
+ unsigned int btree_cache_used;
+
+ if (c->shrinker_disabled)
+ return SHRINK_STOP;
+
+ if (c->btree_cache_alloc_lock)
+ return SHRINK_STOP;
+
+ /* Return -1 if we can't do anything right now */
+ if (sc->gfp_mask & __GFP_IO)
+ mutex_lock(&c->bucket_lock);
+ else if (!mutex_trylock(&c->bucket_lock))
+ return -1;
+
+ /*
+ * It's _really_ critical that we don't free too many btree nodes - we
+ * have to always leave ourselves a reserve. The reserve is how we
+ * guarantee that allocating memory for a new btree node can always
+ * succeed, so that inserting keys into the btree can always succeed and
+ * IO can always make forward progress:
+ */
+ nr /= c->btree_pages;
+ if (nr == 0)
+ nr = 1;
+ nr = min_t(unsigned long, nr, mca_can_free(c));
+
+ i = 0;
+ btree_cache_used = c->btree_cache_used;
+ list_for_each_entry_safe_reverse(b, t, &c->btree_cache_freeable, list) {
+ if (nr <= 0)
+ goto out;
+
+ if (!mca_reap(b, 0, false)) {
+ mca_data_free(b);
+ rw_unlock(true, b);
+ freed++;
+ }
+ nr--;
+ i++;
+ }
+
+ list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
+ if (nr <= 0 || i >= btree_cache_used)
+ goto out;
+
+ if (!mca_reap(b, 0, false)) {
+ mca_bucket_free(b);
+ mca_data_free(b);
+ rw_unlock(true, b);
+ freed++;
+ }
+
+ nr--;
+ i++;
+ }
+out:
+ mutex_unlock(&c->bucket_lock);
+ return freed * c->btree_pages;
+}
+
+static unsigned long bch_mca_count(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ struct cache_set *c = container_of(shrink, struct cache_set, shrink);
+
+ if (c->shrinker_disabled)
+ return 0;
+
+ if (c->btree_cache_alloc_lock)
+ return 0;
+
+ return mca_can_free(c) * c->btree_pages;
+}
+
+void bch_btree_cache_free(struct cache_set *c)
+{
+ struct btree *b;
+ struct closure cl;
+
+ closure_init_stack(&cl);
+
+ if (c->shrink.list.next)
+ unregister_shrinker(&c->shrink);
+
+ mutex_lock(&c->bucket_lock);
+
+#ifdef CONFIG_BCACHE_DEBUG
+ if (c->verify_data)
+ list_move(&c->verify_data->list, &c->btree_cache);
+
+ free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->cache->sb)));
+#endif
+
+ list_splice(&c->btree_cache_freeable,
+ &c->btree_cache);
+
+ while (!list_empty(&c->btree_cache)) {
+ b = list_first_entry(&c->btree_cache, struct btree, list);
+
+		/*
+		 * This function is called by cache_set_free(); there is no
+		 * I/O request on the cache now, so it is no longer necessary
+		 * to acquire b->write_lock before clearing BTREE_NODE_dirty.
+		 */
+ if (btree_node_dirty(b)) {
+ btree_complete_write(b, btree_current_write(b));
+ clear_bit(BTREE_NODE_dirty, &b->flags);
+ }
+ mca_data_free(b);
+ }
+
+ while (!list_empty(&c->btree_cache_freed)) {
+ b = list_first_entry(&c->btree_cache_freed,
+ struct btree, list);
+ list_del(&b->list);
+ cancel_delayed_work_sync(&b->work);
+ kfree(b);
+ }
+
+ mutex_unlock(&c->bucket_lock);
+}
+
+int bch_btree_cache_alloc(struct cache_set *c)
+{
+ unsigned int i;
+
+ for (i = 0; i < mca_reserve(c); i++)
+ if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
+ return -ENOMEM;
+
+ list_splice_init(&c->btree_cache,
+ &c->btree_cache_freeable);
+
+#ifdef CONFIG_BCACHE_DEBUG
+ mutex_init(&c->verify_lock);
+
+ c->verify_ondisk = (void *)
+ __get_free_pages(GFP_KERNEL|__GFP_COMP,
+ ilog2(meta_bucket_pages(&c->cache->sb)));
+ if (!c->verify_ondisk) {
+		/*
+		 * Don't worry about the mca_reserve() buckets
+		 * allocated in the previous for-loop; they will be
+		 * handled properly in bch_cache_set_unregister().
+		 */
+ return -ENOMEM;
+ }
+
+ c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
+
+ if (c->verify_data &&
+ c->verify_data->keys.set->data)
+ list_del_init(&c->verify_data->list);
+ else
+ c->verify_data = NULL;
+#endif
+
+ c->shrink.count_objects = bch_mca_count;
+ c->shrink.scan_objects = bch_mca_scan;
+ c->shrink.seeks = 4;
+ c->shrink.batch = c->btree_pages * 2;
+
+ if (register_shrinker(&c->shrink, "md-bcache:%pU", c->set_uuid))
+ pr_warn("bcache: %s: could not register shrinker\n",
+ __func__);
+
+ return 0;
+}
+
+/* Btree in memory cache - hash table */
+
+static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
+{
+ return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
+}
+
+static struct btree *mca_find(struct cache_set *c, struct bkey *k)
+{
+ struct btree *b;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
+ if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
+ goto out;
+ b = NULL;
+out:
+ rcu_read_unlock();
+ return b;
+}
+
+static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
+{
+ spin_lock(&c->btree_cannibalize_lock);
+ if (likely(c->btree_cache_alloc_lock == NULL)) {
+ c->btree_cache_alloc_lock = current;
+ } else if (c->btree_cache_alloc_lock != current) {
+ if (op)
+ prepare_to_wait(&c->btree_cache_wait, &op->wait,
+ TASK_UNINTERRUPTIBLE);
+ spin_unlock(&c->btree_cannibalize_lock);
+ return -EINTR;
+ }
+ spin_unlock(&c->btree_cannibalize_lock);
+
+ return 0;
+}
+
+static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
+ struct bkey *k)
+{
+ struct btree *b;
+
+ trace_bcache_btree_cache_cannibalize(c);
+
+ if (mca_cannibalize_lock(c, op))
+ return ERR_PTR(-EINTR);
+
+ list_for_each_entry_reverse(b, &c->btree_cache, list)
+ if (!mca_reap(b, btree_order(k), false))
+ return b;
+
+ list_for_each_entry_reverse(b, &c->btree_cache, list)
+ if (!mca_reap(b, btree_order(k), true))
+ return b;
+
+ WARN(1, "btree cache cannibalize failed\n");
+ return ERR_PTR(-ENOMEM);
+}
+
+/*
+ * We can only have one thread cannibalizing other cached btree nodes at a time,
+ * or we'll deadlock. We use an open-coded mutex to ensure that, which
+ * cannibalize_bucket() will take. This means every time we unlock the root of
+ * the btree, we need to release this lock if we have it held.
+ */
+void bch_cannibalize_unlock(struct cache_set *c)
+{
+ spin_lock(&c->btree_cannibalize_lock);
+ if (c->btree_cache_alloc_lock == current) {
+ c->btree_cache_alloc_lock = NULL;
+ wake_up(&c->btree_cache_wait);
+ }
+ spin_unlock(&c->btree_cannibalize_lock);
+}
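
The "open coded mutex" described above is just an owner field guarded by a spinlock: the current owner may re-enter, everyone else is turned away and has to wait. A user-space sketch of the same shape, with pthreads standing in for the kernel primitives (toy_* names are hypothetical):

/* Illustrative only: owner-field-under-a-lock, as used for cannibalization. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t guard = PTHREAD_MUTEX_INITIALIZER;
static pthread_t owner;
static int owned;

static int toy_trylock(void)
{
	int ret = 0;

	pthread_mutex_lock(&guard);
	if (!owned) {
		owned = 1;
		owner = pthread_self();
	} else if (!pthread_equal(owner, pthread_self())) {
		ret = -1;	/* someone else holds it: caller must back off */
	}
	pthread_mutex_unlock(&guard);
	return ret;
}

static void toy_unlock(void)
{
	pthread_mutex_lock(&guard);
	if (owned && pthread_equal(owner, pthread_self()))
		owned = 0;
	pthread_mutex_unlock(&guard);
}

int main(void)
{
	printf("%d\n", toy_trylock());	/* 0: acquired */
	printf("%d\n", toy_trylock());	/* 0: re-entry by the owner is allowed */
	toy_unlock();
	return 0;
}
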
+
+static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
+ struct bkey *k, int level)
+{
+ struct btree *b;
+
+ BUG_ON(current->bio_list);
+
+ lockdep_assert_held(&c->bucket_lock);
+
+ if (mca_find(c, k))
+ return NULL;
+
+	/* btree_free() doesn't free memory; it sticks the node on the end of
+	 * the list. Check if there are any freed nodes there:
+	 */
+ list_for_each_entry(b, &c->btree_cache_freeable, list)
+ if (!mca_reap(b, btree_order(k), false))
+ goto out;
+
+ /* We never free struct btree itself, just the memory that holds the on
+ * disk node. Check the freed list before allocating a new one:
+ */
+ list_for_each_entry(b, &c->btree_cache_freed, list)
+ if (!mca_reap(b, 0, false)) {
+ mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
+ if (!b->keys.set[0].data)
+ goto err;
+ else
+ goto out;
+ }
+
+ b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
+ if (!b)
+ goto err;
+
+ BUG_ON(!down_write_trylock(&b->lock));
+ if (!b->keys.set->data)
+ goto err;
+out:
+ BUG_ON(b->io_mutex.count != 1);
+
+ bkey_copy(&b->key, k);
+ list_move(&b->list, &c->btree_cache);
+ hlist_del_init_rcu(&b->hash);
+ hlist_add_head_rcu(&b->hash, mca_hash(c, k));
+
+ lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
+ b->parent = (void *) ~0UL;
+ b->flags = 0;
+ b->written = 0;
+ b->level = level;
+
+ if (!b->level)
+ bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
+ &b->c->expensive_debug_checks);
+ else
+ bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
+ &b->c->expensive_debug_checks);
+
+ return b;
+err:
+ if (b)
+ rw_unlock(true, b);
+
+ b = mca_cannibalize(c, op, k);
+ if (!IS_ERR(b))
+ goto out;
+
+ return b;
+}
+
+/*
+ * bch_btree_node_get - find a btree node in the cache and lock it, reading it
+ * in from disk if necessary.
+ *
+ * If IO is necessary and running under submit_bio_noacct, returns -EAGAIN.
+ *
+ * The btree node will have either a read or a write lock held, depending on
+ * level and op->lock.
+ *
+ * Note: Only an error code or a btree pointer will be returned; it is
+ * unnecessary for callers to check for a NULL pointer.
+ */
+struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
+ struct bkey *k, int level, bool write,
+ struct btree *parent)
+{
+ int i = 0;
+ struct btree *b;
+
+ BUG_ON(level < 0);
+retry:
+ b = mca_find(c, k);
+
+ if (!b) {
+ if (current->bio_list)
+ return ERR_PTR(-EAGAIN);
+
+ mutex_lock(&c->bucket_lock);
+ b = mca_alloc(c, op, k, level);
+ mutex_unlock(&c->bucket_lock);
+
+ if (!b)
+ goto retry;
+ if (IS_ERR(b))
+ return b;
+
+ bch_btree_node_read(b);
+
+ if (!write)
+ downgrade_write(&b->lock);
+ } else {
+ rw_lock(write, b, level);
+ if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
+ rw_unlock(write, b);
+ goto retry;
+ }
+ BUG_ON(b->level != level);
+ }
+
+ if (btree_node_io_error(b)) {
+ rw_unlock(write, b);
+ return ERR_PTR(-EIO);
+ }
+
+ BUG_ON(!b->written);
+
+ b->parent = parent;
+
+ for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
+ prefetch(b->keys.set[i].tree);
+ prefetch(b->keys.set[i].data);
+ }
+
+ for (; i <= b->keys.nsets; i++)
+ prefetch(b->keys.set[i].data);
+
+ return b;
+}
+
+static void btree_node_prefetch(struct btree *parent, struct bkey *k)
+{
+ struct btree *b;
+
+ mutex_lock(&parent->c->bucket_lock);
+ b = mca_alloc(parent->c, NULL, k, parent->level - 1);
+ mutex_unlock(&parent->c->bucket_lock);
+
+ if (!IS_ERR_OR_NULL(b)) {
+ b->parent = parent;
+ bch_btree_node_read(b);
+ rw_unlock(true, b);
+ }
+}
+
+/* Btree alloc */
+
+static void btree_node_free(struct btree *b)
+{
+ trace_bcache_btree_node_free(b);
+
+ BUG_ON(b == b->c->root);
+
+retry:
+ mutex_lock(&b->write_lock);
+	/*
+	 * If the btree node is selected and being flushed in
+	 * btree_flush_write(), delay and retry until the
+	 * BTREE_NODE_journal_flush bit is cleared; then it is safe to free
+	 * the btree node here. Otherwise this btree node would be in a race
+	 * condition.
+	 */
+ if (btree_node_journal_flush(b)) {
+ mutex_unlock(&b->write_lock);
+ pr_debug("bnode %p journal_flush set, retry\n", b);
+ udelay(1);
+ goto retry;
+ }
+
+ if (btree_node_dirty(b)) {
+ btree_complete_write(b, btree_current_write(b));
+ clear_bit(BTREE_NODE_dirty, &b->flags);
+ }
+
+ mutex_unlock(&b->write_lock);
+
+ cancel_delayed_work(&b->work);
+
+ mutex_lock(&b->c->bucket_lock);
+ bch_bucket_free(b->c, &b->key);
+ mca_bucket_free(b);
+ mutex_unlock(&b->c->bucket_lock);
+}
+
+/*
+ * Only an error code or a btree pointer will be returned; it is unnecessary
+ * for callers to check for a NULL pointer.
+ */
+struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
+ int level, bool wait,
+ struct btree *parent)
+{
+ BKEY_PADDED(key) k;
+ struct btree *b;
+
+ mutex_lock(&c->bucket_lock);
+retry:
+ /* return ERR_PTR(-EAGAIN) when it fails */
+ b = ERR_PTR(-EAGAIN);
+ if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, wait))
+ goto err;
+
+ bkey_put(c, &k.key);
+ SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
+
+ b = mca_alloc(c, op, &k.key, level);
+ if (IS_ERR(b))
+ goto err_free;
+
+ if (!b) {
+ cache_bug(c,
+ "Tried to allocate bucket that was in btree cache");
+ goto retry;
+ }
+
+ b->parent = parent;
+ bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->cache->sb));
+
+ mutex_unlock(&c->bucket_lock);
+
+ trace_bcache_btree_node_alloc(b);
+ return b;
+err_free:
+ bch_bucket_free(c, &k.key);
+err:
+ mutex_unlock(&c->bucket_lock);
+
+ trace_bcache_btree_node_alloc_fail(c);
+ return b;
+}
+
+static struct btree *bch_btree_node_alloc(struct cache_set *c,
+ struct btree_op *op, int level,
+ struct btree *parent)
+{
+ return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
+}
+
+static struct btree *btree_node_alloc_replacement(struct btree *b,
+ struct btree_op *op)
+{
+ struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
+
+ if (!IS_ERR(n)) {
+ mutex_lock(&n->write_lock);
+ bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
+ bkey_copy_key(&n->key, &b->key);
+ mutex_unlock(&n->write_lock);
+ }
+
+ return n;
+}
+
+static void make_btree_freeing_key(struct btree *b, struct bkey *k)
+{
+ unsigned int i;
+
+ mutex_lock(&b->c->bucket_lock);
+
+ atomic_inc(&b->c->prio_blocked);
+
+ bkey_copy(k, &b->key);
+ bkey_copy_key(k, &ZERO_KEY);
+
+ for (i = 0; i < KEY_PTRS(k); i++)
+ SET_PTR_GEN(k, i,
+ bch_inc_gen(b->c->cache,
+ PTR_BUCKET(b->c, &b->key, i)));
+
+ mutex_unlock(&b->c->bucket_lock);
+}
+
+static int btree_check_reserve(struct btree *b, struct btree_op *op)
+{
+ struct cache_set *c = b->c;
+ struct cache *ca = c->cache;
+ unsigned int reserve = (c->root->level - b->level) * 2 + 1;
+
+ mutex_lock(&c->bucket_lock);
+
+ if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
+ if (op)
+ prepare_to_wait(&c->btree_cache_wait, &op->wait,
+ TASK_UNINTERRUPTIBLE);
+ mutex_unlock(&c->bucket_lock);
+ return -EINTR;
+ }
+
+ mutex_unlock(&c->bucket_lock);
+
+ return mca_cannibalize_lock(b->c, op);
+}
+
+/* Garbage collection */
+
+static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
+ struct bkey *k)
+{
+ uint8_t stale = 0;
+ unsigned int i;
+ struct bucket *g;
+
+	/*
+	 * ptr_invalid() can't return true for the keys that mark btree nodes as
+	 * freed, but since ptr_bad() returns true we'll never actually use them
+	 * for anything and thus we don't want to mark their pointers here
+	 */
+ if (!bkey_cmp(k, &ZERO_KEY))
+ return stale;
+
+ for (i = 0; i < KEY_PTRS(k); i++) {
+ if (!ptr_available(c, k, i))
+ continue;
+
+ g = PTR_BUCKET(c, k, i);
+
+ if (gen_after(g->last_gc, PTR_GEN(k, i)))
+ g->last_gc = PTR_GEN(k, i);
+
+ if (ptr_stale(c, k, i)) {
+ stale = max(stale, ptr_stale(c, k, i));
+ continue;
+ }
+
+ cache_bug_on(GC_MARK(g) &&
+ (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
+ c, "inconsistent ptrs: mark = %llu, level = %i",
+ GC_MARK(g), level);
+
+ if (level)
+ SET_GC_MARK(g, GC_MARK_METADATA);
+ else if (KEY_DIRTY(k))
+ SET_GC_MARK(g, GC_MARK_DIRTY);
+ else if (!GC_MARK(g))
+ SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
+
+ /* guard against overflow */
+ SET_GC_SECTORS_USED(g, min_t(unsigned int,
+ GC_SECTORS_USED(g) + KEY_SIZE(k),
+ MAX_GC_SECTORS_USED));
+
+ BUG_ON(!GC_SECTORS_USED(g));
+ }
+
+ return stale;
+}
+
+#define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k)
+
+void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
+{
+ unsigned int i;
+
+ for (i = 0; i < KEY_PTRS(k); i++)
+ if (ptr_available(c, k, i) &&
+ !ptr_stale(c, k, i)) {
+ struct bucket *b = PTR_BUCKET(c, k, i);
+
+ b->gen = PTR_GEN(k, i);
+
+ if (level && bkey_cmp(k, &ZERO_KEY))
+ b->prio = BTREE_PRIO;
+ else if (!level && b->prio == BTREE_PRIO)
+ b->prio = INITIAL_PRIO;
+ }
+
+ __bch_btree_mark_key(c, level, k);
+}
+
+void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
+{
+ stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets;
+}
+
+static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
+{
+ uint8_t stale = 0;
+ unsigned int keys = 0, good_keys = 0;
+ struct bkey *k;
+ struct btree_iter iter;
+ struct bset_tree *t;
+
+ gc->nodes++;
+
+ for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
+ stale = max(stale, btree_mark_key(b, k));
+ keys++;
+
+ if (bch_ptr_bad(&b->keys, k))
+ continue;
+
+ gc->key_bytes += bkey_u64s(k);
+ gc->nkeys++;
+ good_keys++;
+
+ gc->data += KEY_SIZE(k);
+ }
+
+ for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
+ btree_bug_on(t->size &&
+ bset_written(&b->keys, t) &&
+ bkey_cmp(&b->key, &t->end) < 0,
+ b, "found short btree key in gc");
+
+ if (b->c->gc_always_rewrite)
+ return true;
+
+ if (stale > 10)
+ return true;
+
+ if ((keys - good_keys) * 2 > keys)
+ return true;
+
+ return false;
+}
+
+#define GC_MERGE_NODES 4U
+
+struct gc_merge_info {
+ struct btree *b;
+ unsigned int keys;
+};
+
+static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
+ struct keylist *insert_keys,
+ atomic_t *journal_ref,
+ struct bkey *replace_key);
+
+static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
+ struct gc_stat *gc, struct gc_merge_info *r)
+{
+ unsigned int i, nodes = 0, keys = 0, blocks;
+ struct btree *new_nodes[GC_MERGE_NODES];
+ struct keylist keylist;
+ struct closure cl;
+ struct bkey *k;
+
+ bch_keylist_init(&keylist);
+
+ if (btree_check_reserve(b, NULL))
+ return 0;
+
+ memset(new_nodes, 0, sizeof(new_nodes));
+ closure_init_stack(&cl);
+
+ while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
+ keys += r[nodes++].keys;
+
+ blocks = btree_default_blocks(b->c) * 2 / 3;
+
+ if (nodes < 2 ||
+ __set_blocks(b->keys.set[0].data, keys,
+ block_bytes(b->c->cache)) > blocks * (nodes - 1))
+ return 0;
+
+ for (i = 0; i < nodes; i++) {
+ new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
+ if (IS_ERR(new_nodes[i]))
+ goto out_nocoalesce;
+ }
+
+ /*
+ * We have to check the reserve here, after we've allocated our new
+ * nodes, to make sure the insert below will succeed - we also check
+ * before as an optimization to potentially avoid a bunch of expensive
+ * allocs/sorts
+ */
+ if (btree_check_reserve(b, NULL))
+ goto out_nocoalesce;
+
+ for (i = 0; i < nodes; i++)
+ mutex_lock(&new_nodes[i]->write_lock);
+
+ for (i = nodes - 1; i > 0; --i) {
+ struct bset *n1 = btree_bset_first(new_nodes[i]);
+ struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
+ struct bkey *k, *last = NULL;
+
+ keys = 0;
+
+ if (i > 1) {
+ for (k = n2->start;
+ k < bset_bkey_last(n2);
+ k = bkey_next(k)) {
+ if (__set_blocks(n1, n1->keys + keys +
+ bkey_u64s(k),
+ block_bytes(b->c->cache)) > blocks)
+ break;
+
+ last = k;
+ keys += bkey_u64s(k);
+ }
+ } else {
+ /*
+ * Last node we're not getting rid of - we're getting
+ * rid of the node at r[0]. Have to try and fit all of
+ * the remaining keys into this node; we can't ensure
+ * they will always fit due to rounding and variable
+ * length keys (shouldn't be possible in practice,
+ * though)
+ */
+ if (__set_blocks(n1, n1->keys + n2->keys,
+ block_bytes(b->c->cache)) >
+ btree_blocks(new_nodes[i]))
+ goto out_unlock_nocoalesce;
+
+ keys = n2->keys;
+ /* Take the key of the node we're getting rid of */
+ last = &r->b->key;
+ }
+
+ BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c->cache)) >
+ btree_blocks(new_nodes[i]));
+
+ if (last)
+ bkey_copy_key(&new_nodes[i]->key, last);
+
+ memcpy(bset_bkey_last(n1),
+ n2->start,
+ (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
+
+ n1->keys += keys;
+ r[i].keys = n1->keys;
+
+ memmove(n2->start,
+ bset_bkey_idx(n2, keys),
+ (void *) bset_bkey_last(n2) -
+ (void *) bset_bkey_idx(n2, keys));
+
+ n2->keys -= keys;
+
+ if (__bch_keylist_realloc(&keylist,
+ bkey_u64s(&new_nodes[i]->key)))
+ goto out_unlock_nocoalesce;
+
+ bch_btree_node_write(new_nodes[i], &cl);
+ bch_keylist_add(&keylist, &new_nodes[i]->key);
+ }
+
+ for (i = 0; i < nodes; i++)
+ mutex_unlock(&new_nodes[i]->write_lock);
+
+ closure_sync(&cl);
+
+ /* We emptied out this node */
+ BUG_ON(btree_bset_first(new_nodes[0])->keys);
+ btree_node_free(new_nodes[0]);
+ rw_unlock(true, new_nodes[0]);
+ new_nodes[0] = NULL;
+
+ for (i = 0; i < nodes; i++) {
+ if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
+ goto out_nocoalesce;
+
+ make_btree_freeing_key(r[i].b, keylist.top);
+ bch_keylist_push(&keylist);
+ }
+
+ bch_btree_insert_node(b, op, &keylist, NULL, NULL);
+ BUG_ON(!bch_keylist_empty(&keylist));
+
+ for (i = 0; i < nodes; i++) {
+ btree_node_free(r[i].b);
+ rw_unlock(true, r[i].b);
+
+ r[i].b = new_nodes[i];
+ }
+
+ memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
+ r[nodes - 1].b = ERR_PTR(-EINTR);
+
+ trace_bcache_btree_gc_coalesce(nodes);
+ gc->nodes--;
+
+ bch_keylist_free(&keylist);
+
+ /* Invalidated our iterator */
+ return -EINTR;
+
+out_unlock_nocoalesce:
+ for (i = 0; i < nodes; i++)
+ mutex_unlock(&new_nodes[i]->write_lock);
+
+out_nocoalesce:
+ closure_sync(&cl);
+
+ while ((k = bch_keylist_pop(&keylist)))
+ if (!bkey_cmp(k, &ZERO_KEY))
+ atomic_dec(&b->c->prio_blocked);
+ bch_keylist_free(&keylist);
+
+ for (i = 0; i < nodes; i++)
+ if (!IS_ERR_OR_NULL(new_nodes[i])) {
+ btree_node_free(new_nodes[i]);
+ rw_unlock(true, new_nodes[i]);
+ }
+ return 0;
+}
+
+static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
+ struct btree *replace)
+{
+ struct keylist keys;
+ struct btree *n;
+
+ if (btree_check_reserve(b, NULL))
+ return 0;
+
+ n = btree_node_alloc_replacement(replace, NULL);
+ if (IS_ERR(n))
+ return 0;
+
+ /* recheck reserve after allocating replacement node */
+ if (btree_check_reserve(b, NULL)) {
+ btree_node_free(n);
+ rw_unlock(true, n);
+ return 0;
+ }
+
+ bch_btree_node_write_sync(n);
+
+ bch_keylist_init(&keys);
+ bch_keylist_add(&keys, &n->key);
+
+ make_btree_freeing_key(replace, keys.top);
+ bch_keylist_push(&keys);
+
+ bch_btree_insert_node(b, op, &keys, NULL, NULL);
+ BUG_ON(!bch_keylist_empty(&keys));
+
+ btree_node_free(replace);
+ rw_unlock(true, n);
+
+ /* Invalidated our iterator */
+ return -EINTR;
+}
+
+static unsigned int btree_gc_count_keys(struct btree *b)
+{
+ struct bkey *k;
+ struct btree_iter iter;
+ unsigned int ret = 0;
+
+ for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
+ ret += bkey_u64s(k);
+
+ return ret;
+}
+
+static size_t btree_gc_min_nodes(struct cache_set *c)
+{
+ size_t min_nodes;
+
+	/*
+	 * Incremental GC pauses for 100ms whenever front-side I/O
+	 * arrives, so if GC processed only a constant number (100) of
+	 * nodes each time, a btree with many nodes would make GC last
+	 * a very long time, and the front-side I/Os would run out of
+	 * buckets (no new bucket can be allocated during GC) and be
+	 * blocked again. So GC should not process a constant number of
+	 * nodes, but a number that varies with the size of the btree:
+	 * the work is divided into a constant number (MAX_GC_TIMES, 100)
+	 * of passes, so when there are many btree nodes GC processes
+	 * more nodes each time, and otherwise fewer (but never fewer
+	 * than MIN_GC_NODES).
+	 */
+ min_nodes = c->gc_stats.nodes / MAX_GC_TIMES;
+ if (min_nodes < MIN_GC_NODES)
+ min_nodes = MIN_GC_NODES;
+
+ return min_nodes;
+}
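
With assumed node counts, the batching rule above works out as follows: the whole tree is split across at most MAX_GC_TIMES passes, but a pass never covers fewer than MIN_GC_NODES nodes. A small stand-alone sketch:

/* Illustrative only: the per-pass batching rule with assumed node counts. */
#include <stdio.h>

#define TOY_MAX_GC_TIMES 100
#define TOY_MIN_GC_NODES 100

static unsigned long toy_min_nodes(unsigned long total_nodes)
{
	unsigned long min_nodes = total_nodes / TOY_MAX_GC_TIMES;

	return min_nodes < TOY_MIN_GC_NODES ? TOY_MIN_GC_NODES : min_nodes;
}

int main(void)
{
	/* small tree clamps to the floor, large tree scales: prints 100 10000 */
	printf("%lu %lu\n", toy_min_nodes(5000), toy_min_nodes(1000000));
	return 0;
}
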
+
+
+static int btree_gc_recurse(struct btree *b, struct btree_op *op,
+ struct closure *writes, struct gc_stat *gc)
+{
+ int ret = 0;
+ bool should_rewrite;
+ struct bkey *k;
+ struct btree_iter iter;
+ struct gc_merge_info r[GC_MERGE_NODES];
+ struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
+
+ bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
+
+ for (i = r; i < r + ARRAY_SIZE(r); i++)
+ i->b = ERR_PTR(-EINTR);
+
+ while (1) {
+ k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
+ if (k) {
+ r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
+ true, b);
+ if (IS_ERR(r->b)) {
+ ret = PTR_ERR(r->b);
+ break;
+ }
+
+ r->keys = btree_gc_count_keys(r->b);
+
+ ret = btree_gc_coalesce(b, op, gc, r);
+ if (ret)
+ break;
+ }
+
+ if (!last->b)
+ break;
+
+ if (!IS_ERR(last->b)) {
+ should_rewrite = btree_gc_mark_node(last->b, gc);
+ if (should_rewrite) {
+ ret = btree_gc_rewrite_node(b, op, last->b);
+ if (ret)
+ break;
+ }
+
+ if (last->b->level) {
+ ret = btree_gc_recurse(last->b, op, writes, gc);
+ if (ret)
+ break;
+ }
+
+ bkey_copy_key(&b->c->gc_done, &last->b->key);
+
+ /*
+ * Must flush leaf nodes before gc ends, since replace
+ * operations aren't journalled
+ */
+ mutex_lock(&last->b->write_lock);
+ if (btree_node_dirty(last->b))
+ bch_btree_node_write(last->b, writes);
+ mutex_unlock(&last->b->write_lock);
+ rw_unlock(true, last->b);
+ }
+
+ memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
+ r->b = NULL;
+
+ if (atomic_read(&b->c->search_inflight) &&
+ gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) {
+ gc->nodes_pre = gc->nodes;
+ ret = -EAGAIN;
+ break;
+ }
+
+ if (need_resched()) {
+ ret = -EAGAIN;
+ break;
+ }
+ }
+
+ for (i = r; i < r + ARRAY_SIZE(r); i++)
+ if (!IS_ERR_OR_NULL(i->b)) {
+ mutex_lock(&i->b->write_lock);
+ if (btree_node_dirty(i->b))
+ bch_btree_node_write(i->b, writes);
+ mutex_unlock(&i->b->write_lock);
+ rw_unlock(true, i->b);
+ }
+
+ return ret;
+}
+
+static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
+ struct closure *writes, struct gc_stat *gc)
+{
+ struct btree *n = NULL;
+ int ret = 0;
+ bool should_rewrite;
+
+ should_rewrite = btree_gc_mark_node(b, gc);
+ if (should_rewrite) {
+ n = btree_node_alloc_replacement(b, NULL);
+
+ if (!IS_ERR(n)) {
+ bch_btree_node_write_sync(n);
+
+ bch_btree_set_root(n);
+ btree_node_free(b);
+ rw_unlock(true, n);
+
+ return -EINTR;
+ }
+ }
+
+ __bch_btree_mark_key(b->c, b->level + 1, &b->key);
+
+ if (b->level) {
+ ret = btree_gc_recurse(b, op, writes, gc);
+ if (ret)
+ return ret;
+ }
+
+ bkey_copy_key(&b->c->gc_done, &b->key);
+
+ return ret;
+}
+
+static void btree_gc_start(struct cache_set *c)
+{
+ struct cache *ca;
+ struct bucket *b;
+
+ if (!c->gc_mark_valid)
+ return;
+
+ mutex_lock(&c->bucket_lock);
+
+ c->gc_mark_valid = 0;
+ c->gc_done = ZERO_KEY;
+
+ ca = c->cache;
+ for_each_bucket(b, ca) {
+ b->last_gc = b->gen;
+ if (!atomic_read(&b->pin)) {
+ SET_GC_MARK(b, 0);
+ SET_GC_SECTORS_USED(b, 0);
+ }
+ }
+
+ mutex_unlock(&c->bucket_lock);
+}
+
+static void bch_btree_gc_finish(struct cache_set *c)
+{
+ struct bucket *b;
+ struct cache *ca;
+ unsigned int i, j;
+ uint64_t *k;
+
+ mutex_lock(&c->bucket_lock);
+
+ set_gc_sectors(c);
+ c->gc_mark_valid = 1;
+ c->need_gc = 0;
+
+ for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
+ SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
+ GC_MARK_METADATA);
+
+ /* don't reclaim buckets to which writeback keys point */
+ rcu_read_lock();
+ for (i = 0; i < c->devices_max_used; i++) {
+ struct bcache_device *d = c->devices[i];
+ struct cached_dev *dc;
+ struct keybuf_key *w, *n;
+
+ if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
+ continue;
+ dc = container_of(d, struct cached_dev, disk);
+
+ spin_lock(&dc->writeback_keys.lock);
+ rbtree_postorder_for_each_entry_safe(w, n,
+ &dc->writeback_keys.keys, node)
+ for (j = 0; j < KEY_PTRS(&w->key); j++)
+ SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
+ GC_MARK_DIRTY);
+ spin_unlock(&dc->writeback_keys.lock);
+ }
+ rcu_read_unlock();
+
+ c->avail_nbuckets = 0;
+
+ ca = c->cache;
+ ca->invalidate_needs_gc = 0;
+
+ for (k = ca->sb.d; k < ca->sb.d + ca->sb.keys; k++)
+ SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
+
+ for (k = ca->prio_buckets;
+ k < ca->prio_buckets + prio_buckets(ca) * 2; k++)
+ SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
+
+ for_each_bucket(b, ca) {
+ c->need_gc = max(c->need_gc, bucket_gc_gen(b));
+
+ if (atomic_read(&b->pin))
+ continue;
+
+ BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
+
+ if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
+ c->avail_nbuckets++;
+ }
+
+ mutex_unlock(&c->bucket_lock);
+}
+
+static void bch_btree_gc(struct cache_set *c)
+{
+ int ret;
+ struct gc_stat stats;
+ struct closure writes;
+ struct btree_op op;
+ uint64_t start_time = local_clock();
+
+ trace_bcache_gc_start(c);
+
+ memset(&stats, 0, sizeof(struct gc_stat));
+ closure_init_stack(&writes);
+ bch_btree_op_init(&op, SHRT_MAX);
+
+ btree_gc_start(c);
+
+ /* if CACHE_SET_IO_DISABLE set, gc thread should stop too */
+ do {
+ ret = bcache_btree_root(gc_root, c, &op, &writes, &stats);
+ closure_sync(&writes);
+ cond_resched();
+
+ if (ret == -EAGAIN)
+ schedule_timeout_interruptible(msecs_to_jiffies
+ (GC_SLEEP_MS));
+ else if (ret)
+ pr_warn("gc failed!\n");
+ } while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags));
+
+ bch_btree_gc_finish(c);
+ wake_up_allocators(c);
+
+ bch_time_stats_update(&c->btree_gc_time, start_time);
+
+ stats.key_bytes *= sizeof(uint64_t);
+ stats.data <<= 9;
+ bch_update_bucket_in_use(c, &stats);
+ memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
+
+ trace_bcache_gc_end(c);
+
+ bch_moving_gc(c);
+}
+
+static bool gc_should_run(struct cache_set *c)
+{
+ struct cache *ca = c->cache;
+
+ if (ca->invalidate_needs_gc)
+ return true;
+
+ if (atomic_read(&c->sectors_to_gc) < 0)
+ return true;
+
+ return false;
+}
+
+static int bch_gc_thread(void *arg)
+{
+ struct cache_set *c = arg;
+
+ while (1) {
+ wait_event_interruptible(c->gc_wait,
+ kthread_should_stop() ||
+ test_bit(CACHE_SET_IO_DISABLE, &c->flags) ||
+ gc_should_run(c));
+
+ if (kthread_should_stop() ||
+ test_bit(CACHE_SET_IO_DISABLE, &c->flags))
+ break;
+
+ set_gc_sectors(c);
+ bch_btree_gc(c);
+ }
+
+ wait_for_kthread_stop();
+ return 0;
+}
+
+int bch_gc_thread_start(struct cache_set *c)
+{
+ c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
+ return PTR_ERR_OR_ZERO(c->gc_thread);
+}
+
+/* Initial partial gc */
+
+static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
+{
+ int ret = 0;
+ struct bkey *k, *p = NULL;
+ struct btree_iter iter;
+
+ for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
+ bch_initial_mark_key(b->c, b->level, k);
+
+ bch_initial_mark_key(b->c, b->level + 1, &b->key);
+
+ if (b->level) {
+ bch_btree_iter_init(&b->keys, &iter, NULL);
+
+ do {
+ k = bch_btree_iter_next_filter(&iter, &b->keys,
+ bch_ptr_bad);
+ if (k) {
+ btree_node_prefetch(b, k);
+				/*
+				 * initialize c->gc_stats.nodes
+				 * for incremental GC
+				 */
+ b->c->gc_stats.nodes++;
+ }
+
+ if (p)
+ ret = bcache_btree(check_recurse, p, b, op);
+
+ p = k;
+ } while (p && !ret);
+ }
+
+ return ret;
+}
+
+
+static int bch_btree_check_thread(void *arg)
+{
+ int ret;
+ struct btree_check_info *info = arg;
+ struct btree_check_state *check_state = info->state;
+ struct cache_set *c = check_state->c;
+ struct btree_iter iter;
+ struct bkey *k, *p;
+ int cur_idx, prev_idx, skip_nr;
+
+ k = p = NULL;
+ cur_idx = prev_idx = 0;
+ ret = 0;
+
+	/* root node keys are checked before this thread is created */
+ bch_btree_iter_init(&c->root->keys, &iter, NULL);
+ k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
+ BUG_ON(!k);
+
+ p = k;
+ while (k) {
+ /*
+ * Fetch a root node key index, skip the keys which
+ * should be fetched by other threads, then check the
+ * sub-tree indexed by the fetched key.
+ */
+ spin_lock(&check_state->idx_lock);
+ cur_idx = check_state->key_idx;
+ check_state->key_idx++;
+ spin_unlock(&check_state->idx_lock);
+
+ skip_nr = cur_idx - prev_idx;
+
+ while (skip_nr) {
+ k = bch_btree_iter_next_filter(&iter,
+ &c->root->keys,
+ bch_ptr_bad);
+ if (k)
+ p = k;
+ else {
+ /*
+ * No more keys to check in root node,
+ * current checking threads are enough,
+ * stop creating more.
+ */
+ atomic_set(&check_state->enough, 1);
+ /* Update check_state->enough earlier */
+ smp_mb__after_atomic();
+ goto out;
+ }
+ skip_nr--;
+ cond_resched();
+ }
+
+ if (p) {
+ struct btree_op op;
+
+ btree_node_prefetch(c->root, p);
+ c->gc_stats.nodes++;
+ bch_btree_op_init(&op, 0);
+ ret = bcache_btree(check_recurse, p, c->root, &op);
+			/*
+			 * The op may have been added to the cache_set's
+			 * btree_cache_wait in mca_cannibalize(); we must make
+			 * sure it is removed from the list and that
+			 * btree_cache_alloc_lock is released before the op's
+			 * memory is freed.
+			 * Otherwise, btree_cache_wait would be corrupted.
+			 */
+ bch_cannibalize_unlock(c);
+ finish_wait(&c->btree_cache_wait, &(&op)->wait);
+ if (ret)
+ goto out;
+ }
+ p = NULL;
+ prev_idx = cur_idx;
+ cond_resched();
+ }
+
+out:
+ info->result = ret;
+ /* update check_state->started among all CPUs */
+ smp_mb__before_atomic();
+ if (atomic_dec_and_test(&check_state->started))
+ wake_up(&check_state->wait);
+
+ return ret;
+}
+
+
+
+static int bch_btree_chkthread_nr(void)
+{
+ int n = num_online_cpus()/2;
+
+ if (n == 0)
+ n = 1;
+ else if (n > BCH_BTR_CHKTHREAD_MAX)
+ n = BCH_BTR_CHKTHREAD_MAX;
+
+ return n;
+}
+
+int bch_btree_check(struct cache_set *c)
+{
+ int ret = 0;
+ int i;
+ struct bkey *k = NULL;
+ struct btree_iter iter;
+ struct btree_check_state check_state;
+
+ /* check and mark root node keys */
+ for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid)
+ bch_initial_mark_key(c, c->root->level, k);
+
+ bch_initial_mark_key(c, c->root->level + 1, &c->root->key);
+
+ if (c->root->level == 0)
+ return 0;
+
+ memset(&check_state, 0, sizeof(struct btree_check_state));
+ check_state.c = c;
+ check_state.total_threads = bch_btree_chkthread_nr();
+ check_state.key_idx = 0;
+ spin_lock_init(&check_state.idx_lock);
+ atomic_set(&check_state.started, 0);
+ atomic_set(&check_state.enough, 0);
+ init_waitqueue_head(&check_state.wait);
+
+ rw_lock(0, c->root, c->root->level);
+	/*
+	 * Run multiple threads to check btree nodes in parallel. If
+	 * check_state.enough is non-zero, the currently running check
+	 * threads are enough and it is unnecessary to create more.
+	 */
+ for (i = 0; i < check_state.total_threads; i++) {
+		/* fetch the latest value of check_state.enough */
+ smp_mb__before_atomic();
+ if (atomic_read(&check_state.enough))
+ break;
+
+ check_state.infos[i].result = 0;
+ check_state.infos[i].state = &check_state;
+
+ check_state.infos[i].thread =
+ kthread_run(bch_btree_check_thread,
+ &check_state.infos[i],
+ "bch_btrchk[%d]", i);
+ if (IS_ERR(check_state.infos[i].thread)) {
+			pr_err("failed to run thread bch_btrchk[%d]\n", i);
+ for (--i; i >= 0; i--)
+ kthread_stop(check_state.infos[i].thread);
+ ret = -ENOMEM;
+ goto out;
+ }
+ atomic_inc(&check_state.started);
+ }
+
+ /*
+ * Must wait for all threads to stop.
+ */
+ wait_event(check_state.wait, atomic_read(&check_state.started) == 0);
+
+ for (i = 0; i < check_state.total_threads; i++) {
+ if (check_state.infos[i].result) {
+ ret = check_state.infos[i].result;
+ goto out;
+ }
+ }
+
+out:
+ rw_unlock(0, c->root);
+ return ret;
+}
+
+void bch_initial_gc_finish(struct cache_set *c)
+{
+ struct cache *ca = c->cache;
+ struct bucket *b;
+
+ bch_btree_gc_finish(c);
+
+ mutex_lock(&c->bucket_lock);
+
+ /*
+ * We need to put some unused buckets directly on the prio freelist in
+ * order to get the allocator thread started - it needs freed buckets in
+ * order to rewrite the prios and gens, and it needs to rewrite prios
+ * and gens in order to free buckets.
+ *
+ * This is only safe for buckets that have no live data in them, which
+ * there should always be some of.
+ */
+ for_each_bucket(b, ca) {
+ if (fifo_full(&ca->free[RESERVE_PRIO]) &&
+ fifo_full(&ca->free[RESERVE_BTREE]))
+ break;
+
+ if (bch_can_invalidate_bucket(ca, b) &&
+ !GC_MARK(b)) {
+ __bch_invalidate_one_bucket(ca, b);
+ if (!fifo_push(&ca->free[RESERVE_PRIO],
+ b - ca->buckets))
+ fifo_push(&ca->free[RESERVE_BTREE],
+ b - ca->buckets);
+ }
+ }
+
+ mutex_unlock(&c->bucket_lock);
+}
+
+/* Btree insertion */
+
+static bool btree_insert_key(struct btree *b, struct bkey *k,
+ struct bkey *replace_key)
+{
+ unsigned int status;
+
+ BUG_ON(bkey_cmp(k, &b->key) > 0);
+
+ status = bch_btree_insert_key(&b->keys, k, replace_key);
+ if (status != BTREE_INSERT_STATUS_NO_INSERT) {
+ bch_check_keys(&b->keys, "%u for %s", status,
+ replace_key ? "replace" : "insert");
+
+ trace_bcache_btree_insert_key(b, k, replace_key != NULL,
+ status);
+ return true;
+ } else
+ return false;
+}
+
+static size_t insert_u64s_remaining(struct btree *b)
+{
+ long ret = bch_btree_keys_u64s_remaining(&b->keys);
+
+ /*
+ * Might land in the middle of an existing extent and have to split it
+ */
+ if (b->keys.ops->is_extents)
+ ret -= KEY_MAX_U64S;
+
+ return max(ret, 0L);
+}
+
+static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
+ struct keylist *insert_keys,
+ struct bkey *replace_key)
+{
+ bool ret = false;
+ int oldsize = bch_count_data(&b->keys);
+
+ while (!bch_keylist_empty(insert_keys)) {
+ struct bkey *k = insert_keys->keys;
+
+ if (bkey_u64s(k) > insert_u64s_remaining(b))
+ break;
+
+ if (bkey_cmp(k, &b->key) <= 0) {
+ if (!b->level)
+ bkey_put(b->c, k);
+
+ ret |= btree_insert_key(b, k, replace_key);
+ bch_keylist_pop_front(insert_keys);
+ } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
+ BKEY_PADDED(key) temp;
+ bkey_copy(&temp.key, insert_keys->keys);
+
+ bch_cut_back(&b->key, &temp.key);
+ bch_cut_front(&b->key, insert_keys->keys);
+
+ ret |= btree_insert_key(b, &temp.key, replace_key);
+ break;
+ } else {
+ break;
+ }
+ }
+
+ if (!ret)
+ op->insert_collision = true;
+
+ BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
+
+ BUG_ON(bch_count_data(&b->keys) < oldsize);
+ return ret;
+}
+
+static int btree_split(struct btree *b, struct btree_op *op,
+ struct keylist *insert_keys,
+ struct bkey *replace_key)
+{
+ bool split;
+ struct btree *n1, *n2 = NULL, *n3 = NULL;
+ uint64_t start_time = local_clock();
+ struct closure cl;
+ struct keylist parent_keys;
+
+ closure_init_stack(&cl);
+ bch_keylist_init(&parent_keys);
+
+ if (btree_check_reserve(b, op)) {
+ if (!b->level)
+ return -EINTR;
+ else
+ WARN(1, "insufficient reserve for split\n");
+ }
+
+ n1 = btree_node_alloc_replacement(b, op);
+ if (IS_ERR(n1))
+ goto err;
+
+ split = set_blocks(btree_bset_first(n1),
+ block_bytes(n1->c->cache)) > (btree_blocks(b) * 4) / 5;
+
+ if (split) {
+ unsigned int keys = 0;
+
+ trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
+
+ n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent);
+ if (IS_ERR(n2))
+ goto err_free1;
+
+ if (!b->parent) {
+ n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL);
+ if (IS_ERR(n3))
+ goto err_free2;
+ }
+
+ mutex_lock(&n1->write_lock);
+ mutex_lock(&n2->write_lock);
+
+ bch_btree_insert_keys(n1, op, insert_keys, replace_key);
+
+ /*
+ * Has to be a linear search because we don't have an auxiliary
+ * search tree yet
+ */
+
+ while (keys < (btree_bset_first(n1)->keys * 3) / 5)
+ keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
+ keys));
+
+ bkey_copy_key(&n1->key,
+ bset_bkey_idx(btree_bset_first(n1), keys));
+ keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
+
+ btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
+ btree_bset_first(n1)->keys = keys;
+
+ memcpy(btree_bset_first(n2)->start,
+ bset_bkey_last(btree_bset_first(n1)),
+ btree_bset_first(n2)->keys * sizeof(uint64_t));
+
+ bkey_copy_key(&n2->key, &b->key);
+
+ bch_keylist_add(&parent_keys, &n2->key);
+ bch_btree_node_write(n2, &cl);
+ mutex_unlock(&n2->write_lock);
+ rw_unlock(true, n2);
+ } else {
+ trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
+
+ mutex_lock(&n1->write_lock);
+ bch_btree_insert_keys(n1, op, insert_keys, replace_key);
+ }
+
+ bch_keylist_add(&parent_keys, &n1->key);
+ bch_btree_node_write(n1, &cl);
+ mutex_unlock(&n1->write_lock);
+
+ if (n3) {
+ /* Depth increases, make a new root */
+ mutex_lock(&n3->write_lock);
+ bkey_copy_key(&n3->key, &MAX_KEY);
+ bch_btree_insert_keys(n3, op, &parent_keys, NULL);
+ bch_btree_node_write(n3, &cl);
+ mutex_unlock(&n3->write_lock);
+
+ closure_sync(&cl);
+ bch_btree_set_root(n3);
+ rw_unlock(true, n3);
+ } else if (!b->parent) {
+ /* Root filled up but didn't need to be split */
+ closure_sync(&cl);
+ bch_btree_set_root(n1);
+ } else {
+ /* Split a non root node */
+ closure_sync(&cl);
+ make_btree_freeing_key(b, parent_keys.top);
+ bch_keylist_push(&parent_keys);
+
+ bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
+ BUG_ON(!bch_keylist_empty(&parent_keys));
+ }
+
+ btree_node_free(b);
+ rw_unlock(true, n1);
+
+ bch_time_stats_update(&b->c->btree_split_time, start_time);
+
+ return 0;
+err_free2:
+ bkey_put(b->c, &n2->key);
+ btree_node_free(n2);
+ rw_unlock(true, n2);
+err_free1:
+ bkey_put(b->c, &n1->key);
+ btree_node_free(n1);
+ rw_unlock(true, n1);
+err:
+ WARN(1, "bcache: btree split failed (level %u)", b->level);
+
+ if (n3 == ERR_PTR(-EAGAIN) ||
+ n2 == ERR_PTR(-EAGAIN) ||
+ n1 == ERR_PTR(-EAGAIN))
+ return -EAGAIN;
+
+ return -ENOMEM;
+}
+
+static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
+ struct keylist *insert_keys,
+ atomic_t *journal_ref,
+ struct bkey *replace_key)
+{
+ struct closure cl;
+
+ BUG_ON(b->level && replace_key);
+
+ closure_init_stack(&cl);
+
+ mutex_lock(&b->write_lock);
+
+ if (write_block(b) != btree_bset_last(b) &&
+ b->keys.last_set_unwritten)
+ bch_btree_init_next(b); /* just wrote a set */
+
+ if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
+ mutex_unlock(&b->write_lock);
+ goto split;
+ }
+
+ BUG_ON(write_block(b) != btree_bset_last(b));
+
+ if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
+ if (!b->level)
+ bch_btree_leaf_dirty(b, journal_ref);
+ else
+ bch_btree_node_write(b, &cl);
+ }
+
+ mutex_unlock(&b->write_lock);
+
+ /* wait for btree node write if necessary, after unlock */
+ closure_sync(&cl);
+
+ return 0;
+split:
+ if (current->bio_list) {
+ op->lock = b->c->root->level + 1;
+ return -EAGAIN;
+ } else if (op->lock <= b->c->root->level) {
+ op->lock = b->c->root->level + 1;
+ return -EINTR;
+ } else {
+ /* Invalidated all iterators */
+ int ret = btree_split(b, op, insert_keys, replace_key);
+
+ if (bch_keylist_empty(insert_keys))
+ return 0;
+ else if (!ret)
+ return -EINTR;
+ return ret;
+ }
+}
+
+int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
+ struct bkey *check_key)
+{
+ int ret = -EINTR;
+ uint64_t btree_ptr = b->key.ptr[0];
+ unsigned long seq = b->seq;
+ struct keylist insert;
+ bool upgrade = op->lock == -1;
+
+ bch_keylist_init(&insert);
+
+ if (upgrade) {
+ rw_unlock(false, b);
+ rw_lock(true, b, b->level);
+
+ if (b->key.ptr[0] != btree_ptr ||
+ b->seq != seq + 1) {
+ op->lock = b->level;
+ goto out;
+ }
+ }
+
+ SET_KEY_PTRS(check_key, 1);
+ get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
+
+ SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
+
+ bch_keylist_add(&insert, check_key);
+
+ ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
+
+ BUG_ON(!ret && !bch_keylist_empty(&insert));
+out:
+ if (upgrade)
+ downgrade_write(&b->lock);
+ return ret;
+}
+
+struct btree_insert_op {
+ struct btree_op op;
+ struct keylist *keys;
+ atomic_t *journal_ref;
+ struct bkey *replace_key;
+};
+
+static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
+{
+ struct btree_insert_op *op = container_of(b_op,
+ struct btree_insert_op, op);
+
+ int ret = bch_btree_insert_node(b, &op->op, op->keys,
+ op->journal_ref, op->replace_key);
+ if (ret && !bch_keylist_empty(op->keys))
+ return ret;
+ else
+ return MAP_DONE;
+}
+
+int bch_btree_insert(struct cache_set *c, struct keylist *keys,
+ atomic_t *journal_ref, struct bkey *replace_key)
+{
+ struct btree_insert_op op;
+ int ret = 0;
+
+ BUG_ON(current->bio_list);
+ BUG_ON(bch_keylist_empty(keys));
+
+ bch_btree_op_init(&op.op, 0);
+ op.keys = keys;
+ op.journal_ref = journal_ref;
+ op.replace_key = replace_key;
+
+ while (!ret && !bch_keylist_empty(keys)) {
+ op.op.lock = 0;
+ ret = bch_btree_map_leaf_nodes(&op.op, c,
+ &START_KEY(keys->keys),
+ btree_insert_fn);
+ }
+
+ if (ret) {
+ struct bkey *k;
+
+ pr_err("error %i\n", ret);
+
+ while ((k = bch_keylist_pop(keys)))
+ bkey_put(c, k);
+ } else if (op.op.insert_collision)
+ ret = -ESRCH;
+
+ return ret;
+}
+
+void bch_btree_set_root(struct btree *b)
+{
+ unsigned int i;
+ struct closure cl;
+
+ closure_init_stack(&cl);
+
+ trace_bcache_btree_set_root(b);
+
+ BUG_ON(!b->written);
+
+ for (i = 0; i < KEY_PTRS(&b->key); i++)
+ BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);
+
+ mutex_lock(&b->c->bucket_lock);
+ list_del_init(&b->list);
+ mutex_unlock(&b->c->bucket_lock);
+
+ b->c->root = b;
+
+ bch_journal_meta(b->c, &cl);
+ closure_sync(&cl);
+}
+
+/* Map across nodes or keys */
+
+static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
+ struct bkey *from,
+ btree_map_nodes_fn *fn, int flags)
+{
+ int ret = MAP_CONTINUE;
+
+ if (b->level) {
+ struct bkey *k;
+ struct btree_iter iter;
+
+ bch_btree_iter_init(&b->keys, &iter, from);
+
+ while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
+ bch_ptr_bad))) {
+ ret = bcache_btree(map_nodes_recurse, k, b,
+ op, from, fn, flags);
+ from = NULL;
+
+ if (ret != MAP_CONTINUE)
+ return ret;
+ }
+ }
+
+ if (!b->level || flags == MAP_ALL_NODES)
+ ret = fn(op, b);
+
+ return ret;
+}
+
+int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
+ struct bkey *from, btree_map_nodes_fn *fn, int flags)
+{
+ return bcache_btree_root(map_nodes_recurse, c, op, from, fn, flags);
+}
+
+int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
+ struct bkey *from, btree_map_keys_fn *fn,
+ int flags)
+{
+ int ret = MAP_CONTINUE;
+ struct bkey *k;
+ struct btree_iter iter;
+
+ bch_btree_iter_init(&b->keys, &iter, from);
+
+ while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
+ ret = !b->level
+ ? fn(op, b, k)
+ : bcache_btree(map_keys_recurse, k,
+ b, op, from, fn, flags);
+ from = NULL;
+
+ if (ret != MAP_CONTINUE)
+ return ret;
+ }
+
+ if (!b->level && (flags & MAP_END_KEY))
+ ret = fn(op, b, &KEY(KEY_INODE(&b->key),
+ KEY_OFFSET(&b->key), 0));
+
+ return ret;
+}
+
+int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
+ struct bkey *from, btree_map_keys_fn *fn, int flags)
+{
+ return bcache_btree_root(map_keys_recurse, c, op, from, fn, flags);
+}
+
+/* Keybuf code */
+
+static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
+{
+ /* Overlapping keys compare equal */
+ if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
+ return -1;
+ if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
+ return 1;
+ return 0;
+}
+
+static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
+ struct keybuf_key *r)
+{
+ return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
+}
+
+struct refill {
+ struct btree_op op;
+ unsigned int nr_found;
+ struct keybuf *buf;
+ struct bkey *end;
+ keybuf_pred_fn *pred;
+};
+
+static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
+ struct bkey *k)
+{
+ struct refill *refill = container_of(op, struct refill, op);
+ struct keybuf *buf = refill->buf;
+ int ret = MAP_CONTINUE;
+
+ if (bkey_cmp(k, refill->end) > 0) {
+ ret = MAP_DONE;
+ goto out;
+ }
+
+ if (!KEY_SIZE(k)) /* end key */
+ goto out;
+
+ if (refill->pred(buf, k)) {
+ struct keybuf_key *w;
+
+ spin_lock(&buf->lock);
+
+ w = array_alloc(&buf->freelist);
+ if (!w) {
+ spin_unlock(&buf->lock);
+ return MAP_DONE;
+ }
+
+ w->private = NULL;
+ bkey_copy(&w->key, k);
+
+ if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
+ array_free(&buf->freelist, w);
+ else
+ refill->nr_found++;
+
+ if (array_freelist_empty(&buf->freelist))
+ ret = MAP_DONE;
+
+ spin_unlock(&buf->lock);
+ }
+out:
+ buf->last_scanned = *k;
+ return ret;
+}
+
+void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
+ struct bkey *end, keybuf_pred_fn *pred)
+{
+ struct bkey start = buf->last_scanned;
+ struct refill refill;
+
+ cond_resched();
+
+ bch_btree_op_init(&refill.op, -1);
+ refill.nr_found = 0;
+ refill.buf = buf;
+ refill.end = end;
+ refill.pred = pred;
+
+ bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
+ refill_keybuf_fn, MAP_END_KEY);
+
+ trace_bcache_keyscan(refill.nr_found,
+ KEY_INODE(&start), KEY_OFFSET(&start),
+ KEY_INODE(&buf->last_scanned),
+ KEY_OFFSET(&buf->last_scanned));
+
+ spin_lock(&buf->lock);
+
+ if (!RB_EMPTY_ROOT(&buf->keys)) {
+ struct keybuf_key *w;
+
+ w = RB_FIRST(&buf->keys, struct keybuf_key, node);
+ buf->start = START_KEY(&w->key);
+
+ w = RB_LAST(&buf->keys, struct keybuf_key, node);
+ buf->end = w->key;
+ } else {
+ buf->start = MAX_KEY;
+ buf->end = MAX_KEY;
+ }
+
+ spin_unlock(&buf->lock);
+}
+
+static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
+{
+ rb_erase(&w->node, &buf->keys);
+ array_free(&buf->freelist, w);
+}
+
+void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
+{
+ spin_lock(&buf->lock);
+ __bch_keybuf_del(buf, w);
+ spin_unlock(&buf->lock);
+}
+
+bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
+ struct bkey *end)
+{
+ bool ret = false;
+ struct keybuf_key *p, *w, s;
+
+ s.key = *start;
+
+ if (bkey_cmp(end, &buf->start) <= 0 ||
+ bkey_cmp(start, &buf->end) >= 0)
+ return false;
+
+ spin_lock(&buf->lock);
+ w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);
+
+ while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
+ p = w;
+ w = RB_NEXT(w, node);
+
+ if (p->private)
+ ret = true;
+ else
+ __bch_keybuf_del(buf, p);
+ }
+
+ spin_unlock(&buf->lock);
+ return ret;
+}
+
+struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
+{
+ struct keybuf_key *w;
+
+ spin_lock(&buf->lock);
+
+ w = RB_FIRST(&buf->keys, struct keybuf_key, node);
+
+ while (w && w->private)
+ w = RB_NEXT(w, node);
+
+ if (w)
+ w->private = ERR_PTR(-EINTR);
+
+ spin_unlock(&buf->lock);
+ return w;
+}
+
+struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
+ struct keybuf *buf,
+ struct bkey *end,
+ keybuf_pred_fn *pred)
+{
+ struct keybuf_key *ret;
+
+ while (1) {
+ ret = bch_keybuf_next(buf);
+ if (ret)
+ break;
+
+ if (bkey_cmp(&buf->last_scanned, end) >= 0) {
+ pr_debug("scan finished\n");
+ break;
+ }
+
+ bch_refill_keybuf(c, buf, end, pred);
+ }
+
+ return ret;
+}
+
+void bch_keybuf_init(struct keybuf *buf)
+{
+ buf->last_scanned = MAX_KEY;
+ buf->keys = RB_ROOT;
+
+ spin_lock_init(&buf->lock);
+ array_allocator_init(&buf->freelist);
+}
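+
+/*
+ * Illustrative keybuf usage (a sketch of the pattern used by consumers such
+ * as writeback and moving gc; @pred is a caller-supplied predicate):
+ *
+ *	struct keybuf_key *w;
+ *
+ *	w = bch_keybuf_next_rescan(c, buf, &MAX_KEY, pred);
+ *	if (!w)
+ *		return;			// scan finished
+ *	// ... do work on w->key, using w->private to track it ...
+ *	bch_keybuf_del(buf, w);		// drop the key when done
+ */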
+
+void bch_btree_exit(void)
+{
+ if (btree_io_wq)
+ destroy_workqueue(btree_io_wq);
+}
+
+int __init bch_btree_init(void)
+{
+ btree_io_wq = alloc_workqueue("bch_btree_io", WQ_MEM_RECLAIM, 0);
+ if (!btree_io_wq)
+ return -ENOMEM;
+
+ return 0;
+}
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
new file mode 100644
index 0000000000..45d64b5411
--- /dev/null
+++ b/drivers/md/bcache/btree.h
@@ -0,0 +1,417 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHE_BTREE_H
+#define _BCACHE_BTREE_H
+
+/*
+ * THE BTREE:
+ *
+ * At a high level, bcache's btree is a relatively standard b+ tree. All keys and
+ * pointers are in the leaves; interior nodes only have pointers to the child
+ * nodes.
+ *
+ * In the interior nodes, a struct bkey always points to a child btree node, and
+ * the key is the highest key in the child node - except that the highest key in
+ * an interior node is always MAX_KEY. The size field refers to the size on disk
+ * of the child node - this would allow us to have variable sized btree nodes
+ * (handy for keeping the depth of the btree at 1 by expanding just the root).
+ *
+ * Btree nodes are themselves log structured, but this is hidden fairly
+ * thoroughly. Btree nodes on disk will in practice have extents that overlap
+ * (because they were written at different times), but in memory we never have
+ * overlapping extents - when we read in a btree node from disk, the first thing
+ * we do is resort all the sets of keys with a mergesort, and in the same pass
+ * we check for overlapping extents and adjust them appropriately.
+ *
+ * struct btree_op is a central interface to the btree code. It's used for
+ * specifying read vs. write locking, and the embedded closure is used for
+ * waiting on IO or reserve memory.
+ *
+ * BTREE CACHE:
+ *
+ * Btree nodes are cached in memory; traversing the btree might require reading
+ * in btree nodes which is handled mostly transparently.
+ *
+ * bch_btree_node_get() looks up a btree node in the cache and reads it in from
+ * disk if necessary. This function is almost never called directly though - the
+ * btree() macro is used to get a btree node, call some function on it, and
+ * unlock the node after the function returns.
+ *
+ * The root is special cased - it's taken out of the cache's lru (thus pinning
+ * it in memory), so we can find the root of the btree by just dereferencing a
+ * pointer instead of looking it up in the cache. This makes locking a bit
+ * tricky, since the root pointer is protected by the lock in the btree node it
+ * points to - the btree_root() macro handles this.
+ *
+ * In various places we must be able to allocate memory for multiple btree nodes
+ * in order to make forward progress. To do this we use the btree cache itself
+ * as a reserve; if __get_free_pages() fails, we'll find a node in the btree
+ * cache we can reuse. We can't allow more than one thread to be doing this at a
+ * time, so there's a lock, implemented by a pointer to the btree_op closure -
+ * this allows the btree_root() macro to implicitly release this lock.
+ *
+ * BTREE IO:
+ *
+ * Btree nodes never have to be explicitly read in; bch_btree_node_get() handles
+ * this.
+ *
+ * For writing, we have two btree_write structs embedded in struct btree - one
+ * write in flight, and one being set up, and we toggle between them.
+ *
+ * Writing is done with a single function - bch_btree_write() really serves two
+ * different purposes and should be broken up into two different functions. When
+ * passing now = false, it merely indicates that the node is now dirty - calling
+ * it ensures that the dirty keys will be written at some point in the future.
+ *
+ * When passing now = true, bch_btree_write() causes a write to happen
+ * "immediately" (if there was already a write in flight, it'll cause the write
+ * to happen as soon as the previous write completes). It returns immediately
+ * though - but it takes a refcount on the closure in struct btree_op you passed
+ * to it, so a closure_sync() later can be used to wait for the write to
+ * complete.
+ *
+ * This is handy because btree_split() and garbage collection can issue writes
+ * in parallel, reducing the amount of time they have to hold write locks.
+ *
+ * LOCKING:
+ *
+ * When traversing the btree, we may need write locks starting at some level -
+ * inserting a key into the btree will typically only require a write lock on
+ * the leaf node.
+ *
+ * This is specified with the lock field in struct btree_op; lock = 0 means we
+ * take write locks at level <= 0, i.e. only leaf nodes. bch_btree_node_get()
+ * checks this field and returns the node with the appropriate lock held.
+ *
+ * If, after traversing the btree, the insertion code discovers it has to split
+ * then it must restart from the root and take new locks - to do this it changes
+ * the lock field and returns -EINTR, which causes the btree_root() macro to
+ * loop.
+ *
+ * Handling cache misses requires a different mechanism for upgrading to a write
+ * lock. We do cache lookups with only a read lock held, but if we get a cache
+ * miss and we wish to insert this data into the cache, we have to insert a
+ * placeholder key to detect races - otherwise, we could race with a write and
+ * overwrite the data that was just written to the cache with stale data from
+ * the backing device.
+ *
+ * For this we use a sequence number that write locks and unlocks increment - to
+ * insert the check key it unlocks the btree node and then takes a write lock,
+ * and fails if the sequence number doesn't match.
+ */
+
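+/*
+ * Illustrative example (a sketch, not a complete caller): inserting keys from
+ * outside the btree code goes through bch_btree_insert(), which internally
+ * handles the restart-from-root dance described above:
+ *
+ *	struct keylist keys;
+ *
+ *	bch_keylist_init(&keys);
+ *	bch_keylist_add(&keys, k);		// k: pointer to a fully built bkey
+ *	bch_btree_insert(c, &keys, NULL, NULL);	// no journal ref, no replace key
+ */
+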
+#include "bset.h"
+#include "debug.h"
+
+struct btree_write {
+ atomic_t *journal;
+
+ /* If btree_split() frees a btree node, it writes a new pointer to that
+ * btree node indicating it was freed; it takes a refcount on
+ * c->prio_blocked because we can't write the gens until the new
+ * pointer is on disk. This allows btree_write_endio() to release the
+ * refcount that btree_split() took.
+ */
+ int prio_blocked;
+};
+
+struct btree {
+ /* Hottest entries first */
+ struct hlist_node hash;
+
+ /* Key/pointer for this btree node */
+ BKEY_PADDED(key);
+
+ unsigned long seq;
+ struct rw_semaphore lock;
+ struct cache_set *c;
+ struct btree *parent;
+
+ struct mutex write_lock;
+
+ unsigned long flags;
+ uint16_t written; /* would be nice to kill */
+ uint8_t level;
+
+ struct btree_keys keys;
+
+ /* For outstanding btree writes, used as a lock - protects write_idx */
+ struct closure io;
+ struct semaphore io_mutex;
+
+ struct list_head list;
+ struct delayed_work work;
+
+ struct btree_write writes[2];
+ struct bio *bio;
+};
+
+
+#define BTREE_FLAG(flag) \
+static inline bool btree_node_ ## flag(struct btree *b) \
+{ return test_bit(BTREE_NODE_ ## flag, &b->flags); } \
+ \
+static inline void set_btree_node_ ## flag(struct btree *b) \
+{ set_bit(BTREE_NODE_ ## flag, &b->flags); }
+
+enum btree_flags {
+ BTREE_NODE_io_error,
+ BTREE_NODE_dirty,
+ BTREE_NODE_write_idx,
+ BTREE_NODE_journal_flush,
+};
+
+BTREE_FLAG(io_error);
+BTREE_FLAG(dirty);
+BTREE_FLAG(write_idx);
+BTREE_FLAG(journal_flush);
+
+static inline struct btree_write *btree_current_write(struct btree *b)
+{
+ return b->writes + btree_node_write_idx(b);
+}
+
+static inline struct btree_write *btree_prev_write(struct btree *b)
+{
+ return b->writes + (btree_node_write_idx(b) ^ 1);
+}
+
+static inline struct bset *btree_bset_first(struct btree *b)
+{
+ return b->keys.set->data;
+}
+
+static inline struct bset *btree_bset_last(struct btree *b)
+{
+ return bset_tree_last(&b->keys)->data;
+}
+
+static inline unsigned int bset_block_offset(struct btree *b, struct bset *i)
+{
+ return bset_sector_offset(&b->keys, i) >> b->c->block_bits;
+}
+
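+/*
+ * Re-arm the gc trigger: gc becomes eligible to run again once writes have
+ * consumed roughly 1/16th of the cache's sectors.
+ */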
+static inline void set_gc_sectors(struct cache_set *c)
+{
+ atomic_set(&c->sectors_to_gc, c->cache->sb.bucket_size * c->nbuckets / 16);
+}
+
+void bkey_put(struct cache_set *c, struct bkey *k);
+
+/* Looping macros */
+
+#define for_each_cached_btree(b, c, iter) \
+ for (iter = 0; \
+ iter < ARRAY_SIZE((c)->bucket_hash); \
+ iter++) \
+ hlist_for_each_entry_rcu((b), (c)->bucket_hash + iter, hash)
+
+/* Recursing down the btree */
+
+struct btree_op {
+ /* for waiting on btree reserve in btree_split() */
+ wait_queue_entry_t wait;
+
+ /* Btree level at which we start taking write locks */
+ short lock;
+
+ unsigned int insert_collision:1;
+};
+
+struct btree_check_state;
+struct btree_check_info {
+ struct btree_check_state *state;
+ struct task_struct *thread;
+ int result;
+};
+
+#define BCH_BTR_CHKTHREAD_MAX 12
+struct btree_check_state {
+ struct cache_set *c;
+ int total_threads;
+ int key_idx;
+ spinlock_t idx_lock;
+ atomic_t started;
+ atomic_t enough;
+ wait_queue_head_t wait;
+ struct btree_check_info infos[BCH_BTR_CHKTHREAD_MAX];
+};
+
+static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
+{
+ memset(op, 0, sizeof(struct btree_op));
+ init_wait(&op->wait);
+ op->lock = write_lock_level;
+}
+
+static inline void rw_lock(bool w, struct btree *b, int level)
+{
+ w ? down_write(&b->lock)
+ : down_read(&b->lock);
+ if (w)
+ b->seq++;
+}
+
+static inline void rw_unlock(bool w, struct btree *b)
+{
+ if (w)
+ b->seq++;
+ (w ? up_write : up_read)(&b->lock);
+}
+
+void bch_btree_node_read_done(struct btree *b);
+void __bch_btree_node_write(struct btree *b, struct closure *parent);
+void bch_btree_node_write(struct btree *b, struct closure *parent);
+
+void bch_btree_set_root(struct btree *b);
+struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
+ int level, bool wait,
+ struct btree *parent);
+struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
+ struct bkey *k, int level, bool write,
+ struct btree *parent);
+
+int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
+ struct bkey *check_key);
+int bch_btree_insert(struct cache_set *c, struct keylist *keys,
+ atomic_t *journal_ref, struct bkey *replace_key);
+
+int bch_gc_thread_start(struct cache_set *c);
+void bch_initial_gc_finish(struct cache_set *c);
+void bch_moving_gc(struct cache_set *c);
+int bch_btree_check(struct cache_set *c);
+void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k);
+void bch_cannibalize_unlock(struct cache_set *c);
+
+static inline void wake_up_gc(struct cache_set *c)
+{
+ wake_up(&c->gc_wait);
+}
+
+static inline void force_wake_up_gc(struct cache_set *c)
+{
+	/*
+	 * The garbage collection thread only works when sectors_to_gc < 0;
+	 * calling wake_up_gc() won't start the gc thread if sectors_to_gc
+	 * is not a negative value.
+	 * Therefore sectors_to_gc is set to -1 here before waking up the
+	 * gc thread with wake_up_gc(), so that gc_should_run() gets a
+	 * chance to let the gc thread run. It is only "a chance" because,
+	 * before gc_should_run() is reached, c->sectors_to_gc may still be
+	 * set to another positive value, so this routine does not
+	 * guarantee that the gc thread will be woken up to run.
+	 */
+ atomic_set(&c->sectors_to_gc, -1);
+ wake_up_gc(c);
+}
+
+/*
+ * These macros are for recursing down the btree - they handle the details of
+ * locking and looking up nodes in the cache for you. They're best treated as
+ * mere syntax when reading code that uses them.
+ *
+ * op->lock determines whether we take a read or a write lock at a given depth.
+ * If you've got a read lock and find that you need a write lock (i.e. you're
+ * going to have to split), set op->lock and return -EINTR; btree_root() will
+ * call you again and you'll have the correct lock.
+ */
+
+/**
+ * bcache_btree - recurse down the btree on a specified key
+ * @fn: function to call, which will be passed the child node
+ * @key: key to recurse on
+ * @b: parent btree node
+ * @op: pointer to struct btree_op
+ */
+#define bcache_btree(fn, key, b, op, ...) \
+({ \
+ int _r, l = (b)->level - 1; \
+ bool _w = l <= (op)->lock; \
+ struct btree *_child = bch_btree_node_get((b)->c, op, key, l, \
+ _w, b); \
+ if (!IS_ERR(_child)) { \
+ _r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__); \
+ rw_unlock(_w, _child); \
+ } else \
+ _r = PTR_ERR(_child); \
+ _r; \
+})
+
+/**
+ * bcache_btree_root - call a function on the root of the btree
+ * @fn: function to call, which will be passed the root node
+ * @c: cache set
+ * @op: pointer to struct btree_op
+ */
+#define bcache_btree_root(fn, c, op, ...) \
+({ \
+ int _r = -EINTR; \
+ do { \
+ struct btree *_b = (c)->root; \
+ bool _w = insert_lock(op, _b); \
+ rw_lock(_w, _b, _b->level); \
+ if (_b == (c)->root && \
+ _w == insert_lock(op, _b)) { \
+ _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \
+ } \
+ rw_unlock(_w, _b); \
+ bch_cannibalize_unlock(c); \
+ if (_r == -EINTR) \
+ schedule(); \
+ } while (_r == -EINTR); \
+ \
+ finish_wait(&(c)->btree_cache_wait, &(op)->wait); \
+ _r; \
+})
+
+#define MAP_DONE 0
+#define MAP_CONTINUE 1
+
+#define MAP_ALL_NODES 0
+#define MAP_LEAF_NODES 1
+
+#define MAP_END_KEY 1
+
+typedef int (btree_map_nodes_fn)(struct btree_op *b_op, struct btree *b);
+int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
+ struct bkey *from, btree_map_nodes_fn *fn, int flags);
+
+static inline int bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
+ struct bkey *from, btree_map_nodes_fn *fn)
+{
+ return __bch_btree_map_nodes(op, c, from, fn, MAP_ALL_NODES);
+}
+
+static inline int bch_btree_map_leaf_nodes(struct btree_op *op,
+ struct cache_set *c,
+ struct bkey *from,
+ btree_map_nodes_fn *fn)
+{
+ return __bch_btree_map_nodes(op, c, from, fn, MAP_LEAF_NODES);
+}
+
+typedef int (btree_map_keys_fn)(struct btree_op *op, struct btree *b,
+ struct bkey *k);
+int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
+ struct bkey *from, btree_map_keys_fn *fn, int flags);
+int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
+ struct bkey *from, btree_map_keys_fn *fn,
+ int flags);
+
+typedef bool (keybuf_pred_fn)(struct keybuf *buf, struct bkey *k);
+
+void bch_keybuf_init(struct keybuf *buf);
+void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
+ struct bkey *end, keybuf_pred_fn *pred);
+bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
+ struct bkey *end);
+void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w);
+struct keybuf_key *bch_keybuf_next(struct keybuf *buf);
+struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
+ struct keybuf *buf,
+ struct bkey *end,
+ keybuf_pred_fn *pred);
+void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats);
+#endif
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
new file mode 100644
index 0000000000..d8d9394a6b
--- /dev/null
+++ b/drivers/md/bcache/closure.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Asynchronous refcounty things
+ *
+ * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
+ * Copyright 2012 Google, Inc.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/sched/debug.h>
+
+#include "closure.h"
+
+static inline void closure_put_after_sub(struct closure *cl, int flags)
+{
+ int r = flags & CLOSURE_REMAINING_MASK;
+
+ BUG_ON(flags & CLOSURE_GUARD_MASK);
+ BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR));
+
+ if (!r) {
+ if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
+ atomic_set(&cl->remaining,
+ CLOSURE_REMAINING_INITIALIZER);
+ closure_queue(cl);
+ } else {
+ struct closure *parent = cl->parent;
+ closure_fn *destructor = cl->fn;
+
+ closure_debug_destroy(cl);
+
+ if (destructor)
+ destructor(cl);
+
+ if (parent)
+ closure_put(parent);
+ }
+ }
+}
+
+/* For clearing flags with the same atomic op as a put */
+void closure_sub(struct closure *cl, int v)
+{
+ closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));
+}
+
+/*
+ * closure_put - decrement a closure's refcount
+ */
+void closure_put(struct closure *cl)
+{
+ closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
+}
+
+/*
+ * __closure_wake_up - wake up all closures on a wait list, without memory barrier
+ */
+void __closure_wake_up(struct closure_waitlist *wait_list)
+{
+ struct llist_node *list;
+ struct closure *cl, *t;
+ struct llist_node *reverse = NULL;
+
+ list = llist_del_all(&wait_list->list);
+
+ /* We first reverse the list to preserve FIFO ordering and fairness */
+ reverse = llist_reverse_order(list);
+
+ /* Then do the wakeups */
+ llist_for_each_entry_safe(cl, t, reverse, list) {
+ closure_set_waiting(cl, 0);
+ closure_sub(cl, CLOSURE_WAITING + 1);
+ }
+}
+
+/**
+ * closure_wait - add a closure to a waitlist
+ * @waitlist: will own a ref on @cl, which will be released when
+ * closure_wake_up() is called on @waitlist.
+ * @cl: closure pointer.
+ *
+ */
+bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
+{
+ if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
+ return false;
+
+ closure_set_waiting(cl, _RET_IP_);
+ atomic_add(CLOSURE_WAITING + 1, &cl->remaining);
+ llist_add(&cl->list, &waitlist->list);
+
+ return true;
+}
+
+struct closure_syncer {
+ struct task_struct *task;
+ int done;
+};
+
+static void closure_sync_fn(struct closure *cl)
+{
+ struct closure_syncer *s = cl->s;
+ struct task_struct *p;
+
+ rcu_read_lock();
+ p = READ_ONCE(s->task);
+ s->done = 1;
+ wake_up_process(p);
+ rcu_read_unlock();
+}
+
+void __sched __closure_sync(struct closure *cl)
+{
+ struct closure_syncer s = { .task = current };
+
+ cl->s = &s;
+ continue_at(cl, closure_sync_fn, NULL);
+
+ while (1) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (s.done)
+ break;
+ schedule();
+ }
+
+ __set_current_state(TASK_RUNNING);
+}
+
+#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+
+static LIST_HEAD(closure_list);
+static DEFINE_SPINLOCK(closure_list_lock);
+
+void closure_debug_create(struct closure *cl)
+{
+ unsigned long flags;
+
+ BUG_ON(cl->magic == CLOSURE_MAGIC_ALIVE);
+ cl->magic = CLOSURE_MAGIC_ALIVE;
+
+ spin_lock_irqsave(&closure_list_lock, flags);
+ list_add(&cl->all, &closure_list);
+ spin_unlock_irqrestore(&closure_list_lock, flags);
+}
+
+void closure_debug_destroy(struct closure *cl)
+{
+ unsigned long flags;
+
+ BUG_ON(cl->magic != CLOSURE_MAGIC_ALIVE);
+ cl->magic = CLOSURE_MAGIC_DEAD;
+
+ spin_lock_irqsave(&closure_list_lock, flags);
+ list_del(&cl->all);
+ spin_unlock_irqrestore(&closure_list_lock, flags);
+}
+
+static struct dentry *closure_debug;
+
+static int debug_show(struct seq_file *f, void *data)
+{
+ struct closure *cl;
+
+ spin_lock_irq(&closure_list_lock);
+
+ list_for_each_entry(cl, &closure_list, all) {
+ int r = atomic_read(&cl->remaining);
+
+ seq_printf(f, "%p: %pS -> %pS p %p r %i ",
+ cl, (void *) cl->ip, cl->fn, cl->parent,
+ r & CLOSURE_REMAINING_MASK);
+
+ seq_printf(f, "%s%s\n",
+ test_bit(WORK_STRUCT_PENDING_BIT,
+ work_data_bits(&cl->work)) ? "Q" : "",
+ r & CLOSURE_RUNNING ? "R" : "");
+
+ if (r & CLOSURE_WAITING)
+ seq_printf(f, " W %pS\n",
+ (void *) cl->waiting_on);
+
+ seq_printf(f, "\n");
+ }
+
+ spin_unlock_irq(&closure_list_lock);
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(debug);
+
+void __init closure_debug_init(void)
+{
+ if (!IS_ERR_OR_NULL(bcache_debug))
+		/*
+		 * There is no need to check the return value of
+		 * debugfs_create_file(); failures here are not fatal
+		 * and can safely be ignored.
+		 */
+ closure_debug = debugfs_create_file(
+ "closures", 0400, bcache_debug, NULL, &debug_fops);
+}
+#endif
+
+MODULE_AUTHOR("Kent Overstreet <koverstreet@google.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
new file mode 100644
index 0000000000..c88cdc4ae4
--- /dev/null
+++ b/drivers/md/bcache/closure.h
@@ -0,0 +1,378 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CLOSURE_H
+#define _LINUX_CLOSURE_H
+
+#include <linux/llist.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/workqueue.h>
+
+/*
+ * Closure is perhaps the most overused and abused term in computer science, but
+ * since I've been unable to come up with anything better you're stuck with it
+ * again.
+ *
+ * What are closures?
+ *
+ * They embed a refcount. The basic idea is they count "things that are in
+ * progress" - in flight bios, some other thread that's doing something else -
+ * anything you might want to wait on.
+ *
+ * The refcount may be manipulated with closure_get() and closure_put().
+ * closure_put() is where many of the interesting things happen, when it causes
+ * the refcount to go to 0.
+ *
+ * Closures can be used to wait on things both synchronously and asynchronously,
+ * and synchronous and asynchronous use can be mixed without restriction. To
+ * wait synchronously, use closure_sync() - you will sleep until your closure's
+ * refcount hits 1.
+ *
+ * To wait asynchronously, use
+ * continue_at(cl, next_function, workqueue);
+ *
+ * passing it, as you might expect, the function to run when nothing is pending
+ * and the workqueue to run that function out of.
+ *
+ * continue_at() also, critically, requires a 'return' immediately following the
+ * location where this macro is referenced, to return to the calling function.
+ * There's good reason for this.
+ *
+ * To safely use closures asynchronously, they must always have a refcount while
+ * they are running, owned by the thread that is running them. Otherwise, suppose
+ * you submit some bios and wish to have a function run when they all complete:
+ *
+ * foo_endio(struct bio *bio)
+ * {
+ * closure_put(cl);
+ * }
+ *
+ * closure_init(cl);
+ *
+ * do_stuff();
+ * closure_get(cl);
+ * bio1->bi_endio = foo_endio;
+ * bio_submit(bio1);
+ *
+ * do_more_stuff();
+ * closure_get(cl);
+ * bio2->bi_endio = foo_endio;
+ * bio_submit(bio2);
+ *
+ * continue_at(cl, complete_some_read, system_wq);
+ *
+ * If the closure's refcount started at 0, complete_some_read() could run before the
+ * second bio was submitted - which is almost always not what you want! More
+ * importantly, it wouldn't be possible to say whether the original thread or
+ * complete_some_read()'s thread owned the closure - and whatever state it was
+ * associated with!
+ *
+ * So, closure_init() initializes a closure's refcount to 1 - and when a
+ * closure_fn is run, the refcount will be reset to 1 first.
+ *
+ * Then, the rule is - if you got the refcount with closure_get(), release it
+ * with closure_put() (i.e, in a bio->bi_endio function). If you have a refcount
+ * on a closure because you called closure_init() or you were run out of a
+ * closure - _always_ use continue_at(). Doing so consistently will help
+ * eliminate an entire class of particularly pernicious races.
+ *
+ * Lastly, you might have a wait list dedicated to a specific event, and have no
+ * need for specifying the condition - you just want to wait until someone runs
+ * closure_wake_up() on the appropriate wait list. In that case, just use
+ * closure_wait(). It will return either true or false, depending on whether the
+ * closure was already on a wait list or not - a closure can only be on one wait
+ * list at a time.
+ *
+ * Parents:
+ *
+ * closure_init() takes two arguments - it takes the closure to initialize, and
+ * a (possibly null) parent.
+ *
+ * If parent is non null, the new closure will have a refcount for its lifetime;
+ * a closure is considered to be "finished" when its refcount hits 0 and the
+ * function to run is null. Hence
+ *
+ * continue_at(cl, NULL, NULL);
+ *
+ * returns up the (spaghetti) stack of closures, precisely like normal return
+ * returns up the C stack. continue_at() with non null fn is better thought of
+ * as doing a tail call.
+ *
+ * All this implies that a closure should typically be embedded in a particular
+ * struct (which its refcount will normally control the lifetime of), and that
+ * struct can very much be thought of as a stack frame.
+ */
+
+struct closure;
+struct closure_syncer;
+typedef void (closure_fn) (struct closure *);
+extern struct dentry *bcache_debug;
+
+struct closure_waitlist {
+ struct llist_head list;
+};
+
+enum closure_state {
+ /*
+ * CLOSURE_WAITING: Set iff the closure is on a waitlist. Must be set by
+ * the thread that owns the closure, and cleared by the thread that's
+ * waking up the closure.
+ *
+ * The rest are for debugging and don't affect behaviour:
+ *
+ * CLOSURE_RUNNING: Set when a closure is running (i.e. by
+	 * closure_init() and when closure_put() runs the next function), and
+ * must be cleared before remaining hits 0. Primarily to help guard
+ * against incorrect usage and accidentally transferring references.
+ * continue_at() and closure_return() clear it for you, if you're doing
+ * something unusual you can use closure_set_dead() which also helps
+ * annotate where references are being transferred.
+ */
+
+ CLOSURE_BITS_START = (1U << 26),
+ CLOSURE_DESTRUCTOR = (1U << 26),
+ CLOSURE_WAITING = (1U << 28),
+ CLOSURE_RUNNING = (1U << 30),
+};
+
+#define CLOSURE_GUARD_MASK \
+ ((CLOSURE_DESTRUCTOR|CLOSURE_WAITING|CLOSURE_RUNNING) << 1)
+
+#define CLOSURE_REMAINING_MASK (CLOSURE_BITS_START - 1)
+#define CLOSURE_REMAINING_INITIALIZER (1|CLOSURE_RUNNING)
+
+struct closure {
+ union {
+ struct {
+ struct workqueue_struct *wq;
+ struct closure_syncer *s;
+ struct llist_node list;
+ closure_fn *fn;
+ };
+ struct work_struct work;
+ };
+
+ struct closure *parent;
+
+ atomic_t remaining;
+
+#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+#define CLOSURE_MAGIC_DEAD 0xc054dead
+#define CLOSURE_MAGIC_ALIVE 0xc054a11e
+
+ unsigned int magic;
+ struct list_head all;
+ unsigned long ip;
+ unsigned long waiting_on;
+#endif
+};
+
+void closure_sub(struct closure *cl, int v);
+void closure_put(struct closure *cl);
+void __closure_wake_up(struct closure_waitlist *list);
+bool closure_wait(struct closure_waitlist *list, struct closure *cl);
+void __closure_sync(struct closure *cl);
+
+/**
+ * closure_sync - sleep until a closure has nothing left to wait on
+ *
+ * Sleeps until the refcount hits 1 - the thread that's running the closure owns
+ * the last refcount.
+ */
+static inline void closure_sync(struct closure *cl)
+{
+ if ((atomic_read(&cl->remaining) & CLOSURE_REMAINING_MASK) != 1)
+ __closure_sync(cl);
+}
+
+#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+
+void closure_debug_init(void);
+void closure_debug_create(struct closure *cl);
+void closure_debug_destroy(struct closure *cl);
+
+#else
+
+static inline void closure_debug_init(void) {}
+static inline void closure_debug_create(struct closure *cl) {}
+static inline void closure_debug_destroy(struct closure *cl) {}
+
+#endif
+
+static inline void closure_set_ip(struct closure *cl)
+{
+#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+ cl->ip = _THIS_IP_;
+#endif
+}
+
+static inline void closure_set_ret_ip(struct closure *cl)
+{
+#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+ cl->ip = _RET_IP_;
+#endif
+}
+
+static inline void closure_set_waiting(struct closure *cl, unsigned long f)
+{
+#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+ cl->waiting_on = f;
+#endif
+}
+
+static inline void closure_set_stopped(struct closure *cl)
+{
+ atomic_sub(CLOSURE_RUNNING, &cl->remaining);
+}
+
+static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
+ struct workqueue_struct *wq)
+{
+ closure_set_ip(cl);
+ cl->fn = fn;
+ cl->wq = wq;
+	/* order the stores above before the atomic_dec() in closure_put() */
+ smp_mb__before_atomic();
+}
+
+static inline void closure_queue(struct closure *cl)
+{
+ struct workqueue_struct *wq = cl->wq;
+	/*
+	 * Changes made to closure, work_struct, or a couple of other structs
+	 * may cause work.func to no longer point to the right location.
+	 */
+ BUILD_BUG_ON(offsetof(struct closure, fn)
+ != offsetof(struct work_struct, func));
+ if (wq) {
+ INIT_WORK(&cl->work, cl->work.func);
+ BUG_ON(!queue_work(wq, &cl->work));
+ } else
+ cl->fn(cl);
+}
+
+/**
+ * closure_get - increment a closure's refcount
+ */
+static inline void closure_get(struct closure *cl)
+{
+#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+ BUG_ON((atomic_inc_return(&cl->remaining) &
+ CLOSURE_REMAINING_MASK) <= 1);
+#else
+ atomic_inc(&cl->remaining);
+#endif
+}
+
+/**
+ * closure_init - Initialize a closure, setting the refcount to 1
+ * @cl: closure to initialize
+ * @parent: parent of the new closure. cl will take a refcount on it for its
+ * lifetime; may be NULL.
+ */
+static inline void closure_init(struct closure *cl, struct closure *parent)
+{
+ memset(cl, 0, sizeof(struct closure));
+ cl->parent = parent;
+ if (parent)
+ closure_get(parent);
+
+ atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
+
+ closure_debug_create(cl);
+ closure_set_ip(cl);
+}
+
+static inline void closure_init_stack(struct closure *cl)
+{
+ memset(cl, 0, sizeof(struct closure));
+ atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
+}
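+
+/*
+ * Illustrative example (a sketch): waiting synchronously on asynchronous work
+ * with an on-stack closure, mirroring how the btree code waits on node writes:
+ *
+ *	struct closure cl;
+ *
+ *	closure_init_stack(&cl);
+ *	bch_btree_node_write(b, &cl);	// the write takes a ref on cl
+ *	closure_sync(&cl);		// sleep until that ref is dropped
+ */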
+
+/**
+ * closure_wake_up - wake up all closures on a wait list,
+ * with memory barrier
+ */
+static inline void closure_wake_up(struct closure_waitlist *list)
+{
+ /* Memory barrier for the wait list */
+ smp_mb();
+ __closure_wake_up(list);
+}
+
+/**
+ * continue_at - jump to another function with barrier
+ *
+ * After @cl is no longer waiting on anything (i.e. all outstanding refs have
+ * been dropped with closure_put()), it will resume execution at @fn running out
+ * of @wq (or, if @wq is NULL, @fn will be called by closure_put() directly).
+ *
+ * This is because after calling continue_at() you no longer have a ref on @cl,
+ * and whatever @cl owns may be freed out from under you - a running closure fn
+ * has a ref on its own closure which continue_at() drops.
+ *
+ * Note you are expected to immediately return after using this macro.
+ */
+#define continue_at(_cl, _fn, _wq) \
+do { \
+ set_closure_fn(_cl, _fn, _wq); \
+ closure_sub(_cl, CLOSURE_RUNNING + 1); \
+} while (0)
+
+/**
+ * closure_return - finish execution of a closure
+ *
+ * This is used to indicate that @cl is finished: when all outstanding refs on
+ * @cl have been dropped @cl's ref on its parent closure (as passed to
+ * closure_init()) will be dropped, if one was specified - thus this can be
+ * thought of as returning to the parent closure.
+ */
+#define closure_return(_cl) continue_at((_cl), NULL, NULL)
+
+/**
+ * continue_at_nobarrier - jump to another function without barrier
+ *
+ * Causes @fn to be executed out of @cl, in @wq context (or called directly if
+ * @wq is NULL).
+ *
+ * The ref the caller of continue_at_nobarrier() had on @cl is now owned by @fn,
+ * thus it's not safe to touch anything protected by @cl after a
+ * continue_at_nobarrier().
+ */
+#define continue_at_nobarrier(_cl, _fn, _wq) \
+do { \
+ set_closure_fn(_cl, _fn, _wq); \
+ closure_queue(_cl); \
+} while (0)
+
+/**
+ * closure_return_with_destructor - finish execution of a closure,
+ * with destructor
+ *
+ * Works like closure_return(), except @destructor will be called when all
+ * outstanding refs on @cl have been dropped; @destructor may be used to safely
+ * free the memory occupied by @cl, and it is called with the ref on the parent
+ * closure still held - so @destructor could safely return an item to a
+ * freelist protected by @cl's parent.
+ */
+#define closure_return_with_destructor(_cl, _destructor) \
+do { \
+ set_closure_fn(_cl, _destructor, NULL); \
+ closure_sub(_cl, CLOSURE_RUNNING - CLOSURE_DESTRUCTOR + 1); \
+} while (0)
+
+/**
+ * closure_call - execute @fn out of a new, uninitialized closure
+ *
+ * Typically used when running out of one closure, and we want to run @fn
+ * asynchronously out of a new closure - @parent will then wait for @cl to
+ * finish.
+ */
+static inline void closure_call(struct closure *cl, closure_fn fn,
+ struct workqueue_struct *wq,
+ struct closure *parent)
+{
+ closure_init(cl, parent);
+ continue_at_nobarrier(cl, fn, wq);
+}
+
+#endif /* _LINUX_CLOSURE_H */
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
new file mode 100644
index 0000000000..7510d1c983
--- /dev/null
+++ b/drivers/md/bcache/debug.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Assorted bcache debug code
+ *
+ * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "bcache.h"
+#include "btree.h"
+#include "debug.h"
+#include "extents.h"
+
+#include <linux/console.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/seq_file.h>
+
+struct dentry *bcache_debug;
+
+#ifdef CONFIG_BCACHE_DEBUG
+
+#define for_each_written_bset(b, start, i) \
+ for (i = (start); \
+ (void *) i < (void *) (start) + (KEY_SIZE(&b->key) << 9) &&\
+ i->seq == (start)->seq; \
+ i = (void *) i + set_blocks(i, block_bytes(b->c->cache)) * \
+ block_bytes(b->c->cache))
+
+void bch_btree_verify(struct btree *b)
+{
+ struct btree *v = b->c->verify_data;
+ struct bset *ondisk, *sorted, *inmemory;
+ struct bio *bio;
+
+ if (!b->c->verify || !b->c->verify_ondisk)
+ return;
+
+ down(&b->io_mutex);
+ mutex_lock(&b->c->verify_lock);
+
+ ondisk = b->c->verify_ondisk;
+ sorted = b->c->verify_data->keys.set->data;
+ inmemory = b->keys.set->data;
+
+ bkey_copy(&v->key, &b->key);
+ v->written = 0;
+ v->level = b->level;
+ v->keys.ops = b->keys.ops;
+
+ bio = bch_bbio_alloc(b->c);
+ bio_set_dev(bio, b->c->cache->bdev);
+ bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
+ bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9;
+ bio->bi_opf = REQ_OP_READ | REQ_META;
+ bch_bio_map(bio, sorted);
+
+ submit_bio_wait(bio);
+ bch_bbio_free(bio, b->c);
+
+ memcpy(ondisk, sorted, KEY_SIZE(&v->key) << 9);
+
+ bch_btree_node_read_done(v);
+ sorted = v->keys.set->data;
+
+ if (inmemory->keys != sorted->keys ||
+ memcmp(inmemory->start,
+ sorted->start,
+ (void *) bset_bkey_last(inmemory) -
+ (void *) inmemory->start)) {
+ struct bset *i;
+ unsigned int j;
+
+ console_lock();
+
+ pr_err("*** in memory:\n");
+ bch_dump_bset(&b->keys, inmemory, 0);
+
+ pr_err("*** read back in:\n");
+ bch_dump_bset(&v->keys, sorted, 0);
+
+ for_each_written_bset(b, ondisk, i) {
+ unsigned int block = ((void *) i - (void *) ondisk) /
+ block_bytes(b->c->cache);
+
+ pr_err("*** on disk block %u:\n", block);
+ bch_dump_bset(&b->keys, i, block);
+ }
+
+ pr_err("*** block %zu not written\n",
+ ((void *) i - (void *) ondisk) / block_bytes(b->c->cache));
+
+ for (j = 0; j < inmemory->keys; j++)
+ if (inmemory->d[j] != sorted->d[j])
+ break;
+
+ pr_err("b->written %u\n", b->written);
+
+ console_unlock();
+ panic("verify failed at %u\n", j);
+ }
+
+ mutex_unlock(&b->c->verify_lock);
+ up(&b->io_mutex);
+}
+
+void bch_data_verify(struct cached_dev *dc, struct bio *bio)
+{
+ unsigned int nr_segs = bio_segments(bio);
+ struct bio *check;
+ struct bio_vec bv, cbv;
+ struct bvec_iter iter, citer = { 0 };
+
+ check = bio_kmalloc(nr_segs, GFP_NOIO);
+ if (!check)
+ return;
+ bio_init(check, bio->bi_bdev, check->bi_inline_vecs, nr_segs,
+ REQ_OP_READ);
+ check->bi_iter.bi_sector = bio->bi_iter.bi_sector;
+ check->bi_iter.bi_size = bio->bi_iter.bi_size;
+
+ bch_bio_map(check, NULL);
+ if (bch_bio_alloc_pages(check, GFP_NOIO))
+ goto out_put;
+
+ submit_bio_wait(check);
+
+ citer.bi_size = UINT_MAX;
+ bio_for_each_segment(bv, bio, iter) {
+ void *p1 = bvec_kmap_local(&bv);
+ void *p2;
+
+ cbv = bio_iter_iovec(check, citer);
+ p2 = bvec_kmap_local(&cbv);
+
+ cache_set_err_on(memcmp(p1, p2, bv.bv_len),
+ dc->disk.c,
+ "verify failed at dev %pg sector %llu",
+ dc->bdev,
+ (uint64_t) bio->bi_iter.bi_sector);
+
+ kunmap_local(p2);
+ kunmap_local(p1);
+ bio_advance_iter(check, &citer, bv.bv_len);
+ }
+
+ bio_free_pages(check);
+out_put:
+ bio_uninit(check);
+ kfree(check);
+}
+
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+
+/* XXX: cache set refcounting */
+
+struct dump_iterator {
+ char buf[PAGE_SIZE];
+ size_t bytes;
+ struct cache_set *c;
+ struct keybuf keys;
+};
+
+static bool dump_pred(struct keybuf *buf, struct bkey *k)
+{
+ return true;
+}
+
+static ssize_t bch_dump_read(struct file *file, char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ struct dump_iterator *i = file->private_data;
+ ssize_t ret = 0;
+ char kbuf[80];
+
+ while (size) {
+ struct keybuf_key *w;
+ unsigned int bytes = min(i->bytes, size);
+
+ if (copy_to_user(buf, i->buf, bytes))
+ return -EFAULT;
+
+ ret += bytes;
+ buf += bytes;
+ size -= bytes;
+ i->bytes -= bytes;
+ memmove(i->buf, i->buf + bytes, i->bytes);
+
+ if (i->bytes)
+ break;
+
+ w = bch_keybuf_next_rescan(i->c, &i->keys, &MAX_KEY, dump_pred);
+ if (!w)
+ break;
+
+ bch_extent_to_text(kbuf, sizeof(kbuf), &w->key);
+ i->bytes = snprintf(i->buf, PAGE_SIZE, "%s\n", kbuf);
+ bch_keybuf_del(&i->keys, w);
+ }
+
+ return ret;
+}
+
+static int bch_dump_open(struct inode *inode, struct file *file)
+{
+ struct cache_set *c = inode->i_private;
+ struct dump_iterator *i;
+
+ i = kzalloc(sizeof(struct dump_iterator), GFP_KERNEL);
+ if (!i)
+ return -ENOMEM;
+
+ file->private_data = i;
+ i->c = c;
+ bch_keybuf_init(&i->keys);
+ i->keys.last_scanned = KEY(0, 0, 0);
+
+ return 0;
+}
+
+static int bch_dump_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
+static const struct file_operations cache_set_debug_ops = {
+ .owner = THIS_MODULE,
+ .open = bch_dump_open,
+ .read = bch_dump_read,
+ .release = bch_dump_release
+};
+
+void bch_debug_init_cache_set(struct cache_set *c)
+{
+ if (!IS_ERR_OR_NULL(bcache_debug)) {
+ char name[50];
+
+ snprintf(name, 50, "bcache-%pU", c->set_uuid);
+ c->debug = debugfs_create_file(name, 0400, bcache_debug, c,
+ &cache_set_debug_ops);
+ }
+}
+
+#endif
+
+void bch_debug_exit(void)
+{
+ debugfs_remove_recursive(bcache_debug);
+}
+
+void __init bch_debug_init(void)
+{
+	/*
+	 * There is no need to check the return value of
+	 * debugfs_create_dir(); failures here are not fatal
+	 * and can safely be ignored.
+	 */
+ bcache_debug = debugfs_create_dir("bcache", NULL);
+}
diff --git a/drivers/md/bcache/debug.h b/drivers/md/bcache/debug.h
new file mode 100644
index 0000000000..fb3d4dff4b
--- /dev/null
+++ b/drivers/md/bcache/debug.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHE_DEBUG_H
+#define _BCACHE_DEBUG_H
+
+struct bio;
+struct cached_dev;
+struct cache_set;
+
+#ifdef CONFIG_BCACHE_DEBUG
+
+void bch_btree_verify(struct btree *b);
+void bch_data_verify(struct cached_dev *dc, struct bio *bio);
+
+#define expensive_debug_checks(c) ((c)->expensive_debug_checks)
+#define key_merging_disabled(c) ((c)->key_merging_disabled)
+#define bypass_torture_test(d) ((d)->bypass_torture_test)
+
+#else /* DEBUG */
+
+static inline void bch_btree_verify(struct btree *b) {}
+static inline void bch_data_verify(struct cached_dev *dc, struct bio *bio) {}
+
+#define expensive_debug_checks(c) 0
+#define key_merging_disabled(c) 0
+#define bypass_torture_test(d) 0
+
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+void bch_debug_init_cache_set(struct cache_set *c);
+#else
+static inline void bch_debug_init_cache_set(struct cache_set *c) {}
+#endif
+
+#endif
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
new file mode 100644
index 0000000000..d626ffcbec
--- /dev/null
+++ b/drivers/md/bcache/extents.c
@@ -0,0 +1,630 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
+ *
+ * Uses a block device as cache for other block devices; optimized for SSDs.
+ * All allocation is done in buckets, which should match the erase block size
+ * of the device.
+ *
+ * Buckets containing cached data are kept on a heap sorted by priority;
+ * bucket priority is increased on cache hit, and periodically all the buckets
+ * on the heap have their priority scaled down. This currently is just used as
+ * an LRU but in the future should allow for more intelligent heuristics.
+ *
+ * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
+ * counter. Garbage collection is used to remove stale pointers.
+ *
+ * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
+ * as keys are inserted we only sort the pages that have not yet been written.
+ * When garbage collection is run, we resort the entire node.
+ *
+ * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
+ */
+
+#include "bcache.h"
+#include "btree.h"
+#include "debug.h"
+#include "extents.h"
+#include "writeback.h"
+
+static void sort_key_next(struct btree_iter *iter,
+ struct btree_iter_set *i)
+{
+ i->k = bkey_next(i->k);
+
+ if (i->k == i->end)
+ *i = iter->data[--iter->used];
+}
+
+static bool bch_key_sort_cmp(struct btree_iter_set l,
+ struct btree_iter_set r)
+{
+ int64_t c = bkey_cmp(l.k, r.k);
+
+ return c ? c > 0 : l.k < r.k;
+}
+
+static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
+{
+ unsigned int i;
+
+ for (i = 0; i < KEY_PTRS(k); i++)
+ if (ptr_available(c, k, i)) {
+ struct cache *ca = c->cache;
+ size_t bucket = PTR_BUCKET_NR(c, k, i);
+ size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
+
+ if (KEY_SIZE(k) + r > c->cache->sb.bucket_size ||
+ bucket < ca->sb.first_bucket ||
+ bucket >= ca->sb.nbuckets)
+ return true;
+ }
+
+ return false;
+}
+
+/* Common among btree and extent ptrs */
+
+static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
+{
+ unsigned int i;
+
+ for (i = 0; i < KEY_PTRS(k); i++)
+ if (ptr_available(c, k, i)) {
+ struct cache *ca = c->cache;
+ size_t bucket = PTR_BUCKET_NR(c, k, i);
+ size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
+
+ if (KEY_SIZE(k) + r > c->cache->sb.bucket_size)
+ return "bad, length too big";
+ if (bucket < ca->sb.first_bucket)
+ return "bad, short offset";
+ if (bucket >= ca->sb.nbuckets)
+ return "bad, offset past end of device";
+ if (ptr_stale(c, k, i))
+ return "stale";
+ }
+
+ if (!bkey_cmp(k, &ZERO_KEY))
+ return "bad, null key";
+ if (!KEY_PTRS(k))
+ return "bad, no pointers";
+ if (!KEY_SIZE(k))
+ return "zeroed key";
+ return "";
+}
+
+void bch_extent_to_text(char *buf, size_t size, const struct bkey *k)
+{
+ unsigned int i = 0;
+ char *out = buf, *end = buf + size;
+
+#define p(...) (out += scnprintf(out, end - out, __VA_ARGS__))
+
+ p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_START(k), KEY_SIZE(k));
+
+ for (i = 0; i < KEY_PTRS(k); i++) {
+ if (i)
+ p(", ");
+
+ if (PTR_DEV(k, i) == PTR_CHECK_DEV)
+ p("check dev");
+ else
+ p("%llu:%llu gen %llu", PTR_DEV(k, i),
+ PTR_OFFSET(k, i), PTR_GEN(k, i));
+ }
+
+ p("]");
+
+ if (KEY_DIRTY(k))
+ p(" dirty");
+ if (KEY_CSUM(k))
+ p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
+#undef p
+}
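+
+/*
+ * For illustration only: with the format strings above, a single-pointer
+ * dirty extent prints along the lines of
+ *
+ *	char buf[80];
+ *	bch_extent_to_text(buf, sizeof(buf), k);
+ *	// buf now holds something like "1:2048 len 512 -> [0:1024 gen 3] dirty"
+ *
+ * where the inode, offset, length, device, bucket offset and gen values are
+ * made up.
+ */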
+
+static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
+{
+ struct btree *b = container_of(keys, struct btree, keys);
+ unsigned int j;
+ char buf[80];
+
+ bch_extent_to_text(buf, sizeof(buf), k);
+ pr_cont(" %s", buf);
+
+ for (j = 0; j < KEY_PTRS(k); j++) {
+ size_t n = PTR_BUCKET_NR(b->c, k, j);
+
+ pr_cont(" bucket %zu", n);
+ if (n >= b->c->cache->sb.first_bucket && n < b->c->cache->sb.nbuckets)
+ pr_cont(" prio %i",
+ PTR_BUCKET(b->c, k, j)->prio);
+ }
+
+ pr_cont(" %s\n", bch_ptr_status(b->c, k));
+}
+
+/* Btree ptrs */
+
+bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k)
+{
+ char buf[80];
+
+ if (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k))
+ goto bad;
+
+ if (__ptr_invalid(c, k))
+ goto bad;
+
+ return false;
+bad:
+ bch_extent_to_text(buf, sizeof(buf), k);
+ cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k));
+ return true;
+}
+
+static bool bch_btree_ptr_invalid(struct btree_keys *bk, const struct bkey *k)
+{
+ struct btree *b = container_of(bk, struct btree, keys);
+
+ return __bch_btree_ptr_invalid(b->c, k);
+}
+
+static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)
+{
+ unsigned int i;
+ char buf[80];
+ struct bucket *g;
+
+ if (mutex_trylock(&b->c->bucket_lock)) {
+ for (i = 0; i < KEY_PTRS(k); i++)
+ if (ptr_available(b->c, k, i)) {
+ g = PTR_BUCKET(b->c, k, i);
+
+ if (KEY_DIRTY(k) ||
+ g->prio != BTREE_PRIO ||
+ (b->c->gc_mark_valid &&
+ GC_MARK(g) != GC_MARK_METADATA))
+ goto err;
+ }
+
+ mutex_unlock(&b->c->bucket_lock);
+ }
+
+ return false;
+err:
+ mutex_unlock(&b->c->bucket_lock);
+ bch_extent_to_text(buf, sizeof(buf), k);
+ btree_bug(b,
+"inconsistent btree pointer %s: bucket %zi pin %i prio %i gen %i last_gc %i mark %llu",
+ buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
+ g->prio, g->gen, g->last_gc, GC_MARK(g));
+ return true;
+}
+
+static bool bch_btree_ptr_bad(struct btree_keys *bk, const struct bkey *k)
+{
+ struct btree *b = container_of(bk, struct btree, keys);
+ unsigned int i;
+
+ if (!bkey_cmp(k, &ZERO_KEY) ||
+ !KEY_PTRS(k) ||
+ bch_ptr_invalid(bk, k))
+ return true;
+
+ for (i = 0; i < KEY_PTRS(k); i++)
+ if (!ptr_available(b->c, k, i) ||
+ ptr_stale(b->c, k, i))
+ return true;
+
+ if (expensive_debug_checks(b->c) &&
+ btree_ptr_bad_expensive(b, k))
+ return true;
+
+ return false;
+}
+
+static bool bch_btree_ptr_insert_fixup(struct btree_keys *bk,
+ struct bkey *insert,
+ struct btree_iter *iter,
+ struct bkey *replace_key)
+{
+ struct btree *b = container_of(bk, struct btree, keys);
+
+ if (!KEY_OFFSET(insert))
+ btree_current_write(b)->prio_blocked++;
+
+ return false;
+}
+
+const struct btree_keys_ops bch_btree_keys_ops = {
+ .sort_cmp = bch_key_sort_cmp,
+ .insert_fixup = bch_btree_ptr_insert_fixup,
+ .key_invalid = bch_btree_ptr_invalid,
+ .key_bad = bch_btree_ptr_bad,
+ .key_to_text = bch_extent_to_text,
+ .key_dump = bch_bkey_dump,
+};
+
+/* Extents */
+
+/*
+ * Returns true if l > r - unless l == r, in which case returns true if l is
+ * older than r.
+ *
+ * Necessary for btree_sort_fixup() - if there are multiple keys that compare
+ * equal in different sets, we have to process them newest to oldest.
+ */
+static bool bch_extent_sort_cmp(struct btree_iter_set l,
+ struct btree_iter_set r)
+{
+ int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));
+
+ return c ? c > 0 : l.k < r.k;
+}
+
+static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
+ struct bkey *tmp)
+{
+ while (iter->used > 1) {
+ struct btree_iter_set *top = iter->data, *i = top + 1;
+
+ if (iter->used > 2 &&
+ bch_extent_sort_cmp(i[0], i[1]))
+ i++;
+
+ if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
+ break;
+
+ if (!KEY_SIZE(i->k)) {
+ sort_key_next(iter, i);
+ heap_sift(iter, i - top, bch_extent_sort_cmp);
+ continue;
+ }
+
+ if (top->k > i->k) {
+ if (bkey_cmp(top->k, i->k) >= 0)
+ sort_key_next(iter, i);
+ else
+ bch_cut_front(top->k, i->k);
+
+ heap_sift(iter, i - top, bch_extent_sort_cmp);
+ } else {
+ /* can't happen because of comparison func */
+ BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));
+
+ if (bkey_cmp(i->k, top->k) < 0) {
+ bkey_copy(tmp, top->k);
+
+ bch_cut_back(&START_KEY(i->k), tmp);
+ bch_cut_front(i->k, top->k);
+ heap_sift(iter, 0, bch_extent_sort_cmp);
+
+ return tmp;
+ } else {
+ bch_cut_back(&START_KEY(i->k), top->k);
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static void bch_subtract_dirty(struct bkey *k,
+ struct cache_set *c,
+ uint64_t offset,
+ int sectors)
+{
+ if (KEY_DIRTY(k))
+ bcache_dev_sectors_dirty_add(c, KEY_INODE(k),
+ offset, -sectors);
+}
+
+static bool bch_extent_insert_fixup(struct btree_keys *b,
+ struct bkey *insert,
+ struct btree_iter *iter,
+ struct bkey *replace_key)
+{
+ struct cache_set *c = container_of(b, struct btree, keys)->c;
+
+ uint64_t old_offset;
+ unsigned int old_size, sectors_found = 0;
+
+ BUG_ON(!KEY_OFFSET(insert));
+ BUG_ON(!KEY_SIZE(insert));
+
+ while (1) {
+ struct bkey *k = bch_btree_iter_next(iter);
+
+ if (!k)
+ break;
+
+ if (bkey_cmp(&START_KEY(k), insert) >= 0) {
+ if (KEY_SIZE(k))
+ break;
+ else
+ continue;
+ }
+
+ if (bkey_cmp(k, &START_KEY(insert)) <= 0)
+ continue;
+
+ old_offset = KEY_START(k);
+ old_size = KEY_SIZE(k);
+
+ /*
+ * We might overlap with 0 size extents; we can't skip these
+ * because if they're in the set we're inserting to we have to
+ * adjust them so they don't overlap with the key we're
+ * inserting. But we don't want to check them for replace
+ * operations.
+ */
+
+ if (replace_key && KEY_SIZE(k)) {
+ /*
+ * k might have been split since we inserted/found the
+ * key we're replacing
+ */
+ unsigned int i;
+ uint64_t offset = KEY_START(k) -
+ KEY_START(replace_key);
+
+ /* But it must be a subset of the replace key */
+ if (KEY_START(k) < KEY_START(replace_key) ||
+ KEY_OFFSET(k) > KEY_OFFSET(replace_key))
+ goto check_failed;
+
+ /* We didn't find a key that we were supposed to */
+ if (KEY_START(k) > KEY_START(insert) + sectors_found)
+ goto check_failed;
+
+ if (!bch_bkey_equal_header(k, replace_key))
+ goto check_failed;
+
+ /* skip past gen */
+ offset <<= 8;
+
+ BUG_ON(!KEY_PTRS(replace_key));
+
+ for (i = 0; i < KEY_PTRS(replace_key); i++)
+ if (k->ptr[i] != replace_key->ptr[i] + offset)
+ goto check_failed;
+
+ sectors_found = KEY_OFFSET(k) - KEY_START(insert);
+ }
+
+ if (bkey_cmp(insert, k) < 0 &&
+ bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
+ /*
+ * We overlapped in the middle of an existing key: that
+ * means we have to split the old key. But we have to do
+ * slightly different things depending on whether the
+ * old key has been written out yet.
+ */
+
+ struct bkey *top;
+
+ bch_subtract_dirty(k, c, KEY_START(insert),
+ KEY_SIZE(insert));
+
+ if (bkey_written(b, k)) {
+ /*
+ * We insert a new key to cover the top of the
+ * old key, and the old key is modified in place
+ * to represent the bottom split.
+ *
+ * It's completely arbitrary whether the new key
+ * is the top or the bottom, but it has to match
+ * up with what btree_sort_fixup() does - it
+ * doesn't check for this kind of overlap, it
+ * depends on us inserting a new key for the top
+ * here.
+ */
+ top = bch_bset_search(b, bset_tree_last(b),
+ insert);
+ bch_bset_insert(b, top, k);
+ } else {
+ BKEY_PADDED(key) temp;
+ bkey_copy(&temp.key, k);
+ bch_bset_insert(b, k, &temp.key);
+ top = bkey_next(k);
+ }
+
+ bch_cut_front(insert, top);
+ bch_cut_back(&START_KEY(insert), k);
+ bch_bset_fix_invalidated_key(b, k);
+ goto out;
+ }
+
+ if (bkey_cmp(insert, k) < 0) {
+ bch_cut_front(insert, k);
+ } else {
+ if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
+ old_offset = KEY_START(insert);
+
+ if (bkey_written(b, k) &&
+ bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
+ /*
+ * Completely overwrote, so we don't have to
+ * invalidate the binary search tree
+ */
+ bch_cut_front(k, k);
+ } else {
+ __bch_cut_back(&START_KEY(insert), k);
+ bch_bset_fix_invalidated_key(b, k);
+ }
+ }
+
+ bch_subtract_dirty(k, c, old_offset, old_size - KEY_SIZE(k));
+ }
+
+check_failed:
+ if (replace_key) {
+ if (!sectors_found) {
+ return true;
+ } else if (sectors_found < KEY_SIZE(insert)) {
+ SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
+ (KEY_SIZE(insert) - sectors_found));
+ SET_KEY_SIZE(insert, sectors_found);
+ }
+ }
+out:
+ if (KEY_DIRTY(insert))
+ bcache_dev_sectors_dirty_add(c, KEY_INODE(insert),
+ KEY_START(insert),
+ KEY_SIZE(insert));
+
+ return false;
+}
+
+bool __bch_extent_invalid(struct cache_set *c, const struct bkey *k)
+{
+ char buf[80];
+
+ if (!KEY_SIZE(k))
+ return true;
+
+ if (KEY_SIZE(k) > KEY_OFFSET(k))
+ goto bad;
+
+ if (__ptr_invalid(c, k))
+ goto bad;
+
+ return false;
+bad:
+ bch_extent_to_text(buf, sizeof(buf), k);
+ cache_bug(c, "spotted extent %s: %s", buf, bch_ptr_status(c, k));
+ return true;
+}
+
+static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k)
+{
+ struct btree *b = container_of(bk, struct btree, keys);
+
+ return __bch_extent_invalid(b->c, k);
+}
+
+static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,
+ unsigned int ptr)
+{
+ struct bucket *g = PTR_BUCKET(b->c, k, ptr);
+ char buf[80];
+
+ if (mutex_trylock(&b->c->bucket_lock)) {
+ if (b->c->gc_mark_valid &&
+ (!GC_MARK(g) ||
+ GC_MARK(g) == GC_MARK_METADATA ||
+ (GC_MARK(g) != GC_MARK_DIRTY && KEY_DIRTY(k))))
+ goto err;
+
+ if (g->prio == BTREE_PRIO)
+ goto err;
+
+ mutex_unlock(&b->c->bucket_lock);
+ }
+
+ return false;
+err:
+ mutex_unlock(&b->c->bucket_lock);
+ bch_extent_to_text(buf, sizeof(buf), k);
+ btree_bug(b,
+"inconsistent extent pointer %s:\nbucket %zu pin %i prio %i gen %i last_gc %i mark %llu",
+ buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
+ g->prio, g->gen, g->last_gc, GC_MARK(g));
+ return true;
+}
+
+static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
+{
+ struct btree *b = container_of(bk, struct btree, keys);
+ unsigned int i, stale;
+ char buf[80];
+
+ if (!KEY_PTRS(k) ||
+ bch_extent_invalid(bk, k))
+ return true;
+
+ for (i = 0; i < KEY_PTRS(k); i++)
+ if (!ptr_available(b->c, k, i))
+ return true;
+
+ for (i = 0; i < KEY_PTRS(k); i++) {
+ stale = ptr_stale(b->c, k, i);
+
+ if (stale && KEY_DIRTY(k)) {
+ bch_extent_to_text(buf, sizeof(buf), k);
+ pr_info("stale dirty pointer, stale %u, key: %s\n",
+ stale, buf);
+ }
+
+ btree_bug_on(stale > BUCKET_GC_GEN_MAX, b,
+ "key too stale: %i, need_gc %u",
+ stale, b->c->need_gc);
+
+ if (stale)
+ return true;
+
+ if (expensive_debug_checks(b->c) &&
+ bch_extent_bad_expensive(b, k, i))
+ return true;
+ }
+
+ return false;
+}
+
+static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
+{
+ return (l->ptr[KEY_PTRS(l)] + r->ptr[KEY_PTRS(r)]) &
+ ~((uint64_t)1 << 63);
+}
+
+static bool bch_extent_merge(struct btree_keys *bk,
+ struct bkey *l,
+ struct bkey *r)
+{
+ struct btree *b = container_of(bk, struct btree, keys);
+ unsigned int i;
+
+ if (key_merging_disabled(b->c))
+ return false;
+
+ for (i = 0; i < KEY_PTRS(l); i++)
+ if (l->ptr[i] + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
+ PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
+ return false;
+
+ /* Keys with no pointers aren't restricted to one bucket and could
+ * overflow KEY_SIZE
+ */
+ if (KEY_SIZE(l) + KEY_SIZE(r) > USHRT_MAX) {
+ SET_KEY_OFFSET(l, KEY_OFFSET(l) + USHRT_MAX - KEY_SIZE(l));
+ SET_KEY_SIZE(l, USHRT_MAX);
+
+ bch_cut_front(l, r);
+ return false;
+ }
+
+ if (KEY_CSUM(l)) {
+ if (KEY_CSUM(r))
+ l->ptr[KEY_PTRS(l)] = merge_chksums(l, r);
+ else
+ SET_KEY_CSUM(l, 0);
+ }
+
+ SET_KEY_OFFSET(l, KEY_OFFSET(l) + KEY_SIZE(r));
+ SET_KEY_SIZE(l, KEY_SIZE(l) + KEY_SIZE(r));
+
+ return true;
+}
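+
+/*
+ * For illustration only (sector numbers made up): bch_extent_merge() above
+ * glues two adjacent extents into one. If l covers sectors [0, 8)
+ * (KEY_OFFSET 8, KEY_SIZE 8) and r covers [8, 16) with pointers continuing
+ * exactly where l's end, the final two statements amount to
+ *
+ *	SET_KEY_OFFSET(l, 8 + 8);	// merged extent now ends at sector 16
+ *	SET_KEY_SIZE(l, 8 + 8);		// and is 16 sectors long
+ *
+ * so l alone describes both ranges and r can be dropped by the caller.
+ */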
+
+const struct btree_keys_ops bch_extent_keys_ops = {
+ .sort_cmp = bch_extent_sort_cmp,
+ .sort_fixup = bch_extent_sort_fixup,
+ .insert_fixup = bch_extent_insert_fixup,
+ .key_invalid = bch_extent_invalid,
+ .key_bad = bch_extent_bad,
+ .key_merge = bch_extent_merge,
+ .key_to_text = bch_extent_to_text,
+ .key_dump = bch_bkey_dump,
+ .is_extents = true,
+};
diff --git a/drivers/md/bcache/extents.h b/drivers/md/bcache/extents.h
new file mode 100644
index 0000000000..4d667e05bb
--- /dev/null
+++ b/drivers/md/bcache/extents.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHE_EXTENTS_H
+#define _BCACHE_EXTENTS_H
+
+extern const struct btree_keys_ops bch_btree_keys_ops;
+extern const struct btree_keys_ops bch_extent_keys_ops;
+
+struct bkey;
+struct cache_set;
+
+void bch_extent_to_text(char *buf, size_t size, const struct bkey *k);
+bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k);
+bool __bch_extent_invalid(struct cache_set *c, const struct bkey *k);
+
+#endif /* _BCACHE_EXTENTS_H */
diff --git a/drivers/md/bcache/features.c b/drivers/md/bcache/features.c
new file mode 100644
index 0000000000..634922c560
--- /dev/null
+++ b/drivers/md/bcache/features.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Feature set bits and string conversion.
+ * Inspired by ext4's features compat/incompat/ro_compat related code.
+ *
+ * Copyright 2020 Coly Li <colyli@suse.de>
+ *
+ */
+#include "bcache_ondisk.h"
+#include "bcache.h"
+#include "features.h"
+
+struct feature {
+ int compat;
+ unsigned int mask;
+ const char *string;
+};
+
+static struct feature feature_list[] = {
+ {BCH_FEATURE_INCOMPAT, BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE,
+ "large_bucket"},
+ {0, 0, NULL },
+};
+
+#define compose_feature_string(type) \
+({ \
+ struct feature *f; \
+ bool first = true; \
+ \
+ for (f = &feature_list[0]; f->compat != 0; f++) { \
+ if (f->compat != BCH_FEATURE_ ## type) \
+ continue; \
+ if (BCH_HAS_ ## type ## _FEATURE(&c->cache->sb, f->mask)) { \
+ if (first) { \
+ out += snprintf(out, buf + size - out, \
+ "["); \
+ } else { \
+ out += snprintf(out, buf + size - out, \
+ " ["); \
+ } \
+ } else if (!first) { \
+ out += snprintf(out, buf + size - out, " "); \
+ } \
+ \
+ out += snprintf(out, buf + size - out, "%s", f->string);\
+ \
+ if (BCH_HAS_ ## type ## _FEATURE(&c->cache->sb, f->mask)) \
+ out += snprintf(out, buf + size - out, "]"); \
+ \
+ first = false; \
+ } \
+ if (!first) \
+ out += snprintf(out, buf + size - out, "\n"); \
+})
+
+int bch_print_cache_set_feature_compat(struct cache_set *c, char *buf, int size)
+{
+ char *out = buf;
+ compose_feature_string(COMPAT);
+ return out - buf;
+}
+
+int bch_print_cache_set_feature_ro_compat(struct cache_set *c, char *buf, int size)
+{
+ char *out = buf;
+ compose_feature_string(RO_COMPAT);
+ return out - buf;
+}
+
+int bch_print_cache_set_feature_incompat(struct cache_set *c, char *buf, int size)
+{
+ char *out = buf;
+ compose_feature_string(INCOMPAT);
+ return out - buf;
+}
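+
+/*
+ * A minimal usage sketch: for a cache set whose superblock has only the
+ * log_large_bucket incompat feature enabled, the helper above writes
+ * "[large_bucket]\n" into the buffer; a recognised but unset feature would
+ * be printed without the surrounding brackets.
+ *
+ *	// in a sysfs show() callback, buf is a page-sized buffer:
+ *	int n = bch_print_cache_set_feature_incompat(c, buf, PAGE_SIZE);
+ */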
diff --git a/drivers/md/bcache/features.h b/drivers/md/bcache/features.h
new file mode 100644
index 0000000000..09161b89c6
--- /dev/null
+++ b/drivers/md/bcache/features.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _BCACHE_FEATURES_H
+#define _BCACHE_FEATURES_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "bcache_ondisk.h"
+
+#define BCH_FEATURE_COMPAT 0
+#define BCH_FEATURE_RO_COMPAT 1
+#define BCH_FEATURE_INCOMPAT 2
+#define BCH_FEATURE_TYPE_MASK 0x03
+
+/* Feature set definition */
+/* Incompat feature set */
+/* 32bit bucket size, obsoleted */
+#define BCH_FEATURE_INCOMPAT_OBSO_LARGE_BUCKET 0x0001
+/* real bucket size is (1 << bucket_size) */
+#define BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE 0x0002
+
+#define BCH_FEATURE_COMPAT_SUPP 0
+#define BCH_FEATURE_RO_COMPAT_SUPP 0
+#define BCH_FEATURE_INCOMPAT_SUPP (BCH_FEATURE_INCOMPAT_OBSO_LARGE_BUCKET| \
+ BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE)
+
+#define BCH_HAS_COMPAT_FEATURE(sb, mask) \
+ ((sb)->feature_compat & (mask))
+#define BCH_HAS_RO_COMPAT_FEATURE(sb, mask) \
+ ((sb)->feature_ro_compat & (mask))
+#define BCH_HAS_INCOMPAT_FEATURE(sb, mask) \
+ ((sb)->feature_incompat & (mask))
+
+#define BCH_FEATURE_COMPAT_FUNCS(name, flagname) \
+static inline int bch_has_feature_##name(struct cache_sb *sb) \
+{ \
+ if (sb->version < BCACHE_SB_VERSION_CDEV_WITH_FEATURES) \
+ return 0; \
+ return (((sb)->feature_compat & \
+ BCH##_FEATURE_COMPAT_##flagname) != 0); \
+} \
+static inline void bch_set_feature_##name(struct cache_sb *sb) \
+{ \
+ (sb)->feature_compat |= \
+ BCH##_FEATURE_COMPAT_##flagname; \
+} \
+static inline void bch_clear_feature_##name(struct cache_sb *sb) \
+{ \
+ (sb)->feature_compat &= \
+ ~BCH##_FEATURE_COMPAT_##flagname; \
+}
+
+#define BCH_FEATURE_RO_COMPAT_FUNCS(name, flagname) \
+static inline int bch_has_feature_##name(struct cache_sb *sb) \
+{ \
+ if (sb->version < BCACHE_SB_VERSION_CDEV_WITH_FEATURES) \
+ return 0; \
+ return (((sb)->feature_ro_compat & \
+ BCH##_FEATURE_RO_COMPAT_##flagname) != 0); \
+} \
+static inline void bch_set_feature_##name(struct cache_sb *sb) \
+{ \
+ (sb)->feature_ro_compat |= \
+ BCH##_FEATURE_RO_COMPAT_##flagname; \
+} \
+static inline void bch_clear_feature_##name(struct cache_sb *sb) \
+{ \
+ (sb)->feature_ro_compat &= \
+ ~BCH##_FEATURE_RO_COMPAT_##flagname; \
+}
+
+#define BCH_FEATURE_INCOMPAT_FUNCS(name, flagname) \
+static inline int bch_has_feature_##name(struct cache_sb *sb) \
+{ \
+ if (sb->version < BCACHE_SB_VERSION_CDEV_WITH_FEATURES) \
+ return 0; \
+ return (((sb)->feature_incompat & \
+ BCH##_FEATURE_INCOMPAT_##flagname) != 0); \
+} \
+static inline void bch_set_feature_##name(struct cache_sb *sb) \
+{ \
+ (sb)->feature_incompat |= \
+ BCH##_FEATURE_INCOMPAT_##flagname; \
+} \
+static inline void bch_clear_feature_##name(struct cache_sb *sb) \
+{ \
+ (sb)->feature_incompat &= \
+ ~BCH##_FEATURE_INCOMPAT_##flagname; \
+}
+
+BCH_FEATURE_INCOMPAT_FUNCS(obso_large_bucket, OBSO_LARGE_BUCKET);
+BCH_FEATURE_INCOMPAT_FUNCS(large_bucket, LOG_LARGE_BUCKET_SIZE);
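+
+/*
+ * Each FUNCS macro above generates bch_has_feature_<name>(),
+ * bch_set_feature_<name>() and bch_clear_feature_<name>(). A minimal sketch
+ * of how a reader of the superblock could decode the bucket size (the
+ * bucket_size variable is made up for illustration):
+ *
+ *	if (bch_has_feature_large_bucket(sb))
+ *		bucket_size = 1 << sb->bucket_size;	// log2-encoded, see above
+ *	else
+ *		bucket_size = sb->bucket_size;		// stored directly
+ */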
+
+static inline bool bch_has_unknown_compat_features(struct cache_sb *sb)
+{
+ return ((sb->feature_compat & ~BCH_FEATURE_COMPAT_SUPP) != 0);
+}
+
+static inline bool bch_has_unknown_ro_compat_features(struct cache_sb *sb)
+{
+ return ((sb->feature_ro_compat & ~BCH_FEATURE_RO_COMPAT_SUPP) != 0);
+}
+
+static inline bool bch_has_unknown_incompat_features(struct cache_sb *sb)
+{
+ return ((sb->feature_incompat & ~BCH_FEATURE_INCOMPAT_SUPP) != 0);
+}
+
+int bch_print_cache_set_feature_compat(struct cache_set *c, char *buf, int size);
+int bch_print_cache_set_feature_ro_compat(struct cache_set *c, char *buf, int size);
+int bch_print_cache_set_feature_incompat(struct cache_set *c, char *buf, int size);
+
+#endif
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
new file mode 100644
index 0000000000..020712c520
--- /dev/null
+++ b/drivers/md/bcache/io.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Some low level IO code, and hacks for various block layer limitations
+ *
+ * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "bcache.h"
+#include "bset.h"
+#include "debug.h"
+
+#include <linux/blkdev.h>
+
+/* Bios with headers */
+
+void bch_bbio_free(struct bio *bio, struct cache_set *c)
+{
+ struct bbio *b = container_of(bio, struct bbio, bio);
+
+ mempool_free(b, &c->bio_meta);
+}
+
+struct bio *bch_bbio_alloc(struct cache_set *c)
+{
+ struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
+ struct bio *bio = &b->bio;
+
+ bio_init(bio, NULL, bio->bi_inline_vecs,
+ meta_bucket_pages(&c->cache->sb), 0);
+
+ return bio;
+}
+
+void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
+{
+ struct bbio *b = container_of(bio, struct bbio, bio);
+
+ bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
+ bio_set_dev(bio, c->cache->bdev);
+
+ b->submit_time_us = local_clock_us();
+ closure_bio_submit(c, bio, bio->bi_private);
+}
+
+void bch_submit_bbio(struct bio *bio, struct cache_set *c,
+ struct bkey *k, unsigned int ptr)
+{
+ struct bbio *b = container_of(bio, struct bbio, bio);
+
+ bch_bkey_copy_single_ptr(&b->key, k, ptr);
+ __bch_submit_bbio(bio, c);
+}
+
+/* IO errors */
+void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
+{
+ unsigned int errors;
+
+ WARN_ONCE(!dc, "NULL pointer of struct cached_dev");
+
+ /*
+	 * Read-ahead requests on a degrading and recovering md raid
+	 * (e.g. raid6) device might be failed immediately by the md
+	 * raid code, which is not a real hardware media failure. So
+	 * we shouldn't count failed REQ_RAHEAD bios toward dc->io_errors.
+ */
+ if (bio->bi_opf & REQ_RAHEAD) {
+ pr_warn_ratelimited("%pg: Read-ahead I/O failed on backing device, ignore\n",
+ dc->bdev);
+ return;
+ }
+
+ errors = atomic_add_return(1, &dc->io_errors);
+ if (errors < dc->error_limit)
+ pr_err("%pg: IO error on backing device, unrecoverable\n",
+ dc->bdev);
+ else
+ bch_cached_dev_error(dc);
+}
+
+void bch_count_io_errors(struct cache *ca,
+ blk_status_t error,
+ int is_read,
+ const char *m)
+{
+ /*
+ * The halflife of an error is:
+ * log2(1/2)/log2(127/128) * refresh ~= 88 * refresh
+ */
+
+ if (ca->set->error_decay) {
+ unsigned int count = atomic_inc_return(&ca->io_count);
+
+ while (count > ca->set->error_decay) {
+ unsigned int errors;
+ unsigned int old = count;
+ unsigned int new = count - ca->set->error_decay;
+
+ /*
+ * First we subtract refresh from count; each time we
+ * successfully do so, we rescale the errors once:
+ */
+
+ count = atomic_cmpxchg(&ca->io_count, old, new);
+
+ if (count == old) {
+ count = new;
+
+ errors = atomic_read(&ca->io_errors);
+ do {
+ old = errors;
+ new = ((uint64_t) errors * 127) / 128;
+ errors = atomic_cmpxchg(&ca->io_errors,
+ old, new);
+ } while (old != errors);
+ }
+ }
+ }
+
+ if (error) {
+ unsigned int errors = atomic_add_return(1 << IO_ERROR_SHIFT,
+ &ca->io_errors);
+ errors >>= IO_ERROR_SHIFT;
+
+ if (errors < ca->set->error_limit)
+ pr_err("%pg: IO error on %s%s\n",
+ ca->bdev, m,
+ is_read ? ", recovering." : ".");
+ else
+ bch_cache_set_error(ca->set,
+ "%pg: too many IO errors %s\n",
+ ca->bdev, m);
+ }
+}
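+
+/*
+ * A worked example of the halflife figure quoted in bch_count_io_errors():
+ * every time io_count exceeds error_decay, the error count is rescaled by
+ * 127/128, so after n rescales it is errors * (127/128)^n. Solving
+ * (127/128)^n = 1/2 gives n = log(1/2) / log(127/128) ~= 88.4, i.e. roughly
+ * 88 "refresh" periods as stated above.
+ */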
+
+void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
+ blk_status_t error, const char *m)
+{
+ struct bbio *b = container_of(bio, struct bbio, bio);
+ struct cache *ca = c->cache;
+ int is_read = (bio_data_dir(bio) == READ ? 1 : 0);
+
+ unsigned int threshold = op_is_write(bio_op(bio))
+ ? c->congested_write_threshold_us
+ : c->congested_read_threshold_us;
+
+ if (threshold) {
+ unsigned int t = local_clock_us();
+ int us = t - b->submit_time_us;
+ int congested = atomic_read(&c->congested);
+
+ if (us > (int) threshold) {
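+			/* us / 1024 cheaply approximates a usec-to-msec conversion */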
+ int ms = us / 1024;
+
+ c->congested_last_us = t;
+
+ ms = min(ms, CONGESTED_MAX + congested);
+ atomic_sub(ms, &c->congested);
+ } else if (congested < 0)
+ atomic_inc(&c->congested);
+ }
+
+ bch_count_io_errors(ca, error, is_read, m);
+}
+
+void bch_bbio_endio(struct cache_set *c, struct bio *bio,
+ blk_status_t error, const char *m)
+{
+ struct closure *cl = bio->bi_private;
+
+ bch_bbio_count_io_errors(c, bio, error, m);
+ bio_put(bio);
+ closure_put(cl);
+}
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
new file mode 100644
index 0000000000..c182c21de2
--- /dev/null
+++ b/drivers/md/bcache/journal.c
@@ -0,0 +1,1000 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * bcache journalling code, for btree insertions
+ *
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "bcache.h"
+#include "btree.h"
+#include "debug.h"
+#include "extents.h"
+
+#include <trace/events/bcache.h>
+
+/*
+ * Journal replay/recovery:
+ *
+ * This code is all driven from run_cache_set(); we first read the journal
+ * entries, do some other stuff, then we mark all the keys in the journal
+ * entries (same as garbage collection would), then we replay them - reinserting
+ * them into the cache in precisely the same order as they appear in the
+ * journal.
+ *
+ * We only journal keys that go in leaf nodes, which simplifies things quite a
+ * bit.
+ */
+
+static void journal_read_endio(struct bio *bio)
+{
+ struct closure *cl = bio->bi_private;
+
+ closure_put(cl);
+}
+
+static int journal_read_bucket(struct cache *ca, struct list_head *list,
+ unsigned int bucket_index)
+{
+ struct journal_device *ja = &ca->journal;
+ struct bio *bio = &ja->bio;
+
+ struct journal_replay *i;
+ struct jset *j, *data = ca->set->journal.w[0].data;
+ struct closure cl;
+ unsigned int len, left, offset = 0;
+ int ret = 0;
+ sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);
+
+ closure_init_stack(&cl);
+
+ pr_debug("reading %u\n", bucket_index);
+
+ while (offset < ca->sb.bucket_size) {
+reread: left = ca->sb.bucket_size - offset;
+ len = min_t(unsigned int, left, PAGE_SECTORS << JSET_BITS);
+
+ bio_reset(bio, ca->bdev, REQ_OP_READ);
+ bio->bi_iter.bi_sector = bucket + offset;
+ bio->bi_iter.bi_size = len << 9;
+
+ bio->bi_end_io = journal_read_endio;
+ bio->bi_private = &cl;
+ bch_bio_map(bio, data);
+
+ closure_bio_submit(ca->set, bio, &cl);
+ closure_sync(&cl);
+
+ /* This function could be simpler now since we no longer write
+ * journal entries that overlap bucket boundaries; this means
+ * the start of a bucket will always have a valid journal entry
+ * if it has any journal entries at all.
+ */
+
+ j = data;
+ while (len) {
+ struct list_head *where;
+ size_t blocks, bytes = set_bytes(j);
+
+ if (j->magic != jset_magic(&ca->sb)) {
+ pr_debug("%u: bad magic\n", bucket_index);
+ return ret;
+ }
+
+ if (bytes > left << 9 ||
+ bytes > PAGE_SIZE << JSET_BITS) {
+ pr_info("%u: too big, %zu bytes, offset %u\n",
+ bucket_index, bytes, offset);
+ return ret;
+ }
+
+ if (bytes > len << 9)
+ goto reread;
+
+ if (j->csum != csum_set(j)) {
+ pr_info("%u: bad csum, %zu bytes, offset %u\n",
+ bucket_index, bytes, offset);
+ return ret;
+ }
+
+ blocks = set_blocks(j, block_bytes(ca));
+
+ /*
+			 * Nodes in 'list' are kept in increasing order of
+			 * i->j.seq; the node at the head has the smallest
+			 * (oldest) journal seq, and the node at the tail has
+			 * the largest (latest) journal seq.
+ */
+
+ /*
+			 * Check last_seq starting from the oldest jset in the
+			 * list. If i->j.seq < j->last_seq, the oldest jset is
+			 * expired and useless, so remove it from the list.
+			 * Otherwise, j is a candidate jset for the checks
+			 * that follow.
+ */
+ while (!list_empty(list)) {
+ i = list_first_entry(list,
+ struct journal_replay, list);
+ if (i->j.seq >= j->last_seq)
+ break;
+ list_del(&i->list);
+ kfree(i);
+ }
+
+ /* iterate list in reverse order (from latest jset) */
+ list_for_each_entry_reverse(i, list, list) {
+ if (j->seq == i->j.seq)
+ goto next_set;
+
+ /*
+				 * If j->seq is less than any i->j.last_seq
+				 * in the list, j is an expired and useless jset.
+ */
+ if (j->seq < i->j.last_seq)
+ goto next_set;
+
+ /*
+				 * 'where' points to the first jset in the list
+				 * that is older than j.
+ */
+ if (j->seq > i->j.seq) {
+ where = &i->list;
+ goto add;
+ }
+ }
+
+ where = list;
+add:
+ i = kmalloc(offsetof(struct journal_replay, j) +
+ bytes, GFP_KERNEL);
+ if (!i)
+ return -ENOMEM;
+ unsafe_memcpy(&i->j, j, bytes,
+ /* "bytes" was calculated by set_bytes() above */);
+ /* Add to the location after 'where' points to */
+ list_add(&i->list, where);
+ ret = 1;
+
+ if (j->seq > ja->seq[bucket_index])
+ ja->seq[bucket_index] = j->seq;
+next_set:
+ offset += blocks * ca->sb.block_size;
+ len -= blocks * ca->sb.block_size;
+ j = ((void *) j) + blocks * block_bytes(ca);
+ }
+ }
+
+ return ret;
+}
+
+int bch_journal_read(struct cache_set *c, struct list_head *list)
+{
+#define read_bucket(b) \
+ ({ \
+ ret = journal_read_bucket(ca, list, b); \
+ __set_bit(b, bitmap); \
+ if (ret < 0) \
+ return ret; \
+ ret; \
+ })
+
+ struct cache *ca = c->cache;
+ int ret = 0;
+ struct journal_device *ja = &ca->journal;
+ DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
+ unsigned int i, l, r, m;
+ uint64_t seq;
+
+ bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
+ pr_debug("%u journal buckets\n", ca->sb.njournal_buckets);
+
+ /*
+ * Read journal buckets ordered by golden ratio hash to quickly
+ * find a sequence of buckets with valid journal entries
+ */
+ for (i = 0; i < ca->sb.njournal_buckets; i++) {
+ /*
+		 * We must try bucket index l == 0 first, for correctness:
+		 * the journal buckets form a circular buffer which might
+		 * have wrapped around.
+ */
+ l = (i * 2654435769U) % ca->sb.njournal_buckets;
+
+ if (test_bit(l, bitmap))
+ break;
+
+ if (read_bucket(l))
+ goto bsearch;
+ }
+
+ /*
+ * If that fails, check all the buckets we haven't checked
+ * already
+ */
+ pr_debug("falling back to linear search\n");
+
+ for_each_clear_bit(l, bitmap, ca->sb.njournal_buckets)
+ if (read_bucket(l))
+ goto bsearch;
+
+ /* no journal entries on this device? */
+ if (l == ca->sb.njournal_buckets)
+ goto out;
+bsearch:
+ BUG_ON(list_empty(list));
+
+ /* Binary search */
+ m = l;
+ r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
+ pr_debug("starting binary search, l %u r %u\n", l, r);
+
+ while (l + 1 < r) {
+ seq = list_entry(list->prev, struct journal_replay,
+ list)->j.seq;
+
+ m = (l + r) >> 1;
+ read_bucket(m);
+
+ if (seq != list_entry(list->prev, struct journal_replay,
+ list)->j.seq)
+ l = m;
+ else
+ r = m;
+ }
+
+ /*
+ * Read buckets in reverse order until we stop finding more
+ * journal entries
+ */
+ pr_debug("finishing up: m %u njournal_buckets %u\n",
+ m, ca->sb.njournal_buckets);
+ l = m;
+
+ while (1) {
+ if (!l--)
+ l = ca->sb.njournal_buckets - 1;
+
+ if (l == m)
+ break;
+
+ if (test_bit(l, bitmap))
+ continue;
+
+ if (!read_bucket(l))
+ break;
+ }
+
+ seq = 0;
+
+ for (i = 0; i < ca->sb.njournal_buckets; i++)
+ if (ja->seq[i] > seq) {
+ seq = ja->seq[i];
+ /*
+ * When journal_reclaim() goes to allocate for
+ * the first time, it'll use the bucket after
+ * ja->cur_idx
+ */
+ ja->cur_idx = i;
+ ja->last_idx = ja->discard_idx = (i + 1) %
+ ca->sb.njournal_buckets;
+
+ }
+
+out:
+ if (!list_empty(list))
+ c->journal.seq = list_entry(list->prev,
+ struct journal_replay,
+ list)->j.seq;
+
+ return 0;
+#undef read_bucket
+}
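+
+/*
+ * Note on the probe order used in bch_journal_read() above: 2654435769 is
+ * 2^32 divided by the golden ratio (Knuth's Fibonacci-hashing multiplier),
+ * so the sequence
+ *
+ *	for (i = 0; i < ca->sb.njournal_buckets; i++)
+ *		l = (i * 2654435769U) % ca->sb.njournal_buckets;
+ *
+ * scatters probes roughly evenly across the journal buckets instead of
+ * scanning them in order.
+ */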
+
+void bch_journal_mark(struct cache_set *c, struct list_head *list)
+{
+ atomic_t p = { 0 };
+ struct bkey *k;
+ struct journal_replay *i;
+ struct journal *j = &c->journal;
+ uint64_t last = j->seq;
+
+ /*
+ * journal.pin should never fill up - we never write a journal
+ * entry when it would fill up. But if for some reason it does, we
+ * iterate over the list in reverse order so that we can just skip that
+ * refcount instead of bugging.
+ */
+
+ list_for_each_entry_reverse(i, list, list) {
+ BUG_ON(last < i->j.seq);
+ i->pin = NULL;
+
+ while (last-- != i->j.seq)
+ if (fifo_free(&j->pin) > 1) {
+ fifo_push_front(&j->pin, p);
+ atomic_set(&fifo_front(&j->pin), 0);
+ }
+
+ if (fifo_free(&j->pin) > 1) {
+ fifo_push_front(&j->pin, p);
+ i->pin = &fifo_front(&j->pin);
+ atomic_set(i->pin, 1);
+ }
+
+ for (k = i->j.start;
+ k < bset_bkey_last(&i->j);
+ k = bkey_next(k))
+ if (!__bch_extent_invalid(c, k)) {
+ unsigned int j;
+
+ for (j = 0; j < KEY_PTRS(k); j++)
+ if (ptr_available(c, k, j))
+ atomic_inc(&PTR_BUCKET(c, k, j)->pin);
+
+ bch_initial_mark_key(c, 0, k);
+ }
+ }
+}
+
+static bool is_discard_enabled(struct cache_set *s)
+{
+ struct cache *ca = s->cache;
+
+ if (ca->discard)
+ return true;
+
+ return false;
+}
+
+int bch_journal_replay(struct cache_set *s, struct list_head *list)
+{
+ int ret = 0, keys = 0, entries = 0;
+ struct bkey *k;
+ struct journal_replay *i =
+ list_entry(list->prev, struct journal_replay, list);
+
+ uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
+ struct keylist keylist;
+
+ list_for_each_entry(i, list, list) {
+ BUG_ON(i->pin && atomic_read(i->pin) != 1);
+
+ if (n != i->j.seq) {
+ if (n == start && is_discard_enabled(s))
+ pr_info("journal entries %llu-%llu may be discarded! (replaying %llu-%llu)\n",
+ n, i->j.seq - 1, start, end);
+ else {
+ pr_err("journal entries %llu-%llu missing! (replaying %llu-%llu)\n",
+ n, i->j.seq - 1, start, end);
+ ret = -EIO;
+ goto err;
+ }
+ }
+
+ for (k = i->j.start;
+ k < bset_bkey_last(&i->j);
+ k = bkey_next(k)) {
+ trace_bcache_journal_replay_key(k);
+
+ bch_keylist_init_single(&keylist, k);
+
+ ret = bch_btree_insert(s, &keylist, i->pin, NULL);
+ if (ret)
+ goto err;
+
+ BUG_ON(!bch_keylist_empty(&keylist));
+ keys++;
+
+ cond_resched();
+ }
+
+ if (i->pin)
+ atomic_dec(i->pin);
+ n = i->j.seq + 1;
+ entries++;
+ }
+
+ pr_info("journal replay done, %i keys in %i entries, seq %llu\n",
+ keys, entries, end);
+err:
+ while (!list_empty(list)) {
+ i = list_first_entry(list, struct journal_replay, list);
+ list_del(&i->list);
+ kfree(i);
+ }
+
+ return ret;
+}
+
+void bch_journal_space_reserve(struct journal *j)
+{
+ j->do_reserve = true;
+}
+
+/* Journalling */
+
+static void btree_flush_write(struct cache_set *c)
+{
+ struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR];
+ unsigned int i, nr;
+ int ref_nr;
+ atomic_t *fifo_front_p, *now_fifo_front_p;
+ size_t mask;
+
+ if (c->journal.btree_flushing)
+ return;
+
+ spin_lock(&c->journal.flush_write_lock);
+ if (c->journal.btree_flushing) {
+ spin_unlock(&c->journal.flush_write_lock);
+ return;
+ }
+ c->journal.btree_flushing = true;
+ spin_unlock(&c->journal.flush_write_lock);
+
+ /* get the oldest journal entry and check its refcount */
+ spin_lock(&c->journal.lock);
+ fifo_front_p = &fifo_front(&c->journal.pin);
+ ref_nr = atomic_read(fifo_front_p);
+ if (ref_nr <= 0) {
+ /*
+ * do nothing if no btree node references
+ * the oldest journal entry
+ */
+ spin_unlock(&c->journal.lock);
+ goto out;
+ }
+ spin_unlock(&c->journal.lock);
+
+ mask = c->journal.pin.mask;
+ nr = 0;
+ atomic_long_inc(&c->flush_write);
+ memset(btree_nodes, 0, sizeof(btree_nodes));
+
+ mutex_lock(&c->bucket_lock);
+ list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
+ /*
+ * It is safe to get now_fifo_front_p without holding
+		 * c->journal.lock here, because we don't need an exact
+		 * value; we only check whether the front pointer of
+		 * c->journal.pin has changed.
+ */
+ now_fifo_front_p = &fifo_front(&c->journal.pin);
+ /*
+		 * If the oldest journal entry is reclaimed and the front
+		 * pointer of c->journal.pin changes, there is no need to
+		 * scan c->btree_cache any further; just quit the loop and
+		 * flush out what we already have.
+ */
+ if (now_fifo_front_p != fifo_front_p)
+ break;
+ /*
+		 * Quit this loop if all matching btree nodes have been
+		 * scanned and recorded in btree_nodes[] already.
+ */
+ ref_nr = atomic_read(fifo_front_p);
+ if (nr >= ref_nr)
+ break;
+
+ if (btree_node_journal_flush(b))
+ pr_err("BUG: flush_write bit should not be set here!\n");
+
+ mutex_lock(&b->write_lock);
+
+ if (!btree_node_dirty(b)) {
+ mutex_unlock(&b->write_lock);
+ continue;
+ }
+
+ if (!btree_current_write(b)->journal) {
+ mutex_unlock(&b->write_lock);
+ continue;
+ }
+
+ /*
+		 * Only select btree nodes which exactly reference the
+		 * oldest journal entry.
+		 *
+		 * If the journal entry pointed to by fifo_front_p is
+		 * reclaimed in parallel, don't worry:
+		 * - the list_for_each_xxx loop will quit when it checks
+		 *   the next now_fifo_front_p.
+		 * - If there are matching nodes recorded in btree_nodes[],
+		 *   they are clean now (this is why and how the oldest
+		 *   journal entry can be reclaimed). These selected nodes
+		 *   will be ignored and skipped in the following for-loop.
+ */
+ if (((btree_current_write(b)->journal - fifo_front_p) &
+ mask) != 0) {
+ mutex_unlock(&b->write_lock);
+ continue;
+ }
+
+ set_btree_node_journal_flush(b);
+
+ mutex_unlock(&b->write_lock);
+
+ btree_nodes[nr++] = b;
+ /*
+		 * To avoid holding c->bucket_lock for too long, scan for
+		 * at most BTREE_FLUSH_NR matching btree nodes. If more
+		 * btree nodes still reference the oldest journal entry,
+		 * they will be flushed the next time btree_flush_write()
+		 * is called.
+ */
+ if (nr == BTREE_FLUSH_NR)
+ break;
+ }
+ mutex_unlock(&c->bucket_lock);
+
+ for (i = 0; i < nr; i++) {
+ b = btree_nodes[i];
+ if (!b) {
+ pr_err("BUG: btree_nodes[%d] is NULL\n", i);
+ continue;
+ }
+
+ /* safe to check without holding b->write_lock */
+ if (!btree_node_journal_flush(b)) {
+ pr_err("BUG: bnode %p: journal_flush bit cleaned\n", b);
+ continue;
+ }
+
+ mutex_lock(&b->write_lock);
+ if (!btree_current_write(b)->journal) {
+ clear_bit(BTREE_NODE_journal_flush, &b->flags);
+ mutex_unlock(&b->write_lock);
+ pr_debug("bnode %p: written by others\n", b);
+ continue;
+ }
+
+ if (!btree_node_dirty(b)) {
+ clear_bit(BTREE_NODE_journal_flush, &b->flags);
+ mutex_unlock(&b->write_lock);
+ pr_debug("bnode %p: dirty bit cleaned by others\n", b);
+ continue;
+ }
+
+ __bch_btree_node_write(b, NULL);
+ clear_bit(BTREE_NODE_journal_flush, &b->flags);
+ mutex_unlock(&b->write_lock);
+ }
+
+out:
+ spin_lock(&c->journal.flush_write_lock);
+ c->journal.btree_flushing = false;
+ spin_unlock(&c->journal.flush_write_lock);
+}
+
+#define last_seq(j) ((j)->seq - fifo_used(&(j)->pin) + 1)
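+
+/*
+ * The pin fifo holds one refcount per still-open journal entry and (j)->seq
+ * is the newest sequence number, so last_seq() above yields the oldest entry
+ * that is still open. For example, seq == 100 with 3 open entries gives
+ * last_seq == 98 (entries 98, 99 and 100 are open).
+ */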
+
+static void journal_discard_endio(struct bio *bio)
+{
+ struct journal_device *ja =
+ container_of(bio, struct journal_device, discard_bio);
+ struct cache *ca = container_of(ja, struct cache, journal);
+
+ atomic_set(&ja->discard_in_flight, DISCARD_DONE);
+
+ closure_wake_up(&ca->set->journal.wait);
+ closure_put(&ca->set->cl);
+}
+
+static void journal_discard_work(struct work_struct *work)
+{
+ struct journal_device *ja =
+ container_of(work, struct journal_device, discard_work);
+
+ submit_bio(&ja->discard_bio);
+}
+
+static void do_journal_discard(struct cache *ca)
+{
+ struct journal_device *ja = &ca->journal;
+ struct bio *bio = &ja->discard_bio;
+
+ if (!ca->discard) {
+ ja->discard_idx = ja->last_idx;
+ return;
+ }
+
+ switch (atomic_read(&ja->discard_in_flight)) {
+ case DISCARD_IN_FLIGHT:
+ return;
+
+ case DISCARD_DONE:
+ ja->discard_idx = (ja->discard_idx + 1) %
+ ca->sb.njournal_buckets;
+
+ atomic_set(&ja->discard_in_flight, DISCARD_READY);
+ fallthrough;
+
+ case DISCARD_READY:
+ if (ja->discard_idx == ja->last_idx)
+ return;
+
+ atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
+
+ bio_init(bio, ca->bdev, bio->bi_inline_vecs, 1, REQ_OP_DISCARD);
+ bio->bi_iter.bi_sector = bucket_to_sector(ca->set,
+ ca->sb.d[ja->discard_idx]);
+ bio->bi_iter.bi_size = bucket_bytes(ca);
+ bio->bi_end_io = journal_discard_endio;
+
+ closure_get(&ca->set->cl);
+ INIT_WORK(&ja->discard_work, journal_discard_work);
+ queue_work(bch_journal_wq, &ja->discard_work);
+ }
+}
+
+static unsigned int free_journal_buckets(struct cache_set *c)
+{
+ struct journal *j = &c->journal;
+ struct cache *ca = c->cache;
+ struct journal_device *ja = &c->cache->journal;
+ unsigned int n;
+
+	/* In case njournal_buckets is not a power of 2 */
+ if (ja->cur_idx >= ja->discard_idx)
+ n = ca->sb.njournal_buckets + ja->discard_idx - ja->cur_idx;
+ else
+ n = ja->discard_idx - ja->cur_idx;
+
+ if (n > (1 + j->do_reserve))
+ return n - (1 + j->do_reserve);
+
+ return 0;
+}
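+
+/*
+ * A worked example of the wrap-around arithmetic in free_journal_buckets()
+ * (numbers made up): with 8 journal buckets, cur_idx == 6 and
+ * discard_idx == 2,
+ *
+ *	n = 8 + 2 - 6 = 4
+ *
+ * buckets are free, of which one (two once do_reserve is set) is held back
+ * as a reserve.
+ */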
+
+static void journal_reclaim(struct cache_set *c)
+{
+ struct bkey *k = &c->journal.key;
+ struct cache *ca = c->cache;
+ uint64_t last_seq;
+ struct journal_device *ja = &ca->journal;
+ atomic_t p __maybe_unused;
+
+ atomic_long_inc(&c->reclaim);
+
+ while (!atomic_read(&fifo_front(&c->journal.pin)))
+ fifo_pop(&c->journal.pin, p);
+
+ last_seq = last_seq(&c->journal);
+
+ /* Update last_idx */
+
+ while (ja->last_idx != ja->cur_idx &&
+ ja->seq[ja->last_idx] < last_seq)
+ ja->last_idx = (ja->last_idx + 1) %
+ ca->sb.njournal_buckets;
+
+ do_journal_discard(ca);
+
+ if (c->journal.blocks_free)
+ goto out;
+
+ if (!free_journal_buckets(c))
+ goto out;
+
+ ja->cur_idx = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
+ k->ptr[0] = MAKE_PTR(0,
+ bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
+ ca->sb.nr_this_dev);
+ atomic_long_inc(&c->reclaimed_journal_buckets);
+
+ bkey_init(k);
+ SET_KEY_PTRS(k, 1);
+ c->journal.blocks_free = ca->sb.bucket_size >> c->block_bits;
+
+out:
+ if (!journal_full(&c->journal))
+ __closure_wake_up(&c->journal.wait);
+}
+
+void bch_journal_next(struct journal *j)
+{
+ atomic_t p = { 1 };
+
+ j->cur = (j->cur == j->w)
+ ? &j->w[1]
+ : &j->w[0];
+
+ /*
+ * The fifo_push() needs to happen at the same time as j->seq is
+ * incremented for last_seq() to be calculated correctly
+ */
+ BUG_ON(!fifo_push(&j->pin, p));
+ atomic_set(&fifo_back(&j->pin), 1);
+
+ j->cur->data->seq = ++j->seq;
+ j->cur->dirty = false;
+ j->cur->need_write = false;
+ j->cur->data->keys = 0;
+
+ if (fifo_full(&j->pin))
+ pr_debug("journal_pin full (%zu)\n", fifo_used(&j->pin));
+}
+
+static void journal_write_endio(struct bio *bio)
+{
+ struct journal_write *w = bio->bi_private;
+
+ cache_set_err_on(bio->bi_status, w->c, "journal io error");
+ closure_put(&w->c->journal.io);
+}
+
+static void journal_write(struct closure *cl);
+
+static void journal_write_done(struct closure *cl)
+{
+ struct journal *j = container_of(cl, struct journal, io);
+ struct journal_write *w = (j->cur == j->w)
+ ? &j->w[1]
+ : &j->w[0];
+
+ __closure_wake_up(&w->wait);
+ continue_at_nobarrier(cl, journal_write, bch_journal_wq);
+}
+
+static void journal_write_unlock(struct closure *cl)
+ __releases(&c->journal.lock)
+{
+ struct cache_set *c = container_of(cl, struct cache_set, journal.io);
+
+ c->journal.io_in_flight = 0;
+ spin_unlock(&c->journal.lock);
+}
+
+static void journal_write_unlocked(struct closure *cl)
+ __releases(c->journal.lock)
+{
+ struct cache_set *c = container_of(cl, struct cache_set, journal.io);
+ struct cache *ca = c->cache;
+ struct journal_write *w = c->journal.cur;
+ struct bkey *k = &c->journal.key;
+ unsigned int i, sectors = set_blocks(w->data, block_bytes(ca)) *
+ ca->sb.block_size;
+
+ struct bio *bio;
+ struct bio_list list;
+
+ bio_list_init(&list);
+
+ if (!w->need_write) {
+ closure_return_with_destructor(cl, journal_write_unlock);
+ return;
+ } else if (journal_full(&c->journal)) {
+ journal_reclaim(c);
+ spin_unlock(&c->journal.lock);
+
+ btree_flush_write(c);
+ continue_at(cl, journal_write, bch_journal_wq);
+ return;
+ }
+
+ c->journal.blocks_free -= set_blocks(w->data, block_bytes(ca));
+
+ w->data->btree_level = c->root->level;
+
+ bkey_copy(&w->data->btree_root, &c->root->key);
+ bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);
+
+ w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
+ w->data->magic = jset_magic(&ca->sb);
+ w->data->version = BCACHE_JSET_VERSION;
+ w->data->last_seq = last_seq(&c->journal);
+ w->data->csum = csum_set(w->data);
+
+ for (i = 0; i < KEY_PTRS(k); i++) {
+ ca = c->cache;
+ bio = &ca->journal.bio;
+
+ atomic_long_add(sectors, &ca->meta_sectors_written);
+
+ bio_reset(bio, ca->bdev, REQ_OP_WRITE |
+ REQ_SYNC | REQ_META | REQ_PREFLUSH | REQ_FUA);
+ bio->bi_iter.bi_sector = PTR_OFFSET(k, i);
+ bio->bi_iter.bi_size = sectors << 9;
+
+ bio->bi_end_io = journal_write_endio;
+ bio->bi_private = w;
+ bch_bio_map(bio, w->data);
+
+ trace_bcache_journal_write(bio, w->data->keys);
+ bio_list_add(&list, bio);
+
+ SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);
+
+ ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
+ }
+
+	/* If KEY_PTRS(k) == 0, this jset would be lost without ever being written */
+ BUG_ON(i == 0);
+
+ atomic_dec_bug(&fifo_back(&c->journal.pin));
+ bch_journal_next(&c->journal);
+ journal_reclaim(c);
+
+ spin_unlock(&c->journal.lock);
+
+ while ((bio = bio_list_pop(&list)))
+ closure_bio_submit(c, bio, cl);
+
+ continue_at(cl, journal_write_done, NULL);
+}
+
+static void journal_write(struct closure *cl)
+{
+ struct cache_set *c = container_of(cl, struct cache_set, journal.io);
+
+ spin_lock(&c->journal.lock);
+ journal_write_unlocked(cl);
+}
+
+static void journal_try_write(struct cache_set *c)
+ __releases(c->journal.lock)
+{
+ struct closure *cl = &c->journal.io;
+ struct journal_write *w = c->journal.cur;
+
+ w->need_write = true;
+
+ if (!c->journal.io_in_flight) {
+ c->journal.io_in_flight = 1;
+ closure_call(cl, journal_write_unlocked, NULL, &c->cl);
+ } else {
+ spin_unlock(&c->journal.lock);
+ }
+}
+
+static struct journal_write *journal_wait_for_write(struct cache_set *c,
+ unsigned int nkeys)
+ __acquires(&c->journal.lock)
+{
+ size_t sectors;
+ struct closure cl;
+ bool wait = false;
+ struct cache *ca = c->cache;
+
+ closure_init_stack(&cl);
+
+ spin_lock(&c->journal.lock);
+
+ while (1) {
+ struct journal_write *w = c->journal.cur;
+
+ sectors = __set_blocks(w->data, w->data->keys + nkeys,
+ block_bytes(ca)) * ca->sb.block_size;
+
+ if (sectors <= min_t(size_t,
+ c->journal.blocks_free * ca->sb.block_size,
+ PAGE_SECTORS << JSET_BITS))
+ return w;
+
+ if (wait)
+ closure_wait(&c->journal.wait, &cl);
+
+ if (!journal_full(&c->journal)) {
+ if (wait)
+ trace_bcache_journal_entry_full(c);
+
+ /*
+ * XXX: If we were inserting so many keys that they
+ * won't fit in an _empty_ journal write, we'll
+ * deadlock. For now, handle this in
+ * bch_keylist_realloc() - but something to think about.
+ */
+ BUG_ON(!w->data->keys);
+
+ journal_try_write(c); /* unlocks */
+ } else {
+ if (wait)
+ trace_bcache_journal_full(c);
+
+ journal_reclaim(c);
+ spin_unlock(&c->journal.lock);
+
+ btree_flush_write(c);
+ }
+
+ closure_sync(&cl);
+ spin_lock(&c->journal.lock);
+ wait = true;
+ }
+}
+
+static void journal_write_work(struct work_struct *work)
+{
+ struct cache_set *c = container_of(to_delayed_work(work),
+ struct cache_set,
+ journal.work);
+ spin_lock(&c->journal.lock);
+ if (c->journal.cur->dirty)
+ journal_try_write(c);
+ else
+ spin_unlock(&c->journal.lock);
+}
+
+/*
+ * Entry point to the journalling code - bio_insert() and btree_invalidate()
+ * pass bch_journal() a list of keys to be journalled, and then
+ * bch_journal() hands those same keys off to btree_insert_async()
+ */
+
+atomic_t *bch_journal(struct cache_set *c,
+ struct keylist *keys,
+ struct closure *parent)
+{
+ struct journal_write *w;
+ atomic_t *ret;
+
+ /* No journaling if CACHE_SET_IO_DISABLE set already */
+ if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
+ return NULL;
+
+ if (!CACHE_SYNC(&c->cache->sb))
+ return NULL;
+
+ w = journal_wait_for_write(c, bch_keylist_nkeys(keys));
+
+ memcpy(bset_bkey_last(w->data), keys->keys, bch_keylist_bytes(keys));
+ w->data->keys += bch_keylist_nkeys(keys);
+
+ ret = &fifo_back(&c->journal.pin);
+ atomic_inc(ret);
+
+ if (parent) {
+ closure_wait(&w->wait, parent);
+ journal_try_write(c);
+ } else if (!w->dirty) {
+ w->dirty = true;
+ queue_delayed_work(bch_flush_wq, &c->journal.work,
+ msecs_to_jiffies(c->journal_delay_ms));
+ spin_unlock(&c->journal.lock);
+ } else {
+ spin_unlock(&c->journal.lock);
+ }
+
+
+ return ret;
+}
+
+void bch_journal_meta(struct cache_set *c, struct closure *cl)
+{
+ struct keylist keys;
+ atomic_t *ref;
+
+ bch_keylist_init(&keys);
+
+ ref = bch_journal(c, &keys, cl);
+ if (ref)
+ atomic_dec_bug(ref);
+}
+
+void bch_journal_free(struct cache_set *c)
+{
+ free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
+ free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
+ free_fifo(&c->journal.pin);
+}
+
+int bch_journal_alloc(struct cache_set *c)
+{
+ struct journal *j = &c->journal;
+
+ spin_lock_init(&j->lock);
+ spin_lock_init(&j->flush_write_lock);
+ INIT_DELAYED_WORK(&j->work, journal_write_work);
+
+ c->journal_delay_ms = 100;
+
+ j->w[0].c = c;
+ j->w[1].c = c;
+
+ if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
+ !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS)) ||
+ !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS)))
+ return -ENOMEM;
+
+ return 0;
+}
diff --git a/drivers/md/bcache/journal.h b/drivers/md/bcache/journal.h
new file mode 100644
index 0000000000..cd316b4a1e
--- /dev/null
+++ b/drivers/md/bcache/journal.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHE_JOURNAL_H
+#define _BCACHE_JOURNAL_H
+
+/*
+ * THE JOURNAL:
+ *
+ * The journal is treated as a circular buffer of buckets - a journal entry
+ * never spans two buckets. This means (not implemented yet) we can resize the
+ * journal at runtime, and will be needed for bcache on raw flash support.
+ *
+ * Journal entries contain a list of keys, ordered by the time they were
+ * inserted; thus journal replay just has to reinsert the keys.
+ *
+ * We also keep some things in the journal header that are logically part of the
+ * superblock - all the things that are frequently updated. This is for future
+ * bcache on raw flash support; the superblock (which will become another
+ * journal) can't be moved or wear leveled, so it contains just enough
+ * information to find the main journal, and the superblock only has to be
+ * rewritten when we want to move/wear level the main journal.
+ *
+ * Currently, we don't journal BTREE_REPLACE operations - this will hopefully be
+ * fixed eventually. This isn't a bug - BTREE_REPLACE is used for insertions
+ * from cache misses, which don't have to be journaled, and for writeback and
+ * moving gc we work around it by flushing the btree to disk before updating the
+ * gc information. But it is a potential issue with incremental garbage
+ * collection, and it's fragile.
+ *
+ * OPEN JOURNAL ENTRIES:
+ *
+ * Each journal entry contains, in the header, the sequence number of the last
+ * journal entry still open - i.e. that has keys that haven't been flushed to
+ * disk in the btree.
+ *
+ * We track this by maintaining a refcount for every open journal entry, in a
+ * fifo; each entry in the fifo corresponds to a particular journal
+ * entry/sequence number. When the refcount at the tail of the fifo goes to
+ * zero, we pop it off - thus, the size of the fifo tells us the number of open
+ * journal entries
+ *
+ * We take a refcount on a journal entry when we add some keys to a journal
+ * entry that we're going to insert (held by struct btree_op), and then when we
+ * insert those keys into the btree the btree write we're setting up takes a
+ * copy of that refcount (held by struct btree_write). That refcount is dropped
+ * when the btree write completes.
+ *
+ * A struct btree_write can only hold a refcount on a single journal entry, but
+ * might contain keys for many journal entries - we handle this by making sure
+ * it always has a refcount on the _oldest_ journal entry of all the journal
+ * entries it has keys for.
+ *
+ * JOURNAL RECLAIM:
+ *
+ * As mentioned previously, our fifo of refcounts tells us the number of open
+ * journal entries; from that and the current journal sequence number we compute
+ * last_seq - the oldest journal entry we still need. We write last_seq in each
+ * journal entry, and we also have to keep track of where it exists on disk so
+ * we don't overwrite it when we loop around the journal.
+ *
+ * To do that we track, for each journal bucket, the sequence number of the
+ * newest journal entry it contains - if we don't need that journal entry we
+ * don't need anything in that bucket anymore. From that we track the last
+ * journal bucket we still need; all this is tracked in struct journal_device
+ * and updated by journal_reclaim().
+ *
+ * JOURNAL FILLING UP:
+ *
+ * There are two ways the journal could fill up; either we could run out of
+ * space to write to, or we could have too many open journal entries and run out
+ * of room in the fifo of refcounts. Since those refcounts are decremented
+ * without any locking we can't safely resize that fifo, so we handle it the
+ * same way.
+ *
+ * If the journal fills up, we start flushing dirty btree nodes until we can
+ * allocate space for a journal write again - preferentially flushing btree
+ * nodes that are pinning the oldest journal entries first.
+ */
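+
+/*
+ * A minimal sketch of the refcount flow described above, mirroring what
+ * bch_journal_meta() in journal.c does:
+ *
+ *	atomic_t *ref = bch_journal(c, &keys, cl);	// ref on the open entry
+ *	if (ref)
+ *		atomic_dec_bug(ref);			// drop it when done
+ */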
+
+/*
+ * Only used for holding the journal entries we read in bch_journal_read()
+ * during cache registration.
+ */
+struct journal_replay {
+ struct list_head list;
+ atomic_t *pin;
+ struct jset j;
+};
+
+/*
+ * We put two of these in struct journal; we use them for writes to the
+ * journal that are being staged or in flight.
+ */
+struct journal_write {
+ struct jset *data;
+#define JSET_BITS 3
+
+ struct cache_set *c;
+ struct closure_waitlist wait;
+ bool dirty;
+ bool need_write;
+};
+
+/* Embedded in struct cache_set */
+struct journal {
+ spinlock_t lock;
+ spinlock_t flush_write_lock;
+ bool btree_flushing;
+ bool do_reserve;
+ /* used when waiting because the journal was full */
+ struct closure_waitlist wait;
+ struct closure io;
+ int io_in_flight;
+ struct delayed_work work;
+
+ /* Number of blocks free in the bucket(s) we're currently writing to */
+ unsigned int blocks_free;
+ uint64_t seq;
+ DECLARE_FIFO(atomic_t, pin);
+
+ BKEY_PADDED(key);
+
+ struct journal_write w[2], *cur;
+};
+
+/*
+ * Embedded in struct cache. First three fields refer to the array of journal
+ * buckets, in cache_sb.
+ */
+struct journal_device {
+ /*
+ * For each journal bucket, contains the max sequence number of the
+ * journal writes it contains - so we know when a bucket can be reused.
+ */
+ uint64_t seq[SB_JOURNAL_BUCKETS];
+
+ /* Journal bucket we're currently writing to */
+ unsigned int cur_idx;
+
+ /* Last journal bucket that still contains an open journal entry */
+ unsigned int last_idx;
+
+ /* Next journal bucket to be discarded */
+ unsigned int discard_idx;
+
+#define DISCARD_READY 0
+#define DISCARD_IN_FLIGHT 1
+#define DISCARD_DONE 2
+	/* DISCARD_READY, DISCARD_IN_FLIGHT or DISCARD_DONE */
+ atomic_t discard_in_flight;
+
+ struct work_struct discard_work;
+ struct bio discard_bio;
+ struct bio_vec discard_bv;
+
+ /* Bio for journal reads/writes to this device */
+ struct bio bio;
+ struct bio_vec bv[8];
+};
+
+#define BTREE_FLUSH_NR 8
+
+#define journal_pin_cmp(c, l, r) \
+ (fifo_idx(&(c)->journal.pin, (l)) > fifo_idx(&(c)->journal.pin, (r)))
+
+#define JOURNAL_PIN 20000
+
+#define journal_full(j) \
+ (!(j)->blocks_free || fifo_free(&(j)->pin) <= 1)
+
+struct closure;
+struct cache_set;
+struct btree_op;
+struct keylist;
+
+atomic_t *bch_journal(struct cache_set *c,
+ struct keylist *keys,
+ struct closure *parent);
+void bch_journal_next(struct journal *j);
+void bch_journal_mark(struct cache_set *c, struct list_head *list);
+void bch_journal_meta(struct cache_set *c, struct closure *cl);
+int bch_journal_read(struct cache_set *c, struct list_head *list);
+int bch_journal_replay(struct cache_set *c, struct list_head *list);
+
+void bch_journal_free(struct cache_set *c);
+int bch_journal_alloc(struct cache_set *c);
+void bch_journal_space_reserve(struct journal *j);
+
+#endif /* _BCACHE_JOURNAL_H */
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
new file mode 100644
index 0000000000..9f32901fda
--- /dev/null
+++ b/drivers/md/bcache/movinggc.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Moving/copying garbage collector
+ *
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "bcache.h"
+#include "btree.h"
+#include "debug.h"
+#include "request.h"
+
+#include <trace/events/bcache.h>
+
+struct moving_io {
+ struct closure cl;
+ struct keybuf_key *w;
+ struct data_insert_op op;
+ struct bbio bio;
+};
+
+static bool moving_pred(struct keybuf *buf, struct bkey *k)
+{
+ struct cache_set *c = container_of(buf, struct cache_set,
+ moving_gc_keys);
+ unsigned int i;
+
+ for (i = 0; i < KEY_PTRS(k); i++)
+ if (ptr_available(c, k, i) &&
+ GC_MOVE(PTR_BUCKET(c, k, i)))
+ return true;
+
+ return false;
+}
+
+/* Moving GC - IO loop */
+
+static void moving_io_destructor(struct closure *cl)
+{
+ struct moving_io *io = container_of(cl, struct moving_io, cl);
+
+ kfree(io);
+}
+
+static void write_moving_finish(struct closure *cl)
+{
+ struct moving_io *io = container_of(cl, struct moving_io, cl);
+ struct bio *bio = &io->bio.bio;
+
+ bio_free_pages(bio);
+
+ if (io->op.replace_collision)
+ trace_bcache_gc_copy_collision(&io->w->key);
+
+ bch_keybuf_del(&io->op.c->moving_gc_keys, io->w);
+
+ up(&io->op.c->moving_in_flight);
+
+ closure_return_with_destructor(cl, moving_io_destructor);
+}
+
+static void read_moving_endio(struct bio *bio)
+{
+ struct bbio *b = container_of(bio, struct bbio, bio);
+ struct moving_io *io = container_of(bio->bi_private,
+ struct moving_io, cl);
+
+ if (bio->bi_status)
+ io->op.status = bio->bi_status;
+ else if (!KEY_DIRTY(&b->key) &&
+ ptr_stale(io->op.c, &b->key, 0)) {
+ io->op.status = BLK_STS_IOERR;
+ }
+
+ bch_bbio_endio(io->op.c, bio, bio->bi_status, "reading data to move");
+}
+
+static void moving_init(struct moving_io *io)
+{
+ struct bio *bio = &io->bio.bio;
+
+ bio_init(bio, NULL, bio->bi_inline_vecs,
+ DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS), 0);
+ bio_get(bio);
+ bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
+
+ bio->bi_iter.bi_size = KEY_SIZE(&io->w->key) << 9;
+ bio->bi_private = &io->cl;
+ bch_bio_map(bio, NULL);
+}
+
+static void write_moving(struct closure *cl)
+{
+ struct moving_io *io = container_of(cl, struct moving_io, cl);
+ struct data_insert_op *op = &io->op;
+
+ if (!op->status) {
+ moving_init(io);
+
+ io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key);
+ op->write_prio = 1;
+ op->bio = &io->bio.bio;
+
+ op->writeback = KEY_DIRTY(&io->w->key);
+ op->csum = KEY_CSUM(&io->w->key);
+
+ bkey_copy(&op->replace_key, &io->w->key);
+ op->replace = true;
+
+ closure_call(&op->cl, bch_data_insert, NULL, cl);
+ }
+
+ continue_at(cl, write_moving_finish, op->wq);
+}
+
+static void read_moving_submit(struct closure *cl)
+{
+ struct moving_io *io = container_of(cl, struct moving_io, cl);
+ struct bio *bio = &io->bio.bio;
+
+ bch_submit_bbio(bio, io->op.c, &io->w->key, 0);
+
+ continue_at(cl, write_moving, io->op.wq);
+}
+
+static void read_moving(struct cache_set *c)
+{
+ struct keybuf_key *w;
+ struct moving_io *io;
+ struct bio *bio;
+ struct closure cl;
+
+ closure_init_stack(&cl);
+
+ /* XXX: if we error, background writeback could stall indefinitely */
+
+ while (!test_bit(CACHE_SET_STOPPING, &c->flags)) {
+ w = bch_keybuf_next_rescan(c, &c->moving_gc_keys,
+ &MAX_KEY, moving_pred);
+ if (!w)
+ break;
+
+ if (ptr_stale(c, &w->key, 0)) {
+ bch_keybuf_del(&c->moving_gc_keys, w);
+ continue;
+ }
+
+ io = kzalloc(struct_size(io, bio.bio.bi_inline_vecs,
+ DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS)),
+ GFP_KERNEL);
+ if (!io)
+ goto err;
+
+ w->private = io;
+ io->w = w;
+ io->op.inode = KEY_INODE(&w->key);
+ io->op.c = c;
+ io->op.wq = c->moving_gc_wq;
+
+ moving_init(io);
+ bio = &io->bio.bio;
+
+ bio->bi_opf = REQ_OP_READ;
+ bio->bi_end_io = read_moving_endio;
+
+ if (bch_bio_alloc_pages(bio, GFP_KERNEL))
+ goto err;
+
+ trace_bcache_gc_copy(&w->key);
+
+ down(&c->moving_in_flight);
+ closure_call(&io->cl, read_moving_submit, NULL, &cl);
+ }
+
+ if (0) {
+err: if (!IS_ERR_OR_NULL(w->private))
+ kfree(w->private);
+
+ bch_keybuf_del(&c->moving_gc_keys, w);
+ }
+
+ closure_sync(&cl);
+}
+
+static bool bucket_cmp(struct bucket *l, struct bucket *r)
+{
+ return GC_SECTORS_USED(l) < GC_SECTORS_USED(r);
+}
+
+static unsigned int bucket_heap_top(struct cache *ca)
+{
+ struct bucket *b;
+
+ return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
+}
+
+void bch_moving_gc(struct cache_set *c)
+{
+ struct cache *ca = c->cache;
+ struct bucket *b;
+ unsigned long sectors_to_move, reserve_sectors;
+
+ if (!c->copy_gc_enabled)
+ return;
+
+ mutex_lock(&c->bucket_lock);
+
+ sectors_to_move = 0;
+ reserve_sectors = ca->sb.bucket_size *
+ fifo_used(&ca->free[RESERVE_MOVINGGC]);
+
+ ca->heap.used = 0;
+
+ for_each_bucket(b, ca) {
+ if (GC_MARK(b) == GC_MARK_METADATA ||
+ !GC_SECTORS_USED(b) ||
+ GC_SECTORS_USED(b) == ca->sb.bucket_size ||
+ atomic_read(&b->pin))
+ continue;
+
+ if (!heap_full(&ca->heap)) {
+ sectors_to_move += GC_SECTORS_USED(b);
+ heap_add(&ca->heap, b, bucket_cmp);
+ } else if (bucket_cmp(b, heap_peek(&ca->heap))) {
+ sectors_to_move -= bucket_heap_top(ca);
+ sectors_to_move += GC_SECTORS_USED(b);
+
+ ca->heap.data[0] = b;
+ heap_sift(&ca->heap, 0, bucket_cmp);
+ }
+ }
+
+ while (sectors_to_move > reserve_sectors) {
+ heap_pop(&ca->heap, b, bucket_cmp);
+ sectors_to_move -= GC_SECTORS_USED(b);
+ }
+
+ while (heap_pop(&ca->heap, b, bucket_cmp))
+ SET_GC_MOVE(b, 1);
+
+ mutex_unlock(&c->bucket_lock);
+
+ c->moving_gc_keys.last_scanned = ZERO_KEY;
+
+ read_moving(c);
+}
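+
+/*
+ * Worked example with illustrative numbers (not taken from a real device):
+ * with a 1024-sector bucket size and 8 free buckets currently in
+ * RESERVE_MOVINGGC, reserve_sectors is 8192. bch_moving_gc() keeps the
+ * least-used candidate buckets in ca->heap; the fullest ones are popped back
+ * off until the total GC_SECTORS_USED() across the remaining buckets is at
+ * most 8192, and only those remaining (the emptiest) buckets are marked
+ * GC_MOVE so that read_moving() will copy their live data elsewhere.
+ */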
+
+void bch_moving_init_cache_set(struct cache_set *c)
+{
+ bch_keybuf_init(&c->moving_gc_keys);
+ sema_init(&c->moving_in_flight, 64);
+}
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
new file mode 100644
index 0000000000..a9b1f38962
--- /dev/null
+++ b/drivers/md/bcache/request.c
@@ -0,0 +1,1345 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Main bcache entry point - handle a read or a write request and decide what to
+ * do with it; the make_request functions are called by the block layer.
+ *
+ * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "bcache.h"
+#include "btree.h"
+#include "debug.h"
+#include "request.h"
+#include "writeback.h"
+
+#include <linux/module.h>
+#include <linux/hash.h>
+#include <linux/random.h>
+#include <linux/backing-dev.h>
+
+#include <trace/events/bcache.h>
+
+#define CUTOFF_CACHE_ADD 95
+#define CUTOFF_CACHE_READA 90
+
+struct kmem_cache *bch_search_cache;
+
+static void bch_data_insert_start(struct closure *cl);
+
+static unsigned int cache_mode(struct cached_dev *dc)
+{
+ return BDEV_CACHE_MODE(&dc->sb);
+}
+
+static bool verify(struct cached_dev *dc)
+{
+ return dc->verify;
+}
+
+static void bio_csum(struct bio *bio, struct bkey *k)
+{
+ struct bio_vec bv;
+ struct bvec_iter iter;
+ uint64_t csum = 0;
+
+ bio_for_each_segment(bv, bio, iter) {
+ void *d = bvec_kmap_local(&bv);
+
+ csum = crc64_be(csum, d, bv.bv_len);
+ kunmap_local(d);
+ }
+
+ k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
+}
+
+/* Insert data into cache */
+
+static void bch_data_insert_keys(struct closure *cl)
+{
+ struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+ atomic_t *journal_ref = NULL;
+ struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
+ int ret;
+
+ if (!op->replace)
+ journal_ref = bch_journal(op->c, &op->insert_keys,
+ op->flush_journal ? cl : NULL);
+
+ ret = bch_btree_insert(op->c, &op->insert_keys,
+ journal_ref, replace_key);
+ if (ret == -ESRCH) {
+ op->replace_collision = true;
+ } else if (ret) {
+ op->status = BLK_STS_RESOURCE;
+ op->insert_data_done = true;
+ }
+
+ if (journal_ref)
+ atomic_dec_bug(journal_ref);
+
+ if (!op->insert_data_done) {
+ continue_at(cl, bch_data_insert_start, op->wq);
+ return;
+ }
+
+ bch_keylist_free(&op->insert_keys);
+ closure_return(cl);
+}
+
+static int bch_keylist_realloc(struct keylist *l, unsigned int u64s,
+ struct cache_set *c)
+{
+ size_t oldsize = bch_keylist_nkeys(l);
+ size_t newsize = oldsize + u64s;
+
+ /*
+	 * The journalling code doesn't handle the case where the set of keys
+	 * to insert is bigger than an empty write: if we just return -ENOMEM here,
+ * bch_data_insert_keys() will insert the keys created so far
+ * and finish the rest when the keylist is empty.
+ */
+ if (newsize * sizeof(uint64_t) > block_bytes(c->cache) - sizeof(struct jset))
+ return -ENOMEM;
+
+ return __bch_keylist_realloc(l, u64s);
+}
+
+static void bch_data_invalidate(struct closure *cl)
+{
+ struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+ struct bio *bio = op->bio;
+
+ pr_debug("invalidating %i sectors from %llu\n",
+ bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
+
+ while (bio_sectors(bio)) {
+ unsigned int sectors = min(bio_sectors(bio),
+ 1U << (KEY_SIZE_BITS - 1));
+
+ if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
+ goto out;
+
+ bio->bi_iter.bi_sector += sectors;
+ bio->bi_iter.bi_size -= sectors << 9;
+
+ bch_keylist_add(&op->insert_keys,
+ &KEY(op->inode,
+ bio->bi_iter.bi_sector,
+ sectors));
+ }
+
+ op->insert_data_done = true;
+	/* matches the bio_get() in bch_data_insert() */
+ bio_put(bio);
+out:
+ continue_at(cl, bch_data_insert_keys, op->wq);
+}
+
+static void bch_data_insert_error(struct closure *cl)
+{
+ struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+
+ /*
+ * Our data write just errored, which means we've got a bunch of keys to
+ * insert that point to data that wasn't successfully written.
+ *
+ * We don't have to insert those keys but we still have to invalidate
+ * that region of the cache - so, if we just strip off all the pointers
+ * from the keys we'll accomplish just that.
+ */
+
+ struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;
+
+ while (src != op->insert_keys.top) {
+ struct bkey *n = bkey_next(src);
+
+ SET_KEY_PTRS(src, 0);
+ memmove(dst, src, bkey_bytes(src));
+
+ dst = bkey_next(dst);
+ src = n;
+ }
+
+ op->insert_keys.top = dst;
+
+ bch_data_insert_keys(cl);
+}
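+
+/*
+ * For example (illustrative values): a key for inode 1 covering 16 sectors
+ * at offset 4096 with one pointer becomes the same extent with
+ * KEY_PTRS(k) == 0. Inserting a pointerless key overwrites whatever the
+ * btree previously had for that range, which is exactly the invalidation
+ * described above - the same trick bch_data_invalidate() relies on.
+ */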
+
+static void bch_data_insert_endio(struct bio *bio)
+{
+ struct closure *cl = bio->bi_private;
+ struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+
+ if (bio->bi_status) {
+ /* TODO: We could try to recover from this. */
+ if (op->writeback)
+ op->status = bio->bi_status;
+ else if (!op->replace)
+ set_closure_fn(cl, bch_data_insert_error, op->wq);
+ else
+ set_closure_fn(cl, NULL, NULL);
+ }
+
+ bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
+}
+
+static void bch_data_insert_start(struct closure *cl)
+{
+ struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+ struct bio *bio = op->bio, *n;
+
+ if (op->bypass)
+ return bch_data_invalidate(cl);
+
+ if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
+ wake_up_gc(op->c);
+
+ /*
+ * Journal writes are marked REQ_PREFLUSH; if the original write was a
+ * flush, it'll wait on the journal write.
+ */
+ bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);
+
+ do {
+ unsigned int i;
+ struct bkey *k;
+ struct bio_set *split = &op->c->bio_split;
+
+		/* 1 for the device pointer and 1 for the checksum */
+ if (bch_keylist_realloc(&op->insert_keys,
+ 3 + (op->csum ? 1 : 0),
+ op->c)) {
+ continue_at(cl, bch_data_insert_keys, op->wq);
+ return;
+ }
+
+ k = op->insert_keys.top;
+ bkey_init(k);
+ SET_KEY_INODE(k, op->inode);
+ SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);
+
+ if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
+ op->write_point, op->write_prio,
+ op->writeback))
+ goto err;
+
+ n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);
+
+ n->bi_end_io = bch_data_insert_endio;
+ n->bi_private = cl;
+
+ if (op->writeback) {
+ SET_KEY_DIRTY(k, true);
+
+ for (i = 0; i < KEY_PTRS(k); i++)
+ SET_GC_MARK(PTR_BUCKET(op->c, k, i),
+ GC_MARK_DIRTY);
+ }
+
+ SET_KEY_CSUM(k, op->csum);
+ if (KEY_CSUM(k))
+ bio_csum(n, k);
+
+ trace_bcache_cache_insert(k);
+ bch_keylist_push(&op->insert_keys);
+
+ n->bi_opf = REQ_OP_WRITE;
+ bch_submit_bbio(n, op->c, k, 0);
+ } while (n != bio);
+
+ op->insert_data_done = true;
+ continue_at(cl, bch_data_insert_keys, op->wq);
+ return;
+err:
+ /* bch_alloc_sectors() blocks if s->writeback = true */
+ BUG_ON(op->writeback);
+
+ /*
+ * But if it's not a writeback write we'd rather just bail out if
+	 * there aren't any buckets ready to write to - it might take a while and
+ * we might be starving btree writes for gc or something.
+ */
+
+ if (!op->replace) {
+ /*
+ * Writethrough write: We can't complete the write until we've
+ * updated the index. But we don't want to delay the write while
+ * we wait for buckets to be freed up, so just invalidate the
+ * rest of the write.
+ */
+ op->bypass = true;
+ return bch_data_invalidate(cl);
+ } else {
+ /*
+ * From a cache miss, we can just insert the keys for the data
+ * we have written or bail out if we didn't do anything.
+ */
+ op->insert_data_done = true;
+ bio_put(bio);
+
+ if (!bch_keylist_empty(&op->insert_keys))
+ continue_at(cl, bch_data_insert_keys, op->wq);
+ else
+ closure_return(cl);
+ }
+}
+
+/**
+ * bch_data_insert - stick some data in the cache
+ * @cl: closure pointer.
+ *
+ * This is the starting point for any data to end up in a cache device; it could
+ * be from a normal write, or a writeback write, or a write to a flash only
+ * volume - it's also used by the moving garbage collector to compact data in
+ * mostly empty buckets.
+ *
+ * It first writes the data to the cache, creating a list of keys to be inserted
+ * (if the data had to be fragmented there will be multiple keys); after the
+ * data is written it calls bch_journal, and after the keys have been added to
+ * the next journal write they're inserted into the btree.
+ *
+ * It inserts the data in op->bio; bi_sector is used for the key offset,
+ * and op->inode is used for the key inode.
+ *
+ * If op->bypass is true, instead of inserting the data it invalidates the
+ * region of the cache represented by op->bio and op->inode.
+ */
+void bch_data_insert(struct closure *cl)
+{
+ struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+
+ trace_bcache_write(op->c, op->inode, op->bio,
+ op->writeback, op->bypass);
+
+ bch_keylist_init(&op->insert_keys);
+ bio_get(op->bio);
+ bch_data_insert_start(cl);
+}
+
+/*
+ * Congested? Return 0 (not congested) or the limit (in sectors)
+ * beyond which we should bypass the cache due to congestion.
+ */
+unsigned int bch_get_congested(const struct cache_set *c)
+{
+ int i;
+
+ if (!c->congested_read_threshold_us &&
+ !c->congested_write_threshold_us)
+ return 0;
+
+ i = (local_clock_us() - c->congested_last_us) / 1024;
+ if (i < 0)
+ return 0;
+
+ i += atomic_read(&c->congested);
+ if (i >= 0)
+ return 0;
+
+ i += CONGESTED_MAX;
+
+ if (i > 0)
+ i = fract_exp_two(i, 6);
+
+ i -= hweight32(get_random_u32());
+
+ return i > 0 ? i : 1;
+}
+
+static void add_sequential(struct task_struct *t)
+{
+ ewma_add(t->sequential_io_avg,
+ t->sequential_io, 8, 0);
+
+ t->sequential_io = 0;
+}
+
+static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
+{
+ return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
+}
+
+static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
+{
+ struct cache_set *c = dc->disk.c;
+ unsigned int mode = cache_mode(dc);
+ unsigned int sectors, congested;
+ struct task_struct *task = current;
+ struct io *i;
+
+ if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
+ c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
+ (bio_op(bio) == REQ_OP_DISCARD))
+ goto skip;
+
+ if (mode == CACHE_MODE_NONE ||
+ (mode == CACHE_MODE_WRITEAROUND &&
+ op_is_write(bio_op(bio))))
+ goto skip;
+
+ /*
+	 * If the bio is for read-ahead or background IO, whether to bypass
+	 * it depends on the following:
+	 * - If the IO is for metadata, always cache it (no bypass).
+	 * - If the IO is not metadata, check dc->cache_readahead_policy:
+	 *      BCH_CACHE_READA_ALL: cache it, do not bypass
+	 *      BCH_CACHE_READA_META_ONLY: do not cache it, bypass
+	 * That is, read-ahead requests for metadata always get cached
+	 * (e.g. for gfs2 or xfs).
+ */
+ if ((bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND))) {
+ if (!(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
+ (dc->cache_readahead_policy != BCH_CACHE_READA_ALL))
+ goto skip;
+ }
+
+ if (bio->bi_iter.bi_sector & (c->cache->sb.block_size - 1) ||
+ bio_sectors(bio) & (c->cache->sb.block_size - 1)) {
+ pr_debug("skipping unaligned io\n");
+ goto skip;
+ }
+
+ if (bypass_torture_test(dc)) {
+ if (get_random_u32_below(4) == 3)
+ goto skip;
+ else
+ goto rescale;
+ }
+
+ congested = bch_get_congested(c);
+ if (!congested && !dc->sequential_cutoff)
+ goto rescale;
+
+ spin_lock(&dc->io_lock);
+
+ hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
+ if (i->last == bio->bi_iter.bi_sector &&
+ time_before(jiffies, i->jiffies))
+ goto found;
+
+ i = list_first_entry(&dc->io_lru, struct io, lru);
+
+ add_sequential(task);
+ i->sequential = 0;
+found:
+ if (i->sequential + bio->bi_iter.bi_size > i->sequential)
+ i->sequential += bio->bi_iter.bi_size;
+
+ i->last = bio_end_sector(bio);
+ i->jiffies = jiffies + msecs_to_jiffies(5000);
+ task->sequential_io = i->sequential;
+
+ hlist_del(&i->hash);
+ hlist_add_head(&i->hash, iohash(dc, i->last));
+ list_move_tail(&i->lru, &dc->io_lru);
+
+ spin_unlock(&dc->io_lock);
+
+ sectors = max(task->sequential_io,
+ task->sequential_io_avg) >> 9;
+
+ if (dc->sequential_cutoff &&
+ sectors >= dc->sequential_cutoff >> 9) {
+ trace_bcache_bypass_sequential(bio);
+ goto skip;
+ }
+
+ if (congested && sectors >= congested) {
+ trace_bcache_bypass_congested(bio);
+ goto skip;
+ }
+
+rescale:
+ bch_rescale_priorities(c, bio_sectors(bio));
+ return false;
+skip:
+ bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
+ return true;
+}
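+
+/*
+ * Worked example, assuming a 4 MiB sequential_cutoff (the value is tunable
+ * via sysfs, so treat the number as illustrative): a task issuing
+ * back-to-back contiguous 1 MiB reads has its running total tracked via
+ * dc->io_hash; the first few MiB are cached, and once the tracked
+ * sequential I/O reaches 4 MiB every further contiguous request is bypassed
+ * straight to the backing device. Congestion works the same way: once
+ * bch_get_congested() returns a non-zero limit, a stream whose tracked
+ * sequential total (in sectors) reaches that limit is bypassed as well.
+ */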
+
+/* Cache lookup */
+
+struct search {
+ /* Stack frame for bio_complete */
+ struct closure cl;
+
+ struct bbio bio;
+ struct bio *orig_bio;
+ struct bio *cache_miss;
+ struct bcache_device *d;
+
+ unsigned int insert_bio_sectors;
+ unsigned int recoverable:1;
+ unsigned int write:1;
+ unsigned int read_dirty_data:1;
+ unsigned int cache_missed:1;
+
+ struct block_device *orig_bdev;
+ unsigned long start_time;
+
+ struct btree_op op;
+ struct data_insert_op iop;
+};
+
+static void bch_cache_read_endio(struct bio *bio)
+{
+ struct bbio *b = container_of(bio, struct bbio, bio);
+ struct closure *cl = bio->bi_private;
+ struct search *s = container_of(cl, struct search, cl);
+
+ /*
+ * If the bucket was reused while our bio was in flight, we might have
+	 * read the wrong data. Set s->iop.status but leave bio->bi_status
+	 * alone so the error doesn't get counted against the cache device;
+	 * we'll still reread the data from the backing device.
+ */
+
+ if (bio->bi_status)
+ s->iop.status = bio->bi_status;
+ else if (!KEY_DIRTY(&b->key) &&
+ ptr_stale(s->iop.c, &b->key, 0)) {
+ atomic_long_inc(&s->iop.c->cache_read_races);
+ s->iop.status = BLK_STS_IOERR;
+ }
+
+ bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
+}
+
+/*
+ * Read from a single key, handling the initial cache miss if the key starts in
+ * the middle of the bio
+ */
+static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
+{
+ struct search *s = container_of(op, struct search, op);
+ struct bio *n, *bio = &s->bio.bio;
+ struct bkey *bio_key;
+ unsigned int ptr;
+
+ if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
+ return MAP_CONTINUE;
+
+ if (KEY_INODE(k) != s->iop.inode ||
+ KEY_START(k) > bio->bi_iter.bi_sector) {
+ unsigned int bio_sectors = bio_sectors(bio);
+ unsigned int sectors = KEY_INODE(k) == s->iop.inode
+ ? min_t(uint64_t, INT_MAX,
+ KEY_START(k) - bio->bi_iter.bi_sector)
+ : INT_MAX;
+ int ret = s->d->cache_miss(b, s, bio, sectors);
+
+ if (ret != MAP_CONTINUE)
+ return ret;
+
+ /* if this was a complete miss we shouldn't get here */
+ BUG_ON(bio_sectors <= sectors);
+ }
+
+ if (!KEY_SIZE(k))
+ return MAP_CONTINUE;
+
+ /* XXX: figure out best pointer - for multiple cache devices */
+ ptr = 0;
+
+ PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;
+
+ if (KEY_DIRTY(k))
+ s->read_dirty_data = true;
+
+ n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
+ KEY_OFFSET(k) - bio->bi_iter.bi_sector),
+ GFP_NOIO, &s->d->bio_split);
+
+ bio_key = &container_of(n, struct bbio, bio)->key;
+ bch_bkey_copy_single_ptr(bio_key, k, ptr);
+
+ bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
+ bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);
+
+ n->bi_end_io = bch_cache_read_endio;
+ n->bi_private = &s->cl;
+
+ /*
+ * The bucket we're reading from might be reused while our bio
+ * is in flight, and we could then end up reading the wrong
+ * data.
+ *
+	 * We guard against this by checking (in bch_cache_read_endio()) if
+ * the pointer is stale again; if so, we treat it as an error
+ * and reread from the backing device (but we don't pass that
+ * error up anywhere).
+ */
+
+ __bch_submit_bbio(n, b->c);
+ return n == bio ? MAP_DONE : MAP_CONTINUE;
+}
+
+static void cache_lookup(struct closure *cl)
+{
+ struct search *s = container_of(cl, struct search, iop.cl);
+ struct bio *bio = &s->bio.bio;
+ struct cached_dev *dc;
+ int ret;
+
+ bch_btree_op_init(&s->op, -1);
+
+ ret = bch_btree_map_keys(&s->op, s->iop.c,
+ &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
+ cache_lookup_fn, MAP_END_KEY);
+ if (ret == -EAGAIN) {
+ continue_at(cl, cache_lookup, bcache_wq);
+ return;
+ }
+
+ /*
+	 * We might hit an error when searching the btree; if that happens we
+	 * get a negative ret. In that case we should not recover data from
+	 * the backing device (when the cache device is dirty) because we
+	 * don't know whether all the bkeys the read request covers are clean.
+	 *
+	 * If that happened, s->iop.status still has its initial value from
+	 * before we submitted s->bio.bio.
+ */
+ if (ret < 0) {
+ BUG_ON(ret == -EINTR);
+ if (s->d && s->d->c &&
+ !UUID_FLASH_ONLY(&s->d->c->uuids[s->d->id])) {
+ dc = container_of(s->d, struct cached_dev, disk);
+ if (dc && atomic_read(&dc->has_dirty))
+ s->recoverable = false;
+ }
+ if (!s->iop.status)
+ s->iop.status = BLK_STS_IOERR;
+ }
+
+ closure_return(cl);
+}
+
+/* Common code for the make_request functions */
+
+static void request_endio(struct bio *bio)
+{
+ struct closure *cl = bio->bi_private;
+
+ if (bio->bi_status) {
+ struct search *s = container_of(cl, struct search, cl);
+
+ s->iop.status = bio->bi_status;
+ /* Only cache read errors are recoverable */
+ s->recoverable = false;
+ }
+
+ bio_put(bio);
+ closure_put(cl);
+}
+
+static void backing_request_endio(struct bio *bio)
+{
+ struct closure *cl = bio->bi_private;
+
+ if (bio->bi_status) {
+ struct search *s = container_of(cl, struct search, cl);
+ struct cached_dev *dc = container_of(s->d,
+ struct cached_dev, disk);
+ /*
+		 * If a bio has REQ_PREFLUSH for writeback mode, it was
+		 * specially assembled in cached_dev_write() for a non-zero
+		 * size write request which has REQ_PREFLUSH. We don't set
+		 * s->iop.status for this failure; the status will be decided
+		 * by the result of the bch_data_insert() operation.
+ */
+ if (unlikely(s->iop.writeback &&
+ bio->bi_opf & REQ_PREFLUSH)) {
+ pr_err("Can't flush %pg: returned bi_status %i\n",
+ dc->bdev, bio->bi_status);
+ } else {
+ /* set to orig_bio->bi_status in bio_complete() */
+ s->iop.status = bio->bi_status;
+ }
+ s->recoverable = false;
+ /* should count I/O error for backing device here */
+ bch_count_backing_io_errors(dc, bio);
+ }
+
+ bio_put(bio);
+ closure_put(cl);
+}
+
+static void bio_complete(struct search *s)
+{
+ if (s->orig_bio) {
+ /* Count on bcache device */
+ bio_end_io_acct_remapped(s->orig_bio, s->start_time,
+ s->orig_bdev);
+ trace_bcache_request_end(s->d, s->orig_bio);
+ s->orig_bio->bi_status = s->iop.status;
+ bio_endio(s->orig_bio);
+ s->orig_bio = NULL;
+ }
+}
+
+static void do_bio_hook(struct search *s,
+ struct bio *orig_bio,
+ bio_end_io_t *end_io_fn)
+{
+ struct bio *bio = &s->bio.bio;
+
+ bio_init_clone(orig_bio->bi_bdev, bio, orig_bio, GFP_NOIO);
+ /*
+	 * bi_end_io can be set separately somewhere else, e.g. by one of
+	 * the variants below:
+ * - cache_bio->bi_end_io from cached_dev_cache_miss()
+ * - n->bi_end_io from cache_lookup_fn()
+ */
+ bio->bi_end_io = end_io_fn;
+ bio->bi_private = &s->cl;
+
+ bio_cnt_set(bio, 3);
+}
+
+static void search_free(struct closure *cl)
+{
+ struct search *s = container_of(cl, struct search, cl);
+
+ atomic_dec(&s->iop.c->search_inflight);
+
+ if (s->iop.bio)
+ bio_put(s->iop.bio);
+
+ bio_complete(s);
+ closure_debug_destroy(cl);
+ mempool_free(s, &s->iop.c->search);
+}
+
+static inline struct search *search_alloc(struct bio *bio,
+ struct bcache_device *d, struct block_device *orig_bdev,
+ unsigned long start_time)
+{
+ struct search *s;
+
+ s = mempool_alloc(&d->c->search, GFP_NOIO);
+
+ closure_init(&s->cl, NULL);
+ do_bio_hook(s, bio, request_endio);
+ atomic_inc(&d->c->search_inflight);
+
+ s->orig_bio = bio;
+ s->cache_miss = NULL;
+ s->cache_missed = 0;
+ s->d = d;
+ s->recoverable = 1;
+ s->write = op_is_write(bio_op(bio));
+ s->read_dirty_data = 0;
+ /* Count on the bcache device */
+ s->orig_bdev = orig_bdev;
+ s->start_time = start_time;
+ s->iop.c = d->c;
+ s->iop.bio = NULL;
+ s->iop.inode = d->id;
+ s->iop.write_point = hash_long((unsigned long) current, 16);
+ s->iop.write_prio = 0;
+ s->iop.status = 0;
+ s->iop.flags = 0;
+ s->iop.flush_journal = op_is_flush(bio->bi_opf);
+ s->iop.wq = bcache_wq;
+
+ return s;
+}
+
+/* Cached devices */
+
+static void cached_dev_bio_complete(struct closure *cl)
+{
+ struct search *s = container_of(cl, struct search, cl);
+ struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+
+ cached_dev_put(dc);
+ search_free(cl);
+}
+
+/* Process reads */
+
+static void cached_dev_read_error_done(struct closure *cl)
+{
+ struct search *s = container_of(cl, struct search, cl);
+
+ if (s->iop.replace_collision)
+ bch_mark_cache_miss_collision(s->iop.c, s->d);
+
+ if (s->iop.bio)
+ bio_free_pages(s->iop.bio);
+
+ cached_dev_bio_complete(cl);
+}
+
+static void cached_dev_read_error(struct closure *cl)
+{
+ struct search *s = container_of(cl, struct search, cl);
+ struct bio *bio = &s->bio.bio;
+
+ /*
+	 * If the read request hit dirty data (s->read_dirty_data is true),
+	 * then recovering a failed read request from the cached device may
+	 * return stale data. So read failure recovery is only permitted
+	 * when the read request hit clean data in the cache device, or when
+	 * a cache read race happened.
+ */
+ if (s->recoverable && !s->read_dirty_data) {
+ /* Retry from the backing device: */
+ trace_bcache_read_retry(s->orig_bio);
+
+ s->iop.status = 0;
+ do_bio_hook(s, s->orig_bio, backing_request_endio);
+
+ /* XXX: invalidate cache */
+
+ /* I/O request sent to backing device */
+ closure_bio_submit(s->iop.c, bio, cl);
+ }
+
+ continue_at(cl, cached_dev_read_error_done, NULL);
+}
+
+static void cached_dev_cache_miss_done(struct closure *cl)
+{
+ struct search *s = container_of(cl, struct search, cl);
+ struct bcache_device *d = s->d;
+
+ if (s->iop.replace_collision)
+ bch_mark_cache_miss_collision(s->iop.c, s->d);
+
+ if (s->iop.bio)
+ bio_free_pages(s->iop.bio);
+
+ cached_dev_bio_complete(cl);
+ closure_put(&d->cl);
+}
+
+static void cached_dev_read_done(struct closure *cl)
+{
+ struct search *s = container_of(cl, struct search, cl);
+ struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+
+ /*
+ * We had a cache miss; cache_bio now contains data ready to be inserted
+ * into the cache.
+ *
+ * First, we copy the data we just read from cache_bio's bounce buffers
+ * to the buffers the original bio pointed to:
+ */
+
+ if (s->iop.bio) {
+ bio_reset(s->iop.bio, s->cache_miss->bi_bdev, REQ_OP_READ);
+ s->iop.bio->bi_iter.bi_sector =
+ s->cache_miss->bi_iter.bi_sector;
+ s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
+ bio_clone_blkg_association(s->iop.bio, s->cache_miss);
+ bch_bio_map(s->iop.bio, NULL);
+
+ bio_copy_data(s->cache_miss, s->iop.bio);
+
+ bio_put(s->cache_miss);
+ s->cache_miss = NULL;
+ }
+
+ if (verify(dc) && s->recoverable && !s->read_dirty_data)
+ bch_data_verify(dc, s->orig_bio);
+
+ closure_get(&dc->disk.cl);
+ bio_complete(s);
+
+ if (s->iop.bio &&
+ !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
+ BUG_ON(!s->iop.replace);
+ closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
+ }
+
+ continue_at(cl, cached_dev_cache_miss_done, NULL);
+}
+
+static void cached_dev_read_done_bh(struct closure *cl)
+{
+ struct search *s = container_of(cl, struct search, cl);
+ struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+
+ bch_mark_cache_accounting(s->iop.c, s->d,
+ !s->cache_missed, s->iop.bypass);
+ trace_bcache_read(s->orig_bio, !s->cache_missed, s->iop.bypass);
+
+ if (s->iop.status)
+ continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
+ else if (s->iop.bio || verify(dc))
+ continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
+ else
+ continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
+}
+
+static int cached_dev_cache_miss(struct btree *b, struct search *s,
+ struct bio *bio, unsigned int sectors)
+{
+ int ret = MAP_CONTINUE;
+ struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+ struct bio *miss, *cache_bio;
+ unsigned int size_limit;
+
+ s->cache_missed = 1;
+
+ if (s->cache_miss || s->iop.bypass) {
+ miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
+ ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
+ goto out_submit;
+ }
+
+	/* Limit to a valid replace key size and number of cache_bio bvecs */
+ size_limit = min_t(unsigned int, BIO_MAX_VECS * PAGE_SECTORS,
+ (1 << KEY_SIZE_BITS) - 1);
+ s->insert_bio_sectors = min3(size_limit, sectors, bio_sectors(bio));
+
+ s->iop.replace_key = KEY(s->iop.inode,
+ bio->bi_iter.bi_sector + s->insert_bio_sectors,
+ s->insert_bio_sectors);
+
+ ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
+ if (ret)
+ return ret;
+
+ s->iop.replace = true;
+
+ miss = bio_next_split(bio, s->insert_bio_sectors, GFP_NOIO,
+ &s->d->bio_split);
+
+ /* btree_search_recurse()'s btree iterator is no good anymore */
+ ret = miss == bio ? MAP_DONE : -EINTR;
+
+ cache_bio = bio_alloc_bioset(miss->bi_bdev,
+ DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
+ 0, GFP_NOWAIT, &dc->disk.bio_split);
+ if (!cache_bio)
+ goto out_submit;
+
+ cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
+ cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
+
+ cache_bio->bi_end_io = backing_request_endio;
+ cache_bio->bi_private = &s->cl;
+
+ bch_bio_map(cache_bio, NULL);
+ if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
+ goto out_put;
+
+ s->cache_miss = miss;
+ s->iop.bio = cache_bio;
+ bio_get(cache_bio);
+ /* I/O request sent to backing device */
+ closure_bio_submit(s->iop.c, cache_bio, &s->cl);
+
+ return ret;
+out_put:
+ bio_put(cache_bio);
+out_submit:
+ miss->bi_end_io = backing_request_endio;
+ miss->bi_private = &s->cl;
+ /* I/O request sent to backing device */
+ closure_bio_submit(s->iop.c, miss, &s->cl);
+ return ret;
+}
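+
+/*
+ * Worked example for the size_limit above, assuming 4 KiB pages (so
+ * PAGE_SECTORS == 8) and BIO_MAX_VECS == 256: the bvec limit is
+ * 256 * 8 = 2048 sectors (1 MiB), which is smaller than the largest valid
+ * key size of (1 << KEY_SIZE_BITS) - 1 sectors, so a single cache miss
+ * inserts at most 1 MiB into the cache per replace key.
+ */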
+
+static void cached_dev_read(struct cached_dev *dc, struct search *s)
+{
+ struct closure *cl = &s->cl;
+
+ closure_call(&s->iop.cl, cache_lookup, NULL, cl);
+ continue_at(cl, cached_dev_read_done_bh, NULL);
+}
+
+/* Process writes */
+
+static void cached_dev_write_complete(struct closure *cl)
+{
+ struct search *s = container_of(cl, struct search, cl);
+ struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+
+ up_read_non_owner(&dc->writeback_lock);
+ cached_dev_bio_complete(cl);
+}
+
+static void cached_dev_write(struct cached_dev *dc, struct search *s)
+{
+ struct closure *cl = &s->cl;
+ struct bio *bio = &s->bio.bio;
+ struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
+ struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
+
+ bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);
+
+ down_read_non_owner(&dc->writeback_lock);
+ if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
+ /*
+ * We overlap with some dirty data undergoing background
+ * writeback, force this write to writeback
+ */
+ s->iop.bypass = false;
+ s->iop.writeback = true;
+ }
+
+ /*
+ * Discards aren't _required_ to do anything, so skipping if
+ * check_overlapping returned true is ok
+ *
+ * But check_overlapping drops dirty keys for which io hasn't started,
+ * so we still want to call it.
+ */
+ if (bio_op(bio) == REQ_OP_DISCARD)
+ s->iop.bypass = true;
+
+ if (should_writeback(dc, s->orig_bio,
+ cache_mode(dc),
+ s->iop.bypass)) {
+ s->iop.bypass = false;
+ s->iop.writeback = true;
+ }
+
+ if (s->iop.bypass) {
+ s->iop.bio = s->orig_bio;
+ bio_get(s->iop.bio);
+
+ if (bio_op(bio) == REQ_OP_DISCARD &&
+ !bdev_max_discard_sectors(dc->bdev))
+ goto insert_data;
+
+ /* I/O request sent to backing device */
+ bio->bi_end_io = backing_request_endio;
+ closure_bio_submit(s->iop.c, bio, cl);
+
+ } else if (s->iop.writeback) {
+ bch_writeback_add(dc);
+ s->iop.bio = bio;
+
+ if (bio->bi_opf & REQ_PREFLUSH) {
+ /*
+ * Also need to send a flush to the backing
+ * device.
+ */
+ struct bio *flush;
+
+ flush = bio_alloc_bioset(bio->bi_bdev, 0,
+ REQ_OP_WRITE | REQ_PREFLUSH,
+ GFP_NOIO, &dc->disk.bio_split);
+ if (!flush) {
+ s->iop.status = BLK_STS_RESOURCE;
+ goto insert_data;
+ }
+ flush->bi_end_io = backing_request_endio;
+ flush->bi_private = cl;
+ /* I/O request sent to backing device */
+ closure_bio_submit(s->iop.c, flush, cl);
+ }
+ } else {
+ s->iop.bio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
+ &dc->disk.bio_split);
+ /* I/O request sent to backing device */
+ bio->bi_end_io = backing_request_endio;
+ closure_bio_submit(s->iop.c, bio, cl);
+ }
+
+insert_data:
+ closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
+ continue_at(cl, cached_dev_write_complete, NULL);
+}
+
+static void cached_dev_nodata(struct closure *cl)
+{
+ struct search *s = container_of(cl, struct search, cl);
+ struct bio *bio = &s->bio.bio;
+
+ if (s->iop.flush_journal)
+ bch_journal_meta(s->iop.c, cl);
+
+ /* If it's a flush, we send the flush to the backing device too */
+ bio->bi_end_io = backing_request_endio;
+ closure_bio_submit(s->iop.c, bio, cl);
+
+ continue_at(cl, cached_dev_bio_complete, NULL);
+}
+
+struct detached_dev_io_private {
+ struct bcache_device *d;
+ unsigned long start_time;
+ bio_end_io_t *bi_end_io;
+ void *bi_private;
+ struct block_device *orig_bdev;
+};
+
+static void detached_dev_end_io(struct bio *bio)
+{
+ struct detached_dev_io_private *ddip;
+
+ ddip = bio->bi_private;
+ bio->bi_end_io = ddip->bi_end_io;
+ bio->bi_private = ddip->bi_private;
+
+ /* Count on the bcache device */
+ bio_end_io_acct_remapped(bio, ddip->start_time, ddip->orig_bdev);
+
+ if (bio->bi_status) {
+ struct cached_dev *dc = container_of(ddip->d,
+ struct cached_dev, disk);
+ /* should count I/O error for backing device here */
+ bch_count_backing_io_errors(dc, bio);
+ }
+
+ kfree(ddip);
+ bio->bi_end_io(bio);
+}
+
+static void detached_dev_do_request(struct bcache_device *d, struct bio *bio,
+ struct block_device *orig_bdev, unsigned long start_time)
+{
+ struct detached_dev_io_private *ddip;
+ struct cached_dev *dc = container_of(d, struct cached_dev, disk);
+
+ /*
+	 * No need to call closure_get(&dc->disk.cl) here, because the upper
+	 * layer has already opened the bcache device, which called
+	 * closure_get(&dc->disk.cl).
+ */
+ ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
+ if (!ddip) {
+ bio->bi_status = BLK_STS_RESOURCE;
+ bio->bi_end_io(bio);
+ return;
+ }
+
+ ddip->d = d;
+ /* Count on the bcache device */
+ ddip->orig_bdev = orig_bdev;
+ ddip->start_time = start_time;
+ ddip->bi_end_io = bio->bi_end_io;
+ ddip->bi_private = bio->bi_private;
+ bio->bi_end_io = detached_dev_end_io;
+ bio->bi_private = ddip;
+
+ if ((bio_op(bio) == REQ_OP_DISCARD) &&
+ !bdev_max_discard_sectors(dc->bdev))
+ bio->bi_end_io(bio);
+ else
+ submit_bio_noacct(bio);
+}
+
+static void quit_max_writeback_rate(struct cache_set *c,
+ struct cached_dev *this_dc)
+{
+ int i;
+ struct bcache_device *d;
+ struct cached_dev *dc;
+
+ /*
+	 * The mutex bch_register_lock may be contended by other parallel
+	 * requesters, or by attach/detach operations on other backing
+	 * devices. Waiting for the mutex lock may increase I/O request
+	 * latency by seconds or more. To avoid such a situation, if
+	 * mutex_trylock() fails, only the writeback rate of the current
+	 * cached device is set to 1, and __update_writeback_rate() will
+	 * decide the writeback rate of the other cached devices (remember
+	 * that c->idle_counter is already 0).
+ */
+ if (mutex_trylock(&bch_register_lock)) {
+ for (i = 0; i < c->devices_max_used; i++) {
+ if (!c->devices[i])
+ continue;
+
+ if (UUID_FLASH_ONLY(&c->uuids[i]))
+ continue;
+
+ d = c->devices[i];
+ dc = container_of(d, struct cached_dev, disk);
+ /*
+			 * Set the writeback rate to the default minimum value,
+			 * then let update_writeback_rate() decide the
+			 * upcoming rate.
+ */
+ atomic_long_set(&dc->writeback_rate.rate, 1);
+ }
+ mutex_unlock(&bch_register_lock);
+ } else
+ atomic_long_set(&this_dc->writeback_rate.rate, 1);
+}
+
+/* Cached devices - read & write stuff */
+
+void cached_dev_submit_bio(struct bio *bio)
+{
+ struct search *s;
+ struct block_device *orig_bdev = bio->bi_bdev;
+ struct bcache_device *d = orig_bdev->bd_disk->private_data;
+ struct cached_dev *dc = container_of(d, struct cached_dev, disk);
+ unsigned long start_time;
+ int rw = bio_data_dir(bio);
+
+ if (unlikely((d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags)) ||
+ dc->io_disable)) {
+ bio->bi_status = BLK_STS_IOERR;
+ bio_endio(bio);
+ return;
+ }
+
+ if (likely(d->c)) {
+ if (atomic_read(&d->c->idle_counter))
+ atomic_set(&d->c->idle_counter, 0);
+ /*
+		 * If the cache set's at_max_writeback_rate is true and new I/O
+		 * arrives, quit the max writeback rate of all cached devices
+ * attached to this cache set, and set at_max_writeback_rate
+ * to false.
+ */
+ if (unlikely(atomic_read(&d->c->at_max_writeback_rate) == 1)) {
+ atomic_set(&d->c->at_max_writeback_rate, 0);
+ quit_max_writeback_rate(d->c, dc);
+ }
+ }
+
+ start_time = bio_start_io_acct(bio);
+
+ bio_set_dev(bio, dc->bdev);
+ bio->bi_iter.bi_sector += dc->sb.data_offset;
+
+ if (cached_dev_get(dc)) {
+ s = search_alloc(bio, d, orig_bdev, start_time);
+ trace_bcache_request_start(s->d, bio);
+
+ if (!bio->bi_iter.bi_size) {
+ /*
+ * can't call bch_journal_meta from under
+ * submit_bio_noacct
+ */
+ continue_at_nobarrier(&s->cl,
+ cached_dev_nodata,
+ bcache_wq);
+ } else {
+ s->iop.bypass = check_should_bypass(dc, bio);
+
+ if (rw)
+ cached_dev_write(dc, s);
+ else
+ cached_dev_read(dc, s);
+ }
+ } else
+ /* I/O request sent to backing device */
+ detached_dev_do_request(d, bio, orig_bdev, start_time);
+}
+
+static int cached_dev_ioctl(struct bcache_device *d, blk_mode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ struct cached_dev *dc = container_of(d, struct cached_dev, disk);
+
+ if (dc->io_disable)
+ return -EIO;
+ if (!dc->bdev->bd_disk->fops->ioctl)
+ return -ENOTTY;
+ return dc->bdev->bd_disk->fops->ioctl(dc->bdev, mode, cmd, arg);
+}
+
+void bch_cached_dev_request_init(struct cached_dev *dc)
+{
+ dc->disk.cache_miss = cached_dev_cache_miss;
+ dc->disk.ioctl = cached_dev_ioctl;
+}
+
+/* Flash backed devices */
+
+static int flash_dev_cache_miss(struct btree *b, struct search *s,
+ struct bio *bio, unsigned int sectors)
+{
+ unsigned int bytes = min(sectors, bio_sectors(bio)) << 9;
+
+ swap(bio->bi_iter.bi_size, bytes);
+ zero_fill_bio(bio);
+ swap(bio->bi_iter.bi_size, bytes);
+
+ bio_advance(bio, bytes);
+
+ if (!bio->bi_iter.bi_size)
+ return MAP_DONE;
+
+ return MAP_CONTINUE;
+}
+
+static void flash_dev_nodata(struct closure *cl)
+{
+ struct search *s = container_of(cl, struct search, cl);
+
+ if (s->iop.flush_journal)
+ bch_journal_meta(s->iop.c, cl);
+
+ continue_at(cl, search_free, NULL);
+}
+
+void flash_dev_submit_bio(struct bio *bio)
+{
+ struct search *s;
+ struct closure *cl;
+ struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
+
+ if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
+ bio->bi_status = BLK_STS_IOERR;
+ bio_endio(bio);
+ return;
+ }
+
+ s = search_alloc(bio, d, bio->bi_bdev, bio_start_io_acct(bio));
+ cl = &s->cl;
+ bio = &s->bio.bio;
+
+ trace_bcache_request_start(s->d, bio);
+
+ if (!bio->bi_iter.bi_size) {
+ /*
+ * can't call bch_journal_meta from under submit_bio_noacct
+ */
+ continue_at_nobarrier(&s->cl,
+ flash_dev_nodata,
+ bcache_wq);
+ return;
+ } else if (bio_data_dir(bio)) {
+ bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
+ &KEY(d->id, bio->bi_iter.bi_sector, 0),
+ &KEY(d->id, bio_end_sector(bio), 0));
+
+ s->iop.bypass = (bio_op(bio) == REQ_OP_DISCARD) != 0;
+ s->iop.writeback = true;
+ s->iop.bio = bio;
+
+ closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
+ } else {
+ closure_call(&s->iop.cl, cache_lookup, NULL, cl);
+ }
+
+ continue_at(cl, search_free, NULL);
+}
+
+static int flash_dev_ioctl(struct bcache_device *d, blk_mode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ return -ENOTTY;
+}
+
+void bch_flash_dev_request_init(struct bcache_device *d)
+{
+ d->cache_miss = flash_dev_cache_miss;
+ d->ioctl = flash_dev_ioctl;
+}
+
+void bch_request_exit(void)
+{
+ kmem_cache_destroy(bch_search_cache);
+}
+
+int __init bch_request_init(void)
+{
+ bch_search_cache = KMEM_CACHE(search, 0);
+ if (!bch_search_cache)
+ return -ENOMEM;
+
+ return 0;
+}
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
new file mode 100644
index 0000000000..38ab4856ea
--- /dev/null
+++ b/drivers/md/bcache/request.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHE_REQUEST_H_
+#define _BCACHE_REQUEST_H_
+
+struct data_insert_op {
+ struct closure cl;
+ struct cache_set *c;
+ struct bio *bio;
+ struct workqueue_struct *wq;
+
+ unsigned int inode;
+ uint16_t write_point;
+ uint16_t write_prio;
+ blk_status_t status;
+
+ union {
+ uint16_t flags;
+
+ struct {
+ unsigned int bypass:1;
+ unsigned int writeback:1;
+ unsigned int flush_journal:1;
+ unsigned int csum:1;
+
+ unsigned int replace:1;
+ unsigned int replace_collision:1;
+
+ unsigned int insert_data_done:1;
+ };
+ };
+
+ struct keylist insert_keys;
+ BKEY_PADDED(replace_key);
+};
+
+unsigned int bch_get_congested(const struct cache_set *c);
+void bch_data_insert(struct closure *cl);
+
+void bch_cached_dev_request_init(struct cached_dev *dc);
+void cached_dev_submit_bio(struct bio *bio);
+
+void bch_flash_dev_request_init(struct bcache_device *d);
+void flash_dev_submit_bio(struct bio *bio);
+
+extern struct kmem_cache *bch_search_cache;
+
+#endif /* _BCACHE_REQUEST_H_ */
diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c
new file mode 100644
index 0000000000..68b0221603
--- /dev/null
+++ b/drivers/md/bcache/stats.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * bcache stats code
+ *
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "bcache.h"
+#include "stats.h"
+#include "btree.h"
+#include "sysfs.h"
+
+/*
+ * We keep absolute totals of various statistics, and additionally a set of three
+ * rolling averages.
+ *
+ * Every so often, a timer goes off and rescales the rolling averages.
+ * accounting_rescale[] is how many times the timer has to go off before we
+ * rescale each set of numbers; that gets us half-lives of 5 minutes, one hour,
+ * and one day.
+ *
+ * accounting_delay is how often the timer goes off - 22 times in 5 minutes,
+ * and accounting_weight is what we use to rescale:
+ *
+ * pow(31 / 32, 22) ~= 1/2
+ *
+ * So that we don't have to increment each set of numbers every time we (say)
+ * get a cache hit, we increment a single atomic_t in acc->collector, and when
+ * the rescale function runs it resets the atomic counter to 0 and adds its
+ * old value to each of the exported numbers.
+ *
+ * To reduce rounding error, the numbers in struct cache_stats are all
+ * stored left shifted by 16, and scaled back in the sysfs show() function.
+ */
+
+static const unsigned int DAY_RESCALE = 288;
+static const unsigned int HOUR_RESCALE = 12;
+static const unsigned int FIVE_MINUTE_RESCALE = 1;
+static const unsigned int accounting_delay = (HZ * 300) / 22;
+static const unsigned int accounting_weight = 32;
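+
+/*
+ * A compile-excluded sketch (illustrative only, not part of the driver) of
+ * the arithmetic above: scale_stat() below does
+ * ewma_add(stat, 0, accounting_weight, 0), i.e. stat = stat * 31 / 32, and
+ * with accounting_delay the timer fires 22 times per five minutes, so one
+ * five-minute period scales a counter by (31/32)^22 ~= 0.5.
+ */
+#if 0
+static void bch_stats_halflife_example(void)
+{
+	unsigned long stat = 1UL << 16;	/* counters are stored << 16 */
+	unsigned int i;
+
+	for (i = 0; i < 22; i++)	/* 22 timer ticks per 5 minutes */
+		ewma_add(stat, 0, accounting_weight, 0);
+
+	/* stat is now roughly half of 1 << 16 */
+}
+#endif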
+
+/* sysfs reading/writing */
+
+read_attribute(cache_hits);
+read_attribute(cache_misses);
+read_attribute(cache_bypass_hits);
+read_attribute(cache_bypass_misses);
+read_attribute(cache_hit_ratio);
+read_attribute(cache_miss_collisions);
+read_attribute(bypassed);
+
+SHOW(bch_stats)
+{
+ struct cache_stats *s =
+ container_of(kobj, struct cache_stats, kobj);
+#define var(stat) (s->stat >> 16)
+ var_print(cache_hits);
+ var_print(cache_misses);
+ var_print(cache_bypass_hits);
+ var_print(cache_bypass_misses);
+
+ sysfs_print(cache_hit_ratio,
+ DIV_SAFE(var(cache_hits) * 100,
+ var(cache_hits) + var(cache_misses)));
+
+ var_print(cache_miss_collisions);
+ sysfs_hprint(bypassed, var(sectors_bypassed) << 9);
+#undef var
+ return 0;
+}
+
+STORE(bch_stats)
+{
+ return size;
+}
+
+static void bch_stats_release(struct kobject *k)
+{
+}
+
+static struct attribute *bch_stats_attrs[] = {
+ &sysfs_cache_hits,
+ &sysfs_cache_misses,
+ &sysfs_cache_bypass_hits,
+ &sysfs_cache_bypass_misses,
+ &sysfs_cache_hit_ratio,
+ &sysfs_cache_miss_collisions,
+ &sysfs_bypassed,
+ NULL
+};
+ATTRIBUTE_GROUPS(bch_stats);
+static KTYPE(bch_stats);
+
+int bch_cache_accounting_add_kobjs(struct cache_accounting *acc,
+ struct kobject *parent)
+{
+ int ret = kobject_add(&acc->total.kobj, parent,
+ "stats_total");
+ ret = ret ?: kobject_add(&acc->five_minute.kobj, parent,
+ "stats_five_minute");
+ ret = ret ?: kobject_add(&acc->hour.kobj, parent,
+ "stats_hour");
+ ret = ret ?: kobject_add(&acc->day.kobj, parent,
+ "stats_day");
+ return ret;
+}
+
+void bch_cache_accounting_clear(struct cache_accounting *acc)
+{
+ acc->total.cache_hits = 0;
+ acc->total.cache_misses = 0;
+ acc->total.cache_bypass_hits = 0;
+ acc->total.cache_bypass_misses = 0;
+ acc->total.cache_miss_collisions = 0;
+ acc->total.sectors_bypassed = 0;
+}
+
+void bch_cache_accounting_destroy(struct cache_accounting *acc)
+{
+ kobject_put(&acc->total.kobj);
+ kobject_put(&acc->five_minute.kobj);
+ kobject_put(&acc->hour.kobj);
+ kobject_put(&acc->day.kobj);
+
+ atomic_set(&acc->closing, 1);
+ if (del_timer_sync(&acc->timer))
+ closure_return(&acc->cl);
+}
+
+/* EWMA scaling */
+
+static void scale_stat(unsigned long *stat)
+{
+ *stat = ewma_add(*stat, 0, accounting_weight, 0);
+}
+
+static void scale_stats(struct cache_stats *stats, unsigned long rescale_at)
+{
+ if (++stats->rescale == rescale_at) {
+ stats->rescale = 0;
+ scale_stat(&stats->cache_hits);
+ scale_stat(&stats->cache_misses);
+ scale_stat(&stats->cache_bypass_hits);
+ scale_stat(&stats->cache_bypass_misses);
+ scale_stat(&stats->cache_miss_collisions);
+ scale_stat(&stats->sectors_bypassed);
+ }
+}
+
+static void scale_accounting(struct timer_list *t)
+{
+ struct cache_accounting *acc = from_timer(acc, t, timer);
+
+#define move_stat(name) do { \
+ unsigned int t = atomic_xchg(&acc->collector.name, 0); \
+ t <<= 16; \
+ acc->five_minute.name += t; \
+ acc->hour.name += t; \
+ acc->day.name += t; \
+ acc->total.name += t; \
+} while (0)
+
+ move_stat(cache_hits);
+ move_stat(cache_misses);
+ move_stat(cache_bypass_hits);
+ move_stat(cache_bypass_misses);
+ move_stat(cache_miss_collisions);
+ move_stat(sectors_bypassed);
+
+ scale_stats(&acc->total, 0);
+ scale_stats(&acc->day, DAY_RESCALE);
+ scale_stats(&acc->hour, HOUR_RESCALE);
+ scale_stats(&acc->five_minute, FIVE_MINUTE_RESCALE);
+
+ acc->timer.expires += accounting_delay;
+
+ if (!atomic_read(&acc->closing))
+ add_timer(&acc->timer);
+ else
+ closure_return(&acc->cl);
+}
+
+static void mark_cache_stats(struct cache_stat_collector *stats,
+ bool hit, bool bypass)
+{
+ if (!bypass)
+ if (hit)
+ atomic_inc(&stats->cache_hits);
+ else
+ atomic_inc(&stats->cache_misses);
+ else
+ if (hit)
+ atomic_inc(&stats->cache_bypass_hits);
+ else
+ atomic_inc(&stats->cache_bypass_misses);
+}
+
+void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
+ bool hit, bool bypass)
+{
+ struct cached_dev *dc = container_of(d, struct cached_dev, disk);
+
+ mark_cache_stats(&dc->accounting.collector, hit, bypass);
+ mark_cache_stats(&c->accounting.collector, hit, bypass);
+}
+
+void bch_mark_cache_miss_collision(struct cache_set *c, struct bcache_device *d)
+{
+ struct cached_dev *dc = container_of(d, struct cached_dev, disk);
+
+ atomic_inc(&dc->accounting.collector.cache_miss_collisions);
+ atomic_inc(&c->accounting.collector.cache_miss_collisions);
+}
+
+void bch_mark_sectors_bypassed(struct cache_set *c, struct cached_dev *dc,
+ int sectors)
+{
+ atomic_add(sectors, &dc->accounting.collector.sectors_bypassed);
+ atomic_add(sectors, &c->accounting.collector.sectors_bypassed);
+}
+
+void bch_cache_accounting_init(struct cache_accounting *acc,
+ struct closure *parent)
+{
+ kobject_init(&acc->total.kobj, &bch_stats_ktype);
+ kobject_init(&acc->five_minute.kobj, &bch_stats_ktype);
+ kobject_init(&acc->hour.kobj, &bch_stats_ktype);
+ kobject_init(&acc->day.kobj, &bch_stats_ktype);
+
+ closure_init(&acc->cl, parent);
+ timer_setup(&acc->timer, scale_accounting, 0);
+ acc->timer.expires = jiffies + accounting_delay;
+ add_timer(&acc->timer);
+}
diff --git a/drivers/md/bcache/stats.h b/drivers/md/bcache/stats.h
new file mode 100644
index 0000000000..21b445f8af
--- /dev/null
+++ b/drivers/md/bcache/stats.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHE_STATS_H_
+#define _BCACHE_STATS_H_
+
+struct cache_stat_collector {
+ atomic_t cache_hits;
+ atomic_t cache_misses;
+ atomic_t cache_bypass_hits;
+ atomic_t cache_bypass_misses;
+ atomic_t cache_miss_collisions;
+ atomic_t sectors_bypassed;
+};
+
+struct cache_stats {
+ struct kobject kobj;
+
+ unsigned long cache_hits;
+ unsigned long cache_misses;
+ unsigned long cache_bypass_hits;
+ unsigned long cache_bypass_misses;
+ unsigned long cache_miss_collisions;
+ unsigned long sectors_bypassed;
+
+ unsigned int rescale;
+};
+
+struct cache_accounting {
+ struct closure cl;
+ struct timer_list timer;
+ atomic_t closing;
+
+ struct cache_stat_collector collector;
+
+ struct cache_stats total;
+ struct cache_stats five_minute;
+ struct cache_stats hour;
+ struct cache_stats day;
+};
+
+struct cache_set;
+struct cached_dev;
+struct bcache_device;
+
+void bch_cache_accounting_init(struct cache_accounting *acc,
+ struct closure *parent);
+
+int bch_cache_accounting_add_kobjs(struct cache_accounting *acc,
+ struct kobject *parent);
+
+void bch_cache_accounting_clear(struct cache_accounting *acc);
+
+void bch_cache_accounting_destroy(struct cache_accounting *acc);
+
+void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
+ bool hit, bool bypass);
+void bch_mark_cache_miss_collision(struct cache_set *c,
+ struct bcache_device *d);
+void bch_mark_sectors_bypassed(struct cache_set *c,
+ struct cached_dev *dc,
+ int sectors);
+
+#endif /* _BCACHE_STATS_H_ */
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
new file mode 100644
index 0000000000..1e677af385
--- /dev/null
+++ b/drivers/md/bcache/super.c
@@ -0,0 +1,2934 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * bcache setup/teardown code, and some metadata io - read a superblock and
+ * figure out what to do with it.
+ *
+ * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "bcache.h"
+#include "btree.h"
+#include "debug.h"
+#include "extents.h"
+#include "request.h"
+#include "writeback.h"
+#include "features.h"
+
+#include <linux/blkdev.h>
+#include <linux/pagemap.h>
+#include <linux/debugfs.h>
+#include <linux/idr.h>
+#include <linux/kthread.h>
+#include <linux/workqueue.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/reboot.h>
+#include <linux/sysfs.h>
+
+unsigned int bch_cutoff_writeback;
+unsigned int bch_cutoff_writeback_sync;
+
+static const char bcache_magic[] = {
+ 0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca,
+ 0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81
+};
+
+static const char invalid_uuid[] = {
+ 0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78,
+ 0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99
+};
+
+static struct kobject *bcache_kobj;
+struct mutex bch_register_lock;
+bool bcache_is_reboot;
+LIST_HEAD(bch_cache_sets);
+static LIST_HEAD(uncached_devices);
+
+static int bcache_major;
+static DEFINE_IDA(bcache_device_idx);
+static wait_queue_head_t unregister_wait;
+struct workqueue_struct *bcache_wq;
+struct workqueue_struct *bch_flush_wq;
+struct workqueue_struct *bch_journal_wq;
+
+
+#define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE)
+/* limit on the number of partitions on a single bcache device */
+#define BCACHE_MINORS 128
+/* limit on the number of bcache devices on a single system */
+#define BCACHE_DEVICE_IDX_MAX ((1U << MINORBITS)/BCACHE_MINORS)
+
+/* Superblock */
+
+static unsigned int get_bucket_size(struct cache_sb *sb, struct cache_sb_disk *s)
+{
+ unsigned int bucket_size = le16_to_cpu(s->bucket_size);
+
+ if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) {
+ if (bch_has_feature_large_bucket(sb)) {
+ unsigned int max, order;
+
+ max = sizeof(unsigned int) * BITS_PER_BYTE - 1;
+ order = le16_to_cpu(s->bucket_size);
+ /*
+			 * The bcache tool makes sure the overflow won't
+			 * happen; an error message here is enough.
+ */
+ if (order > max)
+ pr_err("Bucket size (1 << %u) overflows\n",
+ order);
+ bucket_size = 1 << order;
+ } else if (bch_has_feature_obso_large_bucket(sb)) {
+ bucket_size +=
+ le16_to_cpu(s->obso_bucket_size_hi) << 16;
+ }
+ }
+
+ return bucket_size;
+}
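+
+/*
+ * Worked example (illustrative numbers): on a superblock with the
+ * large_bucket feature, the on-disk bucket_size field holds log2 of the
+ * bucket size in sectors, so a stored value of 13 means 1 << 13 = 8192
+ * sectors, i.e. a 4 MiB bucket. With the obsolete obso_large_bucket layout
+ * the size is instead composed from two 16-bit halves:
+ * bucket_size + (obso_bucket_size_hi << 16).
+ */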
+
+static const char *read_super_common(struct cache_sb *sb, struct block_device *bdev,
+ struct cache_sb_disk *s)
+{
+ const char *err;
+ unsigned int i;
+
+	sb->first_bucket = le16_to_cpu(s->first_bucket);
+ sb->nbuckets = le64_to_cpu(s->nbuckets);
+ sb->bucket_size = get_bucket_size(sb, s);
+
+ sb->nr_in_set = le16_to_cpu(s->nr_in_set);
+ sb->nr_this_dev = le16_to_cpu(s->nr_this_dev);
+
+ err = "Too many journal buckets";
+ if (sb->keys > SB_JOURNAL_BUCKETS)
+ goto err;
+
+ err = "Too many buckets";
+ if (sb->nbuckets > LONG_MAX)
+ goto err;
+
+ err = "Not enough buckets";
+ if (sb->nbuckets < 1 << 7)
+ goto err;
+
+ err = "Bad block size (not power of 2)";
+ if (!is_power_of_2(sb->block_size))
+ goto err;
+
+ err = "Bad block size (larger than page size)";
+ if (sb->block_size > PAGE_SECTORS)
+ goto err;
+
+ err = "Bad bucket size (not power of 2)";
+ if (!is_power_of_2(sb->bucket_size))
+ goto err;
+
+ err = "Bad bucket size (smaller than page size)";
+ if (sb->bucket_size < PAGE_SECTORS)
+ goto err;
+
+ err = "Invalid superblock: device too small";
+ if (get_capacity(bdev->bd_disk) <
+ sb->bucket_size * sb->nbuckets)
+ goto err;
+
+ err = "Bad UUID";
+ if (bch_is_zero(sb->set_uuid, 16))
+ goto err;
+
+ err = "Bad cache device number in set";
+ if (!sb->nr_in_set ||
+ sb->nr_in_set <= sb->nr_this_dev ||
+ sb->nr_in_set > MAX_CACHES_PER_SET)
+ goto err;
+
+ err = "Journal buckets not sequential";
+ for (i = 0; i < sb->keys; i++)
+ if (sb->d[i] != sb->first_bucket + i)
+ goto err;
+
+ err = "Too many journal buckets";
+ if (sb->first_bucket + sb->keys > sb->nbuckets)
+ goto err;
+
+ err = "Invalid superblock: first bucket comes before end of super";
+ if (sb->first_bucket * sb->bucket_size < 16)
+ goto err;
+
+ err = NULL;
+err:
+ return err;
+}
+
+
+static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
+ struct cache_sb_disk **res)
+{
+ const char *err;
+ struct cache_sb_disk *s;
+ struct page *page;
+ unsigned int i;
+
+ page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
+ SB_OFFSET >> PAGE_SHIFT, GFP_KERNEL);
+ if (IS_ERR(page))
+ return "IO error";
+ s = page_address(page) + offset_in_page(SB_OFFSET);
+
+ sb->offset = le64_to_cpu(s->offset);
+ sb->version = le64_to_cpu(s->version);
+
+ memcpy(sb->magic, s->magic, 16);
+ memcpy(sb->uuid, s->uuid, 16);
+ memcpy(sb->set_uuid, s->set_uuid, 16);
+ memcpy(sb->label, s->label, SB_LABEL_SIZE);
+
+ sb->flags = le64_to_cpu(s->flags);
+ sb->seq = le64_to_cpu(s->seq);
+ sb->last_mount = le32_to_cpu(s->last_mount);
+ sb->keys = le16_to_cpu(s->keys);
+
+ for (i = 0; i < SB_JOURNAL_BUCKETS; i++)
+ sb->d[i] = le64_to_cpu(s->d[i]);
+
+ pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u\n",
+ sb->version, sb->flags, sb->seq, sb->keys);
+
+ err = "Not a bcache superblock (bad offset)";
+ if (sb->offset != SB_SECTOR)
+ goto err;
+
+ err = "Not a bcache superblock (bad magic)";
+ if (memcmp(sb->magic, bcache_magic, 16))
+ goto err;
+
+ err = "Bad checksum";
+ if (s->csum != csum_set(s))
+ goto err;
+
+ err = "Bad UUID";
+ if (bch_is_zero(sb->uuid, 16))
+ goto err;
+
+ sb->block_size = le16_to_cpu(s->block_size);
+
+ err = "Superblock block size smaller than device block size";
+ if (sb->block_size << 9 < bdev_logical_block_size(bdev))
+ goto err;
+
+ switch (sb->version) {
+ case BCACHE_SB_VERSION_BDEV:
+ sb->data_offset = BDEV_DATA_START_DEFAULT;
+ break;
+ case BCACHE_SB_VERSION_BDEV_WITH_OFFSET:
+ case BCACHE_SB_VERSION_BDEV_WITH_FEATURES:
+ sb->data_offset = le64_to_cpu(s->data_offset);
+
+ err = "Bad data offset";
+ if (sb->data_offset < BDEV_DATA_START_DEFAULT)
+ goto err;
+
+ break;
+ case BCACHE_SB_VERSION_CDEV:
+ case BCACHE_SB_VERSION_CDEV_WITH_UUID:
+ err = read_super_common(sb, bdev, s);
+ if (err)
+ goto err;
+ break;
+ case BCACHE_SB_VERSION_CDEV_WITH_FEATURES:
+ /*
+ * Feature bits are needed in read_super_common(),
+ * so convert them first.
+ */
+ sb->feature_compat = le64_to_cpu(s->feature_compat);
+ sb->feature_incompat = le64_to_cpu(s->feature_incompat);
+ sb->feature_ro_compat = le64_to_cpu(s->feature_ro_compat);
+
+ /* Check incompatible features */
+ err = "Unsupported compatible feature found";
+ if (bch_has_unknown_compat_features(sb))
+ goto err;
+
+ err = "Unsupported read-only compatible feature found";
+ if (bch_has_unknown_ro_compat_features(sb))
+ goto err;
+
+ err = "Unsupported incompatible feature found";
+ if (bch_has_unknown_incompat_features(sb))
+ goto err;
+
+ err = read_super_common(sb, bdev, s);
+ if (err)
+ goto err;
+ break;
+ default:
+ err = "Unsupported superblock version";
+ goto err;
+ }
+
+ sb->last_mount = (u32)ktime_get_real_seconds();
+ *res = s;
+ return NULL;
+err:
+ put_page(page);
+ return err;
+}
+
+static void write_bdev_super_endio(struct bio *bio)
+{
+ struct cached_dev *dc = bio->bi_private;
+
+ if (bio->bi_status)
+ bch_count_backing_io_errors(dc, bio);
+
+ closure_put(&dc->sb_write);
+}
+
+static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out,
+ struct bio *bio)
+{
+ unsigned int i;
+
+ bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META;
+ bio->bi_iter.bi_sector = SB_SECTOR;
+ __bio_add_page(bio, virt_to_page(out), SB_SIZE,
+ offset_in_page(out));
+
+ out->offset = cpu_to_le64(sb->offset);
+
+ memcpy(out->uuid, sb->uuid, 16);
+ memcpy(out->set_uuid, sb->set_uuid, 16);
+ memcpy(out->label, sb->label, SB_LABEL_SIZE);
+
+ out->flags = cpu_to_le64(sb->flags);
+ out->seq = cpu_to_le64(sb->seq);
+
+ out->last_mount = cpu_to_le32(sb->last_mount);
+ out->first_bucket = cpu_to_le16(sb->first_bucket);
+ out->keys = cpu_to_le16(sb->keys);
+
+ for (i = 0; i < sb->keys; i++)
+ out->d[i] = cpu_to_le64(sb->d[i]);
+
+ if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) {
+ out->feature_compat = cpu_to_le64(sb->feature_compat);
+ out->feature_incompat = cpu_to_le64(sb->feature_incompat);
+ out->feature_ro_compat = cpu_to_le64(sb->feature_ro_compat);
+ }
+
+ out->version = cpu_to_le64(sb->version);
+ out->csum = csum_set(out);
+
+ pr_debug("ver %llu, flags %llu, seq %llu\n",
+ sb->version, sb->flags, sb->seq);
+
+ submit_bio(bio);
+}
+
+static void bch_write_bdev_super_unlock(struct closure *cl)
+{
+ struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);
+
+ up(&dc->sb_write_mutex);
+}
+
+void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
+{
+ struct closure *cl = &dc->sb_write;
+ struct bio *bio = &dc->sb_bio;
+
+ down(&dc->sb_write_mutex);
+ closure_init(cl, parent);
+
+ bio_init(bio, dc->bdev, dc->sb_bv, 1, 0);
+ bio->bi_end_io = write_bdev_super_endio;
+ bio->bi_private = dc;
+
+ closure_get(cl);
+ /* I/O request sent to backing device */
+ __write_super(&dc->sb, dc->sb_disk, bio);
+
+ closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
+}
+
+static void write_super_endio(struct bio *bio)
+{
+ struct cache *ca = bio->bi_private;
+
+ /* is_read = 0 */
+ bch_count_io_errors(ca, bio->bi_status, 0,
+ "writing superblock");
+ closure_put(&ca->set->sb_write);
+}
+
+static void bcache_write_super_unlock(struct closure *cl)
+{
+ struct cache_set *c = container_of(cl, struct cache_set, sb_write);
+
+ up(&c->sb_write_mutex);
+}
+
+void bcache_write_super(struct cache_set *c)
+{
+ struct closure *cl = &c->sb_write;
+ struct cache *ca = c->cache;
+ struct bio *bio = &ca->sb_bio;
+ unsigned int version = BCACHE_SB_VERSION_CDEV_WITH_UUID;
+
+ down(&c->sb_write_mutex);
+ closure_init(cl, &c->cl);
+
+ ca->sb.seq++;
+
+ if (ca->sb.version < version)
+ ca->sb.version = version;
+
+ bio_init(bio, ca->bdev, ca->sb_bv, 1, 0);
+ bio->bi_end_io = write_super_endio;
+ bio->bi_private = ca;
+
+ closure_get(cl);
+ __write_super(&ca->sb, ca->sb_disk, bio);
+
+ closure_return_with_destructor(cl, bcache_write_super_unlock);
+}
+
+/* UUID io */
+
+static void uuid_endio(struct bio *bio)
+{
+ struct closure *cl = bio->bi_private;
+ struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
+
+ cache_set_err_on(bio->bi_status, c, "accessing uuids");
+ bch_bbio_free(bio, c);
+ closure_put(cl);
+}
+
+static void uuid_io_unlock(struct closure *cl)
+{
+ struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
+
+ up(&c->uuid_write_mutex);
+}
+
+static void uuid_io(struct cache_set *c, blk_opf_t opf, struct bkey *k,
+ struct closure *parent)
+{
+ struct closure *cl = &c->uuid_write;
+ struct uuid_entry *u;
+ unsigned int i;
+ char buf[80];
+
+ BUG_ON(!parent);
+ down(&c->uuid_write_mutex);
+ closure_init(cl, parent);
+
+ for (i = 0; i < KEY_PTRS(k); i++) {
+ struct bio *bio = bch_bbio_alloc(c);
+
+ bio->bi_opf = opf | REQ_SYNC | REQ_META;
+ bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
+
+ bio->bi_end_io = uuid_endio;
+ bio->bi_private = cl;
+ bch_bio_map(bio, c->uuids);
+
+ bch_submit_bbio(bio, c, k, i);
+
+ if ((opf & REQ_OP_MASK) != REQ_OP_WRITE)
+ break;
+ }
+
+ bch_extent_to_text(buf, sizeof(buf), k);
+ pr_debug("%s UUIDs at %s\n", (opf & REQ_OP_MASK) == REQ_OP_WRITE ?
+ "wrote" : "read", buf);
+
+ for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
+ if (!bch_is_zero(u->uuid, 16))
+ pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u\n",
+ u - c->uuids, u->uuid, u->label,
+ u->first_reg, u->last_reg, u->invalidated);
+
+ closure_return_with_destructor(cl, uuid_io_unlock);
+}
+
+static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
+{
+ struct bkey *k = &j->uuid_bucket;
+
+ if (__bch_btree_ptr_invalid(c, k))
+ return "bad uuid pointer";
+
+ bkey_copy(&c->uuid_bucket, k);
+ uuid_io(c, REQ_OP_READ, k, cl);
+
+ if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
+ struct uuid_entry_v0 *u0 = (void *) c->uuids;
+ struct uuid_entry *u1 = (void *) c->uuids;
+ int i;
+
+ closure_sync(cl);
+
+ /*
+ * Since the new uuid entry is bigger than the old one, we have to
+ * convert in place, starting at the highest memory address and
+ * working down.
+ */
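+ /*
+ * Illustrative sketch of why this is safe: sizeof(*u1) > sizeof(*u0),
+ * so u1[i] starts at an address >= u0[i], and everything written while
+ * converting entry i lies at or above that address; the not-yet-read
+ * entries u0[0..i-1] below it are therefore still intact when their
+ * turn comes, which is why the loop walks from the highest index down.
+ */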
+
+ for (i = c->nr_uuids - 1;
+ i >= 0;
+ --i) {
+ memcpy(u1[i].uuid, u0[i].uuid, 16);
+ memcpy(u1[i].label, u0[i].label, 32);
+
+ u1[i].first_reg = u0[i].first_reg;
+ u1[i].last_reg = u0[i].last_reg;
+ u1[i].invalidated = u0[i].invalidated;
+
+ u1[i].flags = 0;
+ u1[i].sectors = 0;
+ }
+ }
+
+ return NULL;
+}
+
+static int __uuid_write(struct cache_set *c)
+{
+ BKEY_PADDED(key) k;
+ struct closure cl;
+ struct cache *ca = c->cache;
+ unsigned int size;
+
+ closure_init_stack(&cl);
+ lockdep_assert_held(&bch_register_lock);
+
+ if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, true))
+ return 1;
+
+ size = meta_bucket_pages(&ca->sb) * PAGE_SECTORS;
+ SET_KEY_SIZE(&k.key, size);
+ uuid_io(c, REQ_OP_WRITE, &k.key, &cl);
+ closure_sync(&cl);
+
+ /* Only one bucket used for uuid write */
+ atomic_long_add(ca->sb.bucket_size, &ca->meta_sectors_written);
+
+ bkey_copy(&c->uuid_bucket, &k.key);
+ bkey_put(c, &k.key);
+ return 0;
+}
+
+int bch_uuid_write(struct cache_set *c)
+{
+ int ret = __uuid_write(c);
+
+ if (!ret)
+ bch_journal_meta(c, NULL);
+
+ return ret;
+}
+
+static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
+{
+ struct uuid_entry *u;
+
+ for (u = c->uuids;
+ u < c->uuids + c->nr_uuids; u++)
+ if (!memcmp(u->uuid, uuid, 16))
+ return u;
+
+ return NULL;
+}
+
+static struct uuid_entry *uuid_find_empty(struct cache_set *c)
+{
+ static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
+
+ return uuid_find(c, zero_uuid);
+}
+
+/*
+ * Bucket priorities/gens:
+ *
+ * For each bucket, we store on disk its
+ * 8 bit gen
+ * 16 bit priority
+ *
+ * See alloc.c for an explanation of the gen. The priority is used to implement
+ * lru (and in the future other) cache replacement policies; for most purposes
+ * it's just an opaque integer.
+ *
+ * The gens and the priorities don't have a whole lot to do with each other, and
+ * it's actually the gens that must be written out at specific times - it's no
+ * big deal if the priorities don't get written; if we lose them we just reuse
+ * buckets in a suboptimal order.
+ *
+ * On disk they're stored in a packed array, in as many buckets as are required
+ * to fit them all. The buckets we use to store them form a list; the journal
+ * header points to the first bucket, the first bucket points to the second
+ * bucket, et cetera.
+ *
+ * This code is used by the allocation code; periodically (whenever it runs out
+ * of buckets to allocate from) the allocation code will invalidate some
+ * buckets, but it can't use those buckets until their new gens are safely on
+ * disk.
+ */
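+
+/*
+ * Rough sketch of the on-disk layout (see struct prio_set and struct
+ * bucket_disk in bcache.h): each prio bucket starts with a small header
+ * (csum, magic, seq, next_bucket, ...) followed by a packed array of
+ * {16-bit prio, 8-bit gen} entries, so prios_per_bucket() works out to
+ * roughly (meta_bucket_bytes - header size) / 3. next_bucket chains to
+ * the following prio bucket, and the journal records where the chain
+ * starts.
+ */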
+
+static void prio_endio(struct bio *bio)
+{
+ struct cache *ca = bio->bi_private;
+
+ cache_set_err_on(bio->bi_status, ca->set, "accessing priorities");
+ bch_bbio_free(bio, ca->set);
+ closure_put(&ca->prio);
+}
+
+static void prio_io(struct cache *ca, uint64_t bucket, blk_opf_t opf)
+{
+ struct closure *cl = &ca->prio;
+ struct bio *bio = bch_bbio_alloc(ca->set);
+
+ closure_init_stack(cl);
+
+ bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;
+ bio_set_dev(bio, ca->bdev);
+ bio->bi_iter.bi_size = meta_bucket_bytes(&ca->sb);
+
+ bio->bi_end_io = prio_endio;
+ bio->bi_private = ca;
+ bio->bi_opf = opf | REQ_SYNC | REQ_META;
+ bch_bio_map(bio, ca->disk_buckets);
+
+ closure_bio_submit(ca->set, bio, &ca->prio);
+ closure_sync(cl);
+}
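+
+/*
+ * Behavioral note (sketch): prio_io() is synchronous - closure_bio_submit()
+ * takes a reference on ca->prio and closure_sync() above waits for
+ * prio_endio() to drop it, so callers may reuse ca->disk_buckets as soon
+ * as it returns.
+ */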
+
+int bch_prio_write(struct cache *ca, bool wait)
+{
+ int i;
+ struct bucket *b;
+ struct closure cl;
+
+ pr_debug("free_prio=%zu, free_none=%zu, free_inc=%zu\n",
+ fifo_used(&ca->free[RESERVE_PRIO]),
+ fifo_used(&ca->free[RESERVE_NONE]),
+ fifo_used(&ca->free_inc));
+
+ /*
+ * Pre-check whether there are enough free buckets. In the non-blocking
+ * case it's better to fail early than to start allocating buckets and
+ * have to clean up later on failure.
+ */
+ if (!wait) {
+ size_t avail = fifo_used(&ca->free[RESERVE_PRIO]) +
+ fifo_used(&ca->free[RESERVE_NONE]);
+ if (prio_buckets(ca) > avail)
+ return -ENOMEM;
+ }
+
+ closure_init_stack(&cl);
+
+ lockdep_assert_held(&ca->set->bucket_lock);
+
+ ca->disk_buckets->seq++;
+
+ atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
+ &ca->meta_sectors_written);
+
+ for (i = prio_buckets(ca) - 1; i >= 0; --i) {
+ long bucket;
+ struct prio_set *p = ca->disk_buckets;
+ struct bucket_disk *d = p->data;
+ struct bucket_disk *end = d + prios_per_bucket(ca);
+
+ for (b = ca->buckets + i * prios_per_bucket(ca);
+ b < ca->buckets + ca->sb.nbuckets && d < end;
+ b++, d++) {
+ d->prio = cpu_to_le16(b->prio);
+ d->gen = b->gen;
+ }
+
+ p->next_bucket = ca->prio_buckets[i + 1];
+ p->magic = pset_magic(&ca->sb);
+ p->csum = bch_crc64(&p->magic, meta_bucket_bytes(&ca->sb) - 8);
+
+ bucket = bch_bucket_alloc(ca, RESERVE_PRIO, wait);
+ BUG_ON(bucket == -1);
+
+ mutex_unlock(&ca->set->bucket_lock);
+ prio_io(ca, bucket, REQ_OP_WRITE);
+ mutex_lock(&ca->set->bucket_lock);
+
+ ca->prio_buckets[i] = bucket;
+ atomic_dec_bug(&ca->buckets[bucket].pin);
+ }
+
+ mutex_unlock(&ca->set->bucket_lock);
+
+ bch_journal_meta(ca->set, &cl);
+ closure_sync(&cl);
+
+ mutex_lock(&ca->set->bucket_lock);
+
+ /*
+ * We don't want the old priorities to get garbage collected until after
+ * we've finished writing the new ones and they've been journalled.
+ */
+ for (i = 0; i < prio_buckets(ca); i++) {
+ if (ca->prio_last_buckets[i])
+ __bch_bucket_free(ca,
+ &ca->buckets[ca->prio_last_buckets[i]]);
+
+ ca->prio_last_buckets[i] = ca->prio_buckets[i];
+ }
+ return 0;
+}
+
+static int prio_read(struct cache *ca, uint64_t bucket)
+{
+ struct prio_set *p = ca->disk_buckets;
+ struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
+ struct bucket *b;
+ unsigned int bucket_nr = 0;
+ int ret = -EIO;
+
+ for (b = ca->buckets;
+ b < ca->buckets + ca->sb.nbuckets;
+ b++, d++) {
+ if (d == end) {
+ ca->prio_buckets[bucket_nr] = bucket;
+ ca->prio_last_buckets[bucket_nr] = bucket;
+ bucket_nr++;
+
+ prio_io(ca, bucket, REQ_OP_READ);
+
+ if (p->csum !=
+ bch_crc64(&p->magic, meta_bucket_bytes(&ca->sb) - 8)) {
+ pr_warn("bad csum reading priorities\n");
+ goto out;
+ }
+
+ if (p->magic != pset_magic(&ca->sb)) {
+ pr_warn("bad magic reading priorities\n");
+ goto out;
+ }
+
+ bucket = p->next_bucket;
+ d = p->data;
+ }
+
+ b->prio = le16_to_cpu(d->prio);
+ b->gen = b->last_gc = d->gen;
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
+/* Bcache device */
+
+static int open_dev(struct gendisk *disk, blk_mode_t mode)
+{
+ struct bcache_device *d = disk->private_data;
+
+ if (test_bit(BCACHE_DEV_CLOSING, &d->flags))
+ return -ENXIO;
+
+ closure_get(&d->cl);
+ return 0;
+}
+
+static void release_dev(struct gendisk *b)
+{
+ struct bcache_device *d = b->private_data;
+
+ closure_put(&d->cl);
+}
+
+static int ioctl_dev(struct block_device *b, blk_mode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ struct bcache_device *d = b->bd_disk->private_data;
+
+ return d->ioctl(d, mode, cmd, arg);
+}
+
+static const struct block_device_operations bcache_cached_ops = {
+ .submit_bio = cached_dev_submit_bio,
+ .open = open_dev,
+ .release = release_dev,
+ .ioctl = ioctl_dev,
+ .owner = THIS_MODULE,
+};
+
+static const struct block_device_operations bcache_flash_ops = {
+ .submit_bio = flash_dev_submit_bio,
+ .open = open_dev,
+ .release = release_dev,
+ .ioctl = ioctl_dev,
+ .owner = THIS_MODULE,
+};
+
+void bcache_device_stop(struct bcache_device *d)
+{
+ if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags))
+ /*
+ * closure_fn set to
+ * - cached device: cached_dev_flush()
+ * - flash dev: flash_dev_flush()
+ */
+ closure_queue(&d->cl);
+}
+
+static void bcache_device_unlink(struct bcache_device *d)
+{
+ lockdep_assert_held(&bch_register_lock);
+
+ if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
+ struct cache *ca = d->c->cache;
+
+ sysfs_remove_link(&d->c->kobj, d->name);
+ sysfs_remove_link(&d->kobj, "cache");
+
+ bd_unlink_disk_holder(ca->bdev, d->disk);
+ }
+}
+
+static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
+ const char *name)
+{
+ struct cache *ca = c->cache;
+ int ret;
+
+ bd_link_disk_holder(ca->bdev, d->disk);
+
+ snprintf(d->name, BCACHEDEVNAME_SIZE,
+ "%s%u", name, d->id);
+
+ ret = sysfs_create_link(&d->kobj, &c->kobj, "cache");
+ if (ret < 0)
+ pr_err("Couldn't create device -> cache set symlink\n");
+
+ ret = sysfs_create_link(&c->kobj, &d->kobj, d->name);
+ if (ret < 0)
+ pr_err("Couldn't create cache set -> device symlink\n");
+
+ clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
+}
+
+static void bcache_device_detach(struct bcache_device *d)
+{
+ lockdep_assert_held(&bch_register_lock);
+
+ atomic_dec(&d->c->attached_dev_nr);
+
+ if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) {
+ struct uuid_entry *u = d->c->uuids + d->id;
+
+ SET_UUID_FLASH_ONLY(u, 0);
+ memcpy(u->uuid, invalid_uuid, 16);
+ u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds());
+ bch_uuid_write(d->c);
+ }
+
+ bcache_device_unlink(d);
+
+ d->c->devices[d->id] = NULL;
+ closure_put(&d->c->caching);
+ d->c = NULL;
+}
+
+static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
+ unsigned int id)
+{
+ d->id = id;
+ d->c = c;
+ c->devices[id] = d;
+
+ if (id >= c->devices_max_used)
+ c->devices_max_used = id + 1;
+
+ closure_get(&c->caching);
+}
+
+static inline int first_minor_to_idx(int first_minor)
+{
+ return (first_minor/BCACHE_MINORS);
+}
+
+static inline int idx_to_first_minor(int idx)
+{
+ return (idx * BCACHE_MINORS);
+}
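+
+/*
+ * Each bcache device reserves a block of BCACHE_MINORS minor numbers, so
+ * (as a sketch) idx 0 maps to first_minor 0, idx 1 to BCACHE_MINORS,
+ * idx 2 to 2 * BCACHE_MINORS, and so on; the two helpers above convert
+ * between the two numbering schemes.
+ */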
+
+static void bcache_device_free(struct bcache_device *d)
+{
+ struct gendisk *disk = d->disk;
+
+ lockdep_assert_held(&bch_register_lock);
+
+ if (disk)
+ pr_info("%s stopped\n", disk->disk_name);
+ else
+ pr_err("bcache device (NULL gendisk) stopped\n");
+
+ if (d->c)
+ bcache_device_detach(d);
+
+ if (disk) {
+ ida_simple_remove(&bcache_device_idx,
+ first_minor_to_idx(disk->first_minor));
+ put_disk(disk);
+ }
+
+ bioset_exit(&d->bio_split);
+ kvfree(d->full_dirty_stripes);
+ kvfree(d->stripe_sectors_dirty);
+
+ closure_debug_destroy(&d->cl);
+}
+
+static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
+ sector_t sectors, struct block_device *cached_bdev,
+ const struct block_device_operations *ops)
+{
+ struct request_queue *q;
+ const size_t max_stripes = min_t(size_t, INT_MAX,
+ SIZE_MAX / sizeof(atomic_t));
+ uint64_t n;
+ int idx;
+
+ if (!d->stripe_size)
+ d->stripe_size = 1 << 31;
+ else if (d->stripe_size < BCH_MIN_STRIPE_SZ)
+ d->stripe_size = roundup(BCH_MIN_STRIPE_SZ, d->stripe_size);
+
+ n = DIV_ROUND_UP_ULL(sectors, d->stripe_size);
+ if (!n || n > max_stripes) {
+ pr_err("nr_stripes too large or invalid: %llu (start sector beyond end of disk?)\n",
+ n);
+ return -ENOMEM;
+ }
+ d->nr_stripes = n;
+
+ n = d->nr_stripes * sizeof(atomic_t);
+ d->stripe_sectors_dirty = kvzalloc(n, GFP_KERNEL);
+ if (!d->stripe_sectors_dirty)
+ return -ENOMEM;
+
+ n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
+ d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL);
+ if (!d->full_dirty_stripes)
+ goto out_free_stripe_sectors_dirty;
+
+ idx = ida_simple_get(&bcache_device_idx, 0,
+ BCACHE_DEVICE_IDX_MAX, GFP_KERNEL);
+ if (idx < 0)
+ goto out_free_full_dirty_stripes;
+
+ if (bioset_init(&d->bio_split, 4, offsetof(struct bbio, bio),
+ BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
+ goto out_ida_remove;
+
+ d->disk = blk_alloc_disk(NUMA_NO_NODE);
+ if (!d->disk)
+ goto out_bioset_exit;
+
+ set_capacity(d->disk, sectors);
+ snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", idx);
+
+ d->disk->major = bcache_major;
+ d->disk->first_minor = idx_to_first_minor(idx);
+ d->disk->minors = BCACHE_MINORS;
+ d->disk->fops = ops;
+ d->disk->private_data = d;
+
+ q = d->disk->queue;
+ q->limits.max_hw_sectors = UINT_MAX;
+ q->limits.max_sectors = UINT_MAX;
+ q->limits.max_segment_size = UINT_MAX;
+ q->limits.max_segments = BIO_MAX_VECS;
+ blk_queue_max_discard_sectors(q, UINT_MAX);
+ q->limits.discard_granularity = 512;
+ q->limits.io_min = block_size;
+ q->limits.logical_block_size = block_size;
+ q->limits.physical_block_size = block_size;
+
+ if (q->limits.logical_block_size > PAGE_SIZE && cached_bdev) {
+ /*
+ * This should only happen with BCACHE_SB_VERSION_BDEV.
+ * Block/page size is checked for BCACHE_SB_VERSION_CDEV.
+ */
+ pr_info("%s: sb/logical block size (%u) greater than page size (%lu) falling back to device logical block size (%u)\n",
+ d->disk->disk_name, q->limits.logical_block_size,
+ PAGE_SIZE, bdev_logical_block_size(cached_bdev));
+
+ /* This also adjusts physical block size/min io size if needed */
+ blk_queue_logical_block_size(q, bdev_logical_block_size(cached_bdev));
+ }
+
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue);
+
+ blk_queue_write_cache(q, true, true);
+
+ return 0;
+
+out_bioset_exit:
+ bioset_exit(&d->bio_split);
+out_ida_remove:
+ ida_simple_remove(&bcache_device_idx, idx);
+out_free_full_dirty_stripes:
+ kvfree(d->full_dirty_stripes);
+out_free_stripe_sectors_dirty:
+ kvfree(d->stripe_sectors_dirty);
+ return -ENOMEM;
+
+}
+
+/* Cached device */
+
+static void calc_cached_dev_sectors(struct cache_set *c)
+{
+ uint64_t sectors = 0;
+ struct cached_dev *dc;
+
+ list_for_each_entry(dc, &c->cached_devs, list)
+ sectors += bdev_nr_sectors(dc->bdev);
+
+ c->cached_dev_sectors = sectors;
+}
+
+#define BACKING_DEV_OFFLINE_TIMEOUT 5
+static int cached_dev_status_update(void *arg)
+{
+ struct cached_dev *dc = arg;
+ struct request_queue *q;
+
+ /*
+ * If this thread is being stopped from outside, quit here directly.
+ * dc->io_disable might be set via the sysfs interface, so check it
+ * here too.
+ */
+ while (!kthread_should_stop() && !dc->io_disable) {
+ q = bdev_get_queue(dc->bdev);
+ if (blk_queue_dying(q))
+ dc->offline_seconds++;
+ else
+ dc->offline_seconds = 0;
+
+ if (dc->offline_seconds >= BACKING_DEV_OFFLINE_TIMEOUT) {
+ pr_err("%pg: device offline for %d seconds\n",
+ dc->bdev,
+ BACKING_DEV_OFFLINE_TIMEOUT);
+ pr_err("%s: disable I/O request due to backing device offline\n",
+ dc->disk.name);
+ dc->io_disable = true;
+ /* let others know earlier that io_disable is true */
+ smp_mb();
+ bcache_device_stop(&dc->disk);
+ break;
+ }
+ schedule_timeout_interruptible(HZ);
+ }
+
+ wait_for_kthread_stop();
+ return 0;
+}
+
+
+int bch_cached_dev_run(struct cached_dev *dc)
+{
+ int ret = 0;
+ struct bcache_device *d = &dc->disk;
+ char *buf = kmemdup_nul(dc->sb.label, SB_LABEL_SIZE, GFP_KERNEL);
+ char *env[] = {
+ "DRIVER=bcache",
+ kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid),
+ kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf ? : ""),
+ NULL,
+ };
+
+ if (dc->io_disable) {
+ pr_err("I/O disabled on cached dev %pg\n", dc->bdev);
+ ret = -EIO;
+ goto out;
+ }
+
+ if (atomic_xchg(&dc->running, 1)) {
+ pr_info("cached dev %pg is running already\n", dc->bdev);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (!d->c &&
+ BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
+ struct closure cl;
+
+ closure_init_stack(&cl);
+
+ SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE);
+ bch_write_bdev_super(dc, &cl);
+ closure_sync(&cl);
+ }
+
+ ret = add_disk(d->disk);
+ if (ret)
+ goto out;
+ bd_link_disk_holder(dc->bdev, dc->disk.disk);
+ /*
+ * This won't show up in the uevent file; use 'udevadm monitor -e' instead.
+ * Only class / kset properties are persistent.
+ */
+ kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
+
+ if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
+ sysfs_create_link(&disk_to_dev(d->disk)->kobj,
+ &d->kobj, "bcache")) {
+ pr_err("Couldn't create bcache dev <-> disk sysfs symlinks\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ dc->status_update_thread = kthread_run(cached_dev_status_update,
+ dc, "bcache_status_update");
+ if (IS_ERR(dc->status_update_thread)) {
+ pr_warn("failed to create bcache_status_update kthread, continue to run without monitoring backing device status\n");
+ }
+
+out:
+ kfree(env[1]);
+ kfree(env[2]);
+ kfree(buf);
+ return ret;
+}
+
+/*
+ * If BCACHE_DEV_RATE_DW_RUNNING is set, the routine of the delayed
+ * work dc->writeback_rate_update is running. Wait until the routine
+ * quits (BCACHE_DEV_RATE_DW_RUNNING is cleared), then cancel it. If
+ * BCACHE_DEV_RATE_DW_RUNNING is still set after the timeout expires,
+ * give up waiting and cancel it anyway.
+ */
+static void cancel_writeback_rate_update_dwork(struct cached_dev *dc)
+{
+ int time_out = WRITEBACK_RATE_UPDATE_SECS_MAX * HZ;
+
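+ /*
+ * Each loop iteration below sleeps for roughly one jiffy, so a
+ * time_out of WRITEBACK_RATE_UPDATE_SECS_MAX * HZ jiffies bounds the
+ * wait at roughly WRITEBACK_RATE_UPDATE_SECS_MAX seconds.
+ */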
+ do {
+ if (!test_bit(BCACHE_DEV_RATE_DW_RUNNING,
+ &dc->disk.flags))
+ break;
+ time_out--;
+ schedule_timeout_interruptible(1);
+ } while (time_out > 0);
+
+ if (time_out == 0)
+ pr_warn("give up waiting for dc->writeback_write_update to quit\n");
+
+ cancel_delayed_work_sync(&dc->writeback_rate_update);
+}
+
+static void cached_dev_detach_finish(struct work_struct *w)
+{
+ struct cached_dev *dc = container_of(w, struct cached_dev, detach);
+ struct cache_set *c = dc->disk.c;
+
+ BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
+ BUG_ON(refcount_read(&dc->count));
+
+
+ if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
+ cancel_writeback_rate_update_dwork(dc);
+
+ if (!IS_ERR_OR_NULL(dc->writeback_thread)) {
+ kthread_stop(dc->writeback_thread);
+ dc->writeback_thread = NULL;
+ }
+
+ mutex_lock(&bch_register_lock);
+
+ bcache_device_detach(&dc->disk);
+ list_move(&dc->list, &uncached_devices);
+ calc_cached_dev_sectors(c);
+
+ clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);
+ clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags);
+
+ mutex_unlock(&bch_register_lock);
+
+ pr_info("Caching disabled for %pg\n", dc->bdev);
+
+ /* Drop ref we took in cached_dev_detach() */
+ closure_put(&dc->disk.cl);
+}
+
+void bch_cached_dev_detach(struct cached_dev *dc)
+{
+ lockdep_assert_held(&bch_register_lock);
+
+ if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
+ return;
+
+ if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
+ return;
+
+ /*
+ * Block the device from being closed and freed until we're finished
+ * detaching
+ */
+ closure_get(&dc->disk.cl);
+
+ bch_writeback_queue(dc);
+
+ cached_dev_put(dc);
+}
+
+int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
+ uint8_t *set_uuid)
+{
+ uint32_t rtime = cpu_to_le32((u32)ktime_get_real_seconds());
+ struct uuid_entry *u;
+ struct cached_dev *exist_dc, *t;
+ int ret = 0;
+
+ if ((set_uuid && memcmp(set_uuid, c->set_uuid, 16)) ||
+ (!set_uuid && memcmp(dc->sb.set_uuid, c->set_uuid, 16)))
+ return -ENOENT;
+
+ if (dc->disk.c) {
+ pr_err("Can't attach %pg: already attached\n", dc->bdev);
+ return -EINVAL;
+ }
+
+ if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
+ pr_err("Can't attach %pg: shutting down\n", dc->bdev);
+ return -EINVAL;
+ }
+
+ if (dc->sb.block_size < c->cache->sb.block_size) {
+ /* Will die */
+ pr_err("Couldn't attach %pg: block size less than set's block size\n",
+ dc->bdev);
+ return -EINVAL;
+ }
+
+ /* Check whether already attached */
+ list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
+ if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
+ pr_err("Tried to attach %pg but duplicate UUID already attached\n",
+ dc->bdev);
+
+ return -EINVAL;
+ }
+ }
+
+ u = uuid_find(c, dc->sb.uuid);
+
+ if (u &&
+ (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE ||
+ BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) {
+ memcpy(u->uuid, invalid_uuid, 16);
+ u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds());
+ u = NULL;
+ }
+
+ if (!u) {
+ if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
+ pr_err("Couldn't find uuid for %pg in set\n", dc->bdev);
+ return -ENOENT;
+ }
+
+ u = uuid_find_empty(c);
+ if (!u) {
+ pr_err("Not caching %pg, no room for UUID\n", dc->bdev);
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * Deadlocks since we're called via sysfs...
+ * sysfs_remove_file(&dc->kobj, &sysfs_attach);
+ */
+
+ if (bch_is_zero(u->uuid, 16)) {
+ struct closure cl;
+
+ closure_init_stack(&cl);
+
+ memcpy(u->uuid, dc->sb.uuid, 16);
+ memcpy(u->label, dc->sb.label, SB_LABEL_SIZE);
+ u->first_reg = u->last_reg = rtime;
+ bch_uuid_write(c);
+
+ memcpy(dc->sb.set_uuid, c->set_uuid, 16);
+ SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
+
+ bch_write_bdev_super(dc, &cl);
+ closure_sync(&cl);
+ } else {
+ u->last_reg = rtime;
+ bch_uuid_write(c);
+ }
+
+ bcache_device_attach(&dc->disk, c, u - c->uuids);
+ list_move(&dc->list, &c->cached_devs);
+ calc_cached_dev_sectors(c);
+
+ /*
+ * dc->c must be set before dc->count != 0 - paired with the mb in
+ * cached_dev_get()
+ */
+ smp_wmb();
+ refcount_set(&dc->count, 1);
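+ /*
+ * Sketch of the reader side: cached_dev_get() is expected to increment
+ * dc->count with refcount_inc_not_zero() and then issue a matching
+ * barrier before dereferencing dc->disk.c, so a non-zero count implies
+ * it also observes the dc->c assignment above.
+ */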
+
+ /* Block writeback thread, but spawn it */
+ down_write(&dc->writeback_lock);
+ if (bch_cached_dev_writeback_start(dc)) {
+ up_write(&dc->writeback_lock);
+ pr_err("Couldn't start writeback facilities for %s\n",
+ dc->disk.disk->disk_name);
+ return -ENOMEM;
+ }
+
+ if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
+ atomic_set(&dc->has_dirty, 1);
+ bch_writeback_queue(dc);
+ }
+
+ bch_sectors_dirty_init(&dc->disk);
+
+ ret = bch_cached_dev_run(dc);
+ if (ret && (ret != -EBUSY)) {
+ up_write(&dc->writeback_lock);
+ /*
+ * bch_register_lock is held, so bcache_device_stop() cannot be
+ * called directly. The kthread and kworker created earlier by
+ * bch_cached_dev_writeback_start() have to be stopped manually
+ * here.
+ */
+ kthread_stop(dc->writeback_thread);
+ cancel_writeback_rate_update_dwork(dc);
+ pr_err("Couldn't run cached device %pg\n", dc->bdev);
+ return ret;
+ }
+
+ bcache_device_link(&dc->disk, c, "bdev");
+ atomic_inc(&c->attached_dev_nr);
+
+ if (bch_has_feature_obso_large_bucket(&(c->cache->sb))) {
+ pr_err("The obsoleted large bucket layout is unsupported, set the bcache device into read-only\n");
+ pr_err("Please update to the latest bcache-tools to create the cache device\n");
+ set_disk_ro(dc->disk.disk, 1);
+ }
+
+ /* Allow the writeback thread to proceed */
+ up_write(&dc->writeback_lock);
+
+ pr_info("Caching %pg as %s on set %pU\n",
+ dc->bdev,
+ dc->disk.disk->disk_name,
+ dc->disk.c->set_uuid);
+ return 0;
+}
+
+/* when dc->disk.kobj released */
+void bch_cached_dev_release(struct kobject *kobj)
+{
+ struct cached_dev *dc = container_of(kobj, struct cached_dev,
+ disk.kobj);
+ kfree(dc);
+ module_put(THIS_MODULE);
+}
+
+static void cached_dev_free(struct closure *cl)
+{
+ struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
+
+ if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
+ cancel_writeback_rate_update_dwork(dc);
+
+ if (!IS_ERR_OR_NULL(dc->writeback_thread))
+ kthread_stop(dc->writeback_thread);
+ if (!IS_ERR_OR_NULL(dc->status_update_thread))
+ kthread_stop(dc->status_update_thread);
+
+ mutex_lock(&bch_register_lock);
+
+ if (atomic_read(&dc->running)) {
+ bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
+ del_gendisk(dc->disk.disk);
+ }
+ bcache_device_free(&dc->disk);
+ list_del(&dc->list);
+
+ mutex_unlock(&bch_register_lock);
+
+ if (dc->sb_disk)
+ put_page(virt_to_page(dc->sb_disk));
+
+ if (!IS_ERR_OR_NULL(dc->bdev))
+ blkdev_put(dc->bdev, dc);
+
+ wake_up(&unregister_wait);
+
+ kobject_put(&dc->disk.kobj);
+}
+
+static void cached_dev_flush(struct closure *cl)
+{
+ struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
+ struct bcache_device *d = &dc->disk;
+
+ mutex_lock(&bch_register_lock);
+ bcache_device_unlink(d);
+ mutex_unlock(&bch_register_lock);
+
+ bch_cache_accounting_destroy(&dc->accounting);
+ kobject_del(&d->kobj);
+
+ continue_at(cl, cached_dev_free, system_wq);
+}
+
+static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
+{
+ int ret;
+ struct io *io;
+ struct request_queue *q = bdev_get_queue(dc->bdev);
+
+ __module_get(THIS_MODULE);
+ INIT_LIST_HEAD(&dc->list);
+ closure_init(&dc->disk.cl, NULL);
+ set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
+ kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
+ INIT_WORK(&dc->detach, cached_dev_detach_finish);
+ sema_init(&dc->sb_write_mutex, 1);
+ INIT_LIST_HEAD(&dc->io_lru);
+ spin_lock_init(&dc->io_lock);
+ bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
+
+ dc->sequential_cutoff = 4 << 20;
+
+ for (io = dc->io; io < dc->io + RECENT_IO; io++) {
+ list_add(&io->lru, &dc->io_lru);
+ hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
+ }
+
+ dc->disk.stripe_size = q->limits.io_opt >> 9;
+
+ if (dc->disk.stripe_size)
+ dc->partial_stripes_expensive =
+ q->limits.raid_partial_stripes_expensive;
+
+ ret = bcache_device_init(&dc->disk, block_size,
+ bdev_nr_sectors(dc->bdev) - dc->sb.data_offset,
+ dc->bdev, &bcache_cached_ops);
+ if (ret)
+ return ret;
+
+ blk_queue_io_opt(dc->disk.disk->queue,
+ max(queue_io_opt(dc->disk.disk->queue), queue_io_opt(q)));
+
+ atomic_set(&dc->io_errors, 0);
+ dc->io_disable = false;
+ dc->error_limit = DEFAULT_CACHED_DEV_ERROR_LIMIT;
+ /* default to auto */
+ dc->stop_when_cache_set_failed = BCH_CACHED_DEV_STOP_AUTO;
+
+ bch_cached_dev_request_init(dc);
+ bch_cached_dev_writeback_init(dc);
+ return 0;
+}
+
+/* Cached device - bcache superblock */
+
+static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
+ struct block_device *bdev,
+ struct cached_dev *dc)
+{
+ const char *err = "cannot allocate memory";
+ struct cache_set *c;
+ int ret = -ENOMEM;
+
+ memcpy(&dc->sb, sb, sizeof(struct cache_sb));
+ dc->bdev = bdev;
+ dc->sb_disk = sb_disk;
+
+ if (cached_dev_init(dc, sb->block_size << 9))
+ goto err;
+
+ err = "error creating kobject";
+ if (kobject_add(&dc->disk.kobj, bdev_kobj(bdev), "bcache"))
+ goto err;
+ if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
+ goto err;
+
+ pr_info("registered backing device %pg\n", dc->bdev);
+
+ list_add(&dc->list, &uncached_devices);
+ /* attach to a matched cache set if it exists */
+ list_for_each_entry(c, &bch_cache_sets, list)
+ bch_cached_dev_attach(dc, c, NULL);
+
+ if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
+ BDEV_STATE(&dc->sb) == BDEV_STATE_STALE) {
+ err = "failed to run cached device";
+ ret = bch_cached_dev_run(dc);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+err:
+ pr_notice("error %pg: %s\n", dc->bdev, err);
+ bcache_device_stop(&dc->disk);
+ return ret;
+}
+
+/* Flash only volumes */
+
+/* When d->kobj released */
+void bch_flash_dev_release(struct kobject *kobj)
+{
+ struct bcache_device *d = container_of(kobj, struct bcache_device,
+ kobj);
+ kfree(d);
+}
+
+static void flash_dev_free(struct closure *cl)
+{
+ struct bcache_device *d = container_of(cl, struct bcache_device, cl);
+
+ mutex_lock(&bch_register_lock);
+ atomic_long_sub(bcache_dev_sectors_dirty(d),
+ &d->c->flash_dev_dirty_sectors);
+ del_gendisk(d->disk);
+ bcache_device_free(d);
+ mutex_unlock(&bch_register_lock);
+ kobject_put(&d->kobj);
+}
+
+static void flash_dev_flush(struct closure *cl)
+{
+ struct bcache_device *d = container_of(cl, struct bcache_device, cl);
+
+ mutex_lock(&bch_register_lock);
+ bcache_device_unlink(d);
+ mutex_unlock(&bch_register_lock);
+ kobject_del(&d->kobj);
+ continue_at(cl, flash_dev_free, system_wq);
+}
+
+static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
+{
+ int err = -ENOMEM;
+ struct bcache_device *d = kzalloc(sizeof(struct bcache_device),
+ GFP_KERNEL);
+ if (!d)
+ goto err_ret;
+
+ closure_init(&d->cl, NULL);
+ set_closure_fn(&d->cl, flash_dev_flush, system_wq);
+
+ kobject_init(&d->kobj, &bch_flash_dev_ktype);
+
+ if (bcache_device_init(d, block_bytes(c->cache), u->sectors,
+ NULL, &bcache_flash_ops))
+ goto err;
+
+ bcache_device_attach(d, c, u - c->uuids);
+ bch_sectors_dirty_init(d);
+ bch_flash_dev_request_init(d);
+ err = add_disk(d->disk);
+ if (err)
+ goto err;
+
+ err = kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache");
+ if (err)
+ goto err;
+
+ bcache_device_link(d, c, "volume");
+
+ if (bch_has_feature_obso_large_bucket(&c->cache->sb)) {
+ pr_err("The obsoleted large bucket layout is unsupported, set the bcache device into read-only\n");
+ pr_err("Please update to the latest bcache-tools to create the cache device\n");
+ set_disk_ro(d->disk, 1);
+ }
+
+ return 0;
+err:
+ kobject_put(&d->kobj);
+err_ret:
+ return err;
+}
+
+static int flash_devs_run(struct cache_set *c)
+{
+ int ret = 0;
+ struct uuid_entry *u;
+
+ for (u = c->uuids;
+ u < c->uuids + c->nr_uuids && !ret;
+ u++)
+ if (UUID_FLASH_ONLY(u))
+ ret = flash_dev_run(c, u);
+
+ return ret;
+}
+
+int bch_flash_dev_create(struct cache_set *c, uint64_t size)
+{
+ struct uuid_entry *u;
+
+ if (test_bit(CACHE_SET_STOPPING, &c->flags))
+ return -EINTR;
+
+ if (!test_bit(CACHE_SET_RUNNING, &c->flags))
+ return -EPERM;
+
+ u = uuid_find_empty(c);
+ if (!u) {
+ pr_err("Can't create volume, no room for UUID\n");
+ return -EINVAL;
+ }
+
+ get_random_bytes(u->uuid, 16);
+ memset(u->label, 0, 32);
+ u->first_reg = u->last_reg = cpu_to_le32((u32)ktime_get_real_seconds());
+
+ SET_UUID_FLASH_ONLY(u, 1);
+ u->sectors = size >> 9;
+
+ bch_uuid_write(c);
+
+ return flash_dev_run(c, u);
+}
+
+bool bch_cached_dev_error(struct cached_dev *dc)
+{
+ if (!dc || test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
+ return false;
+
+ dc->io_disable = true;
+ /* let others know io_disable is true as early as possible */
+ smp_mb();
+
+ pr_err("stop %s: too many IO errors on backing device %pg\n",
+ dc->disk.disk->disk_name, dc->bdev);
+
+ bcache_device_stop(&dc->disk);
+ return true;
+}
+
+/* Cache set */
+
+__printf(2, 3)
+bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ if (c->on_error != ON_ERROR_PANIC &&
+ test_bit(CACHE_SET_STOPPING, &c->flags))
+ return false;
+
+ if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
+ pr_info("CACHE_SET_IO_DISABLE already set\n");
+
+ /*
+ * XXX: we can be called from atomic context
+ * acquire_console_sem();
+ */
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ pr_err("error on %pU: %pV, disabling caching\n",
+ c->set_uuid, &vaf);
+
+ va_end(args);
+
+ if (c->on_error == ON_ERROR_PANIC)
+ panic("panic forced after error\n");
+
+ bch_cache_set_unregister(c);
+ return true;
+}
+
+/* When c->kobj released */
+void bch_cache_set_release(struct kobject *kobj)
+{
+ struct cache_set *c = container_of(kobj, struct cache_set, kobj);
+
+ kfree(c);
+ module_put(THIS_MODULE);
+}
+
+static void cache_set_free(struct closure *cl)
+{
+ struct cache_set *c = container_of(cl, struct cache_set, cl);
+ struct cache *ca;
+
+ debugfs_remove(c->debug);
+
+ bch_open_buckets_free(c);
+ bch_btree_cache_free(c);
+ bch_journal_free(c);
+
+ mutex_lock(&bch_register_lock);
+ bch_bset_sort_state_free(&c->sort);
+ free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->cache->sb)));
+
+ ca = c->cache;
+ if (ca) {
+ ca->set = NULL;
+ c->cache = NULL;
+ kobject_put(&ca->kobj);
+ }
+
+
+ if (c->moving_gc_wq)
+ destroy_workqueue(c->moving_gc_wq);
+ bioset_exit(&c->bio_split);
+ mempool_exit(&c->fill_iter);
+ mempool_exit(&c->bio_meta);
+ mempool_exit(&c->search);
+ kfree(c->devices);
+
+ list_del(&c->list);
+ mutex_unlock(&bch_register_lock);
+
+ pr_info("Cache set %pU unregistered\n", c->set_uuid);
+ wake_up(&unregister_wait);
+
+ closure_debug_destroy(&c->cl);
+ kobject_put(&c->kobj);
+}
+
+static void cache_set_flush(struct closure *cl)
+{
+ struct cache_set *c = container_of(cl, struct cache_set, caching);
+ struct cache *ca = c->cache;
+ struct btree *b;
+
+ bch_cache_accounting_destroy(&c->accounting);
+
+ kobject_put(&c->internal);
+ kobject_del(&c->kobj);
+
+ if (!IS_ERR_OR_NULL(c->gc_thread))
+ kthread_stop(c->gc_thread);
+
+ if (!IS_ERR(c->root))
+ list_add(&c->root->list, &c->btree_cache);
+
+ /*
+ * Avoid flushing cached nodes if the cache set is retiring
+ * due to too many I/O errors.
+ */
+ if (!test_bit(CACHE_SET_IO_DISABLE, &c->flags))
+ list_for_each_entry(b, &c->btree_cache, list) {
+ mutex_lock(&b->write_lock);
+ if (btree_node_dirty(b))
+ __bch_btree_node_write(b, NULL);
+ mutex_unlock(&b->write_lock);
+ }
+
+ if (ca->alloc_thread)
+ kthread_stop(ca->alloc_thread);
+
+ if (c->journal.cur) {
+ cancel_delayed_work_sync(&c->journal.work);
+ /* flush last journal entry if needed */
+ c->journal.work.work.func(&c->journal.work.work);
+ }
+
+ closure_return(cl);
+}
+
+/*
+ * This function is only called when CACHE_SET_IO_DISABLE is set, which means
+ * the cache set is unregistering due to too many I/O errors. In this
+ * condition, the bcache device might be stopped; that depends on the
+ * stop_when_cache_set_failed value and whether the broken cache has dirty data:
+ *
+ * dc->stop_when_cache_set_failed    dc->has_dirty   stop bcache device
+ *  BCH_CACHED_DEV_STOP_AUTO              0                NO
+ *  BCH_CACHED_DEV_STOP_AUTO              1                YES
+ *  BCH_CACHED_DEV_STOP_ALWAYS            0                YES
+ *  BCH_CACHED_DEV_STOP_ALWAYS            1                YES
+ *
+ * The expected behavior is: if stop_when_cache_set_failed is configured to
+ * "auto" via the sysfs interface, the bcache device will not be stopped when
+ * the backing device is clean on the broken cache set.
+ */
+static void conditional_stop_bcache_device(struct cache_set *c,
+ struct bcache_device *d,
+ struct cached_dev *dc)
+{
+ if (dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_ALWAYS) {
+ pr_warn("stop_when_cache_set_failed of %s is \"always\", stop it for failed cache set %pU.\n",
+ d->disk->disk_name, c->set_uuid);
+ bcache_device_stop(d);
+ } else if (atomic_read(&dc->has_dirty)) {
+ /*
+ * dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_AUTO
+ * and dc->has_dirty == 1
+ */
+ pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is dirty, stop it to avoid potential data corruption.\n",
+ d->disk->disk_name);
+ /*
+ * There might be a small time gap in which the cache set
+ * has been released but the bcache device has not. During
+ * this gap, regular I/O requests go directly to the
+ * backing device because no cache set is attached. In
+ * writeback mode with a dirty cache this may introduce
+ * data inconsistency. Therefore, before calling
+ * bcache_device_stop() due to a broken cache device,
+ * dc->io_disable should be explicitly set to true.
+ */
+ dc->io_disable = true;
+ /* let others know io_disable is true as early as possible */
+ smp_mb();
+ bcache_device_stop(d);
+ } else {
+ /*
+ * dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_AUTO
+ * and dc->has_dirty == 0
+ */
+ pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is clean, keep it alive.\n",
+ d->disk->disk_name);
+ }
+}
+
+static void __cache_set_unregister(struct closure *cl)
+{
+ struct cache_set *c = container_of(cl, struct cache_set, caching);
+ struct cached_dev *dc;
+ struct bcache_device *d;
+ size_t i;
+
+ mutex_lock(&bch_register_lock);
+
+ for (i = 0; i < c->devices_max_used; i++) {
+ d = c->devices[i];
+ if (!d)
+ continue;
+
+ if (!UUID_FLASH_ONLY(&c->uuids[i]) &&
+ test_bit(CACHE_SET_UNREGISTERING, &c->flags)) {
+ dc = container_of(d, struct cached_dev, disk);
+ bch_cached_dev_detach(dc);
+ if (test_bit(CACHE_SET_IO_DISABLE, &c->flags))
+ conditional_stop_bcache_device(c, d, dc);
+ } else {
+ bcache_device_stop(d);
+ }
+ }
+
+ mutex_unlock(&bch_register_lock);
+
+ continue_at(cl, cache_set_flush, system_wq);
+}
+
+void bch_cache_set_stop(struct cache_set *c)
+{
+ if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags))
+ /* closure_fn set to __cache_set_unregister() */
+ closure_queue(&c->caching);
+}
+
+void bch_cache_set_unregister(struct cache_set *c)
+{
+ set_bit(CACHE_SET_UNREGISTERING, &c->flags);
+ bch_cache_set_stop(c);
+}
+
+#define alloc_meta_bucket_pages(gfp, sb) \
+ ((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(meta_bucket_pages(sb))))
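+/*
+ * (Illustrative) meta_bucket_pages(sb) is the number of pages needed for
+ * one metadata bucket; __get_free_pages() takes an allocation order, hence
+ * the ilog2() above, which assumes that page count is a power of two.
+ */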
+
+struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
+{
+ int iter_size;
+ struct cache *ca = container_of(sb, struct cache, sb);
+ struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
+
+ if (!c)
+ return NULL;
+
+ __module_get(THIS_MODULE);
+ closure_init(&c->cl, NULL);
+ set_closure_fn(&c->cl, cache_set_free, system_wq);
+
+ closure_init(&c->caching, &c->cl);
+ set_closure_fn(&c->caching, __cache_set_unregister, system_wq);
+
+ /* Maybe create continue_at_noreturn() and use it here? */
+ closure_set_stopped(&c->cl);
+ closure_put(&c->cl);
+
+ kobject_init(&c->kobj, &bch_cache_set_ktype);
+ kobject_init(&c->internal, &bch_cache_set_internal_ktype);
+
+ bch_cache_accounting_init(&c->accounting, &c->cl);
+
+ memcpy(c->set_uuid, sb->set_uuid, 16);
+
+ c->cache = ca;
+ c->cache->set = c;
+ c->bucket_bits = ilog2(sb->bucket_size);
+ c->block_bits = ilog2(sb->block_size);
+ c->nr_uuids = meta_bucket_bytes(sb) / sizeof(struct uuid_entry);
+ c->devices_max_used = 0;
+ atomic_set(&c->attached_dev_nr, 0);
+ c->btree_pages = meta_bucket_pages(sb);
+ if (c->btree_pages > BTREE_MAX_PAGES)
+ c->btree_pages = max_t(int, c->btree_pages / 4,
+ BTREE_MAX_PAGES);
+
+ sema_init(&c->sb_write_mutex, 1);
+ mutex_init(&c->bucket_lock);
+ init_waitqueue_head(&c->btree_cache_wait);
+ spin_lock_init(&c->btree_cannibalize_lock);
+ init_waitqueue_head(&c->bucket_wait);
+ init_waitqueue_head(&c->gc_wait);
+ sema_init(&c->uuid_write_mutex, 1);
+
+ spin_lock_init(&c->btree_gc_time.lock);
+ spin_lock_init(&c->btree_split_time.lock);
+ spin_lock_init(&c->btree_read_time.lock);
+
+ bch_moving_init_cache_set(c);
+
+ INIT_LIST_HEAD(&c->list);
+ INIT_LIST_HEAD(&c->cached_devs);
+ INIT_LIST_HEAD(&c->btree_cache);
+ INIT_LIST_HEAD(&c->btree_cache_freeable);
+ INIT_LIST_HEAD(&c->btree_cache_freed);
+ INIT_LIST_HEAD(&c->data_buckets);
+
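+ /*
+ * Rough sizing rationale for iter_size (used for the fill_iter mempool
+ * below): a btree node occupies one meta bucket and each block in it may
+ * begin a bset that needs its own btree_iter_set when the node is read
+ * and sorted, plus one extra entry for slack.
+ */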
+ iter_size = ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size + 1) *
+ sizeof(struct btree_iter_set);
+
+ c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL);
+ if (!c->devices)
+ goto err;
+
+ if (mempool_init_slab_pool(&c->search, 32, bch_search_cache))
+ goto err;
+
+ if (mempool_init_kmalloc_pool(&c->bio_meta, 2,
+ sizeof(struct bbio) +
+ sizeof(struct bio_vec) * meta_bucket_pages(sb)))
+ goto err;
+
+ if (mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size))
+ goto err;
+
+ if (bioset_init(&c->bio_split, 4, offsetof(struct bbio, bio),
+ BIOSET_NEED_RESCUER))
+ goto err;
+
+ c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, sb);
+ if (!c->uuids)
+ goto err;
+
+ c->moving_gc_wq = alloc_workqueue("bcache_gc", WQ_MEM_RECLAIM, 0);
+ if (!c->moving_gc_wq)
+ goto err;
+
+ if (bch_journal_alloc(c))
+ goto err;
+
+ if (bch_btree_cache_alloc(c))
+ goto err;
+
+ if (bch_open_buckets_alloc(c))
+ goto err;
+
+ if (bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages)))
+ goto err;
+
+ c->congested_read_threshold_us = 2000;
+ c->congested_write_threshold_us = 20000;
+ c->error_limit = DEFAULT_IO_ERROR_LIMIT;
+ c->idle_max_writeback_rate_enabled = 1;
+ WARN_ON(test_and_clear_bit(CACHE_SET_IO_DISABLE, &c->flags));
+
+ return c;
+err:
+ bch_cache_set_unregister(c);
+ return NULL;
+}
+
+static int run_cache_set(struct cache_set *c)
+{
+ const char *err = "cannot allocate memory";
+ struct cached_dev *dc, *t;
+ struct cache *ca = c->cache;
+ struct closure cl;
+ LIST_HEAD(journal);
+ struct journal_replay *l;
+
+ closure_init_stack(&cl);
+
+ c->nbuckets = ca->sb.nbuckets;
+ set_gc_sectors(c);
+
+ if (CACHE_SYNC(&c->cache->sb)) {
+ struct bkey *k;
+ struct jset *j;
+
+ err = "cannot allocate memory for journal";
+ if (bch_journal_read(c, &journal))
+ goto err;
+
+ pr_debug("btree_journal_read() done\n");
+
+ err = "no journal entries found";
+ if (list_empty(&journal))
+ goto err;
+
+ j = &list_entry(journal.prev, struct journal_replay, list)->j;
+
+ err = "IO error reading priorities";
+ if (prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]))
+ goto err;
+
+ /*
+ * If prio_read() fails it'll call cache_set_error and we'll
+ * tear everything down right away, but if we checked sooner we
+ * could perhaps avoid the journal replay.
+ */
+
+ k = &j->btree_root;
+
+ err = "bad btree root";
+ if (__bch_btree_ptr_invalid(c, k))
+ goto err;
+
+ err = "error reading btree root";
+ c->root = bch_btree_node_get(c, NULL, k,
+ j->btree_level,
+ true, NULL);
+ if (IS_ERR(c->root))
+ goto err;
+
+ list_del_init(&c->root->list);
+ rw_unlock(true, c->root);
+
+ err = uuid_read(c, j, &cl);
+ if (err)
+ goto err;
+
+ err = "error in recovery";
+ if (bch_btree_check(c))
+ goto err;
+
+ bch_journal_mark(c, &journal);
+ bch_initial_gc_finish(c);
+ pr_debug("btree_check() done\n");
+
+ /*
+ * bcache_journal_next() can't happen sooner, or
+ * btree_gc_finish() will give spurious errors about last_gc >
+ * gc_gen - this is a hack but oh well.
+ */
+ bch_journal_next(&c->journal);
+
+ err = "error starting allocator thread";
+ if (bch_cache_allocator_start(ca))
+ goto err;
+
+ /*
+ * First place it's safe to allocate: btree_check() and
+ * btree_gc_finish() have to run before we have buckets to
+ * allocate, and bch_bucket_alloc_set() might cause a journal
+ * entry to be written so bcache_journal_next() has to be called
+ * first.
+ *
+ * If the uuids were in the old format we have to rewrite them
+ * before the next journal entry is written:
+ */
+ if (j->version < BCACHE_JSET_VERSION_UUID)
+ __uuid_write(c);
+
+ err = "bcache: replay journal failed";
+ if (bch_journal_replay(c, &journal))
+ goto err;
+ } else {
+ unsigned int j;
+
+ pr_notice("invalidating existing data\n");
+ ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
+ 2, SB_JOURNAL_BUCKETS);
+
+ for (j = 0; j < ca->sb.keys; j++)
+ ca->sb.d[j] = ca->sb.first_bucket + j;
+
+ bch_initial_gc_finish(c);
+
+ err = "error starting allocator thread";
+ if (bch_cache_allocator_start(ca))
+ goto err;
+
+ mutex_lock(&c->bucket_lock);
+ bch_prio_write(ca, true);
+ mutex_unlock(&c->bucket_lock);
+
+ err = "cannot allocate new UUID bucket";
+ if (__uuid_write(c))
+ goto err;
+
+ err = "cannot allocate new btree root";
+ c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL);
+ if (IS_ERR(c->root))
+ goto err;
+
+ mutex_lock(&c->root->write_lock);
+ bkey_copy_key(&c->root->key, &MAX_KEY);
+ bch_btree_node_write(c->root, &cl);
+ mutex_unlock(&c->root->write_lock);
+
+ bch_btree_set_root(c->root);
+ rw_unlock(true, c->root);
+
+ /*
+ * We don't want to write the first journal entry until
+ * everything is set up - fortunately journal entries won't be
+ * written until the SET_CACHE_SYNC() here:
+ */
+ SET_CACHE_SYNC(&c->cache->sb, true);
+
+ bch_journal_next(&c->journal);
+ bch_journal_meta(c, &cl);
+ }
+
+ err = "error starting gc thread";
+ if (bch_gc_thread_start(c))
+ goto err;
+
+ closure_sync(&cl);
+ c->cache->sb.last_mount = (u32)ktime_get_real_seconds();
+ bcache_write_super(c);
+
+ if (bch_has_feature_obso_large_bucket(&c->cache->sb))
+ pr_err("Detect obsoleted large bucket layout, all attached bcache device will be read-only\n");
+
+ list_for_each_entry_safe(dc, t, &uncached_devices, list)
+ bch_cached_dev_attach(dc, c, NULL);
+
+ flash_devs_run(c);
+
+ bch_journal_space_reserve(&c->journal);
+ set_bit(CACHE_SET_RUNNING, &c->flags);
+ return 0;
+err:
+ while (!list_empty(&journal)) {
+ l = list_first_entry(&journal, struct journal_replay, list);
+ list_del(&l->list);
+ kfree(l);
+ }
+
+ closure_sync(&cl);
+
+ bch_cache_set_error(c, "%s", err);
+
+ return -EIO;
+}
+
+static const char *register_cache_set(struct cache *ca)
+{
+ char buf[12];
+ const char *err = "cannot allocate memory";
+ struct cache_set *c;
+
+ list_for_each_entry(c, &bch_cache_sets, list)
+ if (!memcmp(c->set_uuid, ca->sb.set_uuid, 16)) {
+ if (c->cache)
+ return "duplicate cache set member";
+
+ goto found;
+ }
+
+ c = bch_cache_set_alloc(&ca->sb);
+ if (!c)
+ return err;
+
+ err = "error creating kobject";
+ if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->set_uuid) ||
+ kobject_add(&c->internal, &c->kobj, "internal"))
+ goto err;
+
+ if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj))
+ goto err;
+
+ bch_debug_init_cache_set(c);
+
+ list_add(&c->list, &bch_cache_sets);
+found:
+ sprintf(buf, "cache%i", ca->sb.nr_this_dev);
+ if (sysfs_create_link(&ca->kobj, &c->kobj, "set") ||
+ sysfs_create_link(&c->kobj, &ca->kobj, buf))
+ goto err;
+
+ kobject_get(&ca->kobj);
+ ca->set = c;
+ ca->set->cache = ca;
+
+ err = "failed to run cache set";
+ if (run_cache_set(c) < 0)
+ goto err;
+
+ return NULL;
+err:
+ bch_cache_set_unregister(c);
+ return err;
+}
+
+/* Cache device */
+
+/* When ca->kobj released */
+void bch_cache_release(struct kobject *kobj)
+{
+ struct cache *ca = container_of(kobj, struct cache, kobj);
+ unsigned int i;
+
+ if (ca->set) {
+ BUG_ON(ca->set->cache != ca);
+ ca->set->cache = NULL;
+ }
+
+ free_pages((unsigned long) ca->disk_buckets, ilog2(meta_bucket_pages(&ca->sb)));
+ kfree(ca->prio_buckets);
+ vfree(ca->buckets);
+
+ free_heap(&ca->heap);
+ free_fifo(&ca->free_inc);
+
+ for (i = 0; i < RESERVE_NR; i++)
+ free_fifo(&ca->free[i]);
+
+ if (ca->sb_disk)
+ put_page(virt_to_page(ca->sb_disk));
+
+ if (!IS_ERR_OR_NULL(ca->bdev))
+ blkdev_put(ca->bdev, ca);
+
+ kfree(ca);
+ module_put(THIS_MODULE);
+}
+
+static int cache_alloc(struct cache *ca)
+{
+ size_t free;
+ size_t btree_buckets;
+ struct bucket *b;
+ int ret = -ENOMEM;
+ const char *err = NULL;
+
+ __module_get(THIS_MODULE);
+ kobject_init(&ca->kobj, &bch_cache_ktype);
+
+ bio_init(&ca->journal.bio, NULL, ca->journal.bio.bi_inline_vecs, 8, 0);
+
+ /*
+ * When ca->sb.njournal_buckets is non-zero a journal exists, and btree
+ * nodes may split during bch_journal_replay(), so buckets of type
+ * RESERVE_BTREE are needed. The worst case is that every journal bucket
+ * holds valid journal entries and all of the keys need to be replayed,
+ * so the number of RESERVE_BTREE buckets should be as large as the
+ * number of journal buckets.
+ */
+ btree_buckets = ca->sb.njournal_buckets ?: 8;
+ free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
+ if (!free) {
+ ret = -EPERM;
+ err = "ca->sb.nbuckets is too small";
+ goto err_free;
+ }
+
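+ /*
+ * Sketch of the reserve sizing derived from 'free' (about 1/1024 of the
+ * bucket count after power-of-two rounding): RESERVE_MOVINGGC and
+ * RESERVE_NONE each get 'free' entries, free_inc gets 4 * free and the
+ * heap 8 * free, while RESERVE_PRIO is sized exactly to prio_buckets(ca)
+ * and RESERVE_BTREE to the journal bucket count (falling back to 8).
+ */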
+ if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets,
+ GFP_KERNEL)) {
+ err = "ca->free[RESERVE_BTREE] alloc failed";
+ goto err_btree_alloc;
+ }
+
+ if (!init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca),
+ GFP_KERNEL)) {
+ err = "ca->free[RESERVE_PRIO] alloc failed";
+ goto err_prio_alloc;
+ }
+
+ if (!init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL)) {
+ err = "ca->free[RESERVE_MOVINGGC] alloc failed";
+ goto err_movinggc_alloc;
+ }
+
+ if (!init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL)) {
+ err = "ca->free[RESERVE_NONE] alloc failed";
+ goto err_none_alloc;
+ }
+
+ if (!init_fifo(&ca->free_inc, free << 2, GFP_KERNEL)) {
+ err = "ca->free_inc alloc failed";
+ goto err_free_inc_alloc;
+ }
+
+ if (!init_heap(&ca->heap, free << 3, GFP_KERNEL)) {
+ err = "ca->heap alloc failed";
+ goto err_heap_alloc;
+ }
+
+ ca->buckets = vzalloc(array_size(sizeof(struct bucket),
+ ca->sb.nbuckets));
+ if (!ca->buckets) {
+ err = "ca->buckets alloc failed";
+ goto err_buckets_alloc;
+ }
+
+ ca->prio_buckets = kzalloc(array3_size(sizeof(uint64_t),
+ prio_buckets(ca), 2),
+ GFP_KERNEL);
+ if (!ca->prio_buckets) {
+ err = "ca->prio_buckets alloc failed";
+ goto err_prio_buckets_alloc;
+ }
+
+ ca->disk_buckets = alloc_meta_bucket_pages(GFP_KERNEL, &ca->sb);
+ if (!ca->disk_buckets) {
+ err = "ca->disk_buckets alloc failed";
+ goto err_disk_buckets_alloc;
+ }
+
+ ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);
+
+ for_each_bucket(b, ca)
+ atomic_set(&b->pin, 0);
+ return 0;
+
+err_disk_buckets_alloc:
+ kfree(ca->prio_buckets);
+err_prio_buckets_alloc:
+ vfree(ca->buckets);
+err_buckets_alloc:
+ free_heap(&ca->heap);
+err_heap_alloc:
+ free_fifo(&ca->free_inc);
+err_free_inc_alloc:
+ free_fifo(&ca->free[RESERVE_NONE]);
+err_none_alloc:
+ free_fifo(&ca->free[RESERVE_MOVINGGC]);
+err_movinggc_alloc:
+ free_fifo(&ca->free[RESERVE_PRIO]);
+err_prio_alloc:
+ free_fifo(&ca->free[RESERVE_BTREE]);
+err_btree_alloc:
+err_free:
+ module_put(THIS_MODULE);
+ if (err)
+ pr_notice("error %pg: %s\n", ca->bdev, err);
+ return ret;
+}
+
+static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
+ struct block_device *bdev, struct cache *ca)
+{
+ const char *err = NULL; /* must be set for any error case */
+ int ret = 0;
+
+ memcpy(&ca->sb, sb, sizeof(struct cache_sb));
+ ca->bdev = bdev;
+ ca->sb_disk = sb_disk;
+
+ if (bdev_max_discard_sectors((bdev)))
+ ca->discard = CACHE_DISCARD(&ca->sb);
+
+ ret = cache_alloc(ca);
+ if (ret != 0) {
+ /*
+ * If we failed here, ca->kobj is not initialized yet, so
+ * kobject_put() won't be called and there is no chance for
+ * bch_cache_release() to call blkdev_put() on bdev. So we
+ * explicitly call blkdev_put() here.
+ */
+ blkdev_put(bdev, ca);
+ if (ret == -ENOMEM)
+ err = "cache_alloc(): -ENOMEM";
+ else if (ret == -EPERM)
+ err = "cache_alloc(): cache device is too small";
+ else
+ err = "cache_alloc(): unknown error";
+ goto err;
+ }
+
+ if (kobject_add(&ca->kobj, bdev_kobj(bdev), "bcache")) {
+ err = "error calling kobject_add";
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mutex_lock(&bch_register_lock);
+ err = register_cache_set(ca);
+ mutex_unlock(&bch_register_lock);
+
+ if (err) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ pr_info("registered cache device %pg\n", ca->bdev);
+
+out:
+ kobject_put(&ca->kobj);
+
+err:
+ if (err)
+ pr_notice("error %pg: %s\n", ca->bdev, err);
+
+ return ret;
+}
+
+/* Global interfaces/init */
+
+static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
+ const char *buffer, size_t size);
+static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
+ struct kobj_attribute *attr,
+ const char *buffer, size_t size);
+
+kobj_attribute_write(register, register_bcache);
+kobj_attribute_write(register_quiet, register_bcache);
+kobj_attribute_write(pendings_cleanup, bch_pending_bdevs_cleanup);
+
+static bool bch_is_open_backing(dev_t dev)
+{
+ struct cache_set *c, *tc;
+ struct cached_dev *dc, *t;
+
+ list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
+ list_for_each_entry_safe(dc, t, &c->cached_devs, list)
+ if (dc->bdev->bd_dev == dev)
+ return true;
+ list_for_each_entry_safe(dc, t, &uncached_devices, list)
+ if (dc->bdev->bd_dev == dev)
+ return true;
+ return false;
+}
+
+static bool bch_is_open_cache(dev_t dev)
+{
+ struct cache_set *c, *tc;
+
+ list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
+ struct cache *ca = c->cache;
+
+ if (ca->bdev->bd_dev == dev)
+ return true;
+ }
+
+ return false;
+}
+
+static bool bch_is_open(dev_t dev)
+{
+ return bch_is_open_cache(dev) || bch_is_open_backing(dev);
+}
+
+struct async_reg_args {
+ struct delayed_work reg_work;
+ char *path;
+ struct cache_sb *sb;
+ struct cache_sb_disk *sb_disk;
+ struct block_device *bdev;
+ void *holder;
+};
+
+static void register_bdev_worker(struct work_struct *work)
+{
+ int fail = false;
+ struct async_reg_args *args =
+ container_of(work, struct async_reg_args, reg_work.work);
+
+ mutex_lock(&bch_register_lock);
+ if (register_bdev(args->sb, args->sb_disk, args->bdev, args->holder)
+ < 0)
+ fail = true;
+ mutex_unlock(&bch_register_lock);
+
+ if (fail)
+		pr_info("error %s: failed to register backing device\n",
+ args->path);
+ kfree(args->sb);
+ kfree(args->path);
+ kfree(args);
+ module_put(THIS_MODULE);
+}
+
+static void register_cache_worker(struct work_struct *work)
+{
+ int fail = false;
+ struct async_reg_args *args =
+ container_of(work, struct async_reg_args, reg_work.work);
+
+ /* blkdev_put() will be called in bch_cache_release() */
+ if (register_cache(args->sb, args->sb_disk, args->bdev, args->holder))
+ fail = true;
+
+ if (fail)
+		pr_info("error %s: failed to register cache device\n",
+ args->path);
+ kfree(args->sb);
+ kfree(args->path);
+ kfree(args);
+ module_put(THIS_MODULE);
+}
+
+static void register_device_async(struct async_reg_args *args)
+{
+ if (SB_IS_BDEV(args->sb))
+ INIT_DELAYED_WORK(&args->reg_work, register_bdev_worker);
+ else
+ INIT_DELAYED_WORK(&args->reg_work, register_cache_worker);
+
+ /* 10 jiffies is enough for a delay */
+ queue_delayed_work(system_wq, &args->reg_work, 10);
+}
+
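+/*
+ * The holder object passed to blkdev_get_by_dev(): a struct cached_dev
+ * for backing devices, a struct cache for cache devices.
+ */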
+static void *alloc_holder_object(struct cache_sb *sb)
+{
+ if (SB_IS_BDEV(sb))
+ return kzalloc(sizeof(struct cached_dev), GFP_KERNEL);
+ return kzalloc(sizeof(struct cache), GFP_KERNEL);
+}
+
+static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
+ const char *buffer, size_t size)
+{
+ const char *err;
+ char *path = NULL;
+ struct cache_sb *sb;
+ struct cache_sb_disk *sb_disk;
+ struct block_device *bdev, *bdev2;
+ void *holder = NULL;
+ ssize_t ret;
+ bool async_registration = false;
+ bool quiet = false;
+
+#ifdef CONFIG_BCACHE_ASYNC_REGISTRATION
+ async_registration = true;
+#endif
+
+ ret = -EBUSY;
+ err = "failed to reference bcache module";
+ if (!try_module_get(THIS_MODULE))
+ goto out;
+
+ /* For latest state of bcache_is_reboot */
+ smp_mb();
+ err = "bcache is in reboot";
+ if (bcache_is_reboot)
+ goto out_module_put;
+
+ ret = -ENOMEM;
+ err = "cannot allocate memory";
+ path = kstrndup(buffer, size, GFP_KERNEL);
+ if (!path)
+ goto out_module_put;
+
+ sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL);
+ if (!sb)
+ goto out_free_path;
+
+ ret = -EINVAL;
+ err = "failed to open device";
+ bdev = blkdev_get_by_path(strim(path), BLK_OPEN_READ, NULL, NULL);
+ if (IS_ERR(bdev))
+ goto out_free_sb;
+
+ err = "failed to set blocksize";
+ if (set_blocksize(bdev, 4096))
+ goto out_blkdev_put;
+
+ err = read_super(sb, bdev, &sb_disk);
+ if (err)
+ goto out_blkdev_put;
+
+ holder = alloc_holder_object(sb);
+ if (!holder) {
+ ret = -ENOMEM;
+ err = "cannot allocate memory";
+ goto out_put_sb_page;
+ }
+
+ /* Now reopen in exclusive mode with proper holder */
+ bdev2 = blkdev_get_by_dev(bdev->bd_dev, BLK_OPEN_READ | BLK_OPEN_WRITE,
+ holder, NULL);
+ blkdev_put(bdev, NULL);
+ bdev = bdev2;
+ if (IS_ERR(bdev)) {
+ ret = PTR_ERR(bdev);
+ bdev = NULL;
+ if (ret == -EBUSY) {
+ dev_t dev;
+
+ mutex_lock(&bch_register_lock);
+ if (lookup_bdev(strim(path), &dev) == 0 &&
+ bch_is_open(dev))
+ err = "device already registered";
+ else
+ err = "device busy";
+ mutex_unlock(&bch_register_lock);
+ if (attr == &ksysfs_register_quiet) {
+ quiet = true;
+ ret = size;
+ }
+ }
+ goto out_free_holder;
+ }
+
+ err = "failed to register device";
+
+ if (async_registration) {
+ /* register in asynchronous way */
+ struct async_reg_args *args =
+ kzalloc(sizeof(struct async_reg_args), GFP_KERNEL);
+
+ if (!args) {
+ ret = -ENOMEM;
+ err = "cannot allocate memory";
+ goto out_free_holder;
+ }
+
+ args->path = path;
+ args->sb = sb;
+ args->sb_disk = sb_disk;
+ args->bdev = bdev;
+ args->holder = holder;
+ register_device_async(args);
+ /* No wait and returns to user space */
+ goto async_done;
+ }
+
+ if (SB_IS_BDEV(sb)) {
+ mutex_lock(&bch_register_lock);
+ ret = register_bdev(sb, sb_disk, bdev, holder);
+ mutex_unlock(&bch_register_lock);
+ /* blkdev_put() will be called in cached_dev_free() */
+ if (ret < 0)
+ goto out_free_sb;
+ } else {
+ /* blkdev_put() will be called in bch_cache_release() */
+ ret = register_cache(sb, sb_disk, bdev, holder);
+ if (ret)
+ goto out_free_sb;
+ }
+
+ kfree(sb);
+ kfree(path);
+ module_put(THIS_MODULE);
+async_done:
+ return size;
+
+out_free_holder:
+ kfree(holder);
+out_put_sb_page:
+ put_page(virt_to_page(sb_disk));
+out_blkdev_put:
+ if (bdev)
+ blkdev_put(bdev, holder);
+out_free_sb:
+ kfree(sb);
+out_free_path:
+ kfree(path);
+ path = NULL;
+out_module_put:
+ module_put(THIS_MODULE);
+out:
+ if (!quiet)
+ pr_info("error %s: %s\n", path?path:"", err);
+ return ret;
+}
+
+
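+/*
+ * List entry wrapping a not-yet-attached backing device, used by
+ * bch_pending_bdevs_cleanup() below.
+ */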
+struct pdev {
+ struct list_head list;
+ struct cached_dev *dc;
+};
+
+static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
+ struct kobj_attribute *attr,
+ const char *buffer,
+ size_t size)
+{
+ LIST_HEAD(pending_devs);
+ ssize_t ret = size;
+ struct cached_dev *dc, *tdc;
+ struct pdev *pdev, *tpdev;
+ struct cache_set *c, *tc;
+
+ mutex_lock(&bch_register_lock);
+ list_for_each_entry_safe(dc, tdc, &uncached_devices, list) {
+ pdev = kmalloc(sizeof(struct pdev), GFP_KERNEL);
+ if (!pdev)
+ break;
+ pdev->dc = dc;
+ list_add(&pdev->list, &pending_devs);
+ }
+
+ list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
+ char *pdev_set_uuid = pdev->dc->sb.set_uuid;
+ list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
+ char *set_uuid = c->set_uuid;
+
+ if (!memcmp(pdev_set_uuid, set_uuid, 16)) {
+ list_del(&pdev->list);
+ kfree(pdev);
+ break;
+ }
+ }
+ }
+ mutex_unlock(&bch_register_lock);
+
+ list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
+ pr_info("delete pdev %p\n", pdev);
+ list_del(&pdev->list);
+ bcache_device_stop(&pdev->dc->disk);
+ kfree(pdev);
+ }
+
+ return ret;
+}
+
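+/*
+ * Reboot notifier: on shutdown, halt, or power-off, stop all cache sets
+ * and backing devices, then wait up to 10 seconds for them to finish
+ * tearing down.
+ */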
+static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
+{
+ if (bcache_is_reboot)
+ return NOTIFY_DONE;
+
+ if (code == SYS_DOWN ||
+ code == SYS_HALT ||
+ code == SYS_POWER_OFF) {
+ DEFINE_WAIT(wait);
+ unsigned long start = jiffies;
+ bool stopped = false;
+
+ struct cache_set *c, *tc;
+ struct cached_dev *dc, *tdc;
+
+ mutex_lock(&bch_register_lock);
+
+ if (bcache_is_reboot)
+ goto out;
+
+		/* New registrations are rejected from now on */
+ bcache_is_reboot = true;
+		/*
+		 * Make sure a registering caller (if any) on another CPU
+		 * core sees bcache_is_reboot set to true before we go on
+		 */
+ smp_mb();
+
+ if (list_empty(&bch_cache_sets) &&
+ list_empty(&uncached_devices))
+ goto out;
+
+ mutex_unlock(&bch_register_lock);
+
+ pr_info("Stopping all devices:\n");
+
+		/*
+		 * bch_register_lock is not held while calling
+		 * bch_cache_set_stop() and bcache_device_stop(), to
+		 * avoid a potential deadlock during reboot: the cache
+		 * set and bcache device stopping paths acquire
+		 * bch_register_lock too.
+		 *
+		 * We are safe here because bcache_is_reboot is already
+		 * set to true, so register_bcache() will reject new
+		 * registrations. bcache_is_reboot also makes sure
+		 * bcache_reboot() won't be re-entered by another thread,
+		 * so there is no race in the following list iteration
+		 * with list_for_each_entry_safe().
+		 */
+ list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
+ bch_cache_set_stop(c);
+
+ list_for_each_entry_safe(dc, tdc, &uncached_devices, list)
+ bcache_device_stop(&dc->disk);
+
+
+ /*
+ * Give an early chance for other kthreads and
+ * kworkers to stop themselves
+ */
+ schedule();
+
+ /* What's a condition variable? */
+ while (1) {
+ long timeout = start + 10 * HZ - jiffies;
+
+ mutex_lock(&bch_register_lock);
+ stopped = list_empty(&bch_cache_sets) &&
+ list_empty(&uncached_devices);
+
+ if (timeout < 0 || stopped)
+ break;
+
+ prepare_to_wait(&unregister_wait, &wait,
+ TASK_UNINTERRUPTIBLE);
+
+ mutex_unlock(&bch_register_lock);
+ schedule_timeout(timeout);
+ }
+
+ finish_wait(&unregister_wait, &wait);
+
+ if (stopped)
+ pr_info("All devices stopped\n");
+ else
+ pr_notice("Timeout waiting for devices to be closed\n");
+out:
+ mutex_unlock(&bch_register_lock);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block reboot = {
+ .notifier_call = bcache_reboot,
+ .priority = INT_MAX, /* before any real devices */
+};
+
+static void bcache_exit(void)
+{
+ bch_debug_exit();
+ bch_request_exit();
+ if (bcache_kobj)
+ kobject_put(bcache_kobj);
+ if (bcache_wq)
+ destroy_workqueue(bcache_wq);
+ if (bch_journal_wq)
+ destroy_workqueue(bch_journal_wq);
+ if (bch_flush_wq)
+ destroy_workqueue(bch_flush_wq);
+ bch_btree_exit();
+
+ if (bcache_major)
+ unregister_blkdev(bcache_major, "bcache");
+ unregister_reboot_notifier(&reboot);
+ mutex_destroy(&bch_register_lock);
+}
+
+/* Check and fixup module parameters */
+static void check_module_parameters(void)
+{
+ if (bch_cutoff_writeback_sync == 0)
+ bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC;
+ else if (bch_cutoff_writeback_sync > CUTOFF_WRITEBACK_SYNC_MAX) {
+ pr_warn("set bch_cutoff_writeback_sync (%u) to max value %u\n",
+ bch_cutoff_writeback_sync, CUTOFF_WRITEBACK_SYNC_MAX);
+ bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC_MAX;
+ }
+
+ if (bch_cutoff_writeback == 0)
+ bch_cutoff_writeback = CUTOFF_WRITEBACK;
+ else if (bch_cutoff_writeback > CUTOFF_WRITEBACK_MAX) {
+ pr_warn("set bch_cutoff_writeback (%u) to max value %u\n",
+ bch_cutoff_writeback, CUTOFF_WRITEBACK_MAX);
+ bch_cutoff_writeback = CUTOFF_WRITEBACK_MAX;
+ }
+
+ if (bch_cutoff_writeback > bch_cutoff_writeback_sync) {
+ pr_warn("set bch_cutoff_writeback (%u) to %u\n",
+ bch_cutoff_writeback, bch_cutoff_writeback_sync);
+ bch_cutoff_writeback = bch_cutoff_writeback_sync;
+ }
+}
+
+static int __init bcache_init(void)
+{
+ static const struct attribute *files[] = {
+ &ksysfs_register.attr,
+ &ksysfs_register_quiet.attr,
+ &ksysfs_pendings_cleanup.attr,
+ NULL
+ };
+
+ check_module_parameters();
+
+ mutex_init(&bch_register_lock);
+ init_waitqueue_head(&unregister_wait);
+ register_reboot_notifier(&reboot);
+
+ bcache_major = register_blkdev(0, "bcache");
+ if (bcache_major < 0) {
+ unregister_reboot_notifier(&reboot);
+ mutex_destroy(&bch_register_lock);
+ return bcache_major;
+ }
+
+ if (bch_btree_init())
+ goto err;
+
+ bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0);
+ if (!bcache_wq)
+ goto err;
+
+ /*
+ * Let's not make this `WQ_MEM_RECLAIM` for the following reasons:
+ *
+ * 1. It used `system_wq` before which also does no memory reclaim.
+ * 2. With `WQ_MEM_RECLAIM` desktop stalls, increased boot times, and
+ * reduced throughput can be observed.
+ *
+	 * We still want to use our own queue so as not to congest the `system_wq`.
+ */
+ bch_flush_wq = alloc_workqueue("bch_flush", 0, 0);
+ if (!bch_flush_wq)
+ goto err;
+
+ bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0);
+ if (!bch_journal_wq)
+ goto err;
+
+ bcache_kobj = kobject_create_and_add("bcache", fs_kobj);
+ if (!bcache_kobj)
+ goto err;
+
+ if (bch_request_init() ||
+ sysfs_create_files(bcache_kobj, files))
+ goto err;
+
+ bch_debug_init();
+ closure_debug_init();
+
+ bcache_is_reboot = false;
+
+ return 0;
+err:
+ bcache_exit();
+ return -ENOMEM;
+}
+
+/*
+ * Module hooks
+ */
+module_exit(bcache_exit);
+module_init(bcache_init);
+
+module_param(bch_cutoff_writeback, uint, 0);
+MODULE_PARM_DESC(bch_cutoff_writeback, "threshold to cutoff writeback");
+
+module_param(bch_cutoff_writeback_sync, uint, 0);
+MODULE_PARM_DESC(bch_cutoff_writeback_sync, "hard threshold to cutoff writeback");
+
+MODULE_DESCRIPTION("Bcache: a Linux block layer cache");
+MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
new file mode 100644
index 0000000000..18ac98dc89
--- /dev/null
+++ b/drivers/md/bcache/sysfs.c
@@ -0,0 +1,1203 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * bcache sysfs interfaces
+ *
+ * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "bcache.h"
+#include "sysfs.h"
+#include "btree.h"
+#include "request.h"
+#include "writeback.h"
+#include "features.h"
+
+#include <linux/blkdev.h>
+#include <linux/sort.h>
+#include <linux/sched/clock.h>
+
+extern bool bcache_is_reboot;
+
+/* Default is 0 ("writethrough") */
+static const char * const bch_cache_modes[] = {
+ "writethrough",
+ "writeback",
+ "writearound",
+ "none",
+ NULL
+};
+
+static const char * const bch_reada_cache_policies[] = {
+ "all",
+ "meta-only",
+ NULL
+};
+
+/* Default is 0 ("auto") */
+static const char * const bch_stop_on_failure_modes[] = {
+ "auto",
+ "always",
+ NULL
+};
+
+static const char * const cache_replacement_policies[] = {
+ "lru",
+ "fifo",
+ "random",
+ NULL
+};
+
+static const char * const error_actions[] = {
+ "unregister",
+ "panic",
+ NULL
+};
+
+write_attribute(attach);
+write_attribute(detach);
+write_attribute(unregister);
+write_attribute(stop);
+write_attribute(clear_stats);
+write_attribute(trigger_gc);
+write_attribute(prune_cache);
+write_attribute(flash_vol_create);
+
+read_attribute(bucket_size);
+read_attribute(block_size);
+read_attribute(nbuckets);
+read_attribute(tree_depth);
+read_attribute(root_usage_percent);
+read_attribute(priority_stats);
+read_attribute(btree_cache_size);
+read_attribute(btree_cache_max_chain);
+read_attribute(cache_available_percent);
+read_attribute(written);
+read_attribute(btree_written);
+read_attribute(metadata_written);
+read_attribute(active_journal_entries);
+read_attribute(backing_dev_name);
+read_attribute(backing_dev_uuid);
+
+sysfs_time_stats_attribute(btree_gc, sec, ms);
+sysfs_time_stats_attribute(btree_split, sec, us);
+sysfs_time_stats_attribute(btree_sort, ms, us);
+sysfs_time_stats_attribute(btree_read, ms, us);
+
+read_attribute(btree_nodes);
+read_attribute(btree_used_percent);
+read_attribute(average_key_size);
+read_attribute(dirty_data);
+read_attribute(bset_tree_stats);
+read_attribute(feature_compat);
+read_attribute(feature_ro_compat);
+read_attribute(feature_incompat);
+
+read_attribute(state);
+read_attribute(cache_read_races);
+read_attribute(reclaim);
+read_attribute(reclaimed_journal_buckets);
+read_attribute(flush_write);
+read_attribute(writeback_keys_done);
+read_attribute(writeback_keys_failed);
+read_attribute(io_errors);
+read_attribute(congested);
+read_attribute(cutoff_writeback);
+read_attribute(cutoff_writeback_sync);
+rw_attribute(congested_read_threshold_us);
+rw_attribute(congested_write_threshold_us);
+
+rw_attribute(sequential_cutoff);
+rw_attribute(data_csum);
+rw_attribute(cache_mode);
+rw_attribute(readahead_cache_policy);
+rw_attribute(stop_when_cache_set_failed);
+rw_attribute(writeback_metadata);
+rw_attribute(writeback_running);
+rw_attribute(writeback_percent);
+rw_attribute(writeback_delay);
+rw_attribute(writeback_rate);
+rw_attribute(writeback_consider_fragment);
+
+rw_attribute(writeback_rate_update_seconds);
+rw_attribute(writeback_rate_i_term_inverse);
+rw_attribute(writeback_rate_p_term_inverse);
+rw_attribute(writeback_rate_fp_term_low);
+rw_attribute(writeback_rate_fp_term_mid);
+rw_attribute(writeback_rate_fp_term_high);
+rw_attribute(writeback_rate_minimum);
+read_attribute(writeback_rate_debug);
+
+read_attribute(stripe_size);
+read_attribute(partial_stripes_expensive);
+
+rw_attribute(synchronous);
+rw_attribute(journal_delay_ms);
+rw_attribute(io_disable);
+rw_attribute(discard);
+rw_attribute(running);
+rw_attribute(label);
+rw_attribute(errors);
+rw_attribute(io_error_limit);
+rw_attribute(io_error_halflife);
+rw_attribute(verify);
+rw_attribute(bypass_torture_test);
+rw_attribute(key_merging_disabled);
+rw_attribute(gc_always_rewrite);
+rw_attribute(expensive_debug_checks);
+rw_attribute(cache_replacement_policy);
+rw_attribute(btree_shrinker_disabled);
+rw_attribute(copy_gc_enabled);
+rw_attribute(idle_max_writeback_rate);
+rw_attribute(gc_after_writeback);
+rw_attribute(size);
+
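+/*
+ * Print a NULL-terminated list of strings into buf, wrapping the
+ * currently selected entry in square brackets.
+ */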
+static ssize_t bch_snprint_string_list(char *buf,
+ size_t size,
+ const char * const list[],
+ size_t selected)
+{
+ char *out = buf;
+ size_t i;
+
+ for (i = 0; list[i]; i++)
+ out += scnprintf(out, buf + size - out,
+ i == selected ? "[%s] " : "%s ", list[i]);
+
+ out[-1] = '\n';
+ return out - buf;
+}
+
+SHOW(__bch_cached_dev)
+{
+ struct cached_dev *dc = container_of(kobj, struct cached_dev,
+ disk.kobj);
+ char const *states[] = { "no cache", "clean", "dirty", "inconsistent" };
+ int wb = dc->writeback_running;
+
+#define var(stat) (dc->stat)
+
+ if (attr == &sysfs_cache_mode)
+ return bch_snprint_string_list(buf, PAGE_SIZE,
+ bch_cache_modes,
+ BDEV_CACHE_MODE(&dc->sb));
+
+ if (attr == &sysfs_readahead_cache_policy)
+ return bch_snprint_string_list(buf, PAGE_SIZE,
+ bch_reada_cache_policies,
+ dc->cache_readahead_policy);
+
+ if (attr == &sysfs_stop_when_cache_set_failed)
+ return bch_snprint_string_list(buf, PAGE_SIZE,
+ bch_stop_on_failure_modes,
+ dc->stop_when_cache_set_failed);
+
+
+ sysfs_printf(data_csum, "%i", dc->disk.data_csum);
+ var_printf(verify, "%i");
+ var_printf(bypass_torture_test, "%i");
+ var_printf(writeback_metadata, "%i");
+ var_printf(writeback_running, "%i");
+ var_printf(writeback_consider_fragment, "%i");
+ var_print(writeback_delay);
+ var_print(writeback_percent);
+ sysfs_hprint(writeback_rate,
+ wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0);
+ sysfs_printf(io_errors, "%i", atomic_read(&dc->io_errors));
+ sysfs_printf(io_error_limit, "%i", dc->error_limit);
+ sysfs_printf(io_disable, "%i", dc->io_disable);
+ var_print(writeback_rate_update_seconds);
+ var_print(writeback_rate_i_term_inverse);
+ var_print(writeback_rate_p_term_inverse);
+ var_print(writeback_rate_fp_term_low);
+ var_print(writeback_rate_fp_term_mid);
+ var_print(writeback_rate_fp_term_high);
+ var_print(writeback_rate_minimum);
+
+ if (attr == &sysfs_writeback_rate_debug) {
+ char rate[20];
+ char dirty[20];
+ char target[20];
+ char proportional[20];
+ char integral[20];
+ char change[20];
+ s64 next_io;
+
+ /*
+ * Except for dirty and target, other values should
+ * be 0 if writeback is not running.
+ */
+ bch_hprint(rate,
+ wb ? atomic_long_read(&dc->writeback_rate.rate) << 9
+ : 0);
+ bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
+ bch_hprint(target, dc->writeback_rate_target << 9);
+ bch_hprint(proportional,
+ wb ? dc->writeback_rate_proportional << 9 : 0);
+ bch_hprint(integral,
+ wb ? dc->writeback_rate_integral_scaled << 9 : 0);
+ bch_hprint(change, wb ? dc->writeback_rate_change << 9 : 0);
+ next_io = wb ? div64_s64(dc->writeback_rate.next-local_clock(),
+ NSEC_PER_MSEC) : 0;
+
+ return sprintf(buf,
+ "rate:\t\t%s/sec\n"
+ "dirty:\t\t%s\n"
+ "target:\t\t%s\n"
+ "proportional:\t%s\n"
+ "integral:\t%s\n"
+ "change:\t\t%s/sec\n"
+ "next io:\t%llims\n",
+ rate, dirty, target, proportional,
+ integral, change, next_io);
+ }
+
+ sysfs_hprint(dirty_data,
+ bcache_dev_sectors_dirty(&dc->disk) << 9);
+
+ sysfs_hprint(stripe_size, ((uint64_t)dc->disk.stripe_size) << 9);
+ var_printf(partial_stripes_expensive, "%u");
+
+ var_hprint(sequential_cutoff);
+
+ sysfs_print(running, atomic_read(&dc->running));
+ sysfs_print(state, states[BDEV_STATE(&dc->sb)]);
+
+ if (attr == &sysfs_label) {
+ memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
+		buf[SB_LABEL_SIZE] = '\0';
+ strcat(buf, "\n");
+ return strlen(buf);
+ }
+
+ if (attr == &sysfs_backing_dev_name) {
+ snprintf(buf, BDEVNAME_SIZE + 1, "%pg", dc->bdev);
+ strcat(buf, "\n");
+ return strlen(buf);
+ }
+
+ if (attr == &sysfs_backing_dev_uuid) {
+ /* convert binary uuid into 36-byte string plus '\0' */
+ snprintf(buf, 36+1, "%pU", dc->sb.uuid);
+ strcat(buf, "\n");
+ return strlen(buf);
+ }
+
+#undef var
+ return 0;
+}
+SHOW_LOCKED(bch_cached_dev)
+
+STORE(__cached_dev)
+{
+ struct cached_dev *dc = container_of(kobj, struct cached_dev,
+ disk.kobj);
+ ssize_t v;
+ struct cache_set *c;
+ struct kobj_uevent_env *env;
+
+ /* no user space access if system is rebooting */
+ if (bcache_is_reboot)
+ return -EBUSY;
+
+#define d_strtoul(var) sysfs_strtoul(var, dc->var)
+#define d_strtoul_nonzero(var) sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
+#define d_strtoi_h(var) sysfs_hatoi(var, dc->var)
+
+ sysfs_strtoul(data_csum, dc->disk.data_csum);
+ d_strtoul(verify);
+ sysfs_strtoul_bool(bypass_torture_test, dc->bypass_torture_test);
+ sysfs_strtoul_bool(writeback_metadata, dc->writeback_metadata);
+ sysfs_strtoul_bool(writeback_running, dc->writeback_running);
+ sysfs_strtoul_bool(writeback_consider_fragment, dc->writeback_consider_fragment);
+ sysfs_strtoul_clamp(writeback_delay, dc->writeback_delay, 0, UINT_MAX);
+
+ sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent,
+ 0, bch_cutoff_writeback);
+
+ if (attr == &sysfs_writeback_rate) {
+ ssize_t ret;
+ long int v = atomic_long_read(&dc->writeback_rate.rate);
+
+ ret = strtoul_safe_clamp(buf, v, 1, INT_MAX);
+
+ if (!ret) {
+ atomic_long_set(&dc->writeback_rate.rate, v);
+ ret = size;
+ }
+
+ return ret;
+ }
+
+ sysfs_strtoul_clamp(writeback_rate_update_seconds,
+ dc->writeback_rate_update_seconds,
+ 1, WRITEBACK_RATE_UPDATE_SECS_MAX);
+ sysfs_strtoul_clamp(writeback_rate_i_term_inverse,
+ dc->writeback_rate_i_term_inverse,
+ 1, UINT_MAX);
+ sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
+ dc->writeback_rate_p_term_inverse,
+ 1, UINT_MAX);
+ sysfs_strtoul_clamp(writeback_rate_fp_term_low,
+ dc->writeback_rate_fp_term_low,
+ 1, dc->writeback_rate_fp_term_mid - 1);
+ sysfs_strtoul_clamp(writeback_rate_fp_term_mid,
+ dc->writeback_rate_fp_term_mid,
+ dc->writeback_rate_fp_term_low + 1,
+ dc->writeback_rate_fp_term_high - 1);
+ sysfs_strtoul_clamp(writeback_rate_fp_term_high,
+ dc->writeback_rate_fp_term_high,
+ dc->writeback_rate_fp_term_mid + 1, UINT_MAX);
+ sysfs_strtoul_clamp(writeback_rate_minimum,
+ dc->writeback_rate_minimum,
+ 1, UINT_MAX);
+
+ sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX);
+
+ if (attr == &sysfs_io_disable) {
+ int v = strtoul_or_return(buf);
+
+ dc->io_disable = v ? 1 : 0;
+ }
+
+ sysfs_strtoul_clamp(sequential_cutoff,
+ dc->sequential_cutoff,
+ 0, UINT_MAX);
+
+ if (attr == &sysfs_clear_stats)
+ bch_cache_accounting_clear(&dc->accounting);
+
+ if (attr == &sysfs_running &&
+ strtoul_or_return(buf)) {
+ v = bch_cached_dev_run(dc);
+ if (v)
+ return v;
+ }
+
+ if (attr == &sysfs_cache_mode) {
+ v = __sysfs_match_string(bch_cache_modes, -1, buf);
+ if (v < 0)
+ return v;
+
+ if ((unsigned int) v != BDEV_CACHE_MODE(&dc->sb)) {
+ SET_BDEV_CACHE_MODE(&dc->sb, v);
+ bch_write_bdev_super(dc, NULL);
+ }
+ }
+
+ if (attr == &sysfs_readahead_cache_policy) {
+ v = __sysfs_match_string(bch_reada_cache_policies, -1, buf);
+ if (v < 0)
+ return v;
+
+ if ((unsigned int) v != dc->cache_readahead_policy)
+ dc->cache_readahead_policy = v;
+ }
+
+ if (attr == &sysfs_stop_when_cache_set_failed) {
+ v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf);
+ if (v < 0)
+ return v;
+
+ dc->stop_when_cache_set_failed = v;
+ }
+
+ if (attr == &sysfs_label) {
+ if (size > SB_LABEL_SIZE)
+ return -EINVAL;
+ memcpy(dc->sb.label, buf, size);
+ if (size < SB_LABEL_SIZE)
+ dc->sb.label[size] = '\0';
+ if (size && dc->sb.label[size - 1] == '\n')
+ dc->sb.label[size - 1] = '\0';
+ bch_write_bdev_super(dc, NULL);
+ if (dc->disk.c) {
+ memcpy(dc->disk.c->uuids[dc->disk.id].label,
+ buf, SB_LABEL_SIZE);
+ bch_uuid_write(dc->disk.c);
+ }
+ env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
+ if (!env)
+ return -ENOMEM;
+ add_uevent_var(env, "DRIVER=bcache");
+ add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
+ add_uevent_var(env, "CACHED_LABEL=%s", buf);
+ kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
+ KOBJ_CHANGE,
+ env->envp);
+ kfree(env);
+ }
+
+ if (attr == &sysfs_attach) {
+ uint8_t set_uuid[16];
+
+ if (bch_parse_uuid(buf, set_uuid) < 16)
+ return -EINVAL;
+
+ v = -ENOENT;
+ list_for_each_entry(c, &bch_cache_sets, list) {
+ v = bch_cached_dev_attach(dc, c, set_uuid);
+ if (!v)
+ return size;
+ }
+ if (v == -ENOENT)
+ pr_err("Can't attach %s: cache set not found\n", buf);
+ return v;
+ }
+
+ if (attr == &sysfs_detach && dc->disk.c)
+ bch_cached_dev_detach(dc);
+
+ if (attr == &sysfs_stop)
+ bcache_device_stop(&dc->disk);
+
+ return size;
+}
+
+STORE(bch_cached_dev)
+{
+ struct cached_dev *dc = container_of(kobj, struct cached_dev,
+ disk.kobj);
+
+ /* no user space access if system is rebooting */
+ if (bcache_is_reboot)
+ return -EBUSY;
+
+ mutex_lock(&bch_register_lock);
+ size = __cached_dev_store(kobj, attr, buf, size);
+
+ if (attr == &sysfs_writeback_running) {
+ /* dc->writeback_running changed in __cached_dev_store() */
+ if (IS_ERR_OR_NULL(dc->writeback_thread)) {
+ /*
+ * reject setting it to 1 via sysfs if writeback
+ * kthread is not created yet.
+ */
+ if (dc->writeback_running) {
+ dc->writeback_running = false;
+ pr_err("%s: failed to run non-existent writeback thread\n",
+ dc->disk.disk->disk_name);
+ }
+ } else
+ /*
+ * writeback kthread will check if dc->writeback_running
+ * is true or false.
+ */
+ bch_writeback_queue(dc);
+ }
+
+ /*
+	 * Only set BCACHE_DEV_WB_RUNNING when the cached device is
+	 * attached to a cache set; otherwise it doesn't make sense.
+ */
+ if (attr == &sysfs_writeback_percent)
+ if ((dc->disk.c != NULL) &&
+ (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)))
+ schedule_delayed_work(&dc->writeback_rate_update,
+ dc->writeback_rate_update_seconds * HZ);
+
+ mutex_unlock(&bch_register_lock);
+ return size;
+}
+
+static struct attribute *bch_cached_dev_attrs[] = {
+ &sysfs_attach,
+ &sysfs_detach,
+ &sysfs_stop,
+#if 0
+ &sysfs_data_csum,
+#endif
+ &sysfs_cache_mode,
+ &sysfs_readahead_cache_policy,
+ &sysfs_stop_when_cache_set_failed,
+ &sysfs_writeback_metadata,
+ &sysfs_writeback_running,
+ &sysfs_writeback_delay,
+ &sysfs_writeback_percent,
+ &sysfs_writeback_rate,
+ &sysfs_writeback_consider_fragment,
+ &sysfs_writeback_rate_update_seconds,
+ &sysfs_writeback_rate_i_term_inverse,
+ &sysfs_writeback_rate_p_term_inverse,
+ &sysfs_writeback_rate_fp_term_low,
+ &sysfs_writeback_rate_fp_term_mid,
+ &sysfs_writeback_rate_fp_term_high,
+ &sysfs_writeback_rate_minimum,
+ &sysfs_writeback_rate_debug,
+ &sysfs_io_errors,
+ &sysfs_io_error_limit,
+ &sysfs_io_disable,
+ &sysfs_dirty_data,
+ &sysfs_stripe_size,
+ &sysfs_partial_stripes_expensive,
+ &sysfs_sequential_cutoff,
+ &sysfs_clear_stats,
+ &sysfs_running,
+ &sysfs_state,
+ &sysfs_label,
+#ifdef CONFIG_BCACHE_DEBUG
+ &sysfs_verify,
+ &sysfs_bypass_torture_test,
+#endif
+ &sysfs_backing_dev_name,
+ &sysfs_backing_dev_uuid,
+ NULL
+};
+ATTRIBUTE_GROUPS(bch_cached_dev);
+KTYPE(bch_cached_dev);
+
+SHOW(bch_flash_dev)
+{
+ struct bcache_device *d = container_of(kobj, struct bcache_device,
+ kobj);
+ struct uuid_entry *u = &d->c->uuids[d->id];
+
+ sysfs_printf(data_csum, "%i", d->data_csum);
+ sysfs_hprint(size, u->sectors << 9);
+
+ if (attr == &sysfs_label) {
+ memcpy(buf, u->label, SB_LABEL_SIZE);
+		buf[SB_LABEL_SIZE] = '\0';
+ strcat(buf, "\n");
+ return strlen(buf);
+ }
+
+ return 0;
+}
+
+STORE(__bch_flash_dev)
+{
+ struct bcache_device *d = container_of(kobj, struct bcache_device,
+ kobj);
+ struct uuid_entry *u = &d->c->uuids[d->id];
+
+ /* no user space access if system is rebooting */
+ if (bcache_is_reboot)
+ return -EBUSY;
+
+ sysfs_strtoul(data_csum, d->data_csum);
+
+ if (attr == &sysfs_size) {
+ uint64_t v;
+
+ strtoi_h_or_return(buf, v);
+
+ u->sectors = v >> 9;
+ bch_uuid_write(d->c);
+ set_capacity(d->disk, u->sectors);
+ }
+
+ if (attr == &sysfs_label) {
+ memcpy(u->label, buf, SB_LABEL_SIZE);
+ bch_uuid_write(d->c);
+ }
+
+ if (attr == &sysfs_unregister) {
+ set_bit(BCACHE_DEV_DETACHING, &d->flags);
+ bcache_device_stop(d);
+ }
+
+ return size;
+}
+STORE_LOCKED(bch_flash_dev)
+
+static struct attribute *bch_flash_dev_attrs[] = {
+ &sysfs_unregister,
+#if 0
+ &sysfs_data_csum,
+#endif
+ &sysfs_label,
+ &sysfs_size,
+ NULL
+};
+ATTRIBUTE_GROUPS(bch_flash_dev);
+KTYPE(bch_flash_dev);
+
+struct bset_stats_op {
+ struct btree_op op;
+ size_t nodes;
+ struct bset_stats stats;
+};
+
+static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
+{
+ struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);
+
+ op->nodes++;
+ bch_btree_keys_stats(&b->keys, &op->stats);
+
+ return MAP_CONTINUE;
+}
+
+static int bch_bset_print_stats(struct cache_set *c, char *buf)
+{
+ struct bset_stats_op op;
+ int ret;
+
+ memset(&op, 0, sizeof(op));
+ bch_btree_op_init(&op.op, -1);
+
+ ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
+ if (ret < 0)
+ return ret;
+
+ return snprintf(buf, PAGE_SIZE,
+ "btree nodes: %zu\n"
+ "written sets: %zu\n"
+ "unwritten sets: %zu\n"
+ "written key bytes: %zu\n"
+ "unwritten key bytes: %zu\n"
+ "floats: %zu\n"
+ "failed: %zu\n",
+ op.nodes,
+ op.stats.sets_written, op.stats.sets_unwritten,
+ op.stats.bytes_written, op.stats.bytes_unwritten,
+ op.stats.floats, op.stats.failed);
+}
+
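+/*
+ * Take a read lock on the btree root (retrying if the root changes
+ * underneath us) and report how much of the node is occupied by keys,
+ * as a percentage.
+ */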
+static unsigned int bch_root_usage(struct cache_set *c)
+{
+ unsigned int bytes = 0;
+ struct bkey *k;
+ struct btree *b;
+ struct btree_iter iter;
+
+ goto lock_root;
+
+ do {
+ rw_unlock(false, b);
+lock_root:
+ b = c->root;
+ rw_lock(false, b, b->level);
+ } while (b != c->root);
+
+ for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
+ bytes += bkey_bytes(k);
+
+ rw_unlock(false, b);
+
+ return (bytes * 100) / btree_bytes(c);
+}
+
+static size_t bch_cache_size(struct cache_set *c)
+{
+ size_t ret = 0;
+ struct btree *b;
+
+ mutex_lock(&c->bucket_lock);
+ list_for_each_entry(b, &c->btree_cache, list)
+ ret += 1 << (b->keys.page_order + PAGE_SHIFT);
+
+ mutex_unlock(&c->bucket_lock);
+ return ret;
+}
+
+static unsigned int bch_cache_max_chain(struct cache_set *c)
+{
+ unsigned int ret = 0;
+ struct hlist_head *h;
+
+ mutex_lock(&c->bucket_lock);
+
+ for (h = c->bucket_hash;
+ h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
+ h++) {
+ unsigned int i = 0;
+ struct hlist_node *p;
+
+ hlist_for_each(p, h)
+ i++;
+
+ ret = max(ret, i);
+ }
+
+ mutex_unlock(&c->bucket_lock);
+ return ret;
+}
+
+static unsigned int bch_btree_used(struct cache_set *c)
+{
+ return div64_u64(c->gc_stats.key_bytes * 100,
+ (c->gc_stats.nodes ?: 1) * btree_bytes(c));
+}
+
+static unsigned int bch_average_key_size(struct cache_set *c)
+{
+ return c->gc_stats.nkeys
+ ? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
+ : 0;
+}
+
+SHOW(__bch_cache_set)
+{
+ struct cache_set *c = container_of(kobj, struct cache_set, kobj);
+
+ sysfs_print(synchronous, CACHE_SYNC(&c->cache->sb));
+ sysfs_print(journal_delay_ms, c->journal_delay_ms);
+ sysfs_hprint(bucket_size, bucket_bytes(c->cache));
+ sysfs_hprint(block_size, block_bytes(c->cache));
+ sysfs_print(tree_depth, c->root->level);
+ sysfs_print(root_usage_percent, bch_root_usage(c));
+
+ sysfs_hprint(btree_cache_size, bch_cache_size(c));
+ sysfs_print(btree_cache_max_chain, bch_cache_max_chain(c));
+ sysfs_print(cache_available_percent, 100 - c->gc_stats.in_use);
+
+ sysfs_print_time_stats(&c->btree_gc_time, btree_gc, sec, ms);
+ sysfs_print_time_stats(&c->btree_split_time, btree_split, sec, us);
+ sysfs_print_time_stats(&c->sort.time, btree_sort, ms, us);
+ sysfs_print_time_stats(&c->btree_read_time, btree_read, ms, us);
+
+ sysfs_print(btree_used_percent, bch_btree_used(c));
+ sysfs_print(btree_nodes, c->gc_stats.nodes);
+ sysfs_hprint(average_key_size, bch_average_key_size(c));
+
+ sysfs_print(cache_read_races,
+ atomic_long_read(&c->cache_read_races));
+
+ sysfs_print(reclaim,
+ atomic_long_read(&c->reclaim));
+
+ sysfs_print(reclaimed_journal_buckets,
+ atomic_long_read(&c->reclaimed_journal_buckets));
+
+ sysfs_print(flush_write,
+ atomic_long_read(&c->flush_write));
+
+ sysfs_print(writeback_keys_done,
+ atomic_long_read(&c->writeback_keys_done));
+ sysfs_print(writeback_keys_failed,
+ atomic_long_read(&c->writeback_keys_failed));
+
+ if (attr == &sysfs_errors)
+ return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
+ c->on_error);
+
+ /* See count_io_errors for why 88 */
+ sysfs_print(io_error_halflife, c->error_decay * 88);
+ sysfs_print(io_error_limit, c->error_limit);
+
+ sysfs_hprint(congested,
+ ((uint64_t) bch_get_congested(c)) << 9);
+ sysfs_print(congested_read_threshold_us,
+ c->congested_read_threshold_us);
+ sysfs_print(congested_write_threshold_us,
+ c->congested_write_threshold_us);
+
+ sysfs_print(cutoff_writeback, bch_cutoff_writeback);
+ sysfs_print(cutoff_writeback_sync, bch_cutoff_writeback_sync);
+
+ sysfs_print(active_journal_entries, fifo_used(&c->journal.pin));
+ sysfs_printf(verify, "%i", c->verify);
+ sysfs_printf(key_merging_disabled, "%i", c->key_merging_disabled);
+ sysfs_printf(expensive_debug_checks,
+ "%i", c->expensive_debug_checks);
+ sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite);
+ sysfs_printf(btree_shrinker_disabled, "%i", c->shrinker_disabled);
+ sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);
+ sysfs_printf(idle_max_writeback_rate, "%i",
+ c->idle_max_writeback_rate_enabled);
+ sysfs_printf(gc_after_writeback, "%i", c->gc_after_writeback);
+ sysfs_printf(io_disable, "%i",
+ test_bit(CACHE_SET_IO_DISABLE, &c->flags));
+
+ if (attr == &sysfs_bset_tree_stats)
+ return bch_bset_print_stats(c, buf);
+
+ if (attr == &sysfs_feature_compat)
+ return bch_print_cache_set_feature_compat(c, buf, PAGE_SIZE);
+ if (attr == &sysfs_feature_ro_compat)
+ return bch_print_cache_set_feature_ro_compat(c, buf, PAGE_SIZE);
+ if (attr == &sysfs_feature_incompat)
+ return bch_print_cache_set_feature_incompat(c, buf, PAGE_SIZE);
+
+ return 0;
+}
+SHOW_LOCKED(bch_cache_set)
+
+STORE(__bch_cache_set)
+{
+ struct cache_set *c = container_of(kobj, struct cache_set, kobj);
+ ssize_t v;
+
+ /* no user space access if system is rebooting */
+ if (bcache_is_reboot)
+ return -EBUSY;
+
+ if (attr == &sysfs_unregister)
+ bch_cache_set_unregister(c);
+
+ if (attr == &sysfs_stop)
+ bch_cache_set_stop(c);
+
+ if (attr == &sysfs_synchronous) {
+ bool sync = strtoul_or_return(buf);
+
+ if (sync != CACHE_SYNC(&c->cache->sb)) {
+ SET_CACHE_SYNC(&c->cache->sb, sync);
+ bcache_write_super(c);
+ }
+ }
+
+ if (attr == &sysfs_flash_vol_create) {
+ int r;
+ uint64_t v;
+
+ strtoi_h_or_return(buf, v);
+
+ r = bch_flash_dev_create(c, v);
+ if (r)
+ return r;
+ }
+
+ if (attr == &sysfs_clear_stats) {
+ atomic_long_set(&c->writeback_keys_done, 0);
+ atomic_long_set(&c->writeback_keys_failed, 0);
+
+ memset(&c->gc_stats, 0, sizeof(struct gc_stat));
+ bch_cache_accounting_clear(&c->accounting);
+ }
+
+ if (attr == &sysfs_trigger_gc)
+ force_wake_up_gc(c);
+
+ if (attr == &sysfs_prune_cache) {
+ struct shrink_control sc;
+
+ sc.gfp_mask = GFP_KERNEL;
+ sc.nr_to_scan = strtoul_or_return(buf);
+ c->shrink.scan_objects(&c->shrink, &sc);
+ }
+
+ sysfs_strtoul_clamp(congested_read_threshold_us,
+ c->congested_read_threshold_us,
+ 0, UINT_MAX);
+ sysfs_strtoul_clamp(congested_write_threshold_us,
+ c->congested_write_threshold_us,
+ 0, UINT_MAX);
+
+ if (attr == &sysfs_errors) {
+ v = __sysfs_match_string(error_actions, -1, buf);
+ if (v < 0)
+ return v;
+
+ c->on_error = v;
+ }
+
+ sysfs_strtoul_clamp(io_error_limit, c->error_limit, 0, UINT_MAX);
+
+ /* See count_io_errors() for why 88 */
+ if (attr == &sysfs_io_error_halflife) {
+ unsigned long v = 0;
+ ssize_t ret;
+
+ ret = strtoul_safe_clamp(buf, v, 0, UINT_MAX);
+ if (!ret) {
+ c->error_decay = v / 88;
+ return size;
+ }
+ return ret;
+ }
+
+ if (attr == &sysfs_io_disable) {
+ v = strtoul_or_return(buf);
+ if (v) {
+ if (test_and_set_bit(CACHE_SET_IO_DISABLE,
+ &c->flags))
+ pr_warn("CACHE_SET_IO_DISABLE already set\n");
+ } else {
+ if (!test_and_clear_bit(CACHE_SET_IO_DISABLE,
+ &c->flags))
+ pr_warn("CACHE_SET_IO_DISABLE already cleared\n");
+ }
+ }
+
+ sysfs_strtoul_clamp(journal_delay_ms,
+ c->journal_delay_ms,
+ 0, USHRT_MAX);
+ sysfs_strtoul_bool(verify, c->verify);
+ sysfs_strtoul_bool(key_merging_disabled, c->key_merging_disabled);
+ sysfs_strtoul(expensive_debug_checks, c->expensive_debug_checks);
+ sysfs_strtoul_bool(gc_always_rewrite, c->gc_always_rewrite);
+ sysfs_strtoul_bool(btree_shrinker_disabled, c->shrinker_disabled);
+ sysfs_strtoul_bool(copy_gc_enabled, c->copy_gc_enabled);
+ sysfs_strtoul_bool(idle_max_writeback_rate,
+ c->idle_max_writeback_rate_enabled);
+
+ /*
+	 * Writing gc_after_writeback here may overwrite an already-set
+	 * BCH_DO_AUTO_GC; that doesn't matter because the flag will be
+	 * set again at the next opportunity.
+ */
+ sysfs_strtoul_clamp(gc_after_writeback, c->gc_after_writeback, 0, 1);
+
+ return size;
+}
+STORE_LOCKED(bch_cache_set)
+
+SHOW(bch_cache_set_internal)
+{
+ struct cache_set *c = container_of(kobj, struct cache_set, internal);
+
+ return bch_cache_set_show(&c->kobj, attr, buf);
+}
+
+STORE(bch_cache_set_internal)
+{
+ struct cache_set *c = container_of(kobj, struct cache_set, internal);
+
+ /* no user space access if system is rebooting */
+ if (bcache_is_reboot)
+ return -EBUSY;
+
+ return bch_cache_set_store(&c->kobj, attr, buf, size);
+}
+
+static void bch_cache_set_internal_release(struct kobject *k)
+{
+}
+
+static struct attribute *bch_cache_set_attrs[] = {
+ &sysfs_unregister,
+ &sysfs_stop,
+ &sysfs_synchronous,
+ &sysfs_journal_delay_ms,
+ &sysfs_flash_vol_create,
+
+ &sysfs_bucket_size,
+ &sysfs_block_size,
+ &sysfs_tree_depth,
+ &sysfs_root_usage_percent,
+ &sysfs_btree_cache_size,
+ &sysfs_cache_available_percent,
+
+ &sysfs_average_key_size,
+
+ &sysfs_errors,
+ &sysfs_io_error_limit,
+ &sysfs_io_error_halflife,
+ &sysfs_congested,
+ &sysfs_congested_read_threshold_us,
+ &sysfs_congested_write_threshold_us,
+ &sysfs_clear_stats,
+ NULL
+};
+ATTRIBUTE_GROUPS(bch_cache_set);
+KTYPE(bch_cache_set);
+
+static struct attribute *bch_cache_set_internal_attrs[] = {
+ &sysfs_active_journal_entries,
+
+ sysfs_time_stats_attribute_list(btree_gc, sec, ms)
+ sysfs_time_stats_attribute_list(btree_split, sec, us)
+ sysfs_time_stats_attribute_list(btree_sort, ms, us)
+ sysfs_time_stats_attribute_list(btree_read, ms, us)
+
+ &sysfs_btree_nodes,
+ &sysfs_btree_used_percent,
+ &sysfs_btree_cache_max_chain,
+
+ &sysfs_bset_tree_stats,
+ &sysfs_cache_read_races,
+ &sysfs_reclaim,
+ &sysfs_reclaimed_journal_buckets,
+ &sysfs_flush_write,
+ &sysfs_writeback_keys_done,
+ &sysfs_writeback_keys_failed,
+
+ &sysfs_trigger_gc,
+ &sysfs_prune_cache,
+#ifdef CONFIG_BCACHE_DEBUG
+ &sysfs_verify,
+ &sysfs_key_merging_disabled,
+ &sysfs_expensive_debug_checks,
+#endif
+ &sysfs_gc_always_rewrite,
+ &sysfs_btree_shrinker_disabled,
+ &sysfs_copy_gc_enabled,
+ &sysfs_idle_max_writeback_rate,
+ &sysfs_gc_after_writeback,
+ &sysfs_io_disable,
+ &sysfs_cutoff_writeback,
+ &sysfs_cutoff_writeback_sync,
+ &sysfs_feature_compat,
+ &sysfs_feature_ro_compat,
+ &sysfs_feature_incompat,
+ NULL
+};
+ATTRIBUTE_GROUPS(bch_cache_set_internal);
+KTYPE(bch_cache_set_internal);
+
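+/*
+ * sort() comparator for bucket priorities, descending order; the
+ * cond_resched() keeps long sorts from hogging the CPU.
+ */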
+static int __bch_cache_cmp(const void *l, const void *r)
+{
+ cond_resched();
+ return *((uint16_t *)r) - *((uint16_t *)l);
+}
+
+SHOW(__bch_cache)
+{
+ struct cache *ca = container_of(kobj, struct cache, kobj);
+
+ sysfs_hprint(bucket_size, bucket_bytes(ca));
+ sysfs_hprint(block_size, block_bytes(ca));
+ sysfs_print(nbuckets, ca->sb.nbuckets);
+ sysfs_print(discard, ca->discard);
+ sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
+ sysfs_hprint(btree_written,
+ atomic_long_read(&ca->btree_sectors_written) << 9);
+ sysfs_hprint(metadata_written,
+ (atomic_long_read(&ca->meta_sectors_written) +
+ atomic_long_read(&ca->btree_sectors_written)) << 9);
+
+ sysfs_print(io_errors,
+ atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);
+
+ if (attr == &sysfs_cache_replacement_policy)
+ return bch_snprint_string_list(buf, PAGE_SIZE,
+ cache_replacement_policies,
+ CACHE_REPLACEMENT(&ca->sb));
+
+ if (attr == &sysfs_priority_stats) {
+ struct bucket *b;
+ size_t n = ca->sb.nbuckets, i;
+ size_t unused = 0, available = 0, dirty = 0, meta = 0;
+ uint64_t sum = 0;
+ /* Compute 31 quantiles */
+ uint16_t q[31], *p, *cached;
+ ssize_t ret;
+
+ cached = p = vmalloc(array_size(sizeof(uint16_t),
+ ca->sb.nbuckets));
+ if (!p)
+ return -ENOMEM;
+
+ mutex_lock(&ca->set->bucket_lock);
+ for_each_bucket(b, ca) {
+ if (!GC_SECTORS_USED(b))
+ unused++;
+ if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
+ available++;
+ if (GC_MARK(b) == GC_MARK_DIRTY)
+ dirty++;
+ if (GC_MARK(b) == GC_MARK_METADATA)
+ meta++;
+ }
+
+ for (i = ca->sb.first_bucket; i < n; i++)
+ p[i] = ca->buckets[i].prio;
+ mutex_unlock(&ca->set->bucket_lock);
+
+ sort(p, n, sizeof(uint16_t), __bch_cache_cmp, NULL);
+
+ while (n &&
+ !cached[n - 1])
+ --n;
+
+ while (cached < p + n &&
+ *cached == BTREE_PRIO) {
+ cached++;
+ n--;
+ }
+
+ for (i = 0; i < n; i++)
+ sum += INITIAL_PRIO - cached[i];
+
+ if (n)
+ sum = div64_u64(sum, n);
+
+ for (i = 0; i < ARRAY_SIZE(q); i++)
+ q[i] = INITIAL_PRIO - cached[n * (i + 1) /
+ (ARRAY_SIZE(q) + 1)];
+
+ vfree(p);
+
+ ret = sysfs_emit(buf,
+ "Unused: %zu%%\n"
+ "Clean: %zu%%\n"
+ "Dirty: %zu%%\n"
+ "Metadata: %zu%%\n"
+ "Average: %llu\n"
+ "Sectors per Q: %zu\n"
+ "Quantiles: [",
+ unused * 100 / (size_t) ca->sb.nbuckets,
+ available * 100 / (size_t) ca->sb.nbuckets,
+ dirty * 100 / (size_t) ca->sb.nbuckets,
+ meta * 100 / (size_t) ca->sb.nbuckets, sum,
+ n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));
+
+ for (i = 0; i < ARRAY_SIZE(q); i++)
+ ret += sysfs_emit_at(buf, ret, "%u ", q[i]);
+ ret--;
+
+ ret += sysfs_emit_at(buf, ret, "]\n");
+
+ return ret;
+ }
+
+ return 0;
+}
+SHOW_LOCKED(bch_cache)
+
+STORE(__bch_cache)
+{
+ struct cache *ca = container_of(kobj, struct cache, kobj);
+ ssize_t v;
+
+ /* no user space access if system is rebooting */
+ if (bcache_is_reboot)
+ return -EBUSY;
+
+ if (attr == &sysfs_discard) {
+ bool v = strtoul_or_return(buf);
+
+ if (bdev_max_discard_sectors(ca->bdev))
+ ca->discard = v;
+
+ if (v != CACHE_DISCARD(&ca->sb)) {
+ SET_CACHE_DISCARD(&ca->sb, v);
+ bcache_write_super(ca->set);
+ }
+ }
+
+ if (attr == &sysfs_cache_replacement_policy) {
+ v = __sysfs_match_string(cache_replacement_policies, -1, buf);
+ if (v < 0)
+ return v;
+
+ if ((unsigned int) v != CACHE_REPLACEMENT(&ca->sb)) {
+ mutex_lock(&ca->set->bucket_lock);
+ SET_CACHE_REPLACEMENT(&ca->sb, v);
+ mutex_unlock(&ca->set->bucket_lock);
+
+ bcache_write_super(ca->set);
+ }
+ }
+
+ if (attr == &sysfs_clear_stats) {
+ atomic_long_set(&ca->sectors_written, 0);
+ atomic_long_set(&ca->btree_sectors_written, 0);
+ atomic_long_set(&ca->meta_sectors_written, 0);
+ atomic_set(&ca->io_count, 0);
+ atomic_set(&ca->io_errors, 0);
+ }
+
+ return size;
+}
+STORE_LOCKED(bch_cache)
+
+static struct attribute *bch_cache_attrs[] = {
+ &sysfs_bucket_size,
+ &sysfs_block_size,
+ &sysfs_nbuckets,
+ &sysfs_priority_stats,
+ &sysfs_discard,
+ &sysfs_written,
+ &sysfs_btree_written,
+ &sysfs_metadata_written,
+ &sysfs_io_errors,
+ &sysfs_clear_stats,
+ &sysfs_cache_replacement_policy,
+ NULL
+};
+ATTRIBUTE_GROUPS(bch_cache);
+KTYPE(bch_cache);
diff --git a/drivers/md/bcache/sysfs.h b/drivers/md/bcache/sysfs.h
new file mode 100644
index 0000000000..65b8bd975a
--- /dev/null
+++ b/drivers/md/bcache/sysfs.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHE_SYSFS_H_
+#define _BCACHE_SYSFS_H_
+
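+/*
+ * KTYPE(type) defines type##_ktype, wiring up type##_show, type##_store,
+ * type##_release and type##_groups for a kobject type.
+ */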
+#define KTYPE(type) \
+const struct kobj_type type ## _ktype = { \
+ .release = type ## _release, \
+ .sysfs_ops = &((const struct sysfs_ops) { \
+ .show = type ## _show, \
+ .store = type ## _store \
+ }), \
+ .default_groups = type ## _groups \
+}
+
+#define SHOW(fn) \
+static ssize_t fn ## _show(struct kobject *kobj, struct attribute *attr,\
+ char *buf) \
+
+#define STORE(fn) \
+static ssize_t fn ## _store(struct kobject *kobj, struct attribute *attr,\
+ const char *buf, size_t size) \
+
+#define SHOW_LOCKED(fn) \
+SHOW(fn) \
+{ \
+ ssize_t ret; \
+ mutex_lock(&bch_register_lock); \
+ ret = __ ## fn ## _show(kobj, attr, buf); \
+ mutex_unlock(&bch_register_lock); \
+ return ret; \
+}
+
+#define STORE_LOCKED(fn) \
+STORE(fn) \
+{ \
+ ssize_t ret; \
+ mutex_lock(&bch_register_lock); \
+ ret = __ ## fn ## _store(kobj, attr, buf, size); \
+ mutex_unlock(&bch_register_lock); \
+ return ret; \
+}
+
+#define __sysfs_attribute(_name, _mode) \
+ static struct attribute sysfs_##_name = \
+ { .name = #_name, .mode = _mode }
+
+#define write_attribute(n) __sysfs_attribute(n, 0200)
+#define read_attribute(n) __sysfs_attribute(n, 0444)
+#define rw_attribute(n) __sysfs_attribute(n, 0644)
+
+#define sysfs_printf(file, fmt, ...) \
+do { \
+ if (attr == &sysfs_ ## file) \
+ return sysfs_emit(buf, fmt "\n", __VA_ARGS__); \
+} while (0)
+
+#define sysfs_print(file, var) \
+do { \
+ if (attr == &sysfs_ ## file) \
+ return sysfs_emit(buf, \
+ __builtin_types_compatible_p(typeof(var), int) \
+ ? "%i\n" : \
+ __builtin_types_compatible_p(typeof(var), unsigned int) \
+ ? "%u\n" : \
+ __builtin_types_compatible_p(typeof(var), long) \
+ ? "%li\n" : \
+ __builtin_types_compatible_p(typeof(var), unsigned long)\
+ ? "%lu\n" : \
+ __builtin_types_compatible_p(typeof(var), int64_t) \
+ ? "%lli\n" : \
+ __builtin_types_compatible_p(typeof(var), uint64_t) \
+ ? "%llu\n" : \
+ __builtin_types_compatible_p(typeof(var), const char *) \
+ ? "%s\n" : "%i\n", var); \
+} while (0)
+
+#define sysfs_hprint(file, val) \
+do { \
+ if (attr == &sysfs_ ## file) { \
+ ssize_t ret = bch_hprint(buf, val); \
+ strcat(buf, "\n"); \
+ return ret + 1; \
+ } \
+} while (0)
+
+#define var_printf(_var, fmt) sysfs_printf(_var, fmt, var(_var))
+#define var_print(_var) sysfs_print(_var, var(_var))
+#define var_hprint(_var) sysfs_hprint(_var, var(_var))
+
+#define sysfs_strtoul(file, var) \
+do { \
+ if (attr == &sysfs_ ## file) \
+ return strtoul_safe(buf, var) ?: (ssize_t) size; \
+} while (0)
+
+#define sysfs_strtoul_bool(file, var) \
+do { \
+ if (attr == &sysfs_ ## file) { \
+ unsigned long v = strtoul_or_return(buf); \
+ \
+ var = v ? 1 : 0; \
+ return size; \
+ } \
+} while (0)
+
+#define sysfs_strtoul_clamp(file, var, min, max) \
+do { \
+ if (attr == &sysfs_ ## file) { \
+ unsigned long v = 0; \
+ ssize_t ret; \
+ ret = strtoul_safe_clamp(buf, v, min, max); \
+ if (!ret) { \
+ var = v; \
+ return size; \
+ } \
+ return ret; \
+ } \
+} while (0)
+
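+/*
+ * Parse an unsigned long from a sysfs buffer; on parse failure this
+ * returns the error from the *calling* store function.
+ */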
+#define strtoul_or_return(cp) \
+({ \
+ unsigned long _v; \
+ int _r = kstrtoul(cp, 10, &_v); \
+ if (_r) \
+ return _r; \
+ _v; \
+})
+
+#define strtoi_h_or_return(cp, v) \
+do { \
+ int _r = strtoi_h(cp, &v); \
+ if (_r) \
+ return _r; \
+} while (0)
+
+#define sysfs_hatoi(file, var) \
+do { \
+ if (attr == &sysfs_ ## file) \
+ return strtoi_h(buf, &var) ?: (ssize_t) size; \
+} while (0)
+
+#endif /* _BCACHE_SYSFS_H_ */
diff --git a/drivers/md/bcache/trace.c b/drivers/md/bcache/trace.c
new file mode 100644
index 0000000000..a9a73f560c
--- /dev/null
+++ b/drivers/md/bcache/trace.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "bcache.h"
+#include "btree.h"
+
+#include <linux/blktrace_api.h>
+#include <linux/module.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/bcache.h>
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_request_start);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_request_end);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_bypass_sequential);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_bypass_congested);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_read);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_write);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_read_retry);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_cache_insert);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_journal_replay_key);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_journal_write);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_journal_full);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_journal_entry_full);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_cache_cannibalize);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_read);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_write);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_node_alloc);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_node_alloc_fail);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_node_free);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_gc_coalesce);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_gc_start);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_gc_end);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_gc_copy);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_gc_copy_collision);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_insert_key);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_node_split);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_node_compact);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_set_root);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_invalidate);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_alloc_fail);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_writeback);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_writeback_collision);
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
new file mode 100644
index 0000000000..ae380bc399
--- /dev/null
+++ b/drivers/md/bcache/util.c
@@ -0,0 +1,287 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * random utility code, for bcache but in theory not specific to bcache
+ *
+ * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
+ * Copyright 2012 Google, Inc.
+ */
+
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/types.h>
+#include <linux/sched/clock.h>
+
+#include "util.h"
+
+#define simple_strtoint(c, end, base) simple_strtol(c, end, base)
+#define simple_strtouint(c, end, base) simple_strtoul(c, end, base)
+
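+/*
+ * STRTO_H() generates bch_strtoint_h() and friends: integer parsers that
+ * accept human-readable size suffixes (k, m, g, t, p, e, z, y) and
+ * multiply by 1024 per suffix level, with overflow checking.
+ */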
+#define STRTO_H(name, type) \
+int bch_ ## name ## _h(const char *cp, type *res) \
+{ \
+ int u = 0; \
+ char *e; \
+ type i = simple_ ## name(cp, &e, 10); \
+ \
+ switch (tolower(*e)) { \
+ default: \
+ return -EINVAL; \
+ case 'y': \
+ case 'z': \
+ u++; \
+ fallthrough; \
+ case 'e': \
+ u++; \
+ fallthrough; \
+ case 'p': \
+ u++; \
+ fallthrough; \
+ case 't': \
+ u++; \
+ fallthrough; \
+ case 'g': \
+ u++; \
+ fallthrough; \
+ case 'm': \
+ u++; \
+ fallthrough; \
+ case 'k': \
+ u++; \
+ if (e++ == cp) \
+ return -EINVAL; \
+ fallthrough; \
+ case '\n': \
+ case '\0': \
+ if (*e == '\n') \
+ e++; \
+ } \
+ \
+ if (*e) \
+ return -EINVAL; \
+ \
+ while (u--) { \
+ if ((type) ~0 > 0 && \
+ (type) ~0 / 1024 <= i) \
+ return -EINVAL; \
+ if ((i > 0 && ANYSINT_MAX(type) / 1024 < i) || \
+ (i < 0 && -ANYSINT_MAX(type) / 1024 > i)) \
+ return -EINVAL; \
+ i *= 1024; \
+ } \
+ \
+ *res = i; \
+ return 0; \
+} \
+
+STRTO_H(strtoint, int)
+STRTO_H(strtouint, unsigned int)
+STRTO_H(strtoll, long long)
+STRTO_H(strtoull, unsigned long long)
+
+/**
+ * bch_hprint - formats @v to human readable string for sysfs.
+ * @buf: the (at least 8 byte) buffer to format the result into.
+ * @v: signed 64 bit integer
+ *
+ * Returns the number of bytes used by format.
+ */
+ssize_t bch_hprint(char *buf, int64_t v)
+{
+ static const char units[] = "?kMGTPEZY";
+ int u = 0, t;
+
+ uint64_t q;
+
+ if (v < 0)
+ q = -v;
+ else
+ q = v;
+
+ /* For as long as the number is more than 3 digits, but at least
+ * once, shift right / divide by 1024. Keep the remainder for
+ * a digit after the decimal point.
+ */
+ do {
+ u++;
+
+ t = q & ~(~0 << 10);
+ q >>= 10;
+ } while (q >= 1000);
+
+ if (v < 0)
+ /* '-', up to 3 digits, '.', 1 digit, 1 character, null;
+ * yields 8 bytes.
+ */
+ return sprintf(buf, "-%llu.%i%c", q, t * 10 / 1024, units[u]);
+ else
+ return sprintf(buf, "%llu.%i%c", q, t * 10 / 1024, units[u]);
+}
+
+bool bch_is_zero(const char *p, size_t n)
+{
+ size_t i;
+
+ for (i = 0; i < n; i++)
+ if (p[i])
+ return false;
+ return true;
+}
+
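+/*
+ * Parse a textual UUID (hex digits; '-' and ':' are skipped) into a
+ * 16-byte binary buffer. Returns the number of input characters
+ * consumed.
+ */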
+int bch_parse_uuid(const char *s, char *uuid)
+{
+ size_t i, j, x;
+
+ memset(uuid, 0, 16);
+
+ for (i = 0, j = 0;
+ i < strspn(s, "-0123456789:ABCDEFabcdef") && j < 32;
+ i++) {
+ x = s[i] | 32;
+
+ switch (x) {
+ case '0'...'9':
+ x -= '0';
+ break;
+ case 'a'...'f':
+ x -= 'a' - 10;
+ break;
+ default:
+ continue;
+ }
+
+ if (!(j & 1))
+ x <<= 4;
+ uuid[j++ >> 1] |= x;
+ }
+ return i;
+}
+
+void bch_time_stats_update(struct time_stats *stats, uint64_t start_time)
+{
+ uint64_t now, duration, last;
+
+ spin_lock(&stats->lock);
+
+ now = local_clock();
+ duration = time_after64(now, start_time)
+ ? now - start_time : 0;
+ last = time_after64(now, stats->last)
+ ? now - stats->last : 0;
+
+ stats->max_duration = max(stats->max_duration, duration);
+
+ if (stats->last) {
+ ewma_add(stats->average_duration, duration, 8, 8);
+
+ if (stats->average_frequency)
+ ewma_add(stats->average_frequency, last, 8, 8);
+ else
+ stats->average_frequency = last << 8;
+ } else {
+ stats->average_duration = duration << 8;
+ }
+
+ stats->last = now ?: 1;
+
+ spin_unlock(&stats->lock);
+}
+
+/**
+ * bch_next_delay() - update ratelimiting statistics and calculate next delay
+ * @d: the struct bch_ratelimit to update
+ * @done: the amount of work done, in arbitrary units
+ *
+ * Increment @d by the amount of work done, and return how long to delay in
+ * jiffies until the next time to do some work.
+ */
+uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
+{
+ uint64_t now = local_clock();
+
+ d->next += div_u64(done * NSEC_PER_SEC, atomic_long_read(&d->rate));
+
+ /* Bound the time. Don't let us fall further than 2 seconds behind
+ * (this prevents unnecessary backlog that would make it impossible
+ * to catch up). If we're ahead of the desired writeback rate,
+ * don't let us sleep more than 2.5 seconds (so we can notice/respond
+ * if the control system tells us to speed up!).
+ */
+ if (time_before64(now + NSEC_PER_SEC * 5LLU / 2LLU, d->next))
+ d->next = now + NSEC_PER_SEC * 5LLU / 2LLU;
+
+ if (time_after64(now - NSEC_PER_SEC * 2, d->next))
+ d->next = now - NSEC_PER_SEC * 2;
+
+ return time_after64(d->next, now)
+ ? div_u64(d->next - now, NSEC_PER_SEC / HZ)
+ : 0;
+}
+
+/*
+ * Generally it isn't good to access .bi_io_vec and .bi_vcnt directly;
+ * the preferred way is bio_add_page(). But in this case bch_bio_map()
+ * assumes that the bvec table is empty, so it is safe to access
+ * .bi_vcnt & .bi_io_vec this way even after multipage bvecs are
+ * supported.
+ */
+void bch_bio_map(struct bio *bio, void *base)
+{
+ size_t size = bio->bi_iter.bi_size;
+ struct bio_vec *bv = bio->bi_io_vec;
+
+ BUG_ON(!bio->bi_iter.bi_size);
+ BUG_ON(bio->bi_vcnt);
+
+ bv->bv_offset = base ? offset_in_page(base) : 0;
+ goto start;
+
+ for (; size; bio->bi_vcnt++, bv++) {
+ bv->bv_offset = 0;
+start: bv->bv_len = min_t(size_t, PAGE_SIZE - bv->bv_offset,
+ size);
+ if (base) {
+ bv->bv_page = is_vmalloc_addr(base)
+ ? vmalloc_to_page(base)
+ : virt_to_page(base);
+
+ base += bv->bv_len;
+ }
+
+ size -= bv->bv_len;
+ }
+}
+
+/**
+ * bch_bio_alloc_pages - allocates a single page for each bvec in a bio
+ * @bio: bio to allocate pages for
+ * @gfp_mask: flags for allocation
+ *
+ * Allocates pages up to @bio->bi_vcnt.
+ *
+ * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages are
+ * freed.
+ */
+int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
+{
+ int i;
+ struct bio_vec *bv;
+
+ /*
+	 * This is called on a freshly allocated bio, so it is safe to access the
+ * bvec table directly.
+ */
+ for (i = 0, bv = bio->bi_io_vec; i < bio->bi_vcnt; bv++, i++) {
+ bv->bv_page = alloc_page(gfp_mask);
+ if (!bv->bv_page) {
+ while (--bv >= bio->bi_io_vec)
+ __free_page(bv->bv_page);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
new file mode 100644
index 0000000000..6f3cb7c921
--- /dev/null
+++ b/drivers/md/bcache/util.h
@@ -0,0 +1,562 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _BCACHE_UTIL_H
+#define _BCACHE_UTIL_H
+
+#include <linux/blkdev.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/sched/clock.h>
+#include <linux/llist.h>
+#include <linux/ratelimit.h>
+#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
+#include <linux/crc64.h>
+
+#include "closure.h"
+
+struct closure;
+
+#ifdef CONFIG_BCACHE_DEBUG
+
+#define EBUG_ON(cond) BUG_ON(cond)
+#define atomic_dec_bug(v) BUG_ON(atomic_dec_return(v) < 0)
+#define atomic_inc_bug(v, i) BUG_ON(atomic_inc_return(v) <= i)
+
+#else /* DEBUG */
+
+#define EBUG_ON(cond) do { if (cond) do {} while (0); } while (0)
+#define atomic_dec_bug(v) atomic_dec(v)
+#define atomic_inc_bug(v, i) atomic_inc(v)
+
+#endif
+
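+/*
+ * Minimal array-backed binary heap. cmp(a, b) decides the ordering;
+ * heap_add() and heap_pop() maintain the heap property via heap_sift()
+ * and heap_sift_down().
+ */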
+#define DECLARE_HEAP(type, name) \
+ struct { \
+ size_t size, used; \
+ type *data; \
+ } name
+
+#define init_heap(heap, _size, gfp) \
+({ \
+ size_t _bytes; \
+ (heap)->used = 0; \
+ (heap)->size = (_size); \
+ _bytes = (heap)->size * sizeof(*(heap)->data); \
+ (heap)->data = kvmalloc(_bytes, (gfp) & GFP_KERNEL); \
+ (heap)->data; \
+})
+
+#define free_heap(heap) \
+do { \
+ kvfree((heap)->data); \
+ (heap)->data = NULL; \
+} while (0)
+
+#define heap_swap(h, i, j) swap((h)->data[i], (h)->data[j])
+
+#define heap_sift(h, i, cmp) \
+do { \
+ size_t _r, _j = i; \
+ \
+ for (; _j * 2 + 1 < (h)->used; _j = _r) { \
+ _r = _j * 2 + 1; \
+ if (_r + 1 < (h)->used && \
+ cmp((h)->data[_r], (h)->data[_r + 1])) \
+ _r++; \
+ \
+ if (cmp((h)->data[_r], (h)->data[_j])) \
+ break; \
+ heap_swap(h, _r, _j); \
+ } \
+} while (0)
+
+#define heap_sift_down(h, i, cmp) \
+do { \
+ while (i) { \
+ size_t p = (i - 1) / 2; \
+ if (cmp((h)->data[i], (h)->data[p])) \
+ break; \
+ heap_swap(h, i, p); \
+ i = p; \
+ } \
+} while (0)
+
+#define heap_add(h, d, cmp) \
+({ \
+ bool _r = !heap_full(h); \
+ if (_r) { \
+ size_t _i = (h)->used++; \
+ (h)->data[_i] = d; \
+ \
+ heap_sift_down(h, _i, cmp); \
+ heap_sift(h, _i, cmp); \
+ } \
+ _r; \
+})
+
+#define heap_pop(h, d, cmp) \
+({ \
+ bool _r = (h)->used; \
+ if (_r) { \
+ (d) = (h)->data[0]; \
+ (h)->used--; \
+ heap_swap(h, 0, (h)->used); \
+ heap_sift(h, 0, cmp); \
+ } \
+ _r; \
+})
+
+#define heap_peek(h) ((h)->used ? (h)->data[0] : NULL)
+
+#define heap_full(h) ((h)->used == (h)->size)
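
A hedged usage sketch of the heap helpers above (the element type, comparison helper and sizes are invented for illustration). cmp(a, b) should return true when a belongs below b in the heap, so with the `<` comparison here the largest value ends up at data[0]:

	static bool example_cmp(int a, int b)
	{
		return a < b;		/* a sits below b: largest at data[0] */
	}

	DECLARE_HEAP(int, example_heap);
	int v;

	if (!init_heap(&example_heap, 16, GFP_KERNEL))
		return -ENOMEM;

	heap_add(&example_heap, 3, example_cmp);
	heap_add(&example_heap, 1, example_cmp);
	heap_add(&example_heap, 2, example_cmp);

	while (heap_pop(&example_heap, v, example_cmp))
		pr_info("popped %d\n", v);	/* 3, then 2, then 1 */

	free_heap(&example_heap);
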
+
+#define DECLARE_FIFO(type, name) \
+ struct { \
+ size_t front, back, size, mask; \
+ type *data; \
+ } name
+
+#define fifo_for_each(c, fifo, iter) \
+ for (iter = (fifo)->front; \
+ c = (fifo)->data[iter], iter != (fifo)->back; \
+ iter = (iter + 1) & (fifo)->mask)
+
+#define __init_fifo(fifo, gfp) \
+({ \
+ size_t _allocated_size, _bytes; \
+ BUG_ON(!(fifo)->size); \
+ \
+ _allocated_size = roundup_pow_of_two((fifo)->size + 1); \
+ _bytes = _allocated_size * sizeof(*(fifo)->data); \
+ \
+ (fifo)->mask = _allocated_size - 1; \
+ (fifo)->front = (fifo)->back = 0; \
+ \
+ (fifo)->data = kvmalloc(_bytes, (gfp) & GFP_KERNEL); \
+ (fifo)->data; \
+})
+
+#define init_fifo_exact(fifo, _size, gfp) \
+({ \
+ (fifo)->size = (_size); \
+ __init_fifo(fifo, gfp); \
+})
+
+#define init_fifo(fifo, _size, gfp) \
+({ \
+ (fifo)->size = (_size); \
+ if ((fifo)->size > 4) \
+ (fifo)->size = roundup_pow_of_two((fifo)->size) - 1; \
+ __init_fifo(fifo, gfp); \
+})
+
+#define free_fifo(fifo) \
+do { \
+ kvfree((fifo)->data); \
+ (fifo)->data = NULL; \
+} while (0)
+
+#define fifo_used(fifo) (((fifo)->back - (fifo)->front) & (fifo)->mask)
+#define fifo_free(fifo) ((fifo)->size - fifo_used(fifo))
+
+#define fifo_empty(fifo) (!fifo_used(fifo))
+#define fifo_full(fifo) (!fifo_free(fifo))
+
+#define fifo_front(fifo) ((fifo)->data[(fifo)->front])
+#define fifo_back(fifo) \
+ ((fifo)->data[((fifo)->back - 1) & (fifo)->mask])
+
+#define fifo_idx(fifo, p) (((p) - &fifo_front(fifo)) & (fifo)->mask)
+
+#define fifo_push_back(fifo, i) \
+({ \
+ bool _r = !fifo_full((fifo)); \
+ if (_r) { \
+ (fifo)->data[(fifo)->back++] = (i); \
+ (fifo)->back &= (fifo)->mask; \
+ } \
+ _r; \
+})
+
+#define fifo_pop_front(fifo, i) \
+({ \
+ bool _r = !fifo_empty((fifo)); \
+ if (_r) { \
+ (i) = (fifo)->data[(fifo)->front++]; \
+ (fifo)->front &= (fifo)->mask; \
+ } \
+ _r; \
+})
+
+#define fifo_push_front(fifo, i) \
+({ \
+ bool _r = !fifo_full((fifo)); \
+ if (_r) { \
+ --(fifo)->front; \
+ (fifo)->front &= (fifo)->mask; \
+ (fifo)->data[(fifo)->front] = (i); \
+ } \
+ _r; \
+})
+
+#define fifo_pop_back(fifo, i) \
+({ \
+ bool _r = !fifo_empty((fifo)); \
+ if (_r) { \
+ --(fifo)->back; \
+ (fifo)->back &= (fifo)->mask; \
+		(i) = (fifo)->data[(fifo)->back];			\
+ } \
+ _r; \
+})
+
+#define fifo_push(fifo, i) fifo_push_back(fifo, (i))
+#define fifo_pop(fifo, i) fifo_pop_front(fifo, (i))
+
+#define fifo_swap(l, r) \
+do { \
+ swap((l)->front, (r)->front); \
+ swap((l)->back, (r)->back); \
+ swap((l)->size, (r)->size); \
+ swap((l)->mask, (r)->mask); \
+ swap((l)->data, (r)->data); \
+} while (0)
+
+#define fifo_move(dest, src) \
+do { \
+ typeof(*((dest)->data)) _t; \
+ while (!fifo_full(dest) && \
+ fifo_pop(src, _t)) \
+ fifo_push(dest, _t); \
+} while (0)
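
A short usage sketch of the FIFO helpers (names and sizes invented for illustration). init_fifo() rounds the allocation up to a power of two, so pushes and pops can wrap the front/back indices with a simple mask:

	DECLARE_FIFO(unsigned int, example_fifo);
	unsigned int v;

	if (!init_fifo(&example_fifo, 8, GFP_KERNEL))
		return -ENOMEM;

	fifo_push(&example_fifo, 1);
	fifo_push(&example_fifo, 2);

	while (fifo_pop(&example_fifo, v))
		pr_info("got %u\n", v);		/* 1, then 2 */

	free_fifo(&example_fifo);
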
+
+/*
+ * Simple array based allocator - preallocates a number of elements and you can
+ * never allocate more than that, also has no locking.
+ *
+ * Handy because if you know you only need a fixed number of elements you don't
+ * have to worry about memory allocation failure, and sometimes a mempool isn't
+ * what you want.
+ *
+ * We treat the free elements as entries in a singly linked list, and the
+ * freelist as a stack - allocating and freeing push and pop off the freelist.
+ */
+
+#define DECLARE_ARRAY_ALLOCATOR(type, name, size) \
+ struct { \
+ type *freelist; \
+ type data[size]; \
+ } name
+
+#define array_alloc(array) \
+({ \
+ typeof((array)->freelist) _ret = (array)->freelist; \
+ \
+ if (_ret) \
+ (array)->freelist = *((typeof((array)->freelist) *) _ret);\
+ \
+ _ret; \
+})
+
+#define array_free(array, ptr) \
+do { \
+ typeof((array)->freelist) _ptr = ptr; \
+ \
+ *((typeof((array)->freelist) *) _ptr) = (array)->freelist; \
+ (array)->freelist = _ptr; \
+} while (0)
+
+#define array_allocator_init(array) \
+do { \
+ typeof((array)->freelist) _i; \
+ \
+ BUILD_BUG_ON(sizeof((array)->data[0]) < sizeof(void *)); \
+ (array)->freelist = NULL; \
+ \
+ for (_i = (array)->data; \
+ _i < (array)->data + ARRAY_SIZE((array)->data); \
+ _i++) \
+ array_free(array, _i); \
+} while (0)
+
+#define array_freelist_empty(array) ((array)->freelist == NULL)
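
A minimal sketch of the array allocator (the element type and pool size are invented for illustration). The only requirement is that the element is at least pointer-sized, since free elements have the freelist pointer stored in their own storage:

	struct example_elem {
		uint64_t payload;	/* >= sizeof(void *), see BUILD_BUG_ON */
	};

	DECLARE_ARRAY_ALLOCATOR(struct example_elem, example_pool, 16);

	array_allocator_init(&example_pool);

	struct example_elem *e = array_alloc(&example_pool);
	if (!e)
		return -ENOMEM;		/* all 16 preallocated elements in use */
	e->payload = 42;
	/* ... */
	array_free(&example_pool, e);
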
+
+#define ANYSINT_MAX(t) \
+ ((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1)
+
+int bch_strtoint_h(const char *cp, int *res);
+int bch_strtouint_h(const char *cp, unsigned int *res);
+int bch_strtoll_h(const char *cp, long long *res);
+int bch_strtoull_h(const char *cp, unsigned long long *res);
+
+static inline int bch_strtol_h(const char *cp, long *res)
+{
+#if BITS_PER_LONG == 32
+ return bch_strtoint_h(cp, (int *) res);
+#else
+ return bch_strtoll_h(cp, (long long *) res);
+#endif
+}
+
+static inline int bch_strtoul_h(const char *cp, long *res)
+{
+#if BITS_PER_LONG == 32
+ return bch_strtouint_h(cp, (unsigned int *) res);
+#else
+ return bch_strtoull_h(cp, (unsigned long long *) res);
+#endif
+}
+
+#define strtoi_h(cp, res) \
+ (__builtin_types_compatible_p(typeof(*res), int) \
+ ? bch_strtoint_h(cp, (void *) res) \
+ : __builtin_types_compatible_p(typeof(*res), long) \
+ ? bch_strtol_h(cp, (void *) res) \
+ : __builtin_types_compatible_p(typeof(*res), long long) \
+ ? bch_strtoll_h(cp, (void *) res) \
+ : __builtin_types_compatible_p(typeof(*res), unsigned int) \
+ ? bch_strtouint_h(cp, (void *) res) \
+ : __builtin_types_compatible_p(typeof(*res), unsigned long) \
+ ? bch_strtoul_h(cp, (void *) res) \
+ : __builtin_types_compatible_p(typeof(*res), unsigned long long)\
+ ? bch_strtoull_h(cp, (void *) res) : -EINVAL)
+
+#define strtoul_safe(cp, var) \
+({ \
+ unsigned long _v; \
+ int _r = kstrtoul(cp, 10, &_v); \
+ if (!_r) \
+ var = _v; \
+ _r; \
+})
+
+#define strtoul_safe_clamp(cp, var, min, max) \
+({ \
+ unsigned long _v; \
+ int _r = kstrtoul(cp, 10, &_v); \
+ if (!_r) \
+ var = clamp_t(typeof(var), _v, min, max); \
+ _r; \
+})
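
A sketch of how these parsers are typically used from a sysfs store handler (the buf and the variables below are hypothetical). The _h variants accept human-readable suffixes, while strtoul_safe_clamp() only updates the target variable when parsing succeeds:

	unsigned int sectors;
	unsigned int example_percent = 10;

	if (strtoi_h(buf, &sectors))		/* accepts input such as "128k" */
		return -EINVAL;

	strtoul_safe_clamp(buf, example_percent, 0, 100);
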
+
+ssize_t bch_hprint(char *buf, int64_t v);
+
+bool bch_is_zero(const char *p, size_t n);
+int bch_parse_uuid(const char *s, char *uuid);
+
+struct time_stats {
+ spinlock_t lock;
+ /*
+ * all fields are in nanoseconds, averages are ewmas stored left shifted
+ * by 8
+ */
+ uint64_t max_duration;
+ uint64_t average_duration;
+ uint64_t average_frequency;
+ uint64_t last;
+};
+
+void bch_time_stats_update(struct time_stats *stats, uint64_t time);
+
+static inline unsigned int local_clock_us(void)
+{
+ return local_clock() >> 10;
+}
+
+#define NSEC_PER_ns 1L
+#define NSEC_PER_us NSEC_PER_USEC
+#define NSEC_PER_ms NSEC_PER_MSEC
+#define NSEC_PER_sec NSEC_PER_SEC
+
+#define __print_time_stat(stats, name, stat, units) \
+ sysfs_print(name ## _ ## stat ## _ ## units, \
+ div_u64((stats)->stat >> 8, NSEC_PER_ ## units))
+
+#define sysfs_print_time_stats(stats, name, \
+ frequency_units, \
+ duration_units) \
+do { \
+ __print_time_stat(stats, name, \
+ average_frequency, frequency_units); \
+ __print_time_stat(stats, name, \
+ average_duration, duration_units); \
+ sysfs_print(name ## _ ##max_duration ## _ ## duration_units, \
+ div_u64((stats)->max_duration, \
+ NSEC_PER_ ## duration_units)); \
+ \
+ sysfs_print(name ## _last_ ## frequency_units, (stats)->last \
+ ? div_s64(local_clock() - (stats)->last, \
+ NSEC_PER_ ## frequency_units) \
+ : -1LL); \
+} while (0)
+
+#define sysfs_time_stats_attribute(name, \
+ frequency_units, \
+ duration_units) \
+read_attribute(name ## _average_frequency_ ## frequency_units); \
+read_attribute(name ## _average_duration_ ## duration_units); \
+read_attribute(name ## _max_duration_ ## duration_units); \
+read_attribute(name ## _last_ ## frequency_units)
+
+#define sysfs_time_stats_attribute_list(name, \
+ frequency_units, \
+ duration_units) \
+&sysfs_ ## name ## _average_frequency_ ## frequency_units, \
+&sysfs_ ## name ## _average_duration_ ## duration_units, \
+&sysfs_ ## name ## _max_duration_ ## duration_units, \
+&sysfs_ ## name ## _last_ ## frequency_units,
+
+#define ewma_add(ewma, val, weight, factor) \
+({ \
+ (ewma) *= (weight) - 1; \
+ (ewma) += (val) << factor; \
+ (ewma) /= (weight); \
+ (ewma) >> factor; \
+})
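
ewma_add() keeps a fixed-point exponentially weighted moving average, scaled up by 2^factor so the fractional part survives integer division. A standalone illustration (not kernel code) of the same arithmetic with weight 8 and factor 4, feeding in a constant sample of 100:

#include <stdio.h>

int main(void)
{
	unsigned long ewma = 0;
	const unsigned long weight = 8, factor = 4;

	for (int i = 0; i < 32; i++) {
		/* same steps as ewma_add(ewma, 100, 8, 4) */
		ewma *= weight - 1;
		ewma += 100UL << factor;
		ewma /= weight;
	}
	/* converges towards ~100, as each step keeps 7/8 of the old value */
	printf("average ~%lu\n", ewma >> factor);
	return 0;
}
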
+
+struct bch_ratelimit {
+ /* Next time we want to do some work, in nanoseconds */
+ uint64_t next;
+
+ /*
+ * Rate at which we want to do work, in units per second
+ * The units here correspond to the units passed to bch_next_delay()
+ */
+ atomic_long_t rate;
+};
+
+static inline void bch_ratelimit_reset(struct bch_ratelimit *d)
+{
+ d->next = local_clock();
+}
+
+uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done);
+
+#define __DIV_SAFE(n, d, zero) \
+({ \
+ typeof(n) _n = (n); \
+ typeof(d) _d = (d); \
+ _d ? _n / _d : zero; \
+})
+
+#define DIV_SAFE(n, d) __DIV_SAFE(n, d, 0)
+
+#define container_of_or_null(ptr, type, member) \
+({ \
+ typeof(ptr) _ptr = ptr; \
+ _ptr ? container_of(_ptr, type, member) : NULL; \
+})
+
+#define RB_INSERT(root, new, member, cmp) \
+({ \
+ __label__ dup; \
+ struct rb_node **n = &(root)->rb_node, *parent = NULL; \
+ typeof(new) this; \
+ int res, ret = -1; \
+ \
+ while (*n) { \
+ parent = *n; \
+ this = container_of(*n, typeof(*(new)), member); \
+ res = cmp(new, this); \
+ if (!res) \
+ goto dup; \
+ n = res < 0 \
+ ? &(*n)->rb_left \
+ : &(*n)->rb_right; \
+ } \
+ \
+ rb_link_node(&(new)->member, parent, n); \
+ rb_insert_color(&(new)->member, root); \
+ ret = 0; \
+dup: \
+ ret; \
+})
+
+#define RB_SEARCH(root, search, member, cmp) \
+({ \
+ struct rb_node *n = (root)->rb_node; \
+ typeof(&(search)) this, ret = NULL; \
+ int res; \
+ \
+ while (n) { \
+ this = container_of(n, typeof(search), member); \
+ res = cmp(&(search), this); \
+ if (!res) { \
+ ret = this; \
+ break; \
+ } \
+ n = res < 0 \
+ ? n->rb_left \
+ : n->rb_right; \
+ } \
+ ret; \
+})
+
+#define RB_GREATER(root, search, member, cmp) \
+({ \
+ struct rb_node *n = (root)->rb_node; \
+ typeof(&(search)) this, ret = NULL; \
+ int res; \
+ \
+ while (n) { \
+ this = container_of(n, typeof(search), member); \
+ res = cmp(&(search), this); \
+ if (res < 0) { \
+ ret = this; \
+ n = n->rb_left; \
+ } else \
+ n = n->rb_right; \
+ } \
+ ret; \
+})
+
+#define RB_FIRST(root, type, member) \
+ container_of_or_null(rb_first(root), type, member)
+
+#define RB_LAST(root, type, member) \
+ container_of_or_null(rb_last(root), type, member)
+
+#define RB_NEXT(ptr, member) \
+ container_of_or_null(rb_next(&(ptr)->member), typeof(*ptr), member)
+
+#define RB_PREV(ptr, member) \
+ container_of_or_null(rb_prev(&(ptr)->member), typeof(*ptr), member)
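
A hedged sketch of the rbtree wrappers above (the struct, key and comparison are invented for illustration; <linux/rbtree.h> is assumed to be available). RB_INSERT() returns non-zero when an equal key is already present:

	struct example_node {
		struct rb_node node;
		uint64_t key;
	};

	static int example_cmp(struct example_node *l, struct example_node *r)
	{
		return l->key < r->key ? -1 : l->key > r->key ? 1 : 0;
	}

	struct rb_root root = RB_ROOT;
	struct example_node *n = kzalloc(sizeof(*n), GFP_KERNEL);

	if (!n)
		return -ENOMEM;
	n->key = 10;
	if (RB_INSERT(&root, n, node, example_cmp))
		kfree(n);			/* duplicate key, not inserted */

	struct example_node query = { .key = 10 };
	struct example_node *found = RB_SEARCH(&root, query, node, example_cmp);
	/* found == n here, or NULL if the key were absent */
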
+
+static inline uint64_t bch_crc64(const void *p, size_t len)
+{
+ uint64_t crc = 0xffffffffffffffffULL;
+
+ crc = crc64_be(crc, p, len);
+ return crc ^ 0xffffffffffffffffULL;
+}
+
+/*
+ * A stepwise-linear pseudo-exponential. This returns 1 << (x >>
+ * fract_bits), with the less-significant bits filled in by linear
+ * interpolation.
+ *
+ * This can also be interpreted as a floating-point number format,
+ * where the low fract_bits are the mantissa (with implicit leading
+ * 1 bit), and the more significant bits are the exponent.
+ * The return value is 1.mantissa * 2^exponent.
+ *
+ * The way this is used, fract_bits is 6 and the largest possible
+ * input is CONGESTED_MAX-1 = 1023 (exponent 16, mantissa 0x1.fc),
+ * so the maximum output is 0x1fc00.
+ */
+static inline unsigned int fract_exp_two(unsigned int x,
+ unsigned int fract_bits)
+{
+ unsigned int mantissa = 1 << fract_bits; /* Implicit bit */
+
+ mantissa += x & (mantissa - 1);
+ x >>= fract_bits; /* The exponent */
+ /* Largest intermediate value 0x7f0000 */
+ return mantissa << x >> fract_bits;
+}
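
Since fract_exp_two() is pure arithmetic, a standalone check (outside the kernel) makes the interpolation easy to see: inputs that are exact multiples of 1 << fract_bits produce exact powers of two, and inputs in between are interpolated linearly.

#include <stdio.h>

static unsigned int fract_exp_two(unsigned int x, unsigned int fract_bits)
{
	unsigned int mantissa = 1 << fract_bits;	/* Implicit bit */

	mantissa += x & (mantissa - 1);
	x >>= fract_bits;				/* The exponent */
	return mantissa << x >> fract_bits;
}

int main(void)
{
	/* with fract_bits = 6: 64 -> 2, 128 -> 4, 192 -> 8 */
	printf("%u %u %u\n", fract_exp_two(64, 6),
	       fract_exp_two(128, 6), fract_exp_two(192, 6));
	/* 96 is halfway between 64 and 128, so the result interpolates to 3 */
	printf("%u\n", fract_exp_two(96, 6));
	return 0;
}
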
+
+void bch_bio_map(struct bio *bio, void *base);
+int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp_mask);
+
+#endif /* _BCACHE_UTIL_H */
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
new file mode 100644
index 0000000000..3accfdaee6
--- /dev/null
+++ b/drivers/md/bcache/writeback.c
@@ -0,0 +1,1100 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * background writeback - scan btree for dirty data and write it to the backing
+ * device
+ *
+ * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "bcache.h"
+#include "btree.h"
+#include "debug.h"
+#include "writeback.h"
+
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/sched/clock.h>
+#include <trace/events/bcache.h>
+
+static void update_gc_after_writeback(struct cache_set *c)
+{
+ if (c->gc_after_writeback != (BCH_ENABLE_AUTO_GC) ||
+ c->gc_stats.in_use < BCH_AUTO_GC_DIRTY_THRESHOLD)
+ return;
+
+ c->gc_after_writeback |= BCH_DO_AUTO_GC;
+}
+
+/* Rate limiting */
+static uint64_t __calc_target_rate(struct cached_dev *dc)
+{
+ struct cache_set *c = dc->disk.c;
+
+ /*
+ * This is the size of the cache, minus the amount used for
+ * flash-only devices
+ */
+ uint64_t cache_sectors = c->nbuckets * c->cache->sb.bucket_size -
+ atomic_long_read(&c->flash_dev_dirty_sectors);
+
+ /*
+ * Unfortunately there is no control of global dirty data. If the
+ * user states that they want 10% dirty data in the cache, and has,
+ * e.g., 5 backing volumes of equal size, we try and ensure each
+ * backing volume uses about 2% of the cache for dirty data.
+ */
+ uint32_t bdev_share =
+ div64_u64(bdev_nr_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
+ c->cached_dev_sectors);
+
+ uint64_t cache_dirty_target =
+ div_u64(cache_sectors * dc->writeback_percent, 100);
+
+ /* Ensure each backing dev gets at least one dirty share */
+ if (bdev_share < 1)
+ bdev_share = 1;
+
+ return (cache_dirty_target * bdev_share) >> WRITEBACK_SHARE_SHIFT;
+}
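
A worked example with invented numbers: with 100 GiB of usable cache space, writeback_percent = 10 and two equally sized backing devices, cache_dirty_target comes to 10 GiB and each device's bdev_share is roughly half of 1 << WRITEBACK_SHARE_SHIFT, so each backing device is steered towards about 5 GiB of dirty data in the cache.
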
+
+static void __update_writeback_rate(struct cached_dev *dc)
+{
+ /*
+ * PI controller:
+ * Figures out the amount that should be written per second.
+ *
+ * First, the error (number of sectors that are dirty beyond our
+ * target) is calculated. The error is accumulated (numerically
+ * integrated).
+ *
+ * Then, the proportional value and integral value are scaled
+ * based on configured values. These are stored as inverses to
+ * avoid fixed point math and to make configuration easy-- e.g.
+ * the default value of 40 for writeback_rate_p_term_inverse
+ * attempts to write at a rate that would retire all the dirty
+ * blocks in 40 seconds.
+ *
+ * The writeback_rate_i_inverse value of 10000 means that 1/10000th
+ * of the error is accumulated in the integral term per second.
+ * This acts as a slow, long-term average that is not subject to
+ * variations in usage like the p term.
+ */
+ int64_t target = __calc_target_rate(dc);
+ int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
+ int64_t error = dirty - target;
+ int64_t proportional_scaled =
+ div_s64(error, dc->writeback_rate_p_term_inverse);
+ int64_t integral_scaled;
+ uint32_t new_rate;
+
+ /*
+	 * We need to consider the number of dirty buckets as well
+	 * when calculating proportional_scaled; otherwise we might end up
+	 * with an unreasonably small writeback rate when the cache is highly
+	 * fragmented. The worst case is when very few dirty sectors occupy a
+	 * lot of dirty buckets: the dirty buckets may reach
+	 * cutoff_writeback_sync while the dirty data has not even reached
+	 * writeback_percent, so the rate stays at the minimum value and
+	 * writes get stuck in non-writeback mode.
+ */
+ struct cache_set *c = dc->disk.c;
+
+ int64_t dirty_buckets = c->nbuckets - c->avail_nbuckets;
+
+ if (dc->writeback_consider_fragment &&
+ c->gc_stats.in_use > BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW && dirty > 0) {
+ int64_t fragment =
+ div_s64((dirty_buckets * c->cache->sb.bucket_size), dirty);
+ int64_t fp_term;
+ int64_t fps;
+
+ if (c->gc_stats.in_use <= BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID) {
+ fp_term = (int64_t)dc->writeback_rate_fp_term_low *
+ (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW);
+ } else if (c->gc_stats.in_use <= BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH) {
+ fp_term = (int64_t)dc->writeback_rate_fp_term_mid *
+ (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID);
+ } else {
+ fp_term = (int64_t)dc->writeback_rate_fp_term_high *
+ (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH);
+ }
+ fps = div_s64(dirty, dirty_buckets) * fp_term;
+ if (fragment > 3 && fps > proportional_scaled) {
+			/* Only overwrite the p term when fragment > 3 */
+ proportional_scaled = fps;
+ }
+ }
+
+ if ((error < 0 && dc->writeback_rate_integral > 0) ||
+ (error > 0 && time_before64(local_clock(),
+ dc->writeback_rate.next + NSEC_PER_MSEC))) {
+ /*
+ * Only decrease the integral term if it's more than
+ * zero. Only increase the integral term if the device
+ * is keeping up. (Don't wind up the integral
+ * ineffectively in either case).
+ *
+ * It's necessary to scale this by
+ * writeback_rate_update_seconds to keep the integral
+ * term dimensioned properly.
+ */
+ dc->writeback_rate_integral += error *
+ dc->writeback_rate_update_seconds;
+ }
+
+ integral_scaled = div_s64(dc->writeback_rate_integral,
+ dc->writeback_rate_i_term_inverse);
+
+ new_rate = clamp_t(int32_t, (proportional_scaled + integral_scaled),
+ dc->writeback_rate_minimum, NSEC_PER_SEC);
+
+ dc->writeback_rate_proportional = proportional_scaled;
+ dc->writeback_rate_integral_scaled = integral_scaled;
+ dc->writeback_rate_change = new_rate -
+ atomic_long_read(&dc->writeback_rate.rate);
+ atomic_long_set(&dc->writeback_rate.rate, new_rate);
+ dc->writeback_rate_target = target;
+}
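
The controller above is plain fixed-point arithmetic; a standalone sketch (not kernel code) using the default tuning from bch_cached_dev_writeback_init() below (p_term_inverse = 40, i_term_inverse = 10000, a 5 second update interval) shows how the rate responds to a constant backlog of 4096 sectors over target. The clamping to [writeback_rate_minimum, NSEC_PER_SEC] is omitted here:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const int64_t p_inverse = 40, i_inverse = 10000, update_secs = 5;
	int64_t integral = 0, error = 4096;	/* dirty sectors over target */

	for (int step = 0; step < 5; step++) {
		int64_t proportional = error / p_inverse;

		integral += error * update_secs;
		printf("step %d: rate %lld sectors/s\n", step,
		       (long long)(proportional + integral / i_inverse));
	}
	return 0;	/* the p term alone would retire the backlog in ~40s */
}
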
+
+static bool idle_counter_exceeded(struct cache_set *c)
+{
+ int counter, dev_nr;
+
+ /*
+	 * If c->idle_counter has overflowed (idle for a really long time),
+	 * reset it to 0 and don't set the maximum rate this time, for code
+	 * simplicity.
+ */
+ counter = atomic_inc_return(&c->idle_counter);
+ if (counter <= 0) {
+ atomic_set(&c->idle_counter, 0);
+ return false;
+ }
+
+ dev_nr = atomic_read(&c->attached_dev_nr);
+ if (dev_nr == 0)
+ return false;
+
+ /*
+	 * c->idle_counter is increased by the writeback threads of all
+	 * attached backing devices, so to represent a rough time period the
+	 * counter should be divided by dev_nr. Otherwise the counter would
+	 * reach the threshold faster simply because more backing devices
+	 * are attached.
+	 * The following calculation is equivalent to checking
+	 * (counter / dev_nr) < (dev_nr * 6)
+ */
+ if (counter < (dev_nr * dev_nr * 6))
+ return false;
+
+ return true;
+}
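
For example, with dev_nr = 3 the check fires once the shared counter reaches 3 * 3 * 6 = 54 increments, which is the same as counter / dev_nr reaching dev_nr * 6 = 18, just without the division.
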
+
+/*
+ * Idle_counter is increased every time update_writeback_rate() is
+ * called. If all backing devices attached to the same cache set have
+ * identical dc->writeback_rate_update_seconds values, it takes about 6
+ * rounds of update_writeback_rate() on each backing device before
+ * c->at_max_writeback_rate is set to 1, and then the maximum writeback
+ * rate is set for each dc->writeback_rate.rate.
+ * In order to avoid the extra locking cost of counting the exact number
+ * of dirty cached devices, c->attached_dev_nr is used to calculate the
+ * idle threshold. It might be bigger if not all cached devices are in
+ * write-back mode, but it still works well with a limited number of
+ * extra rounds of update_writeback_rate().
+ */
+static bool set_at_max_writeback_rate(struct cache_set *c,
+ struct cached_dev *dc)
+{
+	/* Don't set max writeback rate if it is disabled */
+ if (!c->idle_max_writeback_rate_enabled)
+ return false;
+
+ /* Don't set max writeback rate if gc is running */
+ if (!c->gc_mark_valid)
+ return false;
+
+ if (!idle_counter_exceeded(c))
+ return false;
+
+ if (atomic_read(&c->at_max_writeback_rate) != 1)
+ atomic_set(&c->at_max_writeback_rate, 1);
+
+ atomic_long_set(&dc->writeback_rate.rate, INT_MAX);
+
+ /* keep writeback_rate_target as existing value */
+ dc->writeback_rate_proportional = 0;
+ dc->writeback_rate_integral_scaled = 0;
+ dc->writeback_rate_change = 0;
+
+ /*
+	 * Check again in case new I/O arrived before
+	 * set_at_max_writeback_rate() returns.
+ */
+ if (!idle_counter_exceeded(c) ||
+ !atomic_read(&c->at_max_writeback_rate))
+ return false;
+
+ return true;
+}
+
+static void update_writeback_rate(struct work_struct *work)
+{
+ struct cached_dev *dc = container_of(to_delayed_work(work),
+ struct cached_dev,
+ writeback_rate_update);
+ struct cache_set *c = dc->disk.c;
+
+ /*
+ * should check BCACHE_DEV_RATE_DW_RUNNING before calling
+ * cancel_delayed_work_sync().
+ */
+ set_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
+ /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
+ smp_mb__after_atomic();
+
+ /*
+ * CACHE_SET_IO_DISABLE might be set via sysfs interface,
+ * check it here too.
+ */
+ if (!test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) ||
+ test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
+ clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
+ /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
+ smp_mb__after_atomic();
+ return;
+ }
+
+ /*
+	 * If the whole cache set is idle, set_at_max_writeback_rate()
+	 * will set the writeback rate to a maximum number. Then it is
+	 * unnecessary to update the writeback rate for an idle cache
+	 * set that is already at the maximum writeback rate.
+ */
+ if (atomic_read(&dc->has_dirty) && dc->writeback_percent &&
+ !set_at_max_writeback_rate(c, dc)) {
+ do {
+ if (!down_read_trylock((&dc->writeback_lock))) {
+ dc->rate_update_retry++;
+ if (dc->rate_update_retry <=
+ BCH_WBRATE_UPDATE_MAX_SKIPS)
+ break;
+ down_read(&dc->writeback_lock);
+ dc->rate_update_retry = 0;
+ }
+ __update_writeback_rate(dc);
+ update_gc_after_writeback(c);
+ up_read(&dc->writeback_lock);
+ } while (0);
+ }
+
+
+ /*
+ * CACHE_SET_IO_DISABLE might be set via sysfs interface,
+ * check it here too.
+ */
+ if (test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) &&
+ !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
+ schedule_delayed_work(&dc->writeback_rate_update,
+ dc->writeback_rate_update_seconds * HZ);
+ }
+
+ /*
+ * should check BCACHE_DEV_RATE_DW_RUNNING before calling
+ * cancel_delayed_work_sync().
+ */
+ clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
+ /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
+ smp_mb__after_atomic();
+}
+
+static unsigned int writeback_delay(struct cached_dev *dc,
+ unsigned int sectors)
+{
+ if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
+ !dc->writeback_percent)
+ return 0;
+
+ return bch_next_delay(&dc->writeback_rate, sectors);
+}
+
+struct dirty_io {
+ struct closure cl;
+ struct cached_dev *dc;
+ uint16_t sequence;
+ struct bio bio;
+};
+
+static void dirty_init(struct keybuf_key *w)
+{
+ struct dirty_io *io = w->private;
+ struct bio *bio = &io->bio;
+
+ bio_init(bio, NULL, bio->bi_inline_vecs,
+ DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS), 0);
+ if (!io->dc->writeback_percent)
+ bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
+
+ bio->bi_iter.bi_size = KEY_SIZE(&w->key) << 9;
+ bio->bi_private = w;
+ bch_bio_map(bio, NULL);
+}
+
+static void dirty_io_destructor(struct closure *cl)
+{
+ struct dirty_io *io = container_of(cl, struct dirty_io, cl);
+
+ kfree(io);
+}
+
+static void write_dirty_finish(struct closure *cl)
+{
+ struct dirty_io *io = container_of(cl, struct dirty_io, cl);
+ struct keybuf_key *w = io->bio.bi_private;
+ struct cached_dev *dc = io->dc;
+
+ bio_free_pages(&io->bio);
+
+ /* This is kind of a dumb way of signalling errors. */
+ if (KEY_DIRTY(&w->key)) {
+ int ret;
+ unsigned int i;
+ struct keylist keys;
+
+ bch_keylist_init(&keys);
+
+ bkey_copy(keys.top, &w->key);
+ SET_KEY_DIRTY(keys.top, false);
+ bch_keylist_push(&keys);
+
+ for (i = 0; i < KEY_PTRS(&w->key); i++)
+ atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);
+
+ ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);
+
+ if (ret)
+ trace_bcache_writeback_collision(&w->key);
+
+ atomic_long_inc(ret
+ ? &dc->disk.c->writeback_keys_failed
+ : &dc->disk.c->writeback_keys_done);
+ }
+
+ bch_keybuf_del(&dc->writeback_keys, w);
+ up(&dc->in_flight);
+
+ closure_return_with_destructor(cl, dirty_io_destructor);
+}
+
+static void dirty_endio(struct bio *bio)
+{
+ struct keybuf_key *w = bio->bi_private;
+ struct dirty_io *io = w->private;
+
+ if (bio->bi_status) {
+ SET_KEY_DIRTY(&w->key, false);
+ bch_count_backing_io_errors(io->dc, bio);
+ }
+
+ closure_put(&io->cl);
+}
+
+static void write_dirty(struct closure *cl)
+{
+ struct dirty_io *io = container_of(cl, struct dirty_io, cl);
+ struct keybuf_key *w = io->bio.bi_private;
+ struct cached_dev *dc = io->dc;
+
+ uint16_t next_sequence;
+
+ if (atomic_read(&dc->writeback_sequence_next) != io->sequence) {
+ /* Not our turn to write; wait for a write to complete */
+ closure_wait(&dc->writeback_ordering_wait, cl);
+
+ if (atomic_read(&dc->writeback_sequence_next) == io->sequence) {
+ /*
+			 * Edge case: it happened in indeterminate order
+			 * relative to when we were added to the wait list.
+ */
+ closure_wake_up(&dc->writeback_ordering_wait);
+ }
+
+ continue_at(cl, write_dirty, io->dc->writeback_write_wq);
+ return;
+ }
+
+ next_sequence = io->sequence + 1;
+
+ /*
+ * IO errors are signalled using the dirty bit on the key.
+ * If we failed to read, we should not attempt to write to the
+ * backing device. Instead, immediately go to write_dirty_finish
+ * to clean up.
+ */
+ if (KEY_DIRTY(&w->key)) {
+ dirty_init(w);
+ io->bio.bi_opf = REQ_OP_WRITE;
+ io->bio.bi_iter.bi_sector = KEY_START(&w->key);
+ bio_set_dev(&io->bio, io->dc->bdev);
+ io->bio.bi_end_io = dirty_endio;
+
+ /* I/O request sent to backing device */
+ closure_bio_submit(io->dc->disk.c, &io->bio, cl);
+ }
+
+ atomic_set(&dc->writeback_sequence_next, next_sequence);
+ closure_wake_up(&dc->writeback_ordering_wait);
+
+ continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
+}
+
+static void read_dirty_endio(struct bio *bio)
+{
+ struct keybuf_key *w = bio->bi_private;
+ struct dirty_io *io = w->private;
+
+ /* is_read = 1 */
+ bch_count_io_errors(io->dc->disk.c->cache,
+ bio->bi_status, 1,
+ "reading dirty data from cache");
+
+ dirty_endio(bio);
+}
+
+static void read_dirty_submit(struct closure *cl)
+{
+ struct dirty_io *io = container_of(cl, struct dirty_io, cl);
+
+ closure_bio_submit(io->dc->disk.c, &io->bio, cl);
+
+ continue_at(cl, write_dirty, io->dc->writeback_write_wq);
+}
+
+static void read_dirty(struct cached_dev *dc)
+{
+ unsigned int delay = 0;
+ struct keybuf_key *next, *keys[MAX_WRITEBACKS_IN_PASS], *w;
+ size_t size;
+ int nk, i;
+ struct dirty_io *io;
+ struct closure cl;
+ uint16_t sequence = 0;
+
+ BUG_ON(!llist_empty(&dc->writeback_ordering_wait.list));
+ atomic_set(&dc->writeback_sequence_next, sequence);
+ closure_init_stack(&cl);
+
+ /*
+ * XXX: if we error, background writeback just spins. Should use some
+ * mempools.
+ */
+
+ next = bch_keybuf_next(&dc->writeback_keys);
+
+ while (!kthread_should_stop() &&
+ !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) &&
+ next) {
+ size = 0;
+ nk = 0;
+
+ do {
+ BUG_ON(ptr_stale(dc->disk.c, &next->key, 0));
+
+ /*
+ * Don't combine too many operations, even if they
+ * are all small.
+ */
+ if (nk >= MAX_WRITEBACKS_IN_PASS)
+ break;
+
+ /*
+ * If the current operation is very large, don't
+ * further combine operations.
+ */
+ if (size >= MAX_WRITESIZE_IN_PASS)
+ break;
+
+ /*
+ * Operations are only eligible to be combined
+ * if they are contiguous.
+ *
+ * TODO: add a heuristic willing to fire a
+ * certain amount of non-contiguous IO per pass,
+ * so that we can benefit from backing device
+ * command queueing.
+ */
+ if ((nk != 0) && bkey_cmp(&keys[nk-1]->key,
+ &START_KEY(&next->key)))
+ break;
+
+ size += KEY_SIZE(&next->key);
+ keys[nk++] = next;
+ } while ((next = bch_keybuf_next(&dc->writeback_keys)));
+
+ /* Now we have gathered a set of 1..5 keys to write back. */
+ for (i = 0; i < nk; i++) {
+ w = keys[i];
+
+ io = kzalloc(struct_size(io, bio.bi_inline_vecs,
+ DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS)),
+ GFP_KERNEL);
+ if (!io)
+ goto err;
+
+ w->private = io;
+ io->dc = dc;
+ io->sequence = sequence++;
+
+ dirty_init(w);
+ io->bio.bi_opf = REQ_OP_READ;
+ io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
+ bio_set_dev(&io->bio, dc->disk.c->cache->bdev);
+ io->bio.bi_end_io = read_dirty_endio;
+
+ if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
+ goto err_free;
+
+ trace_bcache_writeback(&w->key);
+
+ down(&dc->in_flight);
+
+ /*
+ * We've acquired a semaphore for the maximum
+ * simultaneous number of writebacks; from here
+ * everything happens asynchronously.
+ */
+ closure_call(&io->cl, read_dirty_submit, NULL, &cl);
+ }
+
+ delay = writeback_delay(dc, size);
+
+ while (!kthread_should_stop() &&
+ !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) &&
+ delay) {
+ schedule_timeout_interruptible(delay);
+ delay = writeback_delay(dc, 0);
+ }
+ }
+
+ if (0) {
+err_free:
+ kfree(w->private);
+err:
+ bch_keybuf_del(&dc->writeback_keys, w);
+ }
+
+ /*
+ * Wait for outstanding writeback IOs to finish (and keybuf slots to be
+ * freed) before refilling again
+ */
+ closure_sync(&cl);
+}
+
+/* Scan for dirty data */
+
+void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
+ uint64_t offset, int nr_sectors)
+{
+ struct bcache_device *d = c->devices[inode];
+ unsigned int stripe_offset, sectors_dirty;
+ int stripe;
+
+ if (!d)
+ return;
+
+ stripe = offset_to_stripe(d, offset);
+ if (stripe < 0)
+ return;
+
+ if (UUID_FLASH_ONLY(&c->uuids[inode]))
+ atomic_long_add(nr_sectors, &c->flash_dev_dirty_sectors);
+
+ stripe_offset = offset & (d->stripe_size - 1);
+
+ while (nr_sectors) {
+ int s = min_t(unsigned int, abs(nr_sectors),
+ d->stripe_size - stripe_offset);
+
+ if (nr_sectors < 0)
+ s = -s;
+
+ if (stripe >= d->nr_stripes)
+ return;
+
+ sectors_dirty = atomic_add_return(s,
+ d->stripe_sectors_dirty + stripe);
+ if (sectors_dirty == d->stripe_size) {
+ if (!test_bit(stripe, d->full_dirty_stripes))
+ set_bit(stripe, d->full_dirty_stripes);
+ } else {
+ if (test_bit(stripe, d->full_dirty_stripes))
+ clear_bit(stripe, d->full_dirty_stripes);
+ }
+
+ nr_sectors -= s;
+ stripe_offset = 0;
+ stripe++;
+ }
+}
+
+static bool dirty_pred(struct keybuf *buf, struct bkey *k)
+{
+ struct cached_dev *dc = container_of(buf,
+ struct cached_dev,
+ writeback_keys);
+
+ BUG_ON(KEY_INODE(k) != dc->disk.id);
+
+ return KEY_DIRTY(k);
+}
+
+static void refill_full_stripes(struct cached_dev *dc)
+{
+ struct keybuf *buf = &dc->writeback_keys;
+ unsigned int start_stripe, next_stripe;
+ int stripe;
+ bool wrapped = false;
+
+ stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));
+ if (stripe < 0)
+ stripe = 0;
+
+ start_stripe = stripe;
+
+ while (1) {
+ stripe = find_next_bit(dc->disk.full_dirty_stripes,
+ dc->disk.nr_stripes, stripe);
+
+ if (stripe == dc->disk.nr_stripes)
+ goto next;
+
+ next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
+ dc->disk.nr_stripes, stripe);
+
+ buf->last_scanned = KEY(dc->disk.id,
+ stripe * dc->disk.stripe_size, 0);
+
+ bch_refill_keybuf(dc->disk.c, buf,
+ &KEY(dc->disk.id,
+ next_stripe * dc->disk.stripe_size, 0),
+ dirty_pred);
+
+ if (array_freelist_empty(&buf->freelist))
+ return;
+
+ stripe = next_stripe;
+next:
+ if (wrapped && stripe > start_stripe)
+ return;
+
+ if (stripe == dc->disk.nr_stripes) {
+ stripe = 0;
+ wrapped = true;
+ }
+ }
+}
+
+/*
+ * Returns true if we scanned the entire disk
+ */
+static bool refill_dirty(struct cached_dev *dc)
+{
+ struct keybuf *buf = &dc->writeback_keys;
+ struct bkey start = KEY(dc->disk.id, 0, 0);
+ struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
+ struct bkey start_pos;
+
+ /*
+	 * Make sure the keybuf position is inside the range for this disk;
+	 * at bringup we might not be attached yet, so this disk's inode
+	 * number isn't initialized yet.
+ */
+ if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
+ bkey_cmp(&buf->last_scanned, &end) > 0)
+ buf->last_scanned = start;
+
+ if (dc->partial_stripes_expensive) {
+ refill_full_stripes(dc);
+ if (array_freelist_empty(&buf->freelist))
+ return false;
+ }
+
+ start_pos = buf->last_scanned;
+ bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
+
+ if (bkey_cmp(&buf->last_scanned, &end) < 0)
+ return false;
+
+ /*
+ * If we get to the end start scanning again from the beginning, and
+ * only scan up to where we initially started scanning from:
+ */
+ buf->last_scanned = start;
+ bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);
+
+ return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
+}
+
+static int bch_writeback_thread(void *arg)
+{
+ struct cached_dev *dc = arg;
+ struct cache_set *c = dc->disk.c;
+ bool searched_full_index;
+
+ bch_ratelimit_reset(&dc->writeback_rate);
+
+ while (!kthread_should_stop() &&
+ !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
+ down_write(&dc->writeback_lock);
+ set_current_state(TASK_INTERRUPTIBLE);
+ /*
+		 * If the bcache device is detaching, skip here and continue
+		 * to perform writeback. Otherwise, if there is no dirty data
+		 * on the cache, or there is dirty data but writeback is
+		 * disabled, the writeback thread should sleep here and wait
+		 * for others to wake it up.
+ */
+ if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
+ (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) {
+ up_write(&dc->writeback_lock);
+
+ if (kthread_should_stop() ||
+ test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
+ set_current_state(TASK_RUNNING);
+ break;
+ }
+
+ schedule();
+ continue;
+ }
+ set_current_state(TASK_RUNNING);
+
+ searched_full_index = refill_dirty(dc);
+
+ if (searched_full_index &&
+ RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
+ atomic_set(&dc->has_dirty, 0);
+ SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
+ bch_write_bdev_super(dc, NULL);
+ /*
+ * If bcache device is detaching via sysfs interface,
+ * writeback thread should stop after there is no dirty
+ * data on cache. BCACHE_DEV_DETACHING flag is set in
+ * bch_cached_dev_detach().
+ */
+ if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) {
+ struct closure cl;
+
+ closure_init_stack(&cl);
+ memset(&dc->sb.set_uuid, 0, 16);
+ SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);
+
+ bch_write_bdev_super(dc, &cl);
+ closure_sync(&cl);
+
+ up_write(&dc->writeback_lock);
+ break;
+ }
+
+ /*
+			 * When the dirty data ratio is high (e.g. 50%+),
+			 * there might be heavy bucket fragmentation after
+			 * writeback finishes, which hurts subsequent write
+			 * performance. If users really care about write
+			 * performance they may set BCH_ENABLE_AUTO_GC via
+			 * sysfs; then, when BCH_DO_AUTO_GC is set, the
+			 * garbage collection thread is woken up here. After
+			 * the moving GC, the shrunk btree and the discarded
+			 * free bucket space on the SSD may help subsequent
+			 * write requests.
+ */
+ if (c->gc_after_writeback ==
+ (BCH_ENABLE_AUTO_GC|BCH_DO_AUTO_GC)) {
+ c->gc_after_writeback &= ~BCH_DO_AUTO_GC;
+ force_wake_up_gc(c);
+ }
+ }
+
+ up_write(&dc->writeback_lock);
+
+ read_dirty(dc);
+
+ if (searched_full_index) {
+ unsigned int delay = dc->writeback_delay * HZ;
+
+ while (delay &&
+ !kthread_should_stop() &&
+ !test_bit(CACHE_SET_IO_DISABLE, &c->flags) &&
+ !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
+ delay = schedule_timeout_interruptible(delay);
+
+ bch_ratelimit_reset(&dc->writeback_rate);
+ }
+ }
+
+ if (dc->writeback_write_wq)
+ destroy_workqueue(dc->writeback_write_wq);
+
+ cached_dev_put(dc);
+ wait_for_kthread_stop();
+
+ return 0;
+}
+
+/* Init */
+#define INIT_KEYS_EACH_TIME 500000
+
+struct sectors_dirty_init {
+ struct btree_op op;
+ unsigned int inode;
+ size_t count;
+};
+
+static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
+ struct bkey *k)
+{
+ struct sectors_dirty_init *op = container_of(_op,
+ struct sectors_dirty_init, op);
+ if (KEY_INODE(k) > op->inode)
+ return MAP_DONE;
+
+ if (KEY_DIRTY(k))
+ bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
+ KEY_START(k), KEY_SIZE(k));
+
+ op->count++;
+ if (!(op->count % INIT_KEYS_EACH_TIME))
+ cond_resched();
+
+ return MAP_CONTINUE;
+}
+
+static int bch_root_node_dirty_init(struct cache_set *c,
+ struct bcache_device *d,
+ struct bkey *k)
+{
+ struct sectors_dirty_init op;
+ int ret;
+
+ bch_btree_op_init(&op.op, -1);
+ op.inode = d->id;
+ op.count = 0;
+
+ ret = bcache_btree(map_keys_recurse,
+ k,
+ c->root,
+ &op.op,
+ &KEY(op.inode, 0, 0),
+ sectors_dirty_init_fn,
+ 0);
+ if (ret < 0)
+ pr_warn("sectors dirty init failed, ret=%d!\n", ret);
+
+ /*
+	 * The op may have been added to the cache_set's btree_cache_wait
+	 * list in mca_cannibalize(); we must make sure it is removed from
+	 * the list and that btree_cache_alloc_lock is released before the
+	 * op memory is freed.
+	 * Otherwise, btree_cache_wait would be corrupted.
+ */
+ bch_cannibalize_unlock(c);
+ finish_wait(&c->btree_cache_wait, &(&op.op)->wait);
+
+ return ret;
+}
+
+static int bch_dirty_init_thread(void *arg)
+{
+ struct dirty_init_thrd_info *info = arg;
+ struct bch_dirty_init_state *state = info->state;
+ struct cache_set *c = state->c;
+ struct btree_iter iter;
+ struct bkey *k, *p;
+ int cur_idx, prev_idx, skip_nr;
+
+ k = p = NULL;
+ prev_idx = 0;
+
+ bch_btree_iter_init(&c->root->keys, &iter, NULL);
+ k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
+ BUG_ON(!k);
+
+ p = k;
+
+ while (k) {
+ spin_lock(&state->idx_lock);
+ cur_idx = state->key_idx;
+ state->key_idx++;
+ spin_unlock(&state->idx_lock);
+
+ skip_nr = cur_idx - prev_idx;
+
+ while (skip_nr) {
+ k = bch_btree_iter_next_filter(&iter,
+ &c->root->keys,
+ bch_ptr_bad);
+ if (k)
+ p = k;
+ else {
+ atomic_set(&state->enough, 1);
+ /* Update state->enough earlier */
+ smp_mb__after_atomic();
+ goto out;
+ }
+ skip_nr--;
+ }
+
+ if (p) {
+ if (bch_root_node_dirty_init(c, state->d, p) < 0)
+ goto out;
+ }
+
+ p = NULL;
+ prev_idx = cur_idx;
+ }
+
+out:
+ /* In order to wake up state->wait in time */
+ smp_mb__before_atomic();
+ if (atomic_dec_and_test(&state->started))
+ wake_up(&state->wait);
+
+ return 0;
+}
+
+static int bch_btre_dirty_init_thread_nr(void)
+{
+ int n = num_online_cpus()/2;
+
+ if (n == 0)
+ n = 1;
+ else if (n > BCH_DIRTY_INIT_THRD_MAX)
+ n = BCH_DIRTY_INIT_THRD_MAX;
+
+ return n;
+}
+
+void bch_sectors_dirty_init(struct bcache_device *d)
+{
+ int i;
+ struct btree *b = NULL;
+ struct bkey *k = NULL;
+ struct btree_iter iter;
+ struct sectors_dirty_init op;
+ struct cache_set *c = d->c;
+ struct bch_dirty_init_state state;
+
+retry_lock:
+ b = c->root;
+ rw_lock(0, b, b->level);
+ if (b != c->root) {
+ rw_unlock(0, b);
+ goto retry_lock;
+ }
+
+ /* Just count root keys if no leaf node */
+ if (c->root->level == 0) {
+ bch_btree_op_init(&op.op, -1);
+ op.inode = d->id;
+ op.count = 0;
+
+ for_each_key_filter(&c->root->keys,
+ k, &iter, bch_ptr_invalid) {
+ if (KEY_INODE(k) != op.inode)
+ continue;
+ sectors_dirty_init_fn(&op.op, c->root, k);
+ }
+
+ rw_unlock(0, b);
+ return;
+ }
+
+ memset(&state, 0, sizeof(struct bch_dirty_init_state));
+ state.c = c;
+ state.d = d;
+ state.total_threads = bch_btre_dirty_init_thread_nr();
+ state.key_idx = 0;
+ spin_lock_init(&state.idx_lock);
+ atomic_set(&state.started, 0);
+ atomic_set(&state.enough, 0);
+ init_waitqueue_head(&state.wait);
+
+ for (i = 0; i < state.total_threads; i++) {
+ /* Fetch latest state.enough earlier */
+ smp_mb__before_atomic();
+ if (atomic_read(&state.enough))
+ break;
+
+ atomic_inc(&state.started);
+ state.infos[i].state = &state;
+ state.infos[i].thread =
+ kthread_run(bch_dirty_init_thread, &state.infos[i],
+ "bch_dirtcnt[%d]", i);
+ if (IS_ERR(state.infos[i].thread)) {
+ pr_err("fails to run thread bch_dirty_init[%d]\n", i);
+ atomic_dec(&state.started);
+ for (--i; i >= 0; i--)
+ kthread_stop(state.infos[i].thread);
+ goto out;
+ }
+ }
+
+out:
+ /* Must wait for all threads to stop. */
+ wait_event(state.wait, atomic_read(&state.started) == 0);
+ rw_unlock(0, b);
+}
+
+void bch_cached_dev_writeback_init(struct cached_dev *dc)
+{
+ sema_init(&dc->in_flight, 64);
+ init_rwsem(&dc->writeback_lock);
+ bch_keybuf_init(&dc->writeback_keys);
+
+ dc->writeback_metadata = true;
+ dc->writeback_running = false;
+ dc->writeback_consider_fragment = true;
+ dc->writeback_percent = 10;
+ dc->writeback_delay = 30;
+ atomic_long_set(&dc->writeback_rate.rate, 1024);
+ dc->writeback_rate_minimum = 8;
+
+ dc->writeback_rate_update_seconds = WRITEBACK_RATE_UPDATE_SECS_DEFAULT;
+ dc->writeback_rate_p_term_inverse = 40;
+ dc->writeback_rate_fp_term_low = 1;
+ dc->writeback_rate_fp_term_mid = 10;
+ dc->writeback_rate_fp_term_high = 1000;
+ dc->writeback_rate_i_term_inverse = 10000;
+
+ /* For dc->writeback_lock contention in update_writeback_rate() */
+ dc->rate_update_retry = 0;
+
+ WARN_ON(test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
+ INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
+}
+
+int bch_cached_dev_writeback_start(struct cached_dev *dc)
+{
+ dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
+ WQ_MEM_RECLAIM, 0);
+ if (!dc->writeback_write_wq)
+ return -ENOMEM;
+
+ cached_dev_get(dc);
+ dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
+ "bcache_writeback");
+ if (IS_ERR(dc->writeback_thread)) {
+ cached_dev_put(dc);
+ destroy_workqueue(dc->writeback_write_wq);
+ return PTR_ERR(dc->writeback_thread);
+ }
+ dc->writeback_running = true;
+
+ WARN_ON(test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
+ schedule_delayed_work(&dc->writeback_rate_update,
+ dc->writeback_rate_update_seconds * HZ);
+
+ bch_writeback_queue(dc);
+
+ return 0;
+}
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
new file mode 100644
index 0000000000..31df716951
--- /dev/null
+++ b/drivers/md/bcache/writeback.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHE_WRITEBACK_H
+#define _BCACHE_WRITEBACK_H
+
+#define CUTOFF_WRITEBACK 40
+#define CUTOFF_WRITEBACK_SYNC 70
+
+#define CUTOFF_WRITEBACK_MAX 70
+#define CUTOFF_WRITEBACK_SYNC_MAX 90
+
+#define MAX_WRITEBACKS_IN_PASS 5
+#define MAX_WRITESIZE_IN_PASS 5000 /* *512b */
+
+#define WRITEBACK_RATE_UPDATE_SECS_MAX 60
+#define WRITEBACK_RATE_UPDATE_SECS_DEFAULT 5
+
+#define BCH_AUTO_GC_DIRTY_THRESHOLD 50
+
+#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW 50
+#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID 57
+#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH 64
+
+#define BCH_DIRTY_INIT_THRD_MAX 12
+/*
+ * 14 (i.e. shares expressed in 16384ths) is chosen here so that each
+ * backing device gets a reasonable fraction of the share, and the
+ * arithmetic does not blow up until individual backing devices reach a
+ * petabyte.
+ */
+#define WRITEBACK_SHARE_SHIFT 14
+
+struct bch_dirty_init_state;
+struct dirty_init_thrd_info {
+ struct bch_dirty_init_state *state;
+ struct task_struct *thread;
+};
+
+struct bch_dirty_init_state {
+ struct cache_set *c;
+ struct bcache_device *d;
+ int total_threads;
+ int key_idx;
+ spinlock_t idx_lock;
+ atomic_t started;
+ atomic_t enough;
+ wait_queue_head_t wait;
+ struct dirty_init_thrd_info infos[BCH_DIRTY_INIT_THRD_MAX];
+};
+
+static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
+{
+ uint64_t i, ret = 0;
+
+ for (i = 0; i < d->nr_stripes; i++)
+ ret += atomic_read(d->stripe_sectors_dirty + i);
+
+ return ret;
+}
+
+static inline int offset_to_stripe(struct bcache_device *d,
+ uint64_t offset)
+{
+ do_div(offset, d->stripe_size);
+
+ /* d->nr_stripes is in range [1, INT_MAX] */
+ if (unlikely(offset >= d->nr_stripes)) {
+ pr_err("Invalid stripe %llu (>= nr_stripes %d).\n",
+ offset, d->nr_stripes);
+ return -EINVAL;
+ }
+
+ /*
+	 * Here offset is definitely smaller than INT_MAX;
+	 * returning it as an int will never overflow.
+ */
+ return offset;
+}
+
+static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
+ uint64_t offset,
+ unsigned int nr_sectors)
+{
+ int stripe = offset_to_stripe(&dc->disk, offset);
+
+ if (stripe < 0)
+ return false;
+
+ while (1) {
+ if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
+ return true;
+
+ if (nr_sectors <= dc->disk.stripe_size)
+ return false;
+
+ nr_sectors -= dc->disk.stripe_size;
+ stripe++;
+ }
+}
+
+extern unsigned int bch_cutoff_writeback;
+extern unsigned int bch_cutoff_writeback_sync;
+
+static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
+ unsigned int cache_mode, bool would_skip)
+{
+ unsigned int in_use = dc->disk.c->gc_stats.in_use;
+
+ if (cache_mode != CACHE_MODE_WRITEBACK ||
+ test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
+ in_use > bch_cutoff_writeback_sync)
+ return false;
+
+ if (bio_op(bio) == REQ_OP_DISCARD)
+ return false;
+
+ if (dc->partial_stripes_expensive &&
+ bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
+ bio_sectors(bio)))
+ return true;
+
+ if (would_skip)
+ return false;
+
+ return (op_is_sync(bio->bi_opf) ||
+ bio->bi_opf & (REQ_META|REQ_PRIO) ||
+ in_use <= bch_cutoff_writeback);
+}
+
+static inline void bch_writeback_queue(struct cached_dev *dc)
+{
+ if (!IS_ERR_OR_NULL(dc->writeback_thread))
+ wake_up_process(dc->writeback_thread);
+}
+
+static inline void bch_writeback_add(struct cached_dev *dc)
+{
+ if (!atomic_read(&dc->has_dirty) &&
+ !atomic_xchg(&dc->has_dirty, 1)) {
+ if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
+ SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
+ /* XXX: should do this synchronously */
+ bch_write_bdev_super(dc, NULL);
+ }
+
+ bch_writeback_queue(dc);
+ }
+}
+
+void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
+ uint64_t offset, int nr_sectors);
+
+void bch_sectors_dirty_init(struct bcache_device *d);
+void bch_cached_dev_writeback_init(struct cached_dev *dc);
+int bch_cached_dev_writeback_start(struct cached_dev *dc);
+
+#endif