author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-18 17:35:05 +0000
---|---|---
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-18 17:39:31 +0000
commit | 85c675d0d09a45a135bddd15d7b385f8758c32fb (patch) |
tree | 76267dbc9b9a130337be3640948fe397b04ac629 /fs/bcachefs/movinggc.c |
parent | Adding upstream version 6.6.15. (diff) |
download | linux-85c675d0d09a45a135bddd15d7b385f8758c32fb.tar.xz, linux-85c675d0d09a45a135bddd15d7b385f8758c32fb.zip |
Adding upstream version 6.7.7. (upstream/6.7.7)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'fs/bcachefs/movinggc.c')
-rw-r--r-- | fs/bcachefs/movinggc.c | 431 |
1 file changed, 431 insertions, 0 deletions
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
new file mode 100644
index 0000000000..a84e79f79e
--- /dev/null
+++ b/fs/bcachefs/movinggc.c
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Moving/copying garbage collector
+ *
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "bcachefs.h"
+#include "alloc_background.h"
+#include "alloc_foreground.h"
+#include "btree_iter.h"
+#include "btree_update.h"
+#include "btree_write_buffer.h"
+#include "buckets.h"
+#include "clock.h"
+#include "errcode.h"
+#include "error.h"
+#include "lru.h"
+#include "move.h"
+#include "movinggc.h"
+#include "trace.h"
+
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+#include <linux/math64.h>
+#include <linux/sched/task.h>
+#include <linux/wait.h>
+
+struct buckets_in_flight {
+	struct rhashtable		table;
+	struct move_bucket_in_flight	*first;
+	struct move_bucket_in_flight	*last;
+	size_t				nr;
+	size_t				sectors;
+};
+
+static const struct rhashtable_params bch_move_bucket_params = {
+	.head_offset	= offsetof(struct move_bucket_in_flight, hash),
+	.key_offset	= offsetof(struct move_bucket_in_flight, bucket.k),
+	.key_len	= sizeof(struct move_bucket_key),
+};
+
+static struct move_bucket_in_flight *
+move_bucket_in_flight_add(struct buckets_in_flight *list, struct move_bucket b)
+{
+	struct move_bucket_in_flight *new = kzalloc(sizeof(*new), GFP_KERNEL);
+	int ret;
+
+	if (!new)
+		return ERR_PTR(-ENOMEM);
+
+	new->bucket = b;
+
+	ret = rhashtable_lookup_insert_fast(&list->table, &new->hash,
+					    bch_move_bucket_params);
+	if (ret) {
+		kfree(new);
+		return ERR_PTR(ret);
+	}
+
+	if (!list->first)
+		list->first = new;
+	else
+		list->last->next = new;
+
+	list->last = new;
+	list->nr++;
+	list->sectors += b.sectors;
+	return new;
+}
+
+static int bch2_bucket_is_movable(struct btree_trans *trans,
+				  struct move_bucket *b, u64 time)
+{
+	struct btree_iter iter;
+	struct bkey_s_c k;
+	struct bch_alloc_v4 _a;
+	const struct bch_alloc_v4 *a;
+	int ret;
+
+	if (bch2_bucket_is_open(trans->c,
+				b->k.bucket.inode,
+				b->k.bucket.offset))
+		return 0;
+
+	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
+			       b->k.bucket, BTREE_ITER_CACHED);
+	ret = bkey_err(k);
+	if (ret)
+		return ret;
+
+	a = bch2_alloc_to_v4(k, &_a);
+	b->k.gen	= a->gen;
+	b->sectors	= a->dirty_sectors;
+
+	ret = data_type_movable(a->data_type) &&
+		a->fragmentation_lru &&
+		a->fragmentation_lru <= time;
+
+	bch2_trans_iter_exit(trans, &iter);
+	return ret;
+}
+
+static void move_buckets_wait(struct moving_context *ctxt,
+			      struct buckets_in_flight *list,
+			      bool flush)
+{
+	struct move_bucket_in_flight *i;
+	int ret;
+
+	while ((i = list->first)) {
+		if (flush)
+			move_ctxt_wait_event(ctxt, !atomic_read(&i->count));
+
+		if (atomic_read(&i->count))
+			break;
+
+		list->first = i->next;
+		if (!list->first)
+			list->last = NULL;
+
+		list->nr--;
+		list->sectors -= i->bucket.sectors;
+
+		ret = rhashtable_remove_fast(&list->table, &i->hash,
+					     bch_move_bucket_params);
+		BUG_ON(ret);
+		kfree(i);
+	}
+
+	bch2_trans_unlock_long(ctxt->trans);
+}
+
+static bool bucket_in_flight(struct buckets_in_flight *list,
+			     struct move_bucket_key k)
+{
+	return rhashtable_lookup_fast(&list->table, &k, bch_move_bucket_params);
+}
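The in-flight machinery above indexes every bucket twice: the rhashtable gives bucket_in_flight() an O(1) membership test during the LRU scan, while the first/last singly-linked FIFO lets move_buckets_wait() free entries in submission order once their IO refcount drops to zero. A minimal sketch of how the three helpers compose; the caller below is hypothetical and not part of this commit:

	/* hypothetical caller, for illustration only */
	static void track_one_bucket(struct moving_context *ctxt,
				     struct buckets_in_flight *list,
				     struct move_bucket b)
	{
		/* O(1) rhashtable lookup: skip buckets already being evacuated */
		if (bucket_in_flight(list, b.k))
			return;

		/* inserts into the hash table and appends to the FIFO tail */
		struct move_bucket_in_flight *f = move_bucket_in_flight_add(list, b);
		if (IS_ERR(f))
			return;	/* -EEXIST: duplicate bucket; -ENOMEM: retry later */

		/* ... start the evacuation; f->count tracks outstanding IOs ... */

		/* reap completed entries from the FIFO head, oldest first */
		move_buckets_wait(ctxt, list, false);
	}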
+
+typedef DARRAY(struct move_bucket) move_buckets;
+
+static int bch2_copygc_get_buckets(struct moving_context *ctxt,
+			struct buckets_in_flight *buckets_in_flight,
+			move_buckets *buckets)
+{
+	struct btree_trans *trans = ctxt->trans;
+	struct bch_fs *c = trans->c;
+	struct btree_iter iter;
+	struct bkey_s_c k;
+	size_t nr_to_get = max_t(size_t, 16U, buckets_in_flight->nr / 4);
+	size_t saw = 0, in_flight = 0, not_movable = 0, sectors = 0;
+	int ret;
+
+	move_buckets_wait(ctxt, buckets_in_flight, false);
+
+	ret = bch2_btree_write_buffer_flush(trans);
+	if (bch2_fs_fatal_err_on(ret, c, "%s: error %s from bch2_btree_write_buffer_flush()",
+				 __func__, bch2_err_str(ret)))
+		return ret;
+
+	ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_lru,
+				  lru_pos(BCH_LRU_FRAGMENTATION_START, 0, 0),
+				  lru_pos(BCH_LRU_FRAGMENTATION_START, U64_MAX, LRU_TIME_MAX),
+				  0, k, ({
+		struct move_bucket b = { .k.bucket = u64_to_bucket(k.k->p.offset) };
+		int ret2 = 0;
+
+		saw++;
+
+		if (!bch2_bucket_is_movable(trans, &b, lru_pos_time(k.k->p)))
+			not_movable++;
+		else if (bucket_in_flight(buckets_in_flight, b.k))
+			in_flight++;
+		else {
+			ret2 = darray_push(buckets, b) ?: buckets->nr >= nr_to_get;
+			if (ret2 >= 0)
+				sectors += b.sectors;
+		}
+		ret2;
+	}));
+
+	pr_debug("have: %zu (%zu) saw %zu in flight %zu not movable %zu got %zu (%zu)/%zu buckets ret %i",
+		 buckets_in_flight->nr, buckets_in_flight->sectors,
+		 saw, in_flight, not_movable, buckets->nr, sectors, nr_to_get, ret);
+
+	return ret < 0 ? ret : 0;
+}
+
+noinline
+static int bch2_copygc(struct moving_context *ctxt,
+		       struct buckets_in_flight *buckets_in_flight,
+		       bool *did_work)
+{
+	struct btree_trans *trans = ctxt->trans;
+	struct bch_fs *c = trans->c;
+	struct data_update_opts data_opts = {
+		.btree_insert_flags = BCH_WATERMARK_copygc,
+	};
+	move_buckets buckets = { 0 };
+	struct move_bucket_in_flight *f;
+	struct move_bucket *i;
+	u64 moved = atomic64_read(&ctxt->stats->sectors_moved);
+	int ret = 0;
+
+	ret = bch2_copygc_get_buckets(ctxt, buckets_in_flight, &buckets);
+	if (ret)
+		goto err;
+
+	darray_for_each(buckets, i) {
+		if (kthread_should_stop() || freezing(current))
+			break;
+
+		f = move_bucket_in_flight_add(buckets_in_flight, *i);
+		ret = PTR_ERR_OR_ZERO(f);
+		if (ret == -EEXIST) { /* rare race: copygc_get_buckets returned same bucket more than once */
+			ret = 0;
+			continue;
+		}
+		if (ret == -ENOMEM) { /* flush IO, continue later */
+			ret = 0;
+			break;
+		}
+
+		ret = __bch2_evacuate_bucket(ctxt, f, f->bucket.k.bucket,
+					     f->bucket.k.gen, data_opts);
+		if (ret)
+			goto err;
+
+		*did_work = true;
+	}
+err:
+	darray_exit(&buckets);
+
+	/* no entries in LRU btree found, or got to end: */
+	if (bch2_err_matches(ret, ENOENT))
+		ret = 0;
+
+	if (ret < 0 && !bch2_err_matches(ret, EROFS))
+		bch_err_msg(c, ret, "from bch2_move_data()");
+
+	moved = atomic64_read(&ctxt->stats->sectors_moved) - moved;
+	trace_and_count(c, copygc, c, moved, 0, 0, 0);
+	return ret;
+}
+
+/*
+ * Copygc runs when the amount of fragmented data is above some arbitrary
+ * threshold:
+ *
+ * The threshold at the limit - when the device is full - is the amount of space
+ * we reserved in bch2_recalc_capacity; we can't have more than that amount of
+ * disk space stranded due to fragmentation and store everything we have
+ * promised to store.
+ *
+ * But we don't want to be running copygc unnecessarily when the device still
+ * has plenty of free space - rather, we want copygc to smoothly run every so
+ * often and continually reduce the amount of fragmented space as the device
+ * fills up. So, we increase the threshold by half the current free space.
+ */
+unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
+{
+	struct bch_dev *ca;
+	unsigned dev_idx;
+	s64 wait = S64_MAX, fragmented_allowed, fragmented;
+	unsigned i;
+
+	for_each_rw_member(ca, c, dev_idx) {
+		struct bch_dev_usage usage = bch2_dev_usage_read(ca);
+
+		fragmented_allowed = ((__dev_buckets_available(ca, usage, BCH_WATERMARK_stripe) *
+				       ca->mi.bucket_size) >> 1);
+		fragmented = 0;
+
+		for (i = 0; i < BCH_DATA_NR; i++)
+			if (data_type_movable(i))
+				fragmented += usage.d[i].fragmented;
+
+		wait = min(wait, max(0LL, fragmented_allowed - fragmented));
+	}
+
+	return wait;
+}
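To make the comment above concrete: per device, the allowed fragmentation is half the free space at the stripe watermark, and the returned wait is how many more sectors may be written (on the write io_clock) before copygc should run. A standalone sketch of the arithmetic with invented numbers (bucket size, available buckets, and fragmented sectors are all hypothetical; the real values come from __dev_buckets_available() and bch2_dev_usage_read()):

	#include <stdio.h>
	#include <stdint.h>
	#include <inttypes.h>

	int main(void)
	{
		/* hypothetical device: 512 KiB buckets, 100,000 free at the stripe watermark */
		int64_t bucket_size   = 1024;		/* sectors (512 B each) */
		int64_t avail_buckets = 100000;		/* free buckets at the watermark */
		int64_t fragmented    = 50000000;	/* fragmented sectors in movable data types */

		/* threshold = half the free space, as in bch2_copygc_wait_amount() */
		int64_t fragmented_allowed = (avail_buckets * bucket_size) >> 1; /* 51,200,000 */

		/* copygc sleeps until this many more sectors are written */
		int64_t wait = fragmented_allowed - fragmented;
		if (wait < 0)
			wait = 0;

		printf("wait = %" PRId64 " sectors (~%" PRId64 " MiB of writes)\n",
		       wait, wait >> 11);	/* 2048 x 512 B sectors per MiB */
		return 0;
	}

With these numbers copygc stays asleep until roughly 586 MiB more have been written; as fragmented approaches fragmented_allowed the wait shrinks toward zero and copygc runs continuously.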
+
+void bch2_copygc_wait_to_text(struct printbuf *out, struct bch_fs *c)
+{
+	prt_printf(out, "Currently waiting for: ");
+	prt_human_readable_u64(out, max(0LL, c->copygc_wait -
+					atomic64_read(&c->io_clock[WRITE].now)) << 9);
+	prt_newline(out);
+
+	prt_printf(out, "Currently waiting since: ");
+	prt_human_readable_u64(out, max(0LL,
+					atomic64_read(&c->io_clock[WRITE].now) -
+					c->copygc_wait_at) << 9);
+	prt_newline(out);
+
+	prt_printf(out, "Currently calculated wait: ");
+	prt_human_readable_u64(out, bch2_copygc_wait_amount(c));
+	prt_newline(out);
+}
+
+static int bch2_copygc_thread(void *arg)
+{
+	struct bch_fs *c = arg;
+	struct moving_context ctxt;
+	struct bch_move_stats move_stats;
+	struct io_clock *clock = &c->io_clock[WRITE];
+	struct buckets_in_flight *buckets;
+	u64 last, wait;
+	int ret = 0;
+
+	buckets = kzalloc(sizeof(struct buckets_in_flight), GFP_KERNEL);
+	if (!buckets)
+		return -ENOMEM;
+	ret = rhashtable_init(&buckets->table, &bch_move_bucket_params);
+	if (ret) {
+		kfree(buckets);
+		bch_err_msg(c, ret, "allocating copygc buckets in flight");
+		return ret;
+	}
+
+	set_freezable();
+
+	bch2_move_stats_init(&move_stats, "copygc");
+	bch2_moving_ctxt_init(&ctxt, c, NULL, &move_stats,
+			      writepoint_ptr(&c->copygc_write_point),
+			      false);
+
+	while (!ret && !kthread_should_stop()) {
+		bool did_work = false;
+
+		bch2_trans_unlock_long(ctxt.trans);
+		cond_resched();
+
+		if (!c->copy_gc_enabled) {
+			move_buckets_wait(&ctxt, buckets, true);
+			kthread_wait_freezable(c->copy_gc_enabled);
+		}
+
+		if (unlikely(freezing(current))) {
+			move_buckets_wait(&ctxt, buckets, true);
+			__refrigerator(false);
+			continue;
+		}
+
+		last = atomic64_read(&clock->now);
+		wait = bch2_copygc_wait_amount(c);
+
+		if (wait > clock->max_slop) {
+			c->copygc_wait_at = last;
+			c->copygc_wait = last + wait;
+			move_buckets_wait(&ctxt, buckets, true);
+			trace_and_count(c, copygc_wait, c, wait, last + wait);
+			bch2_kthread_io_clock_wait(clock, last + wait,
+						   MAX_SCHEDULE_TIMEOUT);
+			continue;
+		}
+
+		c->copygc_wait = 0;
+
+		c->copygc_running = true;
+		ret = bch2_copygc(&ctxt, buckets, &did_work);
+		c->copygc_running = false;
+
+		wake_up(&c->copygc_running_wq);
+
+		if (!wait && !did_work) {
+			u64 min_member_capacity = bch2_min_rw_member_capacity(c);
+
+			if (min_member_capacity == U64_MAX)
+				min_member_capacity = 128 * 2048;
+
+			bch2_trans_unlock_long(ctxt.trans);
+			bch2_kthread_io_clock_wait(clock, last + (min_member_capacity >> 6),
+						   MAX_SCHEDULE_TIMEOUT);
+		}
+	}
+
+	move_buckets_wait(&ctxt, buckets, true);
+
+	rhashtable_destroy(&buckets->table);
+	kfree(buckets);
+	bch2_moving_ctxt_exit(&ctxt);
+	bch2_move_stats_exit(&move_stats, c);
+
+	return 0;
+}
+
+void bch2_copygc_stop(struct bch_fs *c)
+{
+	if (c->copygc_thread) {
+		kthread_stop(c->copygc_thread);
+		put_task_struct(c->copygc_thread);
+	}
+	c->copygc_thread = NULL;
+}
+
+int bch2_copygc_start(struct bch_fs *c)
+{
+	struct task_struct *t;
+	int ret;
+
+	if (c->copygc_thread)
+		return 0;
+
+	if (c->opts.nochanges)
+		return 0;
+
+	if (bch2_fs_init_fault("copygc_start"))
+		return -ENOMEM;
+
+	t = kthread_create(bch2_copygc_thread, c, "bch-copygc/%s", c->name);
+	ret = PTR_ERR_OR_ZERO(t);
+	if (ret) {
+		bch_err_msg(c, ret, "creating copygc thread");
+		return ret;
+	}
+
+	get_task_struct(t);
+
+	c->copygc_thread = t;
+	wake_up_process(c->copygc_thread);
+
+	return 0;
+}
+
+void bch2_fs_copygc_init(struct bch_fs *c)
+{
+	init_waitqueue_head(&c->copygc_running_wq);
+	c->copygc_running = false;
+}
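Taken together, the file exposes a small lifecycle API: bch2_fs_copygc_init() during filesystem allocation, bch2_copygc_start() and bch2_copygc_stop() around bring-up and teardown, and bch2_copygc_wait_to_text() for introspection. The ordering sketched below is inferred from the function bodies; the real call sites live elsewhere in the bcachefs tree:

	/* hypothetical bring-up/teardown sequence, for illustration only */
	static int copygc_lifecycle_example(struct bch_fs *c)
	{
		int ret;

		bch2_fs_copygc_init(c);		/* once, early: waitqueue + running flag */

		ret = bch2_copygc_start(c);	/* spawns "bch-copygc/<name>"; a no-op if
						 * already running or mounted with nochanges */
		if (ret)
			return ret;

		/* ... filesystem runs; the thread paces itself off io_clock[WRITE] ... */

		bch2_copygc_stop(c);		/* kthread_stop() + put_task_struct() */
		return 0;
	}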