commit    85c675d0d09a45a135bddd15d7b385f8758c32fb (patch)
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:35:05 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:39:31 +0000
tree      76267dbc9b9a130337be3640948fe397b04ac629 /mm/damon
parent    Adding upstream version 6.6.15. (diff)
Adding upstream version 6.7.7. (upstream/6.7.7)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'mm/damon')
 mm/damon/Kconfig         |  12
 mm/damon/core-test.h     |  29
 mm/damon/core.c          | 208
 mm/damon/dbgfs.c         |   3
 mm/damon/lru_sort.c      |  45
 mm/damon/paddr.c         |  11
 mm/damon/reclaim.c       |  20
 mm/damon/sysfs-common.h  |   2
 mm/damon/sysfs-schemes.c | 170
 mm/damon/sysfs-test.h    |  86
 mm/damon/sysfs.c         |  36
 mm/damon/vaddr.c         |  22
 12 files changed, 592 insertions(+), 52 deletions(-)
diff --git a/mm/damon/Kconfig b/mm/damon/Kconfig
index 436c6b4cb5..29f43fbc2e 100644
--- a/mm/damon/Kconfig
+++ b/mm/damon/Kconfig
@@ -59,6 +59,18 @@ config DAMON_SYSFS
	  This builds the sysfs interface for DAMON. The user space can use
	  the interface for arbitrary data access monitoring.

+config DAMON_SYSFS_KUNIT_TEST
+	bool "Test for damon debugfs interface" if !KUNIT_ALL_TESTS
+	depends on DAMON_SYSFS && KUNIT=y
+	default KUNIT_ALL_TESTS
+	help
+	  This builds the DAMON sysfs interface Kunit test suite.
+
+	  For more information on KUnit and unit tests in general, please refer
+	  to the KUnit documentation.
+
+	  If unsure, say N.
+
 config DAMON_DBGFS
 	bool "DAMON debugfs interface (DEPRECATED!)"
 	depends on DAMON_VADDR && DAMON_PADDR && DEBUG_FS
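The new option follows the usual KUnit conventions, so the suite (named "damon-sysfs" in the sysfs-test.h hunk further down) can be run through the standard kunit.py workflow. An illustrative .kunitconfig fragment — the exact set of prerequisite options is an assumption, not spelled out by the patch:

CONFIG_KUNIT=y
CONFIG_DAMON=y
CONFIG_DAMON_VADDR=y
CONFIG_DAMON_SYSFS=y
CONFIG_DAMON_SYSFS_KUNIT_TEST=y

Saved as a .kunitconfig, something like `./tools/testing/kunit/kunit.py run --kunitconfig=<dir>` would build and run it.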
diff --git a/mm/damon/core-test.h b/mm/damon/core-test.h
index 6cc8b24558..649adf91eb 100644
--- a/mm/damon/core-test.h
+++ b/mm/damon/core-test.h
@@ -30,7 +30,7 @@ static void damon_test_regions(struct kunit *test)
 	damon_add_region(r, t);
 	KUNIT_EXPECT_EQ(test, 1u, damon_nr_regions(t));

-	damon_del_region(r, t);
+	damon_destroy_region(r, t);
 	KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t));

 	damon_free_target(t);
@@ -94,6 +94,7 @@ static void damon_test_aggregate(struct kunit *test)
 		for (ir = 0; ir < 3; ir++) {
 			r = damon_new_region(saddr[it][ir], eaddr[it][ir]);
 			r->nr_accesses = accesses[it][ir];
+			r->nr_accesses_bp = accesses[it][ir] * 10000;
 			damon_add_region(r, t);
 		}
 		it++;
@@ -147,9 +148,11 @@ static void damon_test_merge_two(struct kunit *test)
 	t = damon_new_target();
 	r = damon_new_region(0, 100);
 	r->nr_accesses = 10;
+	r->nr_accesses_bp = 100000;
 	damon_add_region(r, t);
 	r2 = damon_new_region(100, 300);
 	r2->nr_accesses = 20;
+	r2->nr_accesses_bp = 200000;
 	damon_add_region(r2, t);

 	damon_merge_two_regions(t, r, r2);
@@ -196,6 +199,7 @@ static void damon_test_merge_regions_of(struct kunit *test)
 	for (i = 0; i < ARRAY_SIZE(sa); i++) {
 		r = damon_new_region(sa[i], ea[i]);
 		r->nr_accesses = nrs[i];
+		r->nr_accesses_bp = nrs[i] * 10000;
 		damon_add_region(r, t);
 	}

@@ -265,6 +269,8 @@ static void damon_test_ops_registration(struct kunit *test)

 	/* Check double-registration failure again */
 	KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);
+
+	damon_destroy_ctx(c);
 }

 static void damon_test_set_regions(struct kunit *test)
@@ -297,6 +303,7 @@ static void damon_test_update_monitoring_result(struct kunit *test)
 	struct damon_region *r = damon_new_region(3, 7);

 	r->nr_accesses = 15;
+	r->nr_accesses_bp = 150000;
 	r->age = 20;

 	new_attrs = (struct damon_attrs){
@@ -316,6 +323,8 @@ static void damon_test_update_monitoring_result(struct kunit *test)
 	damon_update_monitoring_result(r, &old_attrs, &new_attrs);
 	KUNIT_EXPECT_EQ(test, r->nr_accesses, 150);
 	KUNIT_EXPECT_EQ(test, r->age, 20);
+
+	damon_free_region(r);
 }

 static void damon_test_set_attrs(struct kunit *test)
@@ -339,6 +348,23 @@ static void damon_test_set_attrs(struct kunit *test)
 	invalid_attrs = valid_attrs;
 	invalid_attrs.aggr_interval = 4999;
 	KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);
+
+	damon_destroy_ctx(c);
+}
+
+static void damon_test_moving_sum(struct kunit *test)
+{
+	unsigned int mvsum = 50000, nomvsum = 50000, len_window = 10;
+	unsigned int new_values[] = {10000, 0, 10000, 0, 0, 0, 10000, 0, 0, 0};
+	unsigned int expects[] = {55000, 50000, 55000, 50000, 45000, 40000,
+			45000, 40000, 35000, 30000};
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(new_values); i++) {
+		mvsum = damon_moving_sum(mvsum, nomvsum, len_window,
+				new_values[i]);
+		KUNIT_EXPECT_EQ(test, mvsum, expects[i]);
+	}
 }

 static void damos_test_new_filter(struct kunit *test)
@@ -425,6 +451,7 @@ static struct kunit_case damon_test_cases[] = {
 	KUNIT_CASE(damon_test_set_regions),
 	KUNIT_CASE(damon_test_update_monitoring_result),
 	KUNIT_CASE(damon_test_set_attrs),
+	KUNIT_CASE(damon_test_moving_sum),
 	KUNIT_CASE(damos_test_new_filter),
 	KUNIT_CASE(damos_test_filter_out),
 	{},
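The expected values in damon_test_moving_sum() follow directly from the recurrence that damon_moving_sum() implements (see the core.c hunks below): each step drops the assumed per-slot past value, nomvsum / len_window (50000 / 10 = 5000 here), and adds the new value. A minimal user-space sketch — the function is reimplemented here for illustration, it is not part of the patch — reproduces the expected series:

#include <assert.h>
#include <stdio.h>

/* same recurrence as the kernel's damon_moving_sum() */
static unsigned int moving_sum(unsigned int mvsum, unsigned int nomvsum,
		unsigned int len_window, unsigned int new_value)
{
	return mvsum - nomvsum / len_window + new_value;
}

int main(void)
{
	unsigned int mvsum = 50000, nomvsum = 50000, len_window = 10;
	unsigned int new_values[] = {10000, 0, 10000, 0, 0, 0, 10000, 0, 0, 0};
	unsigned int expects[] = {55000, 50000, 55000, 50000, 45000, 40000,
			45000, 40000, 35000, 30000};
	int i;

	for (i = 0; i < 10; i++) {
		/* e.g., step 0: 50000 - 5000 + 10000 == 55000 */
		mvsum = moving_sum(mvsum, nomvsum, len_window, new_values[i]);
		assert(mvsum == expects[i]);
	}
	printf("all ten steps match\n");
	return 0;
}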
diff --git a/mm/damon/core.c b/mm/damon/core.c
index aff611b6ea..fb38183576 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -128,6 +128,7 @@ struct damon_region *damon_new_region(unsigned long start, unsigned long end)
 	region->ar.start = start;
 	region->ar.end = end;
 	region->nr_accesses = 0;
+	region->nr_accesses_bp = 0;
 	INIT_LIST_HEAD(&region->list);

 	region->age = 0;
@@ -312,7 +313,9 @@ static struct damos_quota *damos_quota_init_priv(struct damos_quota *quota)
 }

 struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
-			enum damos_action action, struct damos_quota *quota,
+			enum damos_action action,
+			unsigned long apply_interval_us,
+			struct damos_quota *quota,
 			struct damos_watermarks *wmarks)
 {
 	struct damos *scheme;
@@ -322,6 +325,13 @@ struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
 		return NULL;
 	scheme->pattern = *pattern;
 	scheme->action = action;
+	scheme->apply_interval_us = apply_interval_us;
+	/*
+	 * next_apply_sis will be set when kdamond starts.  While kdamond is
+	 * running, it will also updated when it is added to the DAMON context,
+	 * or damon_attrs are updated.
+	 */
+	scheme->next_apply_sis = 0;
 	INIT_LIST_HEAD(&scheme->filters);
 	scheme->stat = (struct damos_stat){};
 	INIT_LIST_HEAD(&scheme->list);
@@ -334,9 +344,21 @@ struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
 	return scheme;
 }

+static void damos_set_next_apply_sis(struct damos *s, struct damon_ctx *ctx)
+{
+	unsigned long sample_interval = ctx->attrs.sample_interval ?
+		ctx->attrs.sample_interval : 1;
+	unsigned long apply_interval = s->apply_interval_us ?
+		s->apply_interval_us : ctx->attrs.aggr_interval;
+
+	s->next_apply_sis = ctx->passed_sample_intervals +
+		apply_interval / sample_interval;
+}
+
 void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
 {
 	list_add_tail(&s->list, &ctx->schemes);
+	damos_set_next_apply_sis(s, ctx);
 }

 static void damon_del_scheme(struct damos *s)
@@ -504,6 +526,7 @@ static void damon_update_monitoring_result(struct damon_region *r,
 {
 	r->nr_accesses = damon_nr_accesses_for_new_attrs(r->nr_accesses,
 			old_attrs, new_attrs);
+	r->nr_accesses_bp = r->nr_accesses * 10000;
 	r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs);
 }

@@ -539,7 +562,11 @@ static void damon_update_monitoring_results(struct damon_ctx *ctx,
 * @ctx:		monitoring context
 * @attrs:		monitoring attributes
 *
- * This function should not be called while the kdamond is running.
+ * This function should be called while the kdamond is not running, or an
+ * access check results aggregation is not ongoing (e.g., from
+ * &struct damon_callback->after_aggregation or
+ * &struct damon_callback->after_wmarks_check callbacks).
+ *
 * Every time interval is in micro-seconds.
 *
 * Return: 0 on success, negative error code otherwise.
@@ -548,6 +575,7 @@ int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
 {
 	unsigned long sample_interval = attrs->sample_interval ?
 			attrs->sample_interval : 1;
+	struct damos *s;

 	if (attrs->min_nr_regions < 3)
 		return -EINVAL;
@@ -563,6 +591,10 @@ int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)

 	damon_update_monitoring_results(ctx, attrs);
 	ctx->attrs = *attrs;
+
+	damon_for_each_scheme(s, ctx)
+		damos_set_next_apply_sis(s, ctx);
+
 	return 0;
 }

@@ -708,8 +740,7 @@ static int __damon_stop(struct damon_ctx *ctx)
 	if (tsk) {
 		get_task_struct(tsk);
 		mutex_unlock(&ctx->kdamond_lock);
-		kthread_stop(tsk);
-		put_task_struct(tsk);
+		kthread_stop_put(tsk);
 		return 0;
 	}
 	mutex_unlock(&ctx->kdamond_lock);
@@ -749,7 +780,7 @@ static void kdamond_reset_aggregated(struct damon_ctx *c)
 		struct damon_region *r;

 		damon_for_each_region(r, t) {
-			trace_damon_aggregated(t, ti, r, damon_nr_regions(t));
+			trace_damon_aggregated(ti, r, damon_nr_regions(t));
 			r->last_nr_accesses = r->nr_accesses;
 			r->nr_accesses = 0;
 		}
@@ -763,12 +794,13 @@ static void damon_split_region_at(struct damon_target *t,
 static bool __damos_valid_target(struct damon_region *r, struct damos *s)
 {
 	unsigned long sz;
+	unsigned int nr_accesses = r->nr_accesses_bp / 10000;

 	sz = damon_sz_region(r);
 	return s->pattern.min_sz_region <= sz &&
 		sz <= s->pattern.max_sz_region &&
-		s->pattern.min_nr_accesses <= r->nr_accesses &&
-		r->nr_accesses <= s->pattern.max_nr_accesses &&
+		s->pattern.min_nr_accesses <= nr_accesses &&
+		nr_accesses <= s->pattern.max_nr_accesses &&
 		s->pattern.min_age_region <= r->age &&
 		r->age <= s->pattern.max_age_region;
 }
@@ -923,6 +955,33 @@ static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
 	struct timespec64 begin, end;
 	unsigned long sz_applied = 0;
 	int err = 0;
+	/*
+	 * We plan to support multiple context per kdamond, as DAMON sysfs
+	 * implies with 'nr_contexts' file.  Nevertheless, only single context
+	 * per kdamond is supported for now.  So, we can simply use '0' context
+	 * index here.
+	 */
+	unsigned int cidx = 0;
+	struct damos *siter;		/* schemes iterator */
+	unsigned int sidx = 0;
+	struct damon_target *titer;	/* targets iterator */
+	unsigned int tidx = 0;
+	bool do_trace = false;
+
+	/* get indices for trace_damos_before_apply() */
+	if (trace_damos_before_apply_enabled()) {
+		damon_for_each_scheme(siter, c) {
+			if (siter == s)
+				break;
+			sidx++;
+		}
+		damon_for_each_target(titer, c) {
+			if (titer == t)
+				break;
+			tidx++;
+		}
+		do_trace = true;
+	}

 	if (c->ops.apply_scheme) {
 		if (quota->esz && quota->charged_sz + sz > quota->esz) {
@@ -937,8 +996,11 @@ static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
 		ktime_get_coarse_ts64(&begin);
 		if (c->callback.before_damos_apply)
 			err = c->callback.before_damos_apply(c, t, r, s);
-		if (!err)
+		if (!err) {
+			trace_damos_before_apply(cidx, sidx, tidx, r,
+					damon_nr_regions(t), do_trace);
 			sz_applied = c->ops.apply_scheme(c, t, r, s);
+		}
 		ktime_get_coarse_ts64(&end);
 		quota->total_charged_ns += timespec64_to_ns(&end) -
 			timespec64_to_ns(&begin);
@@ -964,6 +1026,9 @@ static void damon_do_apply_schemes(struct damon_ctx *c,
 	damon_for_each_scheme(s, c) {
 		struct damos_quota *quota = &s->quota;

+		if (c->passed_sample_intervals != s->next_apply_sis)
+			continue;
+
 		if (!s->wmarks.activated)
 			continue;

@@ -1056,18 +1121,37 @@ static void kdamond_apply_schemes(struct damon_ctx *c)
 	struct damon_target *t;
 	struct damon_region *r, *next_r;
 	struct damos *s;
+	unsigned long sample_interval = c->attrs.sample_interval ?
+		c->attrs.sample_interval : 1;
+	bool has_schemes_to_apply = false;

 	damon_for_each_scheme(s, c) {
+		if (c->passed_sample_intervals != s->next_apply_sis)
+			continue;
+
 		if (!s->wmarks.activated)
 			continue;

+		has_schemes_to_apply = true;
+
 		damos_adjust_quota(c, s);
 	}

+	if (!has_schemes_to_apply)
+		return;
+
 	damon_for_each_target(t, c) {
 		damon_for_each_region_safe(r, next_r, t)
 			damon_do_apply_schemes(c, t, r);
 	}
+
+	damon_for_each_scheme(s, c) {
+		if (c->passed_sample_intervals != s->next_apply_sis)
+			continue;
+		s->next_apply_sis +=
+			(s->apply_interval_us ? s->apply_interval_us :
+			 c->attrs.aggr_interval) / sample_interval;
+	}
 }

 /*
@@ -1080,6 +1164,7 @@ static void damon_merge_two_regions(struct damon_target *t,

 	l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
 			(sz_l + sz_r);
+	l->nr_accesses_bp = l->nr_accesses * 10000;
 	l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
 	l->ar.end = r->ar.end;
 	damon_destroy_region(r, t);
@@ -1151,6 +1236,8 @@ static void damon_split_region_at(struct damon_target *t,

 	new->age = r->age;
 	new->last_nr_accesses = r->last_nr_accesses;
+	new->nr_accesses_bp = r->nr_accesses_bp;
+	new->nr_accesses = r->nr_accesses;

 	damon_insert_region(new, r, damon_next_region(r), t);
 }
@@ -1245,12 +1332,10 @@ static bool kdamond_need_stop(struct damon_ctx *ctx)

 static unsigned long damos_wmark_metric_value(enum damos_wmark_metric metric)
 {
-	struct sysinfo i;
-
 	switch (metric) {
 	case DAMOS_WMARK_FREE_MEM_RATE:
-		si_meminfo(&i);
-		return i.freeram * 1000 / i.totalram;
+		return global_zone_page_state(NR_FREE_PAGES) * 1000 /
+			totalram_pages();
 	default:
 		break;
 	}
@@ -1332,11 +1417,19 @@ static void kdamond_init_intervals_sis(struct damon_ctx *ctx)
 {
 	unsigned long sample_interval = ctx->attrs.sample_interval ?
 		ctx->attrs.sample_interval : 1;
+	unsigned long apply_interval;
+	struct damos *scheme;

 	ctx->passed_sample_intervals = 0;
 	ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval;
 	ctx->next_ops_update_sis = ctx->attrs.ops_update_interval /
 		sample_interval;
+
+	damon_for_each_scheme(scheme, ctx) {
+		apply_interval = scheme->apply_interval_us ?
+			scheme->apply_interval_us : ctx->attrs.aggr_interval;
+		scheme->next_apply_sis = apply_interval / sample_interval;
+	}
 }

 /*
@@ -1389,19 +1482,28 @@ static int kdamond_fn(void *data)
 		if (ctx->ops.check_accesses)
 			max_nr_accesses = ctx->ops.check_accesses(ctx);

-		sample_interval = ctx->attrs.sample_interval ?
-			ctx->attrs.sample_interval : 1;
 		if (ctx->passed_sample_intervals == next_aggregation_sis) {
-			ctx->next_aggregation_sis = next_aggregation_sis +
-				ctx->attrs.aggr_interval / sample_interval;
 			kdamond_merge_regions(ctx,
 					max_nr_accesses / 10,
 					sz_limit);
 			if (ctx->callback.after_aggregation &&
 					ctx->callback.after_aggregation(ctx))
 				break;
-			if (!list_empty(&ctx->schemes))
-				kdamond_apply_schemes(ctx);
+		}
+
+		/*
+		 * do kdamond_apply_schemes() after kdamond_merge_regions() if
+		 * possible, to reduce overhead
+		 */
+		if (!list_empty(&ctx->schemes))
+			kdamond_apply_schemes(ctx);
+
+		sample_interval = ctx->attrs.sample_interval ?
+			ctx->attrs.sample_interval : 1;
+		if (ctx->passed_sample_intervals == next_aggregation_sis) {
+			ctx->next_aggregation_sis = next_aggregation_sis +
+				ctx->attrs.aggr_interval / sample_interval;
+
 			kdamond_reset_aggregated(ctx);
 			kdamond_split_regions(ctx);
 			if (ctx->ops.reset_aggregated)
@@ -1515,6 +1617,76 @@ int damon_set_region_biggest_system_ram_default(struct damon_target *t,
 	return damon_set_regions(t, &addr_range, 1);
 }

+/*
+ * damon_moving_sum() - Calculate an inferred moving sum value.
+ * @mvsum:	Inferred sum of the last @len_window values.
+ * @nomvsum:	Non-moving sum of the last discrete @len_window window values.
+ * @len_window:	The number of last values to take care of.
+ * @new_value:	New value that will be added to the pseudo moving sum.
+ *
+ * Moving sum (moving average * window size) is good for handling noise, but
+ * the cost of keeping past values can be high for arbitrary window size.  This
+ * function implements a lightweight pseudo moving sum function that doesn't
+ * keep the past window values.
+ *
+ * It simply assumes there was no noise in the past, and get the no-noise
+ * assumed past value to drop from @nomvsum and @len_window.  @nomvsum is a
+ * non-moving sum of the last window.  For example, if @len_window is 10 and we
+ * have 25 values, @nomvsum is the sum of the 11th to 20th values of the 25
+ * values.  Hence, this function simply drops @nomvsum / @len_window from
+ * given @mvsum and add @new_value.
+ *
+ * For example, if @len_window is 10 and @nomvsum is 50, the last 10 values for
+ * the last window could be vary, e.g., 0, 10, 0, 10, 0, 10, 0, 0, 0, 20.  For
+ * calculating next moving sum with a new value, we should drop 0 from 50 and
+ * add the new value.  However, this function assumes it got value 5 for each
+ * of the last ten times.  Based on the assumption, when the next value is
+ * measured, it drops the assumed past value, 5 from the current sum, and add
+ * the new value to get the updated pseduo-moving average.
+ *
+ * This means the value could have errors, but the errors will be disappeared
+ * for every @len_window aligned calls.  For example, if @len_window is 10, the
+ * pseudo moving sum with 11th value to 19th value would have an error.  But
+ * the sum with 20th value will not have the error.
+ *
+ * Return: Pseudo-moving average after getting the @new_value.
+ */
+static unsigned int damon_moving_sum(unsigned int mvsum, unsigned int nomvsum,
+		unsigned int len_window, unsigned int new_value)
+{
+	return mvsum - nomvsum / len_window + new_value;
+}
+
+/**
+ * damon_update_region_access_rate() - Update the access rate of a region.
+ * @r:		The DAMON region to update for its access check result.
+ * @accessed:	Whether the region has accessed during last sampling interval.
+ * @attrs:	The damon_attrs of the DAMON context.
+ *
+ * Update the access rate of a region with the region's last sampling interval
+ * access check result.
+ *
+ * Usually this will be called by &damon_operations->check_accesses callback.
+ */
+void damon_update_region_access_rate(struct damon_region *r, bool accessed,
+		struct damon_attrs *attrs)
+{
+	unsigned int len_window = 1;
+
+	/*
+	 * sample_interval can be zero, but cannot be larger than
+	 * aggr_interval, owing to validation of damon_set_attrs().
+	 */
+	if (attrs->sample_interval)
+		len_window = damon_max_nr_accesses(attrs);
+	r->nr_accesses_bp = damon_moving_sum(r->nr_accesses_bp,
+			r->last_nr_accesses * 10000, len_window,
+			accessed ? 10000 : 0);
+
+	if (accessed)
+		r->nr_accesses++;
+}
+
 static int __init damon_init(void)
 {
 	damon_region_cache = KMEM_CACHE(damon_region, 0);
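The next_apply_sis bookkeeping added above is integer arithmetic in units of sampling intervals: a scheme whose apply_interval_us is zero falls back to the aggregation interval. A user-space sketch of damos_set_next_apply_sis()'s computation, using hypothetical interval values (not the kernel's defaults):

#include <stdio.h>

/* mirrors damos_set_next_apply_sis(); intervals in microseconds */
static unsigned long next_apply_sis(unsigned long passed_sample_intervals,
		unsigned long sample_us, unsigned long aggr_us,
		unsigned long apply_us)
{
	unsigned long sample = sample_us ? sample_us : 1;
	unsigned long apply = apply_us ? apply_us : aggr_us;

	return passed_sample_intervals + apply / sample;
}

int main(void)
{
	/* 100 ms sampling, 1 s aggregation, default apply interval (0) */
	printf("%lu\n", next_apply_sis(0, 100000, 1000000, 0));		/* 10 */
	/* same attrs, explicit 500 ms apply interval, 42 samples in */
	printf("%lu\n", next_apply_sis(42, 100000, 1000000, 500000));	/* 47 */
	return 0;
}

kdamond_apply_schemes() then compares passed_sample_intervals against each scheme's next_apply_sis and advances it by the same quotient after applying, so schemes with different apply intervals tick independently on the shared sampling clock.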
diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c
index 124f0f8c97..dc0ea1fc30 100644
--- a/mm/damon/dbgfs.c
+++ b/mm/damon/dbgfs.c
@@ -278,7 +278,8 @@ static struct damos **str_to_schemes(const char *str, ssize_t len,
 			goto fail;

 		pos += parsed;
-		scheme = damon_new_scheme(&pattern, action, &quota, &wmarks);
+		scheme = damon_new_scheme(&pattern, action, 0, &quota,
+				&wmarks);
 		if (!scheme)
 			goto fail;
diff --git a/mm/damon/lru_sort.c b/mm/damon/lru_sort.c
index 3071e08e8b..3de2916a65 100644
--- a/mm/damon/lru_sort.c
+++ b/mm/damon/lru_sort.c
@@ -158,6 +158,8 @@ static struct damos *damon_lru_sort_new_scheme(
 			pattern,
 			/* (de)prioritize on LRU-lists */
 			action,
+			/* for each aggregation interval */
+			0,
 			/* under the quota. */
 			&quota,
 			/* (De)activate this according to the watermarks. */
@@ -183,9 +185,21 @@ static struct damos *damon_lru_sort_new_cold_scheme(unsigned int cold_thres)
 	return damon_lru_sort_new_scheme(&pattern, DAMOS_LRU_DEPRIO);
 }

+static void damon_lru_sort_copy_quota_status(struct damos_quota *dst,
+		struct damos_quota *src)
+{
+	dst->total_charged_sz = src->total_charged_sz;
+	dst->total_charged_ns = src->total_charged_ns;
+	dst->charged_sz = src->charged_sz;
+	dst->charged_from = src->charged_from;
+	dst->charge_target_from = src->charge_target_from;
+	dst->charge_addr_from = src->charge_addr_from;
+}
+
 static int damon_lru_sort_apply_parameters(void)
 {
-	struct damos *scheme;
+	struct damos *scheme, *hot_scheme, *cold_scheme;
+	struct damos *old_hot_scheme = NULL, *old_cold_scheme = NULL;
 	unsigned int hot_thres, cold_thres;
 	int err = 0;

@@ -193,18 +207,35 @@ static int damon_lru_sort_apply_parameters(void)
 	if (err)
 		return err;

+	damon_for_each_scheme(scheme, ctx) {
+		if (!old_hot_scheme) {
+			old_hot_scheme = scheme;
+			continue;
+		}
+		old_cold_scheme = scheme;
+	}
+
 	hot_thres = damon_max_nr_accesses(&damon_lru_sort_mon_attrs) *
 		hot_thres_access_freq / 1000;
-	scheme = damon_lru_sort_new_hot_scheme(hot_thres);
-	if (!scheme)
+	hot_scheme = damon_lru_sort_new_hot_scheme(hot_thres);
+	if (!hot_scheme)
 		return -ENOMEM;
-	damon_set_schemes(ctx, &scheme, 1);
+	if (old_hot_scheme)
+		damon_lru_sort_copy_quota_status(&hot_scheme->quota,
+				&old_hot_scheme->quota);

 	cold_thres = cold_min_age / damon_lru_sort_mon_attrs.aggr_interval;
-	scheme = damon_lru_sort_new_cold_scheme(cold_thres);
-	if (!scheme)
+	cold_scheme = damon_lru_sort_new_cold_scheme(cold_thres);
+	if (!cold_scheme) {
+		damon_destroy_scheme(hot_scheme);
 		return -ENOMEM;
-	damon_add_scheme(ctx, scheme);
+	}
+	if (old_cold_scheme)
+		damon_lru_sort_copy_quota_status(&cold_scheme->quota,
+				&old_cold_scheme->quota);
+
+	damon_set_schemes(ctx, &hot_scheme, 1);
+	damon_add_scheme(ctx, cold_scheme);

 	return damon_set_region_biggest_system_ram_default(target,
 					&monitor_region_start,
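The new copy helper exists because damon_lru_sort_apply_parameters() replaces its schemes with freshly allocated ones, which would otherwise zero the quota's charge accounting on every parameter update and could let a scheme exceed its intended limit within the current charge window. A condensed user-space sketch of the idea, with a simplified stand-in for struct damos_quota:

#include <stdio.h>

/* simplified stand-in for the charge-tracking fields of struct damos_quota */
struct quota {
	unsigned long total_charged_sz;
	unsigned long charged_sz;
	unsigned long charged_from;	/* jiffies in the kernel */
};

static void copy_quota_status(struct quota *dst, const struct quota *src)
{
	dst->total_charged_sz = src->total_charged_sz;
	dst->charged_sz = src->charged_sz;
	dst->charged_from = src->charged_from;
}

int main(void)
{
	struct quota old = { 1 << 20, 4096, 12345 };
	struct quota fresh = { 0, 0, 0 };	/* newly allocated scheme */

	copy_quota_status(&fresh, &old);
	/* the replacement scheme resumes charging where the old one stopped */
	printf("%lu %lu %lu\n", fresh.total_charged_sz, fresh.charged_sz,
			fresh.charged_from);
	return 0;
}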
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index 909db25efb..081e2a3257 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -148,7 +148,8 @@ out:
 	return accessed;
 }

-static void __damon_pa_check_access(struct damon_region *r)
+static void __damon_pa_check_access(struct damon_region *r,
+		struct damon_attrs *attrs)
 {
 	static unsigned long last_addr;
 	static unsigned long last_folio_sz = PAGE_SIZE;
@@ -157,14 +158,12 @@ static void __damon_pa_check_access(struct damon_region *r)
 	/* If the region is in the last checked page, reuse the result */
 	if (ALIGN_DOWN(last_addr, last_folio_sz) ==
 				ALIGN_DOWN(r->sampling_addr, last_folio_sz)) {
-		if (last_accessed)
-			r->nr_accesses++;
+		damon_update_region_access_rate(r, last_accessed, attrs);
 		return;
 	}

 	last_accessed = damon_pa_young(r->sampling_addr, &last_folio_sz);
-	if (last_accessed)
-		r->nr_accesses++;
+	damon_update_region_access_rate(r, last_accessed, attrs);

 	last_addr = r->sampling_addr;
 }
@@ -177,7 +176,7 @@ static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)

 	damon_for_each_target(t, ctx) {
 		damon_for_each_region(r, t) {
-			__damon_pa_check_access(r);
+			__damon_pa_check_access(r, &ctx->attrs);
 			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
 		}
 	}
diff --git a/mm/damon/reclaim.c b/mm/damon/reclaim.c
index 648d2a8552..66e190f037 100644
--- a/mm/damon/reclaim.c
+++ b/mm/damon/reclaim.c
@@ -142,15 +142,28 @@ static struct damos *damon_reclaim_new_scheme(void)
 			&pattern,
 			/* page out those, as soon as found */
 			DAMOS_PAGEOUT,
+			/* for each aggregation interval */
+			0,
 			/* under the quota. */
 			&damon_reclaim_quota,
 			/* (De)activate this according to the watermarks. */
 			&damon_reclaim_wmarks);
 }

+static void damon_reclaim_copy_quota_status(struct damos_quota *dst,
+		struct damos_quota *src)
+{
+	dst->total_charged_sz = src->total_charged_sz;
+	dst->total_charged_ns = src->total_charged_ns;
+	dst->charged_sz = src->charged_sz;
+	dst->charged_from = src->charged_from;
+	dst->charge_target_from = src->charge_target_from;
+	dst->charge_addr_from = src->charge_addr_from;
+}
+
 static int damon_reclaim_apply_parameters(void)
 {
-	struct damos *scheme;
+	struct damos *scheme, *old_scheme;
 	struct damos_filter *filter;
 	int err = 0;

@@ -162,6 +175,11 @@ static int damon_reclaim_apply_parameters(void)
 	scheme = damon_reclaim_new_scheme();
 	if (!scheme)
 		return -ENOMEM;
+	if (!list_empty(&ctx->schemes)) {
+		damon_for_each_scheme(old_scheme, ctx)
+			damon_reclaim_copy_quota_status(&scheme->quota,
+					&old_scheme->quota);
+	}
 	if (skip_anon) {
 		filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true);
 		if (!filter) {
diff --git a/mm/damon/sysfs-common.h b/mm/damon/sysfs-common.h
index fd482a0639..5ff081226e 100644
--- a/mm/damon/sysfs-common.h
+++ b/mm/damon/sysfs-common.h
@@ -49,6 +49,8 @@ int damon_sysfs_schemes_update_regions_start(
 		struct damon_sysfs_schemes *sysfs_schemes,
 		struct damon_ctx *ctx, bool total_bytes_only);

+bool damos_sysfs_regions_upd_done(void);
+
 int damon_sysfs_schemes_update_regions_stop(struct damon_ctx *ctx);

 int damon_sysfs_schemes_clear_regions(
diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c
index 36dcd881a1..786b06239c 100644
--- a/mm/damon/sysfs-schemes.c
+++ b/mm/damon/sysfs-schemes.c
@@ -31,7 +31,7 @@ static struct damon_sysfs_scheme_region *damon_sysfs_scheme_region_alloc(
 		return NULL;
 	sysfs_region->kobj = (struct kobject){};
 	sysfs_region->ar = region->ar;
-	sysfs_region->nr_accesses = region->nr_accesses;
+	sysfs_region->nr_accesses = region->nr_accesses_bp / 10000;
 	sysfs_region->age = region->age;
 	INIT_LIST_HEAD(&sysfs_region->list);
 	return sysfs_region;
@@ -113,11 +113,55 @@ static const struct kobj_type damon_sysfs_scheme_region_ktype = {
 * scheme regions directory
 */

+/*
+ * enum damos_sysfs_regions_upd_status - Represent DAMOS tried regions update
+ *					 status
+ * @DAMOS_TRIED_REGIONS_UPD_IDLE:	Waiting for next request.
+ * @DAMOS_TRIED_REGIONS_UPD_STARTED:	Update started.
+ * @DAMOS_TRIED_REGIONS_UPD_FINISHED:	Update finished.
+ *
+ * Each DAMON-based operation scheme (&struct damos) has its own apply
+ * interval, and we need to expose the scheme tried regions based on only
+ * single snapshot.  For this, we keep the tried regions update status for each
+ * scheme.  The status becomes 'idle' at the beginning.
+ *
+ * Once the tried regions update request is received, the request handling
+ * start function (damon_sysfs_scheme_update_regions_start()) sets the status
+ * of all schemes as 'idle' again, and register ->before_damos_apply() and
+ * ->after_sampling() callbacks.
+ *
+ * Then, the first followup ->before_damos_apply() callback
+ * (damon_sysfs_before_damos_apply()) sets the status 'started'.  The first
+ * ->after_sampling() callback (damon_sysfs_after_sampling()) after the call
+ * is called only after the scheme is completely applied
+ * to the given snapshot.  Hence the callback knows the situation by showing
+ * 'started' status, and sets the status as 'finished'.  Then,
+ * damon_sysfs_before_damos_apply() understands the situation by showing the
+ * 'finished' status and do nothing.
+ *
+ * If DAMOS is not applied to any region due to any reasons including the
+ * access pattern, the watermarks, the quotas, and the filters,
+ * ->before_damos_apply() will not be called back.  Until the situation is
+ * changed, the update will not be finished.  To avoid this,
+ * damon_sysfs_after_sampling() set the status as 'finished' if more than two
+ * apply intervals of the scheme is passed while the state is 'idle'.
+ *
+ * Finally, the tried regions request handling finisher function
+ * (damon_sysfs_schemes_update_regions_stop()) unregisters the callbacks.
+ */
+enum damos_sysfs_regions_upd_status {
+	DAMOS_TRIED_REGIONS_UPD_IDLE,
+	DAMOS_TRIED_REGIONS_UPD_STARTED,
+	DAMOS_TRIED_REGIONS_UPD_FINISHED,
+};
+
 struct damon_sysfs_scheme_regions {
 	struct kobject kobj;
 	struct list_head regions_list;
 	int nr_regions;
 	unsigned long total_bytes;
+	enum damos_sysfs_regions_upd_status upd_status;
+	unsigned long upd_timeout_jiffies;
 };

 static struct damon_sysfs_scheme_regions *
@@ -133,6 +177,7 @@ damon_sysfs_scheme_regions_alloc(void)
 	INIT_LIST_HEAD(&regions->regions_list);
 	regions->nr_regions = 0;
 	regions->total_bytes = 0;
+	regions->upd_status = DAMOS_TRIED_REGIONS_UPD_IDLE;
 	return regions;
 }
@@ -1124,6 +1169,7 @@ struct damon_sysfs_scheme {
 	struct kobject kobj;
 	enum damos_action action;
 	struct damon_sysfs_access_pattern *access_pattern;
+	unsigned long apply_interval_us;
 	struct damon_sysfs_quotas *quotas;
 	struct damon_sysfs_watermarks *watermarks;
 	struct damon_sysfs_scheme_filters *filters;
@@ -1144,7 +1190,7 @@ static const char * const damon_sysfs_damos_action_strs[] = {
 };

 static struct damon_sysfs_scheme *damon_sysfs_scheme_alloc(
-		enum damos_action action)
+		enum damos_action action, unsigned long apply_interval_us)
 {
 	struct damon_sysfs_scheme *scheme = kmalloc(sizeof(*scheme),
 				GFP_KERNEL);
@@ -1153,6 +1199,7 @@ static struct damon_sysfs_scheme *damon_sysfs_scheme_alloc(
 		return NULL;
 	scheme->kobj = (struct kobject){};
 	scheme->action = action;
+	scheme->apply_interval_us = apply_interval_us;
 	return scheme;
 }
@@ -1356,6 +1403,25 @@ static ssize_t action_store(struct kobject *kobj, struct kobj_attribute *attr,
 	return -EINVAL;
 }

+static ssize_t apply_interval_us_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	struct damon_sysfs_scheme *scheme = container_of(kobj,
+			struct damon_sysfs_scheme, kobj);
+
+	return sysfs_emit(buf, "%lu\n", scheme->apply_interval_us);
+}
+
+static ssize_t apply_interval_us_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	struct damon_sysfs_scheme *scheme = container_of(kobj,
+			struct damon_sysfs_scheme, kobj);
+	int err = kstrtoul(buf, 0, &scheme->apply_interval_us);
+
+	return err ? err : count;
+}
+
 static void damon_sysfs_scheme_release(struct kobject *kobj)
 {
 	kfree(container_of(kobj, struct damon_sysfs_scheme, kobj));
@@ -1364,8 +1430,12 @@ static void damon_sysfs_scheme_release(struct kobject *kobj)
 static struct kobj_attribute damon_sysfs_scheme_action_attr =
 		__ATTR_RW_MODE(action, 0600);

+static struct kobj_attribute damon_sysfs_scheme_apply_interval_us_attr =
+		__ATTR_RW_MODE(apply_interval_us, 0600);
+
 static struct attribute *damon_sysfs_scheme_attrs[] = {
 	&damon_sysfs_scheme_action_attr.attr,
+	&damon_sysfs_scheme_apply_interval_us_attr.attr,
 	NULL,
 };
 ATTRIBUTE_GROUPS(damon_sysfs_scheme);
@@ -1416,7 +1486,11 @@ static int damon_sysfs_schemes_add_dirs(struct damon_sysfs_schemes *schemes,
 	schemes->schemes_arr = schemes_arr;

 	for (i = 0; i < nr_schemes; i++) {
-		scheme = damon_sysfs_scheme_alloc(DAMOS_STAT);
+		/*
+		 * apply_interval_us as 0 means same to aggregation interval
+		 * (same to before-apply_interval behavior)
+		 */
+		scheme = damon_sysfs_scheme_alloc(DAMOS_STAT, 0);
 		if (!scheme) {
 			damon_sysfs_schemes_rm_dirs(schemes);
 			return -ENOMEM;
@@ -1613,8 +1687,8 @@ static struct damos *damon_sysfs_mk_scheme(
 		.low = sysfs_wmarks->low,
 	};

-	scheme = damon_new_scheme(&pattern, sysfs_scheme->action, &quota,
-			&wmarks);
+	scheme = damon_new_scheme(&pattern, sysfs_scheme->action,
+			sysfs_scheme->apply_interval_us, &quota, &wmarks);
 	if (!scheme)
 		return NULL;

@@ -1644,6 +1718,7 @@ static void damon_sysfs_update_scheme(struct damos *scheme,
 	scheme->pattern.max_age_region = access_pattern->age->max;

 	scheme->action = sysfs_scheme->action;
+	scheme->apply_interval_us = sysfs_scheme->apply_interval_us;

 	scheme->quota.ms = sysfs_quotas->ms;
 	scheme->quota.sz = sysfs_quotas->sz;
@@ -1750,6 +1825,10 @@ static int damon_sysfs_before_damos_apply(struct damon_ctx *ctx,
 		return 0;

 	sysfs_regions = sysfs_schemes->schemes_arr[schemes_idx]->tried_regions;
+	if (sysfs_regions->upd_status == DAMOS_TRIED_REGIONS_UPD_FINISHED)
+		return 0;
+	if (sysfs_regions->upd_status == DAMOS_TRIED_REGIONS_UPD_IDLE)
+		sysfs_regions->upd_status = DAMOS_TRIED_REGIONS_UPD_STARTED;
 	sysfs_regions->total_bytes += r->ar.end - r->ar.start;
 	if (damos_regions_upd_total_bytes_only)
 		return 0;
@@ -1768,6 +1847,31 @@ static int damon_sysfs_before_damos_apply(struct damon_ctx *ctx,
 	return 0;
 }

+/*
+ * DAMON callback that called after each accesses sampling.  While this
+ * callback is registered, damon_sysfs_lock should be held to ensure the
+ * regions directories exist.
+ */
+static int damon_sysfs_after_sampling(struct damon_ctx *ctx)
+{
+	struct damon_sysfs_schemes *sysfs_schemes =
+		damon_sysfs_schemes_for_damos_callback;
+	struct damon_sysfs_scheme_regions *sysfs_regions;
+	int i;
+
+	for (i = 0; i < sysfs_schemes->nr; i++) {
+		sysfs_regions = sysfs_schemes->schemes_arr[i]->tried_regions;
+		if (sysfs_regions->upd_status ==
+				DAMOS_TRIED_REGIONS_UPD_STARTED ||
+				time_after(jiffies,
+					sysfs_regions->upd_timeout_jiffies))
+			sysfs_regions->upd_status =
+				DAMOS_TRIED_REGIONS_UPD_FINISHED;
+	}
+
+	return 0;
+}
+
 /* Called from damon_sysfs_cmd_request_callback under damon_sysfs_lock */
 int damon_sysfs_schemes_clear_regions(
 		struct damon_sysfs_schemes *sysfs_schemes,
@@ -1791,6 +1895,43 @@ int damon_sysfs_schemes_clear_regions(
 	return 0;
 }

+static struct damos *damos_sysfs_nth_scheme(int n, struct damon_ctx *ctx)
+{
+	struct damos *scheme;
+	int i = 0;
+
+	damon_for_each_scheme(scheme, ctx) {
+		if (i == n)
+			return scheme;
+		i++;
+	}
+	return NULL;
+}
+
+static void damos_tried_regions_init_upd_status(
+		struct damon_sysfs_schemes *sysfs_schemes,
+		struct damon_ctx *ctx)
+{
+	int i;
+	struct damos *scheme;
+	struct damon_sysfs_scheme_regions *sysfs_regions;
+
+	for (i = 0; i < sysfs_schemes->nr; i++) {
+		sysfs_regions = sysfs_schemes->schemes_arr[i]->tried_regions;
+		scheme = damos_sysfs_nth_scheme(i, ctx);
+		if (!scheme) {
+			sysfs_regions->upd_status =
+				DAMOS_TRIED_REGIONS_UPD_FINISHED;
+			continue;
+		}
+		sysfs_regions->upd_status = DAMOS_TRIED_REGIONS_UPD_IDLE;
+		sysfs_regions->upd_timeout_jiffies = jiffies +
+			2 * usecs_to_jiffies(scheme->apply_interval_us ?
+					scheme->apply_interval_us :
+					ctx->attrs.aggr_interval);
+	}
+}
+
 /* Called from damon_sysfs_cmd_request_callback under damon_sysfs_lock */
 int damon_sysfs_schemes_update_regions_start(
 		struct damon_sysfs_schemes *sysfs_schemes,
@@ -1798,11 +1939,29 @@ int damon_sysfs_schemes_update_regions_start(
 {
 	damon_sysfs_schemes_clear_regions(sysfs_schemes, ctx);
 	damon_sysfs_schemes_for_damos_callback = sysfs_schemes;
+	damos_tried_regions_init_upd_status(sysfs_schemes, ctx);
 	damos_regions_upd_total_bytes_only = total_bytes_only;
 	ctx->callback.before_damos_apply = damon_sysfs_before_damos_apply;
+	ctx->callback.after_sampling = damon_sysfs_after_sampling;
 	return 0;
 }

+bool damos_sysfs_regions_upd_done(void)
+{
+	struct damon_sysfs_schemes *sysfs_schemes =
+		damon_sysfs_schemes_for_damos_callback;
+	struct damon_sysfs_scheme_regions *sysfs_regions;
+	int i;
+
+	for (i = 0; i < sysfs_schemes->nr; i++) {
+		sysfs_regions = sysfs_schemes->schemes_arr[i]->tried_regions;
+		if (sysfs_regions->upd_status !=
+				DAMOS_TRIED_REGIONS_UPD_FINISHED)
+			return false;
+	}
+	return true;
+}
+
 /*
 * Called from damon_sysfs_cmd_request_callback under damon_sysfs_lock.  Caller
 * should unlock damon_sysfs_lock which held before
@@ -1812,6 +1971,7 @@ int damon_sysfs_schemes_update_regions_stop(struct damon_ctx *ctx)
 {
 	damon_sysfs_schemes_for_damos_callback = NULL;
 	ctx->callback.before_damos_apply = NULL;
+	ctx->callback.after_sampling = NULL;
 	damon_sysfs_schemes_region_idx = 0;
 	return 0;
 }
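The long comment in the sysfs-schemes.c hunk above describes a three-state machine per scheme. A condensed user-space model of the same transitions, with the jiffies-based timeout reduced to a plain counter (names shortened; illustration only, not the kernel code):

#include <stdio.h>

enum upd_status { UPD_IDLE, UPD_STARTED, UPD_FINISHED };

struct tried_regions {
	enum upd_status status;
	unsigned long timeout;		/* stands in for upd_timeout_jiffies */
};

/* mirrors the status handling in damon_sysfs_before_damos_apply() */
static void before_damos_apply(struct tried_regions *r)
{
	if (r->status == UPD_FINISHED)
		return;		/* snapshot already complete; record nothing */
	if (r->status == UPD_IDLE)
		r->status = UPD_STARTED;
	/* ... region data would be recorded here ... */
}

/* mirrors damon_sysfs_after_sampling(): finish after a full pass or timeout */
static void after_sampling(struct tried_regions *r, unsigned long now)
{
	if (r->status == UPD_STARTED || now > r->timeout)
		r->status = UPD_FINISHED;
}

int main(void)
{
	struct tried_regions r = { UPD_IDLE, 100 };
	struct tried_regions stuck = { UPD_IDLE, 100 };

	before_damos_apply(&r);		/* IDLE -> STARTED */
	after_sampling(&r, 10);		/* STARTED -> FINISHED */

	/* a scheme that never applies still finishes, via the timeout */
	after_sampling(&stuck, 101);	/* IDLE past timeout -> FINISHED */

	printf("%d %d\n", r.status == UPD_FINISHED,
			stuck.status == UPD_FINISHED);	/* 1 1 */
	return 0;
}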
diff --git a/mm/damon/sysfs-test.h b/mm/damon/sysfs-test.h
new file mode 100644
index 0000000000..73bdce2452
--- /dev/null
+++ b/mm/damon/sysfs-test.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Data Access Monitor Unit Tests
+ *
+ * Author: SeongJae Park <sj@kernel.org>
+ */
+
+#ifdef CONFIG_DAMON_SYSFS_KUNIT_TEST
+
+#ifndef _DAMON_SYSFS_TEST_H
+#define _DAMON_SYSFS_TEST_H
+
+#include <kunit/test.h>
+
+static unsigned int nr_damon_targets(struct damon_ctx *ctx)
+{
+	struct damon_target *t;
+	unsigned int nr_targets = 0;
+
+	damon_for_each_target(t, ctx)
+		nr_targets++;
+
+	return nr_targets;
+}
+
+static int __damon_sysfs_test_get_any_pid(int min, int max)
+{
+	struct pid *pid;
+	int i;
+
+	for (i = min; i <= max; i++) {
+		pid = find_get_pid(i);
+		if (pid) {
+			put_pid(pid);
+			return i;
+		}
+	}
+	return -1;
+}
+
+static void damon_sysfs_test_set_targets(struct kunit *test)
+{
+	struct damon_sysfs_targets *sysfs_targets;
+	struct damon_sysfs_target *sysfs_target;
+	struct damon_ctx *ctx;
+
+	sysfs_targets = damon_sysfs_targets_alloc();
+	sysfs_targets->nr = 1;
+	sysfs_targets->targets_arr = kmalloc_array(1,
+			sizeof(*sysfs_targets->targets_arr), GFP_KERNEL);
+
+	sysfs_target = damon_sysfs_target_alloc();
+	sysfs_target->pid = __damon_sysfs_test_get_any_pid(12, 100);
+	sysfs_target->regions = damon_sysfs_regions_alloc();
+	sysfs_targets->targets_arr[0] = sysfs_target;
+
+	ctx = damon_new_ctx();
+
+	damon_sysfs_set_targets(ctx, sysfs_targets);
+	KUNIT_EXPECT_EQ(test, 1u, nr_damon_targets(ctx));
+
+	sysfs_target->pid = __damon_sysfs_test_get_any_pid(
+			sysfs_target->pid + 1, 200);
+	damon_sysfs_set_targets(ctx, sysfs_targets);
+	KUNIT_EXPECT_EQ(test, 1u, nr_damon_targets(ctx));
+
+	damon_destroy_ctx(ctx);
+	kfree(sysfs_targets->targets_arr);
+	kfree(sysfs_targets);
+	kfree(sysfs_target);
+}
+
+static struct kunit_case damon_sysfs_test_cases[] = {
+	KUNIT_CASE(damon_sysfs_test_set_targets),
+	{},
+};
+
+static struct kunit_suite damon_sysfs_test_suite = {
+	.name = "damon-sysfs",
+	.test_cases = damon_sysfs_test_cases,
+};
+kunit_test_suite(damon_sysfs_test_suite);
+
+#endif /* _DAMON_SYSFS_TEST_H */
+
+#endif /* CONFIG_DAMON_SYSFS_KUNIT_TEST */
diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index b317f51dcc..7472404456 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -1353,12 +1353,13 @@ static int damon_sysfs_commit_input(struct damon_sysfs_kdamond *kdamond)

 /*
 * damon_sysfs_cmd_request_callback() - DAMON callback for handling requests.
- * @c:	The DAMON context of the callback.
+ * @c:		The DAMON context of the callback.
+ * @active:	Whether @c is not deactivated due to watermarks.
 *
 * This function is periodically called back from the kdamond thread for @c.
 * Then, it checks if there is a waiting DAMON sysfs request and handles it.
 */
-static int damon_sysfs_cmd_request_callback(struct damon_ctx *c)
+static int damon_sysfs_cmd_request_callback(struct damon_ctx *c, bool active)
 {
 	struct damon_sysfs_kdamond *kdamond;
 	bool total_bytes_only = false;
@@ -1390,6 +1391,13 @@ static int damon_sysfs_cmd_request_callback(struct damon_ctx *c)
 				goto keep_lock_out;
 			}
 		} else {
+			/*
+			 * Continue regions updating if DAMON is till
+			 * active and the update for all schemes is not
+			 * finished.
+			 */
+			if (active && !damos_sysfs_regions_upd_done())
+				goto keep_lock_out;
 			err = damon_sysfs_upd_schemes_regions_stop(kdamond);
 			damon_sysfs_schemes_regions_updating = false;
 		}
@@ -1409,6 +1417,24 @@ keep_lock_out:
 	return err;
 }

+static int damon_sysfs_after_wmarks_check(struct damon_ctx *c)
+{
+	/*
+	 * after_wmarks_check() is called back while the context is deactivated
+	 * by watermarks.
+	 */
+	return damon_sysfs_cmd_request_callback(c, false);
+}
+
+static int damon_sysfs_after_aggregation(struct damon_ctx *c)
+{
+	/*
+	 * after_aggregation() is called back only while the context is not
+	 * deactivated by watermarks.
+	 */
+	return damon_sysfs_cmd_request_callback(c, true);
+}
+
 static struct damon_ctx *damon_sysfs_build_ctx(
 		struct damon_sysfs_context *sys_ctx)
 {
@@ -1424,8 +1450,8 @@ static struct damon_ctx *damon_sysfs_build_ctx(
 		return ERR_PTR(err);
 	}

-	ctx->callback.after_wmarks_check = damon_sysfs_cmd_request_callback;
-	ctx->callback.after_aggregation = damon_sysfs_cmd_request_callback;
+	ctx->callback.after_wmarks_check = damon_sysfs_after_wmarks_check;
+	ctx->callback.after_aggregation = damon_sysfs_after_aggregation;
 	ctx->callback.before_terminate = damon_sysfs_before_terminate;
 	return ctx;
 }
@@ -1827,3 +1853,5 @@ out:
 	return err;
 }
 subsys_initcall(damon_sysfs_init);
+
+#include "sysfs-test.h"
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index cf8a9fc5c9..a4d1f63c5b 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -558,23 +558,27 @@ static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
 * r	the region to be checked
 */
 static void __damon_va_check_access(struct mm_struct *mm,
-				struct damon_region *r, bool same_target)
+				struct damon_region *r, bool same_target,
+				struct damon_attrs *attrs)
 {
 	static unsigned long last_addr;
 	static unsigned long last_folio_sz = PAGE_SIZE;
 	static bool last_accessed;

+	if (!mm) {
+		damon_update_region_access_rate(r, false, attrs);
+		return;
+	}
+
 	/* If the region is in the last checked page, reuse the result */
 	if (same_target && (ALIGN_DOWN(last_addr, last_folio_sz) ==
 				ALIGN_DOWN(r->sampling_addr, last_folio_sz))) {
-		if (last_accessed)
-			r->nr_accesses++;
+		damon_update_region_access_rate(r, last_accessed, attrs);
 		return;
 	}

 	last_accessed = damon_va_young(mm, r->sampling_addr, &last_folio_sz);
-	if (last_accessed)
-		r->nr_accesses++;
+	damon_update_region_access_rate(r, last_accessed, attrs);

 	last_addr = r->sampling_addr;
 }
@@ -589,15 +593,15 @@ static unsigned int damon_va_check_accesses(struct damon_ctx *ctx)

 	damon_for_each_target(t, ctx) {
 		mm = damon_get_mm(t);
-		if (!mm)
-			continue;
 		same_target = false;
 		damon_for_each_region(r, t) {
-			__damon_va_check_access(mm, r, same_target);
+			__damon_va_check_access(mm, r, same_target,
+					&ctx->attrs);
 			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
 			same_target = true;
 		}
-		mmput(mm);
+		if (mm)
+			mmput(mm);
 	}

 	return max_nr_accesses;
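Taken together, the core.c, paddr.c, and vaddr.c changes make every access check feed two counters: the discrete per-aggregation nr_accesses, and nr_accesses_bp, a basis-point (1/10000) moving estimate that is valid at any point within the aggregation interval. A user-space simulation of one region, assuming an aggregation interval of ten sampling intervals (so len_window = 10, as damon_max_nr_accesses() would return for such attrs):

#include <stdio.h>

static unsigned int moving_sum(unsigned int mvsum, unsigned int nomvsum,
		unsigned int len_window, unsigned int new_value)
{
	return mvsum - nomvsum / len_window + new_value;
}

int main(void)
{
	unsigned int nr_accesses = 0, nr_accesses_bp = 0;
	unsigned int last_nr_accesses = 0;	/* previous aggregation result */
	unsigned int len_window = 10;		/* aggr_interval / sample_interval */
	int sample;

	/* one aggregation interval in which every sample observes an access */
	for (sample = 0; sample < 10; sample++) {
		/* mirrors damon_update_region_access_rate(r, true, attrs) */
		nr_accesses_bp = moving_sum(nr_accesses_bp,
				last_nr_accesses * 10000, len_window, 10000);
		nr_accesses++;
		printf("sample %d: nr_accesses_bp=%u\n", sample, nr_accesses_bp);
	}
	/* after the full window, the estimate agrees with the discrete count */
	printf("%u == %u\n", nr_accesses_bp, nr_accesses * 10000);
	return 0;
}

This is why __damos_valid_target() and the sysfs region dump can read nr_accesses_bp / 10000 regardless of where the scheme's own apply interval falls relative to the aggregation boundary.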