author		Daniel Baumann <daniel.baumann@progress-linux.org>	2024-08-07 13:11:27 +0000
committer	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-08-07 13:11:27 +0000
commit		34996e42f82bfd60bc2c191e5cae3c6ab233ec6c (patch)
tree		62db60558cbf089714b48daeabca82bf2b20b20e /mm/damon
parent		Adding debian version 6.8.12-1. (diff)
Merging upstream version 6.9.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'mm/damon')
 mm/damon/Kconfig         |   7
 mm/damon/core.c          | 120
 mm/damon/dbgfs.c         |  26
 mm/damon/paddr.c         |   2
 mm/damon/reclaim.c       |  53
 mm/damon/sysfs-common.h  |   8
 mm/damon/sysfs-schemes.c | 146
 mm/damon/sysfs.c         |  54
 8 files changed, 360 insertions(+), 56 deletions(-)
diff --git a/mm/damon/Kconfig b/mm/damon/Kconfig
index 29f43fbc2e..fecb817241 100644
--- a/mm/damon/Kconfig
+++ b/mm/damon/Kconfig
@@ -71,7 +71,7 @@ config DAMON_SYSFS_KUNIT_TEST
 
 	  If unsure, say N.
 
-config DAMON_DBGFS
+config DAMON_DBGFS_DEPRECATED
 	bool "DAMON debugfs interface (DEPRECATED!)"
 	depends on DAMON_VADDR && DAMON_PADDR && DEBUG_FS
 	help
@@ -84,6 +84,11 @@ config DAMON_DBGFS
 	  (DAMON_SYSFS).  If you depend on this and cannot move, please report
 	  your usecase to damon@lists.linux.dev and linux-mm@kvack.org.
 
+config DAMON_DBGFS
+	bool
+	default y
+	depends on DAMON_DBGFS_DEPRECATED
+
 config DAMON_DBGFS_KUNIT_TEST
 	bool "Test for damon debugfs interface" if !KUNIT_ALL_TESTS
 	depends on DAMON_DBGFS && KUNIT=y
diff --git a/mm/damon/core.c b/mm/damon/core.c
index 5b325749fc..6d503c1c12 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -11,6 +11,7 @@
 #include <linux/delay.h>
 #include <linux/kthread.h>
 #include <linux/mm.h>
+#include <linux/psi.h>
 #include <linux/slab.h>
 #include <linux/string.h>
 
@@ -299,12 +300,48 @@ void damos_destroy_filter(struct damos_filter *f)
 	damos_free_filter(f);
 }
 
-/* initialize private fields of damos_quota and return the pointer */
-static struct damos_quota *damos_quota_init_priv(struct damos_quota *quota)
+struct damos_quota_goal *damos_new_quota_goal(
+		enum damos_quota_goal_metric metric,
+		unsigned long target_value)
 {
+	struct damos_quota_goal *goal;
+
+	goal = kmalloc(sizeof(*goal), GFP_KERNEL);
+	if (!goal)
+		return NULL;
+	goal->metric = metric;
+	goal->target_value = target_value;
+	INIT_LIST_HEAD(&goal->list);
+	return goal;
+}
+
+void damos_add_quota_goal(struct damos_quota *q, struct damos_quota_goal *g)
+{
+	list_add_tail(&g->list, &q->goals);
+}
+
+static void damos_del_quota_goal(struct damos_quota_goal *g)
+{
+	list_del(&g->list);
+}
+
+static void damos_free_quota_goal(struct damos_quota_goal *g)
+{
+	kfree(g);
+}
+
+void damos_destroy_quota_goal(struct damos_quota_goal *g)
+{
+	damos_del_quota_goal(g);
+	damos_free_quota_goal(g);
+}
+
+/* initialize fields of @quota that normally API users wouldn't set */
+static struct damos_quota *damos_quota_init(struct damos_quota *quota)
+{
+	quota->esz = 0;
 	quota->total_charged_sz = 0;
 	quota->total_charged_ns = 0;
-	quota->esz = 0;
 	quota->charged_sz = 0;
 	quota->charged_from = 0;
 	quota->charge_target_from = NULL;
@@ -336,7 +373,9 @@ struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
 	scheme->stat = (struct damos_stat){};
 	INIT_LIST_HEAD(&scheme->list);
 
-	scheme->quota = *(damos_quota_init_priv(quota));
+	scheme->quota = *(damos_quota_init(quota));
+	/* quota.goals should be separately set by caller */
+	INIT_LIST_HEAD(&scheme->quota.goals);
 
 	scheme->wmarks = *wmarks;
 	scheme->wmarks.activated = true;
@@ -373,8 +412,12 @@ static void damon_free_scheme(struct damos *s)
 
 void damon_destroy_scheme(struct damos *s)
 {
+	struct damos_quota_goal *g, *g_next;
 	struct damos_filter *f, *next;
 
+	damos_for_each_quota_goal_safe(g, g_next, &s->quota)
+		damos_destroy_quota_goal(g);
+
 	damos_for_each_filter_safe(f, next, s)
 		damos_destroy_filter(f);
 	damon_del_scheme(s);
@@ -1083,21 +1126,78 @@ static unsigned long damon_feed_loop_next_input(unsigned long last_input,
 	return min_input;
 }
 
-/* Shouldn't be called if quota->ms, quota->sz, and quota->get_score unset */
+#ifdef CONFIG_PSI
+
+static u64 damos_get_some_mem_psi_total(void)
+{
+	if (static_branch_likely(&psi_disabled))
+		return 0;
+	return div_u64(psi_system.total[PSI_AVGS][PSI_MEM * 2],
+			NSEC_PER_USEC);
+}
+
+#else	/* CONFIG_PSI */
+
+static inline u64 damos_get_some_mem_psi_total(void)
+{
+	return 0;
+};
+
+#endif	/* CONFIG_PSI */
+
+static void damos_set_quota_goal_current_value(struct damos_quota_goal *goal)
+{
+	u64 now_psi_total;
+
+	switch (goal->metric) {
+	case DAMOS_QUOTA_USER_INPUT:
+		/* User should already set goal->current_value */
+		break;
+	case DAMOS_QUOTA_SOME_MEM_PSI_US:
+		now_psi_total = damos_get_some_mem_psi_total();
+		goal->current_value = now_psi_total - goal->last_psi_total;
+		goal->last_psi_total = now_psi_total;
+		break;
+	default:
+		break;
+	}
+}
+
+/* Return the highest score since it makes schemes least aggressive */
+static unsigned long damos_quota_score(struct damos_quota *quota)
+{
+	struct damos_quota_goal *goal;
+	unsigned long highest_score = 0;
+
+	damos_for_each_quota_goal(goal, quota) {
+		damos_set_quota_goal_current_value(goal);
+		highest_score = max(highest_score,
+				goal->current_value * 10000 /
+				goal->target_value);
+	}
+
+	return highest_score;
+}
+
+/*
+ * Called only if quota->ms, or quota->sz are set, or quota->goals is not empty
+ */
 static void damos_set_effective_quota(struct damos_quota *quota)
 {
 	unsigned long throughput;
 	unsigned long esz;
 
-	if (!quota->ms && !quota->get_score) {
+	if (!quota->ms && list_empty(&quota->goals)) {
 		quota->esz = quota->sz;
 		return;
 	}
 
-	if (quota->get_score) {
+	if (!list_empty(&quota->goals)) {
+		unsigned long score = damos_quota_score(quota);
+
 		quota->esz_bp = damon_feed_loop_next_input(
 				max(quota->esz_bp, 10000UL),
-				quota->get_score(quota->get_score_arg));
+				score);
 		esz = quota->esz_bp / 10000;
 	}
 
@@ -1107,7 +1207,7 @@ static void damos_set_effective_quota(struct damos_quota *quota)
 				quota->total_charged_ns;
 		else
 			throughput = PAGE_SIZE * 1024;
-		if (quota->get_score)
+		if (!list_empty(&quota->goals))
 			esz = min(throughput * quota->ms, esz);
 		else
 			esz = throughput * quota->ms;
@@ -1127,7 +1227,7 @@ static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
 	unsigned long cumulated_sz;
 	unsigned int score, max_score = 0;
 
-	if (!quota->ms && !quota->sz && !quota->get_score)
+	if (!quota->ms && !quota->sz && list_empty(&quota->goals))
 		return;
 
 	/* New charge window starts */
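The core.c hunks above replace the old per-quota get_score()/get_score_arg callback with a list of struct damos_quota_goal objects plus a metric enum. As a rough, illustrative sketch of the new in-kernel API only (it assumes the declarations are exported through include/linux/damon.h, which is outside the mm/damon diff shown here, and the example_ name is hypothetical), a caller could attach both kinds of goals to a scheme like this, mirroring what the DAMON_RECLAIM change further down does:

#include <linux/damon.h>

/* Illustrative only: attach a PSI goal and a user-feedback goal to a scheme. */
static int example_attach_quota_goals(struct damos *scheme,
		unsigned long psi_target_us, unsigned long user_feedback)
{
	struct damos_quota_goal *goal;

	/* aim for this much "some" memory PSI per quota reset interval */
	goal = damos_new_quota_goal(DAMOS_QUOTA_SOME_MEM_PSI_US, psi_target_us);
	if (!goal)
		return -ENOMEM;
	damos_add_quota_goal(&scheme->quota, goal);

	/* user-fed score against the fixed 10,000 target */
	goal = damos_new_quota_goal(DAMOS_QUOTA_USER_INPUT, 10000);
	if (!goal)
		return -ENOMEM;
	/* for DAMOS_QUOTA_USER_INPUT the caller keeps current_value updated */
	goal->current_value = user_feedback;
	damos_add_quota_goal(&scheme->quota, goal);
	return 0;
}

Goals attached this way are torn down together with the scheme: damon_destroy_scheme() now walks damos_for_each_quota_goal_safe() and calls damos_destroy_quota_goal() on each entry.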
diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c
index 7dac24e69e..2461cfe2e9 100644
--- a/mm/damon/dbgfs.c
+++ b/mm/damon/dbgfs.c
@@ -15,6 +15,11 @@
 #include <linux/page_idle.h>
 #include <linux/slab.h>
 
+#define DAMON_DBGFS_DEPRECATION_NOTICE	\
+	"DAMON debugfs interface is deprecated, so users should move "	\
+	"to DAMON_SYSFS. If you cannot, please report your usecase to "	\
+	"damon@lists.linux.dev and linux-mm@kvack.org.\n"
+
 static struct damon_ctx **dbgfs_ctxs;
 static int dbgfs_nr_ctxs;
 static struct dentry **dbgfs_dirs;
@@ -22,10 +27,7 @@ static DEFINE_MUTEX(damon_dbgfs_lock);
 
 static void damon_dbgfs_warn_deprecation(void)
 {
-	pr_warn_once("DAMON debugfs interface is deprecated, "
-			"so users should move to DAMON_SYSFS. If you cannot, "
-			"please report your usecase to damon@lists.linux.dev and "
-			"linux-mm@kvack.org.\n");
+	pr_warn_once(DAMON_DBGFS_DEPRECATION_NOTICE);
 }
 
 /*
@@ -805,6 +807,14 @@ static void dbgfs_destroy_ctx(struct damon_ctx *ctx)
 	damon_destroy_ctx(ctx);
 }
 
+static ssize_t damon_dbgfs_deprecated_read(struct file *file,
+		char __user *buf, size_t count, loff_t *ppos)
+{
+	static const char kbuf[512] = DAMON_DBGFS_DEPRECATION_NOTICE;
+
+	return simple_read_from_buffer(buf, count, ppos, kbuf, strlen(kbuf));
+}
+
 /*
  * Make a context of @name and create a debugfs directory for it.
  *
@@ -1056,6 +1066,10 @@ static int damon_dbgfs_static_file_open(struct inode *inode, struct file *file)
 	return nonseekable_open(inode, file);
 }
 
+static const struct file_operations deprecated_fops = {
+	.read = damon_dbgfs_deprecated_read,
+};
+
 static const struct file_operations mk_contexts_fops = {
 	.open = damon_dbgfs_static_file_open,
 	.write = dbgfs_mk_context_write,
@@ -1076,9 +1090,9 @@ static int __init __damon_dbgfs_init(void)
 {
 	struct dentry *dbgfs_root;
 	const char * const file_names[] = {"mk_contexts", "rm_contexts",
-		"monitor_on"};
+		"monitor_on_DEPRECATED", "DEPRECATED"};
 	const struct file_operations *fops[] = {&mk_contexts_fops,
-		&rm_contexts_fops, &monitor_on_fops};
+		&rm_contexts_fops, &monitor_on_fops, &deprecated_fops};
 	int i;
 
 	dbgfs_root = debugfs_create_dir("damon", NULL);
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index 081e2a3257..5e6dc31207 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -249,7 +249,7 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s)
 put_folio:
 		folio_put(folio);
 	}
-	applied = reclaim_pages(&folio_list);
+	applied = reclaim_pages(&folio_list, false);
 	cond_resched();
 	return applied * PAGE_SIZE;
 }
diff --git a/mm/damon/reclaim.c b/mm/damon/reclaim.c
index 66e190f037..9bd341d62b 100644
--- a/mm/damon/reclaim.c
+++ b/mm/damon/reclaim.c
@@ -62,6 +62,36 @@ static struct damos_quota damon_reclaim_quota = {
 };
 DEFINE_DAMON_MODULES_DAMOS_QUOTAS(damon_reclaim_quota);
 
+/*
+ * Desired level of memory pressure-stall time in microseconds.
+ *
+ * While keeping the caps that set by other quotas, DAMON_RECLAIM automatically
+ * increases and decreases the effective level of the quota aiming this level of
+ * memory pressure is incurred.  System-wide ``some`` memory PSI in microseconds
+ * per quota reset interval (``quota_reset_interval_ms``) is collected and
+ * compared to this value to see if the aim is satisfied.  Value zero means
+ * disabling this auto-tuning feature.
+ *
+ * Disabled by default.
+ */
+static unsigned long quota_mem_pressure_us __read_mostly;
+module_param(quota_mem_pressure_us, ulong, 0600);
+
+/*
+ * User-specifiable feedback for auto-tuning of the effective quota.
+ *
+ * While keeping the caps that set by other quotas, DAMON_RECLAIM automatically
+ * increases and decreases the effective level of the quota aiming receiving this
+ * feedback of value ``10,000`` from the user.  DAMON_RECLAIM assumes the feedback
+ * value and the quota are positively proportional.  Value zero means disabling
+ * this auto-tuning feature.
+ *
+ * Disabled by default.
+ *
+ */
+static unsigned long quota_autotune_feedback __read_mostly;
+module_param(quota_autotune_feedback, ulong, 0600);
+
 static struct damos_watermarks damon_reclaim_wmarks = {
 	.metric = DAMOS_WMARK_FREE_MEM_RATE,
 	.interval = 5000000,	/* 5 seconds */
@@ -159,11 +189,13 @@ static void damon_reclaim_copy_quota_status(struct damos_quota *dst,
 	dst->charged_from = src->charged_from;
 	dst->charge_target_from = src->charge_target_from;
 	dst->charge_addr_from = src->charge_addr_from;
+	dst->esz_bp = src->esz_bp;
 }
 
 static int damon_reclaim_apply_parameters(void)
 {
 	struct damos *scheme, *old_scheme;
+	struct damos_quota_goal *goal;
 	struct damos_filter *filter;
 	int err = 0;
 
@@ -180,6 +212,27 @@ static int damon_reclaim_apply_parameters(void)
 			damon_reclaim_copy_quota_status(&scheme->quota,
 					&old_scheme->quota);
 	}
+
+	if (quota_mem_pressure_us) {
+		goal = damos_new_quota_goal(DAMOS_QUOTA_SOME_MEM_PSI_US,
+				quota_mem_pressure_us);
+		if (!goal) {
+			damon_destroy_scheme(scheme);
+			return -ENOMEM;
+		}
+		damos_add_quota_goal(&scheme->quota, goal);
+	}
+
+	if (quota_autotune_feedback) {
+		goal = damos_new_quota_goal(DAMOS_QUOTA_USER_INPUT, 10000);
+		if (!goal) {
+			damon_destroy_scheme(scheme);
+			return -ENOMEM;
+		}
+		goal->current_value = quota_autotune_feedback;
+		damos_add_quota_goal(&scheme->quota, goal);
+	}
+
 	if (skip_anon) {
 		filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true);
 		if (!filter) {
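To make the direction of the DAMON_RECLAIM auto-tuning above concrete, here is a toy userspace recomputation of the score that damos_quota_score() (added in core.c above) would derive for the PSI goal built from quota_mem_pressure_us; the numbers are invented for illustration only:

#include <stdio.h>

int main(void)
{
	unsigned long target_value = 1000;	/* quota_mem_pressure_us */
	unsigned long current_value = 250;	/* "some" memory PSI observed in the last reset interval, in usecs */
	unsigned long score = current_value * 10000 / target_value;

	/*
	 * 2500 basis points here: the observed pressure is below the target,
	 * so the feedback loop can raise the effective quota.  A score above
	 * 10000 would push it down, since a higher score makes the scheme
	 * less aggressive.
	 */
	printf("score = %lu bp\n", score);
	return 0;
}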
diff --git a/mm/damon/sysfs-common.h b/mm/damon/sysfs-common.h
index 4c37a166eb..a63f51577c 100644
--- a/mm/damon/sysfs-common.h
+++ b/mm/damon/sysfs-common.h
@@ -49,6 +49,8 @@ int damon_sysfs_schemes_update_regions_start(
 		struct damon_sysfs_schemes *sysfs_schemes,
 		struct damon_ctx *ctx, bool total_bytes_only);
 
+void damos_sysfs_mark_finished_regions_updates(struct damon_ctx *ctx);
+
 bool damos_sysfs_regions_upd_done(void);
 
 int damon_sysfs_schemes_update_regions_stop(struct damon_ctx *ctx);
@@ -57,5 +59,9 @@ int damon_sysfs_schemes_clear_regions(
 		struct damon_sysfs_schemes *sysfs_schemes,
 		struct damon_ctx *ctx);
 
-void damos_sysfs_set_quota_scores(struct damon_sysfs_schemes *sysfs_schemes,
+int damos_sysfs_set_quota_scores(struct damon_sysfs_schemes *sysfs_schemes,
+		struct damon_ctx *ctx);
+
+void damos_sysfs_update_effective_quotas(
+		struct damon_sysfs_schemes *sysfs_schemes,
 		struct damon_ctx *ctx);
diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c
index ae0f0b314f..53a90ac678 100644
--- a/mm/damon/sysfs-schemes.c
+++ b/mm/damon/sysfs-schemes.c
@@ -127,17 +127,17 @@ static const struct kobj_type damon_sysfs_scheme_region_ktype = {
  *
  * Once the tried regions update request is received, the request handling
  * start function (damon_sysfs_scheme_update_regions_start()) sets the status
- * of all schemes as 'idle' again, and register ->before_damos_apply() and
- * ->after_sampling() callbacks.
+ * of all schemes as 'idle' again, and register ->before_damos_apply()
+ * callback.
  *
  * Then, the first followup ->before_damos_apply() callback
  * (damon_sysfs_before_damos_apply()) sets the status 'started'.  The first
- * ->after_sampling() callback (damon_sysfs_after_sampling()) after the call
- * is called only after the scheme is completely applied
- * to the given snapshot.  Hence the callback knows the situation by showing
- * 'started' status, and sets the status as 'finished'.  Then,
- * damon_sysfs_before_damos_apply() understands the situation by showing the
- * 'finished' status and do nothing.
+ * ->after_sampling() or ->after_aggregation() callback
+ * (damon_sysfs_cmd_request_callback()) after the call is called only after
+ * the scheme is completely applied to the given snapshot.  Hence the callback
+ * knows the situation by showing 'started' status, and sets the status as
+ * 'finished'.  Then, damon_sysfs_before_damos_apply() understands the
+ * situation by showing the 'finished' status and do nothing.
  *
  * If DAMOS is not applied to any region due to any reasons including the
  * access pattern, the watermarks, the quotas, and the filters,
@@ -826,15 +826,48 @@ static const struct kobj_type damon_sysfs_watermarks_ktype = {
 
 struct damos_sysfs_quota_goal {
 	struct kobject kobj;
+	enum damos_quota_goal_metric metric;
 	unsigned long target_value;
 	unsigned long current_value;
 };
 
+/* This should match with enum damos_action */
+static const char * const damos_sysfs_quota_goal_metric_strs[] = {
+	"user_input",
+	"some_mem_psi_us",
+};
+
 static struct damos_sysfs_quota_goal *damos_sysfs_quota_goal_alloc(void)
 {
 	return kzalloc(sizeof(struct damos_sysfs_quota_goal), GFP_KERNEL);
 }
 
+static ssize_t target_metric_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	struct damos_sysfs_quota_goal *goal = container_of(kobj,
+			struct damos_sysfs_quota_goal, kobj);
+
+	return sysfs_emit(buf, "%s\n",
+			damos_sysfs_quota_goal_metric_strs[goal->metric]);
+}
+
+static ssize_t target_metric_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	struct damos_sysfs_quota_goal *goal = container_of(kobj,
+			struct damos_sysfs_quota_goal, kobj);
+	enum damos_quota_goal_metric m;
+
+	for (m = 0; m < NR_DAMOS_QUOTA_GOAL_METRICS; m++) {
+		if (sysfs_streq(buf, damos_sysfs_quota_goal_metric_strs[m])) {
+			goal->metric = m;
+			return count;
+		}
+	}
+	return -EINVAL;
+}
+
 static ssize_t target_value_show(struct kobject *kobj,
 		struct kobj_attribute *attr, char *buf)
 {
@@ -880,6 +913,9 @@ static void damos_sysfs_quota_goal_release(struct kobject *kobj)
 	kfree(container_of(kobj, struct damos_sysfs_quota_goal, kobj));
 }
 
+static struct kobj_attribute damos_sysfs_quota_goal_target_metric_attr =
+		__ATTR_RW_MODE(target_metric, 0600);
+
 static struct kobj_attribute damos_sysfs_quota_goal_target_value_attr =
 		__ATTR_RW_MODE(target_value, 0600);
 
@@ -887,6 +923,7 @@ static struct kobj_attribute damos_sysfs_quota_goal_current_value_attr =
 		__ATTR_RW_MODE(current_value, 0600);
 
 static struct attribute *damos_sysfs_quota_goal_attrs[] = {
+	&damos_sysfs_quota_goal_target_metric_attr.attr,
 	&damos_sysfs_quota_goal_target_value_attr.attr,
 	&damos_sysfs_quota_goal_current_value_attr.attr,
 	NULL,
@@ -1139,6 +1176,7 @@ struct damon_sysfs_quotas {
 	unsigned long ms;
 	unsigned long sz;
 	unsigned long reset_interval_ms;
+	unsigned long effective_sz;	/* Effective size quota in bytes */
 };
 
 static struct damon_sysfs_quotas *damon_sysfs_quotas_alloc(void)
@@ -1252,6 +1290,15 @@ static ssize_t reset_interval_ms_store(struct kobject *kobj,
 	return count;
 }
 
+static ssize_t effective_bytes_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	struct damon_sysfs_quotas *quotas = container_of(kobj,
+			struct damon_sysfs_quotas, kobj);
+
+	return sysfs_emit(buf, "%lu\n", quotas->effective_sz);
+}
+
 static void damon_sysfs_quotas_release(struct kobject *kobj)
 {
 	kfree(container_of(kobj, struct damon_sysfs_quotas, kobj));
@@ -1266,10 +1313,14 @@ static struct kobj_attribute damon_sysfs_quotas_sz_attr =
 static struct kobj_attribute damon_sysfs_quotas_reset_interval_ms_attr =
 		__ATTR_RW_MODE(reset_interval_ms, 0600);
 
+static struct kobj_attribute damon_sysfs_quotas_effective_bytes_attr =
+		__ATTR_RO_MODE(effective_bytes, 0400);
+
 static struct attribute *damon_sysfs_quotas_attrs[] = {
 	&damon_sysfs_quotas_ms_attr.attr,
 	&damon_sysfs_quotas_sz_attr.attr,
 	&damon_sysfs_quotas_reset_interval_ms_attr.attr,
+	&damon_sysfs_quotas_effective_bytes_attr.attr,
 	NULL,
 };
 ATTRIBUTE_GROUPS(damon_sysfs_quotas);
@@ -1868,35 +1919,35 @@ static int damon_sysfs_set_scheme_filters(struct damos *scheme,
 	return 0;
 }
 
-static unsigned long damos_sysfs_get_quota_score(void *arg)
-{
-	return (unsigned long)arg;
-}
-
-static void damos_sysfs_set_quota_score(
+static int damos_sysfs_set_quota_score(
 		struct damos_sysfs_quota_goals *sysfs_goals,
 		struct damos_quota *quota)
 {
-	struct damos_sysfs_quota_goal *sysfs_goal;
+	struct damos_quota_goal *goal, *next;
 	int i;
 
-	quota->get_score = NULL;
-	quota->get_score_arg = (void *)0;
+	damos_for_each_quota_goal_safe(goal, next, quota)
+		damos_destroy_quota_goal(goal);
+
 	for (i = 0; i < sysfs_goals->nr; i++) {
-		sysfs_goal = sysfs_goals->goals_arr[i];
+		struct damos_sysfs_quota_goal *sysfs_goal =
+			sysfs_goals->goals_arr[i];
+
 		if (!sysfs_goal->target_value)
 			continue;
 
-		/* Higher score makes scheme less aggressive */
-		quota->get_score_arg = (void *)max(
-				(unsigned long)quota->get_score_arg,
-				sysfs_goal->current_value * 10000 /
+		goal = damos_new_quota_goal(sysfs_goal->metric,
 				sysfs_goal->target_value);
-		quota->get_score = damos_sysfs_get_quota_score;
+		if (!goal)
+			return -ENOMEM;
+		if (sysfs_goal->metric == DAMOS_QUOTA_USER_INPUT)
+			goal->current_value = sysfs_goal->current_value;
+		damos_add_quota_goal(quota, goal);
 	}
+	return 0;
 }
 
-void damos_sysfs_set_quota_scores(struct damon_sysfs_schemes *sysfs_schemes,
+int damos_sysfs_set_quota_scores(struct damon_sysfs_schemes *sysfs_schemes,
 		struct damon_ctx *ctx)
 {
 	struct damos *scheme;
@@ -1904,16 +1955,41 @@ void damos_sysfs_set_quota_scores(struct damon_sysfs_schemes *sysfs_schemes,
 
 	damon_for_each_scheme(scheme, ctx) {
 		struct damon_sysfs_scheme *sysfs_scheme;
+		int err;
 
 		/* user could have removed the scheme sysfs dir */
 		if (i >= sysfs_schemes->nr)
 			break;
 
 		sysfs_scheme = sysfs_schemes->schemes_arr[i];
-		damos_sysfs_set_quota_score(sysfs_scheme->quotas->goals,
+		err = damos_sysfs_set_quota_score(sysfs_scheme->quotas->goals,
 				&scheme->quota);
+		if (err)
+			/* kdamond will clean up schemes and terminated */
+			return err;
 		i++;
 	}
+	return 0;
+}
+
+void damos_sysfs_update_effective_quotas(
+		struct damon_sysfs_schemes *sysfs_schemes,
+		struct damon_ctx *ctx)
+{
+	struct damos *scheme;
+	int schemes_idx = 0;
+
+	damon_for_each_scheme(scheme, ctx) {
+		struct damon_sysfs_quotas *sysfs_quotas;
+
+		/* user could have removed the scheme sysfs dir */
+		if (schemes_idx >= sysfs_schemes->nr)
+			break;
+
+		sysfs_quotas =
+			sysfs_schemes->schemes_arr[schemes_idx++]->quotas;
+		sysfs_quotas->effective_sz = scheme->quota.esz;
+	}
 }
 
 static struct damos *damon_sysfs_mk_scheme(
@@ -1953,13 +2029,17 @@ static struct damos *damon_sysfs_mk_scheme(
 		.low = sysfs_wmarks->low,
 	};
 
-	damos_sysfs_set_quota_score(sysfs_quotas->goals, &quota);
-
 	scheme = damon_new_scheme(&pattern, sysfs_scheme->action,
 			sysfs_scheme->apply_interval_us, &quota, &wmarks);
 	if (!scheme)
 		return NULL;
 
+	err = damos_sysfs_set_quota_score(sysfs_quotas->goals, &scheme->quota);
+	if (err) {
+		damon_destroy_scheme(scheme);
+		return NULL;
+	}
+
 	err = damon_sysfs_set_scheme_filters(scheme, sysfs_filters);
 	if (err) {
 		damon_destroy_scheme(scheme);
@@ -1995,7 +2075,11 @@ static void damon_sysfs_update_scheme(struct damos *scheme,
 	scheme->quota.weight_nr_accesses = sysfs_weights->nr_accesses;
 	scheme->quota.weight_age = sysfs_weights->age;
 
-	damos_sysfs_set_quota_score(sysfs_quotas->goals, &scheme->quota);
+	err = damos_sysfs_set_quota_score(sysfs_quotas->goals, &scheme->quota);
+	if (err) {
+		damon_destroy_scheme(scheme);
+		return;
+	}
 
 	scheme->wmarks.metric = sysfs_wmarks->metric;
 	scheme->wmarks.interval = sysfs_wmarks->interval_us;
@@ -2122,7 +2206,7 @@ static int damon_sysfs_before_damos_apply(struct damon_ctx *ctx,
  * callback is registered, damon_sysfs_lock should be held to ensure the
  * regions directories exist.
  */
-static int damon_sysfs_after_sampling(struct damon_ctx *ctx)
+void damos_sysfs_mark_finished_regions_updates(struct damon_ctx *ctx)
 {
 	struct damon_sysfs_schemes *sysfs_schemes =
 		damon_sysfs_schemes_for_damos_callback;
@@ -2138,8 +2222,6 @@ static int damon_sysfs_after_sampling(struct damon_ctx *ctx)
 			sysfs_regions->upd_status =
 				DAMOS_TRIED_REGIONS_UPD_FINISHED;
 	}
-
-	return 0;
 }
 
 /* Called from damon_sysfs_cmd_request_callback under damon_sysfs_lock */
@@ -2212,7 +2294,6 @@ int damon_sysfs_schemes_update_regions_start(
 	damos_tried_regions_init_upd_status(sysfs_schemes, ctx);
 	damos_regions_upd_total_bytes_only = total_bytes_only;
 	ctx->callback.before_damos_apply = damon_sysfs_before_damos_apply;
-	ctx->callback.after_sampling = damon_sysfs_after_sampling;
 	return 0;
 }
 
@@ -2241,7 +2322,6 @@ int damon_sysfs_schemes_update_regions_stop(struct damon_ctx *ctx)
 {
 	damon_sysfs_schemes_for_damos_callback = NULL;
 	ctx->callback.before_damos_apply = NULL;
-	ctx->callback.after_sampling = NULL;
 	damon_sysfs_schemes_region_idx = 0;
 	return 0;
 }
diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index 1f891e18b4..6fee383bc0 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -1020,6 +1020,11 @@ enum damon_sysfs_cmd {
 	 */
 	DAMON_SYSFS_CMD_CLEAR_SCHEMES_TRIED_REGIONS,
 	/*
+	 * @DAMON_SYSFS_CMD_UPDATE_SCHEMES_EFFECTIVE_QUOTAS: Update the
+	 * effective size quota of the scheme in bytes.
+	 */
+	DAMON_SYSFS_CMD_UPDATE_SCHEMES_EFFECTIVE_QUOTAS,
+	/*
 	 * @NR_DAMON_SYSFS_CMDS: Total number of DAMON sysfs commands.
 	 */
 	NR_DAMON_SYSFS_CMDS,
@@ -1035,6 +1040,7 @@ static const char * const damon_sysfs_cmd_strs[] = {
 	"update_schemes_tried_bytes",
 	"update_schemes_tried_regions",
 	"clear_schemes_tried_regions",
+	"update_schemes_effective_quotas",
 };
 
 /*
@@ -1371,19 +1377,43 @@ static int damon_sysfs_commit_schemes_quota_goals(
 	ctx = sysfs_kdamond->damon_ctx;
 	sysfs_ctx = sysfs_kdamond->contexts->contexts_arr[0];
-	damos_sysfs_set_quota_scores(sysfs_ctx->schemes, ctx);
+	return damos_sysfs_set_quota_scores(sysfs_ctx->schemes, ctx);
+}
+
+/*
+ * damon_sysfs_upd_schemes_effective_quotas() - Update schemes effective quotas
+ * sysfs files.
+ * @kdamond:	The kobject wrapper that associated to the kdamond thread.
+ *
+ * This function reads the schemes' effective quotas of specific kdamond and
+ * update the related values for sysfs files.  This function should be called
+ * from DAMON callbacks while holding ``damon_syfs_lock``, to safely access the
+ * DAMON contexts-internal data and DAMON sysfs variables.
+ */
+static int damon_sysfs_upd_schemes_effective_quotas(
+		struct damon_sysfs_kdamond *kdamond)
+{
+	struct damon_ctx *ctx = kdamond->damon_ctx;
+
+	if (!ctx)
+		return -EINVAL;
+	damos_sysfs_update_effective_quotas(
+			kdamond->contexts->contexts_arr[0]->schemes, ctx);
 	return 0;
 }
 
+
 /*
  * damon_sysfs_cmd_request_callback() - DAMON callback for handling requests.
  * @c:	The DAMON context of the callback.
  * @active:	Whether @c is not deactivated due to watermarks.
+ * @after_aggr:	Whether this is called from after_aggregation() callback.
  *
  * This function is periodically called back from the kdamond thread for @c.
  * Then, it checks if there is a waiting DAMON sysfs request and handles it.
  */
-static int damon_sysfs_cmd_request_callback(struct damon_ctx *c, bool active)
+static int damon_sysfs_cmd_request_callback(struct damon_ctx *c, bool active,
+		bool after_aggregation)
 {
 	struct damon_sysfs_kdamond *kdamond;
 	bool total_bytes_only = false;
@@ -1401,6 +1431,8 @@ static int damon_sysfs_cmd_request_callback(struct damon_ctx *c, bool active)
 		err = damon_sysfs_upd_schemes_stats(kdamond);
 		break;
 	case DAMON_SYSFS_CMD_COMMIT:
+		if (!after_aggregation)
+			goto out;
 		err = damon_sysfs_commit_input(kdamond);
 		break;
 	case DAMON_SYSFS_CMD_COMMIT_SCHEMES_QUOTA_GOALS:
@@ -1418,6 +1450,7 @@ static int damon_sysfs_cmd_request_callback(struct damon_ctx *c, bool active)
 				goto keep_lock_out;
 			}
 		} else {
+			damos_sysfs_mark_finished_regions_updates(c);
 			/*
 			 * Continue regions updating if DAMON is till
 			 * active and the update for all schemes is not
@@ -1432,6 +1465,9 @@ static int damon_sysfs_cmd_request_callback(struct damon_ctx *c, bool active)
 	case DAMON_SYSFS_CMD_CLEAR_SCHEMES_TRIED_REGIONS:
 		err = damon_sysfs_clear_schemes_regions(kdamond);
 		break;
+	case DAMON_SYSFS_CMD_UPDATE_SCHEMES_EFFECTIVE_QUOTAS:
+		err = damon_sysfs_upd_schemes_effective_quotas(kdamond);
+		break;
 	default:
 		break;
 	}
@@ -1450,7 +1486,16 @@ static int damon_sysfs_after_wmarks_check(struct damon_ctx *c)
 	 * after_wmarks_check() is called back while the context is deactivated
 	 * by watermarks.
 	 */
-	return damon_sysfs_cmd_request_callback(c, false);
+	return damon_sysfs_cmd_request_callback(c, false, false);
+}
+
+static int damon_sysfs_after_sampling(struct damon_ctx *c)
+{
+	/*
+	 * after_sampling() is called back only while the context is not
+	 * deactivated by watermarks.
+	 */
+	return damon_sysfs_cmd_request_callback(c, true, false);
 }
 
 static int damon_sysfs_after_aggregation(struct damon_ctx *c)
@@ -1459,7 +1504,7 @@ static int damon_sysfs_after_aggregation(struct damon_ctx *c)
 	 * after_aggregation() is called back only while the context is not
 	 * deactivated by watermarks.
 	 */
-	return damon_sysfs_cmd_request_callback(c, true);
+	return damon_sysfs_cmd_request_callback(c, true, true);
 }
 
 static struct damon_ctx *damon_sysfs_build_ctx(
@@ -1478,6 +1523,7 @@ static struct damon_ctx *damon_sysfs_build_ctx(
 	}
 
 	ctx->callback.after_wmarks_check = damon_sysfs_after_wmarks_check;
+	ctx->callback.after_sampling = damon_sysfs_after_sampling;
 	ctx->callback.after_aggregation = damon_sysfs_after_aggregation;
 	ctx->callback.before_terminate = damon_sysfs_before_terminate;
 	return ctx;
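Putting the sysfs-schemes.c and sysfs.c pieces above together, a userspace consumer would select a goal metric for a quota goal, then issue the new update_schemes_effective_quotas command and read effective_bytes back. A minimal sketch follows; it assumes the usual DAMON sysfs layout under /sys/kernel/mm/damon/admin and an already configured scheme with a running kdamond, none of which is shown in this diff:

#include <stdio.h>

/* Assumed locations under the standard DAMON sysfs hierarchy. */
#define GOALS	"/sys/kernel/mm/damon/admin/kdamonds/0/contexts/0/schemes/0/quotas/goals/"
#define QUOTAS	"/sys/kernel/mm/damon/admin/kdamonds/0/contexts/0/schemes/0/quotas/"
#define STATE	"/sys/kernel/mm/damon/admin/kdamonds/0/state"

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%s", val);
	return fclose(f);
}

int main(void)
{
	char buf[64] = "";
	FILE *f;

	write_str(GOALS "nr_goals", "1");		/* create goals/0 */
	write_str(GOALS "0/target_metric", "some_mem_psi_us");
	write_str(GOALS "0/target_value", "1000");	/* 1000us of "some" memory PSI per reset interval */

	/* make the running kdamond pick up the new goal */
	write_str(STATE, "commit_schemes_quota_goals");

	/* ask the kdamond to refresh quotas/effective_bytes ... */
	write_str(STATE, "update_schemes_effective_quotas");

	/* ... then read the effective size quota back, in bytes */
	f = fopen(QUOTAS "effective_bytes", "r");
	if (f && fgets(buf, sizeof(buf), f))
		printf("effective quota: %s", buf);
	if (f)
		fclose(f);
	return 0;
}

With the user_input metric instead, the same goals/0 directory's current_value file would be written periodically as the feedback signal, which is what the quota_autotune_feedback module parameter automates for DAMON_RECLAIM.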