author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-18 17:40:19 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-18 17:40:19 +0000
commit | 9f0fc191371843c4fc000a226b0a26b6c059aacd (patch)
tree | 35f8be3ef04506ac891ad001e8c41e535ae8d01d /kernel/sched/psi.c
parent | Releasing progress-linux version 6.6.15-2~progress7.99u1. (diff)
download | linux-9f0fc191371843c4fc000a226b0a26b6c059aacd.tar.xz, linux-9f0fc191371843c4fc000a226b0a26b6c059aacd.zip
Merging upstream version 6.7.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat
-rw-r--r-- | kernel/sched/psi.c | 58
1 file changed, 25 insertions, 33 deletions
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index 1d0f634725..7b4aa5809c 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -434,14 +434,13 @@ static u64 window_update(struct psi_window *win, u64 now, u64 value)
 	return growth;
 }
 
-static u64 update_triggers(struct psi_group *group, u64 now, bool *update_total,
+static void update_triggers(struct psi_group *group, u64 now,
 			   enum psi_aggregators aggregator)
 {
 	struct psi_trigger *t;
 	u64 *total = group->total[aggregator];
 	struct list_head *triggers;
 	u64 *aggregator_total;
-	*update_total = false;
 
 	if (aggregator == PSI_AVGS) {
 		triggers = &group->avg_triggers;
@@ -471,14 +470,6 @@ static u64 update_triggers(struct psi_group *group, u64 now, bool *update_total,
 		 * events without dropping any).
 		 */
 		if (new_stall) {
-			/*
-			 * Multiple triggers might be looking at the same state,
-			 * remember to update group->polling_total[] once we've
-			 * been through all of them. Also remember to extend the
-			 * polling time if we see new stall activity.
-			 */
-			*update_total = true;
-
 			/* Calculate growth since last update */
 			growth = window_update(&t->win, now, total[t->state]);
 			if (!t->pending_event) {
@@ -503,8 +494,6 @@ static u64 update_triggers(struct psi_group *group, u64 now, bool *update_total,
 		/* Reset threshold breach flag once event got generated */
 		t->pending_event = false;
 	}
-
-	return now + group->rtpoll_min_period;
 }
 
 static u64 update_averages(struct psi_group *group, u64 now)
@@ -565,7 +554,6 @@ static void psi_avgs_work(struct work_struct *work)
 	struct delayed_work *dwork;
 	struct psi_group *group;
 	u32 changed_states;
-	bool update_total;
 	u64 now;
 
 	dwork = to_delayed_work(work);
@@ -584,7 +572,7 @@ static void psi_avgs_work(struct work_struct *work)
 	 * go - see calc_avgs() and missed_periods.
 	 */
 	if (now >= group->avg_next_update) {
-		update_triggers(group, now, &update_total, PSI_AVGS);
+		update_triggers(group, now, PSI_AVGS);
 		group->avg_next_update = update_averages(group, now);
 	}
 
@@ -608,7 +596,7 @@ static void init_rtpoll_triggers(struct psi_group *group, u64 now)
 	group->rtpoll_next_update = now + group->rtpoll_min_period;
 }
 
-/* Schedule polling if it's not already scheduled or forced. */
+/* Schedule rtpolling if it's not already scheduled or forced. */
 static void psi_schedule_rtpoll_work(struct psi_group *group, unsigned long delay,
 				   bool force)
 {
@@ -640,7 +628,6 @@ static void psi_rtpoll_work(struct psi_group *group)
 {
 	bool force_reschedule = false;
 	u32 changed_states;
-	bool update_total;
 	u64 now;
 
 	mutex_lock(&group->rtpoll_trigger_lock);
@@ -649,37 +636,37 @@ static void psi_rtpoll_work(struct psi_group *group)
 
 	if (now > group->rtpoll_until) {
 		/*
-		 * We are either about to start or might stop polling if no
-		 * state change was recorded. Resetting poll_scheduled leaves
+		 * We are either about to start or might stop rtpolling if no
+		 * state change was recorded. Resetting rtpoll_scheduled leaves
 		 * a small window for psi_group_change to sneak in and schedule
-		 * an immediate poll_work before we get to rescheduling. One
-		 * potential extra wakeup at the end of the polling window
-		 * should be negligible and polling_next_update still keeps
+		 * an immediate rtpoll_work before we get to rescheduling. One
+		 * potential extra wakeup at the end of the rtpolling window
+		 * should be negligible and rtpoll_next_update still keeps
 		 * updates correctly on schedule.
 		 */
 		atomic_set(&group->rtpoll_scheduled, 0);
 		/*
-		 * A task change can race with the poll worker that is supposed to
+		 * A task change can race with the rtpoll worker that is supposed to
 		 * report on it. To avoid missing events, ensure ordering between
-		 * poll_scheduled and the task state accesses, such that if the poll
-		 * worker misses the state update, the task change is guaranteed to
-		 * reschedule the poll worker:
+		 * rtpoll_scheduled and the task state accesses, such that if the
+		 * rtpoll worker misses the state update, the task change is
+		 * guaranteed to reschedule the rtpoll worker:
 		 *
-		 * poll worker:
-		 *   atomic_set(poll_scheduled, 0)
+		 * rtpoll worker:
+		 *   atomic_set(rtpoll_scheduled, 0)
 		 *   smp_mb()
 		 *   LOAD states
 		 *
 		 * task change:
 		 *   STORE states
-		 *   if atomic_xchg(poll_scheduled, 1) == 0:
-		 *     schedule poll worker
+		 *   if atomic_xchg(rtpoll_scheduled, 1) == 0:
+		 *     schedule rtpoll worker
 		 *
 		 * The atomic_xchg() implies a full barrier.
 		 */
 		smp_mb();
 	} else {
-		/* Polling window is not over, keep rescheduling */
+		/* The rtpolling window is not over, keep rescheduling */
 		force_reschedule = true;
 	}
 
@@ -687,7 +674,7 @@ static void psi_rtpoll_work(struct psi_group *group)
 	collect_percpu_times(group, PSI_POLL, &changed_states);
 
 	if (changed_states & group->rtpoll_states) {
-		/* Initialize trigger windows when entering polling mode */
+		/* Initialize trigger windows when entering rtpolling mode */
 		if (now > group->rtpoll_until)
 			init_rtpoll_triggers(group, now);
 
@@ -706,10 +693,12 @@ static void psi_rtpoll_work(struct psi_group *group)
 	}
 
 	if (now >= group->rtpoll_next_update) {
-		group->rtpoll_next_update = update_triggers(group, now, &update_total, PSI_POLL);
-		if (update_total)
+		if (changed_states & group->rtpoll_states) {
+			update_triggers(group, now, PSI_POLL);
 			memcpy(group->rtpoll_total, group->total[PSI_POLL],
 				   sizeof(group->rtpoll_total));
+		}
+		group->rtpoll_next_update = now + group->rtpoll_min_period;
 	}
 
 	psi_schedule_rtpoll_work(group,
@@ -1009,6 +998,9 @@ void psi_account_irqtime(struct task_struct *task, u32 delta)
 	struct psi_group_cpu *groupc;
 	u64 now;
 
+	if (static_branch_likely(&psi_disabled))
+		return;
+
 	if (!task->pid)
 		return;
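
The ordering comment touched by the psi_rtpoll_work hunk describes a lost-wakeup avoidance pattern: the worker clears its "scheduled" flag and issues a full barrier before reading the state word, while the task-change path stores the state before atomically exchanging the flag, so at least one side always observes the other's write. Below is a minimal user-space C11 sketch of that pattern, not kernel code: the names rtpoll_scheduled, states and task_change merely mirror the comment, and the seq_cst fence stands in for the kernel's smp_mb().

/* Sketch only: illustrates the ordering, not the actual PSI implementation. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int rtpoll_scheduled; /* 0 = worker idle, 1 = wakeup pending */
static atomic_uint states;          /* state bits published by task changes */

/* Worker side: about to go idle unless new state activity shows up. */
static unsigned int rtpoll_worker_check(void)
{
	atomic_store_explicit(&rtpoll_scheduled, 0, memory_order_relaxed);
	/* Full barrier, roughly analogous to the kernel's smp_mb(). */
	atomic_thread_fence(memory_order_seq_cst);
	return atomic_load_explicit(&states, memory_order_relaxed);
}

/* Task-change side: publish the new state, then wake the worker if nobody did. */
static bool task_change(unsigned int new_state_bits)
{
	atomic_fetch_or_explicit(&states, new_state_bits, memory_order_relaxed);
	/* The atomic exchange is sequentially consistent (a full barrier);
	 * returning 0 means this caller owns the wakeup. */
	return atomic_exchange(&rtpoll_scheduled, 1) == 0;
}

int main(void)
{
	/* Single-threaded demonstration of the API shape only. */
	if (task_change(0x1))
		printf("worker scheduled\n");
	printf("worker observed states: %#x\n", rtpoll_worker_check());
	return 0;
}

Because both the fence in the worker and the exchange in the updater act as full barriers, either the worker sees the freshly stored state bits, or the updater sees the cleared flag and reschedules the worker; the update can be observed late, but never silently dropped.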