author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-27 10:06:00 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-27 10:06:00 +0000
commit     b15a952c52a6825376d3e7f6c1bf5c886c6d8b74 (patch)
tree       1500f2f8f276908a36d8126cb632c0d6b1276764 /debian/patches-rt/0008-sched-Fix-hotplug-vs-CPU-bandwidth-control.patch
parent     Adding upstream version 5.10.209. (diff)
Adding debian version 5.10.209-2. (debian/5.10.209-2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0008-sched-Fix-hotplug-vs-CPU-bandwidth-control.patch')
-rw-r--r--   debian/patches-rt/0008-sched-Fix-hotplug-vs-CPU-bandwidth-control.patch | 94
1 file changed, 94 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0008-sched-Fix-hotplug-vs-CPU-bandwidth-control.patch b/debian/patches-rt/0008-sched-Fix-hotplug-vs-CPU-bandwidth-control.patch
new file mode 100644
index 000000000..6c09cbe86
--- /dev/null
+++ b/debian/patches-rt/0008-sched-Fix-hotplug-vs-CPU-bandwidth-control.patch
@@ -0,0 +1,94 @@
+From 36d2d778f786cb7fd55c549911a6055d6b6f40ef Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Fri, 23 Oct 2020 12:12:05 +0200
+Subject: [PATCH 008/323] sched: Fix hotplug vs CPU bandwidth control
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+Since we now migrate tasks away before DYING, we should also move
+bandwidth unthrottle, otherwise we can gain tasks from unthrottle
+after we expect all tasks to be gone already.
+
+Also; it looks like the RT balancers don't respect cpu_active() and
+instead rely on rq->online in part, complete this. This too requires
+we do set_rq_offline() earlier to match the cpu_active() semantics.
+(The bigger patch is to convert RT to cpu_active() entirely)
+
+Since set_rq_online() is called from sched_cpu_activate(), place
+set_rq_offline() in sched_cpu_deactivate().
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/sched/core.c     | 14 ++++++++++----
+ kernel/sched/deadline.c |  2 +-
+ kernel/sched/rt.c       |  2 +-
+ 3 files changed, 12 insertions(+), 6 deletions(-)
+
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index a26a82c3e939..c5d5576c67fb 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -6979,6 +6979,8 @@ int sched_cpu_activate(unsigned int cpu)
+ 
+ int sched_cpu_deactivate(unsigned int cpu)
+ {
++	struct rq *rq = cpu_rq(cpu);
++	struct rq_flags rf;
+ 	int ret;
+ 
+ 	set_cpu_active(cpu, false);
+@@ -6993,6 +6995,14 @@ int sched_cpu_deactivate(unsigned int cpu)
+ 
+ 	balance_push_set(cpu, true);
+ 
++	rq_lock_irqsave(rq, &rf);
++	if (rq->rd) {
++		update_rq_clock(rq);
++		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
++		set_rq_offline(rq);
++	}
++	rq_unlock_irqrestore(rq, &rf);
++
+ #ifdef CONFIG_SCHED_SMT
+ 	/*
+ 	 * When going down, decrement the number of cores with SMT present.
+@@ -7074,10 +7084,6 @@ int sched_cpu_dying(unsigned int cpu)
+ 	sched_tick_stop(cpu);
+ 
+ 	rq_lock_irqsave(rq, &rf);
+-	if (rq->rd) {
+-		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+-		set_rq_offline(rq);
+-	}
+ 	BUG_ON(rq->nr_running != 1);
+ 	rq_unlock_irqrestore(rq, &rf);
+ 
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
+index d91295d3059f..af8569dbdc9c 100644
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -566,7 +566,7 @@ static int push_dl_task(struct rq *rq);
+ 
+ static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
+ {
+-	return dl_task(prev);
++	return rq->online && dl_task(prev);
+ }
+ 
+ static DEFINE_PER_CPU(struct callback_head, dl_push_head);
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
+index f690f901b6cc..fdcce04913db 100644
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -270,7 +270,7 @@ static void pull_rt_task(struct rq *this_rq);
+ static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
+ {
+ 	/* Try to pull RT tasks here if we lower this rq's prio */
+-	return rq->rt.highest_prio.curr > prev->prio;
++	return rq->online && rq->rt.highest_prio.curr > prev->prio;
+ }
+ 
+ static inline int rt_overloaded(struct rq *rq)
+-- 
+2.43.0
+
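
For readers skimming the patch above: the behavioural core of the change is that the RT and deadline pull predicates now also require the runqueue to be online (rq->online), so a CPU that has already been deactivated is never asked to pull work. The snippet below is a minimal, self-contained userspace model of that gating, not kernel code; the struct layout, field names and priority values are simplified assumptions made purely for illustration.

/*
 * Userspace model (not kernel code) of the rq->online gating added by
 * this patch. Types and fields are illustrative stand-ins only.
 */
#include <stdbool.h>
#include <stdio.h>

struct rq {
	bool online;        /* cleared by set_rq_offline() in the kernel */
	int  highest_prio;  /* lower value == higher priority (kernel-internal convention) */
};

struct task {
	int prio;
};

/* Before the patch: only the priority comparison decided whether to pull. */
static bool need_pull_rt_task_old(const struct rq *rq, const struct task *prev)
{
	return rq->highest_prio > prev->prio;
}

/* After the patch: an offline runqueue opts out of pulling entirely. */
static bool need_pull_rt_task_new(const struct rq *rq, const struct task *prev)
{
	return rq->online && rq->highest_prio > prev->prio;
}

int main(void)
{
	struct rq offline_rq = { .online = false, .highest_prio = 90 };
	struct task prev = { .prio = 10 };

	/* Old predicate would still try to pull onto the offline rq (prints 1). */
	printf("old predicate on offline rq: %d\n",
	       need_pull_rt_task_old(&offline_rq, &prev));
	/* New predicate leaves the offline rq alone (prints 0). */
	printf("new predicate on offline rq: %d\n",
	       need_pull_rt_task_new(&offline_rq, &prev));
	return 0;
}

Built with any C compiler (e.g. gcc -o model model.c && ./model), the model prints 1 for the old predicate and 0 for the new one on an offline runqueue, which mirrors why the real change keeps deactivated CPUs from gaining tasks after set_rq_offline() has run.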