author: Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-27 10:06:00 +0000
committer: Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-27 10:06:00 +0000
commit: b15a952c52a6825376d3e7f6c1bf5c886c6d8b74 (patch)
tree: 1500f2f8f276908a36d8126cb632c0d6b1276764 /debian/patches-rt/0012-sched-core-Make-migrate-disable-and-CPU-hotplug-coop.patch
parent: Adding upstream version 5.10.209. (diff)
Adding debian version 5.10.209-2. (debian/5.10.209-2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to '')
-rw-r--r-- debian/patches-rt/0012-sched-core-Make-migrate-disable-and-CPU-hotplug-coop.patch | 137
1 file changed, 137 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0012-sched-core-Make-migrate-disable-and-CPU-hotplug-coop.patch b/debian/patches-rt/0012-sched-core-Make-migrate-disable-and-CPU-hotplug-coop.patch
new file mode 100644
index 000000000..e89668431
--- /dev/null
+++ b/debian/patches-rt/0012-sched-core-Make-migrate-disable-and-CPU-hotplug-coop.patch
@@ -0,0 +1,137 @@
+From 02829fffd43c5fe3e617d07e0a94d5164324449b Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 23 Oct 2020 12:12:09 +0200
+Subject: [PATCH 012/323] sched/core: Make migrate disable and CPU hotplug
+ cooperative
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+On CPU unplug tasks which are in a migrate disabled region cannot be pushed
+to a different CPU until they returned to migrateable state.
+
+Account the number of tasks on a runqueue which are in a migrate disabled
+section and make the hotplug wait mechanism respect that.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/sched/core.c  | 36 ++++++++++++++++++++++++++++++------
+ kernel/sched/sched.h |  4 ++++
+ 2 files changed, 34 insertions(+), 6 deletions(-)
+
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 3af7c42896c9..2517a003295b 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1735,10 +1735,17 @@ static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
+
+ void migrate_disable(void)
+ {
+-	if (current->migration_disabled++)
++	struct task_struct *p = current;
++
++	if (p->migration_disabled) {
++		p->migration_disabled++;
+ 		return;
++	}
+
+-	barrier();
++	preempt_disable();
++	this_rq()->nr_pinned++;
++	p->migration_disabled = 1;
++	preempt_enable();
+ }
+ EXPORT_SYMBOL_GPL(migrate_disable);
+
+@@ -1765,6 +1772,7 @@ void migrate_enable(void)
+ 	 */
+ 	barrier();
+ 	p->migration_disabled = 0;
++	this_rq()->nr_pinned--;
+ 	preempt_enable();
+ }
+ EXPORT_SYMBOL_GPL(migrate_enable);
+@@ -1774,6 +1782,11 @@ static inline bool is_migration_disabled(struct task_struct *p)
+ 	return p->migration_disabled;
+ }
+
++static inline bool rq_has_pinned_tasks(struct rq *rq)
++{
++	return rq->nr_pinned;
++}
++
+ #endif
+
+ /*
+@@ -2705,6 +2718,11 @@ static inline bool is_migration_disabled(struct task_struct *p)
+ 	return false;
+ }
+
++static inline bool rq_has_pinned_tasks(struct rq *rq)
++{
++	return false;
++}
++
+ #endif
+
+ static void
+@@ -7064,15 +7082,20 @@ static void balance_push(struct rq *rq)
+ 	 * Both the cpu-hotplug and stop task are in this case and are
+ 	 * required to complete the hotplug process.
+ 	 */
+-	if (is_per_cpu_kthread(push_task)) {
++	if (is_per_cpu_kthread(push_task) || is_migration_disabled(push_task)) {
+ 		/*
+ 		 * If this is the idle task on the outgoing CPU try to wake
+ 		 * up the hotplug control thread which might wait for the
+ 		 * last task to vanish. The rcuwait_active() check is
+ 		 * accurate here because the waiter is pinned on this CPU
+ 		 * and can't obviously be running in parallel.
++		 *
++		 * On RT kernels this also has to check whether there are
++		 * pinned and scheduled out tasks on the runqueue. They
++		 * need to leave the migrate disabled section first.
+ 		 */
+-		if (!rq->nr_running && rcuwait_active(&rq->hotplug_wait)) {
++		if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
++		    rcuwait_active(&rq->hotplug_wait)) {
+ 			raw_spin_unlock(&rq->lock);
+ 			rcuwait_wake_up(&rq->hotplug_wait);
+ 			raw_spin_lock(&rq->lock);
+ 		}
+@@ -7119,7 +7142,8 @@ static void balance_hotplug_wait(void)
+ {
+ 	struct rq *rq = this_rq();
+
+-	rcuwait_wait_event(&rq->hotplug_wait, rq->nr_running == 1,
++	rcuwait_wait_event(&rq->hotplug_wait,
++			   rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
+ 			   TASK_UNINTERRUPTIBLE);
+ }
+
+@@ -7366,7 +7390,7 @@ int sched_cpu_dying(unsigned int cpu)
+ 	sched_tick_stop(cpu);
+
+ 	rq_lock_irqsave(rq, &rf);
+-	BUG_ON(rq->nr_running != 1);
++	BUG_ON(rq->nr_running != 1 || rq_has_pinned_tasks(rq));
+ 	rq_unlock_irqrestore(rq, &rf);
+
+ 	calc_load_migrate(rq);
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index f3109adda484..8237c9ab2bb8 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -1057,6 +1057,10 @@ struct rq {
+ 	/* Must be inspected within a rcu lock section */
+ 	struct cpuidle_state *idle_state;
+ #endif
++
++#if defined(CONFIG_PREEMPT_RT) && defined(CONFIG_SMP)
++	unsigned int nr_pinned;
++#endif
+ };
+
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+--
+2.43.0
+
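To make the accounting concrete, here is a minimal userspace sketch of the scheme this patch introduces: a sketch under stated assumptions, not kernel code. A pthread mutex and condition variable stand in for the runqueue lock and the rcuwait-based hotplug wait, a thread-local counter stands in for task_struct::migration_disabled, and the rq/worker/main scaffolding is hypothetical; only the names migrate_disable(), migrate_enable(), nr_pinned, and balance_hotplug_wait() mirror the patch.

```c
/*
 * Toy model of the nr_pinned accounting: a per-task nesting counter on
 * the fast path, plus a per-runqueue pin count the hotplug side waits on.
 * Compile with: cc -pthread model.c
 */
#include <pthread.h>
#include <stdio.h>

struct rq {
	pthread_mutex_t lock;          /* stands in for rq->lock */
	pthread_cond_t  hotplug_wait;  /* stands in for rq->hotplug_wait (rcuwait) */
	int             nr_pinned;     /* tasks inside a migrate-disabled section */
};

static struct rq this_rq = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.hotplug_wait = PTHREAD_COND_INITIALIZER,
};

/* Per-"task" nesting depth, like task_struct::migration_disabled. */
static __thread int migration_disabled;

static void migrate_disable(void)
{
	if (migration_disabled) {
		/* Nested call: bump the private counter, touch nothing shared. */
		migration_disabled++;
		return;
	}
	/* Outermost entry: the task pins itself to its runqueue. */
	pthread_mutex_lock(&this_rq.lock);
	this_rq.nr_pinned++;
	pthread_mutex_unlock(&this_rq.lock);
	migration_disabled = 1;
}

static void migrate_enable(void)
{
	if (migration_disabled > 1) {
		migration_disabled--;
		return;
	}
	/* Outermost exit: drop the pin and wake a waiting hotplug thread. */
	migration_disabled = 0;
	pthread_mutex_lock(&this_rq.lock);
	if (--this_rq.nr_pinned == 0)
		pthread_cond_broadcast(&this_rq.hotplug_wait);
	pthread_mutex_unlock(&this_rq.lock);
}

/* Mirrors balance_hotplug_wait(): block until no task is pinned here. */
static void balance_hotplug_wait(void)
{
	pthread_mutex_lock(&this_rq.lock);
	while (this_rq.nr_pinned)
		pthread_cond_wait(&this_rq.hotplug_wait, &this_rq.lock);
	pthread_mutex_unlock(&this_rq.lock);
}

static void *worker(void *arg)
{
	(void)arg;
	migrate_disable();
	migrate_disable();   /* nesting: only migration_disabled changes */
	migrate_enable();
	migrate_enable();    /* outermost enable decrements nr_pinned */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	/* Returns once nr_pinned is zero; if the worker has not pinned
	 * itself yet, there is legitimately nothing to wait for. */
	balance_hotplug_wait();
	pthread_join(t, NULL);
	printf("nr_pinned = %d\n", this_rq.nr_pinned);
	return 0;
}
```

The split mirrors the design choice visible in the patch: nesting depth lives in the task, so the common nested path touches no shared state, while the per-runqueue nr_pinned count is the one thing sched_cpu_dying() and the hotplug waiter need to check. In the kernel, the preempt_disable()/preempt_enable() pair around the update plays the role the mutex plays here, keeping this_rq() stable while nr_pinned changes.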