summaryrefslogtreecommitdiffstats
path: root/debian/patches-rt/0292-sched-Optimize-migration_cpu_stop.patch
diff options
context:
space:
mode:
Diffstat (limited to '')
-rw-r--r--debian/patches-rt/0292-sched-Optimize-migration_cpu_stop.patch | 58
1 files changed, 58 insertions, 0 deletions
diff --git a/debian/patches-rt/0292-sched-Optimize-migration_cpu_stop.patch b/debian/patches-rt/0292-sched-Optimize-migration_cpu_stop.patch
new file mode 100644
index 000000000..87ca79ca8
--- /dev/null
+++ b/debian/patches-rt/0292-sched-Optimize-migration_cpu_stop.patch
@@ -0,0 +1,58 @@
+From b1353ab8df1e6dd3bf9afaef3096749db2a193f1 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 8 Jun 2021 00:37:33 -0400
+Subject: [PATCH 292/323] sched: Optimize migration_cpu_stop()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+commit 3f1bc119cd7fc987c8ed25ffb717f99403bb308c upstream.
+
+When the purpose of migration_cpu_stop() is to migrate the task to
+'any' valid CPU, don't migrate the task when it's already running on a
+valid CPU.
+
+Fixes: 6d337eab041d ("sched: Fix migrate_disable() vs set_cpus_allowed_ptr()")
+Cc: stable@kernel.org
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
+Link: https://lkml.kernel.org/r/20210224131355.569238629@infradead.org
+Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+---
+ kernel/sched/core.c | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 3130289baf79..e28fb33afa95 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1988,14 +1988,25 @@ static int migration_cpu_stop(void *data)
+ complete = true;
+ }
+
+- if (dest_cpu < 0)
++ if (dest_cpu < 0) {
++ if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask))
++ goto out;
++
+ dest_cpu = cpumask_any_distribute(&p->cpus_mask);
++ }
+
+ if (task_on_rq_queued(p))
+ rq = __migrate_task(rq, &rf, p, dest_cpu);
+ else
+ p->wake_cpu = dest_cpu;
+
++ /*
++ * XXX __migrate_task() can fail, at which point we might end
++ * up running on a dodgy CPU, AFAICT this can only happen
++ * during CPU hotplug, at which point we'll get pushed out
++ * anyway, so it's probably not a big deal.
++ */
++
+ } else if (pending) {
+ /*
+ * This happens when we get migrated between migrate_enable()'s
+--
+2.43.0
+