Diffstat (limited to 'debian/patches-rt/0295-sched-Don-t-defer-CPU-pick-to-migration_cpu_stop.patch')
 debian/patches-rt/0295-sched-Don-t-defer-CPU-pick-to-migration_cpu_stop.patch | 100 ++++++++++++++++
 1 file changed, 100 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0295-sched-Don-t-defer-CPU-pick-to-migration_cpu_stop.patch b/debian/patches-rt/0295-sched-Don-t-defer-CPU-pick-to-migration_cpu_stop.patch
new file mode 100644
index 000000000..e05440c01
--- /dev/null
+++ b/debian/patches-rt/0295-sched-Don-t-defer-CPU-pick-to-migration_cpu_stop.patch
@@ -0,0 +1,100 @@
+From b625852d41e17fb13b4caf7192734866534d9799 Mon Sep 17 00:00:00 2001
+From: Valentin Schneider <valentin.schneider@arm.com>
+Date: Tue, 8 Jun 2021 00:37:36 -0400
+Subject: [PATCH 295/323] sched: Don't defer CPU pick to migration_cpu_stop()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+commit 475ea6c60279e9f2ddf7e4cf2648cd8ae0608361 upstream.
+
+Will reported that the 'XXX __migrate_task() can fail' in migration_cpu_stop()
+can happen, and it *is* sort of a big deal. Looking at it some more, one
+will note there is a glaring hole in the deferred CPU selection:
+
+ (w/ CONFIG_CPUSET=n, so that the affinity mask passed via taskset doesn't
+ get AND'd with cpu_online_mask)
+
+ $ taskset -pc 0-2 $PID
+ # offline CPUs 3-4
+ $ taskset -pc 3-5 $PID
+ `\
+ $PID may stay on 0-2 due to the cpumask_any_distribute() picking an
+ offline CPU and __migrate_task() refusing to do anything due to
+ cpu_is_allowed().
+
+set_cpus_allowed_ptr() goes to some length to pick a dest_cpu that matches
+the right constraints vs affinity and the online/active state of the
+CPUs. Reuse that instead of discarding it in the affine_move_task() case.
+
+Fixes: 6d337eab041d ("sched: Fix migrate_disable() vs set_cpus_allowed_ptr()")
+Reported-by: Will Deacon <will@kernel.org>
+Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20210526205751.842360-2-valentin.schneider@arm.com
+Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+---
+ kernel/sched/core.c | 20 ++++++++++++--------
+ 1 file changed, 12 insertions(+), 8 deletions(-)
+
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index d3b9d69171a2..81b342d6629b 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1958,7 +1958,6 @@ static int migration_cpu_stop(void *data)
+ struct migration_arg *arg = data;
+ struct set_affinity_pending *pending = arg->pending;
+ struct task_struct *p = arg->task;
+- int dest_cpu = arg->dest_cpu;
+ struct rq *rq = this_rq();
+ bool complete = false;
+ struct rq_flags rf;
+@@ -1991,19 +1990,15 @@ static int migration_cpu_stop(void *data)
+ if (p->migration_pending == pending)
+ p->migration_pending = NULL;
+ complete = true;
+- }
+
+- if (dest_cpu < 0) {
+ if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask))
+ goto out;
+-
+- dest_cpu = cpumask_any_distribute(&p->cpus_mask);
+ }
+
+ if (task_on_rq_queued(p))
+- rq = __migrate_task(rq, &rf, p, dest_cpu);
++ rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
+ else
+- p->wake_cpu = dest_cpu;
++ p->wake_cpu = arg->dest_cpu;
+
+ /*
+ * XXX __migrate_task() can fail, at which point we might end
+@@ -2282,7 +2277,7 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
+ init_completion(&my_pending.done);
+ my_pending.arg = (struct migration_arg) {
+ .task = p,
+- .dest_cpu = -1, /* any */
++ .dest_cpu = dest_cpu,
+ .pending = &my_pending,
+ };
+
+@@ -2290,6 +2285,15 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
+ } else {
+ pending = p->migration_pending;
+ refcount_inc(&pending->refs);
++ /*
++ * Affinity has changed, but we've already installed a
++ * pending. migration_cpu_stop() *must* see this, else
++ * we risk a completion of the pending despite having a
++ * task on a disallowed CPU.
++ *
++ * Serialized by p->pi_lock, so this is safe.
++ */
++ pending->arg.dest_cpu = dest_cpu;
+ }
+ }
+ pending = p->migration_pending;
+--
+2.43.0
+
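
Note (not part of the patch above): the reproduction steps in the changelog drive the scheduler through the same sched_setaffinity(2) path that taskset uses. The small userspace sketch below only illustrates those two affinity requests; it is an assumption-laden illustration, not a reproducer. It does not offline any CPUs itself, and actually hitting the bug additionally requires CONFIG_CPUSET=n, a kernel without this fix, and taking CPUs 3-4 offline between the two calls (for example via /sys/devices/system/cpu/cpuN/online). The CPU numbers 0-5 simply mirror the example in the changelog.

 /*
  * Illustrative only: performs the two affinity changes from the
  * changelog ("taskset -pc 0-2" followed by "taskset -pc 3-5") on the
  * calling process.  It does not offline CPUs and does not reproduce
  * the race by itself.
  */
 #define _GNU_SOURCE
 #include <sched.h>
 #include <stdio.h>

 int main(void)
 {
 	cpu_set_t set;

 	/* Step 1: restrict ourselves to CPUs 0-2, as in "taskset -pc 0-2". */
 	CPU_ZERO(&set);
 	for (int cpu = 0; cpu <= 2; cpu++)
 		CPU_SET(cpu, &set);
 	if (sched_setaffinity(0, sizeof(set), &set))
 		perror("sched_setaffinity 0-2");

 	/* (In the bug scenario, CPUs 3-4 are taken offline at this point.) */

 	/* Step 2: request CPUs 3-5, as in "taskset -pc 3-5". */
 	CPU_ZERO(&set);
 	for (int cpu = 3; cpu <= 5; cpu++)
 		CPU_SET(cpu, &set);
 	if (sched_setaffinity(0, sizeof(set), &set))
 		perror("sched_setaffinity 3-5");

 	/* Report which CPU we actually ended up running on. */
 	printf("running on CPU %d\n", sched_getcpu());
 	return 0;
 }

With the pre-fix kernel described in the changelog, the task could remain on CPUs 0-2 after the second call because migration_cpu_stop() deferred the CPU pick and could select an offline CPU; with this patch applied, the destination CPU chosen by set_cpus_allowed_ptr() against the online/active masks is passed through and used directly.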