Diffstat (limited to 'debian/patches-rt/0293-sched-Fix-affine_move_task-self-concurrency.patch')
-rw-r--r--  debian/patches-rt/0293-sched-Fix-affine_move_task-self-concurrency.patch  96
1 file changed, 96 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0293-sched-Fix-affine_move_task-self-concurrency.patch b/debian/patches-rt/0293-sched-Fix-affine_move_task-self-concurrency.patch
new file mode 100644
index 000000000..4a906f7cb
--- /dev/null
+++ b/debian/patches-rt/0293-sched-Fix-affine_move_task-self-concurrency.patch
@@ -0,0 +1,96 @@
+From e9faaf024fdd553b55aaed31855385da7e9d505a Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 8 Jun 2021 00:37:34 -0400
+Subject: [PATCH 293/323] sched: Fix affine_move_task() self-concurrency
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+commit 9e81889c7648d48dd5fe13f41cbc99f3c362484a upstream.
+
+Consider:
+
+ sched_setaffinity(p, X); sched_setaffinity(p, Y);
+
+Then the first will install p->migration_pending = &my_pending; and
+issue stop_one_cpu_nowait(pending); and the second one will read
+p->migration_pending and _also_ issue: stop_one_cpu_nowait(pending),
+the _SAME_ @pending.
+
+This causes stopper list corruption.
+
+Add set_affinity_pending::stop_pending, to indicate if a stopper is in
+progress.
+
+Fixes: 6d337eab041d ("sched: Fix migrate_disable() vs set_cpus_allowed_ptr()")
+Cc: stable@kernel.org
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
+Link: https://lkml.kernel.org/r/20210224131355.649146419@infradead.org
+Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+---
+ kernel/sched/core.c | 15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index e28fb33afa95..76fa3daf1f60 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1916,6 +1916,7 @@ struct migration_arg {
+
+ struct set_affinity_pending {
+ refcount_t refs;
++ unsigned int stop_pending;
+ struct completion done;
+ struct cpu_stop_work stop_work;
+ struct migration_arg arg;
+@@ -2034,12 +2035,15 @@ static int migration_cpu_stop(void *data)
+ * determine is_migration_disabled() and so have to chase after
+ * it.
+ */
++ WARN_ON_ONCE(!pending->stop_pending);
+ task_rq_unlock(rq, p, &rf);
+ stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
+ &pending->arg, &pending->stop_work);
+ return 0;
+ }
+ out:
++ if (pending)
++ pending->stop_pending = false;
+ task_rq_unlock(rq, p, &rf);
+
+ if (complete)
+@@ -2235,7 +2239,7 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
+ int dest_cpu, unsigned int flags)
+ {
+ struct set_affinity_pending my_pending = { }, *pending = NULL;
+- bool complete = false;
++ bool stop_pending, complete = false;
+
+ /* Can the task run on the task's current CPU? If so, we're done */
+ if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
+@@ -2308,14 +2312,19 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
+ * anything else we cannot do is_migration_disabled(), punt
+ * and have the stopper function handle it all race-free.
+ */
++ stop_pending = pending->stop_pending;
++ if (!stop_pending)
++ pending->stop_pending = true;
+
+ refcount_inc(&pending->refs); /* pending->{arg,stop_work} */
+ if (flags & SCA_MIGRATE_ENABLE)
+ p->migration_flags &= ~MDF_PUSH;
+ task_rq_unlock(rq, p, rf);
+
+- stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
+- &pending->arg, &pending->stop_work);
++ if (!stop_pending) {
++ stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
++ &pending->arg, &pending->stop_work);
++ }
+
+ if (flags & SCA_MIGRATE_ENABLE)
+ return 0;
+--
+2.43.0
+
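For readers who want to see the guard in isolation, here is a minimal user-space sketch of the pattern the patch adds: a stop_pending flag is tested and set under the same lock that protects the shared pending state, so two racing callers never queue the same work item twice. This is not kernel code; fake_rq_lock, queue_stopper_work and racing_set_affinity are invented stand-ins for task_rq_lock()/task_rq_unlock(), stop_one_cpu_nowait() and the two racing sched_setaffinity() callers described in the commit message.

/*
 * Sketch only: illustrates the "test-and-set stop_pending under the lock"
 * guard from this patch with pthreads; names below are invented.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct set_affinity_pending {
	int  refs;          /* stands in for refcount_t refs                 */
	bool stop_pending;  /* true once stopper work has been queued        */
};

static pthread_mutex_t fake_rq_lock = PTHREAD_MUTEX_INITIALIZER;
static struct set_affinity_pending shared_pending;

/* Stand-in for stop_one_cpu_nowait(): must run at most once per pending,
 * otherwise the (simulated) stopper list would be corrupted. */
static void queue_stopper_work(struct set_affinity_pending *pending)
{
	printf("stopper work queued for pending %p\n", (void *)pending);
}

static void *racing_set_affinity(void *arg)
{
	struct set_affinity_pending *pending = &shared_pending;
	bool stop_pending;

	pthread_mutex_lock(&fake_rq_lock);       /* task_rq_lock() analogue   */
	stop_pending = pending->stop_pending;    /* snapshot under the lock   */
	if (!stop_pending)
		pending->stop_pending = true;    /* this caller queues work   */
	pending->refs++;                         /* pending->{arg,stop_work}  */
	pthread_mutex_unlock(&fake_rq_lock);     /* task_rq_unlock() analogue */

	/* Only the caller that flipped the flag queues the work. */
	if (!stop_pending)
		queue_stopper_work(pending);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, racing_set_affinity, NULL);
	pthread_create(&b, NULL, racing_set_affinity, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

Built with a C compiler and -pthread, the program prints the "queued" line exactly once no matter how the two threads interleave, which is the single-stopper invariant the stop_pending bookkeeping in the hunks above is meant to restore in the scheduler.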