Diffstat:
 debian/patches-rt/0294-sched-Simplify-set_affinity_pending-refcounts.patch | 129
 1 file changed, 129 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0294-sched-Simplify-set_affinity_pending-refcounts.patch b/debian/patches-rt/0294-sched-Simplify-set_affinity_pending-refcounts.patch
new file mode 100644
index 000000000..1230259b8
--- /dev/null
+++ b/debian/patches-rt/0294-sched-Simplify-set_affinity_pending-refcounts.patch
@@ -0,0 +1,129 @@
+From 6681b566fe9ffe9365121a790537ada2e4ef97ba Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 8 Jun 2021 00:37:35 -0400
+Subject: [PATCH 294/323] sched: Simplify set_affinity_pending refcounts
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+commit 50caf9c14b1498c90cf808dbba2ca29bd32ccba4 upstream.
+
+Now that we have set_affinity_pending::stop_pending to indicate if a
+stopper is in progress, and we have the guarantee that if that stopper
+exists, it will (eventually) complete our @pending, we can simplify the
+refcount scheme by no longer counting the stopper thread.
+
+Fixes: 6d337eab041d ("sched: Fix migrate_disable() vs set_cpus_allowed_ptr()")
+Cc: stable@kernel.org
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
+Link: https://lkml.kernel.org/r/20210224131355.724130207@infradead.org
+Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+---
+ kernel/sched/core.c | 32 ++++++++++++++++++++------------
+ 1 file changed, 20 insertions(+), 12 deletions(-)
+
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 76fa3daf1f60..d3b9d69171a2 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1914,6 +1914,10 @@ struct migration_arg {
+ struct set_affinity_pending *pending;
+ };
+
++/*
++ * @refs: number of wait_for_completion()
++ * @stop_pending: is @stop_work in use
++ */
+ struct set_affinity_pending {
+ refcount_t refs;
+ unsigned int stop_pending;
+@@ -2049,10 +2053,6 @@ static int migration_cpu_stop(void *data)
+ if (complete)
+ complete_all(&pending->done);
+
+- /* For pending->{arg,stop_work} */
+- if (pending && refcount_dec_and_test(&pending->refs))
+- wake_up_var(&pending->refs);
+-
+ return 0;
+ }
+
+@@ -2251,12 +2251,16 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
+ push_task = get_task_struct(p);
+ }
+
++ /*
++ * If there are pending waiters, but no pending stop_work,
++ * then complete now.
++ */
+ pending = p->migration_pending;
+- if (pending) {
+- refcount_inc(&pending->refs);
++ if (pending && !pending->stop_pending) {
+ p->migration_pending = NULL;
+ complete = true;
+ }
++
+ task_rq_unlock(rq, p, rf);
+
+ if (push_task) {
+@@ -2265,7 +2269,7 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
+ }
+
+ if (complete)
+- goto do_complete;
++ complete_all(&pending->done);
+
+ return 0;
+ }
+@@ -2316,9 +2320,9 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
+ if (!stop_pending)
+ pending->stop_pending = true;
+
+- refcount_inc(&pending->refs); /* pending->{arg,stop_work} */
+ if (flags & SCA_MIGRATE_ENABLE)
+ p->migration_flags &= ~MDF_PUSH;
++
+ task_rq_unlock(rq, p, rf);
+
+ if (!stop_pending) {
+@@ -2334,12 +2338,13 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
+ if (task_on_rq_queued(p))
+ rq = move_queued_task(rq, rf, p, dest_cpu);
+
+- p->migration_pending = NULL;
+- complete = true;
++ if (!pending->stop_pending) {
++ p->migration_pending = NULL;
++ complete = true;
++ }
+ }
+ task_rq_unlock(rq, p, rf);
+
+-do_complete:
+ if (complete)
+ complete_all(&pending->done);
+ }
+@@ -2347,7 +2352,7 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
+ wait_for_completion(&pending->done);
+
+ if (refcount_dec_and_test(&pending->refs))
+- wake_up_var(&pending->refs);
++ wake_up_var(&pending->refs); /* No UaF, just an address */
+
+ /*
+ * Block the original owner of &pending until all subsequent callers
+@@ -2355,6 +2360,9 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
+ */
+ wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs));
+
++ /* ARGH */
++ WARN_ON_ONCE(my_pending.stop_pending);
++
+ return 0;
+ }
+
+--
+2.43.0
+
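Aside: the refcount pattern this patch converges on — pending->refs counting only wait_for_completion() callers, with the last refcount_dec_and_test() doing a wake_up_var() so the owner's wait_var_event() can let the on-stack set_affinity_pending die — can be illustrated with a small user-space analogue. The sketch below assumes C11 atomics and pthreads; struct pending, pending_put() and pending_wait_zero() are hypothetical names chosen for illustration, not kernel APIs.

    /* User-space analogue of the post-patch pending->refs scheme: only
     * waiters hold references, and the owner blocks until the count
     * reaches zero, mirroring
     * wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs)).
     * Build with: cc -std=c11 -pthread sketch.c
     */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct pending {
        atomic_int      refs;   /* like pending->refs: number of waiters */
        pthread_mutex_t lock;
        pthread_cond_t  wake;   /* stands in for wake_up_var()/wait_var_event() */
    };

    /* Like refcount_dec_and_test() + wake_up_var(): drop one reference
     * and, if it was the last, wake whoever waits on the count. */
    static void pending_put(struct pending *p)
    {
        if (atomic_fetch_sub(&p->refs, 1) == 1) {
            pthread_mutex_lock(&p->lock);
            pthread_cond_broadcast(&p->wake);
            pthread_mutex_unlock(&p->lock);
        }
    }

    /* Like wait_var_event(&refs, !refcount_read(&refs)): the owner of
     * the pending blocks until every waiter has dropped its ref. */
    static void pending_wait_zero(struct pending *p)
    {
        pthread_mutex_lock(&p->lock);
        while (atomic_load(&p->refs) != 0)
            pthread_cond_wait(&p->wake, &p->lock);
        pthread_mutex_unlock(&p->lock);
    }

    /* A waiter: in the kernel this would be a wait_for_completion()
     * caller that took a reference in affine_move_task(). */
    static void *waiter(void *arg)
    {
        pending_put(arg);
        return NULL;
    }

    int main(void)
    {
        struct pending p = {
            .refs = 2,      /* two waiters besides the owner */
            .lock = PTHREAD_MUTEX_INITIALIZER,
            .wake = PTHREAD_COND_INITIALIZER,
        };
        pthread_t t1, t2;

        pthread_create(&t1, NULL, waiter, &p);
        pthread_create(&t2, NULL, waiter, &p);
        pending_wait_zero(&p);  /* owner: all references dropped */
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        puts("all references dropped");
        return 0;
    }

One deliberate difference from the kernel is worth noting: wake_up_var() uses the address purely as a wait-queue hash key and never dereferences it, which is what the upstream "No UaF, just an address" comment asserts. The pthread sketch instead keeps p alive until both threads are joined, because pthread_cond_broadcast() does dereference its argument.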