Diffstat (limited to 'debian/patches-rt/0316-sched-migrate_enable-Use-per-cpu-cpu_stop_work.patch')
-rw-r--r--  debian/patches-rt/0316-sched-migrate_enable-Use-per-cpu-cpu_stop_work.patch  83
1 file changed, 83 insertions, 0 deletions
diff --git a/debian/patches-rt/0316-sched-migrate_enable-Use-per-cpu-cpu_stop_work.patch b/debian/patches-rt/0316-sched-migrate_enable-Use-per-cpu-cpu_stop_work.patch
new file mode 100644
index 000000000..fcff1b8a4
--- /dev/null
+++ b/debian/patches-rt/0316-sched-migrate_enable-Use-per-cpu-cpu_stop_work.patch
@@ -0,0 +1,83 @@
+From 5595f7a45953987f56b3e64c3a20f6f90735d4a1 Mon Sep 17 00:00:00 2001
+From: Scott Wood <swood@redhat.com>
+Date: Fri, 24 Jan 2020 06:11:46 -0500
+Subject: [PATCH 316/347] sched: migrate_enable: Use per-cpu cpu_stop_work
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+[ Upstream commit 2dcd94b443c5dcbc20281666321b7f025f9cc85c ]
+
+Commit e6c287b1512d ("sched: migrate_enable: Use stop_one_cpu_nowait()")
+adds a busy wait to deal with an edge case where the migrated thread
+can resume running on another CPU before the stopper has consumed
+cpu_stop_work. However, this is done with preemption disabled and can
+potentially lead to deadlock.
+
+While it is not guaranteed that the cpu_stop_work will be consumed before
+the migrating thread resumes and exits the stack frame, it is guaranteed
+that nothing other than the stopper can run on the old CPU between the
+migrating thread scheduling out and the cpu_stop_work being consumed.
+Thus, we can store cpu_stop_work in per-cpu data without it being
+reused too early.
+
+Fixes: e6c287b1512d ("sched: migrate_enable: Use stop_one_cpu_nowait()")
+Suggested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Scott Wood <swood@redhat.com>
+Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+---
+ kernel/sched/core.c | 22 ++++++++++++++--------
+ 1 file changed, 14 insertions(+), 8 deletions(-)
+
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index d190d9e6b10f..8b946353b1d7 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -7285,6 +7285,9 @@ static void migrate_disabled_sched(struct task_struct *p)
+ p->migrate_disable_scheduled = 1;
+ }
+
++static DEFINE_PER_CPU(struct cpu_stop_work, migrate_work);
++static DEFINE_PER_CPU(struct migration_arg, migrate_arg);
++
+ void migrate_enable(void)
+ {
+ struct task_struct *p = current;
+@@ -7323,23 +7326,26 @@ void migrate_enable(void)
+
+ WARN_ON(smp_processor_id() != cpu);
+ if (!is_cpu_allowed(p, cpu)) {
+- struct migration_arg arg = { .task = p };
+- struct cpu_stop_work work;
++ struct migration_arg __percpu *arg;
++ struct cpu_stop_work __percpu *work;
+ struct rq_flags rf;
+
++ work = this_cpu_ptr(&migrate_work);
++ arg = this_cpu_ptr(&migrate_arg);
++ WARN_ON_ONCE(!arg->done && !work->disabled && work->arg);
++
++ arg->task = p;
++ arg->done = false;
++
+ rq = task_rq_lock(p, &rf);
+ update_rq_clock(rq);
+- arg.dest_cpu = select_fallback_rq(cpu, p);
++ arg->dest_cpu = select_fallback_rq(cpu, p);
+ task_rq_unlock(rq, p, &rf);
+
+ stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
+- &arg, &work);
++ arg, work);
+ tlb_migrate_finish(p->mm);
+ __schedule(true);
+- if (!work.disabled) {
+- while (!arg.done)
+- cpu_relax();
+- }
+ }
+
+ out:
+--
+2.36.1
+
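The commit message above captures the key invariant: once the migrating thread schedules out, nothing but the stopper can run on the old CPU until the queued cpu_stop_work is consumed, so a single per-CPU slot cannot be reused too early, and the on-stack item plus busy wait become unnecessary. For readers outside the kernel tree, below is a minimal userspace C sketch of that per-CPU storage idea. It is illustrative only: NR_CPUS, stop_work, this_cpu_work(), and queue_migration() are stand-ins invented for this sketch, not the kernel API (the patch itself uses DEFINE_PER_CPU, this_cpu_ptr(), and stop_one_cpu_nowait()).

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

/* Stand-in for struct cpu_stop_work + struct migration_arg. */
struct stop_work {
	int dest_cpu;
	bool done;
};

/* Kernel: DEFINE_PER_CPU(struct cpu_stop_work, migrate_work);
 * modelled here as a plain array indexed by CPU id. */
static struct stop_work per_cpu_work[NR_CPUS];

/* Kernel: this_cpu_ptr(&migrate_work). */
static struct stop_work *this_cpu_work(int cpu)
{
	return &per_cpu_work[cpu];
}

static void queue_migration(int cpu, int dest)
{
	struct stop_work *w = this_cpu_work(cpu);

	/* Safe to reuse the slot: per the patch's invariant, only the
	 * stopper ran on this CPU since the previous item was queued,
	 * so that item must already have been consumed. */
	w->dest_cpu = dest;
	w->done = false;

	/* Kernel: stop_one_cpu_nowait(cpu, migration_cpu_stop, w, ...);
	 * the caller may now return without spinning on w->done,
	 * because w no longer lives on the caller's stack. */
}

int main(void)
{
	queue_migration(0, 2);
	printf("queued work on CPU 0 -> dest_cpu %d\n",
	       per_cpu_work[0].dest_cpu);
	return 0;
}

Contrast this with the removed hunk above: the old code kept cpu_stop_work on the stack and therefore had to spin on arg.done with preemption disabled before the frame could unwind, which is exactly the potential deadlock the patch eliminates.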