Diffstat (limited to 'debian/patches-rt/0306-sched-migrate_enable-Use-select_fallback_rq.patch')
-rw-r--r--  debian/patches-rt/0306-sched-migrate_enable-Use-select_fallback_rq.patch  62
1 file changed, 62 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0306-sched-migrate_enable-Use-select_fallback_rq.patch b/debian/patches-rt/0306-sched-migrate_enable-Use-select_fallback_rq.patch
new file mode 100644
index 000000000..8177c1f0a
--- /dev/null
+++ b/debian/patches-rt/0306-sched-migrate_enable-Use-select_fallback_rq.patch
@@ -0,0 +1,62 @@
+From: Scott Wood <swood@redhat.com>
+Date: Sat, 12 Oct 2019 01:52:12 -0500
+Subject: [PATCH 306/342] sched: migrate_enable: Use select_fallback_rq()
+Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=b19de5d2c7678334eef4c5e764fcbe08f4fce1f6
+
+[ Upstream commit adfa969d4cfcc995a9d866020124e50f1827d2d1 ]
+
+migrate_enable() currently open-codes a variant of select_fallback_rq().
+However, it does not have the "No more Mr. Nice Guy" fallback and thus
+it will pass an invalid CPU to the migration thread if cpus_mask only
+contains a CPU that is !active.
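+
+For reference, below is a condensed sketch of the fallback loop in
+select_fallback_rq(), using this tree's field and helper names; it is
+a sketch only, as the real function also prefers node-local CPUs and
+special-cases kernel threads:
+
+	enum { cpuset, possible, fail } state = cpuset;
+	int dest_cpu;
+
+	for (;;) {
+		/* Any allowed, online CPU? */
+		for_each_cpu(dest_cpu, &p->cpus_mask)
+			if (is_cpu_allowed(p, dest_cpu))
+				return dest_cpu;
+
+		/* No more Mr. Nice Guy. */
+		switch (state) {
+		case cpuset:
+			/* Widen to the task's cpuset and retry. */
+			cpuset_cpus_allowed_fallback(p);
+			state = possible;
+			break;
+		case possible:
+			/* Last resort: allow every possible CPU. */
+			do_set_cpus_allowed(p, cpu_possible_mask);
+			state = fail;
+			break;
+		case fail:
+			BUG();
+		}
+	}
+
+It is this final widening step that the open-coded copy lacks:
+cpumask_any_and() returns nr_cpu_ids when cpus_mask and cpu_active_mask
+do not intersect, and that invalid CPU is handed to the migration thread.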
+
+Signed-off-by: Scott Wood <swood@redhat.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+---
+ kernel/sched/core.c | 25 ++++++++++---------------
+ 1 file changed, 10 insertions(+), 15 deletions(-)
+
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 6f91ee2b7e91..7eb3037c0b35 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -7329,6 +7329,7 @@ void migrate_enable(void)
+ if (p->migrate_disable_update) {
+ struct rq *rq;
+ struct rq_flags rf;
++ int cpu = task_cpu(p);
+
+ rq = task_rq_lock(p, &rf);
+ update_rq_clock(rq);
+@@ -7338,21 +7339,15 @@ void migrate_enable(void)
+
+ p->migrate_disable_update = 0;
+
+- WARN_ON(smp_processor_id() != task_cpu(p));
+- if (!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
+- const struct cpumask *cpu_valid_mask = cpu_active_mask;
+- struct migration_arg arg;
+- unsigned int dest_cpu;
+-
+- if (p->flags & PF_KTHREAD) {
+- /*
+- * Kernel threads are allowed on online && !active CPUs
+- */
+- cpu_valid_mask = cpu_online_mask;
+- }
+- dest_cpu = cpumask_any_and(cpu_valid_mask, &p->cpus_mask);
+- arg.task = p;
+- arg.dest_cpu = dest_cpu;
++ WARN_ON(smp_processor_id() != cpu);
++ if (!cpumask_test_cpu(cpu, &p->cpus_mask)) {
++ struct migration_arg arg = { p };
++ struct rq_flags rf;
++
++ rq = task_rq_lock(p, &rf);
++ update_rq_clock(rq);
++ arg.dest_cpu = select_fallback_rq(cpu, p);
++ task_rq_unlock(rq, p, &rf);
+
+ unpin_current_cpu();
+ preempt_lazy_enable();