Diffstat (limited to 'debian/patches-rt/0307-sched-migrate_enable-Use-select_fallback_rq.patch')
-rw-r--r--  debian/patches-rt/0307-sched-migrate_enable-Use-select_fallback_rq.patch | 66
1 file changed, 66 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0307-sched-migrate_enable-Use-select_fallback_rq.patch b/debian/patches-rt/0307-sched-migrate_enable-Use-select_fallback_rq.patch
new file mode 100644
index 000000000..6de7f0383
--- /dev/null
+++ b/debian/patches-rt/0307-sched-migrate_enable-Use-select_fallback_rq.patch
@@ -0,0 +1,66 @@
+From 86ff244b7222b5d5734b2c195ba28c7bbb5b6b0a Mon Sep 17 00:00:00 2001
+From: Scott Wood <swood@redhat.com>
+Date: Sat, 12 Oct 2019 01:52:12 -0500
+Subject: [PATCH 307/347] sched: migrate_enable: Use select_fallback_rq()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+[ Upstream commit adfa969d4cfcc995a9d866020124e50f1827d2d1 ]
+
+migrate_enable() currently open-codes a variant of select_fallback_rq().
+However, it does not have the "No more Mr. Nice Guy" fallback and thus
+it will pass an invalid CPU to the migration thread if cpus_mask only
+contains a CPU that is !active.
+
+Signed-off-by: Scott Wood <swood@redhat.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+---
+ kernel/sched/core.c | 25 ++++++++++---------------
+ 1 file changed, 10 insertions(+), 15 deletions(-)
+
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 6f91ee2b7e91..7eb3037c0b35 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -7329,6 +7329,7 @@ void migrate_enable(void)
+ if (p->migrate_disable_update) {
+ struct rq *rq;
+ struct rq_flags rf;
++ int cpu = task_cpu(p);
+
+ rq = task_rq_lock(p, &rf);
+ update_rq_clock(rq);
+@@ -7338,21 +7339,15 @@ void migrate_enable(void)
+
+ p->migrate_disable_update = 0;
+
+- WARN_ON(smp_processor_id() != task_cpu(p));
+- if (!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
+- const struct cpumask *cpu_valid_mask = cpu_active_mask;
+- struct migration_arg arg;
+- unsigned int dest_cpu;
+-
+- if (p->flags & PF_KTHREAD) {
+- /*
+- * Kernel threads are allowed on online && !active CPUs
+- */
+- cpu_valid_mask = cpu_online_mask;
+- }
+- dest_cpu = cpumask_any_and(cpu_valid_mask, &p->cpus_mask);
+- arg.task = p;
+- arg.dest_cpu = dest_cpu;
++ WARN_ON(smp_processor_id() != cpu);
++ if (!cpumask_test_cpu(cpu, &p->cpus_mask)) {
++ struct migration_arg arg = { p };
++ struct rq_flags rf;
++
++ rq = task_rq_lock(p, &rf);
++ update_rq_clock(rq);
++ arg.dest_cpu = select_fallback_rq(cpu, p);
++ task_rq_unlock(rq, p, &rf);
+
+ unpin_current_cpu();
+ preempt_lazy_enable();
+--
+2.36.1
+
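
Note on the fix: the commit message above refers to select_fallback_rq()'s
"No more Mr. Nice Guy" step, where the kernel gives up on a task's affinity
mask once it intersects no usable CPU and picks any active CPU instead,
rather than returning an invalid CPU number. The standalone C sketch below
models that failure mode with plain bitmasks; NR_CPUS, the mask values, and
the helpers any_and() and fallback_rq() are illustrative assumptions for
demonstration, not the kernel's actual implementation.

/*
 * Standalone sketch (not kernel code) of why the open-coded fallback in
 * migrate_enable() was unsafe. CPU masks are modelled as plain bitmasks.
 */
#include <stdio.h>

#define NR_CPUS 8

/* Mimics cpumask_any_and(): first set bit in (a & b), or NR_CPUS if none. */
static int any_and(unsigned int a, unsigned int b)
{
	unsigned int both = a & b;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (both & (1u << cpu))
			return cpu;
	return NR_CPUS;	/* invalid CPU number, like >= nr_cpu_ids */
}

/*
 * Mimics the "No more Mr. Nice Guy" step of select_fallback_rq():
 * if the task's mask intersects no active CPU, give up on the task's
 * affinity and pick any active CPU instead.
 */
static int fallback_rq(unsigned int cpus_mask, unsigned int active_mask)
{
	int cpu = any_and(cpus_mask, active_mask);

	if (cpu < NR_CPUS)
		return cpu;
	return any_and(~0u, active_mask);	/* ignore affinity entirely */
}

int main(void)
{
	unsigned int active = 0x0f;	/* CPUs 0-3 active */
	unsigned int cpus_mask = 0x10;	/* task allowed only on CPU 4, which is !active */

	/* Old behaviour: hands an out-of-range CPU to the migration thread. */
	printf("open-coded pick: %d\n", any_and(cpus_mask, active));

	/* New behaviour: always yields a valid destination CPU. */
	printf("fallback pick:   %d\n", fallback_rq(cpus_mask, active));
	return 0;
}

Compiled and run, the first printf mirrors the removed open-coded path and
prints 8 (an out-of-range CPU number, which is what previously reached the
migration thread), while the second mirrors the patched path and prints 0,
a valid destination CPU.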