From: Scott Wood <swood@redhat.com>
Date: Sat, 12 Oct 2019 01:52:12 -0500
Subject: [PATCH 306/353] sched: migrate_enable: Use select_fallback_rq()
Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=d5eebc90b5a02e2cbf415e6b056bd92cee740473

[ Upstream commit adfa969d4cfcc995a9d866020124e50f1827d2d1 ]

migrate_enable() currently open-codes a variant of select_fallback_rq().
However, it does not have the "No more Mr. Nice Guy" fallback and thus
it will pass an invalid CPU to the migration thread if cpus_mask only
contains a CPU that is !active.
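
For reference, the fallback this patch switches to looks roughly like the
sketch below. This is a simplified paraphrase of select_fallback_rq(), not
the verbatim kernel source: the NUMA-local search and kthread special-casing
are omitted, and the name select_fallback_rq_sketch is ours. It is meant
only to illustrate the "No more Mr. Nice Guy" cascade that the open-coded
variant lacked:

	/* Paraphrased sketch, not the verbatim kernel source. */
	static int select_fallback_rq_sketch(int cpu, struct task_struct *p)
	{
		enum { cpuset, possible, fail } state = cpuset;
		int dest_cpu;

		for (;;) {
			/* Any allowed, active CPU left in the task's mask? */
			for_each_cpu(dest_cpu, &p->cpus_mask)
				if (cpu_active(dest_cpu))
					return dest_cpu;

			/*
			 * "No more Mr. Nice Guy": progressively widen the
			 * affinity mask and retry. The open-coded variant
			 * stopped after a single cpumask_any_and(), which
			 * yields a CPU >= nr_cpu_ids when the intersection
			 * is empty.
			 */
			switch (state) {
			case cpuset:
				cpuset_cpus_allowed_fallback(p);
				state = possible;
				break;
			case possible:
				do_set_cpus_allowed(p, cpu_possible_mask);
				state = fail;
				break;
			case fail:
				BUG();
			}
		}
	}
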
Signed-off-by: Scott Wood <swood@redhat.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
---
 kernel/sched/core.c | 25 ++++++++++---------------
 1 file changed, 10 insertions(+), 15 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0f2c20111c09..8cfb469b23a6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7332,6 +7332,7 @@ void migrate_enable(void)
 	if (p->migrate_disable_update) {
 		struct rq *rq;
 		struct rq_flags rf;
+		int cpu = task_cpu(p);
 
 		rq = task_rq_lock(p, &rf);
 		update_rq_clock(rq);
@@ -7341,21 +7342,15 @@ void migrate_enable(void)
 
 		p->migrate_disable_update = 0;
 
-		WARN_ON(smp_processor_id() != task_cpu(p));
-		if (!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
-			const struct cpumask *cpu_valid_mask = cpu_active_mask;
-			struct migration_arg arg;
-			unsigned int dest_cpu;
-
-			if (p->flags & PF_KTHREAD) {
-				/*
-				 * Kernel threads are allowed on online && !active CPUs
-				 */
-				cpu_valid_mask = cpu_online_mask;
-			}
-			dest_cpu = cpumask_any_and(cpu_valid_mask, &p->cpus_mask);
-			arg.task = p;
-			arg.dest_cpu = dest_cpu;
+		WARN_ON(smp_processor_id() != cpu);
+		if (!cpumask_test_cpu(cpu, &p->cpus_mask)) {
+			struct migration_arg arg = { p };
+			struct rq_flags rf;
+
+			rq = task_rq_lock(p, &rf);
+			update_rq_clock(rq);
+			arg.dest_cpu = select_fallback_rq(cpu, p);
+			task_rq_unlock(rq, p, &rf);
 
 			unpin_current_cpu();
 			preempt_lazy_enable();