From: Scott Wood <swood@redhat.com>
Date: Tue, 24 Sep 2019 14:36:41 +0200
Subject: [PATCH 297/354] =?UTF-8?q?sched:=20migrate=5Fdis/enable:=20Use=20?=
=?UTF-8?q?sleeping=5Flock=E2=80=A6()=20to=20annotate=20sleeping=20points?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=f1a7c6b6fc4b40b5f8abbf40f75ba741feb4d95a

[ Upstream commit 4230dd3824c3e1785504e6f757ce79a4b55651fa ]

Without this, rcu_note_context_switch() will complain if an RCU read lock
is held when migrate_enable() calls stop_one_cpu(). The same happens when
migrate_disable() calls pin_current_cpu(), which calls __read_rt_lock()
and thereby bypasses the part of the mutex code that calls
sleeping_lock_inc().
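
Purely as an illustrative sketch (not part of the change itself; the
function name below is hypothetical), the annotation pattern is to bracket
the sleeping call with the counter helpers so the RT/RCU debug checks can
tell that the resulting context switch comes from a sleeping lock:

	/*
	 * Hypothetical illustration of the pattern applied by this patch:
	 * mark the following block as a sleeping-lock style sleep so that
	 * rcu_note_context_switch() does not complain if it happens while
	 * an RCU read lock is held.
	 */
	static void example_annotated_sleep(struct rt_rw_lock *lock)
	{
		sleeping_lock_inc();	/* about to sleep like a sleeping lock */
		__read_rt_lock(lock);	/* may block and schedule away */
		sleeping_lock_dec();	/* done sleeping, drop the annotation */
	}
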
Signed-off-by: Scott Wood <swood@redhat.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
[bigeasy: use sleeping_lock_…() ]
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 kernel/cpu.c        | 2 ++
 kernel/sched/core.c | 3 +++
 2 files changed, 5 insertions(+)

diff --git a/kernel/cpu.c b/kernel/cpu.c
index 9547c5bcbbc4..e0b8387ac924 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -310,7 +310,9 @@ void pin_current_cpu(void)
 	preempt_lazy_enable();
 	preempt_enable();
 
+	sleeping_lock_inc();
 	__read_rt_lock(cpuhp_pin);
+	sleeping_lock_dec();
 
 	preempt_disable();
 	preempt_lazy_disable();
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index dded9ca916eb..223a5a23834a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7369,7 +7369,10 @@ void migrate_enable(void)
 			unpin_current_cpu();
 			preempt_lazy_enable();
 			preempt_enable();
+
+			sleeping_lock_inc();
 			stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
+			sleeping_lock_dec();
 			tlb_migrate_finish(p->mm);
 
 			return;