From e85e95b0e59b7d493402419e3a63a06978a06452 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 5 Jul 2018 14:44:51 +0200
Subject: [PATCH 155/347] sched/migrate_disable: fall back to preempt_disable()
 instead of barrier()
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz

On SMP + !RT migrate_disable() is still around. It is no longer part of
spin_lock() so it has almost no users. However, the futex code has a
workaround for the !in_atomic() part of migrate_disable() which fails
because the matching migrate_enable() is no longer part of spin_unlock().

On !SMP + !RT migrate_disable() is reduced to barrier(). This is not optimal
because we have a few spots where a "preempt_disable()" statement was replaced
with "migrate_disable()".

We also used the migrate_disable counter to figure out if a sleeping lock is
acquired, so that RCU does not complain about schedule() during rcu_read_lock()
while a sleeping lock is held. This has changed: we no longer use it and now
have a separate sleeping_lock counter for the RCU purpose.

This means we can now:

- for SMP + RT_BASE
  full migration support, nothing changes here

- for !SMP + RT_BASE
  the migration counting is no longer required. It used to ensure that the
  task is not migrated to another CPU and that this CPU remains online; !SMP
  ensures that already. Move it to CONFIG_SCHED_DEBUG so the counting is done
  for debugging purposes only.

- for all other cases including !RT
  fall back to preempt_disable(). The only remaining users of
  migrate_disable() are those which were converted from preempt_disable()
  and the futex workaround, which is already inside a preempt_disable()
  section due to the spin_lock that is held (see the sketch below).

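In short, the resulting behaviour (a paraphrased sketch of the ifdef
branches, not the literal preempt.h text):

  #if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
          /* full migration disabling, implemented in kernel/sched/core.c */
  #elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
          /* no migration to prevent; only debug counting under
           * CONFIG_SCHED_DEBUG, followed by barrier() */
  #else
          /* !RT and everything else: plain preempt_disable() /
           * preempt_enable() */
  #endif
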
Cc: stable-rt@vger.kernel.org
Reported-by: joe.korty@concurrent-rt.com
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/preempt.h | 6 +++---
include/linux/sched.h | 4 ++--
kernel/sched/core.c | 23 +++++++++++------------
kernel/sched/debug.c | 2 +-
4 files changed, 17 insertions(+), 18 deletions(-)
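
A quick way to observe the new !RT fallback (hypothetical debug snippet, not
part of this patch; assumes CONFIG_PREEMPT_COUNT=y and CONFIG_PREEMPT_RT_BASE
unset):

  migrate_disable();
  /*
   * migrate_disable() now maps to preempt_disable(): the section is
   * visible in preempt_count() and must not sleep.
   */
  WARN_ON_ONCE(preempt_count() == 0);   /* would fire with the old
                                           barrier() mapping */
  migrate_enable();
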
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 9eafc34898b4..ed8413e7140f 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -204,7 +204,7 @@ do { \
#define preemptible() (preempt_count() == 0 && !irqs_disabled())
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
extern void migrate_disable(void);
extern void migrate_enable(void);
@@ -221,8 +221,8 @@ static inline int __migrate_disabled(struct task_struct *p)
}
#else
-#define migrate_disable() barrier()
-#define migrate_enable() barrier()
+#define migrate_disable() preempt_disable()
+#define migrate_enable() preempt_enable()
static inline int __migrate_disabled(struct task_struct *p)
{
return 0;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4f8fcba23c1d..ceb3bdfb6bc4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -667,7 +667,7 @@ struct task_struct {
int nr_cpus_allowed;
const cpumask_t *cpus_ptr;
cpumask_t cpus_mask;
-#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
int migrate_disable;
int migrate_disable_update;
# ifdef CONFIG_SCHED_DEBUG
@@ -675,8 +675,8 @@ struct task_struct {
# endif
#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
- int migrate_disable;
# ifdef CONFIG_SCHED_DEBUG
+ int migrate_disable;
int migrate_disable_atomic;
# endif
#endif
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 555dea10764e..0c44c16244a1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1031,7 +1031,7 @@ void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_ma
p->nr_cpus_allowed = cpumask_weight(new_mask);
}
-#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
int __migrate_disabled(struct task_struct *p)
{
return p->migrate_disable;
@@ -1071,7 +1071,7 @@ static void __do_set_cpus_allowed_tail(struct task_struct *p,
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
-#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
if (__migrate_disabled(p)) {
lockdep_assert_held(&p->pi_lock);
@@ -1145,7 +1145,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
goto out;
-#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
if (__migrate_disabled(p)) {
p->migrate_disable_update = 1;
goto out;
@@ -7204,7 +7204,7 @@ const u32 sched_prio_to_wmult[40] = {
#undef CREATE_TRACE_POINTS
-#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
static inline void
update_nr_migratory(struct task_struct *p, long delta)
@@ -7352,45 +7352,44 @@ EXPORT_SYMBOL(migrate_enable);
#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
void migrate_disable(void)
{
+#ifdef CONFIG_SCHED_DEBUG
struct task_struct *p = current;
if (in_atomic() || irqs_disabled()) {
-#ifdef CONFIG_SCHED_DEBUG
p->migrate_disable_atomic++;
-#endif
return;
}
-#ifdef CONFIG_SCHED_DEBUG
+
if (unlikely(p->migrate_disable_atomic)) {
tracing_off();
WARN_ON_ONCE(1);
}
-#endif
p->migrate_disable++;
+#endif
+ barrier();
}
EXPORT_SYMBOL(migrate_disable);
void migrate_enable(void)
{
+#ifdef CONFIG_SCHED_DEBUG
struct task_struct *p = current;
if (in_atomic() || irqs_disabled()) {
-#ifdef CONFIG_SCHED_DEBUG
p->migrate_disable_atomic--;
-#endif
return;
}
-#ifdef CONFIG_SCHED_DEBUG
if (unlikely(p->migrate_disable_atomic)) {
tracing_off();
WARN_ON_ONCE(1);
}
-#endif
WARN_ON_ONCE(p->migrate_disable <= 0);
p->migrate_disable--;
+#endif
+ barrier();
}
EXPORT_SYMBOL(migrate_enable);
#endif
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index dc3f3e6fa0bd..169b98c12da7 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -988,7 +988,7 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
P(dl.runtime);
P(dl.deadline);
}
-#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
P(migrate_disable);
#endif
P(nr_cpus_allowed);
--
2.36.1