From: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Date: Wed, 5 Oct 2011 11:45:18 -0700
Subject: [PATCH 159/342] rcu: Make ksoftirqd do RCU quiescent states
Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=910ed8de1b690c46028f322611f08647be22a46e

Implementing RCU-bh in terms of RCU-preempt makes the system vulnerable
to network-based denial-of-service attacks. This patch therefore
makes __do_softirq() invoke rcu_bh_qs(), but only when __do_softirq()
is running in ksoftirqd context. A wrapper layer is interposed so that
other calls to __do_softirq() avoid invoking rcu_bh_qs(). The underlying
function __do_softirq_common() does the actual work.
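Roughly, the layering described above looks like the sketch below (a sketch
only: __do_softirq(), __do_softirq_common() and rcu_bh_qs() come from this
description, the no-QS wrapper name is a placeholder, and none of this is
taken from the forward-ported hunks further down):

    /* The underlying function: does the actual softirq processing. */
    static void __do_softirq_common(void)
    {
            /* ... process pending softirqs ... */
    }

    /* Path used from ksoftirqd: the only place an RCU-bh quiescent
     * state may be reported. */
    void __do_softirq(void)
    {
            rcu_bh_qs();
            __do_softirq_common();
    }

    /* Placeholder wrapper for every other call site, for example
     * local_bh_enable(), which must not report a quiescent state. */
    static void do_softirq_no_qs(void)
    {
            __do_softirq_common();
    }
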
The reason that rcu_bh_qs() is bad in these non-ksoftirqd contexts is
that there might be a local_bh_enable() inside an RCU-preempt read-side
critical section. This local_bh_enable() can invoke __do_softirq()
directly, so if __do_softirq() were to invoke rcu_bh_qs() (which just
calls rcu_preempt_qs() in the PREEMPT_RT_FULL case), there would be
an illegal RCU-preempt quiescent state in the middle of an RCU-preempt
read-side critical section. Therefore, quiescent states can only happen
in cases where __do_softirq() is invoked directly from ksoftirqd.
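For illustration, a minimal sketch of the forbidden sequence (the reader
function is hypothetical; only the ordering of the calls matters):

    void example_reader(void)
    {
            rcu_read_lock();        /* RCU-preempt read-side critical section */

            local_bh_disable();
            /* ... work that leaves softirqs pending ... */
            local_bh_enable();      /* may invoke __do_softirq() right here */

            /*
             * If __do_softirq() reported an RCU-bh quiescent state at this
             * point, PREEMPT_RT_FULL would turn it into rcu_preempt_qs()
             * while this CPU is still inside rcu_read_lock(), i.e. an
             * illegal quiescent state in the middle of a read-side critical
             * section.  ksoftirqd runs in its own thread, never inside a
             * caller's read-side critical section, so reporting only from
             * ksoftirqd context is safe.
             */

            rcu_read_unlock();
    }
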
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/20111005184518.GA21601@linux.vnet.ibm.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 kernel/rcu/tree.c        | 18 +++++++++++++-----
 kernel/rcu/tree_plugin.h |  8 +++++++-
 2 files changed, 20 insertions(+), 6 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 1456a3d97971..1a40e3d44cb8 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -244,7 +244,19 @@ void rcu_sched_qs(void)
this_cpu_ptr(&rcu_sched_data), true);
}

-#ifndef CONFIG_PREEMPT_RT_FULL
+#ifdef CONFIG_PREEMPT_RT_FULL
+static void rcu_preempt_qs(void);
+
+void rcu_bh_qs(void)
+{
+ unsigned long flags;
+
+ /* Callers to this function, rcu_preempt_qs(), must disable irqs. */
+ local_irq_save(flags);
+ rcu_preempt_qs();
+ local_irq_restore(flags);
+}
+#else
void rcu_bh_qs(void)
{
RCU_LOCKDEP_WARN(preemptible(), "rcu_bh_qs() invoked with preemption enabled!!!");
@@ -255,10 +267,6 @@ void rcu_bh_qs(void)
__this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
}
}
-#else
-void rcu_bh_qs(void)
-{
-}
#endif

/*
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index e9ce51e19e87..938e64c69d18 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -29,6 +29,7 @@
#include <linux/oom.h>
#include <linux/sched/debug.h>
#include <linux/smpboot.h>
+#include <linux/jiffies.h>
#include <linux/sched/isolation.h>
#include <uapi/linux/sched/types.h>
#include "../time/tick-internal.h"
@@ -1408,7 +1409,7 @@ static void rcu_prepare_kthreads(int cpu)

#endif /* #else #ifdef CONFIG_RCU_BOOST */

-#if !defined(CONFIG_RCU_FAST_NO_HZ)
+#if !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL)

/*
* Check to see if any future RCU-related work will need to be done
@@ -1424,7 +1425,9 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt)
*nextevt = KTIME_MAX;
return rcu_cpu_has_callbacks(NULL);
}
+#endif /* !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) */

+#if !defined(CONFIG_RCU_FAST_NO_HZ)
/*
* Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
* after it.
@@ -1521,6 +1524,8 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
return cbs_ready;
}

+#ifndef CONFIG_PREEMPT_RT_FULL
+
/*
* Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
* to invoke. If the CPU has callbacks, try to advance them. Tell the
@@ -1563,6 +1568,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt)
*nextevt = basemono + dj * TICK_NSEC;
return 0;
}
+#endif /* #ifndef CONFIG_PREEMPT_RT_FULL */

/*
* Prepare a CPU for idle from an RCU perspective. The first major task