Diffstat:
 -rw-r--r--  debian/patches-rt/0150-tick-sched-Prevent-false-positive-softirq-pending-wa.patch | 84
 1 file changed, 84 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0150-tick-sched-Prevent-false-positive-softirq-pending-wa.patch b/debian/patches-rt/0150-tick-sched-Prevent-false-positive-softirq-pending-wa.patch
new file mode 100644
index 000000000..179b20d7b
--- /dev/null
+++ b/debian/patches-rt/0150-tick-sched-Prevent-false-positive-softirq-pending-wa.patch
@@ -0,0 +1,84 @@
+From 8f4c53c804fbc30a305bf13376f5748a55ec4944 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 9 Mar 2021 09:55:57 +0100
+Subject: [PATCH 150/323] tick/sched: Prevent false positive softirq pending
+ warnings on RT
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+On RT a task which has soft interrupts disabled can block on a lock and
+schedule out to idle while soft interrupts are pending. This triggers the
+warning in the NOHZ idle code which complains about going idle with pending
+soft interrupts. But as the task is blocked, soft interrupt processing is
+temporarily blocked as well, which means that such a warning is a false
+positive.
+
+To prevent that, check the per-CPU state which indicates that a scheduled-out
+task has soft interrupts disabled.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/bottom_half.h | 6 ++++++
+ kernel/softirq.c | 15 +++++++++++++++
+ kernel/time/tick-sched.c | 2 +-
+ 3 files changed, 22 insertions(+), 1 deletion(-)
+
+diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
+index e4dd613a070e..eed86eb0a1de 100644
+--- a/include/linux/bottom_half.h
++++ b/include/linux/bottom_half.h
+@@ -32,4 +32,10 @@ static inline void local_bh_enable(void)
+ __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
+ }
+
++#ifdef CONFIG_PREEMPT_RT
++extern bool local_bh_blocked(void);
++#else
++static inline bool local_bh_blocked(void) { return false; }
++#endif
++
+ #endif /* _LINUX_BH_H */
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index ed13f6097de8..c9adc5c46248 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -139,6 +139,21 @@ static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
+ .lock = INIT_LOCAL_LOCK(softirq_ctrl.lock),
+ };
+
++/**
++ * local_bh_blocked() - Check for idle whether BH processing is blocked
++ *
++ * Returns false if the per CPU softirq::cnt is 0 otherwise true.
++ *
++ * This is invoked from the idle task to guard against false positive
++ * softirq pending warnings, which would happen when the task which holds
++ * softirq_ctrl::lock was the only running task on the CPU and blocks on
++ * some other lock.
++ */
++bool local_bh_blocked(void)
++{
++ return __this_cpu_read(softirq_ctrl.cnt) != 0;
++}
++
+ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
+ {
+ unsigned long flags;
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index fc79b04b5947..fb10be8e5e92 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -982,7 +982,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
+ if (unlikely(local_softirq_pending())) {
+ static int ratelimit;
+
+- if (ratelimit < 10 &&
++ if (ratelimit < 10 && !local_bh_blocked() &&
+ (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
+ pr_warn("NOHZ tick-stop error: Non-RCU local softirq work is pending, handler #%02x!!!\n",
+ (unsigned int) local_softirq_pending());
+--
+2.43.0
+
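For readers unfamiliar with the mechanism the commit message describes, the following is a small standalone userspace sketch in plain C of the idea behind the change. All names here are illustrative stand-ins, not the kernel's APIs: the point is only that the "pending softirqs while going idle" warning is suppressed whenever a per-CPU counter records that a now-blocked task has bottom halves disabled, because softirq processing is then merely deferred, not lost.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the kernel's per-CPU softirq_ctrl.cnt and
 * local_softirq_pending(); purely illustrative. */
static unsigned int softirq_disable_cnt;
static unsigned int softirq_pending;

/* Analogue of local_bh_blocked(): a non-zero disable count means the task
 * that disabled BH is scheduled out, so processing is deferred, not lost. */
static bool bh_blocked(void)
{
	return softirq_disable_cnt != 0;
}

/* Analogue of the check added to can_stop_idle_tick(): warn only when
 * softirqs are pending and nothing legitimately defers their processing. */
static void maybe_warn_on_idle(void)
{
	if (softirq_pending && !bh_blocked())
		printf("NOHZ-style warning: pending softirqs %#x\n",
		       softirq_pending);
	else if (softirq_pending)
		printf("softirqs %#x pending but BH blocked: no warning\n",
		       softirq_pending);
}

int main(void)
{
	softirq_pending = 0x02;		/* pretend one softirq is pending */

	softirq_disable_cnt = 1;	/* BH-disabling task blocked on a lock */
	maybe_warn_on_idle();		/* false positive suppressed */

	softirq_disable_cnt = 0;	/* nothing defers softirq processing */
	maybe_warn_on_idle();		/* genuine warning fires */
	return 0;
}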