summaryrefslogtreecommitdiffstats
path: root/debian/patches-rt/0002-softirq-Add-function-to-preempt-serving-softirqs.patch
diff options
context:
space:
mode:
Diffstat (limited to '')
-rw-r--r--debian/patches-rt/0002-softirq-Add-function-to-preempt-serving-softirqs.patch60
1 files changed, 60 insertions, 0 deletions
diff --git a/debian/patches-rt/0002-softirq-Add-function-to-preempt-serving-softirqs.patch b/debian/patches-rt/0002-softirq-Add-function-to-preempt-serving-softirqs.patch
new file mode 100644
index 000000000..f8cddfa44
--- /dev/null
+++ b/debian/patches-rt/0002-softirq-Add-function-to-preempt-serving-softirqs.patch
@@ -0,0 +1,60 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 4 Aug 2023 13:30:38 +0200
+Subject: [PATCH 2/3] softirq: Add function to preempt serving softirqs.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Add a functionality for the softirq handler to preempt its current work
+if needed. The softirq core has no particular state. It reads and resets
+the pending softirq bits and then processes one after the other.
+It can already be preempted while it invokes a certain softirq handler.
+
+By enabling the BH the softirq core releases the per-CPU bh lock which
+serializes all softirq handlers. It is safe to do as long as the code
+does not expect any serialisation in between. A typical scenario would
+be after the invocation of a callback where no state needs to be preserved
+before the next callback is invoked.
+
+Add functionality to preempt the serving softirqs.
+
+Link: https://lore.kernel.org/r/20230804113039.419794-3-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/bottom_half.h | 2 ++
+ kernel/softirq.c | 13 +++++++++++++
+ 2 files changed, 15 insertions(+)
+
+--- a/include/linux/bottom_half.h
++++ b/include/linux/bottom_half.h
+@@ -35,8 +35,10 @@ static inline void local_bh_enable(void)
+
+ #ifdef CONFIG_PREEMPT_RT
+ extern bool local_bh_blocked(void);
++extern void softirq_preempt(void);
+ #else
+ static inline bool local_bh_blocked(void) { return false; }
++static inline void softirq_preempt(void) { }
+ #endif
+
+ #endif /* _LINUX_BH_H */
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -247,6 +247,19 @@ void __local_bh_enable_ip(unsigned long
+ }
+ EXPORT_SYMBOL(__local_bh_enable_ip);
+
++void softirq_preempt(void)
++{
++ if (WARN_ON_ONCE(!preemptible()))
++ return;
++
++ if (WARN_ON_ONCE(__this_cpu_read(softirq_ctrl.cnt) != SOFTIRQ_OFFSET))
++ return;
++
++ __local_bh_enable(SOFTIRQ_OFFSET, true);
++ /* preemption point */
++ __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
++}
++
+ /*
+ * Invoked from ksoftirqd_run() outside of the interrupt disabled section
+ * to acquire the per CPU local lock for reentrancy protection.