author      Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:06:00 +0000
committer   Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:06:00 +0000
commit      b15a952c52a6825376d3e7f6c1bf5c886c6d8b74 (patch)
tree        1500f2f8f276908a36d8126cb632c0d6b1276764 /debian/patches-rt/0124-softirq-Move-related-code-into-one-section.patch
parent      Adding upstream version 5.10.209. (diff)
download    linux-b15a952c52a6825376d3e7f6c1bf5c886c6d8b74.tar.xz
            linux-b15a952c52a6825376d3e7f6c1bf5c886c6d8b74.zip
Adding debian version 5.10.209-2. (tag: debian/5.10.209-2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0124-softirq-Move-related-code-into-one-section.patch')
-rw-r--r--  debian/patches-rt/0124-softirq-Move-related-code-into-one-section.patch | 169
1 file changed, 169 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0124-softirq-Move-related-code-into-one-section.patch b/debian/patches-rt/0124-softirq-Move-related-code-into-one-section.patch
new file mode 100644
index 000000000..fb937d24b
--- /dev/null
+++ b/debian/patches-rt/0124-softirq-Move-related-code-into-one-section.patch
@@ -0,0 +1,169 @@
+From e40ca378bfbd05514961947d57b163831bdc3bbe Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 13 Nov 2020 15:02:18 +0100
+Subject: [PATCH 124/323] softirq: Move related code into one section
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+To prepare for adding an RT-aware variant of softirq serialization and
+processing, move the related code into one section so the necessary
+#ifdeffery is reduced to one.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
+Link: https://lore.kernel.org/r/20201113141733.974214480@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/softirq.c | 107 ++++++++++++++++++++++++-----------------------
+ 1 file changed, 54 insertions(+), 53 deletions(-)
+
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index 09229ad82209..617009ccd82c 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -92,6 +92,13 @@ static bool ksoftirqd_running(unsigned long pending)
+ !__kthread_should_park(tsk);
+ }
+
++#ifdef CONFIG_TRACE_IRQFLAGS
++DEFINE_PER_CPU(int, hardirqs_enabled);
++DEFINE_PER_CPU(int, hardirq_context);
++EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
++EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
++#endif
++
+ /*
+ * preempt_count and SOFTIRQ_OFFSET usage:
+ * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
+@@ -102,17 +109,11 @@ static bool ksoftirqd_running(unsigned long pending)
+ * softirq and whether we just have bh disabled.
+ */
+
++#ifdef CONFIG_TRACE_IRQFLAGS
+ /*
+- * This one is for softirq.c-internal use,
+- * where hardirqs are disabled legitimately:
++ * This is for softirq.c-internal use, where hardirqs are disabled
++ * legitimately:
+ */
+-#ifdef CONFIG_TRACE_IRQFLAGS
+-
+-DEFINE_PER_CPU(int, hardirqs_enabled);
+-DEFINE_PER_CPU(int, hardirq_context);
+-EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
+-EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
+-
+ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
+ {
+ unsigned long flags;
+@@ -203,6 +204,50 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
+ }
+ EXPORT_SYMBOL(__local_bh_enable_ip);
+
++static inline void invoke_softirq(void)
++{
++ if (ksoftirqd_running(local_softirq_pending()))
++ return;
++
++ if (!force_irqthreads) {
++#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
++ /*
++ * We can safely execute softirq on the current stack if
++ * it is the irq stack, because it should be near empty
++ * at this stage.
++ */
++ __do_softirq();
++#else
++ /*
++ * Otherwise, irq_exit() is called on the task stack that can
++ * be potentially deep already. So call softirq in its own stack
++ * to prevent from any overrun.
++ */
++ do_softirq_own_stack();
++#endif
++ } else {
++ wakeup_softirqd();
++ }
++}
++
++asmlinkage __visible void do_softirq(void)
++{
++ __u32 pending;
++ unsigned long flags;
++
++ if (in_interrupt())
++ return;
++
++ local_irq_save(flags);
++
++ pending = local_softirq_pending();
++
++ if (pending && !ksoftirqd_running(pending))
++ do_softirq_own_stack();
++
++ local_irq_restore(flags);
++}
++
+ /*
+ * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
+ * but break the loop if need_resched() is set or after 2 ms.
+@@ -327,24 +372,6 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
+ current_restore_flags(old_flags, PF_MEMALLOC);
+ }
+
+-asmlinkage __visible void do_softirq(void)
+-{
+- __u32 pending;
+- unsigned long flags;
+-
+- if (in_interrupt())
+- return;
+-
+- local_irq_save(flags);
+-
+- pending = local_softirq_pending();
+-
+- if (pending && !ksoftirqd_running(pending))
+- do_softirq_own_stack();
+-
+- local_irq_restore(flags);
+-}
+-
+ /**
+ * irq_enter_rcu - Enter an interrupt context with RCU watching
+ */
+@@ -371,32 +398,6 @@ void irq_enter(void)
+ irq_enter_rcu();
+ }
+
+-static inline void invoke_softirq(void)
+-{
+- if (ksoftirqd_running(local_softirq_pending()))
+- return;
+-
+- if (!force_irqthreads) {
+-#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
+- /*
+- * We can safely execute softirq on the current stack if
+- * it is the irq stack, because it should be near empty
+- * at this stage.
+- */
+- __do_softirq();
+-#else
+- /*
+- * Otherwise, irq_exit() is called on the task stack that can
+- * be potentially deep already. So call softirq in its own stack
+- * to prevent from any overrun.
+- */
+- do_softirq_own_stack();
+-#endif
+- } else {
+- wakeup_softirqd();
+- }
+-}
+-
+ static inline void tick_irq_exit(void)
+ {
+ #ifdef CONFIG_NO_HZ_COMMON
+--
+2.43.0
+
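The change above is purely a code move: invoke_softirq() and do_softirq()
are relocated next to __local_bh_enable_ip(), and the CONFIG_TRACE_IRQFLAGS
per-CPU definitions are hoisted out of the __local_bh_disable_ip() block, so
one contiguous region holds everything a later RT patch needs to replace.
A minimal sketch of the #ifdef-consolidation pattern (hypothetical
CONFIG_FOO and helper names, not kernel code):

/*
 * Sketch only. Before the move, the same option is tested in
 * several spots:
 *
 *	#ifdef CONFIG_FOO
 *	static int foo_state;
 *	#endif
 *	...
 *	#ifdef CONFIG_FOO
 *	static void foo_helper(void) { ... }
 *	#endif
 *
 * After the move there is one section and a single #ifdef, so an
 * alternative (e.g. RT) implementation needs exactly one #else:
 */
#ifdef CONFIG_FOO
static int foo_state;

static void foo_helper(void)
{
	foo_state++;
}
#else
static void foo_helper(void)
{
	/* variant implementation, added in exactly one place */
}
#endif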
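The first hunk also hoists the lockdep state (hardirqs_enabled,
hardirq_context) above the preempt_count comment so it sits with the rest
of the tracing code. For reference, the definition/export idiom used there
looks like this in isolation (real DEFINE_PER_CPU/__this_cpu_* macros, but
a hypothetical demo_irq_depth variable):

#include <linux/percpu.h>

/*
 * One instance per CPU; the GPL-only export lets modules reference
 * the symbol, as lockdep consumers do for hardirqs_enabled.
 */
DEFINE_PER_CPU(int, demo_irq_depth);
EXPORT_PER_CPU_SYMBOL_GPL(demo_irq_depth);

static void demo_enter(void)
{
	/*
	 * Touches only the current CPU's copy; callers must not
	 * migrate between enter and exit.
	 */
	__this_cpu_inc(demo_irq_depth);
}

static void demo_exit(void)
{
	__this_cpu_dec(demo_irq_depth);
}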