Diffstat
-rw-r--r-- | debian/patches-rt/0148-softirq-Move-various-protections-into-inline-helpers.patch | 108
1 file changed, 108 insertions, 0 deletions
diff --git a/debian/patches-rt/0148-softirq-Move-various-protections-into-inline-helpers.patch b/debian/patches-rt/0148-softirq-Move-various-protections-into-inline-helpers.patch
new file mode 100644
index 000000000..e08794144
--- /dev/null
+++ b/debian/patches-rt/0148-softirq-Move-various-protections-into-inline-helpers.patch
@@ -0,0 +1,108 @@
+From 7d59f358f639cc9e535a4049d663d9347aef6380 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 9 Mar 2021 09:55:55 +0100
+Subject: [PATCH 148/323] softirq: Move various protections into inline helpers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+To allow reuse of the bulk of softirq processing code for RT and to avoid
+#ifdeffery all over the place, split protections for various code sections
+out into inline helpers so the RT variant can just replace them in one go.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/softirq.c | 39 ++++++++++++++++++++++++++++++++-------
+ 1 file changed, 32 insertions(+), 7 deletions(-)
+
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index fcb201ceed71..87fac6ac0c32 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -205,6 +205,32 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
+ }
+ EXPORT_SYMBOL(__local_bh_enable_ip);
+ 
++static inline void softirq_handle_begin(void)
++{
++	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
++}
++
++static inline void softirq_handle_end(void)
++{
++	__local_bh_enable(SOFTIRQ_OFFSET);
++	WARN_ON_ONCE(in_interrupt());
++}
++
++static inline void ksoftirqd_run_begin(void)
++{
++	local_irq_disable();
++}
++
++static inline void ksoftirqd_run_end(void)
++{
++	local_irq_enable();
++}
++
++static inline bool should_wake_ksoftirqd(void)
++{
++	return true;
++}
++
+ static inline void invoke_softirq(void)
+ {
+ 	if (ksoftirqd_running(local_softirq_pending()))
+@@ -317,7 +343,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
+ 
+ 	pending = local_softirq_pending();
+ 
+-	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
++	softirq_handle_begin();
+ 	in_hardirq = lockdep_softirq_start();
+ 	account_softirq_enter(current);
+ 
+@@ -368,8 +394,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
+ 
+ 	account_softirq_exit(current);
+ 	lockdep_softirq_end(in_hardirq);
+-	__local_bh_enable(SOFTIRQ_OFFSET);
+-	WARN_ON_ONCE(in_interrupt());
++	softirq_handle_end();
+ 	current_restore_flags(old_flags, PF_MEMALLOC);
+ }
+ 
+@@ -464,7 +489,7 @@ inline void raise_softirq_irqoff(unsigned int nr)
+ 	 * Otherwise we wake up ksoftirqd to make sure we
+ 	 * schedule the softirq soon.
+ 	 */
+-	if (!in_interrupt())
++	if (!in_interrupt() && should_wake_ksoftirqd())
+ 		wakeup_softirqd();
+ }
+ 
+@@ -692,18 +717,18 @@ static int ksoftirqd_should_run(unsigned int cpu)
+ 
+ static void run_ksoftirqd(unsigned int cpu)
+ {
+-	local_irq_disable();
++	ksoftirqd_run_begin();
+ 	if (local_softirq_pending()) {
+ 		/*
+ 		 * We can safely run softirq on inline stack, as we are not deep
+ 		 * in the task stack here.
+ 		 */
+ 		__do_softirq();
+-		local_irq_enable();
++		ksoftirqd_run_end();
+ 		cond_resched();
+ 		return;
+ 	}
+-	local_irq_enable();
++	ksoftirqd_run_end();
+ }
+ 
+ #ifdef CONFIG_HOTPLUG_CPU
+-- 
+2.43.0
+
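
The point of the patch is visible in the hunks above: __do_softirq() and run_ksoftirqd() now call small inline helpers instead of open-coding the local_bh/irq handling, so a PREEMPT_RT build can replace the helper bodies in one place rather than sprinkling #ifdefs through the processing loop. The stand-alone C sketch below only illustrates that pattern: the helper names mirror the patch, but the RT-side bodies and the run_ksoftirqd_like() caller are invented placeholders, not the kernel's actual RT implementation.

/* Minimal user-space sketch of the inline-helper pattern (not kernel code). */
#include <stdbool.h>
#include <stdio.h>

#ifndef PREEMPT_RT
/* Default helpers, shaped like the ones the patch adds to kernel/softirq.c. */
static inline void ksoftirqd_run_begin(void)   { printf("disable local irqs\n"); }
static inline void ksoftirqd_run_end(void)     { printf("enable local irqs\n"); }
static inline bool should_wake_ksoftirqd(void) { return true; }
#else
/* Hypothetical RT replacements: placeholders only, not the real RT variant. */
static inline void ksoftirqd_run_begin(void)   { printf("take softirq local lock\n"); }
static inline void ksoftirqd_run_end(void)     { printf("drop softirq local lock\n"); }
static inline bool should_wake_ksoftirqd(void) { return false; }
#endif

/* The processing code stays #ifdef-free: it only ever calls the helpers. */
static void run_ksoftirqd_like(void)
{
	ksoftirqd_run_begin();
	if (should_wake_ksoftirqd())
		printf("would wake ksoftirqd\n");
	ksoftirqd_run_end();
}

int main(void)
{
	run_ksoftirqd_like();	/* compile with -DPREEMPT_RT to switch helpers */
	return 0;
}

Building with -DPREEMPT_RT swaps all three helpers at once while the caller stays untouched, which is the property the commit message describes.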