author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-27 10:06:00 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-27 10:06:00 +0000
commit     b15a952c52a6825376d3e7f6c1bf5c886c6d8b74 (patch)
tree       1500f2f8f276908a36d8126cb632c0d6b1276764 /debian/patches-rt/0149-softirq-Make-softirq-control-and-processing-RT-aware.patch
parent     Adding upstream version 5.10.209. (diff)
Adding debian version 5.10.209-2. (tag: debian/5.10.209-2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0149-softirq-Make-softirq-control-and-processing-RT-aware.patch')
-rw-r--r--   debian/patches-rt/0149-softirq-Make-softirq-control-and-processing-RT-aware.patch   267
1 file changed, 267 insertions, 0 deletions
diff --git a/debian/patches-rt/0149-softirq-Make-softirq-control-and-processing-RT-aware.patch b/debian/patches-rt/0149-softirq-Make-softirq-control-and-processing-RT-aware.patch
new file mode 100644
index 000000000..22a88a79c
--- /dev/null
+++ b/debian/patches-rt/0149-softirq-Make-softirq-control-and-processing-RT-aware.patch
@@ -0,0 +1,267 @@
+From d2c5f9de8094418b5ebade0ff54219371ec0a5ef Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 9 Mar 2021 09:55:56 +0100
+Subject: [PATCH 149/323] softirq: Make softirq control and processing RT aware
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+Provide a local lock based serialization for soft interrupts on RT which
+allows the local_bh_disabled() sections and servicing soft interrupts to be
+preemptible.
+
+Provide the necessary inline helpers which allow to reuse the bulk of the
+softirq processing code.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/bottom_half.h |   2 +-
+ kernel/softirq.c            | 188 ++++++++++++++++++++++++++++++++++--
+ 2 files changed, 182 insertions(+), 8 deletions(-)
+
+diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
+index a19519f4241d..e4dd613a070e 100644
+--- a/include/linux/bottom_half.h
++++ b/include/linux/bottom_half.h
+@@ -4,7 +4,7 @@
+ 
+ #include <linux/preempt.h>
+ 
+-#ifdef CONFIG_TRACE_IRQFLAGS
++#if defined(CONFIG_PREEMPT_RT) || defined(CONFIG_TRACE_IRQFLAGS)
+ extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
+ #else
+ static __always_inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index 87fac6ac0c32..ed13f6097de8 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -13,6 +13,7 @@
+ #include <linux/kernel_stat.h>
+ #include <linux/interrupt.h>
+ #include <linux/init.h>
++#include <linux/local_lock.h>
+ #include <linux/mm.h>
+ #include <linux/notifier.h>
+ #include <linux/percpu.h>
+@@ -101,20 +102,189 @@ EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
+ #endif
+ 
+ /*
+- * preempt_count and SOFTIRQ_OFFSET usage:
+- * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
+- *   softirq processing.
+- * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
++ * SOFTIRQ_OFFSET usage:
++ *
++ * On !RT kernels 'count' is the preempt counter, on RT kernels this applies
++ * to a per CPU counter and to task::softirqs_disabled_cnt.
++ *
++ * - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq
++ *   processing.
++ *
++ * - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
+  *   on local_bh_disable or local_bh_enable.
++ *
+  * This lets us distinguish between whether we are currently processing
+  * softirq and whether we just have bh disabled.
+  */
++#ifdef CONFIG_PREEMPT_RT
+ 
+-#ifdef CONFIG_TRACE_IRQFLAGS
+ /*
+- * This is for softirq.c-internal use, where hardirqs are disabled
++ * RT accounts for BH disabled sections in task::softirqs_disabled_cnt and
++ * also in per CPU softirq_ctrl::cnt. This is necessary to allow tasks in a
++ * softirq disabled section to be preempted.
++ *
++ * The per task counter is used for softirq_count(), in_softirq() and
++ * in_serving_softirqs() because these counts are only valid when the task
++ * holding softirq_ctrl::lock is running.
++ *
++ * The per CPU counter prevents pointless wakeups of ksoftirqd in case that
++ * the task which is in a softirq disabled section is preempted or blocks.
++ */
++struct softirq_ctrl {
++	local_lock_t	lock;
++	int		cnt;
++};
++
++static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
++	.lock	= INIT_LOCAL_LOCK(softirq_ctrl.lock),
++};
++
++void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
++{
++	unsigned long flags;
++	int newcnt;
++
++	WARN_ON_ONCE(in_hardirq());
++
++	/* First entry of a task into a BH disabled section? */
++	if (!current->softirq_disable_cnt) {
++		if (preemptible()) {
++			local_lock(&softirq_ctrl.lock);
++			/* Required to meet the RCU bottomhalf requirements. */
++			rcu_read_lock();
++		} else {
++			DEBUG_LOCKS_WARN_ON(this_cpu_read(softirq_ctrl.cnt));
++		}
++	}
++
++	/*
++	 * Track the per CPU softirq disabled state. On RT this is per CPU
++	 * state to allow preemption of bottom half disabled sections.
++	 */
++	newcnt = __this_cpu_add_return(softirq_ctrl.cnt, cnt);
++	/*
++	 * Reflect the result in the task state to prevent recursion on the
++	 * local lock and to make softirq_count() & al work.
++	 */
++	current->softirq_disable_cnt = newcnt;
++
++	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && newcnt == cnt) {
++		raw_local_irq_save(flags);
++		lockdep_softirqs_off(ip);
++		raw_local_irq_restore(flags);
++	}
++}
++EXPORT_SYMBOL(__local_bh_disable_ip);
++
++static void __local_bh_enable(unsigned int cnt, bool unlock)
++{
++	unsigned long flags;
++	int newcnt;
++
++	DEBUG_LOCKS_WARN_ON(current->softirq_disable_cnt !=
++			    this_cpu_read(softirq_ctrl.cnt));
++
++	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && softirq_count() == cnt) {
++		raw_local_irq_save(flags);
++		lockdep_softirqs_on(_RET_IP_);
++		raw_local_irq_restore(flags);
++	}
++
++	newcnt = __this_cpu_sub_return(softirq_ctrl.cnt, cnt);
++	current->softirq_disable_cnt = newcnt;
++
++	if (!newcnt && unlock) {
++		rcu_read_unlock();
++		local_unlock(&softirq_ctrl.lock);
++	}
++}
++
++void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
++{
++	bool preempt_on = preemptible();
++	unsigned long flags;
++	u32 pending;
++	int curcnt;
++
++	WARN_ON_ONCE(in_irq());
++	lockdep_assert_irqs_enabled();
++
++	local_irq_save(flags);
++	curcnt = __this_cpu_read(softirq_ctrl.cnt);
++
++	/*
++	 * If this is not reenabling soft interrupts, no point in trying to
++	 * run pending ones.
++	 */
++	if (curcnt != cnt)
++		goto out;
++
++	pending = local_softirq_pending();
++	if (!pending || ksoftirqd_running(pending))
++		goto out;
++
++	/*
++	 * If this was called from non preemptible context, wake up the
++	 * softirq daemon.
++	 */
++	if (!preempt_on) {
++		wakeup_softirqd();
++		goto out;
++	}
++
++	/*
++	 * Adjust softirq count to SOFTIRQ_OFFSET which makes
++	 * in_serving_softirq() become true.
++	 */
++	cnt = SOFTIRQ_OFFSET;
++	__local_bh_enable(cnt, false);
++	__do_softirq();
++
++out:
++	__local_bh_enable(cnt, preempt_on);
++	local_irq_restore(flags);
++}
++EXPORT_SYMBOL(__local_bh_enable_ip);
++
++/*
++ * Invoked from ksoftirqd_run() outside of the interrupt disabled section
++ * to acquire the per CPU local lock for reentrancy protection.
++ */
++static inline void ksoftirqd_run_begin(void)
++{
++	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
++	local_irq_disable();
++}
++
++/* Counterpart to ksoftirqd_run_begin() */
++static inline void ksoftirqd_run_end(void)
++{
++	__local_bh_enable(SOFTIRQ_OFFSET, true);
++	WARN_ON_ONCE(in_interrupt());
++	local_irq_enable();
++}
++
++static inline void softirq_handle_begin(void) { }
++static inline void softirq_handle_end(void) { }
++
++static inline bool should_wake_ksoftirqd(void)
++{
++	return !this_cpu_read(softirq_ctrl.cnt);
++}
++
++static inline void invoke_softirq(void)
++{
++	if (should_wake_ksoftirqd())
++		wakeup_softirqd();
++}
++
++#else /* CONFIG_PREEMPT_RT */
++
++/*
++ * This one is for softirq.c-internal use, where hardirqs are disabled
+  * legitimately:
+  */
++#ifdef CONFIG_TRACE_IRQFLAGS
+ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
+ {
+ 	unsigned long flags;
+@@ -275,6 +445,8 @@ asmlinkage __visible void do_softirq(void)
+ 	local_irq_restore(flags);
+ }
+ 
++#endif /* !CONFIG_PREEMPT_RT */
++
+ /*
+  * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
+  * but break the loop if need_resched() is set or after 2 ms.
+@@ -379,8 +551,10 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
+ 		pending >>= softirq_bit;
+ 	}
+ 
+-	if (__this_cpu_read(ksoftirqd) == current)
++	if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
++	    __this_cpu_read(ksoftirqd) == current)
+ 		rcu_softirq_qs();
++
+ 	local_irq_disable();
+ 
+ 	pending = local_softirq_pending();
+--
+2.43.0
+
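The locking scheme the patch introduces is easier to see in isolation. Below is a minimal userspace sketch of the same pattern, not kernel code: a pthread mutex stands in for the per-CPU softirq_ctrl::lock (on RT a sleeping local_lock), a thread-local counter for task_struct::softirq_disable_cnt, and a lock-protected shared counter for softirq_ctrl::cnt. All identifiers in the sketch (bh_ctrl, t_disable_cnt, bh_disable, bh_enable) are invented for this illustration. It demonstrates the two invariants __local_bh_disable_ip() and __local_bh_enable() rely on: only a task's outermost disable acquires the lock (the task-local counter prevents recursion on it), and only the matching outermost enable releases it, so contending tasks sleep on the lock instead of spinning with bottom halves disabled.

/*
 * Userspace analogue of the RT scheme above (illustration only, not
 * kernel code): bh_ctrl.lock models softirq_ctrl::lock, bh_ctrl.cnt
 * models softirq_ctrl::cnt, and the thread-local t_disable_cnt models
 * task_struct::softirq_disable_cnt.
 */
#include <pthread.h>
#include <stdio.h>

static struct {
	pthread_mutex_t lock;	/* models softirq_ctrl::lock (a sleeping lock) */
	int		cnt;	/* models softirq_ctrl::cnt, lock-protected    */
} bh_ctrl = { .lock = PTHREAD_MUTEX_INITIALIZER };

/* models task_struct::softirq_disable_cnt */
static __thread int t_disable_cnt;

static void bh_disable(void)
{
	/*
	 * Only the outermost disable of a task takes the lock; the
	 * task-local counter prevents recursion on it, mirroring the
	 * "First entry of a task" check in __local_bh_disable_ip().
	 */
	if (!t_disable_cnt)
		pthread_mutex_lock(&bh_ctrl.lock);
	t_disable_cnt = ++bh_ctrl.cnt;	/* safe: we hold the lock */
}

static void bh_enable(void)
{
	t_disable_cnt = --bh_ctrl.cnt;
	/*
	 * The outermost enable drops the lock. In the kernel this is
	 * also where pending softirqs are run or ksoftirqd is woken.
	 */
	if (!t_disable_cnt)
		pthread_mutex_unlock(&bh_ctrl.lock);
}

static void *worker(void *id)
{
	bh_disable();
	bh_disable();	/* nested section: the lock is taken only once */
	printf("worker %ld: bh disabled, cnt=%d\n", (long)id, t_disable_cnt);
	bh_enable();
	bh_enable();
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, (void *)1L);
	pthread_create(&b, NULL, worker, (void *)2L);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

The sketch deliberately omits what the kernel does at the outermost enable, namely running pending softirqs or waking the softirq daemon from __local_bh_enable_ip(), and keeps only the counting and locking discipline; build with cc -pthread to experiment with it.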