author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-06 01:02:38 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-06 01:02:38 +0000
commit     08b74a000942a380fe028845f92cd3a0dee827d5 (patch)
tree       aa78b4e12607c3e1fcce8d5cc42df4330792f118 /debian/patches-rt/0121-softirq-Split-softirq-locks.patch
parent     Adding upstream version 4.19.249. (diff)
download   linux-08b74a000942a380fe028845f92cd3a0dee827d5.tar.xz
           linux-08b74a000942a380fe028845f92cd3a0dee827d5.zip

Adding debian version 4.19.249-2. (debian/4.19.249-2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0121-softirq-Split-softirq-locks.patch')
-rw-r--r--  debian/patches-rt/0121-softirq-Split-softirq-locks.patch  832
1 files changed, 832 insertions, 0 deletions
diff --git a/debian/patches-rt/0121-softirq-Split-softirq-locks.patch b/debian/patches-rt/0121-softirq-Split-softirq-locks.patch
new file mode 100644
index 000000000..e3aeafa1d
--- /dev/null
+++ b/debian/patches-rt/0121-softirq-Split-softirq-locks.patch
@@ -0,0 +1,832 @@
+From 4f304d0db14e45c0b23ed9ed8ee6749255dd180a Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 4 Oct 2012 14:20:47 +0100
+Subject: [PATCH 121/347] softirq: Split softirq locks
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+The 3.x RT series removed the split softirq implementation in favour
+of pushing softirq processing into the context of the thread which
+raised it. This, however, prevents us from handling the various
+softirqs at different priorities. Instead of reintroducing the split
+softirq threads, we now split the locks which serialize the softirq
+processing.
+
+If a softirq is raised in the context of a thread while that thread
+is in a bh-disabled region, the softirq is noted in a per-thread
+field. If the softirq is raised from hard interrupt context, the bit
+is set in the flag field of ksoftirqd and ksoftirqd is invoked. When
+a thread leaves a bh-disabled region, it tries to execute the
+softirqs which have been raised in its own context. It acquires the
+per-softirq / per-CPU lock for the softirq and then checks whether
+the softirq is still pending in the per-CPU local_softirq_pending()
+field. If it is, it runs the softirq; if not, some other task has
+already executed it. This allows for zero-config softirq elevation in
+the context of user space tasks or interrupt threads.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/bottom_half.h | 34 +++
+ include/linux/interrupt.h | 15 +-
+ include/linux/preempt.h | 15 +-
+ include/linux/sched.h | 3 +
+ init/main.c | 1 +
+ kernel/softirq.c | 491 ++++++++++++++++++++++++++++++------
+ kernel/time/tick-sched.c | 9 +-
+ 7 files changed, 478 insertions(+), 90 deletions(-)
+
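The mechanism described in the commit message can be condensed into a short
sketch. This is an illustrative simplification only (the two sketch_*
functions are hypothetical); it reuses names the patch introduces
(softirqs_raised, softirq_nestcnt, lock_softirq()) but omits the ksoftirqd
wakeups, migration handling and the !RT paths shown in the kernel/softirq.c
hunks further down.

    /* Raising: in a bh-disabled thread the softirq is only noted on the
     * task; from hard interrupt context it is handed to ksoftirqd.
     */
    static void sketch_raise_softirq(unsigned int nr)
    {
            or_softirq_pending(1UL << nr);          /* per-CPU pending bitmap */

            if (!in_irq() && current->softirq_nestcnt)
                    current->softirqs_raised |= 1U << nr;
            else if (__this_cpu_read(ksoftirqd))
                    __this_cpu_read(ksoftirqd)->softirqs_raised |= 1U << nr;
    }

    /* Draining: on leaving the bh-disabled region each raised softirq is
     * run under its own per-softirq/per-CPU lock, after re-checking the
     * per-CPU pending bitmap in case another task already handled it.
     */
    static void sketch_drain_softirqs(void)
    {
            while (current->softirqs_raised) {
                    unsigned int i = __ffs(current->softirqs_raised);
                    unsigned int mask = 1U << i;

                    current->softirqs_raised &= ~mask;
                    lock_softirq(i);
                    if (local_softirq_pending() & mask) {
                            set_softirq_pending(local_softirq_pending() & ~mask);
                            handle_softirq(i);
                    }
                    unlock_softirq(i);
            }
    }
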
+diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
+index a19519f4241d..40dd5ef9c154 100644
+--- a/include/linux/bottom_half.h
++++ b/include/linux/bottom_half.h
+@@ -4,6 +4,39 @@
+
+ #include <linux/preempt.h>
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++
++extern void __local_bh_disable(void);
++extern void _local_bh_enable(void);
++extern void __local_bh_enable(void);
++
++static inline void local_bh_disable(void)
++{
++ __local_bh_disable();
++}
++
++static inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
++{
++ __local_bh_disable();
++}
++
++static inline void local_bh_enable(void)
++{
++ __local_bh_enable();
++}
++
++static inline void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
++{
++ __local_bh_enable();
++}
++
++static inline void local_bh_enable_ip(unsigned long ip)
++{
++ __local_bh_enable();
++}
++
++#else
++
+ #ifdef CONFIG_TRACE_IRQFLAGS
+ extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
+ #else
+@@ -31,5 +64,6 @@ static inline void local_bh_enable(void)
+ {
+ __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
+ }
++#endif
+
+ #endif /* _LINUX_BH_H */
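Callers of this API are unchanged by the split; only what the pair does
underneath differs between the two configurations. A hedged usage example
(the per-CPU variable and the function are hypothetical, for illustration
only):

    /* Hypothetical per-CPU statistic, protected by bh-disable on its CPU. */
    static DEFINE_PER_CPU(unsigned long, demo_rx_count);

    static void demo_count_packet(void)
    {
            /*
             * !RT: adds SOFTIRQ_DISABLE_OFFSET to preempt_count().
             *  RT: increments current->softirq_nestcnt and disables
             *      migration (see __local_bh_disable() in kernel/softirq.c).
             */
            local_bh_disable();

            __this_cpu_inc(demo_rx_count);

            /*
             * !RT: may run this CPU's pending softirqs inline.
             *  RT: the outermost enable runs the softirqs this task has
             *      raised (do_current_softirqs()), then re-enables migration.
             */
            local_bh_enable();
    }
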
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index eec4bec454b5..cf9860d49d57 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -497,10 +497,11 @@ struct softirq_action
+ void (*action)(struct softirq_action *);
+ };
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ asmlinkage void do_softirq(void);
+ asmlinkage void __do_softirq(void);
+-
+-#if defined(__ARCH_HAS_DO_SOFTIRQ) && !defined(CONFIG_PREEMPT_RT_FULL)
++static inline void thread_do_softirq(void) { do_softirq(); }
++#ifdef __ARCH_HAS_DO_SOFTIRQ
+ void do_softirq_own_stack(void);
+ #else
+ static inline void do_softirq_own_stack(void)
+@@ -508,6 +509,9 @@ static inline void do_softirq_own_stack(void)
+ __do_softirq();
+ }
+ #endif
++#else
++extern void thread_do_softirq(void);
++#endif
+
+ extern void open_softirq(int nr, void (*action)(struct softirq_action *));
+ extern void softirq_init(void);
+@@ -515,6 +519,7 @@ extern void __raise_softirq_irqoff(unsigned int nr);
+
+ extern void raise_softirq_irqoff(unsigned int nr);
+ extern void raise_softirq(unsigned int nr);
++extern void softirq_check_pending_idle(void);
+
+ DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
+
+@@ -632,6 +637,12 @@ extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
+ extern void tasklet_init(struct tasklet_struct *t,
+ void (*func)(unsigned long), unsigned long data);
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++extern void softirq_early_init(void);
++#else
++static inline void softirq_early_init(void) { }
++#endif
++
+ struct tasklet_hrtimer {
+ struct hrtimer timer;
+ struct tasklet_struct tasklet;
+diff --git a/include/linux/preempt.h b/include/linux/preempt.h
+index 9984f2b75b73..27c3176d88d2 100644
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -51,7 +51,11 @@
+ #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
+ #define NMI_OFFSET (1UL << NMI_SHIFT)
+
+-#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
++#ifndef CONFIG_PREEMPT_RT_FULL
++# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
++#else
++# define SOFTIRQ_DISABLE_OFFSET (0)
++#endif
+
+ /* We use the MSB mostly because its available */
+ #define PREEMPT_NEED_RESCHED 0x80000000
+@@ -81,9 +85,15 @@
+ #include <asm/preempt.h>
+
+ #define hardirq_count() (preempt_count() & HARDIRQ_MASK)
+-#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
+ #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
+ | NMI_MASK))
++#ifndef CONFIG_PREEMPT_RT_FULL
++# define softirq_count() (preempt_count() & SOFTIRQ_MASK)
++# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
++#else
++# define softirq_count() ((unsigned long)current->softirq_nestcnt)
++extern int in_serving_softirq(void);
++#endif
+
+ /*
+ * Are we doing bottom half or hardware interrupt processing?
+@@ -101,7 +111,6 @@
+ #define in_irq() (hardirq_count())
+ #define in_softirq() (softirq_count())
+ #define in_interrupt() (irq_count())
+-#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
+ #define in_nmi() (preempt_count() & NMI_MASK)
+ #define in_task() (!(preempt_count() & \
+ (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 1b2884889dd0..63a97f24d138 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1200,6 +1200,8 @@ struct task_struct {
+ #endif
+ #ifdef CONFIG_PREEMPT_RT_BASE
+ struct rcu_head put_rcu;
++ int softirq_nestcnt;
++ unsigned int softirqs_raised;
+ #endif
+ #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+ unsigned long task_state_change;
+@@ -1397,6 +1399,7 @@ extern struct pid *cad_pid;
+ /*
+ * Per process flags
+ */
++#define PF_IN_SOFTIRQ 0x00000001 /* Task is serving softirq */
+ #define PF_IDLE 0x00000002 /* I am an IDLE thread */
+ #define PF_EXITING 0x00000004 /* Getting shut down */
+ #define PF_VCPU 0x00000010 /* I'm a virtual CPU */
+diff --git a/init/main.c b/init/main.c
+index 272ec131211c..0d5763c5da28 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -560,6 +560,7 @@ asmlinkage __visible void __init start_kernel(void)
+ setup_command_line(command_line);
+ setup_nr_cpu_ids();
+ setup_per_cpu_areas();
++ softirq_early_init();
+ smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
+ boot_cpu_hotplug_init();
+
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index 1d3a482246cc..fd89f8ab85ac 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -26,7 +26,9 @@
+ #include <linux/smp.h>
+ #include <linux/smpboot.h>
+ #include <linux/tick.h>
++#include <linux/locallock.h>
+ #include <linux/irq.h>
++#include <linux/sched/types.h>
+
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/irq.h>
+@@ -63,6 +65,98 @@ const char * const softirq_to_name[NR_SOFTIRQS] = {
+ "TASKLET", "SCHED", "HRTIMER", "RCU"
+ };
+
++#ifdef CONFIG_NO_HZ_COMMON
++# ifdef CONFIG_PREEMPT_RT_FULL
++
++struct softirq_runner {
++ struct task_struct *runner[NR_SOFTIRQS];
++};
++
++static DEFINE_PER_CPU(struct softirq_runner, softirq_runners);
++
++static inline void softirq_set_runner(unsigned int sirq)
++{
++ struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
++
++ sr->runner[sirq] = current;
++}
++
++static inline void softirq_clr_runner(unsigned int sirq)
++{
++ struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
++
++ sr->runner[sirq] = NULL;
++}
++
++/*
++ * On preempt-rt a softirq running context might be blocked on a
++ * lock. There might be no other runnable task on this CPU because the
++ * lock owner runs on some other CPU. So we have to go into idle with
++ * the pending bit set. Therefore we need to check this otherwise we
++ * warn about false positives which confuses users and defeats the
++ * whole purpose of this test.
++ *
++ * This code is called with interrupts disabled.
++ */
++void softirq_check_pending_idle(void)
++{
++ static int rate_limit;
++ struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
++ u32 warnpending;
++ int i;
++
++ if (rate_limit >= 10)
++ return;
++
++ warnpending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK;
++ for (i = 0; i < NR_SOFTIRQS; i++) {
++ struct task_struct *tsk = sr->runner[i];
++
++ /*
++ * The wakeup code in rtmutex.c wakes up the task
++ * _before_ it sets pi_blocked_on to NULL under
++ * tsk->pi_lock. So we need to check for both: state
++ * and pi_blocked_on.
++ */
++ if (tsk) {
++ raw_spin_lock(&tsk->pi_lock);
++ if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING) {
++ /* Clear all bits pending in that task */
++ warnpending &= ~(tsk->softirqs_raised);
++ warnpending &= ~(1 << i);
++ }
++ raw_spin_unlock(&tsk->pi_lock);
++ }
++ }
++
++ if (warnpending) {
++ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
++ warnpending);
++ rate_limit++;
++ }
++}
++# else
++/*
++ * On !PREEMPT_RT we just printk rate limited:
++ */
++void softirq_check_pending_idle(void)
++{
++ static int rate_limit;
++
++ if (rate_limit < 10 &&
++ (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
++ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
++ local_softirq_pending());
++ rate_limit++;
++ }
++}
++# endif
++
++#else /* !CONFIG_NO_HZ_COMMON */
++static inline void softirq_set_runner(unsigned int sirq) { }
++static inline void softirq_clr_runner(unsigned int sirq) { }
++#endif
++
+ /*
+ * we cannot loop indefinitely here to avoid userspace starvation,
+ * but we also don't want to introduce a worst case 1/HZ latency
+@@ -78,6 +172,27 @@ static void wakeup_softirqd(void)
+ wake_up_process(tsk);
+ }
+
++static void handle_softirq(unsigned int vec_nr)
++{
++ struct softirq_action *h = softirq_vec + vec_nr;
++ int prev_count;
++
++ prev_count = preempt_count();
++
++ kstat_incr_softirqs_this_cpu(vec_nr);
++
++ trace_softirq_entry(vec_nr);
++ h->action(h);
++ trace_softirq_exit(vec_nr);
++ if (unlikely(prev_count != preempt_count())) {
++ pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
++ vec_nr, softirq_to_name[vec_nr], h->action,
++ prev_count, preempt_count());
++ preempt_count_set(prev_count);
++ }
++}
++
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * If ksoftirqd is scheduled, we do not want to process pending softirqs
+ * right now. Let ksoftirqd handle this at its own rate, to get fairness,
+@@ -93,6 +208,47 @@ static bool ksoftirqd_running(unsigned long pending)
+ return tsk && (tsk->state == TASK_RUNNING);
+ }
+
++static inline int ksoftirqd_softirq_pending(void)
++{
++ return local_softirq_pending();
++}
++
++static void handle_pending_softirqs(u32 pending)
++{
++ struct softirq_action *h = softirq_vec;
++ int softirq_bit;
++
++ local_irq_enable();
++
++ h = softirq_vec;
++
++ while ((softirq_bit = ffs(pending))) {
++ unsigned int vec_nr;
++
++ h += softirq_bit - 1;
++ vec_nr = h - softirq_vec;
++ handle_softirq(vec_nr);
++
++ h++;
++ pending >>= softirq_bit;
++ }
++
++ rcu_bh_qs();
++ local_irq_disable();
++}
++
++static void run_ksoftirqd(unsigned int cpu)
++{
++ local_irq_disable();
++ if (ksoftirqd_softirq_pending()) {
++ __do_softirq();
++ local_irq_enable();
++ cond_resched();
++ return;
++ }
++ local_irq_enable();
++}
++
+ /*
+ * preempt_count and SOFTIRQ_OFFSET usage:
+ * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
+@@ -252,10 +408,8 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
+ unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
+ unsigned long old_flags = current->flags;
+ int max_restart = MAX_SOFTIRQ_RESTART;
+- struct softirq_action *h;
+ bool in_hardirq;
+ __u32 pending;
+- int softirq_bit;
+
+ /*
+ * Mask out PF_MEMALLOC s current task context is borrowed for the
+@@ -274,36 +428,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
+ /* Reset the pending bitmask before enabling irqs */
+ set_softirq_pending(0);
+
+- local_irq_enable();
+-
+- h = softirq_vec;
+-
+- while ((softirq_bit = ffs(pending))) {
+- unsigned int vec_nr;
+- int prev_count;
+-
+- h += softirq_bit - 1;
+-
+- vec_nr = h - softirq_vec;
+- prev_count = preempt_count();
+-
+- kstat_incr_softirqs_this_cpu(vec_nr);
+-
+- trace_softirq_entry(vec_nr);
+- h->action(h);
+- trace_softirq_exit(vec_nr);
+- if (unlikely(prev_count != preempt_count())) {
+- pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
+- vec_nr, softirq_to_name[vec_nr], h->action,
+- prev_count, preempt_count());
+- preempt_count_set(prev_count);
+- }
+- h++;
+- pending >>= softirq_bit;
+- }
+-
+- rcu_bh_qs();
+- local_irq_disable();
++ handle_pending_softirqs(pending);
+
+ pending = local_softirq_pending();
+ if (pending) {
+@@ -339,6 +464,248 @@ asmlinkage __visible void do_softirq(void)
+ local_irq_restore(flags);
+ }
+
++/*
++ * This function must run with irqs disabled!
++ */
++void raise_softirq_irqoff(unsigned int nr)
++{
++ __raise_softirq_irqoff(nr);
++
++ /*
++ * If we're in an interrupt or softirq, we're done
++ * (this also catches softirq-disabled code). We will
++ * actually run the softirq once we return from
++ * the irq or softirq.
++ *
++ * Otherwise we wake up ksoftirqd to make sure we
++ * schedule the softirq soon.
++ */
++ if (!in_interrupt())
++ wakeup_softirqd();
++}
++
++void __raise_softirq_irqoff(unsigned int nr)
++{
++ trace_softirq_raise(nr);
++ or_softirq_pending(1UL << nr);
++}
++
++static inline void local_bh_disable_nort(void) { local_bh_disable(); }
++static inline void _local_bh_enable_nort(void) { _local_bh_enable(); }
++static void ksoftirqd_set_sched_params(unsigned int cpu) { }
++static void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) { }
++
++#else /* !PREEMPT_RT_FULL */
++
++/*
++ * On RT we serialize softirq execution with a cpu local lock per softirq
++ */
++static DEFINE_PER_CPU(struct local_irq_lock [NR_SOFTIRQS], local_softirq_locks);
++
++void __init softirq_early_init(void)
++{
++ int i;
++
++ for (i = 0; i < NR_SOFTIRQS; i++)
++ local_irq_lock_init(local_softirq_locks[i]);
++}
++
++static void lock_softirq(int which)
++{
++ local_lock(local_softirq_locks[which]);
++}
++
++static void unlock_softirq(int which)
++{
++ local_unlock(local_softirq_locks[which]);
++}
++
++static void do_single_softirq(int which)
++{
++ unsigned long old_flags = current->flags;
++
++ current->flags &= ~PF_MEMALLOC;
++ vtime_account_irq_enter(current);
++ current->flags |= PF_IN_SOFTIRQ;
++ lockdep_softirq_enter();
++ local_irq_enable();
++ handle_softirq(which);
++ local_irq_disable();
++ lockdep_softirq_exit();
++ current->flags &= ~PF_IN_SOFTIRQ;
++ vtime_account_irq_enter(current);
++ current_restore_flags(old_flags, PF_MEMALLOC);
++}
++
++/*
++ * Called with interrupts disabled. Process softirqs which were raised
++ * in current context (or on behalf of ksoftirqd).
++ */
++static void do_current_softirqs(void)
++{
++ while (current->softirqs_raised) {
++ int i = __ffs(current->softirqs_raised);
++ unsigned int pending, mask = (1U << i);
++
++ current->softirqs_raised &= ~mask;
++ local_irq_enable();
++
++ /*
++ * If the lock is contended, we boost the owner to
++ * process the softirq or leave the critical section
++ * now.
++ */
++ lock_softirq(i);
++ local_irq_disable();
++ softirq_set_runner(i);
++ /*
++ * Check with the local_softirq_pending() bits,
++ * whether we need to process this still or if someone
++ * else took care of it.
++ */
++ pending = local_softirq_pending();
++ if (pending & mask) {
++ set_softirq_pending(pending & ~mask);
++ do_single_softirq(i);
++ }
++ softirq_clr_runner(i);
++ WARN_ON(current->softirq_nestcnt != 1);
++ local_irq_enable();
++ unlock_softirq(i);
++ local_irq_disable();
++ }
++}
++
++void __local_bh_disable(void)
++{
++ if (++current->softirq_nestcnt == 1)
++ migrate_disable();
++}
++EXPORT_SYMBOL(__local_bh_disable);
++
++void __local_bh_enable(void)
++{
++ if (WARN_ON(current->softirq_nestcnt == 0))
++ return;
++
++ local_irq_disable();
++ if (current->softirq_nestcnt == 1 && current->softirqs_raised)
++ do_current_softirqs();
++ local_irq_enable();
++
++ if (--current->softirq_nestcnt == 0)
++ migrate_enable();
++}
++EXPORT_SYMBOL(__local_bh_enable);
++
++int in_serving_softirq(void)
++{
++ return current->flags & PF_IN_SOFTIRQ;
++}
++EXPORT_SYMBOL(in_serving_softirq);
++
++/* Called with preemption disabled */
++static void run_ksoftirqd(unsigned int cpu)
++{
++ local_irq_disable();
++ current->softirq_nestcnt++;
++
++ do_current_softirqs();
++ current->softirq_nestcnt--;
++ local_irq_enable();
++ cond_resched();
++}
++
++/*
++ * Called from netif_rx_ni(). Preemption enabled, but migration
++ * disabled. So the cpu can't go away under us.
++ */
++void thread_do_softirq(void)
++{
++ if (!in_serving_softirq() && current->softirqs_raised) {
++ current->softirq_nestcnt++;
++ do_current_softirqs();
++ current->softirq_nestcnt--;
++ }
++}
++
++static void do_raise_softirq_irqoff(unsigned int nr)
++{
++ trace_softirq_raise(nr);
++ or_softirq_pending(1UL << nr);
++
++ /*
++ * If we are not in a hard interrupt and inside a bh disabled
++ * region, we simply raise the flag on current. local_bh_enable()
++ * will make sure that the softirq is executed. Otherwise we
++ * delegate it to ksoftirqd.
++ */
++ if (!in_irq() && current->softirq_nestcnt)
++ current->softirqs_raised |= (1U << nr);
++ else if (__this_cpu_read(ksoftirqd))
++ __this_cpu_read(ksoftirqd)->softirqs_raised |= (1U << nr);
++}
++
++void __raise_softirq_irqoff(unsigned int nr)
++{
++ do_raise_softirq_irqoff(nr);
++ if (!in_irq() && !current->softirq_nestcnt)
++ wakeup_softirqd();
++}
++
++/*
++ * This function must run with irqs disabled!
++ */
++void raise_softirq_irqoff(unsigned int nr)
++{
++ do_raise_softirq_irqoff(nr);
++
++ /*
++ * If we're in a hard interrupt we let the irq return code deal
++ * with the wakeup of ksoftirqd.
++ */
++ if (in_irq())
++ return;
++ /*
++ * If we are in thread context but outside of a bh disabled
++ * region, we need to wake ksoftirqd as well.
++ *
++ * CHECKME: Some of the places which do that could be wrapped
++ * into local_bh_disable/enable pairs. Though it's unclear
++ * whether this is worth the effort. To find those places just
++ * raise a WARN() if the condition is met.
++ */
++ if (!current->softirq_nestcnt)
++ wakeup_softirqd();
++}
++
++static inline int ksoftirqd_softirq_pending(void)
++{
++ return current->softirqs_raised;
++}
++
++static inline void local_bh_disable_nort(void) { }
++static inline void _local_bh_enable_nort(void) { }
++
++static inline void ksoftirqd_set_sched_params(unsigned int cpu)
++{
++ struct sched_param param = { .sched_priority = 1 };
++
++ sched_setscheduler(current, SCHED_FIFO, &param);
++ /* Take over all pending softirqs when starting */
++ local_irq_disable();
++ current->softirqs_raised = local_softirq_pending();
++ local_irq_enable();
++}
++
++static inline void ksoftirqd_clr_sched_params(unsigned int cpu, bool online)
++{
++ struct sched_param param = { .sched_priority = 0 };
++
++ sched_setscheduler(current, SCHED_NORMAL, &param);
++}
++
++#endif /* PREEMPT_RT_FULL */
+ /*
+ * Enter an interrupt context.
+ */
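A hedged usage sketch of the RT raise path added above (the helper and its
use of TASKLET_SOFTIRQ are hypothetical, purely to show where the pending
bit ends up):

    /* Hypothetical deferral helper, for illustration only. */
    static void demo_defer_from_thread(void)
    {
            local_bh_disable();             /* RT: softirq_nestcnt becomes 1 */

            /*
             * Because we are in thread context inside a bh-disabled region,
             * do_raise_softirq_irqoff() records the bit in
             * current->softirqs_raised instead of waking ksoftirqd.
             */
            raise_softirq(TASKLET_SOFTIRQ);

            local_bh_enable();              /* outermost enable drains it here */
    }

    /*
     * The same raise_softirq() from hard interrupt context, or from a thread
     * outside a bh-disabled region, sets the bit in ksoftirqd->softirqs_raised
     * (or wakes ksoftirqd directly) so the softirq is handled by the
     * ksoftirqd thread instead.
     */
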
+@@ -350,9 +717,9 @@ void irq_enter(void)
+ * Prevent raise_softirq from needlessly waking up ksoftirqd
+ * here, as softirq will be serviced on return from interrupt.
+ */
+- local_bh_disable();
++ local_bh_disable_nort();
+ tick_irq_enter();
+- _local_bh_enable();
++ _local_bh_enable_nort();
+ }
+
+ __irq_enter();
+@@ -360,6 +727,7 @@ void irq_enter(void)
+
+ static inline void invoke_softirq(void)
+ {
++#ifndef CONFIG_PREEMPT_RT_FULL
+ if (ksoftirqd_running(local_softirq_pending()))
+ return;
+
+@@ -382,6 +750,15 @@ static inline void invoke_softirq(void)
+ } else {
+ wakeup_softirqd();
+ }
++#else /* PREEMPT_RT_FULL */
++ unsigned long flags;
++
++ local_irq_save(flags);
++ if (__this_cpu_read(ksoftirqd) &&
++ __this_cpu_read(ksoftirqd)->softirqs_raised)
++ wakeup_softirqd();
++ local_irq_restore(flags);
++#endif
+ }
+
+ static inline void tick_irq_exit(void)
+@@ -417,26 +794,6 @@ void irq_exit(void)
+ trace_hardirq_exit(); /* must be last! */
+ }
+
+-/*
+- * This function must run with irqs disabled!
+- */
+-inline void raise_softirq_irqoff(unsigned int nr)
+-{
+- __raise_softirq_irqoff(nr);
+-
+- /*
+- * If we're in an interrupt or softirq, we're done
+- * (this also catches softirq-disabled code). We will
+- * actually run the softirq once we return from
+- * the irq or softirq.
+- *
+- * Otherwise we wake up ksoftirqd to make sure we
+- * schedule the softirq soon.
+- */
+- if (!in_interrupt())
+- wakeup_softirqd();
+-}
+-
+ void raise_softirq(unsigned int nr)
+ {
+ unsigned long flags;
+@@ -446,12 +803,6 @@ void raise_softirq(unsigned int nr)
+ local_irq_restore(flags);
+ }
+
+-void __raise_softirq_irqoff(unsigned int nr)
+-{
+- trace_softirq_raise(nr);
+- or_softirq_pending(1UL << nr);
+-}
+-
+ void open_softirq(int nr, void (*action)(struct softirq_action *))
+ {
+ softirq_vec[nr].action = action;
+@@ -725,23 +1076,7 @@ EXPORT_SYMBOL(tasklet_unlock_wait);
+
+ static int ksoftirqd_should_run(unsigned int cpu)
+ {
+- return local_softirq_pending();
+-}
+-
+-static void run_ksoftirqd(unsigned int cpu)
+-{
+- local_irq_disable();
+- if (local_softirq_pending()) {
+- /*
+- * We can safely run softirq on inline stack, as we are not deep
+- * in the task stack here.
+- */
+- __do_softirq();
+- local_irq_enable();
+- cond_resched();
+- return;
+- }
+- local_irq_enable();
++ return ksoftirqd_softirq_pending();
+ }
+
+ #ifdef CONFIG_HOTPLUG_CPU
+@@ -808,6 +1143,8 @@ static int takeover_tasklets(unsigned int cpu)
+
+ static struct smp_hotplug_thread softirq_threads = {
+ .store = &ksoftirqd,
++ .setup = ksoftirqd_set_sched_params,
++ .cleanup = ksoftirqd_clr_sched_params,
+ .thread_should_run = ksoftirqd_should_run,
+ .thread_fn = run_ksoftirqd,
+ .thread_comm = "ksoftirqd/%u",
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index 012bc81879bf..2b0ddd50e879 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -895,14 +895,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
+ return false;
+
+ if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
+- static int ratelimit;
+-
+- if (ratelimit < 10 &&
+- (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
+- pr_warn("NOHZ: local_softirq_pending %02x\n",
+- (unsigned int) local_softirq_pending());
+- ratelimit++;
+- }
++ softirq_check_pending_idle();
+ return false;
+ }
+
+--
+2.36.1
+