Diffstat
 -rw-r--r--  debian/patches-rt/0124-softirq-split-timer-softirqs-out-of-ksoftirqd.patch  215
 1 file changed, 215 insertions, 0 deletions
diff --git a/debian/patches-rt/0124-softirq-split-timer-softirqs-out-of-ksoftirqd.patch b/debian/patches-rt/0124-softirq-split-timer-softirqs-out-of-ksoftirqd.patch
new file mode 100644
index 000000000..b6bb8cca8
--- /dev/null
+++ b/debian/patches-rt/0124-softirq-split-timer-softirqs-out-of-ksoftirqd.patch
@@ -0,0 +1,215 @@
+From 883087f48f717058bc203cd47430b55f77275fbd Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 20 Jan 2016 16:34:17 +0100
+Subject: [PATCH 124/347] softirq: split timer softirqs out of ksoftirqd
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+The softirqd runs in -RT with SCHED_FIFO (prio 1) and deals mostly with
+timer wakeups, which cannot happen in hardirq context. The prio has been
+raised from the normal SCHED_OTHER so that the timer wakeup does not
+happen too late.
+With enough networking load it is possible that the system never goes
+idle and keeps scheduling ksoftirqd and everything else with a higher
+priority. One of the tasks left behind is one of RCU's threads, so we
+see stalls and eventually run out of memory.
+This patch moves the TIMER and HRTIMER softirqs out of the `ksoftirqd`
+thread into their own `ktimersoftd` thread. The former can now run at
+SCHED_OTHER (same as mainline), the latter at SCHED_FIFO due to the wakeups.
+
+From a networking point of view: the NAPI callback runs after the network
+interrupt thread completes. If its runtime takes too long, the NAPI code
+itself schedules `ksoftirqd`. There, in the thread, it can run at
+SCHED_OTHER priority and it will no longer defer RCU.
+
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/softirq.c | 85 +++++++++++++++++++++++++++++++++++++++++-------
+ 1 file changed, 73 insertions(+), 12 deletions(-)
+
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index 3e9333d148ad..fe4e59c80a08 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -59,6 +59,10 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
+ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
+
+ DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
++#ifdef CONFIG_PREEMPT_RT_FULL
++#define TIMER_SOFTIRQS ((1 << TIMER_SOFTIRQ) | (1 << HRTIMER_SOFTIRQ))
++DEFINE_PER_CPU(struct task_struct *, ktimer_softirqd);
++#endif
+
+ const char * const softirq_to_name[NR_SOFTIRQS] = {
+ "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
+@@ -172,6 +176,17 @@ static void wakeup_softirqd(void)
+ wake_up_process(tsk);
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++static void wakeup_timer_softirqd(void)
++{
++ /* Interrupts are disabled: no need to stop preemption */
++ struct task_struct *tsk = __this_cpu_read(ktimer_softirqd);
++
++ if (tsk && tsk->state != TASK_RUNNING)
++ wake_up_process(tsk);
++}
++#endif
++
+ static void handle_softirq(unsigned int vec_nr)
+ {
+ struct softirq_action *h = softirq_vec + vec_nr;
+@@ -493,7 +508,6 @@ void __raise_softirq_irqoff(unsigned int nr)
+ static inline void local_bh_disable_nort(void) { local_bh_disable(); }
+ static inline void _local_bh_enable_nort(void) { _local_bh_enable(); }
+ static void ksoftirqd_set_sched_params(unsigned int cpu) { }
+-static void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) { }
+
+ #else /* !PREEMPT_RT_FULL */
+
+@@ -640,8 +654,12 @@ void thread_do_softirq(void)
+
+ static void do_raise_softirq_irqoff(unsigned int nr)
+ {
++ unsigned int mask;
++
++ mask = 1UL << nr;
++
+ trace_softirq_raise(nr);
+- or_softirq_pending(1UL << nr);
++ or_softirq_pending(mask);
+
+ /*
+ * If we are not in a hard interrupt and inside a bh disabled
+@@ -650,16 +668,29 @@ static void do_raise_softirq_irqoff(unsigned int nr)
+ * delegate it to ksoftirqd.
+ */
+ if (!in_irq() && current->softirq_nestcnt)
+- current->softirqs_raised |= (1U << nr);
+- else if (__this_cpu_read(ksoftirqd))
+- __this_cpu_read(ksoftirqd)->softirqs_raised |= (1U << nr);
++ current->softirqs_raised |= mask;
++ else if (!__this_cpu_read(ksoftirqd) || !__this_cpu_read(ktimer_softirqd))
++ return;
++
++ if (mask & TIMER_SOFTIRQS)
++ __this_cpu_read(ktimer_softirqd)->softirqs_raised |= mask;
++ else
++ __this_cpu_read(ksoftirqd)->softirqs_raised |= mask;
++}
++
++static void wakeup_proper_softirq(unsigned int nr)
++{
++ if ((1UL << nr) & TIMER_SOFTIRQS)
++ wakeup_timer_softirqd();
++ else
++ wakeup_softirqd();
+ }
+
+ void __raise_softirq_irqoff(unsigned int nr)
+ {
+ do_raise_softirq_irqoff(nr);
+ if (!in_irq() && !current->softirq_nestcnt)
+- wakeup_softirqd();
++ wakeup_proper_softirq(nr);
+ }
+
+ /*
+@@ -685,7 +716,7 @@ void raise_softirq_irqoff(unsigned int nr)
+ * raise a WARN() if the condition is met.
+ */
+ if (!current->softirq_nestcnt)
+- wakeup_softirqd();
++ wakeup_proper_softirq(nr);
+ }
+
+ static inline int ksoftirqd_softirq_pending(void)
+@@ -697,23 +728,38 @@ static inline void local_bh_disable_nort(void) { }
+ static inline void _local_bh_enable_nort(void) { }
+
+ static inline void ksoftirqd_set_sched_params(unsigned int cpu)
++{
++ /* Take over all but timer pending softirqs when starting */
++ local_irq_disable();
++ current->softirqs_raised = local_softirq_pending() & ~TIMER_SOFTIRQS;
++ local_irq_enable();
++}
++
++static inline void ktimer_softirqd_set_sched_params(unsigned int cpu)
+ {
+ struct sched_param param = { .sched_priority = 1 };
+
+ sched_setscheduler(current, SCHED_FIFO, &param);
+- /* Take over all pending softirqs when starting */
++
++ /* Take over timer pending softirqs when starting */
+ local_irq_disable();
+- current->softirqs_raised = local_softirq_pending();
++ current->softirqs_raised = local_softirq_pending() & TIMER_SOFTIRQS;
+ local_irq_enable();
+ }
+
+-static inline void ksoftirqd_clr_sched_params(unsigned int cpu, bool online)
++static inline void ktimer_softirqd_clr_sched_params(unsigned int cpu,
++ bool online)
+ {
+ struct sched_param param = { .sched_priority = 0 };
+
+ sched_setscheduler(current, SCHED_NORMAL, &param);
+ }
+
++static int ktimer_softirqd_should_run(unsigned int cpu)
++{
++ return current->softirqs_raised;
++}
++
+ #endif /* PREEMPT_RT_FULL */
+ /*
+ * Enter an interrupt context.
+@@ -766,6 +812,9 @@ static inline void invoke_softirq(void)
+ if (__this_cpu_read(ksoftirqd) &&
+ __this_cpu_read(ksoftirqd)->softirqs_raised)
+ wakeup_softirqd();
++ if (__this_cpu_read(ktimer_softirqd) &&
++ __this_cpu_read(ktimer_softirqd)->softirqs_raised)
++ wakeup_timer_softirqd();
+ local_irq_restore(flags);
+ #endif
+ }
+@@ -1153,18 +1202,30 @@ static int takeover_tasklets(unsigned int cpu)
+ static struct smp_hotplug_thread softirq_threads = {
+ .store = &ksoftirqd,
+ .setup = ksoftirqd_set_sched_params,
+- .cleanup = ksoftirqd_clr_sched_params,
+ .thread_should_run = ksoftirqd_should_run,
+ .thread_fn = run_ksoftirqd,
+ .thread_comm = "ksoftirqd/%u",
+ };
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++static struct smp_hotplug_thread softirq_timer_threads = {
++ .store = &ktimer_softirqd,
++ .setup = ktimer_softirqd_set_sched_params,
++ .cleanup = ktimer_softirqd_clr_sched_params,
++ .thread_should_run = ktimer_softirqd_should_run,
++ .thread_fn = run_ksoftirqd,
++ .thread_comm = "ktimersoftd/%u",
++};
++#endif
++
+ static __init int spawn_ksoftirqd(void)
+ {
+ cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
+ takeover_tasklets);
+ BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
+-
++#ifdef CONFIG_PREEMPT_RT_FULL
++ BUG_ON(smpboot_register_percpu_thread(&softirq_timer_threads));
++#endif
+ return 0;
+ }
+ early_initcall(spawn_ksoftirqd);
+--
+2.36.1
+
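For reference, below is a minimal sketch of the smpboot per-CPU thread pattern the patch reuses to create `ktimersoftd`. Only `struct smp_hotplug_thread`, `smpboot_register_percpu_thread()`, `DEFINE_PER_CPU()` and `sched_setscheduler()` are taken from the code above; every `example_*` name and the "exampled/%u" thread name are hypothetical placeholders, not part of the kernel or of this patch.

/*
 * Sketch only: a per-CPU helper thread registered via smpboot, mirroring
 * the way ktimersoftd is set up above. All example_* names are made up.
 */
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smpboot.h>
#include <uapi/linux/sched/types.h>

static DEFINE_PER_CPU(struct task_struct *, example_thread);
static DEFINE_PER_CPU(unsigned long, example_work_pending);

/* .setup: runs inside the freshly created per-CPU thread; like
 * ktimer_softirqd_set_sched_params() it switches to SCHED_FIFO prio 1. */
static void example_setup(unsigned int cpu)
{
	struct sched_param param = { .sched_priority = 1 };

	sched_setscheduler(current, SCHED_FIFO, &param);
}

/* .thread_should_run: tells smpboot whether .thread_fn has work to do. */
static int example_should_run(unsigned int cpu)
{
	return __this_cpu_read(example_work_pending) != 0;
}

/* .thread_fn: clear the pending flag and handle the deferred work. */
static void example_thread_fn(unsigned int cpu)
{
	__this_cpu_write(example_work_pending, 0);
	/* ... per-CPU work would go here ... */
}

static struct smp_hotplug_thread example_threads = {
	.store			= &example_thread,
	.setup			= example_setup,
	.thread_should_run	= example_should_run,
	.thread_fn		= example_thread_fn,
	.thread_comm		= "exampled/%u",
};

static __init int example_spawn(void)
{
	/* Spawns one "exampled/N" thread per CPU, parked/unparked on hotplug. */
	return smpboot_register_percpu_thread(&example_threads);
}
early_initcall(example_spawn);

Setting `example_work_pending` from elsewhere and then waking the stored per-CPU task with wake_up_process(), as wakeup_timer_softirqd() does above, would make the thread run its .thread_fn at the next scheduling opportunity.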