From 01997497f915e8f79871f3f2acb55ac465051d24 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sun, 7 Apr 2024 20:49:59 +0200
Subject: Adding debian version 6.1.76-1.

Signed-off-by: Daniel Baumann
---
 ...5-revert-softirq-Let-ksoftirqd-do-its-job.patch | 108 +++++++++++++++++++++
 1 file changed, 108 insertions(+)
 create mode 100644 debian/patches-rt/0055-revert-softirq-Let-ksoftirqd-do-its-job.patch

(limited to 'debian/patches-rt/0055-revert-softirq-Let-ksoftirqd-do-its-job.patch')

diff --git a/debian/patches-rt/0055-revert-softirq-Let-ksoftirqd-do-its-job.patch b/debian/patches-rt/0055-revert-softirq-Let-ksoftirqd-do-its-job.patch
new file mode 100644
index 000000000..c9ed7f6f6
--- /dev/null
+++ b/debian/patches-rt/0055-revert-softirq-Let-ksoftirqd-do-its-job.patch
@@ -0,0 +1,108 @@
+From fcdb9b29c6d58895a386ac23229564fad2c316b5 Mon Sep 17 00:00:00 2001
+From: Paolo Abeni
+Date: Mon, 8 May 2023 08:17:44 +0200
+Subject: [PATCH 55/62] revert: "softirq: Let ksoftirqd do its job"
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.1/older/patches-6.1.69-rt21.tar.xz
+
+Due to the mentioned commit, when the ksoftirqd processes take charge
+of softirq processing, the system can experience high latencies.
+
+In the past a few workarounds have been implemented for specific
+side-effects of the above:
+
+commit 1ff688209e2e ("watchdog: core: make sure the watchdog_worker is not deferred")
+commit 8d5755b3f77b ("watchdog: softdog: fire watchdog even if softirqs do not get to run")
+commit 217f69743681 ("net: busy-poll: allow preemption in sk_busy_loop()")
+commit 3c53776e29f8 ("Mark HI and TASKLET softirq synchronous")
+
+but the latency problem still exists in real-life workloads, see the
+link below.
+
+The reverted commit intended to solve a live-lock scenario that can now
+be addressed with the NAPI threaded mode, introduced with commit
+29863d41bb6e ("net: implement threaded-able napi poll loop support"),
+and nowadays in a pretty stable status.
+
+While a complete solution to put softirq processing under nice resource
+control would be preferable, that has proven to be a very hard task. In
+the short term, remove the main pain point, and also simplify a bit the
+current softirq implementation.
+
+Note that this change also reverts commit 3c53776e29f8 ("Mark HI and
+TASKLET softirq synchronous") and commit 1342d8080f61 ("softirq: Don't
+skip softirq execution when softirq thread is parking"), which are
+direct follow-ups of the feature commit. A single change is preferred to
+avoid known bad intermediate states introduced by a patch series
+reverting them individually.
+
+Link: https://lore.kernel.org/netdev/305d7742212cbe98621b16be782b0562f1012cb6.camel@redhat.com/
+Signed-off-by: Paolo Abeni
+Tested-by: Jason Xing
+Reviewed-by: Jakub Kicinski
+Reviewed-by: Eric Dumazet
+Reviewed-by: Sebastian Andrzej Siewior
+Link: https://lore.kernel.org/r/57e66b364f1b6f09c9bc0316742c3b14f4ce83bd.1683526542.git.pabeni@redhat.com
+Signed-off-by: Sebastian Andrzej Siewior
+(cherry picked from commit b8a04a538ed4755dc97c403ee3b8dd882955c98c)
+Signed-off-by: Clark Williams
+---
+ kernel/softirq.c | 22 ++--------------------
+ 1 file changed, 2 insertions(+), 20 deletions(-)
+
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index 82f3e68fbe22..af9e879bbbf7 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -80,21 +80,6 @@ static void wakeup_softirqd(void)
+ wake_up_process(tsk);
+ }
+
+-/*
+- * If ksoftirqd is scheduled, we do not want to process pending softirqs
+- * right now. Let ksoftirqd handle this at its own rate, to get fairness,
+- * unless we're doing some of the synchronous softirqs.
+- */
+-#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
+-static bool ksoftirqd_running(unsigned long pending)
+-{
+- struct task_struct *tsk = __this_cpu_read(ksoftirqd);
+-
+- if (pending & SOFTIRQ_NOW_MASK)
+- return false;
+- return tsk && task_is_running(tsk) && !__kthread_should_park(tsk);
+-}
+-
+ #ifdef CONFIG_TRACE_IRQFLAGS
+ DEFINE_PER_CPU(int, hardirqs_enabled);
+ DEFINE_PER_CPU(int, hardirq_context);
+@@ -236,7 +221,7 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
+ goto out;
+
+ pending = local_softirq_pending();
+- if (!pending || ksoftirqd_running(pending))
++ if (!pending)
+ goto out;
+
+ /*
+@@ -432,9 +417,6 @@ static inline bool should_wake_ksoftirqd(void)
+
+ static inline void invoke_softirq(void)
+ {
+- if (ksoftirqd_running(local_softirq_pending()))
+- return;
+-
+ if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {
+ #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
+ /*
+@@ -468,7 +450,7 @@ asmlinkage __visible void do_softirq(void)
+
+ pending = local_softirq_pending();
+
+- if (pending && !ksoftirqd_running(pending))
++ if (pending)
+ do_softirq_own_stack();
+
+ local_irq_restore(flags);
+--
+2.43.0
+
--
cgit v1.2.3
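
The commit message above names NAPI threaded mode as the mechanism that now covers the live-lock scenario the reverted ksoftirqd gating was guarding against. As a rough illustration only (it is not part of the patch), threaded NAPI polling can be enabled per network device through its "threaded" sysfs attribute, available since kernel 5.12; the C sketch below assumes an interface named eth0, which is purely a placeholder.

/*
 * Minimal sketch, not part of the patch above: switch one network
 * device's NAPI polling into threaded mode by writing "1" to its
 * sysfs "threaded" attribute. The interface name "eth0" is only an
 * assumed example; substitute the device you actually use.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/class/net/eth0/threaded";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);	/* attribute absent on kernels before 5.12 */
		return 1;
	}
	if (write(fd, "1", 1) != 1) {
		perror("write");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}

Reading the same attribute back reports whether threaded polling is currently active for the device's NAPI instances.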