author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:59 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:59 +0000
commit     01997497f915e8f79871f3f2acb55ac465051d24 (patch)
tree       1ce1afd7246e1014199e15cbf854bf7924458e5d /debian/patches-rt/0009-net-Avoid-the-IPI-to-free-the.patch
parent     Adding upstream version 6.1.76. (diff)
Adding debian version 6.1.76-1. (tag: debian/6.1.76-1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0009-net-Avoid-the-IPI-to-free-the.patch')
-rw-r--r--   debian/patches-rt/0009-net-Avoid-the-IPI-to-free-the.patch   125
1 file changed, 125 insertions, 0 deletions
diff --git a/debian/patches-rt/0009-net-Avoid-the-IPI-to-free-the.patch b/debian/patches-rt/0009-net-Avoid-the-IPI-to-free-the.patch
new file mode 100644
index 000000000..1f6687796
--- /dev/null
+++ b/debian/patches-rt/0009-net-Avoid-the-IPI-to-free-the.patch
@@ -0,0 +1,125 @@
+From c910f301d71266e18f63407ec6c65d19ae90e779 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 15 Aug 2022 17:29:50 +0200
+Subject: [PATCH 09/62] net: Avoid the IPI to free the
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.1/older/patches-6.1.69-rt21.tar.xz
+
+skb_attempt_defer_free() collects skbs, which were allocated on a
+remote CPU, on a per-CPU list. These skbs are either freed on that
+remote CPU once the CPU enters NET_RX, or a remote IPI function is
+invoked to raise the NET_RX softirq if a threshold of pending skbs
+has been exceeded.
+This remote IPI can cause the wakeup of ksoftirqd on PREEMPT_RT if the
+remote CPU was idle. This is undesired because once ksoftirqd is
+running it will acquire all pending softirqs and they will not be
+executed as part of the threaded interrupt until ksoftirqd goes idle
+again.
+
+To avoid all this, schedule the deferred clean-up from a worker.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/netdevice.h | 4 ++++
+ net/core/dev.c | 37 ++++++++++++++++++++++++++++---------
+ net/core/skbuff.c | 7 ++++++-
+ 3 files changed, 38 insertions(+), 10 deletions(-)
+
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 0373e0935990..55d698367883 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -3169,7 +3169,11 @@ struct softnet_data {
+ int defer_count;
+ int defer_ipi_scheduled;
+ struct sk_buff *defer_list;
++#ifndef CONFIG_PREEMPT_RT
+ call_single_data_t defer_csd;
++#else
++ struct work_struct defer_work;
++#endif
+ };
+
+ static inline void input_queue_head_incr(struct softnet_data *sd)
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 070039f9296c..a3caa23be3cf 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4618,15 +4618,6 @@ static void rps_trigger_softirq(void *data)
+
+ #endif /* CONFIG_RPS */
+
+-/* Called from hardirq (IPI) context */
+-static void trigger_rx_softirq(void *data)
+-{
+- struct softnet_data *sd = data;
+-
+- __raise_softirq_irqoff(NET_RX_SOFTIRQ);
+- smp_store_release(&sd->defer_ipi_scheduled, 0);
+-}
+-
+ /*
+ * Check if this softnet_data structure is another cpu one
+ * If yes, queue it to our IPI list and return 1
+@@ -6684,6 +6675,30 @@ static void skb_defer_free_flush(struct softnet_data *sd)
+ }
+ }
+
++#ifndef CONFIG_PREEMPT_RT
++/* Called from hardirq (IPI) context */
++static void trigger_rx_softirq(void *data)
++{
++ struct softnet_data *sd = data;
++
++ __raise_softirq_irqoff(NET_RX_SOFTIRQ);
++ smp_store_release(&sd->defer_ipi_scheduled, 0);
++}
++
++#else
++
++static void trigger_rx_softirq(struct work_struct *defer_work)
++{
++ struct softnet_data *sd;
++
++ sd = container_of(defer_work, struct softnet_data, defer_work);
++ smp_store_release(&sd->defer_ipi_scheduled, 0);
++ local_bh_disable();
++ skb_defer_free_flush(sd);
++ local_bh_enable();
++}
++#endif
++
+ static __latent_entropy void net_rx_action(struct softirq_action *h)
+ {
+ struct softnet_data *sd = this_cpu_ptr(&softnet_data);
+@@ -11435,7 +11450,11 @@ static int __init net_dev_init(void)
+ INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
+ sd->cpu = i;
+ #endif
++#ifndef CONFIG_PREEMPT_RT
+ INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd);
++#else
++ INIT_WORK(&sd->defer_work, trigger_rx_softirq);
++#endif
+ spin_lock_init(&sd->defer_lock);
+
+ init_gro_hash(&sd->backlog);
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 73b1e0e53534..a457a3445469 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -6680,6 +6680,11 @@ nodefer: __kfree_skb(skb);
+ /* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
+ * if we are unlucky enough (this seems very unlikely).
+ */
+- if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1))
++ if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) {
++#ifndef CONFIG_PREEMPT_RT
+ smp_call_function_single_async(cpu, &sd->defer_csd);
++#else
++ schedule_work_on(cpu, &sd->defer_work);
++#endif
++ }
+ }
+--
+2.43.0
+
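
The PREEMPT_RT side of the patch boils down to one pattern: producers push
deferred items onto a list and kick the consumer at most once per batch,
guarded by a flag claimed with cmpxchg(); the consumer clears the flag and
then drains the list from a schedulable worker rather than a hard-IRQ IPI.
Below is a minimal userspace sketch of that pattern, not the kernel code
itself: every name in it (defer_item(), flush_deferred(), struct item) is
invented for illustration, C11 atomics stand in for cmpxchg() and
smp_store_release(), a pthread stands in for the schedule_work_on() worker,
and printf() stands in for freeing an skb.

/*
 * Userspace sketch of the claim-then-kick deferral pattern.
 * Build with: cc -std=c11 -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct item {
	struct item *next;
	int val;
};

static struct item *_Atomic defer_list;	/* per-CPU defer_list analogue */
static atomic_int defer_scheduled;	/* defer_ipi_scheduled analogue */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t kick = PTHREAD_COND_INITIALIZER;

/* Consumer side, like the RT trigger_rx_softirq(): clear the "scheduled"
 * flag *before* draining, so a producer that queues an item right after
 * the drain starts arms a fresh kick instead of being lost. */
static void flush_deferred(void)
{
	struct item *it;

	atomic_store(&defer_scheduled, 0);
	it = atomic_exchange(&defer_list, NULL);
	while (it) {
		struct item *next = it->next;

		printf("freed deferred item %d\n", it->val);
		free(it);
		it = next;
	}
}

static void *worker(void *unused)
{
	pthread_mutex_lock(&lock);
	for (;;) {
		while (!atomic_load(&defer_scheduled))
			pthread_cond_wait(&kick, &lock);
		flush_deferred();
	}
	return NULL;
}

/* Producer side, like skb_attempt_defer_free(): push onto the list, then
 * wake the worker only if nobody else already did - the same
 * !cmpxchg(&sd->defer_ipi_scheduled, 0, 1) test the patch uses. */
static void defer_item(int val)
{
	struct item *it = malloc(sizeof(*it));
	int expected = 0;

	it->val = val;
	it->next = atomic_load(&defer_list);
	while (!atomic_compare_exchange_weak(&defer_list, &it->next, it))
		;
	if (atomic_compare_exchange_strong(&defer_scheduled, &expected, 1)) {
		pthread_mutex_lock(&lock);
		pthread_cond_signal(&kick);
		pthread_mutex_unlock(&lock);
	}
}

int main(void)
{
	pthread_t thr;
	int i;

	pthread_create(&thr, NULL, worker, NULL);
	for (i = 0; i < 8; i++)
		defer_item(i);
	sleep(1);	/* let the worker drain before exiting */
	return 0;
}

At worst a producer re-arms the flag while a drain is already in flight,
which costs one extra (empty) flush; nothing is ever lost. The point of the
trade in the patch is visible here: the drain runs in a thread that the
scheduler can preempt and prioritise, which is what PREEMPT_RT gains by
swapping smp_call_function_single_async() for schedule_work_on().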