summaryrefslogtreecommitdiffstats
path: root/debian/patches-rt/0008-net-softnet_data-Make-xmit-per-task.patch
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-08-07 13:18:09 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-08-07 13:18:13 +0000
commit225809f918c2f2c9c831ea16ddb9b81485af5f34 (patch)
tree5332d51631f39fc96804d8001996f028bbbbdf54 /debian/patches-rt/0008-net-softnet_data-Make-xmit-per-task.patch
parentMerging upstream version 6.10.3. (diff)
downloadlinux-225809f918c2f2c9c831ea16ddb9b81485af5f34.tar.xz
linux-225809f918c2f2c9c831ea16ddb9b81485af5f34.zip
Merging debian version 6.10.3-1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0008-net-softnet_data-Make-xmit-per-task.patch')
-rw-r--r--debian/patches-rt/0008-net-softnet_data-Make-xmit-per-task.patch227
1 files changed, 227 insertions, 0 deletions
diff --git a/debian/patches-rt/0008-net-softnet_data-Make-xmit-per-task.patch b/debian/patches-rt/0008-net-softnet_data-Make-xmit-per-task.patch
new file mode 100644
index 0000000000..24ba5a1789
--- /dev/null
+++ b/debian/patches-rt/0008-net-softnet_data-Make-xmit-per-task.patch
@@ -0,0 +1,227 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 21 Aug 2023 11:47:40 +0200
+Subject: [PATCH 08/15] net: softnet_data: Make xmit per task.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+Softirq is preemptible on PREEMPT_RT. Without a per-CPU lock in
+local_bh_disable() there is no guarantee that only one device is
+transmitting at a time.
+With preemption and multiple senders it is possible that the per-CPU
+`recursion' counter gets incremented by different threads and exceeds
+XMIT_RECURSION_LIMIT leading to a false positive recursion alert.
+The `more' member is subject to similar problems if set by one thread
+for one driver and wrongly used by another driver within another thread.
+
+Instead of adding a lock to protect the per-CPU variable it is simpler
+to make xmit per-task. Sending and receiving skbs happens always
+in thread context anyway.
+
+Having a lock to protect the per-CPU counter would block/serialize two
+sending threads needlessly. It would also require a recursive lock to
+ensure that the owner can increment the counter further.
+
+Make the softnet_data.xmit a task_struct member on PREEMPT_RT. Add
+needed wrapper.
+
+Cc: Ben Segall <bsegall@google.com>
+Cc: Daniel Bristot de Oliveira <bristot@redhat.com>
+Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
+Cc: Juri Lelli <juri.lelli@redhat.com>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Valentin Schneider <vschneid@redhat.com>
+Cc: Vincent Guittot <vincent.guittot@linaro.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/netdevice.h | 42 ++++++++++++++++++++++++++++++-----------
+ include/linux/netdevice_xmit.h | 13 ++++++++++++
+ include/linux/sched.h | 5 +++-
+ net/core/dev.c | 14 +++++++++++++
+ net/core/dev.h | 18 +++++++++++++++++
+ 5 files changed, 80 insertions(+), 12 deletions(-)
+ create mode 100644 include/linux/netdevice_xmit.h
+
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -43,6 +43,7 @@
+
+ #include <linux/netdev_features.h>
+ #include <linux/neighbour.h>
++#include <linux/netdevice_xmit.h>
+ #include <uapi/linux/netdevice.h>
+ #include <uapi/linux/if_bonding.h>
+ #include <uapi/linux/pkt_cls.h>
+@@ -3222,13 +3223,7 @@ struct softnet_data {
+ struct sk_buff_head xfrm_backlog;
+ #endif
+ /* written and read only by owning cpu: */
+- struct {
+- u16 recursion;
+- u8 more;
+-#ifdef CONFIG_NET_EGRESS
+- u8 skip_txqueue;
+-#endif
+- } xmit;
++ struct netdev_xmit xmit;
+ #ifdef CONFIG_RPS
+ /* input_queue_head should be written by cpu owning this struct,
+ * and only read by other cpus. Worth using a cache line.
+@@ -3256,10 +3251,18 @@ struct softnet_data {
+
+ DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
+
++#ifndef CONFIG_PREEMPT_RT
+ static inline int dev_recursion_level(void)
+ {
+ return this_cpu_read(softnet_data.xmit.recursion);
+ }
++#else
++static inline int dev_recursion_level(void)
++{
++ return current->net_xmit.recursion;
++}
++
++#endif
+
+ void __netif_schedule(struct Qdisc *q);
+ void netif_schedule_queue(struct netdev_queue *txq);
+@@ -4874,18 +4877,35 @@ static inline ktime_t netdev_get_tstamp(
+ return hwtstamps->hwtstamp;
+ }
+
+-static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
+- struct sk_buff *skb, struct net_device *dev,
+- bool more)
++#ifndef CONFIG_PREEMPT_RT
++static inline void netdev_xmit_set_more(bool more)
+ {
+ __this_cpu_write(softnet_data.xmit.more, more);
+- return ops->ndo_start_xmit(skb, dev);
+ }
+
+ static inline bool netdev_xmit_more(void)
+ {
+ return __this_cpu_read(softnet_data.xmit.more);
+ }
++#else
++static inline void netdev_xmit_set_more(bool more)
++{
++ current->net_xmit.more = more;
++}
++
++static inline bool netdev_xmit_more(void)
++{
++ return current->net_xmit.more;
++}
++#endif
++
++static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
++ struct sk_buff *skb, struct net_device *dev,
++ bool more)
++{
++ netdev_xmit_set_more(more);
++ return ops->ndo_start_xmit(skb, dev);
++}
+
+ static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
+ struct netdev_queue *txq, bool more)
+--- /dev/null
++++ b/include/linux/netdevice_xmit.h
+@@ -0,0 +1,13 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++#ifndef _LINUX_NETDEVICE_XMIT_H
++#define _LINUX_NETDEVICE_XMIT_H
++
++struct netdev_xmit {
++ u16 recursion;
++ u8 more;
++#ifdef CONFIG_NET_EGRESS
++ u8 skip_txqueue;
++#endif
++};
++
++#endif
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -36,6 +36,7 @@
+ #include <linux/signal_types.h>
+ #include <linux/syscall_user_dispatch_types.h>
+ #include <linux/mm_types_task.h>
++#include <linux/netdevice_xmit.h>
+ #include <linux/task_io_accounting.h>
+ #include <linux/posix-timers_types.h>
+ #include <linux/restart_block.h>
+@@ -981,7 +982,9 @@ struct task_struct {
+ /* delay due to memory thrashing */
+ unsigned in_thrashing:1;
+ #endif
+-
++#ifdef CONFIG_PREEMPT_RT
++ struct netdev_xmit net_xmit;
++#endif
+ unsigned long atomic_flags; /* Flags requiring atomic access. */
+
+ struct restart_block restart_block;
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3940,6 +3940,7 @@ netdev_tx_queue_mapping(struct net_devic
+ return netdev_get_tx_queue(dev, netdev_cap_txqueue(dev, qm));
+ }
+
++#ifndef CONFIG_PREEMPT_RT
+ static bool netdev_xmit_txqueue_skipped(void)
+ {
+ return __this_cpu_read(softnet_data.xmit.skip_txqueue);
+@@ -3950,6 +3951,19 @@ void netdev_xmit_skip_txqueue(bool skip)
+ __this_cpu_write(softnet_data.xmit.skip_txqueue, skip);
+ }
+ EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue);
++
++#else
++static bool netdev_xmit_txqueue_skipped(void)
++{
++ return current->net_xmit.skip_txqueue;
++}
++
++void netdev_xmit_skip_txqueue(bool skip)
++{
++ current->net_xmit.skip_txqueue = skip;
++}
++EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue);
++#endif
+ #endif /* CONFIG_NET_EGRESS */
+
+ #ifdef CONFIG_NET_XGRESS
+--- a/net/core/dev.h
++++ b/net/core/dev.h
+@@ -150,6 +150,8 @@ struct napi_struct *napi_by_id(unsigned
+ void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu);
+
+ #define XMIT_RECURSION_LIMIT 8
++
++#ifndef CONFIG_PREEMPT_RT
+ static inline bool dev_xmit_recursion(void)
+ {
+ return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
+@@ -165,5 +167,21 @@ static inline void dev_xmit_recursion_de
+ {
+ __this_cpu_dec(softnet_data.xmit.recursion);
+ }
++#else
++static inline bool dev_xmit_recursion(void)
++{
++ return unlikely(current->net_xmit.recursion > XMIT_RECURSION_LIMIT);
++}
++
++static inline void dev_xmit_recursion_inc(void)
++{
++ current->net_xmit.recursion++;
++}
++
++static inline void dev_xmit_recursion_dec(void)
++{
++ current->net_xmit.recursion--;
++}
++#endif
+
+ #endif