Diffstat (limited to 'debian/patches-rt/0004-net-Rename-rps_lock-to-backlog_lock.patch')
-rw-r--r--  debian/patches-rt/0004-net-Rename-rps_lock-to-backlog_lock.patch | 164
1 file changed, 164 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0004-net-Rename-rps_lock-to-backlog_lock.patch b/debian/patches-rt/0004-net-Rename-rps_lock-to-backlog_lock.patch
new file mode 100644
index 000000000..70de5b94e
--- /dev/null
+++ b/debian/patches-rt/0004-net-Rename-rps_lock-to-backlog_lock.patch
@@ -0,0 +1,164 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Sat, 9 Mar 2024 10:05:12 +0100
+Subject: [PATCH 4/4] net: Rename rps_lock to backlog_lock.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+
+The rps_lock.*() functions use the inner lock of a sk_buff_head for
+locking. This lock is used if RPS is enabled, otherwise the list is
+accessed lockless and disabling interrupts is enough for the
+synchronisation because it is only accessed CPU-locally. Not only is the
+list protected but also the NAPI state.
+With the addition of backlog threads, the lock is also needed because of
+the cross CPU access even without RPS. The clean up of the defer_list is
+also done via backlog threads (if enabled).
+
+It has been suggested to rename the locking functions since they are no
+longer RPS specific.
+
+Rename the rps_lock*() functions to backlog_lock*().
+
+Suggested-by: Jakub Kicinski <kuba@kernel.org>
+Acked-by: Jakub Kicinski <kuba@kernel.org>
+Link: https://lore.kernel.org/r/20240309090824.2956805-5-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ net/core/dev.c | 34 +++++++++++++++++-----------------
+ 1 file changed, 17 insertions(+), 17 deletions(-)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -242,8 +242,8 @@ static bool use_backlog_threads(void)
+
+ #endif
+
+-static inline void rps_lock_irqsave(struct softnet_data *sd,
+- unsigned long *flags)
++static inline void backlog_lock_irq_save(struct softnet_data *sd,
++ unsigned long *flags)
+ {
+ if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
+ spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
+@@ -251,7 +251,7 @@ static inline void rps_lock_irqsave(stru
+ local_irq_save(*flags);
+ }
+
+-static inline void rps_lock_irq_disable(struct softnet_data *sd)
++static inline void backlog_lock_irq_disable(struct softnet_data *sd)
+ {
+ if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
+ spin_lock_irq(&sd->input_pkt_queue.lock);
+@@ -259,8 +259,8 @@ static inline void rps_lock_irq_disable(
+ local_irq_disable();
+ }
+
+-static inline void rps_unlock_irq_restore(struct softnet_data *sd,
+- unsigned long *flags)
++static inline void backlog_unlock_irq_restore(struct softnet_data *sd,
++ unsigned long *flags)
+ {
+ if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
+ spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
+@@ -268,7 +268,7 @@ static inline void rps_unlock_irq_restor
+ local_irq_restore(*flags);
+ }
+
+-static inline void rps_unlock_irq_enable(struct softnet_data *sd)
++static inline void backlog_unlock_irq_enable(struct softnet_data *sd)
+ {
+ if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
+ spin_unlock_irq(&sd->input_pkt_queue.lock);
+@@ -4758,12 +4758,12 @@ void kick_defer_list_purge(struct softne
+ unsigned long flags;
+
+ if (use_backlog_threads()) {
+- rps_lock_irqsave(sd, &flags);
++ backlog_lock_irq_save(sd, &flags);
+
+ if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
+ __napi_schedule_irqoff(&sd->backlog);
+
+- rps_unlock_irq_restore(sd, &flags);
++ backlog_unlock_irq_restore(sd, &flags);
+
+ } else if (!cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) {
+ smp_call_function_single_async(cpu, &sd->defer_csd);
+@@ -4825,7 +4825,7 @@ static int enqueue_to_backlog(struct sk_
+ reason = SKB_DROP_REASON_NOT_SPECIFIED;
+ sd = &per_cpu(softnet_data, cpu);
+
+- rps_lock_irqsave(sd, &flags);
++ backlog_lock_irq_save(sd, &flags);
+ if (!netif_running(skb->dev))
+ goto drop;
+ qlen = skb_queue_len(&sd->input_pkt_queue);
+@@ -4834,7 +4834,7 @@ static int enqueue_to_backlog(struct sk_
+ enqueue:
+ __skb_queue_tail(&sd->input_pkt_queue, skb);
+ input_queue_tail_incr_save(sd, qtail);
+- rps_unlock_irq_restore(sd, &flags);
++ backlog_unlock_irq_restore(sd, &flags);
+ return NET_RX_SUCCESS;
+ }
+
+@@ -4849,7 +4849,7 @@ static int enqueue_to_backlog(struct sk_
+
+ drop:
+ sd->dropped++;
+- rps_unlock_irq_restore(sd, &flags);
++ backlog_unlock_irq_restore(sd, &flags);
+
+ dev_core_stats_rx_dropped_inc(skb->dev);
+ kfree_skb_reason(skb, reason);
+@@ -5880,7 +5880,7 @@ static void flush_backlog(struct work_st
+ local_bh_disable();
+ sd = this_cpu_ptr(&softnet_data);
+
+- rps_lock_irq_disable(sd);
++ backlog_lock_irq_disable(sd);
+ skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
+ if (skb->dev->reg_state == NETREG_UNREGISTERING) {
+ __skb_unlink(skb, &sd->input_pkt_queue);
+@@ -5888,7 +5888,7 @@ static void flush_backlog(struct work_st
+ input_queue_head_incr(sd);
+ }
+ }
+- rps_unlock_irq_enable(sd);
++ backlog_unlock_irq_enable(sd);
+
+ skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
+ if (skb->dev->reg_state == NETREG_UNREGISTERING) {
+@@ -5906,14 +5906,14 @@ static bool flush_required(int cpu)
+ struct softnet_data *sd = &per_cpu(softnet_data, cpu);
+ bool do_flush;
+
+- rps_lock_irq_disable(sd);
++ backlog_lock_irq_disable(sd);
+
+ /* as insertion into process_queue happens with the rps lock held,
+ * process_queue access may race only with dequeue
+ */
+ do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
+ !skb_queue_empty_lockless(&sd->process_queue);
+- rps_unlock_irq_enable(sd);
++ backlog_unlock_irq_enable(sd);
+
+ return do_flush;
+ #endif
+@@ -6028,7 +6028,7 @@ static int process_backlog(struct napi_s
+
+ }
+
+- rps_lock_irq_disable(sd);
++ backlog_lock_irq_disable(sd);
+ if (skb_queue_empty(&sd->input_pkt_queue)) {
+ /*
+ * Inline a custom version of __napi_complete().
+@@ -6044,7 +6044,7 @@ static int process_backlog(struct napi_s
+ skb_queue_splice_tail_init(&sd->input_pkt_queue,
+ &sd->process_queue);
+ }
+- rps_unlock_irq_enable(sd);
++ backlog_unlock_irq_enable(sd);
+ }
+
+ return work;
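
The helpers renamed above all share one conditional-locking pattern: take the spinlock embedded in input_pkt_queue only when another context (RPS or a backlog thread) can touch the per-CPU backlog, and otherwise fall back to plain interrupt disabling because the queue is only accessed CPU-locally. The stand-alone C program below is a userspace analogy of that pattern, not kernel code: struct backlog, backlog_lock(), the shared flag and the pthread mutex standing in for spin_lock_irqsave()/local_irq_save() are all illustrative assumptions made up for this sketch.

/*
 * Userspace sketch of the conditional-locking idea behind
 * backlog_lock_irq_save(): only take the real lock when some other
 * context ("shared") may access the queue; otherwise rely on purely
 * local exclusion.  Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct backlog {
	pthread_mutex_t lock;	/* stands in for input_pkt_queue.lock      */
	bool shared;		/* stands in for "RPS or backlog threads"   */
	int queued;		/* stands in for the per-CPU packet backlog */
};

/* Lock only when another context can reach the backlog. */
static void backlog_lock(struct backlog *b)
{
	if (b->shared)
		pthread_mutex_lock(&b->lock);
	/* else: only the local context touches the queue, nothing to do */
}

static void backlog_unlock(struct backlog *b)
{
	if (b->shared)
		pthread_mutex_unlock(&b->lock);
}

static void enqueue(struct backlog *b)
{
	backlog_lock(b);
	b->queued++;		/* critical section on the queue */
	backlog_unlock(b);
}

int main(void)
{
	struct backlog b = { .shared = true };	/* pretend backlog threads are on */

	pthread_mutex_init(&b.lock, NULL);
	enqueue(&b);
	printf("queued: %d\n", b.queued);
	return 0;
}

The design point carried over from the kernel helpers is that the cheap path (no lock here, interrupt disabling in the kernel) is only valid as long as nothing outside the local context can reach the queue; enabling RPS or backlog threads breaks that assumption, which is why the check lives inside the lock helpers rather than at every call site.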