Diffstat (limited to 'debian/patches-rt/0004-net-Rename-rps_lock-to-backlog_lock.patch')
-rw-r--r--   debian/patches-rt/0004-net-Rename-rps_lock-to-backlog_lock.patch | 32
1 file changed, 16 insertions, 16 deletions
diff --git a/debian/patches-rt/0004-net-Rename-rps_lock-to-backlog_lock.patch b/debian/patches-rt/0004-net-Rename-rps_lock-to-backlog_lock.patch
index 70de5b94e2..c40d93a753 100644
--- a/debian/patches-rt/0004-net-Rename-rps_lock-to-backlog_lock.patch
+++ b/debian/patches-rt/0004-net-Rename-rps_lock-to-backlog_lock.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Sat, 9 Mar 2024 10:05:12 +0100
+Date: Mon, 25 Mar 2024 08:40:31 +0100
 Subject: [PATCH 4/4] net: Rename rps_lock to backlog_lock.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
 The rps_lock.*() functions use the inner lock of a sk_buff_head for
 locking. This lock is used if RPS is enabled, otherwise the list is
@@ -19,7 +19,7 @@ Rename the rps_lock*() functions to backlog_lock*().
 Suggested-by: Jakub Kicinski <kuba@kernel.org>
 Acked-by: Jakub Kicinski <kuba@kernel.org>
-Link: https://lore.kernel.org/r/20240309090824.2956805-5-bigeasy@linutronix.de
+Link: https://lore.kernel.org/r/20240325074943.289909-5-bigeasy@linutronix.de
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 ---
  net/core/dev.c | 34 +++++++++++++++++-----------------
@@ -27,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -242,8 +242,8 @@ static bool use_backlog_threads(void)
+@@ -223,8 +223,8 @@ static bool use_backlog_threads(void)
  #endif
@@ -38,7 +38,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  {
  	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
  		spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
-@@ -251,7 +251,7 @@ static inline void rps_lock_irqsave(stru
+@@ -232,7 +232,7 @@ static inline void rps_lock_irqsave(stru
  		local_irq_save(*flags);
  }
@@ -47,7 +47,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  {
  	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
  		spin_lock_irq(&sd->input_pkt_queue.lock);
-@@ -259,8 +259,8 @@ static inline void rps_lock_irq_disable(
+@@ -240,8 +240,8 @@ static inline void rps_lock_irq_disable(
  		local_irq_disable();
  }
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  {
  	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
  		spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
-@@ -268,7 +268,7 @@ static inline void rps_unlock_irq_restor
+@@ -249,7 +249,7 @@ static inline void rps_unlock_irq_restor
  		local_irq_restore(*flags);
  }
@@ -67,7 +67,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  {
  	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
  		spin_unlock_irq(&sd->input_pkt_queue.lock);
-@@ -4758,12 +4758,12 @@ void kick_defer_list_purge(struct softne
+@@ -4733,12 +4733,12 @@ void kick_defer_list_purge(struct softne
  	unsigned long flags;
  	if (use_backlog_threads()) {
@@ -82,7 +82,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  	} else if (!cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) {
  		smp_call_function_single_async(cpu, &sd->defer_csd);
-@@ -4825,7 +4825,7 @@ static int enqueue_to_backlog(struct sk_
+@@ -4800,7 +4800,7 @@ static int enqueue_to_backlog(struct sk_
  	reason = SKB_DROP_REASON_NOT_SPECIFIED;
  	sd = &per_cpu(softnet_data, cpu);
@@ -91,7 +91,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  	if (!netif_running(skb->dev))
  		goto drop;
  	qlen = skb_queue_len(&sd->input_pkt_queue);
-@@ -4834,7 +4834,7 @@ static int enqueue_to_backlog(struct sk_
+@@ -4810,7 +4810,7 @@ static int enqueue_to_backlog(struct sk_
  enqueue:
  			__skb_queue_tail(&sd->input_pkt_queue, skb);
  			input_queue_tail_incr_save(sd, qtail);
@@ -100,7 +100,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  			return NET_RX_SUCCESS;
  		}
-@@ -4849,7 +4849,7 @@ static int enqueue_to_backlog(struct sk_
+@@ -4825,7 +4825,7 @@ static int enqueue_to_backlog(struct sk_
  drop:
  	sd->dropped++;
@@ -109,7 +109,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  	dev_core_stats_rx_dropped_inc(skb->dev);
  	kfree_skb_reason(skb, reason);
-@@ -5880,7 +5880,7 @@ static void flush_backlog(struct work_st
+@@ -5891,7 +5891,7 @@ static void flush_backlog(struct work_st
  	local_bh_disable();
  	sd = this_cpu_ptr(&softnet_data);
@@ -118,7 +118,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
  		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
  			__skb_unlink(skb, &sd->input_pkt_queue);
-@@ -5888,7 +5888,7 @@ static void flush_backlog(struct work_st
+@@ -5899,7 +5899,7 @@ static void flush_backlog(struct work_st
  			input_queue_head_incr(sd);
  		}
  	}
@@ -127,7 +127,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
  		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
-@@ -5906,14 +5906,14 @@ static bool flush_required(int cpu)
+@@ -5917,14 +5917,14 @@ static bool flush_required(int cpu)
  	struct softnet_data *sd = &per_cpu(softnet_data, cpu);
  	bool do_flush;
@@ -136,14 +136,14 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  	return do_flush;
  #endif
-@@ -6028,7 +6028,7 @@ static int process_backlog(struct napi_s
+@@ -6039,7 +6039,7 @@ static int process_backlog(struct napi_s
  		}
@@ -153,7 +153,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  		if (skb_queue_empty(&sd->input_pkt_queue)) {
  			/*
  			 * Inline a custom version of __napi_complete().
-@@ -6044,7 +6044,7 @@ static int process_backlog(struct napi_s
+@@ -6055,7 +6055,7 @@ static int process_backlog(struct napi_s
  		skb_queue_splice_tail_init(&sd->input_pkt_queue, &sd->process_queue);
  	}
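The underlying kernel change is a pure rename: the helpers still take the sk_buff_head inner spinlock only when RPS or backlog threads are in use, and otherwise rely on disabling interrupts on the local CPU, exactly as the commit message quoted above describes. The stand-alone C sketch below illustrates that conditional-locking pattern in user space; the struct, the use_queue_lock flag and the pthread mutex are simplified stand-ins invented for this illustration and are not the kernel API (the real helpers in net/core/dev.c operate on softnet_data and use spin_lock_irqsave()/local_irq_save()).

/*
 * Sketch only: mimics the backlog_lock_irq_save()/backlog_unlock_irq_restore()
 * pattern from the patch above with user-space stand-ins.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for softnet_data with its input_pkt_queue and that queue's inner lock. */
struct backlog {
	pthread_mutex_t queue_lock;	/* plays the role of input_pkt_queue.lock */
	int queued;			/* plays the role of the packet list */
};

/* Stand-in for "IS_ENABLED(CONFIG_RPS) || use_backlog_threads()". */
static bool use_queue_lock = true;

static void backlog_lock(struct backlog *b)
{
	/* Take the queue lock only when RPS/backlog threads need it; the kernel
	 * otherwise just does local_irq_save(), which has no user-space equivalent. */
	if (use_queue_lock)
		pthread_mutex_lock(&b->queue_lock);
}

static void backlog_unlock(struct backlog *b)
{
	if (use_queue_lock)
		pthread_mutex_unlock(&b->queue_lock);
	/* else: local_irq_restore() in the kernel */
}

int main(void)
{
	struct backlog b = { .queue_lock = PTHREAD_MUTEX_INITIALIZER, .queued = 0 };

	backlog_lock(&b);
	b.queued++;		/* enqueue_to_backlog() queues a real skb here */
	backlog_unlock(&b);

	printf("queued=%d\n", b.queued);
	return 0;
}

With either setting of use_queue_lock the observable result is the same; the point of the rename carried by this patch is only that the helper names no longer suggest the lock is RPS-specific.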