author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-05-06 01:02:38 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-05-06 01:02:38 +0000
commit     08b74a000942a380fe028845f92cd3a0dee827d5 (patch)
tree       aa78b4e12607c3e1fcce8d5cc42df4330792f118 /debian/patches-rt/0344-net-Add-missing-xmit_lock_owner-hunks.patch
parent     Adding upstream version 4.19.249. (diff)
Adding debian version 4.19.249-2. (debian/4.19.249-2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0344-net-Add-missing-xmit_lock_owner-hunks.patch')
-rw-r--r--  debian/patches-rt/0344-net-Add-missing-xmit_lock_owner-hunks.patch  117
1 file changed, 117 insertions, 0 deletions
diff --git a/debian/patches-rt/0344-net-Add-missing-xmit_lock_owner-hunks.patch b/debian/patches-rt/0344-net-Add-missing-xmit_lock_owner-hunks.patch
new file mode 100644
index 000000000..467bb1739
--- /dev/null
+++ b/debian/patches-rt/0344-net-Add-missing-xmit_lock_owner-hunks.patch
@@ -0,0 +1,117 @@
+From 1a0157a8d087c0c5bdf88ad97e2d4db77bee63aa Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 22 Dec 2021 20:35:22 +0100
+Subject: [PATCH 344/347] net: Add missing xmit_lock_owner hunks.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+The patch
+ net: move xmit_recursion to per-task variable on -RT
+
+lost a few hunks during its rebase.
+
+Add the `xmit_lock_owner' accessor/wrapper.
+
+Reported-by: Salvatore Bonaccorso <carnil@debian.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/netdevice.h | 29 +++++++++++++----------------
+ 1 file changed, 13 insertions(+), 16 deletions(-)
+
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index c4713217b20e..bd73117e2636 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -3885,17 +3885,17 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
+ #ifdef CONFIG_PREEMPT_RT_FULL
+ static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
+ {
+- txq->xmit_lock_owner = current;
++ WRITE_ONCE(txq->xmit_lock_owner, current);
+ }
+
+ static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
+ {
+- txq->xmit_lock_owner = NULL;
++ WRITE_ONCE(txq->xmit_lock_owner, NULL);
+ }
+
+ static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
+ {
+- if (txq->xmit_lock_owner != NULL)
++ if (READ_ONCE(txq->xmit_lock_owner) != NULL)
+ return true;
+ return false;
+ }
+@@ -3904,17 +3904,19 @@ static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
+
+ static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
+ {
+- txq->xmit_lock_owner = cpu;
++ /* Pairs with READ_ONCE() in __dev_queue_xmit() */
++ WRITE_ONCE(txq->xmit_lock_owner, cpu);
+ }
+
+ static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
+ {
+- txq->xmit_lock_owner = -1;
++ /* Pairs with READ_ONCE() in __dev_queue_xmit() */
++ WRITE_ONCE(txq->xmit_lock_owner, -1);
+ }
+
+ static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
+ {
+- if (txq->xmit_lock_owner != -1)
++ if (READ_ONCE(txq->xmit_lock_owner) != -1)
+ return true;
+ return false;
+ }
+@@ -3923,8 +3925,7 @@ static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
+ static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
+ {
+ spin_lock(&txq->_xmit_lock);
+- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+- WRITE_ONCE(txq->xmit_lock_owner, cpu);
++ netdev_queue_set_owner(txq, cpu);
+ }
+
+ static inline bool __netif_tx_acquire(struct netdev_queue *txq)
+@@ -3941,8 +3942,7 @@ static inline void __netif_tx_release(struct netdev_queue *txq)
+ static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
+ {
+ spin_lock_bh(&txq->_xmit_lock);
+- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+- WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
++ netdev_queue_set_owner(txq, smp_processor_id());
+ }
+
+ static inline bool __netif_tx_trylock(struct netdev_queue *txq)
+@@ -3950,23 +3950,20 @@ static inline bool __netif_tx_trylock(struct netdev_queue *txq)
+ bool ok = spin_trylock(&txq->_xmit_lock);
+
+ if (likely(ok)) {
+- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+- WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
++ netdev_queue_set_owner(txq, smp_processor_id());
+ }
+ return ok;
+ }
+
+ static inline void __netif_tx_unlock(struct netdev_queue *txq)
+ {
+- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+- WRITE_ONCE(txq->xmit_lock_owner, -1);
++ netdev_queue_clear_owner(txq);
+ spin_unlock(&txq->_xmit_lock);
+ }
+
+ static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
+ {
+- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+- WRITE_ONCE(txq->xmit_lock_owner, -1);
++ netdev_queue_clear_owner(txq);
+ spin_unlock_bh(&txq->_xmit_lock);
+ }
+
+--
+2.36.1
+
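
For readers unfamiliar with why the accessors pair WRITE_ONCE() with a READ_ONCE() in __dev_queue_xmit(): the xmit_lock_owner field lets the transmit path notice that the queue lock is already held by the current context and bail out instead of deadlocking. The standalone C sketch below only models that pattern in userspace; struct txq, txq_lock(), txq_owned_by() and cpu_id are illustrative names, not kernel APIs, and a pthread mutex plus a relaxed atomic stand in for the spinlock and the ONCE accessors.

/*
 * Userspace sketch of the non-RT ownership scheme the patch wraps behind
 * netdev_queue_set_owner()/netdev_queue_clear_owner()/netdev_queue_has_owner():
 * the lock records the owning CPU id so a re-entrant transmit attempt can be
 * detected before it deadlocks.  Build with: cc -std=c11 -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct txq {
	pthread_mutex_t xmit_lock;	/* stands in for txq->_xmit_lock */
	_Atomic int xmit_lock_owner;	/* -1 == unowned, else owning CPU id */
};

static void txq_set_owner(struct txq *q, int cpu)
{
	/* Mirrors WRITE_ONCE(txq->xmit_lock_owner, cpu). */
	atomic_store_explicit(&q->xmit_lock_owner, cpu, memory_order_relaxed);
}

static void txq_clear_owner(struct txq *q)
{
	/* Mirrors WRITE_ONCE(txq->xmit_lock_owner, -1). */
	atomic_store_explicit(&q->xmit_lock_owner, -1, memory_order_relaxed);
}

static bool txq_owned_by(struct txq *q, int cpu)
{
	/* Mirrors the READ_ONCE(txq->xmit_lock_owner) check in the xmit path. */
	return atomic_load_explicit(&q->xmit_lock_owner,
				    memory_order_relaxed) == cpu;
}

/* Lock as __netif_tx_lock() does: take the lock, then record the owner. */
static void txq_lock(struct txq *q, int cpu)
{
	pthread_mutex_lock(&q->xmit_lock);
	txq_set_owner(q, cpu);
}

/* Unlock as __netif_tx_unlock() does: clear the owner, then drop the lock. */
static void txq_unlock(struct txq *q)
{
	txq_clear_owner(q);
	pthread_mutex_unlock(&q->xmit_lock);
}

int main(void)
{
	struct txq q = { PTHREAD_MUTEX_INITIALIZER, -1 };
	int cpu_id = 0;		/* stand-in for smp_processor_id() */

	txq_lock(&q, cpu_id);	/* first, legitimate acquisition */

	/*
	 * A re-entrant transmit attempt on the same CPU checks the owner
	 * before locking again and bails out instead of deadlocking.
	 */
	if (txq_owned_by(&q, cpu_id))
		fprintf(stderr, "recursion detected: queue already locked on this CPU\n");
	else
		txq_lock(&q, cpu_id);

	txq_unlock(&q);
	return 0;
}

On PREEMPT_RT the same scheme records `current' (a task pointer) instead of a CPU id, since a preemptible task rather than a CPU owns the lock; that is the distinction the restored wrappers hide from their callers.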