author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-08 03:21:37 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-08 03:21:37 +0000
commit    06343b27411344fc542f4f3a643f8441aa35252d (patch)
tree      66aa45187c93c350bbdf7e6ae4467a70bf3a8f4c /debian/patches-rt/0339-net-Add-missing-xmit_lock_owner-hunks.patch
parent    Merging upstream version 4.19.260. (diff)
download  linux-06343b27411344fc542f4f3a643f8441aa35252d.tar.xz
          linux-06343b27411344fc542f4f3a643f8441aa35252d.zip
Adding debian version 4.19.260-1. (debian/4.19.260-1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0339-net-Add-missing-xmit_lock_owner-hunks.patch')
-rw-r--r--  debian/patches-rt/0339-net-Add-missing-xmit_lock_owner-hunks.patch  113
1 file changed, 113 insertions, 0 deletions
diff --git a/debian/patches-rt/0339-net-Add-missing-xmit_lock_owner-hunks.patch b/debian/patches-rt/0339-net-Add-missing-xmit_lock_owner-hunks.patch
new file mode 100644
index 000000000..c5bf85313
--- /dev/null
+++ b/debian/patches-rt/0339-net-Add-missing-xmit_lock_owner-hunks.patch
@@ -0,0 +1,113 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 22 Dec 2021 20:35:22 +0100
+Subject: [PATCH 339/342] net: Add missing xmit_lock_owner hunks.
+Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=f9ce408f7c095f80030cfb2f6671eab84a42e643
+
+The patch
+ net: move xmit_recursion to per-task variable on -RT
+
+lost a few hunks during its rebase.
+
+Add the `xmit_lock_owner' accessor/wrapper.
+
+Reported-by: Salvatore Bonaccorso <carnil@debian.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/netdevice.h | 29 +++++++++++++----------------
+ 1 file changed, 13 insertions(+), 16 deletions(-)
+
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index c4713217b20e..bd73117e2636 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -3885,17 +3885,17 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
+ #ifdef CONFIG_PREEMPT_RT_FULL
+ static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
+ {
+- txq->xmit_lock_owner = current;
++ WRITE_ONCE(txq->xmit_lock_owner, current);
+ }
+
+ static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
+ {
+- txq->xmit_lock_owner = NULL;
++ WRITE_ONCE(txq->xmit_lock_owner, NULL);
+ }
+
+ static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
+ {
+- if (txq->xmit_lock_owner != NULL)
++ if (READ_ONCE(txq->xmit_lock_owner) != NULL)
+ return true;
+ return false;
+ }
+@@ -3904,17 +3904,19 @@ static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
+
+ static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
+ {
+- txq->xmit_lock_owner = cpu;
++ /* Pairs with READ_ONCE() in __dev_queue_xmit() */
++ WRITE_ONCE(txq->xmit_lock_owner, cpu);
+ }
+
+ static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
+ {
+- txq->xmit_lock_owner = -1;
++ /* Pairs with READ_ONCE() in __dev_queue_xmit() */
++ WRITE_ONCE(txq->xmit_lock_owner, -1);
+ }
+
+ static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
+ {
+- if (txq->xmit_lock_owner != -1)
++ if (READ_ONCE(txq->xmit_lock_owner) != -1)
+ return true;
+ return false;
+ }
+@@ -3923,8 +3925,7 @@ static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
+ static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
+ {
+ spin_lock(&txq->_xmit_lock);
+- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+- WRITE_ONCE(txq->xmit_lock_owner, cpu);
++ netdev_queue_set_owner(txq, cpu);
+ }
+
+ static inline bool __netif_tx_acquire(struct netdev_queue *txq)
+@@ -3941,8 +3942,7 @@ static inline void __netif_tx_release(struct netdev_queue *txq)
+ static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
+ {
+ spin_lock_bh(&txq->_xmit_lock);
+- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+- WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
++ netdev_queue_set_owner(txq, smp_processor_id());
+ }
+
+ static inline bool __netif_tx_trylock(struct netdev_queue *txq)
+@@ -3950,23 +3950,20 @@ static inline bool __netif_tx_trylock(struct netdev_queue *txq)
+ bool ok = spin_trylock(&txq->_xmit_lock);
+
+ if (likely(ok)) {
+- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+- WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
++ netdev_queue_set_owner(txq, smp_processor_id());
+ }
+ return ok;
+ }
+
+ static inline void __netif_tx_unlock(struct netdev_queue *txq)
+ {
+- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+- WRITE_ONCE(txq->xmit_lock_owner, -1);
++ netdev_queue_clear_owner(txq);
+ spin_unlock(&txq->_xmit_lock);
+ }
+
+ static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
+ {
+- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+- WRITE_ONCE(txq->xmit_lock_owner, -1);
++ netdev_queue_clear_owner(txq);
+ spin_unlock_bh(&txq->_xmit_lock);
+ }
+
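
The accessors restored by this patch exist because __dev_queue_xmit() reads xmit_lock_owner without holding _xmit_lock in order to detect recursive transmission, which is why every store is a WRITE_ONCE() that pairs with that READ_ONCE(). On PREEMPT_RT_FULL the owner is recorded as `current' (a task pointer) rather than a CPU id, since the RT-converted lock can sleep and its holder may be preempted or migrate, so a CPU number no longer identifies it reliably. Below is a minimal user-space sketch of that pattern, not kernel code: the mock_* names are made up for illustration, and C11 relaxed atomics stand in for WRITE_ONCE()/READ_ONCE().

/*
 * Minimal user-space sketch (NOT kernel code) of the pattern the patch
 * restores: the transmit path takes _xmit_lock and publishes an owner
 * (a CPU id here, `current' on PREEMPT_RT_FULL); a lockless check in
 * the xmit path inspects the owner to detect recursion before trying
 * to take the lock again.  All names are illustrative only.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct mock_txq {
	pthread_mutex_t _xmit_lock;
	atomic_int xmit_lock_owner;	/* -1 == unowned, else "cpu" id */
};

/* Counterparts of netdev_queue_set_owner()/netdev_queue_clear_owner(). */
static void mock_queue_set_owner(struct mock_txq *txq, int cpu)
{
	/* Relaxed atomic store standing in for WRITE_ONCE() */
	atomic_store_explicit(&txq->xmit_lock_owner, cpu, memory_order_relaxed);
}

static void mock_queue_clear_owner(struct mock_txq *txq)
{
	atomic_store_explicit(&txq->xmit_lock_owner, -1, memory_order_relaxed);
}

static void mock_tx_lock(struct mock_txq *txq, int cpu)
{
	pthread_mutex_lock(&txq->_xmit_lock);
	mock_queue_set_owner(txq, cpu);		/* owner set only with lock held */
}

static void mock_tx_unlock(struct mock_txq *txq)
{
	mock_queue_clear_owner(txq);		/* cleared before dropping the lock */
	pthread_mutex_unlock(&txq->_xmit_lock);
}

/* Lockless check, analogous to the READ_ONCE() in __dev_queue_xmit(). */
static bool mock_xmit(struct mock_txq *txq, int cpu)
{
	if (atomic_load_explicit(&txq->xmit_lock_owner,
				 memory_order_relaxed) == cpu) {
		fprintf(stderr, "recursion detected on cpu %d\n", cpu);
		return false;	/* taking the lock again would deadlock */
	}
	mock_tx_lock(txq, cpu);
	/* ... hand the packet to the driver ... */
	mock_tx_unlock(txq);
	return true;
}

int main(void)
{
	struct mock_txq txq = {
		._xmit_lock = PTHREAD_MUTEX_INITIALIZER,
		.xmit_lock_owner = -1,
	};

	return mock_xmit(&txq, 0) ? 0 : 1;
}

Routing every lock/trylock/unlock helper through the set/clear accessors, as the hunks above do, keeps the RT (task pointer) and non-RT (CPU id) cases behind one interface instead of open-coding the WRITE_ONCE() at each call site.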