From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 22 Dec 2021 20:35:22 +0100
Subject: [PATCH 339/351] net: Add missing xmit_lock_owner hunks.
Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=42062a2e27ad3a9da95ef7c4c799c1ca71a6bd66

The patch
	net: move xmit_recursion to per-task variable on -RT

lost a few hunks during its rebase.

Add the `xmit_lock_owner' accessors/wrappers.

Reported-by: Salvatore Bonaccorso <carnil@debian.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
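Note: the WRITE_ONCE()/READ_ONCE() annotations restored below matter
because __dev_queue_xmit() reads xmit_lock_owner without holding
_xmit_lock in order to detect recursive transmission on the same queue.
A rough sketch of that lockless reader in the !PREEMPT_RT_FULL case
(simplified, not the exact mainline code):

	int cpu = smp_processor_id();	/* _xmit_lock is not held here */

	/* Pairs with WRITE_ONCE() in netdev_queue_{set,clear}_owner() */
	if (READ_ONCE(txq->xmit_lock_owner) != cpu) {
		HARD_TX_LOCK(dev, txq, cpu);
		/* ... hand the skb to dev_hard_start_xmit() ... */
		HARD_TX_UNLOCK(dev, txq);
	} else {
		/* This CPU already holds the queue lock: recursion */
		net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
				     dev->name);
	}
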
 include/linux/netdevice.h | 29 +++++++++++++----------------
 1 file changed, 13 insertions(+), 16 deletions(-)

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index c4713217b20e..bd73117e2636 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3885,17 +3885,17 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
 #ifdef CONFIG_PREEMPT_RT_FULL
 static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
 {
-	txq->xmit_lock_owner = current;
+	WRITE_ONCE(txq->xmit_lock_owner, current);
 }
 
 static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
 {
-	txq->xmit_lock_owner = NULL;
+	WRITE_ONCE(txq->xmit_lock_owner, NULL);
 }
 
 static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
 {
-	if (txq->xmit_lock_owner != NULL)
+	if (READ_ONCE(txq->xmit_lock_owner) != NULL)
 		return true;
 	return false;
 }
@@ -3904,17 +3904,19 @@ static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
 
 static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
 {
-	txq->xmit_lock_owner = cpu;
+	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
+	WRITE_ONCE(txq->xmit_lock_owner, cpu);
 }
 
 static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
 {
-	txq->xmit_lock_owner = -1;
+	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
+	WRITE_ONCE(txq->xmit_lock_owner, -1);
 }
 
 static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
 {
-	if (txq->xmit_lock_owner != -1)
+	if (READ_ONCE(txq->xmit_lock_owner) != -1)
 		return true;
 	return false;
 }
@@ -3923,8 +3925,7 @@ static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
 {
 	spin_lock(&txq->_xmit_lock);
-	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
-	WRITE_ONCE(txq->xmit_lock_owner, cpu);
+	netdev_queue_set_owner(txq, cpu);
 }
 
 static inline bool __netif_tx_acquire(struct netdev_queue *txq)
@@ -3941,8 +3942,7 @@ static inline void __netif_tx_release(struct netdev_queue *txq)
 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
 {
 	spin_lock_bh(&txq->_xmit_lock);
-	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
-	WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
+	netdev_queue_set_owner(txq, smp_processor_id());
 }
 
 static inline bool __netif_tx_trylock(struct netdev_queue *txq)
@@ -3950,23 +3950,20 @@ static inline bool __netif_tx_trylock(struct netdev_queue *txq)
 	bool ok = spin_trylock(&txq->_xmit_lock);
 
 	if (likely(ok)) {
-		/* Pairs with READ_ONCE() in __dev_queue_xmit() */
-		WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
+		netdev_queue_set_owner(txq, smp_processor_id());
 	}
 	return ok;
 }
 
 static inline void __netif_tx_unlock(struct netdev_queue *txq)
 {
-	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
-	WRITE_ONCE(txq->xmit_lock_owner, -1);
+	netdev_queue_clear_owner(txq);
 	spin_unlock(&txq->_xmit_lock);
 }
 
 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
 {
-	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
-	WRITE_ONCE(txq->xmit_lock_owner, -1);
+	netdev_queue_clear_owner(txq);
 	spin_unlock_bh(&txq->_xmit_lock);
 }