Diffstat (limited to 'debian/patches-rt/0197-net-Use-skbufhead-with-raw-lock.patch')
-rw-r--r--  debian/patches-rt/0197-net-Use-skbufhead-with-raw-lock.patch  148
1 file changed, 148 insertions, 0 deletions
diff --git a/debian/patches-rt/0197-net-Use-skbufhead-with-raw-lock.patch b/debian/patches-rt/0197-net-Use-skbufhead-with-raw-lock.patch
new file mode 100644
index 000000000..f54ab15b6
--- /dev/null
+++ b/debian/patches-rt/0197-net-Use-skbufhead-with-raw-lock.patch
@@ -0,0 +1,148 @@
+From 8c4af88efcc296bc2daa2be90fbea82190799883 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 12 Jul 2011 15:38:34 +0200
+Subject: [PATCH 197/347] net: Use skbufhead with raw lock
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+Use the rps lock as a raw lock so we can keep the irq-off regions; they look
+low latency. However, we can't kfree() from this context, so we defer freeing
+to the softirq and use the tofree_queue list for it (similar to process_queue).
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/netdevice.h | 1 +
+ include/linux/skbuff.h | 7 +++++++
+ net/core/dev.c | 26 ++++++++++++++++++++------
+ 3 files changed, 28 insertions(+), 6 deletions(-)
+
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 8d48b352ee74..d893dc112afc 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -2990,6 +2990,7 @@ struct softnet_data {
+ unsigned int dropped;
+ struct sk_buff_head input_pkt_queue;
+ struct napi_struct backlog;
++ struct sk_buff_head tofree_queue;
+
+ };
+
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index f97734f34746..3ede4f0eac10 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -287,6 +287,7 @@ struct sk_buff_head {
+
+ __u32 qlen;
+ spinlock_t lock;
++ raw_spinlock_t raw_lock;
+ };
+
+ struct sk_buff;
+@@ -1735,6 +1736,12 @@ static inline void skb_queue_head_init(struct sk_buff_head *list)
+ __skb_queue_head_init(list);
+ }
+
++static inline void skb_queue_head_init_raw(struct sk_buff_head *list)
++{
++ raw_spin_lock_init(&list->raw_lock);
++ __skb_queue_head_init(list);
++}
++
+ static inline void skb_queue_head_init_class(struct sk_buff_head *list,
+ struct lock_class_key *class)
+ {
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 315bdaf00ac8..c279375fa5b9 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -219,14 +219,14 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
+ static inline void rps_lock(struct softnet_data *sd)
+ {
+ #ifdef CONFIG_RPS
+- spin_lock(&sd->input_pkt_queue.lock);
++ raw_spin_lock(&sd->input_pkt_queue.raw_lock);
+ #endif
+ }
+
+ static inline void rps_unlock(struct softnet_data *sd)
+ {
+ #ifdef CONFIG_RPS
+- spin_unlock(&sd->input_pkt_queue.lock);
++ raw_spin_unlock(&sd->input_pkt_queue.raw_lock);
+ #endif
+ }
+
+@@ -5858,7 +5858,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
+ while (again) {
+ struct sk_buff *skb;
+
++ local_irq_disable();
+ while ((skb = __skb_dequeue(&sd->process_queue))) {
++ local_irq_enable();
+ rcu_read_lock();
+ __netif_receive_skb(skb);
+ rcu_read_unlock();
+@@ -5866,9 +5868,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
+ if (++work >= quota)
+ return work;
+
++ local_irq_disable();
+ }
+
+- local_irq_disable();
+ rps_lock(sd);
+ if (skb_queue_empty(&sd->input_pkt_queue)) {
+ /*
+@@ -6341,13 +6343,21 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
+ unsigned long time_limit = jiffies +
+ usecs_to_jiffies(netdev_budget_usecs);
+ int budget = netdev_budget;
++ struct sk_buff_head tofree_q;
++ struct sk_buff *skb;
+ LIST_HEAD(list);
+ LIST_HEAD(repoll);
+
++ __skb_queue_head_init(&tofree_q);
++
+ local_irq_disable();
++ skb_queue_splice_init(&sd->tofree_queue, &tofree_q);
+ list_splice_init(&sd->poll_list, &list);
+ local_irq_enable();
+
++ while ((skb = __skb_dequeue(&tofree_q)))
++ kfree_skb(skb);
++
+ for (;;) {
+ struct napi_struct *n;
+
+@@ -9545,10 +9555,13 @@ static int dev_cpu_dead(unsigned int oldcpu)
+ netif_rx_ni(skb);
+ input_queue_head_incr(oldsd);
+ }
+- while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
++ while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
+ netif_rx_ni(skb);
+ input_queue_head_incr(oldsd);
+ }
++ while ((skb = __skb_dequeue(&oldsd->tofree_queue))) {
++ kfree_skb(skb);
++ }
+
+ return 0;
+ }
+@@ -9859,8 +9872,9 @@ static int __init net_dev_init(void)
+
+ INIT_WORK(flush, flush_backlog);
+
+- skb_queue_head_init(&sd->input_pkt_queue);
+- skb_queue_head_init(&sd->process_queue);
++ skb_queue_head_init_raw(&sd->input_pkt_queue);
++ skb_queue_head_init_raw(&sd->process_queue);
++ skb_queue_head_init_raw(&sd->tofree_queue);
+ #ifdef CONFIG_XFRM_OFFLOAD
+ skb_queue_head_init(&sd->xfrm_backlog);
+ #endif
+--
+2.36.1
+
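For readers outside the patch context: the point of the change is that kfree_skb() cannot be called while the raw (irq-off) rps lock is held, so skbs that need freeing are parked on the new sd->tofree_queue and only released later, from softirq context with interrupts enabled. Below is a minimal, hypothetical user-space sketch of that splice-then-free pattern; it uses a pthread spinlock in place of raw_spinlock_t and a plain singly linked list in place of sk_buff_head, and all of its names (defer_free, drain_deferred, struct item) are made up for illustration, not taken from the kernel.

/*
 * Illustrative user-space sketch (not part of the patch): items that cannot
 * be freed while the producer lock is held are queued under the lock and
 * freed only after the lock is dropped, mirroring how the patch drains
 * sd->tofree_queue in net_rx_action() after local_irq_enable().
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
	struct item *next;
	int id;
};

static pthread_spinlock_t queue_lock;   /* stands in for input_pkt_queue.raw_lock */
static struct item *tofree_queue;       /* stands in for sd->tofree_queue */

/* Producer side: runs with the spinlock held (analogous to the irq-off,
 * rps_lock()ed region); it may only enqueue, never free. */
static void defer_free(struct item *it)
{
	pthread_spin_lock(&queue_lock);
	it->next = tofree_queue;
	tofree_queue = it;
	pthread_spin_unlock(&queue_lock);
}

/* Consumer side: splice the whole list out under the lock, then free the
 * items with the lock already dropped. */
static void drain_deferred(void)
{
	struct item *list, *next;

	pthread_spin_lock(&queue_lock);
	list = tofree_queue;
	tofree_queue = NULL;
	pthread_spin_unlock(&queue_lock);

	for (; list; list = next) {
		next = list->next;
		printf("freeing item %d outside the lock\n", list->id);
		free(list);
	}
}

int main(void)
{
	pthread_spin_init(&queue_lock, PTHREAD_PROCESS_PRIVATE);

	for (int i = 0; i < 3; i++) {
		struct item *it = malloc(sizeof(*it));
		if (!it)
			return 1;
		it->id = i;
		defer_free(it);
	}
	drain_deferred();

	pthread_spin_destroy(&queue_lock);
	return 0;
}

Build with: cc -pthread sketch.c. The design point mirrored here is that the lock-held section only moves pointers, so its length stays bounded, while the potentially expensive free work happens with the lock (and, in the kernel case, interrupts) released.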