Diffstat (limited to 'debian/patches-rt/0233-net-core-protect-users-of-napi_alloc_cache-against-r.patch')
-rw-r--r--  debian/patches-rt/0233-net-core-protect-users-of-napi_alloc_cache-against-r.patch  115
1 file changed, 115 insertions, 0 deletions
diff --git a/debian/patches-rt/0233-net-core-protect-users-of-napi_alloc_cache-against-r.patch b/debian/patches-rt/0233-net-core-protect-users-of-napi_alloc_cache-against-r.patch
new file mode 100644
index 000000000..58878036d
--- /dev/null
+++ b/debian/patches-rt/0233-net-core-protect-users-of-napi_alloc_cache-against-r.patch
@@ -0,0 +1,115 @@
+From e8e3e8f9d02d605ac0820c51015b248d4c4a0873 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 15 Jan 2016 16:33:34 +0100
+Subject: [PATCH 233/347] net/core: protect users of napi_alloc_cache against
+ reentrance
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+On -RT, code running in BH cannot be moved to another CPU, so CPU-local
+variables remain local. However, the code can be preempted, and another
+task may then enter BH on the same CPU and access the same
+napi_alloc_cache variable.
+This patch ensures that each user of napi_alloc_cache takes a local lock.
+
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ net/core/skbuff.c | 23 ++++++++++++++++++-----
+ 1 file changed, 18 insertions(+), 5 deletions(-)
+
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 0696d3cb809d..58f78205585c 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -332,6 +332,7 @@ struct napi_alloc_cache {
+ static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
+ static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
+ static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock);
++static DEFINE_LOCAL_IRQ_LOCK(napi_alloc_cache_lock);
+
+ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
+ {
+@@ -363,9 +364,13 @@ EXPORT_SYMBOL(netdev_alloc_frag);
+
+ static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
+ {
+- struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
++ struct napi_alloc_cache *nc;
++ void *data;
+
+- return page_frag_alloc(&nc->page, fragsz, gfp_mask);
++ nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
++ data = page_frag_alloc(&nc->page, fragsz, gfp_mask);
++ put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
++ return data;
+ }
+
+ void *napi_alloc_frag(unsigned int fragsz)
+@@ -468,6 +473,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
+ struct napi_alloc_cache *nc;
+ struct sk_buff *skb;
+ void *data;
++ bool pfmemalloc;
+
+ len += NET_SKB_PAD + NET_IP_ALIGN;
+
+@@ -490,7 +496,10 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
+ if (sk_memalloc_socks())
+ gfp_mask |= __GFP_MEMALLOC;
+
++ nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
+ data = page_frag_alloc(&nc->page, len, gfp_mask);
++ pfmemalloc = nc->page.pfmemalloc;
++ put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
+ if (unlikely(!data))
+ return NULL;
+
+@@ -501,7 +510,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
+ }
+
+ /* use OR instead of assignment to avoid clearing of bits in mask */
+- if (nc->page.pfmemalloc)
++ if (pfmemalloc)
+ skb->pfmemalloc = 1;
+ skb->head_frag = 1;
+
+@@ -733,23 +742,26 @@ void __consume_stateless_skb(struct sk_buff *skb)
+
+ void __kfree_skb_flush(void)
+ {
+- struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
++ struct napi_alloc_cache *nc;
+
++ nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
+ /* flush skb_cache if containing objects */
+ if (nc->skb_count) {
+ kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
+ nc->skb_cache);
+ nc->skb_count = 0;
+ }
++ put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
+ }
+
+ static inline void _kfree_skb_defer(struct sk_buff *skb)
+ {
+- struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
++ struct napi_alloc_cache *nc;
+
+ /* drop skb->head and call any destructors for packet */
+ skb_release_all(skb);
+
++ nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
+ /* record skb to CPU local list */
+ nc->skb_cache[nc->skb_count++] = skb;
+
+@@ -764,6 +776,7 @@ static inline void _kfree_skb_defer(struct sk_buff *skb)
+ nc->skb_cache);
+ nc->skb_count = 0;
+ }
++ put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
+ }
+ void __kfree_skb_defer(struct sk_buff *skb)
+ {
+--
+2.36.1
+
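The pattern the patch applies is the -RT local-lock idiom: every access to the per-CPU napi_alloc_cache is bracketed by get_locked_var()/put_locked_var() on napi_alloc_cache_lock, so a task that preempts BH processing and then enters BH itself on the same CPU blocks on the lock instead of corrupting the cache (note how __napi_alloc_skb() also snapshots nc->page.pfmemalloc into a local bool before dropping the lock). Below is a minimal sketch of that idiom outside the context of this patch; it assumes an -RT kernel that provides linux/locallock.h, and the names my_cache, my_cache_lock and my_cache_bump are hypothetical, not taken from the patch.

/* Hypothetical per-CPU cache protected by the same -RT local-lock idiom. */
#include <linux/percpu.h>
#include <linux/locallock.h>	/* -RT only: DEFINE_LOCAL_IRQ_LOCK() and friends */

struct my_cache {
	unsigned int count;
};

static DEFINE_PER_CPU(struct my_cache, my_cache);
static DEFINE_LOCAL_IRQ_LOCK(my_cache_lock);

static unsigned int my_cache_bump(void)
{
	struct my_cache *c;
	unsigned int ret;

	/*
	 * On -RT this takes a per-CPU lock that a preempting BH user on the
	 * same CPU would block on; on a non-RT build the same macros fall
	 * back to plain per-CPU access with preemption disabled, so the
	 * non-RT fast path is unchanged.
	 */
	c = &get_locked_var(my_cache_lock, my_cache);
	ret = c->count++;
	put_locked_var(my_cache_lock, my_cache);

	return ret;
}

The same design choice shows up in the patch itself: rather than holding the lock across skb setup in __napi_alloc_skb(), it copies the one field it still needs (pfmemalloc) and releases the lock as early as possible.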