Diffstat (limited to 'debian/patches-rt/0004-net-Use-nested-BH-locking-for-napi_alloc_cache.patch')
-rw-r--r--  debian/patches-rt/0004-net-Use-nested-BH-locking-for-napi_alloc_cache.patch | 131
1 file changed, 131 insertions, 0 deletions
diff --git a/debian/patches-rt/0004-net-Use-nested-BH-locking-for-napi_alloc_cache.patch b/debian/patches-rt/0004-net-Use-nested-BH-locking-for-napi_alloc_cache.patch
new file mode 100644
index 0000000000..76e8f4e04c
--- /dev/null
+++ b/debian/patches-rt/0004-net-Use-nested-BH-locking-for-napi_alloc_cache.patch
@@ -0,0 +1,131 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 23 Oct 2023 17:07:56 +0200
+Subject: [PATCH 04/15] net: Use nested-BH locking for napi_alloc_cache.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+napi_alloc_cache is a per-CPU variable and relies on disabled BH for its
+locking. Without per-CPU locking in local_bh_disable() on PREEMPT_RT
+this data structure requires explicit locking.
+
+Add a local_lock_t to the data structure and use local_lock_nested_bh()
+for locking. This change adds only lockdep coverage and does not alter
+the functional behaviour for !PREEMPT_RT.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ net/core/skbuff.c | 29 ++++++++++++++++++++++++-----
+ 1 file changed, 24 insertions(+), 5 deletions(-)
+
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -277,6 +277,7 @@ static void *page_frag_alloc_1k(struct p
+ #endif
+ 
+ struct napi_alloc_cache {
++	local_lock_t bh_lock;
+ 	struct page_frag_cache page;
+ 	struct page_frag_1k page_small;
+ 	unsigned int skb_count;
+@@ -284,7 +285,9 @@ struct napi_alloc_cache {
+ };
+ 
+ static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
+-static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
++static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache) = {
++	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
++};
+ 
+ /* Double check that napi_get_frags() allocates skbs with
+  * skb->head being backed by slab, not a page fragment.
+@@ -306,11 +309,16 @@ void napi_get_frags_check(struct napi_st
+ void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
+ {
+ 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
++	void *data;
+ 
+ 	fragsz = SKB_DATA_ALIGN(fragsz);
+ 
+-	return __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC,
++	local_lock_nested_bh(&napi_alloc_cache.bh_lock);
++	data = __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC,
+ 					align_mask);
++	local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
++	return data;
++
+ }
+ EXPORT_SYMBOL(__napi_alloc_frag_align);
+ 
+@@ -338,16 +346,20 @@ static struct sk_buff *napi_skb_cache_ge
+ 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+ 	struct sk_buff *skb;
+ 
++	local_lock_nested_bh(&napi_alloc_cache.bh_lock);
+ 	if (unlikely(!nc->skb_count)) {
+ 		nc->skb_count = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
+ 						      GFP_ATOMIC,
+ 						      NAPI_SKB_CACHE_BULK,
+ 						      nc->skb_cache);
+-		if (unlikely(!nc->skb_count))
++		if (unlikely(!nc->skb_count)) {
++			local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
+ 			return NULL;
++		}
+ 	}
+ 
+ 	skb = nc->skb_cache[--nc->skb_count];
++	local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
+ 	kasan_mempool_unpoison_object(skb, kmem_cache_size(net_hotdata.skbuff_cache));
+ 
+ 	return skb;
+@@ -740,9 +752,13 @@ struct sk_buff *__netdev_alloc_skb(struc
+ 		pfmemalloc = nc->pfmemalloc;
+ 	} else {
+ 		local_bh_disable();
++		local_lock_nested_bh(&napi_alloc_cache.bh_lock);
++
+ 		nc = this_cpu_ptr(&napi_alloc_cache.page);
+ 		data = page_frag_alloc(nc, len, gfp_mask);
+ 		pfmemalloc = nc->pfmemalloc;
++
++		local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
+ 		local_bh_enable();
+ 	}
+ 
+@@ -806,11 +822,11 @@ struct sk_buff *napi_alloc_skb(struct na
+ 		goto skb_success;
+ 	}
+ 
+-	nc = this_cpu_ptr(&napi_alloc_cache);
+-
+ 	if (sk_memalloc_socks())
+ 		gfp_mask |= __GFP_MEMALLOC;
+ 
++	local_lock_nested_bh(&napi_alloc_cache.bh_lock);
++	nc = this_cpu_ptr(&napi_alloc_cache);
+ 	if (NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) {
+ 		/* we are artificially inflating the allocation size, but
+ 		 * that is not as bad as it may look like, as:
+@@ -832,6 +848,7 @@ struct sk_buff *napi_alloc_skb(struct na
+ 		data = page_frag_alloc(&nc->page, len, gfp_mask);
+ 		pfmemalloc = nc->page.pfmemalloc;
+ 	}
++	local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
+ 
+ 	if (unlikely(!data))
+ 		return NULL;
+@@ -1429,6 +1446,7 @@ static void napi_skb_cache_put(struct sk
+ 	if (!kasan_mempool_poison_object(skb))
+ 		return;
+ 
++	local_lock_nested_bh(&napi_alloc_cache.bh_lock);
+ 	nc->skb_cache[nc->skb_count++] = skb;
+ 
+ 	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
+@@ -1440,6 +1458,7 @@ static void napi_skb_cache_put(struct sk
+ 						nc->skb_cache + NAPI_SKB_CACHE_HALF);
+ 		nc->skb_count = NAPI_SKB_CACHE_HALF;
+ 	}
++	local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
+ }
+ 
+ void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason)
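For reference, the locking idiom the patch introduces can be reduced to a minimal, self-contained sketch. The names below (my_cache, my_cache_pop, slot) are hypothetical and exist only for illustration; only local_lock_t, INIT_LOCAL_LOCK(), local_lock_nested_bh() and local_unlock_nested_bh() are the interfaces actually used by the patch above. On !PREEMPT_RT the lock compiles down to lockdep annotations; on PREEMPT_RT it supplies the per-CPU exclusion that local_bh_disable() no longer guarantees.

/* Minimal sketch of the nested-BH local_lock pattern, assuming a per-CPU
 * cache accessed from softirq context (BH already disabled by the caller).
 * "my_cache" and "my_cache_pop" are made-up names for illustration only.
 */
#include <linux/local_lock.h>
#include <linux/percpu.h>

struct my_cache {
	local_lock_t	bh_lock;	/* lockdep-only on !PREEMPT_RT */
	void		*slot;
};

static DEFINE_PER_CPU(struct my_cache, my_cache) = {
	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};

static void *my_cache_pop(void)
{
	struct my_cache *c = this_cpu_ptr(&my_cache);
	void *p;

	/* Serialize against other nested-BH users of this per-CPU data. */
	local_lock_nested_bh(&my_cache.bh_lock);
	p = c->slot;
	c->slot = NULL;
	local_unlock_nested_bh(&my_cache.bh_lock);

	return p;
}

As in the patch, the lock/unlock pair brackets only the accesses to the per-CPU structure, mirroring how napi_skb_cache_get() and napi_skb_cache_put() take the lock around the skb_cache manipulation.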