From 9f0fc191371843c4fc000a226b0a26b6c059aacd Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sat, 18 May 2024 19:40:19 +0200
Subject: Merging upstream version 6.7.7.

Signed-off-by: Daniel Baumann
---
 net/core/skbuff.c | 66 ++++++++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 61 insertions(+), 5 deletions(-)

(limited to 'net/core/skbuff.c')

diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 011d690291..94cc40a6f7 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -62,6 +62,7 @@
 #include
 #include
 #include
+#include <linux/iov_iter.h>
 #include
 #include
 #include
@@ -847,6 +848,8 @@ EXPORT_SYMBOL(__napi_alloc_skb);
 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
                      int size, unsigned int truesize)
 {
+        DEBUG_NET_WARN_ON_ONCE(size > truesize);
+
         skb_fill_page_desc(skb, i, page, off, size);
         skb->len += size;
         skb->data_len += size;
@@ -859,6 +862,8 @@ void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
 {
         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

+        DEBUG_NET_WARN_ON_ONCE(size > truesize);
+
         skb_frag_size_add(frag, size);
         skb->len += size;
         skb->data_len += size;
@@ -3718,10 +3723,19 @@ EXPORT_SYMBOL(skb_dequeue_tail);
 void skb_queue_purge_reason(struct sk_buff_head *list,
                             enum skb_drop_reason reason)
 {
-        struct sk_buff *skb;
+        struct sk_buff_head tmp;
+        unsigned long flags;

-        while ((skb = skb_dequeue(list)) != NULL)
-                kfree_skb_reason(skb, reason);
+        if (skb_queue_empty_lockless(list))
+                return;
+
+        __skb_queue_head_init(&tmp);
+
+        spin_lock_irqsave(&list->lock, flags);
+        skb_queue_splice_init(list, &tmp);
+        spin_unlock_irqrestore(&list->lock, flags);
+
+        __skb_queue_purge_reason(&tmp, reason);
 }
 EXPORT_SYMBOL(skb_queue_purge_reason);

@@ -4510,7 +4524,7 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
                  * now.
                  * Cap len to not accidentally hit GSO_BY_FRAGS.
                  */
-                partial_segs = min(len, GSO_BY_FRAGS - 1U) / mss;
+                partial_segs = min(len, GSO_BY_FRAGS - 1) / mss;
                 if (partial_segs > 1)
                         mss *= partial_segs;
                 else
@@ -5153,6 +5167,9 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
         bool icmp_next = false;
         unsigned long flags;

+        if (skb_queue_empty_lockless(q))
+                return NULL;
+
         spin_lock_irqsave(&q->lock, flags);
         skb = __skb_dequeue(q);
         if (skb && (skb_next = skb_peek(q))) {
@@ -5752,7 +5769,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
         /* In general, avoid mixing page_pool and non-page_pool allocated
          * pages within the same SKB. Additionally avoid dealing with clones
          * with page_pool pages, in case the SKB is using page_pool fragment
-         * references (PP_FLAG_PAGE_FRAG). Since we only take full page
+         * references (page_pool_alloc_frag()). Since we only take full page
          * references for cloned SKBs at the moment that would result in
          * inconsistent reference counts.
          * In theory we could take full references if @from is cloned and
@@ -6935,3 +6952,42 @@ out:
         return spliced ?: ret;
 }
 EXPORT_SYMBOL(skb_splice_from_iter);
+
+static __always_inline
+size_t memcpy_from_iter_csum(void *iter_from, size_t progress,
+                             size_t len, void *to, void *priv2)
+{
+        __wsum *csum = priv2;
+        __wsum next = csum_partial_copy_nocheck(iter_from, to + progress, len);
+
+        *csum = csum_block_add(*csum, next, progress);
+        return 0;
+}
+
+static __always_inline
+size_t copy_from_user_iter_csum(void __user *iter_from, size_t progress,
+                                size_t len, void *to, void *priv2)
+{
+        __wsum next, *csum = priv2;
+
+        next = csum_and_copy_from_user(iter_from, to + progress, len);
+        *csum = csum_block_add(*csum, next, progress);
+        return next ? 0 : len;
+}
+
+bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
+                                  __wsum *csum, struct iov_iter *i)
+{
+        size_t copied;
+
+        if (WARN_ON_ONCE(!i->data_source))
+                return false;
+        copied = iterate_and_advance2(i, bytes, addr, csum,
+                                      copy_from_user_iter_csum,
+                                      memcpy_from_iter_csum);
+        if (likely(copied == bytes))
+                return true;
+        iov_iter_revert(i, copied);
+        return false;
+}
+EXPORT_SYMBOL(csum_and_copy_from_iter_full);
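
For context, the csum_and_copy_from_iter_full() helper exported at the end of this patch copies bytes from an iov_iter into addr, folds the copied data into *csum via csum_block_add(), and reverts the iterator on a short copy so callers see all-or-nothing behaviour. The sketch below is an illustrative caller, not part of the patch: the function name example_copy_and_csum() is hypothetical, and it assumes the declaration is reachable through linux/skbuff.h after this move (it previously lived in linux/uio.h).

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/uio.h>

/* Hypothetical caller: copy @len bytes out of @from into @to and return
 * the checksum of those bytes through @csum_out.
 */
static int example_copy_and_csum(void *to, size_t len, struct iov_iter *from,
                                 __wsum *csum_out)
{
        __wsum csum = 0;

        /* On a short copy the helper reverts the iterator and returns
         * false, so the caller can simply report -EFAULT.
         */
        if (!csum_and_copy_from_iter_full(to, len, &csum, from))
                return -EFAULT;

        *csum_out = csum;
        return 0;
}

In-tree callers such as ip_generic_getfrag() follow the same pattern and then merge the partial checksum into skb->csum with csum_block_add().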