From 85c675d0d09a45a135bddd15d7b385f8758c32fb Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sat, 18 May 2024 19:35:05 +0200
Subject: Adding upstream version 6.7.7.

Signed-off-by: Daniel Baumann
---
 drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 77 +++++++++++++++++++++---
 1 file changed, 68 insertions(+), 9 deletions(-)

(limited to 'drivers/net/ethernet/pensando/ionic/ionic_txrx.c')

diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 44466e8c5d..ccc1b1d407 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -1243,25 +1243,84 @@ static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
 static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
 {
 	struct ionic_tx_stats *stats = q_to_tx_stats(q);
+	bool too_many_frags = false;
+	skb_frag_t *frag;
+	int desc_bufs;
+	int chunk_len;
+	int frag_rem;
+	int tso_rem;
+	int seg_rem;
+	bool encap;
+	int hdrlen;
 	int ndescs;
 	int err;
 
 	/* Each desc is mss long max, so a descriptor for each gso_seg */
-	if (skb_is_gso(skb))
+	if (skb_is_gso(skb)) {
 		ndescs = skb_shinfo(skb)->gso_segs;
-	else
+	} else {
 		ndescs = 1;
+		if (skb_shinfo(skb)->nr_frags > q->max_sg_elems) {
+			too_many_frags = true;
+			goto linearize;
+		}
+	}
 
-	/* If non-TSO, just need 1 desc and nr_frags sg elems */
-	if (skb_shinfo(skb)->nr_frags <= q->max_sg_elems)
+	/* If non-TSO, or no frags to check, we're done */
+	if (!skb_is_gso(skb) || !skb_shinfo(skb)->nr_frags)
 		return ndescs;
 
-	/* Too many frags, so linearize */
-	err = skb_linearize(skb);
-	if (err)
-		return err;
+	/* We need to scan the skb to be sure that none of the MTU sized
+	 * packets in the TSO will require more sgs per descriptor than we
+	 * can support. We loop through the frags, add up the lengths for
+	 * a packet, and count the number of sgs used per packet.
+	 */
+	tso_rem = skb->len;
+	frag = skb_shinfo(skb)->frags;
+	encap = skb->encapsulation;
+
+	/* start with just hdr in first part of first descriptor */
+	if (encap)
+		hdrlen = skb_inner_tcp_all_headers(skb);
+	else
+		hdrlen = skb_tcp_all_headers(skb);
+	seg_rem = min_t(int, tso_rem, hdrlen + skb_shinfo(skb)->gso_size);
+	frag_rem = hdrlen;
+
+	while (tso_rem > 0) {
+		desc_bufs = 0;
+		while (seg_rem > 0) {
+			desc_bufs++;
+
+			/* We add the +1 because we can take buffers for one
+			 * more than we have SGs: one for the initial desc data
+			 * in addition to the SG segments that might follow.
+			 */
+			if (desc_bufs > q->max_sg_elems + 1) {
+				too_many_frags = true;
+				goto linearize;
+			}
+
+			if (frag_rem == 0) {
+				frag_rem = skb_frag_size(frag);
+				frag++;
+			}
+			chunk_len = min(frag_rem, seg_rem);
+			frag_rem -= chunk_len;
+			tso_rem -= chunk_len;
+			seg_rem -= chunk_len;
+		}
+
+		seg_rem = min_t(int, tso_rem, skb_shinfo(skb)->gso_size);
+	}
 
-	stats->linearize++;
+linearize:
+	if (too_many_frags) {
+		err = skb_linearize(skb);
+		if (err)
+			return err;
+		stats->linearize++;
+	}
 
 	return ndescs;
 }
--
cgit v1.2.3
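
Editor's note: the hunk above adds a scan that carves the TSO payload into MSS-sized segments and counts how many buffers each segment would touch, falling back to skb_linearize() only when some segment needs more than max_sg_elems + 1 buffers. The standalone sketch below re-implements just that counting walk in userspace so the arithmetic can be read and run in isolation. It is an illustrative model under assumed conditions, not driver code: mock_skb, mock_frag, needs_linearize(), and the example frag sizes and MSS are invented here; the real function operates on struct sk_buff and struct ionic_queue as shown in the diff.

/* Standalone model of the buffer-counting walk added in
 * ionic_tx_descs_needed() above.  Not kernel code: struct and function
 * names are invented for this sketch, and the linear (header) area is
 * assumed to be exactly hdrlen bytes with all payload in frags.
 */
#include <stdbool.h>
#include <stdio.h>

struct mock_frag {
	int size;			/* bytes carried by this fragment */
};

struct mock_skb {
	int len;			/* total bytes, headers included */
	int hdrlen;			/* header bytes (inner headers if encapped) */
	int gso_size;			/* MSS: payload bytes per TSO segment */
	int nr_frags;
	const struct mock_frag *frags;
};

static int min_int(int a, int b) { return a < b ? a : b; }

/* Returns true if some segment would need more buffers than
 * max_sg_elems + 1 (one initial buffer plus the SG list), i.e. the
 * case where the driver falls back to skb_linearize().
 */
static bool needs_linearize(const struct mock_skb *skb, int max_sg_elems)
{
	const struct mock_frag *frag = skb->frags;
	int tso_rem = skb->len;
	int seg_rem = min_int(tso_rem, skb->hdrlen + skb->gso_size);
	int frag_rem = skb->hdrlen;	/* headers fill the first buffer */

	while (tso_rem > 0) {
		int desc_bufs = 0;

		while (seg_rem > 0) {
			int chunk_len;

			if (++desc_bufs > max_sg_elems + 1)
				return true;

			if (frag_rem == 0) {	/* move to the next fragment */
				frag_rem = frag->size;
				frag++;
			}
			chunk_len = min_int(frag_rem, seg_rem);
			frag_rem -= chunk_len;
			tso_rem -= chunk_len;
			seg_rem -= chunk_len;
		}
		seg_rem = min_int(tso_rem, skb->gso_size);
	}
	return false;
}

int main(void)
{
	/* Payload spread over many tiny 300-byte frags: with an MSS of
	 * 1448 each segment spans several frags, so a small SG limit
	 * trips the linearize path while a larger one does not.
	 */
	struct mock_frag frags[30];
	struct mock_skb skb = { .hdrlen = 66, .gso_size = 1448,
				.nr_frags = 30, .frags = frags };
	int i;

	skb.len = skb.hdrlen;
	for (i = 0; i < skb.nr_frags; i++) {
		frags[i].size = 300;
		skb.len += frags[i].size;
	}

	printf("max_sg_elems=2:  linearize? %s\n",
	       needs_linearize(&skb, 2) ? "yes" : "no");
	printf("max_sg_elems=15: linearize? %s\n",
	       needs_linearize(&skb, 15) ? "yes" : "no");
	return 0;
}

With these made-up numbers the first segment needs one header buffer plus roughly five frag buffers, so the 2-element SG limit reports "yes" while the 15-element limit reports "no" -- mirroring how the patched driver linearizes only when the scan actually finds an oversubscribed segment instead of whenever nr_frags exceeds max_sg_elems.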