Diffstat (limited to 'drivers/net/ethernet/intel/iavf/iavf_txrx.c')
-rw-r--r-- | drivers/net/ethernet/intel/iavf/iavf_txrx.c | 553
1 file changed, 97 insertions, 456 deletions
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index b71484c87a..26b424fd67 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -2,6 +2,7 @@ /* Copyright(c) 2013 - 2018 Intel Corporation. */ #include <linux/bitfield.h> +#include <linux/net/intel/libie/rx.h> #include <linux/prefetch.h> #include "iavf.h" @@ -184,7 +185,7 @@ void iavf_detect_recover_hung(struct iavf_vsi *vsi) * pending work. */ packets = tx_ring->stats.packets & INT_MAX; - if (tx_ring->tx_stats.prev_pkt_ctr == packets) { + if (tx_ring->prev_pkt_ctr == packets) { iavf_force_wb(vsi, tx_ring->q_vector); continue; } @@ -193,7 +194,7 @@ void iavf_detect_recover_hung(struct iavf_vsi *vsi) * to iavf_get_tx_pending() */ smp_rmb(); - tx_ring->tx_stats.prev_pkt_ctr = + tx_ring->prev_pkt_ctr = iavf_get_tx_pending(tx_ring, true) ? packets : -1; } } @@ -319,7 +320,7 @@ static bool iavf_clean_tx_irq(struct iavf_vsi *vsi, ((j / WB_STRIDE) == 0) && (j > 0) && !test_bit(__IAVF_VSI_DOWN, vsi->state) && (IAVF_DESC_UNUSED(tx_ring) != tx_ring->count)) - tx_ring->arm_wb = true; + tx_ring->flags |= IAVF_TXR_FLAGS_ARM_WB; } /* notify netdev of completed buffers */ @@ -674,7 +675,7 @@ int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring) tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; - tx_ring->tx_stats.prev_pkt_ctr = -1; + tx_ring->prev_pkt_ctr = -1; return 0; err: @@ -689,11 +690,8 @@ err: **/ static void iavf_clean_rx_ring(struct iavf_ring *rx_ring) { - unsigned long bi_size; - u16 i; - /* ring already cleared, nothing to do */ - if (!rx_ring->rx_bi) + if (!rx_ring->rx_fqes) return; if (rx_ring->skb) { @@ -701,41 +699,16 @@ static void iavf_clean_rx_ring(struct iavf_ring *rx_ring) rx_ring->skb = NULL; } - /* Free all the Rx ring sk_buffs */ - for (i = 0; i < rx_ring->count; i++) { - struct iavf_rx_buffer *rx_bi = &rx_ring->rx_bi[i]; + /* Free all the Rx ring buffers */ + for (u32 i = rx_ring->next_to_clean; i != rx_ring->next_to_use; ) { + const struct libeth_fqe *rx_fqes = &rx_ring->rx_fqes[i]; - if (!rx_bi->page) - continue; + page_pool_put_full_page(rx_ring->pp, rx_fqes->page, false); - /* Invalidate cache lines that may have been written to by - * device so that we avoid corrupting memory. 
- */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_bi->dma, - rx_bi->page_offset, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - - /* free resources associated with mapping */ - dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma, - iavf_rx_pg_size(rx_ring), - DMA_FROM_DEVICE, - IAVF_RX_DMA_ATTR); - - __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias); - - rx_bi->page = NULL; - rx_bi->page_offset = 0; + if (unlikely(++i == rx_ring->count)) + i = 0; } - bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count; - memset(rx_ring->rx_bi, 0, bi_size); - - /* Zero out the descriptor ring */ - memset(rx_ring->desc, 0, rx_ring->size); - - rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; } @@ -748,15 +721,22 @@ static void iavf_clean_rx_ring(struct iavf_ring *rx_ring) **/ void iavf_free_rx_resources(struct iavf_ring *rx_ring) { + struct libeth_fq fq = { + .fqes = rx_ring->rx_fqes, + .pp = rx_ring->pp, + }; + iavf_clean_rx_ring(rx_ring); - kfree(rx_ring->rx_bi); - rx_ring->rx_bi = NULL; if (rx_ring->desc) { - dma_free_coherent(rx_ring->dev, rx_ring->size, + dma_free_coherent(rx_ring->pp->p.dev, rx_ring->size, rx_ring->desc, rx_ring->dma); rx_ring->desc = NULL; } + + libeth_rx_fq_destroy(&fq); + rx_ring->rx_fqes = NULL; + rx_ring->pp = NULL; } /** @@ -767,38 +747,46 @@ void iavf_free_rx_resources(struct iavf_ring *rx_ring) **/ int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring) { - struct device *dev = rx_ring->dev; - int bi_size; - - /* warn if we are about to overwrite the pointer */ - WARN_ON(rx_ring->rx_bi); - bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count; - rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL); - if (!rx_ring->rx_bi) - goto err; + struct libeth_fq fq = { + .count = rx_ring->count, + .buf_len = LIBIE_MAX_RX_BUF_LEN, + .nid = NUMA_NO_NODE, + }; + int ret; + + ret = libeth_rx_fq_create(&fq, &rx_ring->q_vector->napi); + if (ret) + return ret; + + rx_ring->pp = fq.pp; + rx_ring->rx_fqes = fq.fqes; + rx_ring->truesize = fq.truesize; + rx_ring->rx_buf_len = fq.buf_len; u64_stats_init(&rx_ring->syncp); /* Round up to nearest 4K */ rx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc); rx_ring->size = ALIGN(rx_ring->size, 4096); - rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + rx_ring->desc = dma_alloc_coherent(fq.pp->p.dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); if (!rx_ring->desc) { - dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n", + dev_info(fq.pp->p.dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n", rx_ring->size); goto err; } - rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; return 0; + err: - kfree(rx_ring->rx_bi); - rx_ring->rx_bi = NULL; + libeth_rx_fq_destroy(&fq); + rx_ring->rx_fqes = NULL; + rx_ring->pp = NULL; + return -ENOMEM; } @@ -811,9 +799,6 @@ static void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val) { rx_ring->next_to_use = val; - /* update next to alloc since we have filled the ring */ - rx_ring->next_to_alloc = val; - /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, @@ -824,69 +809,6 @@ static void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val) } /** - * iavf_rx_offset - Return expected offset into page to access data - * @rx_ring: Ring we are requesting offset of - * - * Returns the offset value for ring into the data buffer. 
- */ -static unsigned int iavf_rx_offset(struct iavf_ring *rx_ring) -{ - return ring_uses_build_skb(rx_ring) ? IAVF_SKB_PAD : 0; -} - -/** - * iavf_alloc_mapped_page - recycle or make a new page - * @rx_ring: ring to use - * @bi: rx_buffer struct to modify - * - * Returns true if the page was successfully allocated or - * reused. - **/ -static bool iavf_alloc_mapped_page(struct iavf_ring *rx_ring, - struct iavf_rx_buffer *bi) -{ - struct page *page = bi->page; - dma_addr_t dma; - - /* since we are recycling buffers we should seldom need to alloc */ - if (likely(page)) { - rx_ring->rx_stats.page_reuse_count++; - return true; - } - - /* alloc new page for storage */ - page = dev_alloc_pages(iavf_rx_pg_order(rx_ring)); - if (unlikely(!page)) { - rx_ring->rx_stats.alloc_page_failed++; - return false; - } - - /* map page for use */ - dma = dma_map_page_attrs(rx_ring->dev, page, 0, - iavf_rx_pg_size(rx_ring), - DMA_FROM_DEVICE, - IAVF_RX_DMA_ATTR); - - /* if mapping failed free memory back to system since - * there isn't much point in holding memory we can't use - */ - if (dma_mapping_error(rx_ring->dev, dma)) { - __free_pages(page, iavf_rx_pg_order(rx_ring)); - rx_ring->rx_stats.alloc_page_failed++; - return false; - } - - bi->dma = dma; - bi->page = page; - bi->page_offset = iavf_rx_offset(rx_ring); - - /* initialize pagecnt_bias to 1 representing we fully own page */ - bi->pagecnt_bias = 1; - - return true; -} - -/** * iavf_receive_skb - Send a completed packet up the stack * @rx_ring: rx ring in play * @skb: packet to send up @@ -916,38 +838,37 @@ static void iavf_receive_skb(struct iavf_ring *rx_ring, **/ bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count) { + const struct libeth_fq_fp fq = { + .pp = rx_ring->pp, + .fqes = rx_ring->rx_fqes, + .truesize = rx_ring->truesize, + .count = rx_ring->count, + }; u16 ntu = rx_ring->next_to_use; union iavf_rx_desc *rx_desc; - struct iavf_rx_buffer *bi; /* do nothing if no valid netdev defined */ if (!rx_ring->netdev || !cleaned_count) return false; rx_desc = IAVF_RX_DESC(rx_ring, ntu); - bi = &rx_ring->rx_bi[ntu]; do { - if (!iavf_alloc_mapped_page(rx_ring, bi)) - goto no_buffers; + dma_addr_t addr; - /* sync the buffer for use by the device */ - dma_sync_single_range_for_device(rx_ring->dev, bi->dma, - bi->page_offset, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); + addr = libeth_rx_alloc(&fq, ntu); + if (addr == DMA_MAPPING_ERROR) + goto no_buffers; /* Refresh the desc even if buffer_addrs didn't change * because each write-back erases this info. 
*/ - rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); + rx_desc->read.pkt_addr = cpu_to_le64(addr); rx_desc++; - bi++; ntu++; if (unlikely(ntu == rx_ring->count)) { rx_desc = IAVF_RX_DESC(rx_ring, 0); - bi = rx_ring->rx_bi; ntu = 0; } @@ -966,6 +887,8 @@ no_buffers: if (rx_ring->next_to_use != ntu) iavf_release_rx_desc(rx_ring, ntu); + rx_ring->rx_stats.alloc_page_failed++; + /* make sure to come back via polling to try again after * allocation failure */ @@ -982,38 +905,30 @@ static void iavf_rx_checksum(struct iavf_vsi *vsi, struct sk_buff *skb, union iavf_rx_desc *rx_desc) { - struct iavf_rx_ptype_decoded decoded; + struct libeth_rx_pt decoded; u32 rx_error, rx_status; bool ipv4, ipv6; u8 ptype; u64 qword; - qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword); - rx_error = FIELD_GET(IAVF_RXD_QW1_ERROR_MASK, qword); - rx_status = FIELD_GET(IAVF_RXD_QW1_STATUS_MASK, qword); - decoded = decode_rx_desc_ptype(ptype); - skb->ip_summed = CHECKSUM_NONE; - skb_checksum_none_assert(skb); + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword); - /* Rx csum enabled and ip headers found? */ - if (!(vsi->netdev->features & NETIF_F_RXCSUM)) + decoded = libie_rx_pt_parse(ptype); + if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded)) return; + rx_error = FIELD_GET(IAVF_RXD_QW1_ERROR_MASK, qword); + rx_status = FIELD_GET(IAVF_RXD_QW1_STATUS_MASK, qword); + /* did the hardware decode the packet and checksum? */ if (!(rx_status & BIT(IAVF_RX_DESC_STATUS_L3L4P_SHIFT))) return; - /* both known and outer_ip must be set for the below code to work */ - if (!(decoded.known && decoded.outer_ip)) - return; - - ipv4 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) && - (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV4); - ipv6 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) && - (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV6); + ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4; + ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6; if (ipv4 && (rx_error & (BIT(IAVF_RX_DESC_ERROR_IPE_SHIFT) | @@ -1037,17 +952,7 @@ static void iavf_rx_checksum(struct iavf_vsi *vsi, if (rx_error & BIT(IAVF_RX_DESC_ERROR_PPRS_SHIFT)) return; - /* Only report checksum unnecessary for TCP, UDP, or SCTP */ - switch (decoded.inner_prot) { - case IAVF_RX_PTYPE_INNER_PROT_TCP: - case IAVF_RX_PTYPE_INNER_PROT_UDP: - case IAVF_RX_PTYPE_INNER_PROT_SCTP: - skb->ip_summed = CHECKSUM_UNNECESSARY; - fallthrough; - default: - break; - } - + skb->ip_summed = CHECKSUM_UNNECESSARY; return; checksum_fail: @@ -1055,29 +960,6 @@ checksum_fail: } /** - * iavf_ptype_to_htype - get a hash type - * @ptype: the ptype value from the descriptor - * - * Returns a hash type to be used by skb_set_hash - **/ -static int iavf_ptype_to_htype(u8 ptype) -{ - struct iavf_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype); - - if (!decoded.known) - return PKT_HASH_TYPE_NONE; - - if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP && - decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4) - return PKT_HASH_TYPE_L4; - else if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP && - decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3) - return PKT_HASH_TYPE_L3; - else - return PKT_HASH_TYPE_L2; -} - -/** * iavf_rx_hash - set the hash value in the skb * @ring: descriptor ring * @rx_desc: specific descriptor @@ -1089,17 +971,19 @@ static void iavf_rx_hash(struct iavf_ring *ring, struct sk_buff *skb, u8 rx_ptype) 
{ + struct libeth_rx_pt decoded; u32 hash; const __le64 rss_mask = cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH << IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT); - if (!(ring->netdev->features & NETIF_F_RXHASH)) + decoded = libie_rx_pt_parse(rx_ptype); + if (!libeth_rx_pt_has_hash(ring->netdev, decoded)) return; if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) { hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); - skb_set_hash(skb, hash, iavf_ptype_to_htype(rx_ptype)); + libeth_rx_pt_set_hash(skb, hash, decoded); } } @@ -1152,95 +1036,9 @@ static bool iavf_cleanup_headers(struct iavf_ring *rx_ring, struct sk_buff *skb) } /** - * iavf_reuse_rx_page - page flip buffer and store it back on the ring - * @rx_ring: rx descriptor ring to store buffers on - * @old_buff: donor buffer to have page reused - * - * Synchronizes page for reuse by the adapter - **/ -static void iavf_reuse_rx_page(struct iavf_ring *rx_ring, - struct iavf_rx_buffer *old_buff) -{ - struct iavf_rx_buffer *new_buff; - u16 nta = rx_ring->next_to_alloc; - - new_buff = &rx_ring->rx_bi[nta]; - - /* update, and store next to alloc */ - nta++; - rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; - - /* transfer page from old buffer to new buffer */ - new_buff->dma = old_buff->dma; - new_buff->page = old_buff->page; - new_buff->page_offset = old_buff->page_offset; - new_buff->pagecnt_bias = old_buff->pagecnt_bias; -} - -/** - * iavf_can_reuse_rx_page - Determine if this page can be reused by - * the adapter for another receive - * - * @rx_buffer: buffer containing the page - * - * If page is reusable, rx_buffer->page_offset is adjusted to point to - * an unused region in the page. - * - * For small pages, @truesize will be a constant value, half the size - * of the memory at page. We'll attempt to alternate between high and - * low halves of the page, with one half ready for use by the hardware - * and the other half being consumed by the stack. We use the page - * ref count to determine whether the stack has finished consuming the - * portion of this page that was passed up with a previous packet. If - * the page ref count is >1, we'll assume the "other" half page is - * still busy, and this page cannot be reused. - * - * For larger pages, @truesize will be the actual space used by the - * received packet (adjusted upward to an even multiple of the cache - * line size). This will advance through the page by the amount - * actually consumed by the received packets while there is still - * space for a buffer. Each region of larger pages will be used at - * most once, after which the page will not be reused. - * - * In either case, if the page is reusable its refcount is increased. - **/ -static bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer) -{ - unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; - struct page *page = rx_buffer->page; - - /* Is any reuse possible? */ - if (!dev_page_is_reusable(page)) - return false; - -#if (PAGE_SIZE < 8192) - /* if we are only owner of page we can reuse it */ - if (unlikely((page_count(page) - pagecnt_bias) > 1)) - return false; -#else -#define IAVF_LAST_OFFSET \ - (SKB_WITH_OVERHEAD(PAGE_SIZE) - IAVF_RXBUFFER_2048) - if (rx_buffer->page_offset > IAVF_LAST_OFFSET) - return false; -#endif - - /* If we have drained the page fragment pool we need to update - * the pagecnt_bias and page count so that we fully restock the - * number of references the driver holds. 
- */ - if (unlikely(!pagecnt_bias)) { - page_ref_add(page, USHRT_MAX); - rx_buffer->pagecnt_bias = USHRT_MAX; - } - - return true; -} - -/** * iavf_add_rx_frag - Add contents of Rx buffer to sk_buff - * @rx_ring: rx descriptor ring to transact packets on - * @rx_buffer: buffer containing page to add * @skb: sk_buff to place the data into + * @rx_buffer: buffer containing page to add * @size: packet length from rx_desc * * This function will add the data contained in rx_buffer->page to the skb. @@ -1248,206 +1046,50 @@ static bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer) * * The function will then update the page offset. **/ -static void iavf_add_rx_frag(struct iavf_ring *rx_ring, - struct iavf_rx_buffer *rx_buffer, - struct sk_buff *skb, +static void iavf_add_rx_frag(struct sk_buff *skb, + const struct libeth_fqe *rx_buffer, unsigned int size) { -#if (PAGE_SIZE < 8192) - unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2; -#else - unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring)); -#endif - - if (!size) - return; + u32 hr = rx_buffer->page->pp->p.offset; skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, - rx_buffer->page_offset, size, truesize); - - /* page is being used so we must update the page offset */ -#if (PAGE_SIZE < 8192) - rx_buffer->page_offset ^= truesize; -#else - rx_buffer->page_offset += truesize; -#endif -} - -/** - * iavf_get_rx_buffer - Fetch Rx buffer and synchronize data for use - * @rx_ring: rx descriptor ring to transact packets on - * @size: size of buffer to add to skb - * - * This function will pull an Rx buffer from the ring and synchronize it - * for use by the CPU. - */ -static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring, - const unsigned int size) -{ - struct iavf_rx_buffer *rx_buffer; - - rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean]; - prefetchw(rx_buffer->page); - if (!size) - return rx_buffer; - - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_buffer->dma, - rx_buffer->page_offset, - size, - DMA_FROM_DEVICE); - - /* We have pulled a buffer for use, so decrement pagecnt_bias */ - rx_buffer->pagecnt_bias--; - - return rx_buffer; -} - -/** - * iavf_construct_skb - Allocate skb and populate it - * @rx_ring: rx descriptor ring to transact packets on - * @rx_buffer: rx buffer to pull data from - * @size: size of buffer to add to skb - * - * This function allocates an skb. It then populates it with the page - * data from the current receive descriptor, taking care to set up the - * skb correctly. 
- */ -static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring, - struct iavf_rx_buffer *rx_buffer, - unsigned int size) -{ - void *va; -#if (PAGE_SIZE < 8192) - unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2; -#else - unsigned int truesize = SKB_DATA_ALIGN(size); -#endif - unsigned int headlen; - struct sk_buff *skb; - - if (!rx_buffer) - return NULL; - /* prefetch first cache line of first page */ - va = page_address(rx_buffer->page) + rx_buffer->page_offset; - net_prefetch(va); - - /* allocate a skb to store the frags */ - skb = __napi_alloc_skb(&rx_ring->q_vector->napi, - IAVF_RX_HDR_SIZE, - GFP_ATOMIC | __GFP_NOWARN); - if (unlikely(!skb)) - return NULL; - - /* Determine available headroom for copy */ - headlen = size; - if (headlen > IAVF_RX_HDR_SIZE) - headlen = eth_get_headlen(skb->dev, va, IAVF_RX_HDR_SIZE); - - /* align pull length to size of long to optimize memcpy performance */ - memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); - - /* update all of the pointers */ - size -= headlen; - if (size) { - skb_add_rx_frag(skb, 0, rx_buffer->page, - rx_buffer->page_offset + headlen, - size, truesize); - - /* buffer is used by skb, update page_offset */ -#if (PAGE_SIZE < 8192) - rx_buffer->page_offset ^= truesize; -#else - rx_buffer->page_offset += truesize; -#endif - } else { - /* buffer is unused, reset bias back to rx_buffer */ - rx_buffer->pagecnt_bias++; - } - - return skb; + rx_buffer->offset + hr, size, rx_buffer->truesize); } /** * iavf_build_skb - Build skb around an existing buffer - * @rx_ring: Rx descriptor ring to transact packets on * @rx_buffer: Rx buffer to pull data from * @size: size of buffer to add to skb * * This function builds an skb around an existing Rx buffer, taking care * to set up the skb correctly and avoid any memcpy overhead. */ -static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring, - struct iavf_rx_buffer *rx_buffer, +static struct sk_buff *iavf_build_skb(const struct libeth_fqe *rx_buffer, unsigned int size) { - void *va; -#if (PAGE_SIZE < 8192) - unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2; -#else - unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + - SKB_DATA_ALIGN(IAVF_SKB_PAD + size); -#endif + u32 hr = rx_buffer->page->pp->p.offset; struct sk_buff *skb; + void *va; - if (!rx_buffer || !size) - return NULL; /* prefetch first cache line of first page */ - va = page_address(rx_buffer->page) + rx_buffer->page_offset; - net_prefetch(va); + va = page_address(rx_buffer->page) + rx_buffer->offset; + net_prefetch(va + hr); /* build an skb around the page buffer */ - skb = napi_build_skb(va - IAVF_SKB_PAD, truesize); + skb = napi_build_skb(va, rx_buffer->truesize); if (unlikely(!skb)) return NULL; + skb_mark_for_recycle(skb); + /* update pointers within the skb to store the data */ - skb_reserve(skb, IAVF_SKB_PAD); + skb_reserve(skb, hr); __skb_put(skb, size); - /* buffer is used by skb, update page_offset */ -#if (PAGE_SIZE < 8192) - rx_buffer->page_offset ^= truesize; -#else - rx_buffer->page_offset += truesize; -#endif - return skb; } /** - * iavf_put_rx_buffer - Clean up used buffer and either recycle or free - * @rx_ring: rx descriptor ring to transact packets on - * @rx_buffer: rx buffer to pull data from - * - * This function will clean up the contents of the rx_buffer. It will - * either recycle the buffer or unmap it and free the associated resources. 
- */ -static void iavf_put_rx_buffer(struct iavf_ring *rx_ring, - struct iavf_rx_buffer *rx_buffer) -{ - if (!rx_buffer) - return; - - if (iavf_can_reuse_rx_page(rx_buffer)) { - /* hand second half of page back to the ring */ - iavf_reuse_rx_page(rx_ring, rx_buffer); - rx_ring->rx_stats.page_reuse_count++; - } else { - /* we are not reusing the buffer so unmap it */ - dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, - iavf_rx_pg_size(rx_ring), - DMA_FROM_DEVICE, IAVF_RX_DMA_ATTR); - __page_frag_cache_drain(rx_buffer->page, - rx_buffer->pagecnt_bias); - } - - /* clear contents of buffer_info */ - rx_buffer->page = NULL; -} - -/** * iavf_is_non_eop - process handling of non-EOP buffers * @rx_ring: Rx ring being processed * @rx_desc: Rx descriptor for current buffer @@ -1500,7 +1142,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) bool failure = false; while (likely(total_rx_packets < (unsigned int)budget)) { - struct iavf_rx_buffer *rx_buffer; + struct libeth_fqe *rx_buffer; union iavf_rx_desc *rx_desc; unsigned int size; u16 vlan_tag = 0; @@ -1535,28 +1177,27 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) size = FIELD_GET(IAVF_RXD_QW1_LENGTH_PBUF_MASK, qword); iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb); - rx_buffer = iavf_get_rx_buffer(rx_ring, size); + + rx_buffer = &rx_ring->rx_fqes[rx_ring->next_to_clean]; + if (!libeth_rx_sync_for_cpu(rx_buffer, size)) + goto skip_data; /* retrieve a buffer from the ring */ if (skb) - iavf_add_rx_frag(rx_ring, rx_buffer, skb, size); - else if (ring_uses_build_skb(rx_ring)) - skb = iavf_build_skb(rx_ring, rx_buffer, size); + iavf_add_rx_frag(skb, rx_buffer, size); else - skb = iavf_construct_skb(rx_ring, rx_buffer, size); + skb = iavf_build_skb(rx_buffer, size); /* exit if we failed to retrieve a buffer */ if (!skb) { rx_ring->rx_stats.alloc_buff_failed++; - if (rx_buffer && size) - rx_buffer->pagecnt_bias++; break; } - iavf_put_rx_buffer(rx_ring, rx_buffer); +skip_data: cleaned_count++; - if (iavf_is_non_eop(rx_ring, rx_desc, skb)) + if (iavf_is_non_eop(rx_ring, rx_desc, skb) || unlikely(!skb)) continue; /* ERR_MASK will only have valid bits if EOP set, and @@ -1743,8 +1384,8 @@ int iavf_napi_poll(struct napi_struct *napi, int budget) clean_complete = false; continue; } - arm_wb |= ring->arm_wb; - ring->arm_wb = false; + arm_wb |= !!(ring->flags & IAVF_TXR_FLAGS_ARM_WB); + ring->flags &= ~IAVF_TXR_FLAGS_ARM_WB; } /* Handle case where we are called by netpoll with a budget of 0 */ |
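For readers unfamiliar with the libeth API this patch adopts, here is a minimal sketch of the fill-queue lifecycle taken from the hunks above: libeth_rx_fq_create() sizes and creates a Page Pool plus an array of struct libeth_fqe, libeth_rx_alloc() returns a DMA address for each descriptor slot on refill, and libeth_rx_fq_destroy() releases everything. The struct example_ring and the example_fq_init()/example_refill()/example_fq_deinit() helpers are hypothetical stand-ins for the iavf_ring fields and functions touched by the diff, not driver code.

```c
#include <linux/net/intel/libie/rx.h>	/* pulls in the libeth Rx helpers */

/* Hypothetical ring, mirroring the fields the patch adds to iavf_ring. */
struct example_ring {
	struct page_pool	*pp;
	struct libeth_fqe	*rx_fqes;
	u32			truesize;
	u32			count;
	u32			next_to_use;
};

/* Queue bring-up: let libeth create and size the Page Pool. */
static int example_fq_init(struct example_ring *ring, struct napi_struct *napi)
{
	struct libeth_fq fq = {
		.count	 = ring->count,
		.buf_len = LIBIE_MAX_RX_BUF_LEN,
		.nid	 = NUMA_NO_NODE,
	};
	int ret;

	ret = libeth_rx_fq_create(&fq, napi);
	if (ret)
		return ret;

	ring->pp	= fq.pp;
	ring->rx_fqes	= fq.fqes;
	ring->truesize	= fq.truesize;

	return 0;
}

/* Refill loop: one Page Pool buffer per descriptor slot. */
static bool example_refill(struct example_ring *ring, __le64 *pkt_addrs, u32 n)
{
	const struct libeth_fq_fp fq = {
		.pp	  = ring->pp,
		.fqes	  = ring->rx_fqes,
		.truesize = ring->truesize,
		.count	  = ring->count,
	};
	u32 ntu = ring->next_to_use;

	while (n--) {
		dma_addr_t addr = libeth_rx_alloc(&fq, ntu);

		if (addr == DMA_MAPPING_ERROR)
			return false;	/* caller counts the allocation failure */

		pkt_addrs[ntu] = cpu_to_le64(addr);
		if (unlikely(++ntu == ring->count))
			ntu = 0;
	}

	ring->next_to_use = ntu;
	return true;
}

/* Queue teardown mirrors bring-up. */
static void example_fq_deinit(struct example_ring *ring)
{
	struct libeth_fq fq = {
		.fqes	= ring->rx_fqes,
		.pp	= ring->pp,
	};

	libeth_rx_fq_destroy(&fq);
	ring->rx_fqes = NULL;
	ring->pp = NULL;
}
```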
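A matching sketch of the Rx hot path after the conversion, continuing the hypothetical helpers above (same includes assumed): libeth_rx_sync_for_cpu() DMA-syncs the fragment for CPU use and filters out empty buffers, after which the skb is built directly on the Page Pool page and marked for recycling. example_rx_one() is illustrative only; the EOP, VLAN and error handling of iavf_clean_rx_irq() is omitted.

```c
/* Build (or extend) an skb from one completed Rx fragment. */
static struct sk_buff *example_rx_one(struct sk_buff *skb,
				      const struct libeth_fqe *fqe,
				      u32 size)
{
	u32 hr = fqe->page->pp->p.offset;	/* headroom reserved by the Page Pool */
	void *va;

	/* Sync the buffer for CPU use; false means nothing usable to consume. */
	if (!libeth_rx_sync_for_cpu(fqe, size))
		return skb;

	if (skb) {
		/* Non-first fragment: attach the page to the existing skb. */
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, fqe->page,
				fqe->offset + hr, size, fqe->truesize);
		return skb;
	}

	/* First fragment: build the skb around the buffer, no memcpy. */
	va = page_address(fqe->page) + fqe->offset;
	net_prefetch(va + hr);

	skb = napi_build_skb(va, fqe->truesize);
	if (unlikely(!skb))
		return NULL;

	/* Let the stack return the page to the Page Pool when the skb is freed. */
	skb_mark_for_recycle(skb);

	skb_reserve(skb, hr);
	__skb_put(skb, size);

	return skb;
}
```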