From 638a9e433ecd61e64761352dbec1fa4f5874c941 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Wed, 7 Aug 2024 15:18:06 +0200
Subject: Merging upstream version 6.10.3.

Signed-off-by: Daniel Baumann
---
 drivers/net/ethernet/google/gve/gve_rx.c | 138 ++++++++++++++++++++----------
 1 file changed, 90 insertions(+), 48 deletions(-)

(limited to 'drivers/net/ethernet/google/gve/gve_rx.c')

diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 20f5a9e7fa..acb73d4d0d 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -30,6 +30,9 @@ static void gve_rx_unfill_pages(struct gve_priv *priv,
 	u32 slots = rx->mask + 1;
 	int i;
 
+	if (!rx->data.page_info)
+		return;
+
 	if (rx->data.raw_addressing) {
 		for (i = 0; i < slots; i++)
 			gve_rx_free_buffer(&priv->pdev->dev, &rx->data.page_info[i],
@@ -38,8 +41,6 @@ static void gve_rx_unfill_pages(struct gve_priv *priv,
 		for (i = 0; i < slots; i++)
 			page_ref_sub(rx->data.page_info[i].page,
 				     rx->data.page_info[i].pagecnt_bias - 1);
-		gve_unassign_qpl(cfg->qpl_cfg, rx->data.qpl->id);
-		rx->data.qpl = NULL;
 
 		for (i = 0; i < rx->qpl_copy_pool_mask + 1; i++) {
 			page_ref_sub(rx->qpl_copy_pool[i].page,
@@ -51,6 +52,41 @@
 	rx->data.page_info = NULL;
 }
 
+static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
+{
+	ctx->skb_head = NULL;
+	ctx->skb_tail = NULL;
+	ctx->total_size = 0;
+	ctx->frag_cnt = 0;
+	ctx->drop_pkt = false;
+}
+
+static void gve_rx_init_ring_state_gqi(struct gve_rx_ring *rx)
+{
+	rx->desc.seqno = 1;
+	rx->cnt = 0;
+	gve_rx_ctx_clear(&rx->ctx);
+}
+
+static void gve_rx_reset_ring_gqi(struct gve_priv *priv, int idx)
+{
+	struct gve_rx_ring *rx = &priv->rx[idx];
+	const u32 slots = priv->rx_desc_cnt;
+	size_t size;
+
+	/* Reset desc ring */
+	if (rx->desc.desc_ring) {
+		size = slots * sizeof(rx->desc.desc_ring[0]);
+		memset(rx->desc.desc_ring, 0, size);
+	}
+
+	/* Reset q_resources */
+	if (rx->q_resources)
+		memset(rx->q_resources, 0, sizeof(*rx->q_resources));
+
+	gve_rx_init_ring_state_gqi(rx);
+}
+
 void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx)
 {
 	int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
@@ -60,34 +96,48 @@ void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx)
 
 	gve_remove_napi(priv, ntfy_idx);
 	gve_rx_remove_from_block(priv, idx);
+	gve_rx_reset_ring_gqi(priv, idx);
 }
 
-static void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
-				 struct gve_rx_alloc_rings_cfg *cfg)
+void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
+			  struct gve_rx_alloc_rings_cfg *cfg)
 {
 	struct device *dev = &priv->pdev->dev;
 	u32 slots = rx->mask + 1;
 	int idx = rx->q_num;
 	size_t bytes;
+	u32 qpl_id;
 
-	bytes = sizeof(struct gve_rx_desc) * cfg->ring_size;
-	dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus);
-	rx->desc.desc_ring = NULL;
+	if (rx->desc.desc_ring) {
+		bytes = sizeof(struct gve_rx_desc) * cfg->ring_size;
+		dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus);
+		rx->desc.desc_ring = NULL;
+	}
 
-	dma_free_coherent(dev, sizeof(*rx->q_resources),
-			  rx->q_resources, rx->q_resources_bus);
-	rx->q_resources = NULL;
+	if (rx->q_resources) {
+		dma_free_coherent(dev, sizeof(*rx->q_resources),
+				  rx->q_resources, rx->q_resources_bus);
+		rx->q_resources = NULL;
+	}
 
 	gve_rx_unfill_pages(priv, rx, cfg);
 
-	bytes = sizeof(*rx->data.data_ring) * slots;
-	dma_free_coherent(dev, bytes, rx->data.data_ring,
-			  rx->data.data_bus);
-	rx->data.data_ring = NULL;
+	if (rx->data.data_ring) {
+		bytes = sizeof(*rx->data.data_ring) * slots;
+		dma_free_coherent(dev, bytes, rx->data.data_ring,
+				  rx->data.data_bus);
+		rx->data.data_ring = NULL;
+	}
 
 	kvfree(rx->qpl_copy_pool);
 	rx->qpl_copy_pool = NULL;
 
+	if (rx->data.qpl) {
+		qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, idx);
+		gve_free_queue_page_list(priv, rx->data.qpl, qpl_id);
+		rx->data.qpl = NULL;
+	}
+
 	netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
 }
 
@@ -144,14 +194,6 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
 	if (!rx->data.page_info)
 		return -ENOMEM;
 
-	if (!rx->data.raw_addressing) {
-		rx->data.qpl = gve_assign_rx_qpl(cfg, rx->q_num);
-		if (!rx->data.qpl) {
-			kvfree(rx->data.page_info);
-			rx->data.page_info = NULL;
-			return -ENOMEM;
-		}
-	}
 	for (i = 0; i < slots; i++) {
 		if (!rx->data.raw_addressing) {
 			struct page *page = rx->data.qpl->pages[i];
@@ -204,9 +246,6 @@ alloc_err_qpl:
 		page_ref_sub(rx->data.page_info[i].page,
 			     rx->data.page_info[i].pagecnt_bias - 1);
 
-	gve_unassign_qpl(cfg->qpl_cfg, rx->data.qpl->id);
-	rx->data.qpl = NULL;
-
 	return err;
 
 alloc_err_rda:
@@ -217,15 +256,6 @@ alloc_err_rda:
 	return err;
 }
 
-static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
-{
-	ctx->skb_head = NULL;
-	ctx->skb_tail = NULL;
-	ctx->total_size = 0;
-	ctx->frag_cnt = 0;
-	ctx->drop_pkt = false;
-}
-
 void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx)
 {
 	int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
@@ -234,14 +264,16 @@
 	gve_add_napi(priv, ntfy_idx, gve_napi_poll);
 }
 
-static int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
-				 struct gve_rx_alloc_rings_cfg *cfg,
-				 struct gve_rx_ring *rx,
-				 int idx)
+int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
+			  struct gve_rx_alloc_rings_cfg *cfg,
+			  struct gve_rx_ring *rx,
+			  int idx)
 {
 	struct device *hdev = &priv->pdev->dev;
-	u32 slots = priv->rx_data_slot_cnt;
+	u32 slots = cfg->ring_size;
 	int filled_pages;
+	int qpl_page_cnt;
+	u32 qpl_id = 0;
 	size_t bytes;
 	int err;
 
@@ -274,10 +306,22 @@ static int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
 		goto abort_with_slots;
 	}
 
+	if (!rx->data.raw_addressing) {
+		qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
+		qpl_page_cnt = cfg->ring_size;
+
+		rx->data.qpl = gve_alloc_queue_page_list(priv, qpl_id,
+							 qpl_page_cnt);
+		if (!rx->data.qpl) {
+			err = -ENOMEM;
+			goto abort_with_copy_pool;
+		}
+	}
+
 	filled_pages = gve_rx_prefill_pages(rx, cfg);
 	if (filled_pages < 0) {
 		err = -ENOMEM;
-		goto abort_with_copy_pool;
+		goto abort_with_qpl;
 	}
 	rx->fill_cnt = filled_pages;
 	/* Ensure data ring slots (packet buffers) are visible. */
@@ -304,9 +348,8 @@ static int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
 		err = -ENOMEM;
 		goto abort_with_q_resources;
 	}
-	rx->cnt = 0;
 	rx->db_threshold = slots / 2;
-	rx->desc.seqno = 1;
+	gve_rx_init_ring_state_gqi(rx);
 
 	rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
 	gve_rx_ctx_clear(&rx->ctx);
@@ -319,6 +362,11 @@ abort_with_q_resources:
 	rx->q_resources = NULL;
 abort_filled:
 	gve_rx_unfill_pages(priv, rx, cfg);
+abort_with_qpl:
+	if (!rx->data.raw_addressing) {
+		gve_free_queue_page_list(priv, rx->data.qpl, qpl_id);
+		rx->data.qpl = NULL;
+	}
 abort_with_copy_pool:
 	kvfree(rx->qpl_copy_pool);
 	rx->qpl_copy_pool = NULL;
@@ -337,12 +385,6 @@ int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
 	int err = 0;
 	int i, j;
 
-	if (!cfg->raw_addressing && !cfg->qpls) {
-		netif_err(priv, drv, priv->dev,
-			  "Cannot alloc QPL ring before allocing QPLs\n");
-		return -EINVAL;
-	}
-
 	rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
 		      GFP_KERNEL);
 	if (!rx)
-- 
cgit v1.2.3
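
Note on the ownership change in this diff: the ring now allocates its own queue
page list in gve_rx_alloc_ring_gqi() (via gve_alloc_queue_page_list()) and
releases it in gve_rx_free_ring_gqi() (via gve_free_queue_page_list()), instead
of borrowing a preallocated QPL through the removed gve_assign_rx_qpl()/
gve_unassign_qpl() calls. The standalone userspace sketch below illustrates
that pattern only; it is not the driver's code. calloc()/free() stand in for
the DMA and queue-page-list helpers, and every name in it (toy_qpl, toy_ring,
toy_ring_alloc, ...) is hypothetical.

/*
 * Sketch: a ring that owns its QPL, with a goto-unwind chain whose
 * labels mirror abort_with_qpl/abort_with_copy_pool in the patch.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_qpl {
	void **pages;
	int page_cnt;
};

struct toy_ring {
	void *desc_ring;	/* descriptor memory */
	void *copy_pool;	/* staging buffers */
	struct toy_qpl *qpl;	/* owned by the ring, as after this patch */
};

static struct toy_qpl *toy_alloc_qpl(int page_cnt)
{
	struct toy_qpl *qpl = calloc(1, sizeof(*qpl));

	if (!qpl)
		return NULL;
	qpl->pages = calloc(page_cnt, sizeof(void *));
	if (!qpl->pages) {
		free(qpl);
		return NULL;
	}
	qpl->page_cnt = page_cnt;
	return qpl;
}

static void toy_free_qpl(struct toy_qpl *qpl)
{
	if (!qpl)		/* tolerate a never-allocated QPL */
		return;
	free(qpl->pages);
	free(qpl);
}

/* Every branch is NULL-guarded, so freeing a partially built ring is safe. */
static void toy_ring_free(struct toy_ring *rx)
{
	free(rx->desc_ring);
	rx->desc_ring = NULL;
	free(rx->copy_pool);
	rx->copy_pool = NULL;
	toy_free_qpl(rx->qpl);
	rx->qpl = NULL;
}

static int toy_ring_alloc(struct toy_ring *rx, int slots)
{
	memset(rx, 0, sizeof(*rx));

	rx->copy_pool = calloc(slots, 64);
	if (!rx->copy_pool)
		return -1;

	/* The ring allocates its own QPL rather than borrowing one. */
	rx->qpl = toy_alloc_qpl(slots);
	if (!rx->qpl)
		goto abort_with_copy_pool;

	rx->desc_ring = calloc(slots, 32);
	if (!rx->desc_ring)
		goto abort_with_qpl;
	return 0;

abort_with_qpl:
	toy_free_qpl(rx->qpl);
	rx->qpl = NULL;
abort_with_copy_pool:
	free(rx->copy_pool);
	rx->copy_pool = NULL;
	return -1;
}

int main(void)
{
	struct toy_ring rx;

	if (toy_ring_alloc(&rx, 1024))
		return 1;
	toy_ring_free(&rx);
	toy_ring_free(&rx);	/* second free is a harmless no-op */
	puts("ok");
	return 0;
}

Tying allocation and release to the ring itself means each queue can be torn
down and rebuilt independently, which is presumably why the NULL guards appear
throughout the free paths: free must be safe on a ring in any state.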
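The diff also separates "stop" from "free": gve_rx_stop_ring_gqi() now calls
the new gve_rx_reset_ring_gqi(), which zeroes the descriptor memory (if
allocated) and reinitializes the software state (seqno = 1, cnt = 0) so a
stopped ring can be restarted without a full free/alloc cycle. A minimal
sketch of that reset-vs-free split follows, again with hypothetical names
(toy_rx_ring, toy_rx_reset, TOY_DESC_SIZE) and plain memory in place of
DMA-coherent buffers.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define TOY_DESC_SIZE 32	/* stand-in for a fixed descriptor size */

struct toy_rx_ring {
	unsigned char *desc_ring;	/* may be NULL before allocation */
	unsigned int slots;
	unsigned int seqno;		/* next expected descriptor seqno */
	unsigned int cnt;		/* descriptors consumed so far */
};

/* Mirrors gve_rx_init_ring_state_gqi(): software state only. */
static void toy_rx_init_state(struct toy_rx_ring *rx)
{
	rx->seqno = 1;	/* sequence numbers restart at 1 */
	rx->cnt = 0;
}

/* Mirrors gve_rx_reset_ring_gqi(): zero ring memory if it exists,
 * then reinitialize state; safe even on a never-allocated ring. */
static void toy_rx_reset(struct toy_rx_ring *rx)
{
	if (rx->desc_ring)
		memset(rx->desc_ring, 0, rx->slots * TOY_DESC_SIZE);
	toy_rx_init_state(rx);
}

int main(void)
{
	struct toy_rx_ring rx = { .slots = 1024 };

	toy_rx_reset(&rx);		/* no memory yet: still safe */
	rx.desc_ring = calloc(rx.slots, TOY_DESC_SIZE);
	if (!rx.desc_ring)
		return 1;
	rx.cnt = 5;			/* pretend traffic was processed */
	toy_rx_reset(&rx);		/* stop path: ring is reusable */
	printf("seqno=%u cnt=%u\n", rx.seqno, rx.cnt);
	free(rx.desc_ring);
	return 0;
}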