From 3afb00d3f86d3d924f88b56fa8285d4e9db85852 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Wed, 7 Aug 2024 15:17:52 +0200
Subject: Merging upstream version 6.10.3.

Signed-off-by: Daniel Baumann
---
 net/xdp/xdp_umem.c      |  9 ++++++---
 net/xdp/xsk_buff_pool.c | 29 ++++-------------------------
 2 files changed, 10 insertions(+), 28 deletions(-)

(limited to 'net/xdp')

diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index caa340134b..9f76ca591d 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -151,6 +151,7 @@ static int xdp_umem_account_pages(struct xdp_umem *umem)
 #define XDP_UMEM_FLAGS_VALID ( \
 		XDP_UMEM_UNALIGNED_CHUNK_FLAG | \
 		XDP_UMEM_TX_SW_CSUM | \
+		XDP_UMEM_TX_METADATA_LEN | \
 		0)
 
 static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
@@ -204,8 +205,11 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 	if (headroom >= chunk_size - XDP_PACKET_HEADROOM)
 		return -EINVAL;
 
-	if (mr->tx_metadata_len >= 256 || mr->tx_metadata_len % 8)
-		return -EINVAL;
+	if (mr->flags & XDP_UMEM_TX_METADATA_LEN) {
+		if (mr->tx_metadata_len >= 256 || mr->tx_metadata_len % 8)
+			return -EINVAL;
+		umem->tx_metadata_len = mr->tx_metadata_len;
+	}
 
 	umem->size = size;
 	umem->headroom = headroom;
@@ -215,7 +219,6 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 	umem->pgs = NULL;
 	umem->user = NULL;
 	umem->flags = mr->flags;
-	umem->tx_metadata_len = mr->tx_metadata_len;
 
 	INIT_LIST_HEAD(&umem->xsk_dma_list);
 	refcount_set(&umem->users, 1);
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index ce60ecd48a..c0e0204b96 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -338,7 +338,6 @@ static struct xsk_dma_map *xp_create_dma_map(struct device *dev, struct net_device *netdev,
 
 	dma_map->netdev = netdev;
 	dma_map->dev = dev;
-	dma_map->dma_need_sync = false;
 	dma_map->dma_pages_cnt = nr_pages;
 	refcount_set(&dma_map->users, 1);
 	list_add(&dma_map->list, &umem->xsk_dma_list);
@@ -424,7 +423,6 @@ static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_map)
 
 	pool->dev = dma_map->dev;
 	pool->dma_pages_cnt = dma_map->dma_pages_cnt;
-	pool->dma_need_sync = dma_map->dma_need_sync;
 	memcpy(pool->dma_pages, dma_map->dma_pages,
 	       pool->dma_pages_cnt * sizeof(*pool->dma_pages));
 
@@ -460,8 +458,6 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
 			__xp_dma_unmap(dma_map, attrs);
 			return -ENOMEM;
 		}
-		if (dma_need_sync(dev, dma))
-			dma_map->dma_need_sync = true;
 		dma_map->dma_pages[i] = dma;
 	}
 
@@ -557,11 +553,9 @@ struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
 	xskb->xdp.data_meta = xskb->xdp.data;
 	xskb->xdp.flags = 0;
 
-	if (pool->dma_need_sync) {
-		dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
-						 pool->frame_len,
-						 DMA_BIDIRECTIONAL);
-	}
+	if (pool->dev)
+		xp_dma_sync_for_device(pool, xskb->dma, pool->frame_len);
+
 	return &xskb->xdp;
 }
 EXPORT_SYMBOL(xp_alloc);
@@ -633,7 +627,7 @@ u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
 {
 	u32 nb_entries1 = 0, nb_entries2;
 
-	if (unlikely(pool->dma_need_sync)) {
+	if (unlikely(pool->dev && dma_dev_need_sync(pool->dev))) {
 		struct xdp_buff *buff;
 
 		/* Slow path */
@@ -693,18 +687,3 @@ dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr)
 		(addr & ~PAGE_MASK);
 }
 EXPORT_SYMBOL(xp_raw_get_dma);
-
-void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb)
-{
-	dma_sync_single_range_for_cpu(xskb->pool->dev, xskb->dma, 0,
-				      xskb->pool->frame_len, DMA_BIDIRECTIONAL);
-}
-EXPORT_SYMBOL(xp_dma_sync_for_cpu_slow);
-
-void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
-				 size_t size)
-{
-	dma_sync_single_range_for_device(pool->dev, dma, 0,
-					 size, DMA_BIDIRECTIONAL);
-}
-EXPORT_SYMBOL(xp_dma_sync_for_device_slow);
--
cgit v1.2.3
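
Notes on the two changes above (illustrative sketches, not part of the patch):

The xdp_umem.c hunks make TX metadata opt-in: tx_metadata_len is now honored
only when userspace sets XDP_UMEM_TX_METADATA_LEN in the UMEM flags, rather
than being validated unconditionally. A minimal userspace sketch of
registering a UMEM with this flag; the fd, umem_area and size parameters and
the helper name are assumptions for illustration:

#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/if_xdp.h>

/* Sketch: register a UMEM whose chunks reserve room for TX metadata.
 * With this change, tx_metadata_len is only consumed when the
 * XDP_UMEM_TX_METADATA_LEN flag is set; the value must still be a
 * multiple of 8 and below 256, per the check in xdp_umem_reg().
 */
static int reg_umem_with_tx_metadata(int fd, void *umem_area, uint64_t size)
{
	struct xdp_umem_reg mr;

	memset(&mr, 0, sizeof(mr));
	mr.addr = (uintptr_t)umem_area;
	mr.len = size;
	mr.chunk_size = 4096;
	mr.headroom = 0;
	mr.flags = XDP_UMEM_TX_METADATA_LEN;	/* explicit opt-in */
	mr.tx_metadata_len = sizeof(struct xsk_tx_metadata);

	return setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
}

The xsk_buff_pool.c hunks drop the pool-local dma_need_sync flag and rely on
the DMA core's per-device shortcut instead: xp_alloc() now calls a generic
xp_dma_sync_for_device() helper whenever the pool has a DMA device, and
xp_alloc_batch() tests dma_dev_need_sync() directly. The new helper bodies
are not shown in this hunk; a sketch of what such inline wrappers presumably
look like (assumed location: include/net/xsk_buff_pool.h), given the 6.10
DMA API where dma_sync_single_for_*() already degrades to a no-op when the
device needs no sync:

/* Sketch of the replacement helpers (assumed, not part of this diff).
 * Because dma_sync_single_for_cpu() and dma_sync_single_for_device()
 * now check dma_dev_need_sync() internally, these wrappers can be
 * called unconditionally, which is what makes the per-pool
 * dma_need_sync bookkeeping removed above redundant.
 */
static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
{
	dma_sync_single_for_cpu(xskb->pool->dev, xskb->dma,
				xskb->pool->frame_len, DMA_BIDIRECTIONAL);
}

static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool,
					  dma_addr_t dma, size_t size)
{
	dma_sync_single_for_device(pool->dev, dma, size, DMA_BIDIRECTIONAL);
}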