From ace9429bb58fd418f0c81d4c2835699bddf6bde6 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Thu, 11 Apr 2024 10:27:49 +0200
Subject: Adding upstream version 6.6.15.

Signed-off-by: Daniel Baumann
---
 net/xdp/xsk_queue.c | 66 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 66 insertions(+)
 create mode 100644 net/xdp/xsk_queue.c

diff --git a/net/xdp/xsk_queue.c b/net/xdp/xsk_queue.c
new file mode 100644
index 0000000000..d2c2640300
--- /dev/null
+++ b/net/xdp/xsk_queue.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0
+/* XDP user-space ring structure
+ * Copyright(c) 2018 Intel Corporation.
+ */
+
+#include <linux/log2.h>
+#include <linux/slab.h>
+#include <linux/overflow.h>
+#include <linux/vmalloc.h>
+#include <net/xdp_sock_drv.h>
+
+#include "xsk_queue.h"
+
+static size_t xskq_get_ring_size(struct xsk_queue *q, bool umem_queue)
+{
+	struct xdp_umem_ring *umem_ring;
+	struct xdp_rxtx_ring *rxtx_ring;
+
+	if (umem_queue)
+		return struct_size(umem_ring, desc, q->nentries);
+	return struct_size(rxtx_ring, desc, q->nentries);
+}
+
+struct xsk_queue *xskq_create(u32 nentries, bool umem_queue)
+{
+	struct xsk_queue *q;
+	size_t size;
+
+	q = kzalloc(sizeof(*q), GFP_KERNEL);
+	if (!q)
+		return NULL;
+
+	q->nentries = nentries;
+	q->ring_mask = nentries - 1;
+
+	size = xskq_get_ring_size(q, umem_queue);
+
+	/* size which is overflowing or close to SIZE_MAX will become 0 in
+	 * PAGE_ALIGN(), checking SIZE_MAX is enough due to the previous
+	 * is_power_of_2(), the rest will be handled by vmalloc_user()
+	 */
+	if (unlikely(size == SIZE_MAX)) {
+		kfree(q);
+		return NULL;
+	}
+
+	size = PAGE_ALIGN(size);
+
+	q->ring = vmalloc_user(size);
+	if (!q->ring) {
+		kfree(q);
+		return NULL;
+	}
+
+	q->ring_vmalloc_size = size;
+	return q;
+}
+
+void xskq_destroy(struct xsk_queue *q)
+{
+	if (!q)
+		return;
+
+	vfree(q->ring);
+	kfree(q);
+}
--
cgit v1.2.3
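
Note on q->ring_mask = nentries - 1: this only works as an index mask because the
caller has already verified that nentries is a power of two (the in-code comment
refers to that earlier is_power_of_2() check, which lives outside this file). A
minimal user-space sketch of the idiom follows; the names and values are
illustrative stand-ins, not the kernel's structures.

/*
 * Sketch: power-of-two ring-mask indexing. When nentries is a power of two,
 * (counter & ring_mask) == (counter % nentries), so free-running producer and
 * consumer counters can be masked straight into a slot without manual wrapping.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t nentries = 8;			/* must be a power of two */
	uint32_t ring_mask = nentries - 1;	/* 0b0111 */
	uint32_t producer = 0;

	assert(nentries && (nentries & (nentries - 1)) == 0);

	for (int i = 0; i < 20; i++) {
		uint32_t slot = producer++ & ring_mask;
		printf("entry %2d -> slot %u\n", i, slot);
	}
	return 0;
}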
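
Note on the SIZE_MAX check in xskq_create(): struct_size() saturates to SIZE_MAX
on overflow, and PAGE_ALIGN() applied to a value within one page of SIZE_MAX
wraps around to 0, so the explicit comparison is what keeps an overflowed size
from turning into a tiny or zero-sized vmalloc_user() request. Below is a
stand-alone user-space sketch of that arithmetic; PAGE_SIZE, PAGE_ALIGN() and
the ring_size() helper here are illustrative stand-ins, not the kernel's
definitions.

/*
 * Sketch: why SIZE_MAX must be caught before page alignment.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* stand-in for struct_size(): header plus n descriptors, saturating on overflow */
static size_t ring_size(size_t hdr, size_t desc, size_t nentries)
{
	if (desc && nentries > (SIZE_MAX - hdr) / desc)
		return SIZE_MAX;	/* saturate, as struct_size() does */
	return hdr + desc * nentries;
}

int main(void)
{
	size_t ok = ring_size(64, 16, 2048);		/* a sane queue */
	size_t bad = ring_size(64, SIZE_MAX / 2, 4);	/* forces the overflow path */

	printf("ok:  %zu -> page-aligned %zu\n", ok, PAGE_ALIGN(ok));
	printf("bad: saturated to SIZE_MAX? %d\n", bad == SIZE_MAX);

	/* without the check, alignment would silently turn the size into 0 */
	printf("PAGE_ALIGN(SIZE_MAX) = %zu\n", (size_t)PAGE_ALIGN((size_t)SIZE_MAX));
	return 0;
}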