Diffstat (limited to 'src/spdk/dpdk/lib/librte_mbuf')
 src/spdk/dpdk/lib/librte_mbuf/Makefile             |   26 +
 src/spdk/dpdk/lib/librte_mbuf/meson.build          |    9 +
 src/spdk/dpdk/lib/librte_mbuf/rte_mbuf.c           |  944 +
 src/spdk/dpdk/lib/librte_mbuf/rte_mbuf.h           | 2009 +
 src/spdk/dpdk/lib/librte_mbuf/rte_mbuf_core.h      |  768 +
 src/spdk/dpdk/lib/librte_mbuf/rte_mbuf_dyn.c       |  553 +
 src/spdk/dpdk/lib/librte_mbuf/rte_mbuf_dyn.h       |  253 +
 src/spdk/dpdk/lib/librte_mbuf/rte_mbuf_pool_ops.c  |  103 +
 src/spdk/dpdk/lib/librte_mbuf/rte_mbuf_pool_ops.h  |   95 +
 src/spdk/dpdk/lib/librte_mbuf/rte_mbuf_ptype.c     |  209 +
 src/spdk/dpdk/lib/librte_mbuf/rte_mbuf_ptype.h     |  780 +
 src/spdk/dpdk/lib/librte_mbuf/rte_mbuf_version.map |   49 +
12 files changed, 5798 insertions(+), 0 deletions(-)
diff --git a/src/spdk/dpdk/lib/librte_mbuf/Makefile b/src/spdk/dpdk/lib/librte_mbuf/Makefile
new file mode 100644
index 000000000..41ea5496e
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_mbuf/Makefile
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2014 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_mbuf.a
+
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
+
+LDLIBS += -lrte_eal -lrte_mempool
+
+EXPORT_MAP := rte_mbuf_version.map
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_MBUF) := rte_mbuf.c rte_mbuf_ptype.c rte_mbuf_pool_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_MBUF) += rte_mbuf_dyn.c
+
+# install includes
+SYMLINK-$(CONFIG_RTE_LIBRTE_MBUF)-include := rte_mbuf.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_MBUF)-include += rte_mbuf_core.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_MBUF)-include += rte_mbuf_ptype.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_MBUF)-include += rte_mbuf_pool_ops.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_MBUF)-include += rte_mbuf_dyn.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/lib/librte_mbuf/meson.build b/src/spdk/dpdk/lib/librte_mbuf/meson.build
new file mode 100644
index 000000000..e95c770e5
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_mbuf/meson.build
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+sources = files('rte_mbuf.c', 'rte_mbuf_ptype.c', 'rte_mbuf_pool_ops.c',
+        'rte_mbuf_dyn.c')
+headers = files('rte_mbuf.h', 'rte_mbuf_core.h',
+        'rte_mbuf_ptype.h', 'rte_mbuf_pool_ops.h',
+        'rte_mbuf_dyn.h')
+deps += ['mempool']
diff --git a/src/spdk/dpdk/lib/librte_mbuf/rte_mbuf.c b/src/spdk/dpdk/lib/librte_mbuf/rte_mbuf.c
new file mode 100644
index 000000000..220eb2fb0
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_mbuf/rte_mbuf.c
@@ -0,0 +1,944 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation.
+ * Copyright 2014 6WIND S.A.
+ */
+
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <ctype.h>
+#include <sys/queue.h>
+
+#include <rte_compat.h>
+#include <rte_debug.h>
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_mbuf_pool_ops.h>
+#include <rte_string_fns.h>
+#include <rte_hexdump.h>
+#include <rte_errno.h>
+#include <rte_memcpy.h>
+
+/*
+ * pktmbuf pool constructor, given as a callback function to
+ * rte_mempool_create(), or called directly if using
+ * rte_mempool_create_empty()/rte_mempool_populate()
+ */
+void
+rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg)
+{
+    struct rte_pktmbuf_pool_private *user_mbp_priv, *mbp_priv;
+    struct rte_pktmbuf_pool_private default_mbp_priv;
+    uint16_t roomsz;
+
+    RTE_ASSERT(mp->elt_size >= sizeof(struct rte_mbuf));
+
+    /* if no structure is provided, assume no mbuf private area */
+    user_mbp_priv = opaque_arg;
+    if (user_mbp_priv == NULL) {
+        memset(&default_mbp_priv, 0, sizeof(default_mbp_priv));
+        if (mp->elt_size > sizeof(struct rte_mbuf))
+            roomsz = mp->elt_size - sizeof(struct rte_mbuf);
+        else
+            roomsz = 0;
+        default_mbp_priv.mbuf_data_room_size = roomsz;
+        user_mbp_priv = &default_mbp_priv;
+    }
+
+    RTE_ASSERT(mp->elt_size >= sizeof(struct rte_mbuf) +
+        ((user_mbp_priv->flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) ?
+            sizeof(struct rte_mbuf_ext_shared_info) :
+            user_mbp_priv->mbuf_data_room_size) +
+        user_mbp_priv->mbuf_priv_size);
+    RTE_ASSERT((user_mbp_priv->flags &
+        ~RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) == 0);
+
+    mbp_priv = rte_mempool_get_priv(mp);
+    memcpy(mbp_priv, user_mbp_priv, sizeof(*mbp_priv));
+}
+
+/*
+ * pktmbuf constructor, given as a callback function to
+ * rte_mempool_obj_iter() or rte_mempool_create().
+ * Set the fields of a packet mbuf to their default values.
+ */
+void
+rte_pktmbuf_init(struct rte_mempool *mp,
+        __rte_unused void *opaque_arg,
+        void *_m,
+        __rte_unused unsigned i)
+{
+    struct rte_mbuf *m = _m;
+    uint32_t mbuf_size, buf_len, priv_size;
+
+    priv_size = rte_pktmbuf_priv_size(mp);
+    mbuf_size = sizeof(struct rte_mbuf) + priv_size;
+    buf_len = rte_pktmbuf_data_room_size(mp);
+
+    RTE_ASSERT(RTE_ALIGN(priv_size, RTE_MBUF_PRIV_ALIGN) == priv_size);
+    RTE_ASSERT(mp->elt_size >= mbuf_size);
+    RTE_ASSERT(buf_len <= UINT16_MAX);
+
+    memset(m, 0, mbuf_size);
+    /* start of buffer is after mbuf structure and priv data */
+    m->priv_size = priv_size;
+    m->buf_addr = (char *)m + mbuf_size;
+    m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
+    m->buf_len = (uint16_t)buf_len;
+
+    /* keep some headroom between start of buffer and data */
+    m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, (uint16_t)m->buf_len);
+
+    /* init some constant fields */
+    m->pool = mp;
+    m->nb_segs = 1;
+    m->port = MBUF_INVALID_PORT;
+    rte_mbuf_refcnt_set(m, 1);
+    m->next = NULL;
+}
+
+/*
+ * @internal The callback routine called when reference counter in shinfo
+ * for mbufs with pinned external buffer reaches zero. It means there is
+ * no more reference to buffer backing mbuf and this one should be freed.
+ * This routine is called for the regular (not with pinned external or
+ * indirect buffer) mbufs on detaching from the mbuf with pinned external
+ * buffer.
+ */
+static void
+rte_pktmbuf_free_pinned_extmem(void *addr, void *opaque)
+{
+    struct rte_mbuf *m = opaque;
+
+    RTE_SET_USED(addr);
+    RTE_ASSERT(RTE_MBUF_HAS_EXTBUF(m));
+    RTE_ASSERT(RTE_MBUF_HAS_PINNED_EXTBUF(m));
+    RTE_ASSERT(m->shinfo->fcb_opaque == m);
+
+    rte_mbuf_ext_refcnt_set(m->shinfo, 1);
+    m->ol_flags = EXT_ATTACHED_MBUF;
+    if (m->next != NULL) {
+        m->next = NULL;
+        m->nb_segs = 1;
+    }
+    rte_mbuf_raw_free(m);
+}
+
+/** The context to initialize the mbufs with pinned external buffers. */
+struct rte_pktmbuf_extmem_init_ctx {
+    const struct rte_pktmbuf_extmem *ext_mem; /* descriptor array. */
+    unsigned int ext_num; /* number of descriptors in array. */
+    unsigned int ext; /* loop descriptor index. */
+    size_t off; /* loop buffer offset. */
+};
+
+/**
+ * @internal Packet mbuf constructor for pools with pinned external memory.
+ *
+ * This function initializes some fields in the mbuf structure that are
+ * not modified by the user once created (origin pool, buffer start
+ * address, and so on). This function is given as a callback function to
+ * rte_mempool_obj_iter() called from rte_mempool_create_extmem().
+ *
+ * @param mp
+ *   The mempool from which mbufs originate.
+ * @param opaque_arg
+ *   A pointer to the rte_pktmbuf_extmem_init_ctx - initialization
+ *   context structure
+ * @param m
+ *   The mbuf to initialize.
+ * @param i
+ *   The index of the mbuf in the pool table.
+ */
+static void
+__rte_pktmbuf_init_extmem(struct rte_mempool *mp,
+        void *opaque_arg,
+        void *_m,
+        __rte_unused unsigned int i)
+{
+    struct rte_mbuf *m = _m;
+    struct rte_pktmbuf_extmem_init_ctx *ctx = opaque_arg;
+    const struct rte_pktmbuf_extmem *ext_mem;
+    uint32_t mbuf_size, buf_len, priv_size;
+    struct rte_mbuf_ext_shared_info *shinfo;
+
+    priv_size = rte_pktmbuf_priv_size(mp);
+    mbuf_size = sizeof(struct rte_mbuf) + priv_size;
+    buf_len = rte_pktmbuf_data_room_size(mp);
+
+    RTE_ASSERT(RTE_ALIGN(priv_size, RTE_MBUF_PRIV_ALIGN) == priv_size);
+    RTE_ASSERT(mp->elt_size >= mbuf_size);
+    RTE_ASSERT(buf_len <= UINT16_MAX);
+
+    memset(m, 0, mbuf_size);
+    m->priv_size = priv_size;
+    m->buf_len = (uint16_t)buf_len;
+
+    /* set the data buffer pointers to external memory */
+    ext_mem = ctx->ext_mem + ctx->ext;
+
+    RTE_ASSERT(ctx->ext < ctx->ext_num);
+    RTE_ASSERT(ctx->off < ext_mem->buf_len);
+
+    m->buf_addr = RTE_PTR_ADD(ext_mem->buf_ptr, ctx->off);
+    m->buf_iova = ext_mem->buf_iova == RTE_BAD_IOVA ?
+        RTE_BAD_IOVA : (ext_mem->buf_iova + ctx->off);
+
+    ctx->off += ext_mem->elt_size;
+    if (ctx->off >= ext_mem->buf_len) {
+        ctx->off = 0;
+        ++ctx->ext;
+    }
+    /* keep some headroom between start of buffer and data */
+    m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, (uint16_t)m->buf_len);
+
+    /* init some constant fields */
+    m->pool = mp;
+    m->nb_segs = 1;
+    m->port = MBUF_INVALID_PORT;
+    m->ol_flags = EXT_ATTACHED_MBUF;
+    rte_mbuf_refcnt_set(m, 1);
+    m->next = NULL;
+
+    /* init external buffer shared info items */
+    shinfo = RTE_PTR_ADD(m, mbuf_size);
+    m->shinfo = shinfo;
+    shinfo->free_cb = rte_pktmbuf_free_pinned_extmem;
+    shinfo->fcb_opaque = m;
+    rte_mbuf_ext_refcnt_set(shinfo, 1);
+}
+
+/* Helper to create a mbuf pool with given mempool ops name*/
+struct rte_mempool *
+rte_pktmbuf_pool_create_by_ops(const char *name, unsigned int n,
+    unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size,
+    int socket_id, const char *ops_name)
+{
+    struct rte_mempool *mp;
+    struct rte_pktmbuf_pool_private mbp_priv;
+    const char *mp_ops_name = ops_name;
+    unsigned elt_size;
+    int ret;
+
+    if (RTE_ALIGN(priv_size, RTE_MBUF_PRIV_ALIGN) != priv_size) {
+        RTE_LOG(ERR, MBUF, "mbuf priv_size=%u is not aligned\n",
+            priv_size);
+        rte_errno = EINVAL;
+        return NULL;
+    }
+    elt_size = sizeof(struct rte_mbuf) + (unsigned)priv_size +
+        (unsigned)data_room_size;
+    memset(&mbp_priv, 0, sizeof(mbp_priv));
+    mbp_priv.mbuf_data_room_size = data_room_size;
+    mbp_priv.mbuf_priv_size = priv_size;
+
+    mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
+        sizeof(struct rte_pktmbuf_pool_private), socket_id, 0);
+    if (mp == NULL)
+        return NULL;
+
+    if (mp_ops_name == NULL)
+        mp_ops_name = rte_mbuf_best_mempool_ops();
+    ret = rte_mempool_set_ops_byname(mp, mp_ops_name, NULL);
+    if (ret != 0) {
+        RTE_LOG(ERR, MBUF, "error setting mempool handler\n");
+        rte_mempool_free(mp);
+        rte_errno = -ret;
+        return NULL;
+    }
+    rte_pktmbuf_pool_init(mp, &mbp_priv);
+
+    ret = rte_mempool_populate_default(mp);
+    if (ret < 0) {
+        rte_mempool_free(mp);
+        rte_errno = -ret;
+        return NULL;
+    }
+
+    rte_mempool_obj_iter(mp, rte_pktmbuf_init, NULL);
+
+    return mp;
+}
+
+/* helper to create a mbuf pool */
+struct rte_mempool *
+rte_pktmbuf_pool_create(const char *name, unsigned int n,
+    unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size,
+    int socket_id)
+{
+    return rte_pktmbuf_pool_create_by_ops(name, n, cache_size, priv_size,
+        data_room_size, socket_id, NULL);
+}
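The two pool helpers above differ only in whether a specific mempool ops name is forced; rte_pktmbuf_pool_create() defers the choice to rte_mbuf_best_mempool_ops(). A minimal caller sketch, not part of this commit, assuming rte_eal_init() has already completed; the pool name and sizing are illustrative:

    #include <stdlib.h>
    #include <rte_eal.h>
    #include <rte_errno.h>
    #include <rte_lcore.h>
    #include <rte_mbuf.h>

    /* 8191 = 2^13 - 1, following the sizing advice in rte_mbuf.h. */
    struct rte_mempool *pool = rte_pktmbuf_pool_create("pkt_pool",
            8191, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
    if (pool == NULL)
        rte_exit(EXIT_FAILURE, "pool: %s\n", rte_strerror(rte_errno));

    /* Same pool, but pinning the classic ring-based handler. */
    struct rte_mempool *rpool = rte_pktmbuf_pool_create_by_ops(
            "pkt_pool_ring", 8191, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
            rte_socket_id(), "ring_mp_mc");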
+
+/* Helper to create a mbuf pool with pinned external data buffers. */
+struct rte_mempool *
+rte_pktmbuf_pool_create_extbuf(const char *name, unsigned int n,
+    unsigned int cache_size, uint16_t priv_size,
+    uint16_t data_room_size, int socket_id,
+    const struct rte_pktmbuf_extmem *ext_mem,
+    unsigned int ext_num)
+{
+    struct rte_mempool *mp;
+    struct rte_pktmbuf_pool_private mbp_priv;
+    struct rte_pktmbuf_extmem_init_ctx init_ctx;
+    const char *mp_ops_name;
+    unsigned int elt_size;
+    unsigned int i, n_elts = 0;
+    int ret;
+
+    if (RTE_ALIGN(priv_size, RTE_MBUF_PRIV_ALIGN) != priv_size) {
+        RTE_LOG(ERR, MBUF, "mbuf priv_size=%u is not aligned\n",
+            priv_size);
+        rte_errno = EINVAL;
+        return NULL;
+    }
+    /* Check the external memory descriptors. */
+    for (i = 0; i < ext_num; i++) {
+        const struct rte_pktmbuf_extmem *extm = ext_mem + i;
+
+        if (!extm->elt_size || !extm->buf_len || !extm->buf_ptr) {
+            RTE_LOG(ERR, MBUF, "invalid extmem descriptor\n");
+            rte_errno = EINVAL;
+            return NULL;
+        }
+        if (data_room_size > extm->elt_size) {
+            RTE_LOG(ERR, MBUF, "ext elt_size=%u is too small\n",
+                priv_size);
+            rte_errno = EINVAL;
+            return NULL;
+        }
+        n_elts += extm->buf_len / extm->elt_size;
+    }
+    /* Check whether enough external memory provided. */
+    if (n_elts < n) {
+        RTE_LOG(ERR, MBUF, "not enough extmem\n");
+        rte_errno = ENOMEM;
+        return NULL;
+    }
+    elt_size = sizeof(struct rte_mbuf) +
+        (unsigned int)priv_size +
+        sizeof(struct rte_mbuf_ext_shared_info);
+
+    memset(&mbp_priv, 0, sizeof(mbp_priv));
+    mbp_priv.mbuf_data_room_size = data_room_size;
+    mbp_priv.mbuf_priv_size = priv_size;
+    mbp_priv.flags = RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF;
+
+    mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
+        sizeof(struct rte_pktmbuf_pool_private), socket_id, 0);
+    if (mp == NULL)
+        return NULL;
+
+    mp_ops_name = rte_mbuf_best_mempool_ops();
+    ret = rte_mempool_set_ops_byname(mp, mp_ops_name, NULL);
+    if (ret != 0) {
+        RTE_LOG(ERR, MBUF, "error setting mempool handler\n");
+        rte_mempool_free(mp);
+        rte_errno = -ret;
+        return NULL;
+    }
+    rte_pktmbuf_pool_init(mp, &mbp_priv);
+
+    ret = rte_mempool_populate_default(mp);
+    if (ret < 0) {
+        rte_mempool_free(mp);
+        rte_errno = -ret;
+        return NULL;
+    }
+
+    init_ctx = (struct rte_pktmbuf_extmem_init_ctx){
+        .ext_mem = ext_mem,
+        .ext_num = ext_num,
+        .ext = 0,
+        .off = 0,
+    };
+    rte_mempool_obj_iter(mp, __rte_pktmbuf_init_extmem, &init_ctx);
+
+    return mp;
+}
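For the pinned-external-buffer variant, the caller describes its memory with rte_pktmbuf_extmem records; the function checks that the sum of buf_len / elt_size across the array covers n. A hedged sketch, using rte_malloc() purely as a stand-in for whatever externally managed memory the application really has:

    #include <rte_lcore.h>
    #include <rte_malloc.h>
    #include <rte_mbuf.h>

    struct rte_pktmbuf_extmem ext = { 0 };
    ext.elt_size = 2048;                        /* >= data_room_size */
    ext.buf_len  = (size_t)8192 * ext.elt_size; /* covers n = 8191 */
    ext.buf_ptr  = rte_malloc(NULL, ext.buf_len, 4096);
    ext.buf_iova = rte_malloc_virt2iova(ext.buf_ptr);

    struct rte_mempool *xpool = rte_pktmbuf_pool_create_extbuf(
            "pkt_pool_ext", 8191, 256, 0, 2048, rte_socket_id(),
            &ext, 1);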
+
+/* do some sanity checks on a mbuf: panic if it fails */
+void
+rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header)
+{
+    const char *reason;
+
+    if (rte_mbuf_check(m, is_header, &reason))
+        rte_panic("%s\n", reason);
+}
+
+int rte_mbuf_check(const struct rte_mbuf *m, int is_header,
+           const char **reason)
+{
+    unsigned int nb_segs, pkt_len;
+
+    if (m == NULL) {
+        *reason = "mbuf is NULL";
+        return -1;
+    }
+
+    /* generic checks */
+    if (m->pool == NULL) {
+        *reason = "bad mbuf pool";
+        return -1;
+    }
+    if (m->buf_iova == 0) {
+        *reason = "bad IO addr";
+        return -1;
+    }
+    if (m->buf_addr == NULL) {
+        *reason = "bad virt addr";
+        return -1;
+    }
+
+    uint16_t cnt = rte_mbuf_refcnt_read(m);
+    if ((cnt == 0) || (cnt == UINT16_MAX)) {
+        *reason = "bad ref cnt";
+        return -1;
+    }
+
+    /* nothing to check for sub-segments */
+    if (is_header == 0)
+        return 0;
+
+    /* data_len is supposed to be not more than pkt_len */
+    if (m->data_len > m->pkt_len) {
+        *reason = "bad data_len";
+        return -1;
+    }
+
+    nb_segs = m->nb_segs;
+    pkt_len = m->pkt_len;
+
+    do {
+        if (m->data_off > m->buf_len) {
+            *reason = "data offset too big in mbuf segment";
+            return -1;
+        }
+        if (m->data_off + m->data_len > m->buf_len) {
+            *reason = "data length too big in mbuf segment";
+            return -1;
+        }
+        nb_segs -= 1;
+        pkt_len -= m->data_len;
+    } while ((m = m->next) != NULL);
+
+    if (nb_segs) {
+        *reason = "bad nb_segs";
+        return -1;
+    }
+    if (pkt_len) {
+        *reason = "bad pkt_len";
+        return -1;
+    }
+
+    return 0;
+}
+
+/**
+ * @internal helper function for freeing a bulk of packet mbuf segments
+ * via an array holding the packet mbuf segments from the same mempool
+ * pending to be freed.
+ *
+ * @param m
+ *   The packet mbuf segment to be freed.
+ * @param pending
+ *   Pointer to the array of packet mbuf segments pending to be freed.
+ * @param nb_pending
+ *   Pointer to the number of elements held in the array.
+ * @param pending_sz
+ *   Number of elements the array can hold.
+ *   Note: The compiler should optimize this parameter away when using a
+ *   constant value, such as RTE_PKTMBUF_FREE_PENDING_SZ.
+ */
+static void
+__rte_pktmbuf_free_seg_via_array(struct rte_mbuf *m,
+    struct rte_mbuf ** const pending, unsigned int * const nb_pending,
+    const unsigned int pending_sz)
+{
+    m = rte_pktmbuf_prefree_seg(m);
+    if (likely(m != NULL)) {
+        if (*nb_pending == pending_sz ||
+            (*nb_pending > 0 && m->pool != pending[0]->pool)) {
+            rte_mempool_put_bulk(pending[0]->pool,
+                (void **)pending, *nb_pending);
+            *nb_pending = 0;
+        }
+
+        pending[(*nb_pending)++] = m;
+    }
+}
+
+/**
+ * Size of the array holding mbufs from the same mempool pending to be freed
+ * in bulk.
+ */
+#define RTE_PKTMBUF_FREE_PENDING_SZ 64
+
+/* Free a bulk of packet mbufs back into their original mempools. */
+void rte_pktmbuf_free_bulk(struct rte_mbuf **mbufs, unsigned int count)
+{
+    struct rte_mbuf *m, *m_next, *pending[RTE_PKTMBUF_FREE_PENDING_SZ];
+    unsigned int idx, nb_pending = 0;
+
+    for (idx = 0; idx < count; idx++) {
+        m = mbufs[idx];
+        if (unlikely(m == NULL))
+            continue;
+
+        __rte_mbuf_sanity_check(m, 1);
+
+        do {
+            m_next = m->next;
+            __rte_pktmbuf_free_seg_via_array(m,
+                pending, &nb_pending,
+                RTE_PKTMBUF_FREE_PENDING_SZ);
+            m = m_next;
+        } while (m != NULL);
+    }
+
+    if (nb_pending > 0)
+        rte_mempool_put_bulk(pending[0]->pool, (void **)pending, nb_pending);
+}
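rte_mbuf_check() is the non-panicking probe behind rte_mbuf_sanity_check(), and rte_pktmbuf_free_bulk() amortizes mempool puts while consecutive segments share a pool. A short usage sketch; names other than the library calls are illustrative:

    const char *reason;

    if (rte_mbuf_check(m, 1, &reason) != 0)
        RTE_LOG(ERR, USER1, "bad mbuf: %s\n", reason);

    /* Return a whole burst in one call instead of per-mbuf free. */
    rte_pktmbuf_free_bulk(pkts, nb_pkts);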
+
+/* Creates a shallow copy of mbuf */
+struct rte_mbuf *
+rte_pktmbuf_clone(struct rte_mbuf *md, struct rte_mempool *mp)
+{
+    struct rte_mbuf *mc, *mi, **prev;
+    uint32_t pktlen;
+    uint16_t nseg;
+
+    mc = rte_pktmbuf_alloc(mp);
+    if (unlikely(mc == NULL))
+        return NULL;
+
+    mi = mc;
+    prev = &mi->next;
+    pktlen = md->pkt_len;
+    nseg = 0;
+
+    do {
+        nseg++;
+        rte_pktmbuf_attach(mi, md);
+        *prev = mi;
+        prev = &mi->next;
+    } while ((md = md->next) != NULL &&
+        (mi = rte_pktmbuf_alloc(mp)) != NULL);
+
+    *prev = NULL;
+    mc->nb_segs = nseg;
+    mc->pkt_len = pktlen;
+
+    /* Allocation of new indirect segment failed */
+    if (unlikely(mi == NULL)) {
+        rte_pktmbuf_free(mc);
+        return NULL;
+    }
+
+    __rte_mbuf_sanity_check(mc, 1);
+    return mc;
+}
+
+/* convert multi-segment mbuf to single mbuf */
+int
+__rte_pktmbuf_linearize(struct rte_mbuf *mbuf)
+{
+    size_t seg_len, copy_len;
+    struct rte_mbuf *m;
+    struct rte_mbuf *m_next;
+    char *buffer;
+
+    /* Extend first segment to the total packet length */
+    copy_len = rte_pktmbuf_pkt_len(mbuf) - rte_pktmbuf_data_len(mbuf);
+
+    if (unlikely(copy_len > rte_pktmbuf_tailroom(mbuf)))
+        return -1;
+
+    buffer = rte_pktmbuf_mtod_offset(mbuf, char *, mbuf->data_len);
+    mbuf->data_len = (uint16_t)(mbuf->pkt_len);
+
+    /* Append data from next segments to the first one */
+    m = mbuf->next;
+    while (m != NULL) {
+        m_next = m->next;
+
+        seg_len = rte_pktmbuf_data_len(m);
+        rte_memcpy(buffer, rte_pktmbuf_mtod(m, char *), seg_len);
+        buffer += seg_len;
+
+        rte_pktmbuf_free_seg(m);
+        m = m_next;
+    }
+
+    mbuf->next = NULL;
+    mbuf->nb_segs = 1;
+
+    return 0;
+}
+
+/* Create a deep copy of mbuf */
+struct rte_mbuf *
+rte_pktmbuf_copy(const struct rte_mbuf *m, struct rte_mempool *mp,
+    uint32_t off, uint32_t len)
+{
+    const struct rte_mbuf *seg = m;
+    struct rte_mbuf *mc, *m_last, **prev;
+
+    /* garbage in check */
+    __rte_mbuf_sanity_check(m, 1);
+
+    /* check for request to copy at offset past end of mbuf */
+    if (unlikely(off >= m->pkt_len))
+        return NULL;
+
+    mc = rte_pktmbuf_alloc(mp);
+    if (unlikely(mc == NULL))
+        return NULL;
+
+    /* truncate requested length to available data */
+    if (len > m->pkt_len - off)
+        len = m->pkt_len - off;
+
+    __rte_pktmbuf_copy_hdr(mc, m);
+
+    /* copied mbuf is not indirect or external */
+    mc->ol_flags = m->ol_flags & ~(IND_ATTACHED_MBUF|EXT_ATTACHED_MBUF);
+
+    prev = &mc->next;
+    m_last = mc;
+    while (len > 0) {
+        uint32_t copy_len;
+
+        /* skip leading mbuf segments */
+        while (off >= seg->data_len) {
+            off -= seg->data_len;
+            seg = seg->next;
+        }
+
+        /* current buffer is full, chain a new one */
+        if (rte_pktmbuf_tailroom(m_last) == 0) {
+            m_last = rte_pktmbuf_alloc(mp);
+            if (unlikely(m_last == NULL)) {
+                rte_pktmbuf_free(mc);
+                return NULL;
+            }
+            ++mc->nb_segs;
+            *prev = m_last;
+            prev = &m_last->next;
+        }
+
+        /*
+         * copy the min of data in input segment (seg)
+         * vs space available in output (m_last)
+         */
+        copy_len = RTE_MIN(seg->data_len - off, len);
+        if (copy_len > rte_pktmbuf_tailroom(m_last))
+            copy_len = rte_pktmbuf_tailroom(m_last);
+
+        /* append from seg to m_last */
+        rte_memcpy(rte_pktmbuf_mtod_offset(m_last, char *,
+                m_last->data_len),
+            rte_pktmbuf_mtod_offset(seg, char *, off),
+            copy_len);
+
+        /* update offsets and lengths */
+        m_last->data_len += copy_len;
+        mc->pkt_len += copy_len;
+        off += copy_len;
+        len -= copy_len;
+    }
+
+    /* garbage out check */
+    __rte_mbuf_sanity_check(mc, 1);
+    return mc;
+}
+
+/* dump a mbuf on console */
+void
+rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len)
+{
+    unsigned int len;
+    unsigned int nb_segs;
+
+    __rte_mbuf_sanity_check(m, 1);
+
+    fprintf(f, "dump mbuf at %p, iova=%#"PRIx64", buf_len=%u\n",
+        m, m->buf_iova, m->buf_len);
+    fprintf(f, "  pkt_len=%u, ol_flags=%#"PRIx64", nb_segs=%u, port=%u",
+        m->pkt_len, m->ol_flags, m->nb_segs, m->port);
+
+    if (m->ol_flags & (PKT_RX_VLAN | PKT_TX_VLAN))
+        fprintf(f, ", vlan_tci=%u", m->vlan_tci);
+
+    fprintf(f, ", ptype=%#"PRIx32"\n", m->packet_type);
+
+    nb_segs = m->nb_segs;
+
+    while (m && nb_segs != 0) {
+        __rte_mbuf_sanity_check(m, 0);
+
+        fprintf(f, "  segment at %p, data=%p, len=%u, off=%u, refcnt=%u\n",
+            m, rte_pktmbuf_mtod(m, void *),
+            m->data_len, m->data_off, rte_mbuf_refcnt_read(m));
+
+        len = dump_len;
+        if (len > m->data_len)
+            len = m->data_len;
+        if (len != 0)
+            rte_hexdump(f, NULL, rte_pktmbuf_mtod(m, void *), len);
+        dump_len -= len;
+        m = m->next;
+        nb_segs--;
+    }
+}
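The three routines above give three cost/aliasing trade-offs: clone shares the data buffers and only bumps reference counts, copy duplicates the bytes, and linearize flattens a chain into the first segment's tailroom. A sketch of the first two (the pools are illustrative; a cloned packet's data must be treated as read-only):

    /* Shallow: segments of 'c' attach to m's buffers via refcnt. */
    struct rte_mbuf *c = rte_pktmbuf_clone(m, indirect_pool);

    /* Deep: fresh buffers, safe to modify independently of 'm'.
     * UINT32_MAX copies everything after offset 0, since the length
     * is truncated to the available data. */
    struct rte_mbuf *d = rte_pktmbuf_copy(m, pool, 0, UINT32_MAX);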
+
+/* read len data bytes in a mbuf at specified offset (internal) */
+const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
+    uint32_t len, void *buf)
+{
+    const struct rte_mbuf *seg = m;
+    uint32_t buf_off = 0, copy_len;
+
+    if (off + len > rte_pktmbuf_pkt_len(m))
+        return NULL;
+
+    while (off >= rte_pktmbuf_data_len(seg)) {
+        off -= rte_pktmbuf_data_len(seg);
+        seg = seg->next;
+    }
+
+    if (off + len <= rte_pktmbuf_data_len(seg))
+        return rte_pktmbuf_mtod_offset(seg, char *, off);
+
+    /* rare case: header is split among several segments */
+    while (len > 0) {
+        copy_len = rte_pktmbuf_data_len(seg) - off;
+        if (copy_len > len)
+            copy_len = len;
+        rte_memcpy((char *)buf + buf_off,
+            rte_pktmbuf_mtod_offset(seg, char *, off), copy_len);
+        off = 0;
+        buf_off += copy_len;
+        len -= copy_len;
+        seg = seg->next;
+    }
+
+    return buf;
+}
+
+/*
+ * Get the name of a RX offload flag. Must be kept synchronized with flag
+ * definitions in rte_mbuf.h.
+ */
+const char *rte_get_rx_ol_flag_name(uint64_t mask)
+{
+    switch (mask) {
+    case PKT_RX_VLAN: return "PKT_RX_VLAN";
+    case PKT_RX_RSS_HASH: return "PKT_RX_RSS_HASH";
+    case PKT_RX_FDIR: return "PKT_RX_FDIR";
+    case PKT_RX_L4_CKSUM_BAD: return "PKT_RX_L4_CKSUM_BAD";
+    case PKT_RX_L4_CKSUM_GOOD: return "PKT_RX_L4_CKSUM_GOOD";
+    case PKT_RX_L4_CKSUM_NONE: return "PKT_RX_L4_CKSUM_NONE";
+    case PKT_RX_IP_CKSUM_BAD: return "PKT_RX_IP_CKSUM_BAD";
+    case PKT_RX_IP_CKSUM_GOOD: return "PKT_RX_IP_CKSUM_GOOD";
+    case PKT_RX_IP_CKSUM_NONE: return "PKT_RX_IP_CKSUM_NONE";
+    case PKT_RX_EIP_CKSUM_BAD: return "PKT_RX_EIP_CKSUM_BAD";
+    case PKT_RX_VLAN_STRIPPED: return "PKT_RX_VLAN_STRIPPED";
+    case PKT_RX_IEEE1588_PTP: return "PKT_RX_IEEE1588_PTP";
+    case PKT_RX_IEEE1588_TMST: return "PKT_RX_IEEE1588_TMST";
+    case PKT_RX_FDIR_ID: return "PKT_RX_FDIR_ID";
+    case PKT_RX_FDIR_FLX: return "PKT_RX_FDIR_FLX";
+    case PKT_RX_QINQ_STRIPPED: return "PKT_RX_QINQ_STRIPPED";
+    case PKT_RX_QINQ: return "PKT_RX_QINQ";
+    case PKT_RX_LRO: return "PKT_RX_LRO";
+    case PKT_RX_TIMESTAMP: return "PKT_RX_TIMESTAMP";
+    case PKT_RX_SEC_OFFLOAD: return "PKT_RX_SEC_OFFLOAD";
+    case PKT_RX_SEC_OFFLOAD_FAILED: return "PKT_RX_SEC_OFFLOAD_FAILED";
+    case PKT_RX_OUTER_L4_CKSUM_BAD: return "PKT_RX_OUTER_L4_CKSUM_BAD";
+    case PKT_RX_OUTER_L4_CKSUM_GOOD: return "PKT_RX_OUTER_L4_CKSUM_GOOD";
+    case PKT_RX_OUTER_L4_CKSUM_INVALID:
+        return "PKT_RX_OUTER_L4_CKSUM_INVALID";
+
+    default: return NULL;
+    }
+}
+
+struct flag_mask {
+    uint64_t flag;
+    uint64_t mask;
+    const char *default_name;
+};
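__rte_pktmbuf_read() is the slow path of the public rte_pktmbuf_read() wrapper (declared further down in rte_mbuf.h): when the requested span lives in one segment the caller gets a pointer straight into the mbuf, otherwise the bytes are gathered into the supplied buffer. Sketch (the 14-byte header length and the consumer are illustrative):

    uint8_t hdr[14];   /* e.g. an Ethernet header */
    const uint8_t *p = rte_pktmbuf_read(m, 0, sizeof(hdr), hdr);

    if (p != NULL)
        parse_l2(p);   /* hypothetical; p may alias m or equal hdr */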
+
+/* write the list of rx ol flags in buffer buf */
+int
+rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
+{
+    const struct flag_mask rx_flags[] = {
+        { PKT_RX_VLAN, PKT_RX_VLAN, NULL },
+        { PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, NULL },
+        { PKT_RX_FDIR, PKT_RX_FDIR, NULL },
+        { PKT_RX_L4_CKSUM_BAD, PKT_RX_L4_CKSUM_MASK, NULL },
+        { PKT_RX_L4_CKSUM_GOOD, PKT_RX_L4_CKSUM_MASK, NULL },
+        { PKT_RX_L4_CKSUM_NONE, PKT_RX_L4_CKSUM_MASK, NULL },
+        { PKT_RX_L4_CKSUM_UNKNOWN, PKT_RX_L4_CKSUM_MASK,
+          "PKT_RX_L4_CKSUM_UNKNOWN" },
+        { PKT_RX_IP_CKSUM_BAD, PKT_RX_IP_CKSUM_MASK, NULL },
+        { PKT_RX_IP_CKSUM_GOOD, PKT_RX_IP_CKSUM_MASK, NULL },
+        { PKT_RX_IP_CKSUM_NONE, PKT_RX_IP_CKSUM_MASK, NULL },
+        { PKT_RX_IP_CKSUM_UNKNOWN, PKT_RX_IP_CKSUM_MASK,
+          "PKT_RX_IP_CKSUM_UNKNOWN" },
+        { PKT_RX_EIP_CKSUM_BAD, PKT_RX_EIP_CKSUM_BAD, NULL },
+        { PKT_RX_VLAN_STRIPPED, PKT_RX_VLAN_STRIPPED, NULL },
+        { PKT_RX_IEEE1588_PTP, PKT_RX_IEEE1588_PTP, NULL },
+        { PKT_RX_IEEE1588_TMST, PKT_RX_IEEE1588_TMST, NULL },
+        { PKT_RX_FDIR_ID, PKT_RX_FDIR_ID, NULL },
+        { PKT_RX_FDIR_FLX, PKT_RX_FDIR_FLX, NULL },
+        { PKT_RX_QINQ_STRIPPED, PKT_RX_QINQ_STRIPPED, NULL },
+        { PKT_RX_LRO, PKT_RX_LRO, NULL },
+        { PKT_RX_TIMESTAMP, PKT_RX_TIMESTAMP, NULL },
+        { PKT_RX_SEC_OFFLOAD, PKT_RX_SEC_OFFLOAD, NULL },
+        { PKT_RX_SEC_OFFLOAD_FAILED, PKT_RX_SEC_OFFLOAD_FAILED, NULL },
+        { PKT_RX_QINQ, PKT_RX_QINQ, NULL },
+        { PKT_RX_OUTER_L4_CKSUM_BAD, PKT_RX_OUTER_L4_CKSUM_MASK, NULL },
+        { PKT_RX_OUTER_L4_CKSUM_GOOD, PKT_RX_OUTER_L4_CKSUM_MASK,
+          NULL },
+        { PKT_RX_OUTER_L4_CKSUM_INVALID, PKT_RX_OUTER_L4_CKSUM_MASK,
+          NULL },
+        { PKT_RX_OUTER_L4_CKSUM_UNKNOWN, PKT_RX_OUTER_L4_CKSUM_MASK,
+          "PKT_RX_OUTER_L4_CKSUM_UNKNOWN" },
+    };
+    const char *name;
+    unsigned int i;
+    int ret;
+
+    if (buflen == 0)
+        return -1;
+
+    buf[0] = '\0';
+    for (i = 0; i < RTE_DIM(rx_flags); i++) {
+        if ((mask & rx_flags[i].mask) != rx_flags[i].flag)
+            continue;
+        name = rte_get_rx_ol_flag_name(rx_flags[i].flag);
+        if (name == NULL)
+            name = rx_flags[i].default_name;
+        ret = snprintf(buf, buflen, "%s ", name);
+        if (ret < 0)
+            return -1;
+        if ((size_t)ret >= buflen)
+            return -1;
+        buf += ret;
+        buflen -= ret;
+    }
+
+    return 0;
+}
+
+/*
+ * Get the name of a TX offload flag. Must be kept synchronized with flag
+ * definitions in rte_mbuf.h.
+ */
+const char *rte_get_tx_ol_flag_name(uint64_t mask)
+{
+    switch (mask) {
+    case PKT_TX_VLAN: return "PKT_TX_VLAN";
+    case PKT_TX_IP_CKSUM: return "PKT_TX_IP_CKSUM";
+    case PKT_TX_TCP_CKSUM: return "PKT_TX_TCP_CKSUM";
+    case PKT_TX_SCTP_CKSUM: return "PKT_TX_SCTP_CKSUM";
+    case PKT_TX_UDP_CKSUM: return "PKT_TX_UDP_CKSUM";
+    case PKT_TX_IEEE1588_TMST: return "PKT_TX_IEEE1588_TMST";
+    case PKT_TX_TCP_SEG: return "PKT_TX_TCP_SEG";
+    case PKT_TX_IPV4: return "PKT_TX_IPV4";
+    case PKT_TX_IPV6: return "PKT_TX_IPV6";
+    case PKT_TX_OUTER_IP_CKSUM: return "PKT_TX_OUTER_IP_CKSUM";
+    case PKT_TX_OUTER_IPV4: return "PKT_TX_OUTER_IPV4";
+    case PKT_TX_OUTER_IPV6: return "PKT_TX_OUTER_IPV6";
+    case PKT_TX_TUNNEL_VXLAN: return "PKT_TX_TUNNEL_VXLAN";
+    case PKT_TX_TUNNEL_GTP: return "PKT_TX_TUNNEL_GTP";
+    case PKT_TX_TUNNEL_GRE: return "PKT_TX_TUNNEL_GRE";
+    case PKT_TX_TUNNEL_IPIP: return "PKT_TX_TUNNEL_IPIP";
+    case PKT_TX_TUNNEL_GENEVE: return "PKT_TX_TUNNEL_GENEVE";
+    case PKT_TX_TUNNEL_MPLSINUDP: return "PKT_TX_TUNNEL_MPLSINUDP";
+    case PKT_TX_TUNNEL_VXLAN_GPE: return "PKT_TX_TUNNEL_VXLAN_GPE";
+    case PKT_TX_TUNNEL_IP: return "PKT_TX_TUNNEL_IP";
+    case PKT_TX_TUNNEL_UDP: return "PKT_TX_TUNNEL_UDP";
+    case PKT_TX_QINQ: return "PKT_TX_QINQ";
+    case PKT_TX_MACSEC: return "PKT_TX_MACSEC";
+    case PKT_TX_SEC_OFFLOAD: return "PKT_TX_SEC_OFFLOAD";
+    case PKT_TX_UDP_SEG: return "PKT_TX_UDP_SEG";
+    case PKT_TX_OUTER_UDP_CKSUM: return "PKT_TX_OUTER_UDP_CKSUM";
+    default: return NULL;
+    }
+}
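The *_list() variants render a whole flag mask, falling back to the table's default_name for all-zero field values such as PKT_RX_L4_CKSUM_UNKNOWN. A minimal sketch:

    char names[256];

    if (rte_get_rx_ol_flag_list(m->ol_flags, names, sizeof(names)) == 0)
        printf("rx ol_flags: %s\n", names);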
+
+/* write the list of tx ol flags in buffer buf */
+int
+rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
+{
+    const struct flag_mask tx_flags[] = {
+        { PKT_TX_VLAN, PKT_TX_VLAN, NULL },
+        { PKT_TX_IP_CKSUM, PKT_TX_IP_CKSUM, NULL },
+        { PKT_TX_TCP_CKSUM, PKT_TX_L4_MASK, NULL },
+        { PKT_TX_SCTP_CKSUM, PKT_TX_L4_MASK, NULL },
+        { PKT_TX_UDP_CKSUM, PKT_TX_L4_MASK, NULL },
+        { PKT_TX_L4_NO_CKSUM, PKT_TX_L4_MASK, "PKT_TX_L4_NO_CKSUM" },
+        { PKT_TX_IEEE1588_TMST, PKT_TX_IEEE1588_TMST, NULL },
+        { PKT_TX_TCP_SEG, PKT_TX_TCP_SEG, NULL },
+        { PKT_TX_IPV4, PKT_TX_IPV4, NULL },
+        { PKT_TX_IPV6, PKT_TX_IPV6, NULL },
+        { PKT_TX_OUTER_IP_CKSUM, PKT_TX_OUTER_IP_CKSUM, NULL },
+        { PKT_TX_OUTER_IPV4, PKT_TX_OUTER_IPV4, NULL },
+        { PKT_TX_OUTER_IPV6, PKT_TX_OUTER_IPV6, NULL },
+        { PKT_TX_TUNNEL_VXLAN, PKT_TX_TUNNEL_MASK, NULL },
+        { PKT_TX_TUNNEL_GTP, PKT_TX_TUNNEL_MASK, NULL },
+        { PKT_TX_TUNNEL_GRE, PKT_TX_TUNNEL_MASK, NULL },
+        { PKT_TX_TUNNEL_IPIP, PKT_TX_TUNNEL_MASK, NULL },
+        { PKT_TX_TUNNEL_GENEVE, PKT_TX_TUNNEL_MASK, NULL },
+        { PKT_TX_TUNNEL_MPLSINUDP, PKT_TX_TUNNEL_MASK, NULL },
+        { PKT_TX_TUNNEL_VXLAN_GPE, PKT_TX_TUNNEL_MASK, NULL },
+        { PKT_TX_TUNNEL_IP, PKT_TX_TUNNEL_MASK, NULL },
+        { PKT_TX_TUNNEL_UDP, PKT_TX_TUNNEL_MASK, NULL },
+        { PKT_TX_QINQ, PKT_TX_QINQ, NULL },
+        { PKT_TX_MACSEC, PKT_TX_MACSEC, NULL },
+        { PKT_TX_SEC_OFFLOAD, PKT_TX_SEC_OFFLOAD, NULL },
+        { PKT_TX_UDP_SEG, PKT_TX_UDP_SEG, NULL },
+        { PKT_TX_OUTER_UDP_CKSUM, PKT_TX_OUTER_UDP_CKSUM, NULL },
+    };
+    const char *name;
+    unsigned int i;
+    int ret;
+
+    if (buflen == 0)
+        return -1;
+
+    buf[0] = '\0';
+    for (i = 0; i < RTE_DIM(tx_flags); i++) {
+        if ((mask & tx_flags[i].mask) != tx_flags[i].flag)
+            continue;
+        name = rte_get_tx_ol_flag_name(tx_flags[i].flag);
+        if (name == NULL)
+            name = tx_flags[i].default_name;
+        ret = snprintf(buf, buflen, "%s ", name);
+        if (ret < 0)
+            return -1;
+        if ((size_t)ret >= buflen)
+            return -1;
+        buf += ret;
+        buflen -= ret;
+    }
+
+    return 0;
+}
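That closes rte_mbuf.c. As a reminder of how the TX flags above are consumed: a transmitter sets the flags together with the matching tx_offload lengths before handing the mbuf to a PMD. A hedged fragment (the header structs come from rte_ether.h/rte_ip.h; the exact per-flag prerequisites are specified in rte_mbuf_core.h):

    m->l2_len = sizeof(struct rte_ether_hdr);
    m->l3_len = sizeof(struct rte_ipv4_hdr);
    m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM;
    /* the IPv4 header's hdr_checksum field must be zeroed first */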
diff --git a/src/spdk/dpdk/lib/librte_mbuf/rte_mbuf.h b/src/spdk/dpdk/lib/librte_mbuf/rte_mbuf.h
new file mode 100644
index 000000000..f8e492e59
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_mbuf/rte_mbuf.h
@@ -0,0 +1,2009 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation.
+ * Copyright 2014 6WIND S.A.
+ */
+
+#ifndef _RTE_MBUF_H_
+#define _RTE_MBUF_H_
+
+/**
+ * @file
+ * RTE Mbuf
+ *
+ * The mbuf library provides the ability to create and destroy buffers
+ * that may be used by the RTE application to store message
+ * buffers. The message buffers are stored in a mempool, using the
+ * RTE mempool library.
+ *
+ * The preferred way to create a mbuf pool is to use
+ * rte_pktmbuf_pool_create(). However, in some situations, an
+ * application may want to have more control (ex: populate the pool with
+ * specific memory), in this case it is possible to use functions from
+ * rte_mempool. See how rte_pktmbuf_pool_create() is implemented for
+ * details.
+ *
+ * This library provides an API to allocate/free packet mbufs, which are
+ * used to carry network packets.
+ *
+ * To understand the concepts of packet buffers or mbufs, you
+ * should read "TCP/IP Illustrated, Volume 2: The Implementation,
+ * Addison-Wesley, 1995, ISBN 0-201-63354-X from Richard Stevens"
+ * http://www.kohala.com/start/tcpipiv2.html
+ */
+
+#include <stdint.h>
+#include <rte_compat.h>
+#include <rte_common.h>
+#include <rte_config.h>
+#include <rte_mempool.h>
+#include <rte_memory.h>
+#include <rte_atomic.h>
+#include <rte_prefetch.h>
+#include <rte_branch_prediction.h>
+#include <rte_byteorder.h>
+#include <rte_mbuf_ptype.h>
+#include <rte_mbuf_core.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Get the name of a RX offload flag
+ *
+ * @param mask
+ *   The mask describing the flag.
+ * @return
+ *   The name of this flag, or NULL if it's not a valid RX flag.
+ */
+const char *rte_get_rx_ol_flag_name(uint64_t mask);
+
+/**
+ * Dump the list of RX offload flags in a buffer
+ *
+ * @param mask
+ *   The mask describing the RX flags.
+ * @param buf
+ *   The output buffer.
+ * @param buflen
+ *   The length of the buffer.
+ * @return
+ *   0 on success, (-1) on error.
+ */
+int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);
+
+/**
+ * Get the name of a TX offload flag
+ *
+ * @param mask
+ *   The mask describing the flag. Usually only one bit must be set.
+ *   Several bits can be given if they belong to the same mask.
+ *   Ex: PKT_TX_L4_MASK.
+ * @return
+ *   The name of this flag, or NULL if it's not a valid TX flag.
+ */
+const char *rte_get_tx_ol_flag_name(uint64_t mask);
+
+/**
+ * Dump the list of TX offload flags in a buffer
+ *
+ * @param mask
+ *   The mask describing the TX flags.
+ * @param buf
+ *   The output buffer.
+ * @param buflen
+ *   The length of the buffer.
+ * @return
+ *   0 on success, (-1) on error.
+ */
+int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);
+
+/**
+ * Prefetch the first part of the mbuf
+ *
+ * The first 64 bytes of the mbuf corresponds to fields that are used early
+ * in the receive path. If the cache line of the architecture is higher than
+ * 64B, the second part will also be prefetched.
+ *
+ * @param m
+ *   The pointer to the mbuf.
+ */
+static inline void
+rte_mbuf_prefetch_part1(struct rte_mbuf *m)
+{
+    rte_prefetch0(&m->cacheline0);
+}
+
+/**
+ * Prefetch the second part of the mbuf
+ *
+ * The next 64 bytes of the mbuf corresponds to fields that are used in the
+ * transmit path. If the cache line of the architecture is higher than 64B,
+ * this function does nothing as it is expected that the full mbuf is
+ * already in cache.
+ *
+ * @param m
+ *   The pointer to the mbuf.
+ */
+static inline void
+rte_mbuf_prefetch_part2(struct rte_mbuf *m)
+{
+#if RTE_CACHE_LINE_SIZE == 64
+    rte_prefetch0(&m->cacheline1);
+#else
+    RTE_SET_USED(m);
+#endif
+}
+
+
+static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
+
+/**
+ * Return the IO address of the beginning of the mbuf data
+ *
+ * @param mb
+ *   The pointer to the mbuf.
+ * @return
+ *   The IO address of the beginning of the mbuf data
+ */
+static inline rte_iova_t
+rte_mbuf_data_iova(const struct rte_mbuf *mb)
+{
+    return mb->buf_iova + mb->data_off;
+}
+
+__rte_deprecated
+static inline phys_addr_t
+rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
+{
+    return rte_mbuf_data_iova(mb);
+}
+
+/**
+ * Return the default IO address of the beginning of the mbuf data
+ *
+ * This function is used by drivers in their receive function, as it
+ * returns the location where data should be written by the NIC, taking
+ * the default headroom in account.
+ *
+ * @param mb
+ *   The pointer to the mbuf.
+ * @return
+ *   The IO address of the beginning of the mbuf data
+ */
+static inline rte_iova_t
+rte_mbuf_data_iova_default(const struct rte_mbuf *mb)
+{
+    return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
+}
+
+__rte_deprecated
+static inline phys_addr_t
+rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
+{
+    return rte_mbuf_data_iova_default(mb);
+}
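One sketch of where the two prefetch halves pay off: touching the next packet's first cache line while the current one is processed in an RX loop (rte_eth_rx_burst() is from librte_ethdev; process_pkt(), port_id, pkts and BURST_SZ are hypothetical):

    uint16_t i, nb = rte_eth_rx_burst(port_id, 0, pkts, BURST_SZ);

    for (i = 0; i < nb; i++) {
        if (i + 1 < nb)
            rte_mbuf_prefetch_part1(pkts[i + 1]);
        process_pkt(pkts[i]);
    }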
+
+/**
+ * Return the mbuf owning the data buffer address of an indirect mbuf.
+ *
+ * @param mi
+ *   The pointer to the indirect mbuf.
+ * @return
+ *   The address of the direct mbuf corresponding to buffer_addr.
+ */
+static inline struct rte_mbuf *
+rte_mbuf_from_indirect(struct rte_mbuf *mi)
+{
+    return (struct rte_mbuf *)
+        RTE_PTR_SUB(mi->buf_addr, sizeof(*mi) + mi->priv_size);
+}
+
+/**
+ * Return address of buffer embedded in the given mbuf.
+ *
+ * The return value shall be same as mb->buf_addr if the mbuf is already
+ * initialized and direct. However, this API is useful if mempool of the
+ * mbuf is already known because it doesn't need to access mbuf contents in
+ * order to get the mempool pointer.
+ *
+ * @warning
+ * @b EXPERIMENTAL: This API may change without prior notice.
+ * This will be used by rte_mbuf_to_baddr() which has redundant code once
+ * experimental tag is removed.
+ *
+ * @param mb
+ *   The pointer to the mbuf.
+ * @param mp
+ *   The pointer to the mempool of the mbuf.
+ * @return
+ *   The pointer of the mbuf buffer.
+ */
+__rte_experimental
+static inline char *
+rte_mbuf_buf_addr(struct rte_mbuf *mb, struct rte_mempool *mp)
+{
+    return (char *)mb + sizeof(*mb) + rte_pktmbuf_priv_size(mp);
+}
+
+/**
+ * Return the default address of the beginning of the mbuf data.
+ *
+ * @warning
+ * @b EXPERIMENTAL: This API may change without prior notice.
+ *
+ * @param mb
+ *   The pointer to the mbuf.
+ * @return
+ *   The pointer of the beginning of the mbuf data.
+ */
+__rte_experimental
+static inline char *
+rte_mbuf_data_addr_default(__rte_unused struct rte_mbuf *mb)
+{
+    /* gcc complains about calling this experimental function even
+     * when not using it. Hide it with ALLOW_EXPERIMENTAL_API.
+     */
+#ifdef ALLOW_EXPERIMENTAL_API
+    return rte_mbuf_buf_addr(mb, mb->pool) + RTE_PKTMBUF_HEADROOM;
+#else
+    return NULL;
+#endif
+}
+
+/**
+ * Return address of buffer embedded in the given mbuf.
+ *
+ * @note: Accessing mempool pointer of a mbuf is expensive because the
+ * pointer is stored in the 2nd cache line of mbuf. If mempool is known, it
+ * is better not to reference the mempool pointer in mbuf but calling
+ * rte_mbuf_buf_addr() would be more efficient.
+ *
+ * @param md
+ *   The pointer to the mbuf.
+ * @return
+ *   The address of the data buffer owned by the mbuf.
+ */
+static inline char *
+rte_mbuf_to_baddr(struct rte_mbuf *md)
+{
+#ifdef ALLOW_EXPERIMENTAL_API
+    return rte_mbuf_buf_addr(md, md->pool);
+#else
+    char *buffer_addr;
+    buffer_addr = (char *)md + sizeof(*md) + rte_pktmbuf_priv_size(md->pool);
+    return buffer_addr;
+#endif
+}
+
+/**
+ * Return the starting address of the private data area embedded in
+ * the given mbuf.
+ *
+ * Note that no check is made to ensure that a private data area
+ * actually exists in the supplied mbuf.
+ *
+ * @param m
+ *   The pointer to the mbuf.
+ * @return
+ *   The starting address of the private data area of the given mbuf.
+ */
+__rte_experimental
+static inline void *
+rte_mbuf_to_priv(struct rte_mbuf *m)
+{
+    return RTE_PTR_ADD(m, sizeof(struct rte_mbuf));
+}
+
+/**
+ * Private data in case of pktmbuf pool.
+ *
+ * A structure that contains some pktmbuf_pool-specific data that are
+ * appended after the mempool structure (in private data).
+ */
+struct rte_pktmbuf_pool_private {
+    uint16_t mbuf_data_room_size; /**< Size of data space in each mbuf. */
+    uint16_t mbuf_priv_size;      /**< Size of private area in each mbuf. */
+    uint32_t flags;               /**< reserved for future use. */
+};
+
+/**
+ * Return the flags from private data in an mempool structure.
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @return
+ *   The flags from the private data structure.
+ */
+static inline uint32_t
+rte_pktmbuf_priv_flags(struct rte_mempool *mp)
+{
+    struct rte_pktmbuf_pool_private *mbp_priv;
+
+    mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
+    return mbp_priv->flags;
+}
+
+/**
+ * When set, pktmbuf mempool will hold only mbufs with pinned external
+ * buffer. The external buffer will be attached to the mbuf at the
+ * memory pool creation and will never be detached by the mbuf free calls.
+ * mbuf should not contain any room for data after the mbuf structure.
+ */
+#define RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF (1 << 0)
+
+/**
+ * Returns non zero if given mbuf has a pinned external buffer, or zero
+ * otherwise. The pinned external buffer is allocated at pool creation
+ * time and should not be freed on mbuf freeing.
+ *
+ * External buffer is a user-provided anonymous buffer.
+ */
+#define RTE_MBUF_HAS_PINNED_EXTBUF(mb) \
+    (rte_pktmbuf_priv_flags(mb->pool) & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)
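rte_mbuf_to_priv() only makes sense when the pool was created with a non-zero priv_size; nothing verifies that at run time. A sketch with app_meta purely illustrative, for a pool created with priv_size = sizeof(struct app_meta):

    struct app_meta {             /* hypothetical per-packet scratch */
        uint64_t rx_tsc;
    };

    struct app_meta *meta = rte_mbuf_to_priv(m);
    meta->rx_tsc = rte_rdtsc();   /* from rte_cycles.h */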
+
+#ifdef RTE_LIBRTE_MBUF_DEBUG
+
+/**  check mbuf type in debug mode */
+#define __rte_mbuf_sanity_check(m, is_h) rte_mbuf_sanity_check(m, is_h)
+
+#else /*  RTE_LIBRTE_MBUF_DEBUG */
+
+/**  check mbuf type in debug mode */
+#define __rte_mbuf_sanity_check(m, is_h) do { } while (0)
+
+#endif /*  RTE_LIBRTE_MBUF_DEBUG */
+
+#ifdef RTE_MBUF_REFCNT_ATOMIC
+
+/**
+ * Reads the value of an mbuf's refcnt.
+ * @param m
+ *   Mbuf to read
+ * @return
+ *   Reference count number.
+ */
+static inline uint16_t
+rte_mbuf_refcnt_read(const struct rte_mbuf *m)
+{
+    return (uint16_t)(rte_atomic16_read(&m->refcnt_atomic));
+}
+
+/**
+ * Sets an mbuf's refcnt to a defined value.
+ * @param m
+ *   Mbuf to update
+ * @param new_value
+ *   Value set
+ */
+static inline void
+rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
+{
+    rte_atomic16_set(&m->refcnt_atomic, (int16_t)new_value);
+}
+
+/* internal */
+static inline uint16_t
+__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
+{
+    return (uint16_t)(rte_atomic16_add_return(&m->refcnt_atomic, value));
+}
+
+/**
+ * Adds given value to an mbuf's refcnt and returns its new value.
+ * @param m
+ *   Mbuf to update
+ * @param value
+ *   Value to add/subtract
+ * @return
+ *   Updated value
+ */
+static inline uint16_t
+rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
+{
+    /*
+     * The atomic_add is an expensive operation, so we don't want to
+     * call it in the case where we know we are the unique holder of
+     * this mbuf (i.e. ref_cnt == 1). Otherwise, an atomic
+     * operation has to be used because concurrent accesses on the
+     * reference counter can occur.
+     */
+    if (likely(rte_mbuf_refcnt_read(m) == 1)) {
+        ++value;
+        rte_mbuf_refcnt_set(m, (uint16_t)value);
+        return (uint16_t)value;
+    }
+
+    return __rte_mbuf_refcnt_update(m, value);
+}
+
+#else /* ! RTE_MBUF_REFCNT_ATOMIC */
+
+/* internal */
+static inline uint16_t
+__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
+{
+    m->refcnt = (uint16_t)(m->refcnt + value);
+    return m->refcnt;
+}
+
+/**
+ * Adds given value to an mbuf's refcnt and returns its new value.
+ */
+static inline uint16_t
+rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
+{
+    return __rte_mbuf_refcnt_update(m, value);
+}
+
+/**
+ * Reads the value of an mbuf's refcnt.
+ */
+static inline uint16_t
+rte_mbuf_refcnt_read(const struct rte_mbuf *m)
+{
+    return m->refcnt;
+}
+
+/**
+ * Sets an mbuf's refcnt to the defined value.
+ */
+static inline void
+rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
+{
+    m->refcnt = new_value;
+}
+
+#endif /* RTE_MBUF_REFCNT_ATOMIC */
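The refcnt_update() fast path above relies on the current holder being unique; with more holders the atomic add is unavoidable. A typical use is fanning out a read-only, single-segment packet to two consumers, each of which finishes with its own rte_pktmbuf_free() (the queue helpers are hypothetical):

    rte_mbuf_refcnt_update(m, 1);   /* now two owners */
    enqueue_to_a(m);
    enqueue_to_b(m);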
+
+/**
+ * Reads the refcnt of an external buffer.
+ *
+ * @param shinfo
+ *   Shared data of the external buffer.
+ * @return
+ *   Reference count number.
+ */
+static inline uint16_t
+rte_mbuf_ext_refcnt_read(const struct rte_mbuf_ext_shared_info *shinfo)
+{
+    return (uint16_t)(rte_atomic16_read(&shinfo->refcnt_atomic));
+}
+
+/**
+ * Set refcnt of an external buffer.
+ *
+ * @param shinfo
+ *   Shared data of the external buffer.
+ * @param new_value
+ *   Value set
+ */
+static inline void
+rte_mbuf_ext_refcnt_set(struct rte_mbuf_ext_shared_info *shinfo,
+    uint16_t new_value)
+{
+    rte_atomic16_set(&shinfo->refcnt_atomic, (int16_t)new_value);
+}
+
+/**
+ * Add given value to refcnt of an external buffer and return its new
+ * value.
+ *
+ * @param shinfo
+ *   Shared data of the external buffer.
+ * @param value
+ *   Value to add/subtract
+ * @return
+ *   Updated value
+ */
+static inline uint16_t
+rte_mbuf_ext_refcnt_update(struct rte_mbuf_ext_shared_info *shinfo,
+    int16_t value)
+{
+    if (likely(rte_mbuf_ext_refcnt_read(shinfo) == 1)) {
+        ++value;
+        rte_mbuf_ext_refcnt_set(shinfo, (uint16_t)value);
+        return (uint16_t)value;
+    }
+
+    return (uint16_t)rte_atomic16_add_return(&shinfo->refcnt_atomic, value);
+}
+
+/** Mbuf prefetch */
+#define RTE_MBUF_PREFETCH_TO_FREE(m) do {       \
+        if ((m) != NULL)                        \
+            rte_prefetch0(m);                   \
+} while (0)
+
+
+/**
+ * Sanity checks on an mbuf.
+ *
+ * Check the consistency of the given mbuf. The function will cause a
+ * panic if corruption is detected.
+ *
+ * @param m
+ *   The mbuf to be checked.
+ * @param is_header
+ *   True if the mbuf is a packet header, false if it is a sub-segment
+ *   of a packet (in this case, some fields like nb_segs are not checked)
+ */
+void
+rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);
+
+/**
+ * Sanity checks on a mbuf.
+ *
+ * Almost like rte_mbuf_sanity_check(), but this function gives the reason
+ * if corruption is detected rather than panic.
+ *
+ * @param m
+ *   The mbuf to be checked.
+ * @param is_header
+ *   True if the mbuf is a packet header, false if it is a sub-segment
+ *   of a packet (in this case, some fields like nb_segs are not checked)
+ * @param reason
+ *   A reference to a string pointer where to store the reason why a mbuf is
+ *   considered invalid.
+ * @return
+ *   - 0 if no issue has been found, reason is left untouched.
+ *   - -1 if a problem is detected, reason then points to a string describing
+ *     the reason why the mbuf is deemed invalid.
+ */
+__rte_experimental
+int rte_mbuf_check(const struct rte_mbuf *m, int is_header,
+           const char **reason);
+
+#define MBUF_RAW_ALLOC_CHECK(m) do {                        \
+    RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);               \
+    RTE_ASSERT((m)->next == NULL);                          \
+    RTE_ASSERT((m)->nb_segs == 1);                          \
+    __rte_mbuf_sanity_check(m, 0);                          \
+} while (0)
+
+/**
+ * Allocate an uninitialized mbuf from mempool *mp*.
+ *
+ * This function can be used by PMDs (especially in RX functions) to
+ * allocate an uninitialized mbuf. The driver is responsible of
+ * initializing all the required fields. See rte_pktmbuf_reset().
+ * For standard needs, prefer rte_pktmbuf_alloc().
+ *
+ * The caller can expect that the following fields of the mbuf structure
+ * are initialized: buf_addr, buf_iova, buf_len, refcnt=1, nb_segs=1,
+ * next=NULL, pool, priv_size. The other fields must be initialized
+ * by the caller.
+ *
+ * @param mp
+ *   The mempool from which mbuf is allocated.
+ * @return
+ *   - The pointer to the new mbuf on success.
+ *   - NULL if allocation failed.
+ */
+static inline struct rte_mbuf *rte_mbuf_raw_alloc(struct rte_mempool *mp)
+{
+    struct rte_mbuf *m;
+
+    if (rte_mempool_get(mp, (void **)&m) < 0)
+        return NULL;
+    MBUF_RAW_ALLOC_CHECK(m);
+    return m;
+}
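rte_mbuf_raw_alloc() is the PMD-style path: no rte_pktmbuf_reset(), the driver initializes exactly the fields it needs. A hedged RX-refill style fragment (rxq_pool and the descriptor posting are hypothetical):

    struct rte_mbuf *m = rte_mbuf_raw_alloc(rxq_pool);

    if (m != NULL) {
        rte_iova_t dma = rte_mbuf_data_iova_default(m);
        post_rx_descriptor(dma);   /* hypothetical HW ring write */
    }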
+
+/**
+ * Put mbuf back into its original mempool.
+ *
+ * The caller must ensure that the mbuf is direct and properly
+ * reinitialized (refcnt=1, next=NULL, nb_segs=1), as done by
+ * rte_pktmbuf_prefree_seg().
+ *
+ * This function should be used with care, when optimization is
+ * required. For standard needs, prefer rte_pktmbuf_free() or
+ * rte_pktmbuf_free_seg().
+ *
+ * @param m
+ *   The mbuf to be freed.
+ */
+static __rte_always_inline void
+rte_mbuf_raw_free(struct rte_mbuf *m)
+{
+    RTE_ASSERT(!RTE_MBUF_CLONED(m) &&
+        (!RTE_MBUF_HAS_EXTBUF(m) || RTE_MBUF_HAS_PINNED_EXTBUF(m)));
+    RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);
+    RTE_ASSERT(m->next == NULL);
+    RTE_ASSERT(m->nb_segs == 1);
+    __rte_mbuf_sanity_check(m, 0);
+    rte_mempool_put(m->pool, m);
+}
+
+/**
+ * The packet mbuf constructor.
+ *
+ * This function initializes some fields in the mbuf structure that are
+ * not modified by the user once created (origin pool, buffer start
+ * address, and so on). This function is given as a callback function to
+ * rte_mempool_obj_iter() or rte_mempool_create() at pool creation time.
+ *
+ * @param mp
+ *   The mempool from which mbufs originate.
+ * @param opaque_arg
+ *   A pointer that can be used by the user to retrieve useful information
+ *   for mbuf initialization. This pointer is the opaque argument passed to
+ *   rte_mempool_obj_iter() or rte_mempool_create().
+ * @param m
+ *   The mbuf to initialize.
+ * @param i
+ *   The index of the mbuf in the pool table.
+ */
+void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg,
+        void *m, unsigned i);
+
+/**
+ * A packet mbuf pool constructor.
+ *
+ * This function initializes the mempool private data in the case of a
+ * pktmbuf pool. This private data is needed by the driver. The
+ * function must be called on the mempool before it is used, or it
+ * can be given as a callback function to rte_mempool_create() at
+ * pool creation. It can be extended by the user, for example, to
+ * provide another packet size.
+ *
+ * @param mp
+ *   The mempool from which mbufs originate.
+ * @param opaque_arg
+ *   A pointer that can be used by the user to retrieve useful information
+ *   for mbuf initialization. This pointer is the opaque argument passed to
+ *   rte_mempool_create().
+ */
+void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);
+
+/**
+ * Create a mbuf pool.
+ *
+ * This function creates and initializes a packet mbuf pool. It is
+ * a wrapper to rte_mempool functions.
+ *
+ * @param name
+ *   The name of the mbuf pool.
+ * @param n
+ *   The number of elements in the mbuf pool. The optimum size (in terms
+ *   of memory usage) for a mempool is when n is a power of two minus one:
+ *   n = (2^q - 1).
+ * @param cache_size
+ *   Size of the per-core object cache. See rte_mempool_create() for
+ *   details.
+ * @param priv_size
+ *   Size of application private are between the rte_mbuf structure
+ *   and the data buffer. This value must be aligned to RTE_MBUF_PRIV_ALIGN.
+ * @param data_room_size
+ *   Size of data buffer in each mbuf, including RTE_PKTMBUF_HEADROOM.
+ * @param socket_id
+ *   The socket identifier where the memory should be allocated. The
+ *   value can be *SOCKET_ID_ANY* if there is no NUMA constraint for the
+ *   reserved zone.
+ * @return
+ *   The pointer to the new allocated mempool, on success. NULL on error
+ *   with rte_errno set appropriately. Possible rte_errno values include:
+ *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ *    - E_RTE_SECONDARY - function was called from a secondary process instance
+ *    - EINVAL - cache size provided is too large, or priv_size is not aligned.
+ *    - ENOSPC - the maximum number of memzones has already been allocated
+ *    - EEXIST - a memzone with the same name already exists
+ *    - ENOMEM - no appropriate memory area found in which to create memzone
+ */
+struct rte_mempool *
+rte_pktmbuf_pool_create(const char *name, unsigned n,
+    unsigned cache_size, uint16_t priv_size, uint16_t data_room_size,
+    int socket_id);
+
+/**
+ * Create a mbuf pool with a given mempool ops name
+ *
+ * This function creates and initializes a packet mbuf pool. It is
+ * a wrapper to rte_mempool functions.
+ *
+ * @param name
+ *   The name of the mbuf pool.
+ * @param n
+ *   The number of elements in the mbuf pool. The optimum size (in terms
+ *   of memory usage) for a mempool is when n is a power of two minus one:
+ *   n = (2^q - 1).
+ * @param cache_size
+ *   Size of the per-core object cache. See rte_mempool_create() for
+ *   details.
+ * @param priv_size
+ *   Size of application private are between the rte_mbuf structure
+ *   and the data buffer. This value must be aligned to RTE_MBUF_PRIV_ALIGN.
+ * @param data_room_size
+ *   Size of data buffer in each mbuf, including RTE_PKTMBUF_HEADROOM.
+ * @param socket_id
+ *   The socket identifier where the memory should be allocated. The
+ *   value can be *SOCKET_ID_ANY* if there is no NUMA constraint for the
+ *   reserved zone.
+ * @param ops_name
+ *   The mempool ops name to be used for this mempool instead of
+ *   default mempool. The value can be *NULL* to use default mempool.
+ * @return
+ *   The pointer to the new allocated mempool, on success. NULL on error
+ *   with rte_errno set appropriately. Possible rte_errno values include:
+ *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ *    - E_RTE_SECONDARY - function was called from a secondary process instance
+ *    - EINVAL - cache size provided is too large, or priv_size is not aligned.
+ *    - ENOSPC - the maximum number of memzones has already been allocated
+ *    - EEXIST - a memzone with the same name already exists
+ *    - ENOMEM - no appropriate memory area found in which to create memzone
+ */
+struct rte_mempool *
+rte_pktmbuf_pool_create_by_ops(const char *name, unsigned int n,
+    unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size,
+    int socket_id, const char *ops_name);
+
+/** A structure that describes the pinned external buffer segment. */
+struct rte_pktmbuf_extmem {
+    void *buf_ptr;        /**< The virtual address of data buffer. */
+    rte_iova_t buf_iova;  /**< The IO address of the data buffer. */
+    size_t buf_len;       /**< External buffer length in bytes. */
+    uint16_t elt_size;    /**< mbuf element size in bytes. */
+};
+
+/**
+ * Create a mbuf pool with external pinned data buffers.
+ *
+ * This function creates and initializes a packet mbuf pool that contains
+ * only mbufs with external buffer. It is a wrapper to rte_mempool functions.
+ *
+ * @param name
+ *   The name of the mbuf pool.
+ * @param n
+ *   The number of elements in the mbuf pool. The optimum size (in terms
+ *   of memory usage) for a mempool is when n is a power of two minus one:
+ *   n = (2^q - 1).
+ * @param cache_size
+ *   Size of the per-core object cache. See rte_mempool_create() for
+ *   details.
+ * @param priv_size
+ *   Size of application private are between the rte_mbuf structure
+ *   and the data buffer. This value must be aligned to RTE_MBUF_PRIV_ALIGN.
+ * @param data_room_size
+ *   Size of data buffer in each mbuf, including RTE_PKTMBUF_HEADROOM.
+ * @param socket_id
+ *   The socket identifier where the memory should be allocated. The
+ *   value can be *SOCKET_ID_ANY* if there is no NUMA constraint for the
+ *   reserved zone.
+ * @param ext_mem
+ *   Pointer to the array of structures describing the external memory
+ *   for data buffers. It is caller responsibility to register this memory
+ *   with rte_extmem_register() (if needed), map this memory to appropriate
+ *   physical device, etc.
+ * @param ext_num
+ *   Number of elements in the ext_mem array.
+ * @return
+ *   The pointer to the new allocated mempool, on success. NULL on error
+ *   with rte_errno set appropriately. Possible rte_errno values include:
+ *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ *    - E_RTE_SECONDARY - function was called from a secondary process instance
+ *    - EINVAL - cache size provided is too large, or priv_size is not aligned.
+ *    - ENOSPC - the maximum number of memzones has already been allocated
+ *    - EEXIST - a memzone with the same name already exists
+ *    - ENOMEM - no appropriate memory area found in which to create memzone
+ */
+__rte_experimental
+struct rte_mempool *
+rte_pktmbuf_pool_create_extbuf(const char *name, unsigned int n,
+    unsigned int cache_size, uint16_t priv_size,
+    uint16_t data_room_size, int socket_id,
+    const struct rte_pktmbuf_extmem *ext_mem,
+    unsigned int ext_num);
+
+/**
+ * Get the data room size of mbufs stored in a pktmbuf_pool
+ *
+ * The data room size is the amount of data that can be stored in a
+ * mbuf including the headroom (RTE_PKTMBUF_HEADROOM).
+ *
+ * @param mp
+ *   The packet mbuf pool.
+ * @return
+ *   The data room size of mbufs stored in this mempool.
+ */
+static inline uint16_t
+rte_pktmbuf_data_room_size(struct rte_mempool *mp)
+{
+    struct rte_pktmbuf_pool_private *mbp_priv;
+
+    mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
+    return mbp_priv->mbuf_data_room_size;
+}
+
+/**
+ * Get the application private size of mbufs stored in a pktmbuf_pool
+ *
+ * The private size of mbuf is a zone located between the rte_mbuf
+ * structure and the data buffer where an application can store data
+ * associated to a packet.
+ *
+ * @param mp
+ *   The packet mbuf pool.
+ * @return
+ *   The private size of mbufs stored in this mempool.
+ */
+static inline uint16_t
+rte_pktmbuf_priv_size(struct rte_mempool *mp)
+{
+    struct rte_pktmbuf_pool_private *mbp_priv;
+
+    mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
+    return mbp_priv->mbuf_priv_size;
+}
+
+/**
+ * Reset the data_off field of a packet mbuf to its default value.
+ *
+ * The given mbuf must have only one segment, which should be empty.
+ *
+ * @param m
+ *   The packet mbuf's data_off field has to be reset.
+ */
+static inline void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
+{
+    m->data_off = (uint16_t)RTE_MIN((uint16_t)RTE_PKTMBUF_HEADROOM,
+        (uint16_t)m->buf_len);
+}
+
+/**
+ * Reset the fields of a packet mbuf to their default values.
+ *
+ * The given mbuf must have only one segment.
+ *
+ * @param m
+ *   The packet mbuf to be reset.
+ */
+#define MBUF_INVALID_PORT UINT16_MAX
+
+static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
+{
+    m->next = NULL;
+    m->pkt_len = 0;
+    m->tx_offload = 0;
+    m->vlan_tci = 0;
+    m->vlan_tci_outer = 0;
+    m->nb_segs = 1;
+    m->port = MBUF_INVALID_PORT;
+
+    m->ol_flags &= EXT_ATTACHED_MBUF;
+    m->packet_type = 0;
+    rte_pktmbuf_reset_headroom(m);
+
+    m->data_len = 0;
+    __rte_mbuf_sanity_check(m, 1);
+}
+
+/**
+ * Allocate a new mbuf from a mempool.
+ *
+ * This new mbuf contains one segment, which has a length of 0. The pointer
+ * to data is initialized to have some bytes of headroom in the buffer
+ * (if buffer size allows).
+ *
+ * @param mp
+ *   The mempool from which the mbuf is allocated.
+ * @return
+ *   - The pointer to the new mbuf on success.
+ *   - NULL if allocation failed.
+ */
+static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp)
+{
+    struct rte_mbuf *m;
+
+    if ((m = rte_mbuf_raw_alloc(mp)) != NULL)
+        rte_pktmbuf_reset(m);
+    return m;
+}
+
+/**
+ * Allocate a bulk of mbufs, initialize refcnt and reset the fields to default
+ * values.
+ *
+ * @param pool
+ *   The mempool from which mbufs are allocated.
+ * @param mbufs
+ *   Array of pointers to mbufs
+ * @param count
+ *   Array size
+ * @return
+ *   - 0: Success
+ *   - -ENOENT: Not enough entries in the mempool; no mbufs are retrieved.
+ */
+static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
+    struct rte_mbuf **mbufs, unsigned count)
+{
+    unsigned idx = 0;
+    int rc;
+
+    rc = rte_mempool_get_bulk(pool, (void **)mbufs, count);
+    if (unlikely(rc))
+        return rc;
+
+    /* To understand duff's device on loop unwinding optimization, see
+     * https://en.wikipedia.org/wiki/Duff's_device.
+     * Here while() loop is used rather than do() while{} to avoid extra
+     * check if count is zero.
+     */
+    switch (count % 4) {
+    case 0:
+        while (idx != count) {
+            MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
+            rte_pktmbuf_reset(mbufs[idx]);
+            idx++;
+            /* fall-through */
+    case 3:
+            MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
+            rte_pktmbuf_reset(mbufs[idx]);
+            idx++;
+            /* fall-through */
+    case 2:
+            MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
+            rte_pktmbuf_reset(mbufs[idx]);
+            idx++;
+            /* fall-through */
+    case 1:
+            MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
+            rte_pktmbuf_reset(mbufs[idx]);
+            idx++;
+            /* fall-through */
+        }
+    }
+    return 0;
+}
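After rte_pktmbuf_alloc() the mbuf is empty with default headroom, so payload is grown with the tailroom-checked helpers declared later in this header (rte_pktmbuf_append() and friends). Sketch:

    struct rte_mbuf *m = rte_pktmbuf_alloc(pool);

    if (m != NULL) {
        char *p = rte_pktmbuf_append(m, 64); /* grows data_len/pkt_len */
        if (p != NULL)
            memset(p, 0, 64);                /* build payload in place */
        else
            rte_pktmbuf_free(m);             /* not enough tailroom */
    }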
+ */ +static inline struct rte_mbuf_ext_shared_info * +rte_pktmbuf_ext_shinfo_init_helper(void *buf_addr, uint16_t *buf_len, + rte_mbuf_extbuf_free_callback_t free_cb, void *fcb_opaque) +{ + struct rte_mbuf_ext_shared_info *shinfo; + void *buf_end = RTE_PTR_ADD(buf_addr, *buf_len); + void *addr; + + addr = RTE_PTR_ALIGN_FLOOR(RTE_PTR_SUB(buf_end, sizeof(*shinfo)), + sizeof(uintptr_t)); + if (addr <= buf_addr) + return NULL; + + shinfo = (struct rte_mbuf_ext_shared_info *)addr; + shinfo->free_cb = free_cb; + shinfo->fcb_opaque = fcb_opaque; + rte_mbuf_ext_refcnt_set(shinfo, 1); + + *buf_len = (uint16_t)RTE_PTR_DIFF(shinfo, buf_addr); + return shinfo; +} + +/** + * Attach an external buffer to a mbuf. + * + * User-managed anonymous buffer can be attached to an mbuf. When attaching + * it, corresponding free callback function and its argument should be + * provided via shinfo. This callback function will be called once all the + * mbufs are detached from the buffer (refcnt becomes zero). + * + * The headroom length of the attaching mbuf will be set to zero and this + * can be properly adjusted after attachment. For example, ``rte_pktmbuf_adj()`` + * or ``rte_pktmbuf_reset_headroom()`` might be used. + * + * Similarly, the packet length is initialized to 0. If the buffer contains + * data, the user has to adjust ``data_len`` and the ``pkt_len`` field of + * the mbuf accordingly. + * + * More mbufs can be attached to the same external buffer by + * ``rte_pktmbuf_attach()`` once the external buffer has been attached by + * this API. + * + * Detachment can be done by either ``rte_pktmbuf_detach_extbuf()`` or + * ``rte_pktmbuf_detach()``. + * + * Memory for shared data must be provided and user must initialize all of + * the content properly, especially free callback and refcnt. The pointer + * of shared data will be stored in m->shinfo. + * ``rte_pktmbuf_ext_shinfo_init_helper`` can help to simply spare a few + * bytes at the end of buffer for the shared data, store free callback and + * its argument and set the refcnt to 1. The following is an example: + * + * struct rte_mbuf_ext_shared_info *shinfo = + * rte_pktmbuf_ext_shinfo_init_helper(buf_addr, &buf_len, + * free_cb, fcb_arg); + * rte_pktmbuf_attach_extbuf(m, buf_addr, buf_iova, buf_len, shinfo); + * rte_pktmbuf_reset_headroom(m); + * rte_pktmbuf_adj(m, data_len); + * + * Attaching an external buffer is quite similar to mbuf indirection in + * replacing buffer addresses and length of a mbuf, but a few differences: + * - When an indirect mbuf is attached, refcnt of the direct mbuf would be + * 2 as long as the direct mbuf itself isn't freed after the attachment. + * In such cases, the buffer area of a direct mbuf must be read-only. But + * external buffer has its own refcnt and it starts from 1. Unless + * multiple mbufs are attached to a mbuf having an external buffer, the + * external buffer is writable. + * - There's no need to allocate buffer from a mempool. Any buffer can be + * attached with appropriate free callback and its IO address. + * - Smaller metadata is required to maintain shared data such as refcnt. + * + * @param m + * The pointer to the mbuf. + * @param buf_addr + * The pointer to the external buffer. + * @param buf_iova + * IO address of the external buffer. + * @param buf_len + * The size of the external buffer. + * @param shinfo + * User-provided memory for shared data of the external buffer. 
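+ *
+ * The free callback stored in shinfo must match
+ * rte_mbuf_extbuf_free_callback_t; a minimal illustrative callback,
+ * assuming the buffer came from rte_malloc():
+ *
+ *   static void
+ *   ext_buf_free_cb(void *addr, void *opaque __rte_unused)
+ *   {
+ *       rte_free(addr);
+ *   }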
+ */ +static inline void +rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr, + rte_iova_t buf_iova, uint16_t buf_len, + struct rte_mbuf_ext_shared_info *shinfo) +{ + /* mbuf should not be read-only */ + RTE_ASSERT(RTE_MBUF_DIRECT(m) && rte_mbuf_refcnt_read(m) == 1); + RTE_ASSERT(shinfo->free_cb != NULL); + + m->buf_addr = buf_addr; + m->buf_iova = buf_iova; + m->buf_len = buf_len; + + m->data_len = 0; + m->data_off = 0; + + m->ol_flags |= EXT_ATTACHED_MBUF; + m->shinfo = shinfo; +} + +/** + * Detach the external buffer attached to a mbuf, same as + * ``rte_pktmbuf_detach()`` + * + * @param m + * The mbuf having external buffer. + */ +#define rte_pktmbuf_detach_extbuf(m) rte_pktmbuf_detach(m) + +/** + * Copy dynamic fields from msrc to mdst. + * + * @param mdst + * The destination mbuf. + * @param msrc + * The source mbuf. + */ +static inline void +rte_mbuf_dynfield_copy(struct rte_mbuf *mdst, const struct rte_mbuf *msrc) +{ + memcpy(&mdst->dynfield1, msrc->dynfield1, sizeof(mdst->dynfield1)); +} + +/* internal */ +static inline void +__rte_pktmbuf_copy_hdr(struct rte_mbuf *mdst, const struct rte_mbuf *msrc) +{ + mdst->port = msrc->port; + mdst->vlan_tci = msrc->vlan_tci; + mdst->vlan_tci_outer = msrc->vlan_tci_outer; + mdst->tx_offload = msrc->tx_offload; + mdst->hash = msrc->hash; + mdst->packet_type = msrc->packet_type; + mdst->timestamp = msrc->timestamp; + rte_mbuf_dynfield_copy(mdst, msrc); +} + +/** + * Attach packet mbuf to another packet mbuf. + * + * If the mbuf we are attaching to isn't a direct buffer and is attached to + * an external buffer, the mbuf being attached will be attached to the + * external buffer instead of mbuf indirection. + * + * Otherwise, the mbuf will be indirectly attached. After attachment we + * refer the mbuf we attached as 'indirect', while mbuf we attached to as + * 'direct'. The direct mbuf's reference counter is incremented. + * + * Right now, not supported: + * - attachment for already indirect mbuf (e.g. - mi has to be direct). + * - mbuf we trying to attach (mi) is used by someone else + * e.g. it's reference counter is greater then 1. + * + * @param mi + * The indirect packet mbuf. + * @param m + * The packet mbuf we're attaching to. + */ +static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m) +{ + RTE_ASSERT(RTE_MBUF_DIRECT(mi) && + rte_mbuf_refcnt_read(mi) == 1); + + if (RTE_MBUF_HAS_EXTBUF(m)) { + rte_mbuf_ext_refcnt_update(m->shinfo, 1); + mi->ol_flags = m->ol_flags; + mi->shinfo = m->shinfo; + } else { + /* if m is not direct, get the mbuf that embeds the data */ + rte_mbuf_refcnt_update(rte_mbuf_from_indirect(m), 1); + mi->priv_size = m->priv_size; + mi->ol_flags = m->ol_flags | IND_ATTACHED_MBUF; + } + + __rte_pktmbuf_copy_hdr(mi, m); + + mi->data_off = m->data_off; + mi->data_len = m->data_len; + mi->buf_iova = m->buf_iova; + mi->buf_addr = m->buf_addr; + mi->buf_len = m->buf_len; + + mi->next = NULL; + mi->pkt_len = mi->data_len; + mi->nb_segs = 1; + + __rte_mbuf_sanity_check(mi, 1); + __rte_mbuf_sanity_check(m, 0); +} + +/** + * @internal used by rte_pktmbuf_detach(). + * + * Decrement the reference counter of the external buffer. When the + * reference counter becomes 0, the buffer is freed by pre-registered + * callback. 
+ */ +static inline void +__rte_pktmbuf_free_extbuf(struct rte_mbuf *m) +{ + RTE_ASSERT(RTE_MBUF_HAS_EXTBUF(m)); + RTE_ASSERT(m->shinfo != NULL); + + if (rte_mbuf_ext_refcnt_update(m->shinfo, -1) == 0) + m->shinfo->free_cb(m->buf_addr, m->shinfo->fcb_opaque); +} + +/** + * @internal used by rte_pktmbuf_detach(). + * + * Decrement the direct mbuf's reference counter. When the reference + * counter becomes 0, the direct mbuf is freed. + */ +static inline void +__rte_pktmbuf_free_direct(struct rte_mbuf *m) +{ + struct rte_mbuf *md; + + RTE_ASSERT(RTE_MBUF_CLONED(m)); + + md = rte_mbuf_from_indirect(m); + + if (rte_mbuf_refcnt_update(md, -1) == 0) { + md->next = NULL; + md->nb_segs = 1; + rte_mbuf_refcnt_set(md, 1); + rte_mbuf_raw_free(md); + } +} + +/** + * Detach a packet mbuf from external buffer or direct buffer. + * + * - decrement refcnt and free the external/direct buffer if refcnt + * becomes zero. + * - restore original mbuf address and length values. + * - reset pktmbuf data and data_len to their default values. + * + * All other fields of the given packet mbuf will be left intact. + * + * If the packet mbuf was allocated from the pool with pinned + * external buffers the rte_pktmbuf_detach does nothing with the + * mbuf of this kind, because the pinned buffers are not supposed + * to be detached. + * + * @param m + * The indirect attached packet mbuf. + */ +static inline void rte_pktmbuf_detach(struct rte_mbuf *m) +{ + struct rte_mempool *mp = m->pool; + uint32_t mbuf_size, buf_len; + uint16_t priv_size; + + if (RTE_MBUF_HAS_EXTBUF(m)) { + /* + * The mbuf has the external attached buffer, + * we should check the type of the memory pool where + * the mbuf was allocated from to detect the pinned + * external buffer. + */ + uint32_t flags = rte_pktmbuf_priv_flags(mp); + + if (flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) { + /* + * The pinned external buffer should not be + * detached from its backing mbuf, just exit. + */ + return; + } + __rte_pktmbuf_free_extbuf(m); + } else { + __rte_pktmbuf_free_direct(m); + } + priv_size = rte_pktmbuf_priv_size(mp); + mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size); + buf_len = rte_pktmbuf_data_room_size(mp); + + m->priv_size = priv_size; + m->buf_addr = (char *)m + mbuf_size; + m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size; + m->buf_len = (uint16_t)buf_len; + rte_pktmbuf_reset_headroom(m); + m->data_len = 0; + m->ol_flags = 0; +} + +/** + * @internal Handle the packet mbufs with attached pinned external buffer + * on the mbuf freeing: + * + * - return zero if reference counter in shinfo is one. It means there is + * no more reference to this pinned buffer and mbuf can be returned to + * the pool + * + * - otherwise (if reference counter is not one), decrement reference + * counter and return non-zero value to prevent freeing the backing mbuf. + * + * Returns non zero if mbuf should not be freed. + */ +static inline int __rte_pktmbuf_pinned_extbuf_decref(struct rte_mbuf *m) +{ + struct rte_mbuf_ext_shared_info *shinfo; + + /* Clear flags, mbuf is being freed. */ + m->ol_flags = EXT_ATTACHED_MBUF; + shinfo = m->shinfo; + + /* Optimize for performance - do not dec/reinit */ + if (likely(rte_mbuf_ext_refcnt_read(shinfo) == 1)) + return 0; + + /* + * Direct usage of add primitive to avoid + * duplication of comparing with one. + */ + if (likely(rte_atomic16_add_return + (&shinfo->refcnt_atomic, -1))) + return 1; + + /* Reinitialize counter before mbuf freeing. 
*/
+	rte_mbuf_ext_refcnt_set(shinfo, 1);
+	return 0;
+}
+
+/**
+ * Decrease reference counter and unlink a mbuf segment
+ *
+ * This function does the same as a free, except that it does not
+ * return the segment to its pool.
+ * It decreases the reference counter, and if it reaches 0, it is
+ * detached from its parent for an indirect mbuf.
+ *
+ * @param m
+ *   The mbuf to be unlinked
+ * @return
+ *   - (m) if it is the last reference. It can be recycled or freed.
+ *   - (NULL) if the mbuf still has remaining references on it.
+ */
+static __rte_always_inline struct rte_mbuf *
+rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
+{
+	__rte_mbuf_sanity_check(m, 0);
+
+	if (likely(rte_mbuf_refcnt_read(m) == 1)) {
+
+		if (!RTE_MBUF_DIRECT(m)) {
+			rte_pktmbuf_detach(m);
+			if (RTE_MBUF_HAS_EXTBUF(m) &&
+			    RTE_MBUF_HAS_PINNED_EXTBUF(m) &&
+			    __rte_pktmbuf_pinned_extbuf_decref(m))
+				return NULL;
+		}
+
+		if (m->next != NULL) {
+			m->next = NULL;
+			m->nb_segs = 1;
+		}
+
+		return m;
+
+	} else if (__rte_mbuf_refcnt_update(m, -1) == 0) {
+
+		if (!RTE_MBUF_DIRECT(m)) {
+			rte_pktmbuf_detach(m);
+			if (RTE_MBUF_HAS_EXTBUF(m) &&
+			    RTE_MBUF_HAS_PINNED_EXTBUF(m) &&
+			    __rte_pktmbuf_pinned_extbuf_decref(m))
+				return NULL;
+		}
+
+		if (m->next != NULL) {
+			m->next = NULL;
+			m->nb_segs = 1;
+		}
+		rte_mbuf_refcnt_set(m, 1);
+
+		return m;
+	}
+	return NULL;
+}
+
+/**
+ * Free a segment of a packet mbuf into its original mempool.
+ *
+ * Free an mbuf, without parsing other segments in case of chained
+ * buffers.
+ *
+ * @param m
+ *   The packet mbuf segment to be freed.
+ */
+static __rte_always_inline void
+rte_pktmbuf_free_seg(struct rte_mbuf *m)
+{
+	m = rte_pktmbuf_prefree_seg(m);
+	if (likely(m != NULL))
+		rte_mbuf_raw_free(m);
+}
+
+/**
+ * Free a packet mbuf back into its original mempool.
+ *
+ * Free an mbuf, and all its segments in case of chained buffers. Each
+ * segment is added back into its original mempool.
+ *
+ * @param m
+ *   The packet mbuf to be freed. If NULL, the function does nothing.
+ */
+static inline void rte_pktmbuf_free(struct rte_mbuf *m)
+{
+	struct rte_mbuf *m_next;
+
+	if (m != NULL)
+		__rte_mbuf_sanity_check(m, 1);
+
+	while (m != NULL) {
+		m_next = m->next;
+		rte_pktmbuf_free_seg(m);
+		m = m_next;
+	}
+}
+
+/**
+ * Free a bulk of packet mbufs back into their original mempools.
+ *
+ * Free a bulk of mbufs, and all their segments in case of chained buffers.
+ * Each segment is added back into its original mempool.
+ *
+ * @param mbufs
+ *   Array of pointers to packet mbufs.
+ *   The array may contain NULL pointers.
+ * @param count
+ *   Array size.
+ */
+__rte_experimental
+void rte_pktmbuf_free_bulk(struct rte_mbuf **mbufs, unsigned int count);
+
+/**
+ * Create a "clone" of the given packet mbuf.
+ *
+ * Walks through all segments of the given packet mbuf, and for each of them:
+ *  - Creates a new packet mbuf from the given pool.
+ *  - Attaches the newly created mbuf to the segment.
+ * Then updates pkt_len and nb_segs of the "clone" packet mbuf to match values
+ * from the original packet mbuf.
+ *
+ * @param md
+ *   The packet mbuf to be cloned.
+ * @param mp
+ *   The mempool from which the "clone" mbufs are allocated.
+ * @return
+ *   - The pointer to the new "clone" mbuf on success.
+ *   - NULL if allocation fails.
+ */
+struct rte_mbuf *
+rte_pktmbuf_clone(struct rte_mbuf *md, struct rte_mempool *mp);
+
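+/*
+ * A minimal, illustrative cloning sketch (assumes `mp` is an initialized
+ * pktmbuf pool). The clone shares the original's data buffers through
+ * mbuf indirection, so that data must be treated as read-only while
+ * both references are alive:
+ *
+ *	struct rte_mbuf *dup = rte_pktmbuf_clone(m, mp);
+ *	if (dup == NULL)
+ *		return -ENOMEM;	// e.g. pool exhausted
+ */
+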
+/**
+ * Create a full copy of a given packet mbuf.
+ *
+ * Copies all the data from a given packet mbuf to a newly allocated
+ * set of mbufs. The private data is not copied.
+ *
+ * @param m
+ *   The packet mbuf to be copied.
+ * @param mp
+ *   The mempool from which the "clone" mbufs are allocated.
+ * @param offset
+ *   The number of bytes to skip before copying.
+ *   If the mbuf does not have that many bytes, it is an error
+ *   and NULL is returned.
+ * @param length
+ *   The upper limit on bytes to copy. Passing UINT32_MAX
+ *   means all data (after offset).
+ * @return
+ *   - The pointer to the new "clone" mbuf on success.
+ *   - NULL if allocation fails.
+ */
+__rte_experimental
+struct rte_mbuf *
+rte_pktmbuf_copy(const struct rte_mbuf *m, struct rte_mempool *mp,
+		 uint32_t offset, uint32_t length);
+
+/**
+ * Adds the given value to the refcnt of all packet mbuf segments.
+ *
+ * Walks through all segments of the given packet mbuf and for each of them
+ * invokes rte_mbuf_refcnt_update().
+ *
+ * @param m
+ *   The packet mbuf whose refcnt is to be updated.
+ * @param v
+ *   The value to add to the mbuf's segments refcnt.
+ */
+static inline void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
+{
+	__rte_mbuf_sanity_check(m, 1);
+
+	do {
+		rte_mbuf_refcnt_update(m, v);
+	} while ((m = m->next) != NULL);
+}
+
+/**
+ * Get the headroom in a packet mbuf.
+ *
+ * @param m
+ *   The packet mbuf.
+ * @return
+ *   The length of the headroom.
+ */
+static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
+{
+	__rte_mbuf_sanity_check(m, 0);
+	return m->data_off;
+}
+
+/**
+ * Get the tailroom of a packet mbuf.
+ *
+ * @param m
+ *   The packet mbuf.
+ * @return
+ *   The length of the tailroom.
+ */
+static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
+{
+	__rte_mbuf_sanity_check(m, 0);
+	return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) -
+			  m->data_len);
+}
+
+/**
+ * Get the last segment of the packet.
+ *
+ * @param m
+ *   The packet mbuf.
+ * @return
+ *   The last segment of the given mbuf.
+ */
+static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
+{
+	__rte_mbuf_sanity_check(m, 1);
+	while (m->next != NULL)
+		m = m->next;
+	return m;
+}
+
+/* deprecated */
+#define rte_pktmbuf_mtophys_offset(m, o) \
+	rte_pktmbuf_iova_offset(m, o)
+
+/* deprecated */
+#define rte_pktmbuf_mtophys(m) rte_pktmbuf_iova(m)
+
+/**
+ * A macro that returns the length of the packet.
+ *
+ * The value can be read or assigned.
+ *
+ * @param m
+ *   The packet mbuf.
+ */
+#define rte_pktmbuf_pkt_len(m) ((m)->pkt_len)
+
+/**
+ * A macro that returns the length of the segment.
+ *
+ * The value can be read or assigned.
+ *
+ * @param m
+ *   The packet mbuf.
+ */
+#define rte_pktmbuf_data_len(m) ((m)->data_len)
+
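+/*
+ * How the length accessors relate on a freshly allocated, single-segment
+ * mbuf (sketch; `mp` is an initialized pktmbuf pool whose data room is
+ * at least RTE_PKTMBUF_HEADROOM):
+ *
+ *	struct rte_mbuf *m = rte_pktmbuf_alloc(mp);
+ *
+ *	rte_pktmbuf_pkt_len(m) == rte_pktmbuf_data_len(m) == 0
+ *	rte_pktmbuf_headroom(m) == RTE_PKTMBUF_HEADROOM
+ *	rte_pktmbuf_tailroom(m) == rte_pktmbuf_data_room_size(mp)
+ *	                           - RTE_PKTMBUF_HEADROOM
+ */
+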
+/**
+ * Prepend len bytes to an mbuf data area.
+ *
+ * Returns a pointer to the new
+ * data start address. If there is not enough headroom in the first
+ * segment, the function will return NULL, without modifying the mbuf.
+ *
+ * @param m
+ *   The packet mbuf.
+ * @param len
+ *   The amount of data to prepend (in bytes).
+ * @return
+ *   A pointer to the start of the newly prepended data, or
+ *   NULL if there is not enough headroom space in the first segment
+ */
+static inline char *rte_pktmbuf_prepend(struct rte_mbuf *m,
+					uint16_t len)
+{
+	__rte_mbuf_sanity_check(m, 1);
+
+	if (unlikely(len > rte_pktmbuf_headroom(m)))
+		return NULL;
+
+	/* NB: elaborating the subtraction like this instead of using -=
+	 * allows us to ensure the result type is uint16_t, avoiding
+	 * compiler warnings on gcc 8.1 at least */
+	m->data_off = (uint16_t)(m->data_off - len);
+	m->data_len = (uint16_t)(m->data_len + len);
+	m->pkt_len  = (m->pkt_len + len);
+
+	return (char *)m->buf_addr + m->data_off;
+}
+
+/**
+ * Append len bytes to an mbuf.
+ *
+ * Append len bytes to an mbuf and return a pointer to the start address
+ * of the added data. If there is not enough tailroom in the last
+ * segment, the function will return NULL, without modifying the mbuf.
+ *
+ * @param m
+ *   The packet mbuf.
+ * @param len
+ *   The amount of data to append (in bytes).
+ * @return
+ *   A pointer to the start of the newly appended data, or
+ *   NULL if there is not enough tailroom space in the last segment
+ */
+static inline char *rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
+{
+	void *tail;
+	struct rte_mbuf *m_last;
+
+	__rte_mbuf_sanity_check(m, 1);
+
+	m_last = rte_pktmbuf_lastseg(m);
+	if (unlikely(len > rte_pktmbuf_tailroom(m_last)))
+		return NULL;
+
+	tail = (char *)m_last->buf_addr + m_last->data_off + m_last->data_len;
+	m_last->data_len = (uint16_t)(m_last->data_len + len);
+	m->pkt_len  = (m->pkt_len + len);
+	return (char *)tail;
+}
+
+/**
+ * Remove len bytes at the beginning of an mbuf.
+ *
+ * Returns a pointer to the start address of the new data area. If the
+ * length is greater than the length of the first segment, then the
+ * function will fail and return NULL, without modifying the mbuf.
+ *
+ * @param m
+ *   The packet mbuf.
+ * @param len
+ *   The amount of data to remove (in bytes).
+ * @return
+ *   A pointer to the new start of the data.
+ */
+static inline char *rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
+{
+	__rte_mbuf_sanity_check(m, 1);
+
+	if (unlikely(len > m->data_len))
+		return NULL;
+
+	/* NB: elaborating the addition like this instead of using +=
+	 * allows us to ensure the result type is uint16_t, avoiding
+	 * compiler warnings on gcc 8.1 at least */
+	m->data_len = (uint16_t)(m->data_len - len);
+	m->data_off = (uint16_t)(m->data_off + len);
+	m->pkt_len  = (m->pkt_len - len);
+	return (char *)m->buf_addr + m->data_off;
+}
+
+/**
+ * Remove len bytes of data at the end of the mbuf.
+ *
+ * If the length is greater than the length of the last segment, the
+ * function will fail and return -1 without modifying the mbuf.
+ *
+ * @param m
+ *   The packet mbuf.
+ * @param len
+ *   The amount of data to remove (in bytes).
+ * @return
+ *   - 0: On success.
+ *   - -1: On error.
+ */
+static inline int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
+{
+	struct rte_mbuf *m_last;
+
+	__rte_mbuf_sanity_check(m, 1);
+
+	m_last = rte_pktmbuf_lastseg(m);
+	if (unlikely(len > m_last->data_len))
+		return -1;
+
+	m_last->data_len = (uint16_t)(m_last->data_len - len);
+	m->pkt_len  = (m->pkt_len - len);
+	return 0;
+}
+
+/**
+ * Test if mbuf data is contiguous.
+ *
+ * @param m
+ *   The packet mbuf.
+ * @return
+ *   - 1, if all data is contiguous (one segment).
+ *   - 0, if there are several segments.
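+ *
+ * A typical guard (sketch) before parsing headers in place, with a
+ * hypothetical parse() consumer:
+ *
+ *   if (rte_pktmbuf_is_contiguous(m))
+ *       parse(rte_pktmbuf_mtod(m, const void *), rte_pktmbuf_pkt_len(m));
+ *   /- otherwise fall back to rte_pktmbuf_read() or linearize first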
+ */ +static inline int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m) +{ + __rte_mbuf_sanity_check(m, 1); + return m->nb_segs == 1; +} + +/** + * @internal used by rte_pktmbuf_read(). + */ +const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off, + uint32_t len, void *buf); + +/** + * Read len data bytes in a mbuf at specified offset. + * + * If the data is contiguous, return the pointer in the mbuf data, else + * copy the data in the buffer provided by the user and return its + * pointer. + * + * @param m + * The pointer to the mbuf. + * @param off + * The offset of the data in the mbuf. + * @param len + * The amount of bytes to read. + * @param buf + * The buffer where data is copied if it is not contiguous in mbuf + * data. Its length should be at least equal to the len parameter. + * @return + * The pointer to the data, either in the mbuf if it is contiguous, + * or in the user buffer. If mbuf is too small, NULL is returned. + */ +static inline const void *rte_pktmbuf_read(const struct rte_mbuf *m, + uint32_t off, uint32_t len, void *buf) +{ + if (likely(off + len <= rte_pktmbuf_data_len(m))) + return rte_pktmbuf_mtod_offset(m, char *, off); + else + return __rte_pktmbuf_read(m, off, len, buf); +} + +/** + * Chain an mbuf to another, thereby creating a segmented packet. + * + * Note: The implementation will do a linear walk over the segments to find + * the tail entry. For cases when there are many segments, it's better to + * chain the entries manually. + * + * @param head + * The head of the mbuf chain (the first packet) + * @param tail + * The mbuf to put last in the chain + * + * @return + * - 0, on success. + * - -EOVERFLOW, if the chain segment limit exceeded + */ +static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail) +{ + struct rte_mbuf *cur_tail; + + /* Check for number-of-segments-overflow */ + if (head->nb_segs + tail->nb_segs > RTE_MBUF_MAX_NB_SEGS) + return -EOVERFLOW; + + /* Chain 'tail' onto the old tail */ + cur_tail = rte_pktmbuf_lastseg(head); + cur_tail->next = tail; + + /* accumulate number of segments and total length. + * NB: elaborating the addition like this instead of using + * -= allows us to ensure the result type is uint16_t + * avoiding compiler warnings on gcc 8.1 at least */ + head->nb_segs = (uint16_t)(head->nb_segs + tail->nb_segs); + head->pkt_len += tail->pkt_len; + + /* pkt_len is only set in the head */ + tail->pkt_len = tail->data_len; + + return 0; +} + +/* + * @warning + * @b EXPERIMENTAL: This API may change without prior notice. + * + * For given input values generate raw tx_offload value. + * Note that it is caller responsibility to make sure that input parameters + * don't exceed maximum bit-field values. + * @param il2 + * l2_len value. + * @param il3 + * l3_len value. + * @param il4 + * l4_len value. + * @param tso + * tso_segsz value. + * @param ol3 + * outer_l3_len value. + * @param ol2 + * outer_l2_len value. + * @param unused + * unused value. + * @return + * raw tx_offload value. + */ +static __rte_always_inline uint64_t +rte_mbuf_tx_offload(uint64_t il2, uint64_t il3, uint64_t il4, uint64_t tso, + uint64_t ol3, uint64_t ol2, uint64_t unused) +{ + return il2 << RTE_MBUF_L2_LEN_OFS | + il3 << RTE_MBUF_L3_LEN_OFS | + il4 << RTE_MBUF_L4_LEN_OFS | + tso << RTE_MBUF_TSO_SEGSZ_OFS | + ol3 << RTE_MBUF_OUTL3_LEN_OFS | + ol2 << RTE_MBUF_OUTL2_LEN_OFS | + unused << RTE_MBUF_TXOFLD_UNUSED_OFS; +} + +/** + * Validate general requirements for Tx offload in mbuf. 
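+ *
+ * For example, a plain IPv4/TCP TSO request is expected to carry
+ * (lengths are illustrative):
+ *
+ *   m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG;
+ *   m->tx_offload = rte_mbuf_tx_offload(14, 20, 20, 1400, 0, 0, 0);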
+ * + * This function checks correctness and completeness of Tx offload settings. + * + * @param m + * The packet mbuf to be validated. + * @return + * 0 if packet is valid + */ +static inline int +rte_validate_tx_offload(const struct rte_mbuf *m) +{ + uint64_t ol_flags = m->ol_flags; + + /* Does packet set any of available offloads? */ + if (!(ol_flags & PKT_TX_OFFLOAD_MASK)) + return 0; + + /* IP checksum can be counted only for IPv4 packet */ + if ((ol_flags & PKT_TX_IP_CKSUM) && (ol_flags & PKT_TX_IPV6)) + return -EINVAL; + + /* IP type not set when required */ + if (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG)) + if (!(ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6))) + return -EINVAL; + + /* Check requirements for TSO packet */ + if (ol_flags & PKT_TX_TCP_SEG) + if ((m->tso_segsz == 0) || + ((ol_flags & PKT_TX_IPV4) && + !(ol_flags & PKT_TX_IP_CKSUM))) + return -EINVAL; + + /* PKT_TX_OUTER_IP_CKSUM set for non outer IPv4 packet. */ + if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) && + !(ol_flags & PKT_TX_OUTER_IPV4)) + return -EINVAL; + + return 0; +} + +/** + * @internal used by rte_pktmbuf_linearize(). + */ +int __rte_pktmbuf_linearize(struct rte_mbuf *mbuf); + +/** + * Linearize data in mbuf. + * + * This function moves the mbuf data in the first segment if there is enough + * tailroom. The subsequent segments are unchained and freed. + * + * @param mbuf + * mbuf to linearize + * @return + * - 0, on success + * - -1, on error + */ +static inline int +rte_pktmbuf_linearize(struct rte_mbuf *mbuf) +{ + if (rte_pktmbuf_is_contiguous(mbuf)) + return 0; + return __rte_pktmbuf_linearize(mbuf); +} + +/** + * Dump an mbuf structure to a file. + * + * Dump all fields for the given packet mbuf and all its associated + * segments (in the case of a chained buffer). + * + * @param f + * A pointer to a file for output + * @param m + * The packet mbuf. + * @param dump_len + * If dump_len != 0, also dump the "dump_len" first data bytes of + * the packet. + */ +void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len); + +/** + * Get the value of mbuf sched queue_id field. + */ +static inline uint32_t +rte_mbuf_sched_queue_get(const struct rte_mbuf *m) +{ + return m->hash.sched.queue_id; +} + +/** + * Get the value of mbuf sched traffic_class field. + */ +static inline uint8_t +rte_mbuf_sched_traffic_class_get(const struct rte_mbuf *m) +{ + return m->hash.sched.traffic_class; +} + +/** + * Get the value of mbuf sched color field. + */ +static inline uint8_t +rte_mbuf_sched_color_get(const struct rte_mbuf *m) +{ + return m->hash.sched.color; +} + +/** + * Get the values of mbuf sched queue_id, traffic_class and color. + * + * @param m + * Mbuf to read + * @param queue_id + * Returns the queue id + * @param traffic_class + * Returns the traffic class id + * @param color + * Returns the colour id + */ +static inline void +rte_mbuf_sched_get(const struct rte_mbuf *m, uint32_t *queue_id, + uint8_t *traffic_class, + uint8_t *color) +{ + struct rte_mbuf_sched sched = m->hash.sched; + + *queue_id = sched.queue_id; + *traffic_class = sched.traffic_class; + *color = sched.color; +} + +/** + * Set the mbuf sched queue_id to the defined value. + */ +static inline void +rte_mbuf_sched_queue_set(struct rte_mbuf *m, uint32_t queue_id) +{ + m->hash.sched.queue_id = queue_id; +} + +/** + * Set the mbuf sched traffic_class id to the defined value. 
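+ *
+ * Traffic class 0 is the highest priority; for example,
+ * rte_mbuf_sched_traffic_class_set(m, 0) marks the packet as
+ * top priority.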
+ */ +static inline void +rte_mbuf_sched_traffic_class_set(struct rte_mbuf *m, uint8_t traffic_class) +{ + m->hash.sched.traffic_class = traffic_class; +} + +/** + * Set the mbuf sched color id to the defined value. + */ +static inline void +rte_mbuf_sched_color_set(struct rte_mbuf *m, uint8_t color) +{ + m->hash.sched.color = color; +} + +/** + * Set the mbuf sched queue_id, traffic_class and color. + * + * @param m + * Mbuf to set + * @param queue_id + * Queue id value to be set + * @param traffic_class + * Traffic class id value to be set + * @param color + * Color id to be set + */ +static inline void +rte_mbuf_sched_set(struct rte_mbuf *m, uint32_t queue_id, + uint8_t traffic_class, + uint8_t color) +{ + m->hash.sched = (struct rte_mbuf_sched){ + .queue_id = queue_id, + .traffic_class = traffic_class, + .color = color, + .reserved = 0, + }; +} + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_MBUF_H_ */ diff --git a/src/spdk/dpdk/lib/librte_mbuf/rte_mbuf_core.h b/src/spdk/dpdk/lib/librte_mbuf/rte_mbuf_core.h new file mode 100644 index 000000000..b9a59c879 --- /dev/null +++ b/src/spdk/dpdk/lib/librte_mbuf/rte_mbuf_core.h @@ -0,0 +1,768 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation. + * Copyright 2014 6WIND S.A. + */ + +#ifndef _RTE_MBUF_CORE_H_ +#define _RTE_MBUF_CORE_H_ + +/** + * @file + * This file contains definion of RTE mbuf structure itself, + * packet offload flags and some related macros. + * For majority of DPDK entities, it is not recommended to include + * this file directly, use include <rte_mbuf.h> instead. + */ + +#include <stdint.h> +#include <rte_compat.h> +#include <generic/rte_atomic.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Packet Offload Features Flags. It also carry packet type information. + * Critical resources. Both rx/tx shared these bits. Be cautious on any change + * + * - RX flags start at bit position zero, and get added to the left of previous + * flags. + * - The most-significant 3 bits are reserved for generic mbuf flags + * - TX flags therefore start at bit position 60 (i.e. 63-3), and new flags get + * added to the right of the previously defined flags i.e. they should count + * downwards, not upwards. + * + * Keep these flags synchronized with rte_get_rx_ol_flag_name() and + * rte_get_tx_ol_flag_name(). + */ + +/** + * The RX packet is a 802.1q VLAN packet, and the tci has been + * saved in in mbuf->vlan_tci. + * If the flag PKT_RX_VLAN_STRIPPED is also present, the VLAN + * header has been stripped from mbuf data, else it is still + * present. + */ +#define PKT_RX_VLAN (1ULL << 0) + +/** RX packet with RSS hash result. */ +#define PKT_RX_RSS_HASH (1ULL << 1) + + /** RX packet with FDIR match indicate. */ +#define PKT_RX_FDIR (1ULL << 2) + +/** + * Deprecated. + * Checking this flag alone is deprecated: check the 2 bits of + * PKT_RX_L4_CKSUM_MASK. + * This flag was set when the L4 checksum of a packet was detected as + * wrong by the hardware. + */ +#define PKT_RX_L4_CKSUM_BAD (1ULL << 3) + +/** + * Deprecated. + * Checking this flag alone is deprecated: check the 2 bits of + * PKT_RX_IP_CKSUM_MASK. + * This flag was set when the IP checksum of a packet was detected as + * wrong by the hardware. + */ +#define PKT_RX_IP_CKSUM_BAD (1ULL << 4) + + /** External IP header checksum error. */ +#define PKT_RX_EIP_CKSUM_BAD (1ULL << 5) + +/** + * A vlan has been stripped by the hardware and its tci is saved in + * mbuf->vlan_tci. 
This can only happen if vlan stripping is enabled + * in the RX configuration of the PMD. + * When PKT_RX_VLAN_STRIPPED is set, PKT_RX_VLAN must also be set. + */ +#define PKT_RX_VLAN_STRIPPED (1ULL << 6) + +/** + * Mask of bits used to determine the status of RX IP checksum. + * - PKT_RX_IP_CKSUM_UNKNOWN: no information about the RX IP checksum + * - PKT_RX_IP_CKSUM_BAD: the IP checksum in the packet is wrong + * - PKT_RX_IP_CKSUM_GOOD: the IP checksum in the packet is valid + * - PKT_RX_IP_CKSUM_NONE: the IP checksum is not correct in the packet + * data, but the integrity of the IP header is verified. + */ +#define PKT_RX_IP_CKSUM_MASK ((1ULL << 4) | (1ULL << 7)) + +#define PKT_RX_IP_CKSUM_UNKNOWN 0 +#define PKT_RX_IP_CKSUM_BAD (1ULL << 4) +#define PKT_RX_IP_CKSUM_GOOD (1ULL << 7) +#define PKT_RX_IP_CKSUM_NONE ((1ULL << 4) | (1ULL << 7)) + +/** + * Mask of bits used to determine the status of RX L4 checksum. + * - PKT_RX_L4_CKSUM_UNKNOWN: no information about the RX L4 checksum + * - PKT_RX_L4_CKSUM_BAD: the L4 checksum in the packet is wrong + * - PKT_RX_L4_CKSUM_GOOD: the L4 checksum in the packet is valid + * - PKT_RX_L4_CKSUM_NONE: the L4 checksum is not correct in the packet + * data, but the integrity of the L4 data is verified. + */ +#define PKT_RX_L4_CKSUM_MASK ((1ULL << 3) | (1ULL << 8)) + +#define PKT_RX_L4_CKSUM_UNKNOWN 0 +#define PKT_RX_L4_CKSUM_BAD (1ULL << 3) +#define PKT_RX_L4_CKSUM_GOOD (1ULL << 8) +#define PKT_RX_L4_CKSUM_NONE ((1ULL << 3) | (1ULL << 8)) + +/** RX IEEE1588 L2 Ethernet PT Packet. */ +#define PKT_RX_IEEE1588_PTP (1ULL << 9) + +/** RX IEEE1588 L2/L4 timestamped packet.*/ +#define PKT_RX_IEEE1588_TMST (1ULL << 10) + +/** FD id reported if FDIR match. */ +#define PKT_RX_FDIR_ID (1ULL << 13) + +/** Flexible bytes reported if FDIR match. */ +#define PKT_RX_FDIR_FLX (1ULL << 14) + +/** + * The 2 vlans have been stripped by the hardware and their tci are + * saved in mbuf->vlan_tci (inner) and mbuf->vlan_tci_outer (outer). + * This can only happen if vlan stripping is enabled in the RX + * configuration of the PMD. + * When PKT_RX_QINQ_STRIPPED is set, the flags (PKT_RX_VLAN | + * PKT_RX_VLAN_STRIPPED | PKT_RX_QINQ) must also be set. + */ +#define PKT_RX_QINQ_STRIPPED (1ULL << 15) + +/** + * When packets are coalesced by a hardware or virtual driver, this flag + * can be set in the RX mbuf, meaning that the m->tso_segsz field is + * valid and is set to the segment size of original packets. + */ +#define PKT_RX_LRO (1ULL << 16) + +/** + * Indicate that the timestamp field in the mbuf is valid. + */ +#define PKT_RX_TIMESTAMP (1ULL << 17) + +/** + * Indicate that security offload processing was applied on the RX packet. + */ +#define PKT_RX_SEC_OFFLOAD (1ULL << 18) + +/** + * Indicate that security offload processing failed on the RX packet. + */ +#define PKT_RX_SEC_OFFLOAD_FAILED (1ULL << 19) + +/** + * The RX packet is a double VLAN, and the outer tci has been + * saved in in mbuf->vlan_tci_outer. If PKT_RX_QINQ set, PKT_RX_VLAN + * also should be set and inner tci should be saved to mbuf->vlan_tci. + * If the flag PKT_RX_QINQ_STRIPPED is also present, both VLANs + * headers have been stripped from mbuf data, else they are still + * present. + */ +#define PKT_RX_QINQ (1ULL << 20) + +/** + * Mask of bits used to determine the status of outer RX L4 checksum. 
+ * - PKT_RX_OUTER_L4_CKSUM_UNKNOWN: no info about the outer RX L4 checksum + * - PKT_RX_OUTER_L4_CKSUM_BAD: the outer L4 checksum in the packet is wrong + * - PKT_RX_OUTER_L4_CKSUM_GOOD: the outer L4 checksum in the packet is valid + * - PKT_RX_OUTER_L4_CKSUM_INVALID: invalid outer L4 checksum state. + * + * The detection of PKT_RX_OUTER_L4_CKSUM_GOOD shall be based on the given + * HW capability, At minimum, the PMD should support + * PKT_RX_OUTER_L4_CKSUM_UNKNOWN and PKT_RX_OUTER_L4_CKSUM_BAD states + * if the DEV_RX_OFFLOAD_OUTER_UDP_CKSUM offload is available. + */ +#define PKT_RX_OUTER_L4_CKSUM_MASK ((1ULL << 21) | (1ULL << 22)) + +#define PKT_RX_OUTER_L4_CKSUM_UNKNOWN 0 +#define PKT_RX_OUTER_L4_CKSUM_BAD (1ULL << 21) +#define PKT_RX_OUTER_L4_CKSUM_GOOD (1ULL << 22) +#define PKT_RX_OUTER_L4_CKSUM_INVALID ((1ULL << 21) | (1ULL << 22)) + +/* add new RX flags here, don't forget to update PKT_FIRST_FREE */ + +#define PKT_FIRST_FREE (1ULL << 23) +#define PKT_LAST_FREE (1ULL << 40) + +/* add new TX flags here, don't forget to update PKT_LAST_FREE */ + +/** + * Outer UDP checksum offload flag. This flag is used for enabling + * outer UDP checksum in PMD. To use outer UDP checksum, the user needs to + * 1) Enable the following in mbuf, + * a) Fill outer_l2_len and outer_l3_len in mbuf. + * b) Set the PKT_TX_OUTER_UDP_CKSUM flag. + * c) Set the PKT_TX_OUTER_IPV4 or PKT_TX_OUTER_IPV6 flag. + * 2) Configure DEV_TX_OFFLOAD_OUTER_UDP_CKSUM offload flag. + */ +#define PKT_TX_OUTER_UDP_CKSUM (1ULL << 41) + +/** + * UDP Fragmentation Offload flag. This flag is used for enabling UDP + * fragmentation in SW or in HW. When use UFO, mbuf->tso_segsz is used + * to store the MSS of UDP fragments. + */ +#define PKT_TX_UDP_SEG (1ULL << 42) + +/** + * Request security offload processing on the TX packet. + */ +#define PKT_TX_SEC_OFFLOAD (1ULL << 43) + +/** + * Offload the MACsec. This flag must be set by the application to enable + * this offload feature for a packet to be transmitted. + */ +#define PKT_TX_MACSEC (1ULL << 44) + +/** + * Bits 45:48 used for the tunnel type. + * The tunnel type must be specified for TSO or checksum on the inner part + * of tunnel packets. + * These flags can be used with PKT_TX_TCP_SEG for TSO, or PKT_TX_xxx_CKSUM. + * The mbuf fields for inner and outer header lengths are required: + * outer_l2_len, outer_l3_len, l2_len, l3_len, l4_len and tso_segsz for TSO. + */ +#define PKT_TX_TUNNEL_VXLAN (0x1ULL << 45) +#define PKT_TX_TUNNEL_GRE (0x2ULL << 45) +#define PKT_TX_TUNNEL_IPIP (0x3ULL << 45) +#define PKT_TX_TUNNEL_GENEVE (0x4ULL << 45) +/** TX packet with MPLS-in-UDP RFC 7510 header. */ +#define PKT_TX_TUNNEL_MPLSINUDP (0x5ULL << 45) +#define PKT_TX_TUNNEL_VXLAN_GPE (0x6ULL << 45) +#define PKT_TX_TUNNEL_GTP (0x7ULL << 45) +/** + * Generic IP encapsulated tunnel type, used for TSO and checksum offload. + * It can be used for tunnels which are not standards or listed above. + * It is preferred to use specific tunnel flags like PKT_TX_TUNNEL_GRE + * or PKT_TX_TUNNEL_IPIP if possible. + * The ethdev must be configured with DEV_TX_OFFLOAD_IP_TNL_TSO. + * Outer and inner checksums are done according to the existing flags like + * PKT_TX_xxx_CKSUM. + * Specific tunnel headers that contain payload length, sequence id + * or checksum are not expected to be updated. + */ +#define PKT_TX_TUNNEL_IP (0xDULL << 45) +/** + * Generic UDP encapsulated tunnel type, used for TSO and checksum offload. + * UDP tunnel type implies outer IP layer. 
+ * It can be used for tunnels which are not standards or listed above. + * It is preferred to use specific tunnel flags like PKT_TX_TUNNEL_VXLAN + * if possible. + * The ethdev must be configured with DEV_TX_OFFLOAD_UDP_TNL_TSO. + * Outer and inner checksums are done according to the existing flags like + * PKT_TX_xxx_CKSUM. + * Specific tunnel headers that contain payload length, sequence id + * or checksum are not expected to be updated. + */ +#define PKT_TX_TUNNEL_UDP (0xEULL << 45) +/* add new TX TUNNEL type here */ +#define PKT_TX_TUNNEL_MASK (0xFULL << 45) + +/** + * Double VLAN insertion (QinQ) request to driver, driver may offload the + * insertion based on device capability. + * mbuf 'vlan_tci' & 'vlan_tci_outer' must be valid when this flag is set. + */ +#define PKT_TX_QINQ (1ULL << 49) +/* this old name is deprecated */ +#define PKT_TX_QINQ_PKT PKT_TX_QINQ + +/** + * TCP segmentation offload. To enable this offload feature for a + * packet to be transmitted on hardware supporting TSO: + * - set the PKT_TX_TCP_SEG flag in mbuf->ol_flags (this flag implies + * PKT_TX_TCP_CKSUM) + * - set the flag PKT_TX_IPV4 or PKT_TX_IPV6 + * - if it's IPv4, set the PKT_TX_IP_CKSUM flag + * - fill the mbuf offload information: l2_len, l3_len, l4_len, tso_segsz + */ +#define PKT_TX_TCP_SEG (1ULL << 50) + +/** TX IEEE1588 packet to timestamp. */ +#define PKT_TX_IEEE1588_TMST (1ULL << 51) + +/** + * Bits 52+53 used for L4 packet type with checksum enabled: 00: Reserved, + * 01: TCP checksum, 10: SCTP checksum, 11: UDP checksum. To use hardware + * L4 checksum offload, the user needs to: + * - fill l2_len and l3_len in mbuf + * - set the flags PKT_TX_TCP_CKSUM, PKT_TX_SCTP_CKSUM or PKT_TX_UDP_CKSUM + * - set the flag PKT_TX_IPV4 or PKT_TX_IPV6 + */ +#define PKT_TX_L4_NO_CKSUM (0ULL << 52) /**< Disable L4 cksum of TX pkt. */ + +/** TCP cksum of TX pkt. computed by NIC. */ +#define PKT_TX_TCP_CKSUM (1ULL << 52) + +/** SCTP cksum of TX pkt. computed by NIC. */ +#define PKT_TX_SCTP_CKSUM (2ULL << 52) + +/** UDP cksum of TX pkt. computed by NIC. */ +#define PKT_TX_UDP_CKSUM (3ULL << 52) + +/** Mask for L4 cksum offload request. */ +#define PKT_TX_L4_MASK (3ULL << 52) + +/** + * Offload the IP checksum in the hardware. The flag PKT_TX_IPV4 should + * also be set by the application, although a PMD will only check + * PKT_TX_IP_CKSUM. + * - fill the mbuf offload information: l2_len, l3_len + */ +#define PKT_TX_IP_CKSUM (1ULL << 54) + +/** + * Packet is IPv4. This flag must be set when using any offload feature + * (TSO, L3 or L4 checksum) to tell the NIC that the packet is an IPv4 + * packet. If the packet is a tunneled packet, this flag is related to + * the inner headers. + */ +#define PKT_TX_IPV4 (1ULL << 55) + +/** + * Packet is IPv6. This flag must be set when using an offload feature + * (TSO or L4 checksum) to tell the NIC that the packet is an IPv6 + * packet. If the packet is a tunneled packet, this flag is related to + * the inner headers. + */ +#define PKT_TX_IPV6 (1ULL << 56) + +/** + * VLAN tag insertion request to driver, driver may offload the insertion + * based on the device capability. + * mbuf 'vlan_tci' field must be valid when this flag is set. + */ +#define PKT_TX_VLAN (1ULL << 57) +/* this old name is deprecated */ +#define PKT_TX_VLAN_PKT PKT_TX_VLAN + +/** + * Offload the IP checksum of an external header in the hardware. The + * flag PKT_TX_OUTER_IPV4 should also be set by the application, although + * a PMD will only check PKT_TX_OUTER_IP_CKSUM. 
+ * - fill the mbuf offload information: outer_l2_len, outer_l3_len + */ +#define PKT_TX_OUTER_IP_CKSUM (1ULL << 58) + +/** + * Packet outer header is IPv4. This flag must be set when using any + * outer offload feature (L3 or L4 checksum) to tell the NIC that the + * outer header of the tunneled packet is an IPv4 packet. + */ +#define PKT_TX_OUTER_IPV4 (1ULL << 59) + +/** + * Packet outer header is IPv6. This flag must be set when using any + * outer offload feature (L4 checksum) to tell the NIC that the outer + * header of the tunneled packet is an IPv6 packet. + */ +#define PKT_TX_OUTER_IPV6 (1ULL << 60) + +/** + * Bitmask of all supported packet Tx offload features flags, + * which can be set for packet. + */ +#define PKT_TX_OFFLOAD_MASK ( \ + PKT_TX_OUTER_IPV6 | \ + PKT_TX_OUTER_IPV4 | \ + PKT_TX_OUTER_IP_CKSUM | \ + PKT_TX_VLAN_PKT | \ + PKT_TX_IPV6 | \ + PKT_TX_IPV4 | \ + PKT_TX_IP_CKSUM | \ + PKT_TX_L4_MASK | \ + PKT_TX_IEEE1588_TMST | \ + PKT_TX_TCP_SEG | \ + PKT_TX_QINQ_PKT | \ + PKT_TX_TUNNEL_MASK | \ + PKT_TX_MACSEC | \ + PKT_TX_SEC_OFFLOAD | \ + PKT_TX_UDP_SEG | \ + PKT_TX_OUTER_UDP_CKSUM) + +/** + * Mbuf having an external buffer attached. shinfo in mbuf must be filled. + */ +#define EXT_ATTACHED_MBUF (1ULL << 61) + +#define IND_ATTACHED_MBUF (1ULL << 62) /**< Indirect attached mbuf */ + +/** Alignment constraint of mbuf private area. */ +#define RTE_MBUF_PRIV_ALIGN 8 + +/** + * Some NICs need at least 2KB buffer to RX standard Ethernet frame without + * splitting it into multiple segments. + * So, for mbufs that planned to be involved into RX/TX, the recommended + * minimal buffer length is 2KB + RTE_PKTMBUF_HEADROOM. + */ +#define RTE_MBUF_DEFAULT_DATAROOM 2048 +#define RTE_MBUF_DEFAULT_BUF_SIZE \ + (RTE_MBUF_DEFAULT_DATAROOM + RTE_PKTMBUF_HEADROOM) + +struct rte_mbuf_sched { + uint32_t queue_id; /**< Queue ID. */ + uint8_t traffic_class; + /**< Traffic class ID. Traffic class 0 + * is the highest priority traffic class. + */ + uint8_t color; + /**< Color. @see enum rte_color.*/ + uint16_t reserved; /**< Reserved. */ +}; /**< Hierarchical scheduler */ + +/** + * enum for the tx_offload bit-fields lengths and offsets. + * defines the layout of rte_mbuf tx_offload field. 
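+ *
+ * On a little-endian target, for example, this yields:
+ *   l2_len       -> bits [6:0]
+ *   l3_len       -> bits [15:7]
+ *   l4_len       -> bits [23:16]
+ *   tso_segsz    -> bits [39:24]
+ *   outer_l3_len -> bits [48:40]
+ *   outer_l2_len -> bits [55:49]
+ *   unused       -> bits [63:56]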
+ */ +enum { + RTE_MBUF_L2_LEN_BITS = 7, + RTE_MBUF_L3_LEN_BITS = 9, + RTE_MBUF_L4_LEN_BITS = 8, + RTE_MBUF_TSO_SEGSZ_BITS = 16, + RTE_MBUF_OUTL3_LEN_BITS = 9, + RTE_MBUF_OUTL2_LEN_BITS = 7, + RTE_MBUF_TXOFLD_UNUSED_BITS = sizeof(uint64_t) * CHAR_BIT - + RTE_MBUF_L2_LEN_BITS - + RTE_MBUF_L3_LEN_BITS - + RTE_MBUF_L4_LEN_BITS - + RTE_MBUF_TSO_SEGSZ_BITS - + RTE_MBUF_OUTL3_LEN_BITS - + RTE_MBUF_OUTL2_LEN_BITS, +#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN + RTE_MBUF_L2_LEN_OFS = + sizeof(uint64_t) * CHAR_BIT - RTE_MBUF_L2_LEN_BITS, + RTE_MBUF_L3_LEN_OFS = RTE_MBUF_L2_LEN_OFS - RTE_MBUF_L3_LEN_BITS, + RTE_MBUF_L4_LEN_OFS = RTE_MBUF_L3_LEN_OFS - RTE_MBUF_L4_LEN_BITS, + RTE_MBUF_TSO_SEGSZ_OFS = RTE_MBUF_L4_LEN_OFS - RTE_MBUF_TSO_SEGSZ_BITS, + RTE_MBUF_OUTL3_LEN_OFS = + RTE_MBUF_TSO_SEGSZ_OFS - RTE_MBUF_OUTL3_LEN_BITS, + RTE_MBUF_OUTL2_LEN_OFS = + RTE_MBUF_OUTL3_LEN_OFS - RTE_MBUF_OUTL2_LEN_BITS, + RTE_MBUF_TXOFLD_UNUSED_OFS = + RTE_MBUF_OUTL2_LEN_OFS - RTE_MBUF_TXOFLD_UNUSED_BITS, +#else + RTE_MBUF_L2_LEN_OFS = 0, + RTE_MBUF_L3_LEN_OFS = RTE_MBUF_L2_LEN_OFS + RTE_MBUF_L2_LEN_BITS, + RTE_MBUF_L4_LEN_OFS = RTE_MBUF_L3_LEN_OFS + RTE_MBUF_L3_LEN_BITS, + RTE_MBUF_TSO_SEGSZ_OFS = RTE_MBUF_L4_LEN_OFS + RTE_MBUF_L4_LEN_BITS, + RTE_MBUF_OUTL3_LEN_OFS = + RTE_MBUF_TSO_SEGSZ_OFS + RTE_MBUF_TSO_SEGSZ_BITS, + RTE_MBUF_OUTL2_LEN_OFS = + RTE_MBUF_OUTL3_LEN_OFS + RTE_MBUF_OUTL3_LEN_BITS, + RTE_MBUF_TXOFLD_UNUSED_OFS = + RTE_MBUF_OUTL2_LEN_OFS + RTE_MBUF_OUTL2_LEN_BITS, +#endif +}; + +/** + * The generic rte_mbuf, containing a packet mbuf. + */ +struct rte_mbuf { + RTE_MARKER cacheline0; + + void *buf_addr; /**< Virtual address of segment buffer. */ + /** + * Physical address of segment buffer. + * Force alignment to 8-bytes, so as to ensure we have the exact + * same mbuf cacheline0 layout for 32-bit and 64-bit. This makes + * working on vector drivers easier. + */ + RTE_STD_C11 + union { + rte_iova_t buf_iova; + rte_iova_t buf_physaddr; /**< deprecated */ + } __rte_aligned(sizeof(rte_iova_t)); + + /* next 8 bytes are initialised on RX descriptor rearm */ + RTE_MARKER64 rearm_data; + uint16_t data_off; + + /** + * Reference counter. Its size should at least equal to the size + * of port field (16 bits), to support zero-copy broadcast. + * It should only be accessed using the following functions: + * rte_mbuf_refcnt_update(), rte_mbuf_refcnt_read(), and + * rte_mbuf_refcnt_set(). The functionality of these functions (atomic, + * or non-atomic) is controlled by the CONFIG_RTE_MBUF_REFCNT_ATOMIC + * config option. + */ + RTE_STD_C11 + union { + rte_atomic16_t refcnt_atomic; /**< Atomically accessed refcnt */ + /** Non-atomically accessed refcnt */ + uint16_t refcnt; + }; + uint16_t nb_segs; /**< Number of segments. */ + + /** Input port (16 bits to support more than 256 virtual ports). + * The event eth Tx adapter uses this field to specify the output port. + */ + uint16_t port; + + uint64_t ol_flags; /**< Offload features. */ + + /* remaining bytes are set on RX when pulling packet from descriptor */ + RTE_MARKER rx_descriptor_fields1; + + /* + * The packet type, which is the combination of outer/inner L2, L3, L4 + * and tunnel types. The packet_type is about data really present in the + * mbuf. Example: if vlan stripping is enabled, a received vlan packet + * would have RTE_PTYPE_L2_ETHER and not RTE_PTYPE_L2_VLAN because the + * vlan is stripped from the data. + */ + RTE_STD_C11 + union { + uint32_t packet_type; /**< L2/L3/L4 and tunnel information. */ + struct { + uint32_t l2_type:4; /**< (Outer) L2 type. 
*/ + uint32_t l3_type:4; /**< (Outer) L3 type. */ + uint32_t l4_type:4; /**< (Outer) L4 type. */ + uint32_t tun_type:4; /**< Tunnel type. */ + RTE_STD_C11 + union { + uint8_t inner_esp_next_proto; + /**< ESP next protocol type, valid if + * RTE_PTYPE_TUNNEL_ESP tunnel type is set + * on both Tx and Rx. + */ + __extension__ + struct { + uint8_t inner_l2_type:4; + /**< Inner L2 type. */ + uint8_t inner_l3_type:4; + /**< Inner L3 type. */ + }; + }; + uint32_t inner_l4_type:4; /**< Inner L4 type. */ + }; + }; + + uint32_t pkt_len; /**< Total pkt len: sum of all segments. */ + uint16_t data_len; /**< Amount of data in segment buffer. */ + /** VLAN TCI (CPU order), valid if PKT_RX_VLAN is set. */ + uint16_t vlan_tci; + + RTE_STD_C11 + union { + union { + uint32_t rss; /**< RSS hash result if RSS enabled */ + struct { + union { + struct { + uint16_t hash; + uint16_t id; + }; + uint32_t lo; + /**< Second 4 flexible bytes */ + }; + uint32_t hi; + /**< First 4 flexible bytes or FD ID, dependent + * on PKT_RX_FDIR_* flag in ol_flags. + */ + } fdir; /**< Filter identifier if FDIR enabled */ + struct rte_mbuf_sched sched; + /**< Hierarchical scheduler : 8 bytes */ + struct { + uint32_t reserved1; + uint16_t reserved2; + uint16_t txq; + /**< The event eth Tx adapter uses this field + * to store Tx queue id. + * @see rte_event_eth_tx_adapter_txq_set() + */ + } txadapter; /**< Eventdev ethdev Tx adapter */ + /**< User defined tags. See rte_distributor_process() */ + uint32_t usr; + } hash; /**< hash information */ + }; + + /** Outer VLAN TCI (CPU order), valid if PKT_RX_QINQ is set. */ + uint16_t vlan_tci_outer; + + uint16_t buf_len; /**< Length of segment buffer. */ + + /** Valid if PKT_RX_TIMESTAMP is set. The unit and time reference + * are not normalized but are always the same for a given port. + * Some devices allow to query rte_eth_read_clock that will return the + * current device timestamp. + */ + uint64_t timestamp; + + /* second cache line - fields only used in slow path or on TX */ + RTE_MARKER cacheline1 __rte_cache_min_aligned; + + RTE_STD_C11 + union { + void *userdata; /**< Can be used for external metadata */ + uint64_t udata64; /**< Allow 8-byte userdata on 32-bit */ + }; + + struct rte_mempool *pool; /**< Pool from which mbuf was allocated. */ + struct rte_mbuf *next; /**< Next segment of scattered packet. */ + + /* fields to support TX offloads */ + RTE_STD_C11 + union { + uint64_t tx_offload; /**< combined for easy fetch */ + __extension__ + struct { + uint64_t l2_len:RTE_MBUF_L2_LEN_BITS; + /**< L2 (MAC) Header Length for non-tunneling pkt. + * Outer_L4_len + ... + Inner_L2_len for tunneling pkt. + */ + uint64_t l3_len:RTE_MBUF_L3_LEN_BITS; + /**< L3 (IP) Header Length. */ + uint64_t l4_len:RTE_MBUF_L4_LEN_BITS; + /**< L4 (TCP/UDP) Header Length. */ + uint64_t tso_segsz:RTE_MBUF_TSO_SEGSZ_BITS; + /**< TCP TSO segment size */ + + /* + * Fields for Tx offloading of tunnels. + * These are undefined for packets which don't request + * any tunnel offloads (outer IP or UDP checksum, + * tunnel TSO). + * + * PMDs should not use these fields unconditionally + * when calculating offsets. + * + * Applications are expected to set appropriate tunnel + * offload flags when they fill in these fields. + */ + uint64_t outer_l3_len:RTE_MBUF_OUTL3_LEN_BITS; + /**< Outer L3 (IP) Hdr Length. */ + uint64_t outer_l2_len:RTE_MBUF_OUTL2_LEN_BITS; + /**< Outer L2 (MAC) Hdr Length. */ + + /* uint64_t unused:RTE_MBUF_TXOFLD_UNUSED_BITS; */ + }; + }; + + /** Size of the application private data. 
In case of an indirect + * mbuf, it stores the direct mbuf private data size. + */ + uint16_t priv_size; + + /** Timesync flags for use with IEEE1588. */ + uint16_t timesync; + + /** Sequence number. See also rte_reorder_insert(). */ + uint32_t seqn; + + /** Shared data for external buffer attached to mbuf. See + * rte_pktmbuf_attach_extbuf(). + */ + struct rte_mbuf_ext_shared_info *shinfo; + + uint64_t dynfield1[2]; /**< Reserved for dynamic fields. */ +} __rte_cache_aligned; + +/** + * Function typedef of callback to free externally attached buffer. + */ +typedef void (*rte_mbuf_extbuf_free_callback_t)(void *addr, void *opaque); + +/** + * Shared data at the end of an external buffer. + */ +struct rte_mbuf_ext_shared_info { + rte_mbuf_extbuf_free_callback_t free_cb; /**< Free callback function */ + void *fcb_opaque; /**< Free callback argument */ + rte_atomic16_t refcnt_atomic; /**< Atomically accessed refcnt */ +}; + +/**< Maximum number of nb_segs allowed. */ +#define RTE_MBUF_MAX_NB_SEGS UINT16_MAX + +/** + * Returns TRUE if given mbuf is cloned by mbuf indirection, or FALSE + * otherwise. + * + * If a mbuf has its data in another mbuf and references it by mbuf + * indirection, this mbuf can be defined as a cloned mbuf. + */ +#define RTE_MBUF_CLONED(mb) ((mb)->ol_flags & IND_ATTACHED_MBUF) + +/** + * Returns TRUE if given mbuf has an external buffer, or FALSE otherwise. + * + * External buffer is a user-provided anonymous buffer. + */ +#define RTE_MBUF_HAS_EXTBUF(mb) ((mb)->ol_flags & EXT_ATTACHED_MBUF) + +/** + * Returns TRUE if given mbuf is direct, or FALSE otherwise. + * + * If a mbuf embeds its own data after the rte_mbuf structure, this mbuf + * can be defined as a direct mbuf. + */ +#define RTE_MBUF_DIRECT(mb) \ + (!((mb)->ol_flags & (IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF))) + +#define MBUF_INVALID_PORT UINT16_MAX + +/** + * A macro that points to an offset into the data in the mbuf. + * + * The returned pointer is cast to type t. Before using this + * function, the user must ensure that the first segment is large + * enough to accommodate its data. + * + * @param m + * The packet mbuf. + * @param o + * The offset into the mbuf data. + * @param t + * The type to cast the result into. + */ +#define rte_pktmbuf_mtod_offset(m, t, o) \ + ((t)((char *)(m)->buf_addr + (m)->data_off + (o))) + +/** + * A macro that points to the start of the data in the mbuf. + * + * The returned pointer is cast to type t. Before using this + * function, the user must ensure that the first segment is large + * enough to accommodate its data. + * + * @param m + * The packet mbuf. + * @param t + * The type to cast the result into. + */ +#define rte_pktmbuf_mtod(m, t) rte_pktmbuf_mtod_offset(m, t, 0) + +/** + * A macro that returns the IO address that points to an offset of the + * start of the data in the mbuf + * + * @param m + * The packet mbuf. + * @param o + * The offset into the data to calculate address from. + */ +#define rte_pktmbuf_iova_offset(m, o) \ + (rte_iova_t)((m)->buf_iova + (m)->data_off + (o)) + +/** + * A macro that returns the IO address that points to the start of the + * data in the mbuf + * + * @param m + * The packet mbuf. 
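+ *
+ * Paired with rte_pktmbuf_mtod(), this provides the usual VA/IOVA pair
+ * for the start of the packet data, e.g.:
+ *
+ *   void *va = rte_pktmbuf_mtod(m, void *);
+ *   rte_iova_t io = rte_pktmbuf_iova(m);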
+ */ +#define rte_pktmbuf_iova(m) rte_pktmbuf_iova_offset(m, 0) + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_MBUF_CORE_H_ */ diff --git a/src/spdk/dpdk/lib/librte_mbuf/rte_mbuf_dyn.c b/src/spdk/dpdk/lib/librte_mbuf/rte_mbuf_dyn.c new file mode 100644 index 000000000..d6931f847 --- /dev/null +++ b/src/spdk/dpdk/lib/librte_mbuf/rte_mbuf_dyn.c @@ -0,0 +1,553 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2019 6WIND S.A. + */ + +#include <sys/queue.h> +#include <stdint.h> +#include <limits.h> + +#include <rte_common.h> +#include <rte_eal.h> +#include <rte_eal_memconfig.h> +#include <rte_tailq.h> +#include <rte_errno.h> +#include <rte_malloc.h> +#include <rte_string_fns.h> +#include <rte_mbuf.h> +#include <rte_mbuf_dyn.h> + +#define RTE_MBUF_DYN_MZNAME "rte_mbuf_dyn" + +struct mbuf_dynfield_elt { + TAILQ_ENTRY(mbuf_dynfield_elt) next; + struct rte_mbuf_dynfield params; + size_t offset; +}; +TAILQ_HEAD(mbuf_dynfield_list, rte_tailq_entry); + +static struct rte_tailq_elem mbuf_dynfield_tailq = { + .name = "RTE_MBUF_DYNFIELD", +}; +EAL_REGISTER_TAILQ(mbuf_dynfield_tailq); + +struct mbuf_dynflag_elt { + TAILQ_ENTRY(mbuf_dynflag_elt) next; + struct rte_mbuf_dynflag params; + unsigned int bitnum; +}; +TAILQ_HEAD(mbuf_dynflag_list, rte_tailq_entry); + +static struct rte_tailq_elem mbuf_dynflag_tailq = { + .name = "RTE_MBUF_DYNFLAG", +}; +EAL_REGISTER_TAILQ(mbuf_dynflag_tailq); + +struct mbuf_dyn_shm { + /** + * For each mbuf byte, free_space[i] != 0 if space is free. + * The value is the size of the biggest aligned element that + * can fit in the zone. + */ + uint8_t free_space[sizeof(struct rte_mbuf)]; + /** Bitfield of available flags. */ + uint64_t free_flags; +}; +static struct mbuf_dyn_shm *shm; + +/* Set the value of free_space[] according to the size and alignment of + * the free areas. This helps to select the best place when reserving a + * dynamic field. Assume tailq is locked. + */ +static void +process_score(void) +{ + size_t off, align, size, i; + + /* first, erase previous info */ + for (i = 0; i < sizeof(struct rte_mbuf); i++) { + if (shm->free_space[i]) + shm->free_space[i] = 1; + } + + for (off = 0; off < sizeof(struct rte_mbuf); off++) { + /* get the size of the free zone */ + for (size = 0; shm->free_space[off + size]; size++) + ; + if (size == 0) + continue; + + /* get the alignment of biggest object that can fit in + * the zone at this offset. + */ + for (align = 1; + (off % (align << 1)) == 0 && (align << 1) <= size; + align <<= 1) + ; + + /* save it in free_space[] */ + for (i = off; i < off + size; i++) + shm->free_space[i] = RTE_MAX(align, shm->free_space[i]); + } +} + +/* Mark the area occupied by a mbuf field as available in the shm. */ +#define mark_free(field) \ + memset(&shm->free_space[offsetof(struct rte_mbuf, field)], \ + 1, sizeof(((struct rte_mbuf *)0)->field)) + +/* Allocate and initialize the shared memory. Assume tailq is locked */ +static int +init_shared_mem(void) +{ + const struct rte_memzone *mz; + uint64_t mask; + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + mz = rte_memzone_reserve_aligned(RTE_MBUF_DYN_MZNAME, + sizeof(struct mbuf_dyn_shm), + SOCKET_ID_ANY, 0, + RTE_CACHE_LINE_SIZE); + } else { + mz = rte_memzone_lookup(RTE_MBUF_DYN_MZNAME); + } + if (mz == NULL) + return -1; + + shm = mz->addr; + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + /* init free_space, keep it sync'd with + * rte_mbuf_dynfield_copy(). 
+ */ + memset(shm, 0, sizeof(*shm)); + mark_free(dynfield1); + + /* init free_flags */ + for (mask = PKT_FIRST_FREE; mask <= PKT_LAST_FREE; mask <<= 1) + shm->free_flags |= mask; + + process_score(); + } + + return 0; +} + +/* check if this offset can be used */ +static int +check_offset(size_t offset, size_t size, size_t align) +{ + size_t i; + + if ((offset & (align - 1)) != 0) + return -1; + if (offset + size > sizeof(struct rte_mbuf)) + return -1; + + for (i = 0; i < size; i++) { + if (!shm->free_space[i + offset]) + return -1; + } + + return 0; +} + +/* assume tailq is locked */ +static struct mbuf_dynfield_elt * +__mbuf_dynfield_lookup(const char *name) +{ + struct mbuf_dynfield_list *mbuf_dynfield_list; + struct mbuf_dynfield_elt *mbuf_dynfield; + struct rte_tailq_entry *te; + + mbuf_dynfield_list = RTE_TAILQ_CAST( + mbuf_dynfield_tailq.head, mbuf_dynfield_list); + + TAILQ_FOREACH(te, mbuf_dynfield_list, next) { + mbuf_dynfield = (struct mbuf_dynfield_elt *)te->data; + if (strcmp(name, mbuf_dynfield->params.name) == 0) + break; + } + + if (te == NULL) { + rte_errno = ENOENT; + return NULL; + } + + return mbuf_dynfield; +} + +int +rte_mbuf_dynfield_lookup(const char *name, struct rte_mbuf_dynfield *params) +{ + struct mbuf_dynfield_elt *mbuf_dynfield; + + if (shm == NULL) { + rte_errno = ENOENT; + return -1; + } + + rte_mcfg_tailq_read_lock(); + mbuf_dynfield = __mbuf_dynfield_lookup(name); + rte_mcfg_tailq_read_unlock(); + + if (mbuf_dynfield == NULL) { + rte_errno = ENOENT; + return -1; + } + + if (params != NULL) + memcpy(params, &mbuf_dynfield->params, sizeof(*params)); + + return mbuf_dynfield->offset; +} + +static int mbuf_dynfield_cmp(const struct rte_mbuf_dynfield *params1, + const struct rte_mbuf_dynfield *params2) +{ + if (strcmp(params1->name, params2->name)) + return -1; + if (params1->size != params2->size) + return -1; + if (params1->align != params2->align) + return -1; + if (params1->flags != params2->flags) + return -1; + return 0; +} + +/* assume tailq is locked */ +static int +__rte_mbuf_dynfield_register_offset(const struct rte_mbuf_dynfield *params, + size_t req) +{ + struct mbuf_dynfield_list *mbuf_dynfield_list; + struct mbuf_dynfield_elt *mbuf_dynfield = NULL; + struct rte_tailq_entry *te = NULL; + unsigned int best_zone = UINT_MAX; + size_t i, offset; + int ret; + + if (shm == NULL && init_shared_mem() < 0) + return -1; + + mbuf_dynfield = __mbuf_dynfield_lookup(params->name); + if (mbuf_dynfield != NULL) { + if (req != SIZE_MAX && req != mbuf_dynfield->offset) { + rte_errno = EEXIST; + return -1; + } + if (mbuf_dynfield_cmp(params, &mbuf_dynfield->params) < 0) { + rte_errno = EEXIST; + return -1; + } + return mbuf_dynfield->offset; + } + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + rte_errno = EPERM; + return -1; + } + + if (req == SIZE_MAX) { + /* Find the best place to put this field: we search the + * lowest value of shm->free_space[offset]: the zones + * containing room for larger fields are kept for later. 
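+	 * For example, a 2-byte field that would fit both in a zone
+	 * scored 2 and in a zone scored 8 is placed in the former,
+	 * preserving the larger zone for a future wider field.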
+ */ + for (offset = 0; + offset < sizeof(struct rte_mbuf); + offset++) { + if (check_offset(offset, params->size, + params->align) == 0 && + shm->free_space[offset] < best_zone) { + best_zone = shm->free_space[offset]; + req = offset; + } + } + if (req == SIZE_MAX) { + rte_errno = ENOENT; + return -1; + } + } else { + if (check_offset(req, params->size, params->align) < 0) { + rte_errno = EBUSY; + return -1; + } + } + + offset = req; + mbuf_dynfield_list = RTE_TAILQ_CAST( + mbuf_dynfield_tailq.head, mbuf_dynfield_list); + + te = rte_zmalloc("MBUF_DYNFIELD_TAILQ_ENTRY", sizeof(*te), 0); + if (te == NULL) + return -1; + + mbuf_dynfield = rte_zmalloc("mbuf_dynfield", sizeof(*mbuf_dynfield), 0); + if (mbuf_dynfield == NULL) { + rte_free(te); + return -1; + } + + ret = strlcpy(mbuf_dynfield->params.name, params->name, + sizeof(mbuf_dynfield->params.name)); + if (ret < 0 || ret >= (int)sizeof(mbuf_dynfield->params.name)) { + rte_errno = ENAMETOOLONG; + rte_free(mbuf_dynfield); + rte_free(te); + return -1; + } + memcpy(&mbuf_dynfield->params, params, sizeof(mbuf_dynfield->params)); + mbuf_dynfield->offset = offset; + te->data = mbuf_dynfield; + + TAILQ_INSERT_TAIL(mbuf_dynfield_list, te, next); + + for (i = offset; i < offset + params->size; i++) + shm->free_space[i] = 0; + process_score(); + + RTE_LOG(DEBUG, MBUF, "Registered dynamic field %s (sz=%zu, al=%zu, fl=0x%x) -> %zd\n", + params->name, params->size, params->align, params->flags, + offset); + + return offset; +} + +int +rte_mbuf_dynfield_register_offset(const struct rte_mbuf_dynfield *params, + size_t req) +{ + int ret; + + if (params->size >= sizeof(struct rte_mbuf)) { + rte_errno = EINVAL; + return -1; + } + if (!rte_is_power_of_2(params->align)) { + rte_errno = EINVAL; + return -1; + } + if (params->flags != 0) { + rte_errno = EINVAL; + return -1; + } + + rte_mcfg_tailq_write_lock(); + ret = __rte_mbuf_dynfield_register_offset(params, req); + rte_mcfg_tailq_write_unlock(); + + return ret; +} + +int +rte_mbuf_dynfield_register(const struct rte_mbuf_dynfield *params) +{ + return rte_mbuf_dynfield_register_offset(params, SIZE_MAX); +} + +/* assume tailq is locked */ +static struct mbuf_dynflag_elt * +__mbuf_dynflag_lookup(const char *name) +{ + struct mbuf_dynflag_list *mbuf_dynflag_list; + struct mbuf_dynflag_elt *mbuf_dynflag; + struct rte_tailq_entry *te; + + mbuf_dynflag_list = RTE_TAILQ_CAST( + mbuf_dynflag_tailq.head, mbuf_dynflag_list); + + TAILQ_FOREACH(te, mbuf_dynflag_list, next) { + mbuf_dynflag = (struct mbuf_dynflag_elt *)te->data; + if (strncmp(name, mbuf_dynflag->params.name, + RTE_MBUF_DYN_NAMESIZE) == 0) + break; + } + + if (te == NULL) { + rte_errno = ENOENT; + return NULL; + } + + return mbuf_dynflag; +} + +int +rte_mbuf_dynflag_lookup(const char *name, + struct rte_mbuf_dynflag *params) +{ + struct mbuf_dynflag_elt *mbuf_dynflag; + + if (shm == NULL) { + rte_errno = ENOENT; + return -1; + } + + rte_mcfg_tailq_read_lock(); + mbuf_dynflag = __mbuf_dynflag_lookup(name); + rte_mcfg_tailq_read_unlock(); + + if (mbuf_dynflag == NULL) { + rte_errno = ENOENT; + return -1; + } + + if (params != NULL) + memcpy(params, &mbuf_dynflag->params, sizeof(*params)); + + return mbuf_dynflag->bitnum; +} + +static int mbuf_dynflag_cmp(const struct rte_mbuf_dynflag *params1, + const struct rte_mbuf_dynflag *params2) +{ + if (strcmp(params1->name, params2->name)) + return -1; + if (params1->flags != params2->flags) + return -1; + return 0; +} + +/* assume tailq is locked */ +static int +__rte_mbuf_dynflag_register_bitnum(const struct 
rte_mbuf_dynflag *params, + unsigned int req) +{ + struct mbuf_dynflag_list *mbuf_dynflag_list; + struct mbuf_dynflag_elt *mbuf_dynflag = NULL; + struct rte_tailq_entry *te = NULL; + unsigned int bitnum; + int ret; + + if (shm == NULL && init_shared_mem() < 0) + return -1; + + mbuf_dynflag = __mbuf_dynflag_lookup(params->name); + if (mbuf_dynflag != NULL) { + if (req != UINT_MAX && req != mbuf_dynflag->bitnum) { + rte_errno = EEXIST; + return -1; + } + if (mbuf_dynflag_cmp(params, &mbuf_dynflag->params) < 0) { + rte_errno = EEXIST; + return -1; + } + return mbuf_dynflag->bitnum; + } + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + rte_errno = EPERM; + return -1; + } + + if (req == UINT_MAX) { + if (shm->free_flags == 0) { + rte_errno = ENOENT; + return -1; + } + bitnum = rte_bsf64(shm->free_flags); + } else { + if ((shm->free_flags & (1ULL << req)) == 0) { + rte_errno = EBUSY; + return -1; + } + bitnum = req; + } + + mbuf_dynflag_list = RTE_TAILQ_CAST( + mbuf_dynflag_tailq.head, mbuf_dynflag_list); + + te = rte_zmalloc("MBUF_DYNFLAG_TAILQ_ENTRY", sizeof(*te), 0); + if (te == NULL) + return -1; + + mbuf_dynflag = rte_zmalloc("mbuf_dynflag", sizeof(*mbuf_dynflag), 0); + if (mbuf_dynflag == NULL) { + rte_free(te); + return -1; + } + + ret = strlcpy(mbuf_dynflag->params.name, params->name, + sizeof(mbuf_dynflag->params.name)); + if (ret < 0 || ret >= (int)sizeof(mbuf_dynflag->params.name)) { + rte_free(mbuf_dynflag); + rte_free(te); + rte_errno = ENAMETOOLONG; + return -1; + } + mbuf_dynflag->bitnum = bitnum; + te->data = mbuf_dynflag; + + TAILQ_INSERT_TAIL(mbuf_dynflag_list, te, next); + + shm->free_flags &= ~(1ULL << bitnum); + + RTE_LOG(DEBUG, MBUF, "Registered dynamic flag %s (fl=0x%x) -> %u\n", + params->name, params->flags, bitnum); + + return bitnum; +} + +int +rte_mbuf_dynflag_register_bitnum(const struct rte_mbuf_dynflag *params, + unsigned int req) +{ + int ret; + + if (req >= RTE_SIZEOF_FIELD(struct rte_mbuf, ol_flags) * CHAR_BIT && + req != UINT_MAX) { + rte_errno = EINVAL; + return -1; + } + + rte_mcfg_tailq_write_lock(); + ret = __rte_mbuf_dynflag_register_bitnum(params, req); + rte_mcfg_tailq_write_unlock(); + + return ret; +} + +int +rte_mbuf_dynflag_register(const struct rte_mbuf_dynflag *params) +{ + return rte_mbuf_dynflag_register_bitnum(params, UINT_MAX); +} + +void rte_mbuf_dyn_dump(FILE *out) +{ + struct mbuf_dynfield_list *mbuf_dynfield_list; + struct mbuf_dynfield_elt *dynfield; + struct mbuf_dynflag_list *mbuf_dynflag_list; + struct mbuf_dynflag_elt *dynflag; + struct rte_tailq_entry *te; + size_t i; + + rte_mcfg_tailq_write_lock(); + init_shared_mem(); + fprintf(out, "Reserved fields:\n"); + mbuf_dynfield_list = RTE_TAILQ_CAST( + mbuf_dynfield_tailq.head, mbuf_dynfield_list); + TAILQ_FOREACH(te, mbuf_dynfield_list, next) { + dynfield = (struct mbuf_dynfield_elt *)te->data; + fprintf(out, " name=%s offset=%zd size=%zd align=%zd flags=%x\n", + dynfield->params.name, dynfield->offset, + dynfield->params.size, dynfield->params.align, + dynfield->params.flags); + } + fprintf(out, "Reserved flags:\n"); + mbuf_dynflag_list = RTE_TAILQ_CAST( + mbuf_dynflag_tailq.head, mbuf_dynflag_list); + TAILQ_FOREACH(te, mbuf_dynflag_list, next) { + dynflag = (struct mbuf_dynflag_elt *)te->data; + fprintf(out, " name=%s bitnum=%u flags=%x\n", + dynflag->params.name, dynflag->bitnum, + dynflag->params.flags); + } + fprintf(out, "Free space in mbuf (0 = free, value = zone alignment):\n"); + for (i = 0; i < sizeof(struct rte_mbuf); i++) { + if ((i % 8) == 0) + fprintf(out, " %4.4zx: 
", i); + fprintf(out, "%2.2x%s", shm->free_space[i], + (i % 8 != 7) ? " " : "\n"); + } + rte_mcfg_tailq_write_unlock(); +} diff --git a/src/spdk/dpdk/lib/librte_mbuf/rte_mbuf_dyn.h b/src/spdk/dpdk/lib/librte_mbuf/rte_mbuf_dyn.h new file mode 100644 index 000000000..96c363137 --- /dev/null +++ b/src/spdk/dpdk/lib/librte_mbuf/rte_mbuf_dyn.h @@ -0,0 +1,253 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2019 6WIND S.A. + */ + +#ifndef _RTE_MBUF_DYN_H_ +#define _RTE_MBUF_DYN_H_ + +/** + * @file + * RTE Mbuf dynamic fields and flags + * + * Many DPDK features require to store data inside the mbuf. As the room + * in mbuf structure is limited, it is not possible to have a field for + * each feature. Also, changing fields in the mbuf structure can break + * the API or ABI. + * + * This module addresses this issue, by enabling the dynamic + * registration of fields or flags: + * + * - a dynamic field is a named area in the rte_mbuf structure, with a + * given size (>= 1 byte) and alignment constraint. + * - a dynamic flag is a named bit in the rte_mbuf structure, stored + * in mbuf->ol_flags. + * + * The placement of the field or flag can be automatic, in this case the + * zones that have the smallest size and alignment constraint are + * selected in priority. Else, a specific field offset or flag bit + * number can be requested through the API. + * + * The typical use case is when a specific offload feature requires to + * register a dedicated offload field in the mbuf structure, and adding + * a static field or flag is not justified. + * + * Example of use: + * + * - A rte_mbuf_dynfield structure is defined, containing the parameters + * of the dynamic field to be registered: + * const struct rte_mbuf_dynfield rte_dynfield_my_feature = { ... }; + * - The application initializes the PMD, and asks for this feature + * at port initialization by passing DEV_RX_OFFLOAD_MY_FEATURE in + * rxconf. This will make the PMD to register the field by calling + * rte_mbuf_dynfield_register(&rte_dynfield_my_feature). The PMD + * stores the returned offset. + * - The application that uses the offload feature also registers + * the field to retrieve the same offset. + * - When the PMD receives a packet, it can set the field: + * *RTE_MBUF_DYNFIELD(m, offset, <type *>) = value; + * - In the main loop, the application can retrieve the value with + * the same macro. + * + * To avoid wasting space, the dynamic fields or flags must only be + * reserved on demand, when an application asks for the related feature. + * + * The registration can be done at any moment, but it is not possible + * to unregister fields or flags for now. + * + * A dynamic field can be reserved and used by an application only. + * It can for instance be a packet mark. + * + * To avoid namespace collisions, the dynamic mbuf field or flag names + * have to be chosen with care. It is advised to use the same + * conventions than function names in dpdk: + * - "rte_mbuf_dynfield_<name>" if defined in mbuf library + * - "rte_<libname>_dynfield_<name>" if defined in another library + * - "rte_net_<pmd>_dynfield_<name>" if defined in a in PMD + * - any name that does not start with "rte_" in an application + */ + +#include <sys/types.h> +/** + * Maximum length of the dynamic field or flag string. + */ +#define RTE_MBUF_DYN_NAMESIZE 64 + +/** + * Structure describing the parameters of a mbuf dynamic field. + */ +struct rte_mbuf_dynfield { + char name[RTE_MBUF_DYN_NAMESIZE]; /**< Name of the field. 
*/
+ size_t size; /**< The number of bytes to reserve. */
+ size_t align; /**< The alignment constraint (power of 2). */
+ unsigned int flags; /**< Reserved for future use, must be 0. */
+};
+
+/**
+ * Structure describing the parameters of a mbuf dynamic flag.
+ */
+struct rte_mbuf_dynflag {
+ char name[RTE_MBUF_DYN_NAMESIZE]; /**< Name of the dynamic flag. */
+ unsigned int flags; /**< Reserved for future use, must be 0. */
+};
+
+/**
+ * Register space for a dynamic field in the mbuf structure.
+ *
+ * If the field is already registered (same name and parameters), its
+ * offset is returned.
+ *
+ * @param params
+ *   A structure containing the requested parameters (name, size,
+ *   alignment constraint and flags).
+ * @return
+ *   The offset in the mbuf structure, or -1 on error.
+ *   Possible values for rte_errno:
+ *   - EINVAL: invalid parameters (size, align, or flags).
+ *   - EEXIST: this name is already registered with different parameters.
+ *   - EPERM: called from a secondary process.
+ *   - ENOENT: not enough room in the mbuf.
+ *   - ENOMEM: allocation failure.
+ *   - ENAMETOOLONG: the name is not null-terminated.
+ */
+__rte_experimental
+int rte_mbuf_dynfield_register(const struct rte_mbuf_dynfield *params);
+
+/**
+ * Register space for a dynamic field in the mbuf structure at a given
+ * offset.
+ *
+ * If the field is already registered (same name, parameters and offset),
+ * the offset is returned.
+ *
+ * @param params
+ *   A structure containing the requested parameters (name, size,
+ *   alignment constraint and flags).
+ * @param offset
+ *   The requested offset. Ignored if SIZE_MAX is passed.
+ * @return
+ *   The offset in the mbuf structure, or -1 on error.
+ *   Possible values for rte_errno:
+ *   - EINVAL: invalid parameters (size, align, flags, or offset).
+ *   - EEXIST: this name is already registered with different parameters.
+ *   - EBUSY: the requested offset cannot be used.
+ *   - EPERM: called from a secondary process.
+ *   - ENOENT: not enough room in the mbuf.
+ *   - ENOMEM: allocation failure.
+ *   - ENAMETOOLONG: the name is not null-terminated.
+ */
+__rte_experimental
+int rte_mbuf_dynfield_register_offset(const struct rte_mbuf_dynfield *params,
+ size_t offset);
+
+/**
+ * Look up a registered dynamic mbuf field.
+ *
+ * @param name
+ *   A string identifying the dynamic field.
+ * @param params
+ *   If not NULL, and if the lookup is successful, the structure is
+ *   filled with the parameters of the dynamic field.
+ * @return
+ *   The offset of this field in the mbuf structure, or -1 on error.
+ *   Possible values for rte_errno:
+ *   - ENOENT: no dynamic field matches this name.
+ */
+__rte_experimental
+int rte_mbuf_dynfield_lookup(const char *name,
+ struct rte_mbuf_dynfield *params);
+
+/**
+ * Register a dynamic flag in the mbuf structure.
+ *
+ * If the flag is already registered (same name and parameters), its
+ * bit number is returned.
+ *
+ * @param params
+ *   A structure containing the requested parameters of the dynamic
+ *   flag (name and options).
+ * @return
+ *   The number of the reserved bit, or -1 on error.
+ *   Possible values for rte_errno:
+ *   - EINVAL: invalid parameters (flags).
+ *   - EEXIST: this name is already registered with different parameters.
+ *   - EPERM: called from a secondary process.
+ *   - ENOENT: no more flags available.
+ *   - ENOMEM: allocation failure.
+ *   - ENAMETOOLONG: the name is longer than RTE_MBUF_DYN_NAMESIZE - 1. 
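+ *
+ * A minimal usage sketch (the flag name is illustrative only, and
+ * m denotes a struct rte_mbuf pointer):
+ *
+ *   static const struct rte_mbuf_dynflag flag_desc = {
+ *           .name = "app_dynflag_example",
+ *   };
+ *   int bitnum = rte_mbuf_dynflag_register(&flag_desc);
+ *
+ *   if (bitnum >= 0)
+ *           m->ol_flags |= (1ULL << bitnum);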
+ */
+__rte_experimental
+int rte_mbuf_dynflag_register(const struct rte_mbuf_dynflag *params);
+
+/**
+ * Register a dynamic flag in the mbuf structure, specifying its bit
+ * number.
+ *
+ * If the flag is already registered (same name, parameters and bit
+ * number), the bit number is returned.
+ *
+ * @param params
+ *   A structure containing the requested parameters of the dynamic
+ *   flag (name and options).
+ * @param bitnum
+ *   The requested bit number. Ignored if UINT_MAX is passed.
+ * @return
+ *   The number of the reserved bit, or -1 on error.
+ *   Possible values for rte_errno:
+ *   - EINVAL: invalid parameters (flags or bitnum).
+ *   - EEXIST: this name is already registered with different parameters.
+ *   - EBUSY: the requested bit number cannot be used.
+ *   - EPERM: called from a secondary process.
+ *   - ENOENT: no more flags available.
+ *   - ENOMEM: allocation failure.
+ *   - ENAMETOOLONG: the name is longer than RTE_MBUF_DYN_NAMESIZE - 1.
+ */
+__rte_experimental
+int rte_mbuf_dynflag_register_bitnum(const struct rte_mbuf_dynflag *params,
+ unsigned int bitnum);
+
+/**
+ * Look up a registered dynamic mbuf flag.
+ *
+ * @param name
+ *   A string identifying the dynamic flag.
+ * @param params
+ *   If not NULL, and if the lookup is successful, the structure is
+ *   filled with the parameters of the dynamic flag.
+ * @return
+ *   The bit number of this flag in mbuf->ol_flags, or -1 on error.
+ *   Possible values for rte_errno:
+ *   - ENOENT: no dynamic flag matches this name.
+ */
+__rte_experimental
+int rte_mbuf_dynflag_lookup(const char *name,
+ struct rte_mbuf_dynflag *params);
+
+/**
+ * Helper macro to access a dynamic field.
+ */
+#define RTE_MBUF_DYNFIELD(m, offset, type) ((type)((uintptr_t)(m) + (offset)))
+
+/**
+ * Dump the status of dynamic fields and flags.
+ *
+ * @param out
+ *   The stream where the status is displayed.
+ */
+__rte_experimental
+void rte_mbuf_dyn_dump(FILE *out);
+
+/*
+ * Placeholder for dynamic field and flag declarations.
+ * This is a centralizing point to gather all field names
+ * and parameters together.
+ */
+
+/*
+ * The metadata dynamic field provides some extra packet information
+ * to interact with the RTE Flow engine. The metadata in sent mbufs can
+ * be used to match on some Flows. The metadata in received mbufs can
+ * provide some feedback from the Flows. The metadata flag tells
+ * whether the field contains an actual value to send, or a received
+ * one. 
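+ *
+ * A retrieval sketch, assuming the field has been registered and
+ * holds a uint32_t as the rte_flow metadata does (m denotes a
+ * struct rte_mbuf pointer):
+ *
+ *   int off = rte_mbuf_dynfield_lookup(
+ *                   RTE_MBUF_DYNFIELD_METADATA_NAME, NULL);
+ *   uint32_t meta = (off < 0) ? 0 :
+ *                   *RTE_MBUF_DYNFIELD(m, off, uint32_t *);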
+ */
+#define RTE_MBUF_DYNFIELD_METADATA_NAME "rte_flow_dynfield_metadata"
+#define RTE_MBUF_DYNFLAG_METADATA_NAME "rte_flow_dynflag_metadata"
+
+#endif /* _RTE_MBUF_DYN_H_ */
diff --git a/src/spdk/dpdk/lib/librte_mbuf/rte_mbuf_pool_ops.c b/src/spdk/dpdk/lib/librte_mbuf/rte_mbuf_pool_ops.c
new file mode 100644
index 000000000..f0e87a141
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_mbuf/rte_mbuf_pool_ops.c
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#include <string.h>
+#include <rte_compat.h>
+#include <rte_eal.h>
+#include <rte_mbuf.h>
+#include <rte_errno.h>
+#include <rte_mbuf_pool_ops.h>
+
+int
+rte_mbuf_set_platform_mempool_ops(const char *ops_name)
+{
+ const struct rte_memzone *mz;
+
+ size_t len = strnlen(ops_name, RTE_MEMPOOL_OPS_NAMESIZE);
+ if (len == 0)
+ return -EINVAL;
+ if (len == RTE_MEMPOOL_OPS_NAMESIZE)
+ return -ENAMETOOLONG;
+
+ mz = rte_memzone_lookup("mbuf_platform_pool_ops");
+ if (mz == NULL) {
+ mz = rte_memzone_reserve("mbuf_platform_pool_ops",
+ RTE_MEMPOOL_OPS_NAMESIZE, SOCKET_ID_ANY, 0);
+ if (mz == NULL)
+ return -rte_errno;
+ strcpy(mz->addr, ops_name);
+ return 0;
+ } else if (strcmp(mz->addr, ops_name) == 0) {
+ return 0;
+ }
+
+ RTE_LOG(ERR, MBUF,
+ "%s is already registered as platform mbuf pool ops\n",
+ (char *)mz->addr);
+ return -EEXIST;
+}
+
+const char *
+rte_mbuf_platform_mempool_ops(void)
+{
+ const struct rte_memzone *mz;
+
+ mz = rte_memzone_lookup("mbuf_platform_pool_ops");
+ if (mz == NULL)
+ return NULL;
+ return mz->addr;
+}
+
+int
+rte_mbuf_set_user_mempool_ops(const char *ops_name)
+{
+ const struct rte_memzone *mz;
+
+ size_t len = strnlen(ops_name, RTE_MEMPOOL_OPS_NAMESIZE);
+ if (len == 0)
+ return -EINVAL;
+ if (len == RTE_MEMPOOL_OPS_NAMESIZE)
+ return -ENAMETOOLONG;
+
+ mz = rte_memzone_lookup("mbuf_user_pool_ops");
+ if (mz == NULL) {
+ mz = rte_memzone_reserve("mbuf_user_pool_ops",
+ RTE_MEMPOOL_OPS_NAMESIZE, SOCKET_ID_ANY, 0);
+ if (mz == NULL)
+ return -rte_errno;
+ }
+
+ strcpy(mz->addr, ops_name);
+ return 0;
+}
+
+const char *
+rte_mbuf_user_mempool_ops(void)
+{
+ const struct rte_memzone *mz;
+
+ mz = rte_memzone_lookup("mbuf_user_pool_ops");
+ if (mz == NULL)
+ return rte_eal_mbuf_user_pool_ops();
+ return mz->addr;
+}
+
+/* Return mbuf pool ops name */
+const char *
+rte_mbuf_best_mempool_ops(void)
+{
+ /* User-defined mempool ops take priority */
+ const char *best_ops = rte_mbuf_user_mempool_ops();
+ if (best_ops)
+ return best_ops;
+
+ /* Next choice is platform configured mempool ops */
+ best_ops = rte_mbuf_platform_mempool_ops();
+ if (best_ops)
+ return best_ops;
+
+ /* Last choice is to use the compile time config pool */
+ return RTE_MBUF_DEFAULT_MEMPOOL_OPS;
+}
diff --git a/src/spdk/dpdk/lib/librte_mbuf/rte_mbuf_pool_ops.h b/src/spdk/dpdk/lib/librte_mbuf/rte_mbuf_pool_ops.h
new file mode 100644
index 000000000..7ed95a49a
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_mbuf/rte_mbuf_pool_ops.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef _RTE_MBUF_POOL_OPS_H_
+#define _RTE_MBUF_POOL_OPS_H_
+
+/**
+ * @file
+ * RTE Mbuf Pool Ops
+ *
+ * These APIs configure the mbuf pool ops names used by
+ * rte_pktmbuf_pool_create(). They can also be used to set and query
+ * the best mempool ops available. 
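+ *
+ * A typical sequence, where "ring_mp_mc" (DPDK's default ring-based
+ * ops) stands in for any registered ops name:
+ *
+ *   rte_mbuf_set_user_mempool_ops("ring_mp_mc");
+ *   const char *ops = rte_mbuf_best_mempool_ops();
+ *
+ * Here ops resolves to the user choice first, then the platform ops,
+ * then the compile-time default.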
+ */
+
+#include <rte_compat.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Set the platform-supported pktmbuf HW mempool ops name
+ *
+ * This function allows the HW to register its actively supported HW
+ * mempool ops_name. Only one HW mempool ops can be registered at any
+ * point in time.
+ *
+ * @param ops_name
+ *   The platform HW mempool ops name to register.
+ * @return
+ *   - On success, zero.
+ *   - On failure, a negative value.
+ */
+int
+rte_mbuf_set_platform_mempool_ops(const char *ops_name);
+
+/**
+ * Get the configured platform-supported pktmbuf HW mempool ops name
+ *
+ * This function returns the platform-supported mempool ops name.
+ *
+ * @return
+ *   - On success, the platform pool ops name.
+ *   - On failure, NULL.
+ */
+const char *
+rte_mbuf_platform_mempool_ops(void);
+
+/**
+ * Set the user-preferred pktmbuf mempool ops name
+ *
+ * This function can be used by the user to configure the preferred
+ * mempool ops name.
+ *
+ * @param ops_name
+ *   The user-preferred mempool ops name to set.
+ * @return
+ *   - On success, zero.
+ *   - On failure, a negative value.
+ */
+int
+rte_mbuf_set_user_mempool_ops(const char *ops_name);
+
+/**
+ * Get the user-preferred pool ops name for mbuf
+ *
+ * This function returns the user-configured mempool ops name.
+ *
+ * @return
+ *   - On success, the user pool ops name.
+ *   - On failure, NULL.
+ */
+const char *
+rte_mbuf_user_mempool_ops(void);
+
+/**
+ * Get the best mempool ops name for pktmbuf.
+ *
+ * This function determines the best mempool ops for pktmbuf
+ * allocations. The priority order is: 1. user-defined, 2. platform HW
+ * supported, 3. compile-time configured. It is also used by
+ * rte_pktmbuf_pool_create() to get the best mempool ops name.
+ *
+ * @return
+ *   The preferred mbuf pool ops name.
+ */
+const char *
+rte_mbuf_best_mempool_ops(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MBUF_POOL_OPS_H_ */
diff --git a/src/spdk/dpdk/lib/librte_mbuf/rte_mbuf_ptype.c b/src/spdk/dpdk/lib/librte_mbuf/rte_mbuf_ptype.c
new file mode 100644
index 000000000..d6f906b06
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_mbuf/rte_mbuf_ptype.c
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2016 6WIND S.A. 
+ */ + +#include <stdint.h> + +#include <rte_mbuf.h> +#include <rte_mbuf_ptype.h> + +/* get the name of the l2 packet type */ +const char *rte_get_ptype_l2_name(uint32_t ptype) +{ + switch (ptype & RTE_PTYPE_L2_MASK) { + case RTE_PTYPE_L2_ETHER: return "L2_ETHER"; + case RTE_PTYPE_L2_ETHER_TIMESYNC: return "L2_ETHER_TIMESYNC"; + case RTE_PTYPE_L2_ETHER_ARP: return "L2_ETHER_ARP"; + case RTE_PTYPE_L2_ETHER_LLDP: return "L2_ETHER_LLDP"; + case RTE_PTYPE_L2_ETHER_NSH: return "L2_ETHER_NSH"; + case RTE_PTYPE_L2_ETHER_VLAN: return "L2_ETHER_VLAN"; + case RTE_PTYPE_L2_ETHER_QINQ: return "L2_ETHER_QINQ"; + case RTE_PTYPE_L2_ETHER_PPPOE: return "L2_ETHER_PPPOE"; + case RTE_PTYPE_L2_ETHER_FCOE: return "L2_ETHER_FCOE"; + case RTE_PTYPE_L2_ETHER_MPLS: return "L2_ETHER_MPLS"; + default: return "L2_UNKNOWN"; + } +} + +/* get the name of the l3 packet type */ +const char *rte_get_ptype_l3_name(uint32_t ptype) +{ + switch (ptype & RTE_PTYPE_L3_MASK) { + case RTE_PTYPE_L3_IPV4: return "L3_IPV4"; + case RTE_PTYPE_L3_IPV4_EXT: return "L3_IPV4_EXT"; + case RTE_PTYPE_L3_IPV6: return "L3_IPV6"; + case RTE_PTYPE_L3_IPV4_EXT_UNKNOWN: return "L3_IPV4_EXT_UNKNOWN"; + case RTE_PTYPE_L3_IPV6_EXT: return "L3_IPV6_EXT"; + case RTE_PTYPE_L3_IPV6_EXT_UNKNOWN: return "L3_IPV6_EXT_UNKNOWN"; + default: return "L3_UNKNOWN"; + } +} + +/* get the name of the l4 packet type */ +const char *rte_get_ptype_l4_name(uint32_t ptype) +{ + switch (ptype & RTE_PTYPE_L4_MASK) { + case RTE_PTYPE_L4_TCP: return "L4_TCP"; + case RTE_PTYPE_L4_UDP: return "L4_UDP"; + case RTE_PTYPE_L4_FRAG: return "L4_FRAG"; + case RTE_PTYPE_L4_SCTP: return "L4_SCTP"; + case RTE_PTYPE_L4_ICMP: return "L4_ICMP"; + case RTE_PTYPE_L4_NONFRAG: return "L4_NONFRAG"; + case RTE_PTYPE_L4_IGMP: return "L4_IGMP"; + default: return "L4_UNKNOWN"; + } +} + +/* get the name of the tunnel packet type */ +const char *rte_get_ptype_tunnel_name(uint32_t ptype) +{ + switch (ptype & RTE_PTYPE_TUNNEL_MASK) { + case RTE_PTYPE_TUNNEL_IP: return "TUNNEL_IP"; + case RTE_PTYPE_TUNNEL_GRE: return "TUNNEL_GRE"; + case RTE_PTYPE_TUNNEL_VXLAN: return "TUNNEL_VXLAN"; + case RTE_PTYPE_TUNNEL_NVGRE: return "TUNNEL_NVGRE"; + case RTE_PTYPE_TUNNEL_GENEVE: return "TUNNEL_GENEVE"; + case RTE_PTYPE_TUNNEL_GRENAT: return "TUNNEL_GRENAT"; + case RTE_PTYPE_TUNNEL_GTPC: return "TUNNEL_GTPC"; + case RTE_PTYPE_TUNNEL_GTPU: return "TUNNEL_GTPU"; + case RTE_PTYPE_TUNNEL_ESP: return "TUNNEL_ESP"; + case RTE_PTYPE_TUNNEL_L2TP: return "TUNNEL_L2TP"; + case RTE_PTYPE_TUNNEL_VXLAN_GPE: return "TUNNEL_VXLAN_GPE"; + case RTE_PTYPE_TUNNEL_MPLS_IN_UDP: return "TUNNEL_MPLS_IN_UDP"; + case RTE_PTYPE_TUNNEL_MPLS_IN_GRE: return "TUNNEL_MPLS_IN_GRE"; + default: return "TUNNEL_UNKNOWN"; + } +} + +/* get the name of the inner_l2 packet type */ +const char *rte_get_ptype_inner_l2_name(uint32_t ptype) +{ + switch (ptype & RTE_PTYPE_INNER_L2_MASK) { + case RTE_PTYPE_INNER_L2_ETHER: return "INNER_L2_ETHER"; + case RTE_PTYPE_INNER_L2_ETHER_VLAN: return "INNER_L2_ETHER_VLAN"; + case RTE_PTYPE_INNER_L2_ETHER_QINQ: return "INNER_L2_ETHER_QINQ"; + default: return "INNER_L2_UNKNOWN"; + } +} + +/* get the name of the inner_l3 packet type */ +const char *rte_get_ptype_inner_l3_name(uint32_t ptype) +{ + switch (ptype & RTE_PTYPE_INNER_L3_MASK) { + case RTE_PTYPE_INNER_L3_IPV4: return "INNER_L3_IPV4"; + case RTE_PTYPE_INNER_L3_IPV4_EXT: return "INNER_L3_IPV4_EXT"; + case RTE_PTYPE_INNER_L3_IPV6: return "INNER_L3_IPV6"; + case RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN: + return "INNER_L3_IPV4_EXT_UNKNOWN"; + case 
RTE_PTYPE_INNER_L3_IPV6_EXT: return "INNER_L3_IPV6_EXT"; + case RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN: + return "INNER_L3_IPV6_EXT_UNKNOWN"; + default: return "INNER_L3_UNKNOWN"; + } +} + +/* get the name of the inner_l4 packet type */ +const char *rte_get_ptype_inner_l4_name(uint32_t ptype) +{ + switch (ptype & RTE_PTYPE_INNER_L4_MASK) { + case RTE_PTYPE_INNER_L4_TCP: return "INNER_L4_TCP"; + case RTE_PTYPE_INNER_L4_UDP: return "INNER_L4_UDP"; + case RTE_PTYPE_INNER_L4_FRAG: return "INNER_L4_FRAG"; + case RTE_PTYPE_INNER_L4_SCTP: return "INNER_L4_SCTP"; + case RTE_PTYPE_INNER_L4_ICMP: return "INNER_L4_ICMP"; + case RTE_PTYPE_INNER_L4_NONFRAG: return "INNER_L4_NONFRAG"; + default: return "INNER_L4_UNKNOWN"; + } +} + +/* write the packet type name into the buffer */ +int rte_get_ptype_name(uint32_t ptype, char *buf, size_t buflen) +{ + int ret; + + if (buflen == 0) + return -1; + + buf[0] = '\0'; + if ((ptype & RTE_PTYPE_ALL_MASK) == RTE_PTYPE_UNKNOWN) { + ret = snprintf(buf, buflen, "UNKNOWN"); + if (ret < 0) + return -1; + if ((size_t)ret >= buflen) + return -1; + return 0; + } + + if ((ptype & RTE_PTYPE_L2_MASK) != 0) { + ret = snprintf(buf, buflen, "%s ", + rte_get_ptype_l2_name(ptype)); + if (ret < 0) + return -1; + if ((size_t)ret >= buflen) + return -1; + buf += ret; + buflen -= ret; + } + if ((ptype & RTE_PTYPE_L3_MASK) != 0) { + ret = snprintf(buf, buflen, "%s ", + rte_get_ptype_l3_name(ptype)); + if (ret < 0) + return -1; + if ((size_t)ret >= buflen) + return -1; + buf += ret; + buflen -= ret; + } + if ((ptype & RTE_PTYPE_L4_MASK) != 0) { + ret = snprintf(buf, buflen, "%s ", + rte_get_ptype_l4_name(ptype)); + if (ret < 0) + return -1; + if ((size_t)ret >= buflen) + return -1; + buf += ret; + buflen -= ret; + } + if ((ptype & RTE_PTYPE_TUNNEL_MASK) != 0) { + ret = snprintf(buf, buflen, "%s ", + rte_get_ptype_tunnel_name(ptype)); + if (ret < 0) + return -1; + if ((size_t)ret >= buflen) + return -1; + buf += ret; + buflen -= ret; + } + if ((ptype & RTE_PTYPE_INNER_L2_MASK) != 0) { + ret = snprintf(buf, buflen, "%s ", + rte_get_ptype_inner_l2_name(ptype)); + if (ret < 0) + return -1; + if ((size_t)ret >= buflen) + return -1; + buf += ret; + buflen -= ret; + } + if ((ptype & RTE_PTYPE_INNER_L3_MASK) != 0) { + ret = snprintf(buf, buflen, "%s ", + rte_get_ptype_inner_l3_name(ptype)); + if (ret < 0) + return -1; + if ((size_t)ret >= buflen) + return -1; + buf += ret; + buflen -= ret; + } + if ((ptype & RTE_PTYPE_INNER_L4_MASK) != 0) { + ret = snprintf(buf, buflen, "%s ", + rte_get_ptype_inner_l4_name(ptype)); + if (ret < 0) + return -1; + if ((size_t)ret >= buflen) + return -1; + buf += ret; + buflen -= ret; + } + + return 0; +} diff --git a/src/spdk/dpdk/lib/librte_mbuf/rte_mbuf_ptype.h b/src/spdk/dpdk/lib/librte_mbuf/rte_mbuf_ptype.h new file mode 100644 index 000000000..17a2dd357 --- /dev/null +++ b/src/spdk/dpdk/lib/librte_mbuf/rte_mbuf_ptype.h @@ -0,0 +1,780 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2016 Intel Corporation. + * Copyright 2014-2016 6WIND S.A. + */ + +#ifndef _RTE_MBUF_PTYPE_H_ +#define _RTE_MBUF_PTYPE_H_ + +/** + * @file + * RTE Mbuf Packet Types + * + * This file contains declarations for features related to mbuf packet + * types. The packet type gives information about the data carried by the + * mbuf, and is stored in the mbuf in a 32 bits field. + * + * The 32 bits are divided into several fields to mark packet types. Note that + * each field is indexical. + * - Bit 3:0 is for L2 types. 
+ * - Bit 7:4 is for L3 or outer L3 (for tunneling case) types. + * - Bit 11:8 is for L4 or outer L4 (for tunneling case) types. + * - Bit 15:12 is for tunnel types. + * - Bit 19:16 is for inner L2 types. + * - Bit 23:20 is for inner L3 types. + * - Bit 27:24 is for inner L4 types. + * - Bit 31:28 is reserved. + * + * To be compatible with Vector PMD, RTE_PTYPE_L3_IPV4, RTE_PTYPE_L3_IPV4_EXT, + * RTE_PTYPE_L3_IPV6, RTE_PTYPE_L3_IPV6_EXT, RTE_PTYPE_L4_TCP, RTE_PTYPE_L4_UDP + * and RTE_PTYPE_L4_SCTP should be kept as below in a contiguous 7 bits. + * + * Note that L3 types values are selected for checking IPV4/IPV6 header from + * performance point of view. Reading annotations of RTE_ETH_IS_IPV4_HDR and + * RTE_ETH_IS_IPV6_HDR is needed for any future changes of L3 type values. + * + * Note that the packet types of the same packet recognized by different + * hardware may be different, as different hardware may have different + * capability of packet type recognition. + * + * examples: + * <'ether type'=0x0800 + * | 'version'=4, 'protocol'=0x29 + * | 'version'=6, 'next header'=0x3A + * | 'ICMPv6 header'> + * will be recognized on i40e hardware as packet type combination of, + * RTE_PTYPE_L2_ETHER | + * RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + * RTE_PTYPE_TUNNEL_IP | + * RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + * RTE_PTYPE_INNER_L4_ICMP. + * + * <'ether type'=0x86DD + * | 'version'=6, 'next header'=0x2F + * | 'GRE header' + * | 'version'=6, 'next header'=0x11 + * | 'UDP header'> + * will be recognized on i40e hardware as packet type combination of, + * RTE_PTYPE_L2_ETHER | + * RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + * RTE_PTYPE_TUNNEL_GRENAT | + * RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + * RTE_PTYPE_INNER_L4_UDP. + */ + +#include <stddef.h> +#include <stdint.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * No packet type information. + */ +#define RTE_PTYPE_UNKNOWN 0x00000000 +/** + * Ethernet packet type. + * It is used for outer packet for tunneling cases. + * + * Packet format: + * <'ether type'=[0x0800|0x86DD]> + */ +#define RTE_PTYPE_L2_ETHER 0x00000001 +/** + * Ethernet packet type for time sync. + * + * Packet format: + * <'ether type'=0x88F7> + */ +#define RTE_PTYPE_L2_ETHER_TIMESYNC 0x00000002 +/** + * ARP (Address Resolution Protocol) packet type. + * + * Packet format: + * <'ether type'=0x0806> + */ +#define RTE_PTYPE_L2_ETHER_ARP 0x00000003 +/** + * LLDP (Link Layer Discovery Protocol) packet type. + * + * Packet format: + * <'ether type'=0x88CC> + */ +#define RTE_PTYPE_L2_ETHER_LLDP 0x00000004 +/** + * NSH (Network Service Header) packet type. + * + * Packet format: + * <'ether type'=0x894F> + */ +#define RTE_PTYPE_L2_ETHER_NSH 0x00000005 +/** + * VLAN packet type. + * + * Packet format: + * <'ether type'=[0x8100]> + */ +#define RTE_PTYPE_L2_ETHER_VLAN 0x00000006 +/** + * QinQ packet type. + * + * Packet format: + * <'ether type'=[0x88A8]> + */ +#define RTE_PTYPE_L2_ETHER_QINQ 0x00000007 +/** + * PPPOE packet type. + * + * Packet format: + * <'ether type'=[0x8863|0x8864]> + */ +#define RTE_PTYPE_L2_ETHER_PPPOE 0x00000008 +/** + * FCoE packet type. + * + * Packet format: + * <'ether type'=[0x8906]> + */ +#define RTE_PTYPE_L2_ETHER_FCOE 0x00000009 +/** + * MPLS packet type. + * + * Packet format: + * <'ether type'=[0x8847|0x8848]> + */ +#define RTE_PTYPE_L2_ETHER_MPLS 0x0000000a +/** + * Mask of layer 2 packet types. + * It is used for outer packet for tunneling cases. + */ +#define RTE_PTYPE_L2_MASK 0x0000000f +/** + * IP (Internet Protocol) version 4 packet type. 
+ * It is used for outer packet for tunneling cases, and does not contain any + * header option. + * + * Packet format: + * <'ether type'=0x0800 + * | 'version'=4, 'ihl'=5> + */ +#define RTE_PTYPE_L3_IPV4 0x00000010 +/** + * IP (Internet Protocol) version 4 packet type. + * It is used for outer packet for tunneling cases, and contains header + * options. + * + * Packet format: + * <'ether type'=0x0800 + * | 'version'=4, 'ihl'=[6-15], 'options'> + */ +#define RTE_PTYPE_L3_IPV4_EXT 0x00000030 +/** + * IP (Internet Protocol) version 6 packet type. + * It is used for outer packet for tunneling cases, and does not contain any + * extension header. + * + * Packet format: + * <'ether type'=0x86DD + * | 'version'=6, 'next header'=0x3B> + */ +#define RTE_PTYPE_L3_IPV6 0x00000040 +/** + * IP (Internet Protocol) version 4 packet type. + * It is used for outer packet for tunneling cases, and may or maynot contain + * header options. + * + * Packet format: + * <'ether type'=0x0800 + * | 'version'=4, 'ihl'=[5-15], <'options'>> + */ +#define RTE_PTYPE_L3_IPV4_EXT_UNKNOWN 0x00000090 +/** + * IP (Internet Protocol) version 6 packet type. + * It is used for outer packet for tunneling cases, and contains extension + * headers. + * + * Packet format: + * <'ether type'=0x86DD + * | 'version'=6, 'next header'=[0x0|0x2B|0x2C|0x32|0x33|0x3C|0x87], + * 'extension headers'> + */ +#define RTE_PTYPE_L3_IPV6_EXT 0x000000c0 +/** + * IP (Internet Protocol) version 6 packet type. + * It is used for outer packet for tunneling cases, and may or maynot contain + * extension headers. + * + * Packet format: + * <'ether type'=0x86DD + * | 'version'=6, 'next header'=[0x3B|0x0|0x2B|0x2C|0x32|0x33|0x3C|0x87], + * <'extension headers'>> + */ +#define RTE_PTYPE_L3_IPV6_EXT_UNKNOWN 0x000000e0 +/** + * Mask of layer 3 packet types. + * It is used for outer packet for tunneling cases. + */ +#define RTE_PTYPE_L3_MASK 0x000000f0 +/** + * TCP (Transmission Control Protocol) packet type. + * It is used for outer packet for tunneling cases. + * + * Packet format: + * <'ether type'=0x0800 + * | 'version'=4, 'protocol'=6, 'MF'=0, 'frag_offset'=0> + * or, + * <'ether type'=0x86DD + * | 'version'=6, 'next header'=6> + */ +#define RTE_PTYPE_L4_TCP 0x00000100 +/** + * UDP (User Datagram Protocol) packet type. + * It is used for outer packet for tunneling cases. + * + * Packet format: + * <'ether type'=0x0800 + * | 'version'=4, 'protocol'=17, 'MF'=0, 'frag_offset'=0> + * or, + * <'ether type'=0x86DD + * | 'version'=6, 'next header'=17> + */ +#define RTE_PTYPE_L4_UDP 0x00000200 +/** + * Fragmented IP (Internet Protocol) packet type. + * It is used for outer packet for tunneling cases. + * + * It refers to those packets of any IP types, which can be recognized as + * fragmented. A fragmented packet cannot be recognized as any other L4 types + * (RTE_PTYPE_L4_TCP, RTE_PTYPE_L4_UDP, RTE_PTYPE_L4_SCTP, RTE_PTYPE_L4_ICMP, + * RTE_PTYPE_L4_NONFRAG). + * + * Packet format: + * <'ether type'=0x0800 + * | 'version'=4, 'MF'=1> + * or, + * <'ether type'=0x0800 + * | 'version'=4, 'frag_offset'!=0> + * or, + * <'ether type'=0x86DD + * | 'version'=6, 'next header'=44> + */ +#define RTE_PTYPE_L4_FRAG 0x00000300 +/** + * SCTP (Stream Control Transmission Protocol) packet type. + * It is used for outer packet for tunneling cases. 
+ * + * Packet format: + * <'ether type'=0x0800 + * | 'version'=4, 'protocol'=132, 'MF'=0, 'frag_offset'=0> + * or, + * <'ether type'=0x86DD + * | 'version'=6, 'next header'=132> + */ +#define RTE_PTYPE_L4_SCTP 0x00000400 +/** + * ICMP (Internet Control Message Protocol) packet type. + * It is used for outer packet for tunneling cases. + * + * Packet format: + * <'ether type'=0x0800 + * | 'version'=4, 'protocol'=1, 'MF'=0, 'frag_offset'=0> + * or, + * <'ether type'=0x86DD + * | 'version'=6, 'next header'=1> + */ +#define RTE_PTYPE_L4_ICMP 0x00000500 +/** + * Non-fragmented IP (Internet Protocol) packet type. + * It is used for outer packet for tunneling cases. + * + * It refers to those packets of any IP types, while cannot be recognized as + * any of above L4 types (RTE_PTYPE_L4_TCP, RTE_PTYPE_L4_UDP, + * RTE_PTYPE_L4_FRAG, RTE_PTYPE_L4_SCTP, RTE_PTYPE_L4_ICMP). + * + * Packet format: + * <'ether type'=0x0800 + * | 'version'=4, 'protocol'!=[6|17|132|1], 'MF'=0, 'frag_offset'=0> + * or, + * <'ether type'=0x86DD + * | 'version'=6, 'next header'!=[6|17|44|132|1]> + */ +#define RTE_PTYPE_L4_NONFRAG 0x00000600 +/** + * IGMP (Internet Group Management Protocol) packet type. + * + * Packet format: + * <'ether type'=0x0800 + * | 'version'=4, 'protocol'=2, 'MF'=0, 'frag_offset'=0> + */ +#define RTE_PTYPE_L4_IGMP 0x00000700 +/** + * Mask of layer 4 packet types. + * It is used for outer packet for tunneling cases. + */ +#define RTE_PTYPE_L4_MASK 0x00000f00 +/** + * IP (Internet Protocol) in IP (Internet Protocol) tunneling packet type. + * + * Packet format: + * <'ether type'=0x0800 + * | 'version'=4, 'protocol'=[4|41]> + * or, + * <'ether type'=0x86DD + * | 'version'=6, 'next header'=[4|41]> + */ +#define RTE_PTYPE_TUNNEL_IP 0x00001000 +/** + * GRE (Generic Routing Encapsulation) tunneling packet type. + * + * Packet format: + * <'ether type'=0x0800 + * | 'version'=4, 'protocol'=47> + * or, + * <'ether type'=0x86DD + * | 'version'=6, 'next header'=47> + */ +#define RTE_PTYPE_TUNNEL_GRE 0x00002000 +/** + * VXLAN (Virtual eXtensible Local Area Network) tunneling packet type. + * + * Packet format: + * <'ether type'=0x0800 + * | 'version'=4, 'protocol'=17 + * | 'destination port'=4789> + * or, + * <'ether type'=0x86DD + * | 'version'=6, 'next header'=17 + * | 'destination port'=4789> + */ +#define RTE_PTYPE_TUNNEL_VXLAN 0x00003000 +/** + * NVGRE (Network Virtualization using Generic Routing Encapsulation) tunneling + * packet type. + * + * Packet format: + * <'ether type'=0x0800 + * | 'version'=4, 'protocol'=47 + * | 'protocol type'=0x6558> + * or, + * <'ether type'=0x86DD + * | 'version'=6, 'next header'=47 + * | 'protocol type'=0x6558'> + */ +#define RTE_PTYPE_TUNNEL_NVGRE 0x00004000 +/** + * GENEVE (Generic Network Virtualization Encapsulation) tunneling packet type. + * + * Packet format: + * <'ether type'=0x0800 + * | 'version'=4, 'protocol'=17 + * | 'destination port'=6081> + * or, + * <'ether type'=0x86DD + * | 'version'=6, 'next header'=17 + * | 'destination port'=6081> + */ +#define RTE_PTYPE_TUNNEL_GENEVE 0x00005000 +/** + * Tunneling packet type of Teredo, VXLAN (Virtual eXtensible Local Area + * Network) or GRE (Generic Routing Encapsulation) could be recognized as this + * packet type, if they can not be recognized independently as of hardware + * capability. + */ +#define RTE_PTYPE_TUNNEL_GRENAT 0x00006000 +/** + * GTP-C (GPRS Tunnelling Protocol) control tunneling packet type. 
+ * Packet format: + * <'ether type'=0x0800 + * | 'version'=4, 'protocol'=17 + * | 'destination port'=2123> + * or, + * <'ether type'=0x86DD + * | 'version'=6, 'next header'=17 + * | 'destination port'=2123> + * or, + * <'ether type'=0x0800 + * | 'version'=4, 'protocol'=17 + * | 'source port'=2123> + * or, + * <'ether type'=0x86DD + * | 'version'=6, 'next header'=17 + * | 'source port'=2123> + */ +#define RTE_PTYPE_TUNNEL_GTPC 0x00007000 +/** + * GTP-U (GPRS Tunnelling Protocol) user data tunneling packet type. + * Packet format: + * <'ether type'=0x0800 + * | 'version'=4, 'protocol'=17 + * | 'destination port'=2152> + * or, + * <'ether type'=0x86DD + * | 'version'=6, 'next header'=17 + * | 'destination port'=2152> + */ +#define RTE_PTYPE_TUNNEL_GTPU 0x00008000 +/** + * ESP (IP Encapsulating Security Payload) tunneling packet type. + * + * Packet format: + * <'ether type'=0x0800 + * | 'version'=4, 'protocol'=51> + * or, + * <'ether type'=0x86DD + * | 'version'=6, 'next header'=51> + */ +#define RTE_PTYPE_TUNNEL_ESP 0x00009000 +/** + * L2TP (Layer 2 Tunneling Protocol) tunneling packet type. + * + * Packet format: + * <'ether type'=0x0800 + * | 'version'=4, 'protocol'=17> + * | 'destination port'=1701> + * or, + * <'ether type'=0x86DD + * | 'version'=6, 'next header'=17 + * | 'destination port'=1701> + * or, + * <'ether type'=0x0800 + * | 'version'=4, 'protocol'=115> + * or, + * <'ether type'=0x86DD + * | 'version'=6, 'protocol'=115> + */ +#define RTE_PTYPE_TUNNEL_L2TP 0x0000a000 +/** + * VXLAN-GPE (VXLAN Generic Protocol Extension) tunneling packet type. + * + * Packet format: + * <'ether type'=0x0800 + * | 'version'=4, 'protocol'=17 + * | 'destination port'=4790> + * or, + * <'ether type'=0x86DD + * | 'version'=6, 'next header'=17 + * | 'destination port'=4790> + */ +#define RTE_PTYPE_TUNNEL_VXLAN_GPE 0x0000b000 +/** + * MPLS-in-GRE tunneling packet type (RFC 4023). + * + * Packet format: + * <'ether type'=0x0800 + * | 'version'=4, 'protocol'=47 + * | 'protocol'=0x8847> + * or, + * <'ether type'=0x0800 + * | 'version'=4, 'protocol'=47 + * | 'protocol'=0x8848> + * or, + * <'ether type'=0x86DD + * | 'version'=6, 'protocol'=47 + * | 'protocol'=0x8847> + * or, + * <'ether type'=0x86DD + * | 'version'=6, 'next header'=47 + * | 'protocol'=0x8848> + */ +#define RTE_PTYPE_TUNNEL_MPLS_IN_GRE 0x0000c000 +/** + * MPLS-in-UDP tunneling packet type (RFC 7510). + * + * Packet format: + * <'ether type'=0x0800 + * | 'version'=4, 'protocol'=17 + * | 'destination port'=6635> + * or, + * <'ether type'=0x86DD + * | 'version'=6, 'next header'=17 + * | 'destination port'=6635> + */ +#define RTE_PTYPE_TUNNEL_MPLS_IN_UDP 0x0000d000 +/** + * Mask of tunneling packet types. + */ +#define RTE_PTYPE_TUNNEL_MASK 0x0000f000 +/** + * Ethernet packet type. + * It is used for inner packet type only. + * + * Packet format (inner only): + * <'ether type'=[0x800|0x86DD]> + */ +#define RTE_PTYPE_INNER_L2_ETHER 0x00010000 +/** + * Ethernet packet type with VLAN (Virtual Local Area Network) tag. + * + * Packet format (inner only): + * <'ether type'=[0x800|0x86DD], vlan=[1-4095]> + */ +#define RTE_PTYPE_INNER_L2_ETHER_VLAN 0x00020000 +/** + * QinQ packet type. + * + * Packet format: + * <'ether type'=[0x88A8]> + */ +#define RTE_PTYPE_INNER_L2_ETHER_QINQ 0x00030000 +/** + * Mask of inner layer 2 packet types. + */ +#define RTE_PTYPE_INNER_L2_MASK 0x000f0000 +/** + * IP (Internet Protocol) version 4 packet type. + * It is used for inner packet only, and does not contain any header option. 
+ * + * Packet format (inner only): + * <'ether type'=0x0800 + * | 'version'=4, 'ihl'=5> + */ +#define RTE_PTYPE_INNER_L3_IPV4 0x00100000 +/** + * IP (Internet Protocol) version 4 packet type. + * It is used for inner packet only, and contains header options. + * + * Packet format (inner only): + * <'ether type'=0x0800 + * | 'version'=4, 'ihl'=[6-15], 'options'> + */ +#define RTE_PTYPE_INNER_L3_IPV4_EXT 0x00200000 +/** + * IP (Internet Protocol) version 6 packet type. + * It is used for inner packet only, and does not contain any extension header. + * + * Packet format (inner only): + * <'ether type'=0x86DD + * | 'version'=6, 'next header'=0x3B> + */ +#define RTE_PTYPE_INNER_L3_IPV6 0x00300000 +/** + * IP (Internet Protocol) version 4 packet type. + * It is used for inner packet only, and may or maynot contain header options. + * + * Packet format (inner only): + * <'ether type'=0x0800 + * | 'version'=4, 'ihl'=[5-15], <'options'>> + */ +#define RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN 0x00400000 +/** + * IP (Internet Protocol) version 6 packet type. + * It is used for inner packet only, and contains extension headers. + * + * Packet format (inner only): + * <'ether type'=0x86DD + * | 'version'=6, 'next header'=[0x0|0x2B|0x2C|0x32|0x33|0x3C|0x87], + * 'extension headers'> + */ +#define RTE_PTYPE_INNER_L3_IPV6_EXT 0x00500000 +/** + * IP (Internet Protocol) version 6 packet type. + * It is used for inner packet only, and may or maynot contain extension + * headers. + * + * Packet format (inner only): + * <'ether type'=0x86DD + * | 'version'=6, 'next header'=[0x3B|0x0|0x2B|0x2C|0x32|0x33|0x3C|0x87], + * <'extension headers'>> + */ +#define RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN 0x00600000 +/** + * Mask of inner layer 3 packet types. + */ +#define RTE_PTYPE_INNER_L3_MASK 0x00f00000 +/** + * TCP (Transmission Control Protocol) packet type. + * It is used for inner packet only. + * + * Packet format (inner only): + * <'ether type'=0x0800 + * | 'version'=4, 'protocol'=6, 'MF'=0, 'frag_offset'=0> + * or, + * <'ether type'=0x86DD + * | 'version'=6, 'next header'=6> + */ +#define RTE_PTYPE_INNER_L4_TCP 0x01000000 +/** + * UDP (User Datagram Protocol) packet type. + * It is used for inner packet only. + * + * Packet format (inner only): + * <'ether type'=0x0800 + * | 'version'=4, 'protocol'=17, 'MF'=0, 'frag_offset'=0> + * or, + * <'ether type'=0x86DD + * | 'version'=6, 'next header'=17> + */ +#define RTE_PTYPE_INNER_L4_UDP 0x02000000 +/** + * Fragmented IP (Internet Protocol) packet type. + * It is used for inner packet only, and may or maynot have layer 4 packet. + * + * Packet format (inner only): + * <'ether type'=0x0800 + * | 'version'=4, 'MF'=1> + * or, + * <'ether type'=0x0800 + * | 'version'=4, 'frag_offset'!=0> + * or, + * <'ether type'=0x86DD + * | 'version'=6, 'next header'=44> + */ +#define RTE_PTYPE_INNER_L4_FRAG 0x03000000 +/** + * SCTP (Stream Control Transmission Protocol) packet type. + * It is used for inner packet only. + * + * Packet format (inner only): + * <'ether type'=0x0800 + * | 'version'=4, 'protocol'=132, 'MF'=0, 'frag_offset'=0> + * or, + * <'ether type'=0x86DD + * | 'version'=6, 'next header'=132> + */ +#define RTE_PTYPE_INNER_L4_SCTP 0x04000000 +/** + * ICMP (Internet Control Message Protocol) packet type. + * It is used for inner packet only. 
+ * + * Packet format (inner only): + * <'ether type'=0x0800 + * | 'version'=4, 'protocol'=1, 'MF'=0, 'frag_offset'=0> + * or, + * <'ether type'=0x86DD + * | 'version'=6, 'next header'=1> + */ +#define RTE_PTYPE_INNER_L4_ICMP 0x05000000 +/** + * Non-fragmented IP (Internet Protocol) packet type. + * It is used for inner packet only, and may or maynot have other unknown layer + * 4 packet types. + * + * Packet format (inner only): + * <'ether type'=0x0800 + * | 'version'=4, 'protocol'!=[6|17|132|1], 'MF'=0, 'frag_offset'=0> + * or, + * <'ether type'=0x86DD + * | 'version'=6, 'next header'!=[6|17|44|132|1]> + */ +#define RTE_PTYPE_INNER_L4_NONFRAG 0x06000000 +/** + * Mask of inner layer 4 packet types. + */ +#define RTE_PTYPE_INNER_L4_MASK 0x0f000000 +/** + * All valid layer masks. + */ +#define RTE_PTYPE_ALL_MASK 0x0fffffff + +/** + * Check if the (outer) L3 header is IPv4. To avoid comparing IPv4 types one by + * one, bit 4 is selected to be used for IPv4 only. Then checking bit 4 can + * determine if it is an IPV4 packet. + */ +#define RTE_ETH_IS_IPV4_HDR(ptype) ((ptype) & RTE_PTYPE_L3_IPV4) + +/** + * Check if the (outer) L3 header is IPv6. To avoid comparing IPv6 types one by + * one, bit 6 is selected to be used for IPv6 only. Then checking bit 6 can + * determine if it is an IPV6 packet. + */ +#define RTE_ETH_IS_IPV6_HDR(ptype) ((ptype) & RTE_PTYPE_L3_IPV6) + +/* Check if it is a tunneling packet */ +#define RTE_ETH_IS_TUNNEL_PKT(ptype) ((ptype) & \ + (RTE_PTYPE_TUNNEL_MASK | \ + RTE_PTYPE_INNER_L2_MASK | \ + RTE_PTYPE_INNER_L3_MASK | \ + RTE_PTYPE_INNER_L4_MASK)) + +/** + * Get the name of the l2 packet type + * + * @param ptype + * The packet type value. + * @return + * A non-null string describing the packet type. + */ +const char *rte_get_ptype_l2_name(uint32_t ptype); + +/** + * Get the name of the l3 packet type + * + * @param ptype + * The packet type value. + * @return + * A non-null string describing the packet type. + */ +const char *rte_get_ptype_l3_name(uint32_t ptype); + +/** + * Get the name of the l4 packet type + * + * @param ptype + * The packet type value. + * @return + * A non-null string describing the packet type. + */ +const char *rte_get_ptype_l4_name(uint32_t ptype); + +/** + * Get the name of the tunnel packet type + * + * @param ptype + * The packet type value. + * @return + * A non-null string describing the packet type. + */ +const char *rte_get_ptype_tunnel_name(uint32_t ptype); + +/** + * Get the name of the inner_l2 packet type + * + * @param ptype + * The packet type value. + * @return + * A non-null string describing the packet type. + */ +const char *rte_get_ptype_inner_l2_name(uint32_t ptype); + +/** + * Get the name of the inner_l3 packet type + * + * @param ptype + * The packet type value. + * @return + * A non-null string describing the packet type. + */ +const char *rte_get_ptype_inner_l3_name(uint32_t ptype); + +/** + * Get the name of the inner_l4 packet type + * + * @param ptype + * The packet type value. + * @return + * A non-null string describing the packet type. + */ +const char *rte_get_ptype_inner_l4_name(uint32_t ptype); + +/** + * Write the packet type name into the buffer + * + * @param ptype + * The packet type value. + * @param buf + * The buffer where the string is written. + * @param buflen + * The length of the buffer. 
+ * @return
+ *   - 0 on success
+ *   - (-1) if the buffer is too small
+ */
+int rte_get_ptype_name(uint32_t ptype, char *buf, size_t buflen);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MBUF_PTYPE_H_ */
diff --git a/src/spdk/dpdk/lib/librte_mbuf/rte_mbuf_version.map b/src/spdk/dpdk/lib/librte_mbuf/rte_mbuf_version.map
new file mode 100644
index 000000000..ab161bcdc
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_mbuf/rte_mbuf_version.map
@@ -0,0 +1,49 @@
+DPDK_20.0 {
+ global:
+
+ __rte_pktmbuf_linearize;
+ __rte_pktmbuf_read;
+ rte_get_ptype_inner_l2_name;
+ rte_get_ptype_inner_l3_name;
+ rte_get_ptype_inner_l4_name;
+ rte_get_ptype_l2_name;
+ rte_get_ptype_l3_name;
+ rte_get_ptype_l4_name;
+ rte_get_ptype_name;
+ rte_get_ptype_tunnel_name;
+ rte_get_rx_ol_flag_list;
+ rte_get_rx_ol_flag_name;
+ rte_get_tx_ol_flag_list;
+ rte_get_tx_ol_flag_name;
+ rte_mbuf_best_mempool_ops;
+ rte_mbuf_platform_mempool_ops;
+ rte_mbuf_sanity_check;
+ rte_mbuf_set_platform_mempool_ops;
+ rte_mbuf_set_user_mempool_ops;
+ rte_mbuf_user_mempool_ops;
+ rte_pktmbuf_clone;
+ rte_pktmbuf_dump;
+ rte_pktmbuf_init;
+ rte_pktmbuf_pool_create;
+ rte_pktmbuf_pool_create_by_ops;
+ rte_pktmbuf_pool_init;
+
+ local: *;
+};
+
+EXPERIMENTAL {
+ global:
+
+ rte_mbuf_check;
+ rte_mbuf_dynfield_lookup;
+ rte_mbuf_dynfield_register;
+ rte_mbuf_dynfield_register_offset;
+ rte_mbuf_dynflag_lookup;
+ rte_mbuf_dynflag_register;
+ rte_mbuf_dynflag_register_bitnum;
+ rte_mbuf_dyn_dump;
+ rte_pktmbuf_copy;
+ rte_pktmbuf_free_bulk;
+ rte_pktmbuf_pool_create_extbuf;
+
+};
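
As a usage sketch for the ptype helpers declared in rte_mbuf_ptype.h above: the function below is illustrative only (not part of the library) and assumes a received mbuf m whose driver has filled m->packet_type.

	#include <stdio.h>
	#include <rte_mbuf.h>
	#include <rte_mbuf_ptype.h>

	/* Print a human-readable decomposition of an mbuf's packet type. */
	static void
	print_ptype(const struct rte_mbuf *m)
	{
		char buf[256];

		/* rte_get_ptype_name() returns 0 on success, -1 if buf is
		 * too small to hold all layer names. */
		if (rte_get_ptype_name(m->packet_type, buf, sizeof(buf)) == 0)
			printf("ptype: %s\n", buf);

		/* Bit 4 is set for every IPv4 outer L3 value, so a single
		 * AND classifies the outer header. */
		if (RTE_ETH_IS_IPV4_HDR(m->packet_type))
			printf("outer L3 is IPv4\n");
	}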