author     Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-21 11:54:28 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-21 11:54:28 +0000
commit     e6918187568dbd01842d8d1d2c808ce16a894239 (patch)
tree       64f88b554b444a49f656b6c656111a145cbbaa28 /src/spdk/dpdk/lib/librte_mempool
parent     Initial commit. (diff)
download   ceph-e6918187568dbd01842d8d1d2c808ce16a894239.tar.xz
           ceph-e6918187568dbd01842d8d1d2c808ce16a894239.zip

Adding upstream version 18.2.2. (upstream/18.2.2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/dpdk/lib/librte_mempool')
-rw-r--r--  src/spdk/dpdk/lib/librte_mempool/Makefile                   |   24
-rw-r--r--  src/spdk/dpdk/lib/librte_mempool/mempool_trace_points.c     |  107
-rw-r--r--  src/spdk/dpdk/lib/librte_mempool/meson.build                |   18
-rw-r--r--  src/spdk/dpdk/lib/librte_mempool/rte_mempool.c              | 1403
-rw-r--r--  src/spdk/dpdk/lib/librte_mempool/rte_mempool.h              | 1811
-rw-r--r--  src/spdk/dpdk/lib/librte_mempool/rte_mempool_ops.c          |  189
-rw-r--r--  src/spdk/dpdk/lib/librte_mempool/rte_mempool_ops_default.c  |  145
-rw-r--r--  src/spdk/dpdk/lib/librte_mempool/rte_mempool_trace.h        |  175
-rw-r--r--  src/spdk/dpdk/lib/librte_mempool/rte_mempool_trace_fp.h     |  116
-rw-r--r--  src/spdk/dpdk/lib/librte_mempool/rte_mempool_version.map    |   77
10 files changed, 4065 insertions, 0 deletions
diff --git a/src/spdk/dpdk/lib/librte_mempool/Makefile b/src/spdk/dpdk/lib/librte_mempool/Makefile
new file mode 100644
index 000000000..432d6217e
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_mempool/Makefile
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2014 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_mempool.a
+
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
+LDLIBS += -lrte_eal -lrte_ring
+
+EXPORT_MAP := rte_mempool_version.map
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_mempool.c
+SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_mempool_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_mempool_ops_default.c
+SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += mempool_trace_points.c
+# install includes
+SYMLINK-$(CONFIG_RTE_LIBRTE_MEMPOOL)-include := rte_mempool.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_MEMPOOL)-include += rte_mempool_trace.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_MEMPOOL)-include += rte_mempool_trace_fp.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/lib/librte_mempool/mempool_trace_points.c b/src/spdk/dpdk/lib/librte_mempool/mempool_trace_points.c
new file mode 100644
index 000000000..df4368b17
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_mempool/mempool_trace_points.c
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+
+#include <rte_trace_point_register.h>
+
+#include "rte_mempool_trace.h"
+
+RTE_TRACE_POINT_DEFINE(rte_mempool_trace_ops_dequeue_bulk);
+RTE_TRACE_POINT_DEFINE(rte_mempool_trace_ops_dequeue_contig_blocks);
+RTE_TRACE_POINT_DEFINE(rte_mempool_trace_ops_enqueue_bulk);
+RTE_TRACE_POINT_DEFINE(rte_mempool_trace_generic_put);
+RTE_TRACE_POINT_DEFINE(rte_mempool_trace_put_bulk);
+RTE_TRACE_POINT_DEFINE(rte_mempool_trace_generic_get);
+RTE_TRACE_POINT_DEFINE(rte_mempool_trace_get_bulk);
+RTE_TRACE_POINT_DEFINE(rte_mempool_trace_get_contig_blocks);
+RTE_TRACE_POINT_DEFINE(rte_mempool_trace_create);
+RTE_TRACE_POINT_DEFINE(rte_mempool_trace_create_empty);
+RTE_TRACE_POINT_DEFINE(rte_mempool_trace_free);
+RTE_TRACE_POINT_DEFINE(rte_mempool_trace_populate_iova);
+RTE_TRACE_POINT_DEFINE(rte_mempool_trace_populate_virt);
+RTE_TRACE_POINT_DEFINE(rte_mempool_trace_populate_default);
+RTE_TRACE_POINT_DEFINE(rte_mempool_trace_populate_anon);
+RTE_TRACE_POINT_DEFINE(rte_mempool_trace_cache_create);
+RTE_TRACE_POINT_DEFINE(rte_mempool_trace_cache_free);
+RTE_TRACE_POINT_DEFINE(rte_mempool_trace_default_cache);
+RTE_TRACE_POINT_DEFINE(rte_mempool_trace_get_page_size);
+RTE_TRACE_POINT_DEFINE(rte_mempool_trace_cache_flush);
+RTE_TRACE_POINT_DEFINE(rte_mempool_trace_ops_populate);
+RTE_TRACE_POINT_DEFINE(rte_mempool_trace_ops_alloc);
+RTE_TRACE_POINT_DEFINE(rte_mempool_trace_ops_free);
+RTE_TRACE_POINT_DEFINE(rte_mempool_trace_set_ops_byname);
+
+RTE_INIT(mempool_trace_init)
+{
+ RTE_TRACE_POINT_REGISTER(rte_mempool_trace_ops_dequeue_bulk,
+ lib.mempool.ops.deq.bulk);
+
+ RTE_TRACE_POINT_REGISTER(rte_mempool_trace_ops_dequeue_contig_blocks,
+ lib.mempool.ops.deq.contig);
+
+ RTE_TRACE_POINT_REGISTER(rte_mempool_trace_ops_enqueue_bulk,
+ lib.mempool.ops.enq.bulk);
+
+ RTE_TRACE_POINT_REGISTER(rte_mempool_trace_generic_put,
+ lib.mempool.generic.put);
+
+ RTE_TRACE_POINT_REGISTER(rte_mempool_trace_put_bulk,
+ lib.mempool.put.bulk);
+
+ RTE_TRACE_POINT_REGISTER(rte_mempool_trace_generic_get,
+ lib.mempool.generic.get);
+
+ RTE_TRACE_POINT_REGISTER(rte_mempool_trace_get_bulk,
+ lib.mempool.get.bulk);
+
+ RTE_TRACE_POINT_REGISTER(rte_mempool_trace_get_contig_blocks,
+ lib.mempool.get.blocks);
+
+ RTE_TRACE_POINT_REGISTER(rte_mempool_trace_create,
+ lib.mempool.create);
+
+ RTE_TRACE_POINT_REGISTER(rte_mempool_trace_create_empty,
+ lib.mempool.create.empty);
+
+ RTE_TRACE_POINT_REGISTER(rte_mempool_trace_free,
+ lib.mempool.free);
+
+ RTE_TRACE_POINT_REGISTER(rte_mempool_trace_populate_iova,
+ lib.mempool.populate.iova);
+
+ RTE_TRACE_POINT_REGISTER(rte_mempool_trace_populate_virt,
+ lib.mempool.populate.virt);
+
+ RTE_TRACE_POINT_REGISTER(rte_mempool_trace_populate_default,
+ lib.mempool.populate.default);
+
+ RTE_TRACE_POINT_REGISTER(rte_mempool_trace_populate_anon,
+ lib.mempool.populate.anon);
+
+ RTE_TRACE_POINT_REGISTER(rte_mempool_trace_cache_create,
+ lib.mempool.cache_create);
+
+ RTE_TRACE_POINT_REGISTER(rte_mempool_trace_cache_free,
+ lib.mempool.cache.free);
+
+ RTE_TRACE_POINT_REGISTER(rte_mempool_trace_default_cache,
+ lib.mempool.default.cache);
+
+ RTE_TRACE_POINT_REGISTER(rte_mempool_trace_get_page_size,
+ lib.mempool.get.page.size);
+
+ RTE_TRACE_POINT_REGISTER(rte_mempool_trace_cache_flush,
+ lib.mempool.cache.flush);
+
+ RTE_TRACE_POINT_REGISTER(rte_mempool_trace_ops_populate,
+ lib.mempool.ops.populate);
+
+ RTE_TRACE_POINT_REGISTER(rte_mempool_trace_ops_alloc,
+ lib.mempool.ops.alloc);
+
+ RTE_TRACE_POINT_REGISTER(rte_mempool_trace_ops_free,
+ lib.mempool.ops.free);
+
+ RTE_TRACE_POINT_REGISTER(rte_mempool_trace_set_ops_byname,
+ lib.mempool.set.ops.byname);
+}
diff --git a/src/spdk/dpdk/lib/librte_mempool/meson.build b/src/spdk/dpdk/lib/librte_mempool/meson.build
new file mode 100644
index 000000000..7dbe6b9be
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_mempool/meson.build
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+extra_flags = []
+
+foreach flag: extra_flags
+ if cc.has_argument(flag)
+ cflags += flag
+ endif
+endforeach
+
+use_function_versioning = true
+
+sources = files('rte_mempool.c', 'rte_mempool_ops.c',
+ 'rte_mempool_ops_default.c', 'mempool_trace_points.c')
+headers = files('rte_mempool.h', 'rte_mempool_trace.h',
+ 'rte_mempool_trace_fp.h')
+deps += ['ring']
diff --git a/src/spdk/dpdk/lib/librte_mempool/rte_mempool.c b/src/spdk/dpdk/lib/librte_mempool/rte_mempool.c
new file mode 100644
index 000000000..0bde995b5
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_mempool/rte_mempool.c
@@ -0,0 +1,1403 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation.
+ * Copyright(c) 2016 6WIND S.A.
+ */
+
+#include <stdbool.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/queue.h>
+#include <sys/mman.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_atomic.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_eal_memconfig.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_errno.h>
+#include <rte_string_fns.h>
+#include <rte_spinlock.h>
+#include <rte_tailq.h>
+#include <rte_function_versioning.h>
+
+#include "rte_mempool.h"
+#include "rte_mempool_trace.h"
+
+TAILQ_HEAD(rte_mempool_list, rte_tailq_entry);
+
+static struct rte_tailq_elem rte_mempool_tailq = {
+ .name = "RTE_MEMPOOL",
+};
+EAL_REGISTER_TAILQ(rte_mempool_tailq)
+
+#define CACHE_FLUSHTHRESH_MULTIPLIER 1.5
+#define CALC_CACHE_FLUSHTHRESH(c) \
+ ((typeof(c))((c) * CACHE_FLUSHTHRESH_MULTIPLIER))
+
+#if defined(RTE_ARCH_X86)
+/*
+ * return the greatest common divisor of a and b (Euclidean algorithm)
+ */
+static unsigned get_gcd(unsigned a, unsigned b)
+{
+ unsigned c;
+
+ if (0 == a)
+ return b;
+ if (0 == b)
+ return a;
+
+ if (a < b) {
+ c = a;
+ a = b;
+ b = c;
+ }
+
+ while (b != 0) {
+ c = a % b;
+ a = b;
+ b = c;
+ }
+
+ return a;
+}
+
+/*
+ * Depending on the memory configuration on x86, object addresses are spread
+ * between channels and ranks in RAM: the pool allocator will add
+ * padding between objects. This function returns the new size of the
+ * object.
+ */
+static unsigned int
+arch_mem_object_align(unsigned int obj_size)
+{
+ unsigned nrank, nchan;
+ unsigned new_obj_size;
+
+ /* get number of channels */
+ nchan = rte_memory_get_nchannel();
+ if (nchan == 0)
+ nchan = 4;
+
+ nrank = rte_memory_get_nrank();
+ if (nrank == 0)
+ nrank = 1;
+
+ /* process new object size */
+ new_obj_size = (obj_size + RTE_MEMPOOL_ALIGN_MASK) / RTE_MEMPOOL_ALIGN;
+ while (get_gcd(new_obj_size, nrank * nchan) != 1)
+ new_obj_size++;
+ return new_obj_size * RTE_MEMPOOL_ALIGN;
+}
+#else
+static unsigned int
+arch_mem_object_align(unsigned int obj_size)
+{
+ return obj_size;
+}
+#endif
+
+struct pagesz_walk_arg {
+ int socket_id;
+ size_t min;
+};
+
+static int
+find_min_pagesz(const struct rte_memseg_list *msl, void *arg)
+{
+ struct pagesz_walk_arg *wa = arg;
+ bool valid;
+
+ /*
+ * we need to only look at page sizes available for a particular socket
+ * ID. so, we either need an exact match on socket ID (can match both
+ * native and external memory), or, if SOCKET_ID_ANY was specified as a
+ * socket ID argument, we must only look at native memory and ignore any
+ * page sizes associated with external memory.
+ */
+ valid = msl->socket_id == wa->socket_id;
+ valid |= wa->socket_id == SOCKET_ID_ANY && msl->external == 0;
+
+ if (valid && msl->page_sz < wa->min)
+ wa->min = msl->page_sz;
+
+ return 0;
+}
+
+static size_t
+get_min_page_size(int socket_id)
+{
+ struct pagesz_walk_arg wa;
+
+ wa.min = SIZE_MAX;
+ wa.socket_id = socket_id;
+
+ rte_memseg_list_walk(find_min_pagesz, &wa);
+
+ return wa.min == SIZE_MAX ? (size_t) getpagesize() : wa.min;
+}
+
+
+static void
+mempool_add_elem(struct rte_mempool *mp, __rte_unused void *opaque,
+ void *obj, rte_iova_t iova)
+{
+ struct rte_mempool_objhdr *hdr;
+ struct rte_mempool_objtlr *tlr __rte_unused;
+
+ /* set mempool ptr in header */
+ hdr = RTE_PTR_SUB(obj, sizeof(*hdr));
+ hdr->mp = mp;
+ hdr->iova = iova;
+ STAILQ_INSERT_TAIL(&mp->elt_list, hdr, next);
+ mp->populated_size++;
+
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE2;
+ tlr = __mempool_get_trailer(obj);
+ tlr->cookie = RTE_MEMPOOL_TRAILER_COOKIE;
+#endif
+}
+
+/* call obj_cb() for each mempool element */
+uint32_t
+rte_mempool_obj_iter(struct rte_mempool *mp,
+ rte_mempool_obj_cb_t *obj_cb, void *obj_cb_arg)
+{
+ struct rte_mempool_objhdr *hdr;
+ void *obj;
+ unsigned n = 0;
+
+ STAILQ_FOREACH(hdr, &mp->elt_list, next) {
+ obj = (char *)hdr + sizeof(*hdr);
+ obj_cb(mp, obj_cb_arg, obj, n);
+ n++;
+ }
+
+ return n;
+}
+
+/* call mem_cb() for each mempool memory chunk */
+uint32_t
+rte_mempool_mem_iter(struct rte_mempool *mp,
+ rte_mempool_mem_cb_t *mem_cb, void *mem_cb_arg)
+{
+ struct rte_mempool_memhdr *hdr;
+ unsigned n = 0;
+
+ STAILQ_FOREACH(hdr, &mp->mem_list, next) {
+ mem_cb(mp, mem_cb_arg, hdr, n);
+ n++;
+ }
+
+ return n;
+}
+
+/* get the header, trailer and total size of a mempool element. */
+uint32_t
+rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
+ struct rte_mempool_objsz *sz)
+{
+ struct rte_mempool_objsz lsz;
+
+ sz = (sz != NULL) ? sz : &lsz;
+
+ sz->header_size = sizeof(struct rte_mempool_objhdr);
+ if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0)
+ sz->header_size = RTE_ALIGN_CEIL(sz->header_size,
+ RTE_MEMPOOL_ALIGN);
+
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ sz->trailer_size = sizeof(struct rte_mempool_objtlr);
+#else
+ sz->trailer_size = 0;
+#endif
+
+	/* element size is at least 8-byte aligned */
+ sz->elt_size = RTE_ALIGN_CEIL(elt_size, sizeof(uint64_t));
+
+ /* expand trailer to next cache line */
+ if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0) {
+ sz->total_size = sz->header_size + sz->elt_size +
+ sz->trailer_size;
+ sz->trailer_size += ((RTE_MEMPOOL_ALIGN -
+ (sz->total_size & RTE_MEMPOOL_ALIGN_MASK)) &
+ RTE_MEMPOOL_ALIGN_MASK);
+ }
+
+ /*
+ * increase trailer to add padding between objects in order to
+ * spread them across memory channels/ranks
+ */
+ if ((flags & MEMPOOL_F_NO_SPREAD) == 0) {
+ unsigned new_size;
+ new_size = arch_mem_object_align
+ (sz->header_size + sz->elt_size + sz->trailer_size);
+ sz->trailer_size = new_size - sz->header_size - sz->elt_size;
+ }
+
+ /* this is the size of an object, including header and trailer */
+ sz->total_size = sz->header_size + sz->elt_size + sz->trailer_size;
+
+ return sz->total_size;
+}
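
For orientation, a caller can use this helper to inspect the per-object footprint that a given set of flags produces before creating a pool. A minimal sketch (the 128-byte element size and the zero flags are arbitrary examples, not taken from this patch):

    #include <stdio.h>
    #include <rte_mempool.h>

    static void
    show_obj_footprint(void)
    {
            struct rte_mempool_objsz sz;
            uint32_t total;

            /* 128-byte elements, default flags: header and trailer are
             * padded for cache alignment and channel/rank spreading. */
            total = rte_mempool_calc_obj_size(128, 0, &sz);

            printf("header=%u elt=%u trailer=%u total=%u\n",
                   sz.header_size, sz.elt_size, sz.trailer_size, total);
    }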
+
+/* free a memchunk allocated with rte_memzone_reserve() */
+static void
+rte_mempool_memchunk_mz_free(__rte_unused struct rte_mempool_memhdr *memhdr,
+ void *opaque)
+{
+ const struct rte_memzone *mz = opaque;
+ rte_memzone_free(mz);
+}
+
+/* Free memory chunks used by a mempool. Objects must be in pool */
+static void
+rte_mempool_free_memchunks(struct rte_mempool *mp)
+{
+ struct rte_mempool_memhdr *memhdr;
+ void *elt;
+
+ while (!STAILQ_EMPTY(&mp->elt_list)) {
+ rte_mempool_ops_dequeue_bulk(mp, &elt, 1);
+ (void)elt;
+ STAILQ_REMOVE_HEAD(&mp->elt_list, next);
+ mp->populated_size--;
+ }
+
+ while (!STAILQ_EMPTY(&mp->mem_list)) {
+ memhdr = STAILQ_FIRST(&mp->mem_list);
+ STAILQ_REMOVE_HEAD(&mp->mem_list, next);
+ if (memhdr->free_cb != NULL)
+ memhdr->free_cb(memhdr, memhdr->opaque);
+ rte_free(memhdr);
+ mp->nb_mem_chunks--;
+ }
+}
+
+static int
+mempool_ops_alloc_once(struct rte_mempool *mp)
+{
+ int ret;
+
+ /* create the internal ring if not already done */
+ if ((mp->flags & MEMPOOL_F_POOL_CREATED) == 0) {
+ ret = rte_mempool_ops_alloc(mp);
+ if (ret != 0)
+ return ret;
+ mp->flags |= MEMPOOL_F_POOL_CREATED;
+ }
+ return 0;
+}
+
+__vsym int
+rte_mempool_populate_iova_v21(struct rte_mempool *mp, char *vaddr,
+ rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
+ void *opaque);
+
+/* Add objects in the pool, using a physically contiguous memory
+ * zone. Return the number of objects added, or a negative value
+ * on error.
+ */
+__vsym int
+rte_mempool_populate_iova_v21(struct rte_mempool *mp, char *vaddr,
+ rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
+ void *opaque)
+{
+ unsigned i = 0;
+ size_t off;
+ struct rte_mempool_memhdr *memhdr;
+ int ret;
+
+ ret = mempool_ops_alloc_once(mp);
+ if (ret != 0)
+ return ret;
+
+ /* mempool is already populated */
+ if (mp->populated_size >= mp->size)
+ return -ENOSPC;
+
+ memhdr = rte_zmalloc("MEMPOOL_MEMHDR", sizeof(*memhdr), 0);
+ if (memhdr == NULL)
+ return -ENOMEM;
+
+ memhdr->mp = mp;
+ memhdr->addr = vaddr;
+ memhdr->iova = iova;
+ memhdr->len = len;
+ memhdr->free_cb = free_cb;
+ memhdr->opaque = opaque;
+
+ if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN)
+ off = RTE_PTR_ALIGN_CEIL(vaddr, 8) - vaddr;
+ else
+ off = RTE_PTR_ALIGN_CEIL(vaddr, RTE_MEMPOOL_ALIGN) - vaddr;
+
+ if (off > len) {
+ ret = 0;
+ goto fail;
+ }
+
+ i = rte_mempool_ops_populate(mp, mp->size - mp->populated_size,
+ (char *)vaddr + off,
+ (iova == RTE_BAD_IOVA) ? RTE_BAD_IOVA : (iova + off),
+ len - off, mempool_add_elem, NULL);
+
+ /* not enough room to store one object */
+ if (i == 0) {
+ ret = 0;
+ goto fail;
+ }
+
+ STAILQ_INSERT_TAIL(&mp->mem_list, memhdr, next);
+ mp->nb_mem_chunks++;
+
+ rte_mempool_trace_populate_iova(mp, vaddr, iova, len, free_cb, opaque);
+ return i;
+
+fail:
+ rte_free(memhdr);
+ return ret;
+}
+
+BIND_DEFAULT_SYMBOL(rte_mempool_populate_iova, _v21, 21);
+MAP_STATIC_SYMBOL(
+ int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
+ rte_iova_t iova, size_t len,
+ rte_mempool_memchunk_free_cb_t *free_cb,
+ void *opaque),
+ rte_mempool_populate_iova_v21);
+
+__vsym int
+rte_mempool_populate_iova_v20(struct rte_mempool *mp, char *vaddr,
+ rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
+ void *opaque);
+
+__vsym int
+rte_mempool_populate_iova_v20(struct rte_mempool *mp, char *vaddr,
+ rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
+ void *opaque)
+{
+ int ret;
+
+ ret = rte_mempool_populate_iova_v21(mp, vaddr, iova, len, free_cb,
+ opaque);
+ if (ret == 0)
+ ret = -EINVAL;
+
+ return ret;
+}
+VERSION_SYMBOL(rte_mempool_populate_iova, _v20, 20.0);
+
+static rte_iova_t
+get_iova(void *addr)
+{
+ struct rte_memseg *ms;
+
+ /* try registered memory first */
+ ms = rte_mem_virt2memseg(addr, NULL);
+ if (ms == NULL || ms->iova == RTE_BAD_IOVA)
+ /* fall back to actual physical address */
+ return rte_mem_virt2iova(addr);
+ return ms->iova + RTE_PTR_DIFF(addr, ms->addr);
+}
+
+__vsym int
+rte_mempool_populate_virt_v21(struct rte_mempool *mp, char *addr,
+ size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
+ void *opaque);
+
+/* Populate the mempool with a virtual area. Return the number of
+ * objects added, or a negative value on error.
+ */
+__vsym int
+rte_mempool_populate_virt_v21(struct rte_mempool *mp, char *addr,
+ size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
+ void *opaque)
+{
+ rte_iova_t iova;
+ size_t off, phys_len;
+ int ret, cnt = 0;
+
+ if (mp->flags & MEMPOOL_F_NO_IOVA_CONTIG)
+ return rte_mempool_populate_iova(mp, addr, RTE_BAD_IOVA,
+ len, free_cb, opaque);
+
+ for (off = 0; off < len &&
+ mp->populated_size < mp->size; off += phys_len) {
+
+ iova = get_iova(addr + off);
+
+ /* populate with the largest group of contiguous pages */
+ for (phys_len = RTE_MIN(
+ (size_t)(RTE_PTR_ALIGN_CEIL(addr + off + 1, pg_sz) -
+ (addr + off)),
+ len - off);
+ off + phys_len < len;
+ phys_len = RTE_MIN(phys_len + pg_sz, len - off)) {
+ rte_iova_t iova_tmp;
+
+ iova_tmp = get_iova(addr + off + phys_len);
+
+ if (iova_tmp == RTE_BAD_IOVA ||
+ iova_tmp != iova + phys_len)
+ break;
+ }
+
+ ret = rte_mempool_populate_iova_v21(mp, addr + off, iova,
+ phys_len, free_cb, opaque);
+ if (ret == 0)
+ continue;
+ if (ret < 0)
+ goto fail;
+ /* no need to call the free callback for next chunks */
+ free_cb = NULL;
+ cnt += ret;
+ }
+
+ rte_mempool_trace_populate_virt(mp, addr, len, pg_sz, free_cb, opaque);
+ return cnt;
+
+ fail:
+ rte_mempool_free_memchunks(mp);
+ return ret;
+}
+BIND_DEFAULT_SYMBOL(rte_mempool_populate_virt, _v21, 21);
+MAP_STATIC_SYMBOL(
+ int rte_mempool_populate_virt(struct rte_mempool *mp,
+ char *addr, size_t len, size_t pg_sz,
+ rte_mempool_memchunk_free_cb_t *free_cb,
+ void *opaque),
+ rte_mempool_populate_virt_v21);
+
+__vsym int
+rte_mempool_populate_virt_v20(struct rte_mempool *mp, char *addr,
+ size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
+ void *opaque);
+
+__vsym int
+rte_mempool_populate_virt_v20(struct rte_mempool *mp, char *addr,
+ size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
+ void *opaque)
+{
+ int ret;
+
+ ret = rte_mempool_populate_virt_v21(mp, addr, len, pg_sz,
+ free_cb, opaque);
+
+ if (ret == 0)
+ ret = -EINVAL;
+
+ return ret;
+}
+VERSION_SYMBOL(rte_mempool_populate_virt, _v20, 20.0);
+
+/* Get the minimal page size used in a mempool before populating it. */
+int
+rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz)
+{
+ bool need_iova_contig_obj;
+ bool alloc_in_ext_mem;
+ int ret;
+
+ /* check if we can retrieve a valid socket ID */
+ ret = rte_malloc_heap_socket_is_external(mp->socket_id);
+ if (ret < 0)
+ return -EINVAL;
+ alloc_in_ext_mem = (ret == 1);
+ need_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG);
+
+ if (!need_iova_contig_obj)
+ *pg_sz = 0;
+ else if (rte_eal_has_hugepages() || alloc_in_ext_mem)
+ *pg_sz = get_min_page_size(mp->socket_id);
+ else
+ *pg_sz = getpagesize();
+
+ rte_mempool_trace_get_page_size(mp, *pg_sz);
+ return 0;
+}
+
+/* Default function to populate the mempool: allocate memory in memzones,
+ * and populate them. Return the number of objects added, or a negative
+ * value on error.
+ */
+int
+rte_mempool_populate_default(struct rte_mempool *mp)
+{
+ unsigned int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz;
+ ssize_t mem_size;
+ size_t align, pg_sz, pg_shift = 0;
+ rte_iova_t iova;
+ unsigned mz_id, n;
+ int ret;
+ bool need_iova_contig_obj;
+ size_t max_alloc_size = SIZE_MAX;
+
+ ret = mempool_ops_alloc_once(mp);
+ if (ret != 0)
+ return ret;
+
+ /* mempool must not be populated */
+ if (mp->nb_mem_chunks != 0)
+ return -EEXIST;
+
+ /*
+ * the following section calculates page shift and page size values.
+ *
+ * these values impact the result of calc_mem_size operation, which
+ * returns the amount of memory that should be allocated to store the
+ * desired number of objects. when not zero, it allocates more memory
+ * for the padding between objects, to ensure that an object does not
+ * cross a page boundary. in other words, page size/shift are to be set
+ * to zero if mempool elements won't care about page boundaries.
+ * there are several considerations for page size and page shift here.
+ *
+ * if we don't need our mempools to have physically contiguous objects,
+ * then just set page shift and page size to 0, because the user has
+ * indicated that there's no need to care about anything.
+ *
+ * if we do need contiguous objects (if a mempool driver has its
+ * own calc_size() method returning min_chunk_size = mem_size),
+ * there is also an option to reserve the entire mempool memory
+ * as one contiguous block of memory.
+ *
+ * if we require contiguous objects, but not necessarily the entire
+ * mempool reserved space to be contiguous, pg_sz will be != 0,
+ * and the default ops->populate() will take care of not placing
+ * objects across pages.
+ *
+ * if our IO addresses are physical, we may get memory from bigger
+ * pages, or we might get memory from smaller pages, and how much of it
+ * we require depends on whether we want bigger or smaller pages.
+ * However, requesting each and every memory size is too much work, so
+ * what we'll do instead is walk through the page sizes available, pick
+ * the smallest one and set up page shift to match that one. We will be
+ * wasting some space this way, but it's much nicer than looping around
+ * trying to reserve each and every page size.
+ *
+ * If we fail to get enough contiguous memory, then we'll go and
+ * reserve space in smaller chunks.
+ */
+
+ need_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG);
+ ret = rte_mempool_get_page_size(mp, &pg_sz);
+ if (ret < 0)
+ return ret;
+
+ if (pg_sz != 0)
+ pg_shift = rte_bsf32(pg_sz);
+
+ for (mz_id = 0, n = mp->size; n > 0; mz_id++, n -= ret) {
+ size_t min_chunk_size;
+
+ mem_size = rte_mempool_ops_calc_mem_size(
+ mp, n, pg_shift, &min_chunk_size, &align);
+
+ if (mem_size < 0) {
+ ret = mem_size;
+ goto fail;
+ }
+
+ ret = snprintf(mz_name, sizeof(mz_name),
+ RTE_MEMPOOL_MZ_FORMAT "_%d", mp->name, mz_id);
+ if (ret < 0 || ret >= (int)sizeof(mz_name)) {
+ ret = -ENAMETOOLONG;
+ goto fail;
+ }
+
+ /* if we're trying to reserve contiguous memory, add appropriate
+ * memzone flag.
+ */
+ if (min_chunk_size == (size_t)mem_size)
+ mz_flags |= RTE_MEMZONE_IOVA_CONTIG;
+
+ /* Allocate a memzone, retrying with a smaller area on ENOMEM */
+ do {
+ mz = rte_memzone_reserve_aligned(mz_name,
+ RTE_MIN((size_t)mem_size, max_alloc_size),
+ mp->socket_id, mz_flags, align);
+
+ if (mz == NULL && rte_errno != ENOMEM)
+ break;
+
+ max_alloc_size = RTE_MIN(max_alloc_size,
+ (size_t)mem_size) / 2;
+ } while (mz == NULL && max_alloc_size >= min_chunk_size);
+
+ if (mz == NULL) {
+ ret = -rte_errno;
+ goto fail;
+ }
+
+ if (need_iova_contig_obj)
+ iova = mz->iova;
+ else
+ iova = RTE_BAD_IOVA;
+
+ if (pg_sz == 0 || (mz_flags & RTE_MEMZONE_IOVA_CONTIG))
+ ret = rte_mempool_populate_iova(mp, mz->addr,
+ iova, mz->len,
+ rte_mempool_memchunk_mz_free,
+ (void *)(uintptr_t)mz);
+ else
+ ret = rte_mempool_populate_virt(mp, mz->addr,
+ mz->len, pg_sz,
+ rte_mempool_memchunk_mz_free,
+ (void *)(uintptr_t)mz);
+ if (ret == 0) /* should not happen */
+ ret = -ENOBUFS;
+ if (ret < 0) {
+ rte_memzone_free(mz);
+ goto fail;
+ }
+ }
+
+ rte_mempool_trace_populate_default(mp);
+ return mp->size;
+
+ fail:
+ rte_mempool_free_memchunks(mp);
+ return ret;
+}
+
+/* return the memory size required for mempool objects in anonymous mem */
+static ssize_t
+get_anon_size(const struct rte_mempool *mp)
+{
+ ssize_t size;
+ size_t pg_sz, pg_shift;
+ size_t min_chunk_size;
+ size_t align;
+
+ pg_sz = getpagesize();
+ pg_shift = rte_bsf32(pg_sz);
+ size = rte_mempool_ops_calc_mem_size(mp, mp->size, pg_shift,
+ &min_chunk_size, &align);
+
+ return size;
+}
+
+/* unmap a memory zone mapped by rte_mempool_populate_anon() */
+static void
+rte_mempool_memchunk_anon_free(struct rte_mempool_memhdr *memhdr,
+ void *opaque)
+{
+ ssize_t size;
+
+ /*
+ * Calculate size since memhdr->len has contiguous chunk length
+ * which may be smaller if anon map is split into many contiguous
+ * chunks. Result must be the same as we calculated on populate.
+ */
+ size = get_anon_size(memhdr->mp);
+ if (size < 0)
+ return;
+
+ munmap(opaque, size);
+}
+
+/* populate the mempool with an anonymous mapping */
+int
+rte_mempool_populate_anon(struct rte_mempool *mp)
+{
+ ssize_t size;
+ int ret;
+ char *addr;
+
+ /* mempool is already populated, error */
+ if ((!STAILQ_EMPTY(&mp->mem_list)) || mp->nb_mem_chunks != 0) {
+ rte_errno = EINVAL;
+ return 0;
+ }
+
+ ret = mempool_ops_alloc_once(mp);
+ if (ret < 0) {
+ rte_errno = -ret;
+ return 0;
+ }
+
+ size = get_anon_size(mp);
+ if (size < 0) {
+ rte_errno = -size;
+ return 0;
+ }
+
+	/* get a chunk of virtually contiguous memory */
+ addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+ if (addr == MAP_FAILED) {
+ rte_errno = errno;
+ return 0;
+ }
+	/* can't use MAP_LOCKED, it does not exist on BSD */
+ if (mlock(addr, size) < 0) {
+ rte_errno = errno;
+ munmap(addr, size);
+ return 0;
+ }
+
+ ret = rte_mempool_populate_virt(mp, addr, size, getpagesize(),
+ rte_mempool_memchunk_anon_free, addr);
+ if (ret == 0) /* should not happen */
+ ret = -ENOBUFS;
+ if (ret < 0) {
+ rte_errno = -ret;
+ goto fail;
+ }
+
+ rte_mempool_trace_populate_anon(mp);
+ return mp->populated_size;
+
+ fail:
+ rte_mempool_free_memchunks(mp);
+ return 0;
+}
+
+/* free a mempool */
+void
+rte_mempool_free(struct rte_mempool *mp)
+{
+ struct rte_mempool_list *mempool_list = NULL;
+ struct rte_tailq_entry *te;
+
+ if (mp == NULL)
+ return;
+
+ mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
+ rte_mcfg_tailq_write_lock();
+ /* find out tailq entry */
+ TAILQ_FOREACH(te, mempool_list, next) {
+ if (te->data == (void *)mp)
+ break;
+ }
+
+ if (te != NULL) {
+ TAILQ_REMOVE(mempool_list, te, next);
+ rte_free(te);
+ }
+ rte_mcfg_tailq_write_unlock();
+
+ rte_mempool_trace_free(mp);
+ rte_mempool_free_memchunks(mp);
+ rte_mempool_ops_free(mp);
+ rte_memzone_free(mp->mz);
+}
+
+static void
+mempool_cache_init(struct rte_mempool_cache *cache, uint32_t size)
+{
+ cache->size = size;
+ cache->flushthresh = CALC_CACHE_FLUSHTHRESH(size);
+ cache->len = 0;
+}
+
+/*
+ * Create and initialize a cache for objects that are retrieved from and
+ * returned to an underlying mempool. This structure is identical to the
+ * local_cache[lcore_id] pointed to by the mempool structure.
+ */
+struct rte_mempool_cache *
+rte_mempool_cache_create(uint32_t size, int socket_id)
+{
+ struct rte_mempool_cache *cache;
+
+ if (size == 0 || size > RTE_MEMPOOL_CACHE_MAX_SIZE) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ cache = rte_zmalloc_socket("MEMPOOL_CACHE", sizeof(*cache),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (cache == NULL) {
+ RTE_LOG(ERR, MEMPOOL, "Cannot allocate mempool cache.\n");
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+
+ mempool_cache_init(cache, size);
+
+ rte_mempool_trace_cache_create(size, socket_id, cache);
+ return cache;
+}
+
+/*
+ * Free a cache. It's the responsibility of the user to make sure that any
+ * remaining objects in the cache are flushed to the corresponding
+ * mempool.
+ */
+void
+rte_mempool_cache_free(struct rte_mempool_cache *cache)
+{
+ rte_mempool_trace_cache_free(cache);
+ rte_free(cache);
+}
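
As the comment above notes, the caller must drain a cache before freeing it. A possible pattern for a non-EAL thread is sketched below, assuming `mp` points to a mempool created elsewhere and that the cache size of 32 is just an example:

    static int
    worker_with_private_cache(struct rte_mempool *mp)
    {
            struct rte_mempool_cache *cache;
            void *obj;

            cache = rte_mempool_cache_create(32, SOCKET_ID_ANY);
            if (cache == NULL)
                    return -rte_errno;

            if (rte_mempool_generic_get(mp, &obj, 1, cache) == 0) {
                    /* ... use obj ... */
                    rte_mempool_generic_put(mp, &obj, 1, cache);
            }

            /* Return any objects still held in the cache to the pool
             * before freeing the cache itself. */
            rte_mempool_cache_flush(cache, mp);
            rte_mempool_cache_free(cache);
            return 0;
    }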
+
+/* create an empty mempool */
+struct rte_mempool *
+rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
+ unsigned cache_size, unsigned private_data_size,
+ int socket_id, unsigned flags)
+{
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ struct rte_mempool_list *mempool_list;
+ struct rte_mempool *mp = NULL;
+ struct rte_tailq_entry *te = NULL;
+ const struct rte_memzone *mz = NULL;
+ size_t mempool_size;
+ unsigned int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
+ struct rte_mempool_objsz objsz;
+ unsigned lcore_id;
+ int ret;
+
+ /* compilation-time checks */
+ RTE_BUILD_BUG_ON((sizeof(struct rte_mempool) &
+ RTE_CACHE_LINE_MASK) != 0);
+ RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_cache) &
+ RTE_CACHE_LINE_MASK) != 0);
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_debug_stats) &
+ RTE_CACHE_LINE_MASK) != 0);
+ RTE_BUILD_BUG_ON((offsetof(struct rte_mempool, stats) &
+ RTE_CACHE_LINE_MASK) != 0);
+#endif
+
+ mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
+
+ /* asked for zero items */
+ if (n == 0) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ /* asked cache too big */
+ if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE ||
+ CALC_CACHE_FLUSHTHRESH(cache_size) > n) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+	/* "no cache align" implies "no spread" */
+ if (flags & MEMPOOL_F_NO_CACHE_ALIGN)
+ flags |= MEMPOOL_F_NO_SPREAD;
+
+ /* calculate mempool object sizes. */
+ if (!rte_mempool_calc_obj_size(elt_size, flags, &objsz)) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ rte_mcfg_mempool_write_lock();
+
+ /*
+ * reserve a memory zone for this mempool: private data is
+ * cache-aligned
+ */
+ private_data_size = (private_data_size +
+ RTE_MEMPOOL_ALIGN_MASK) & (~RTE_MEMPOOL_ALIGN_MASK);
+
+
+ /* try to allocate tailq entry */
+ te = rte_zmalloc("MEMPOOL_TAILQ_ENTRY", sizeof(*te), 0);
+ if (te == NULL) {
+ RTE_LOG(ERR, MEMPOOL, "Cannot allocate tailq entry!\n");
+ goto exit_unlock;
+ }
+
+ mempool_size = MEMPOOL_HEADER_SIZE(mp, cache_size);
+ mempool_size += private_data_size;
+ mempool_size = RTE_ALIGN_CEIL(mempool_size, RTE_MEMPOOL_ALIGN);
+
+ ret = snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_MZ_FORMAT, name);
+ if (ret < 0 || ret >= (int)sizeof(mz_name)) {
+ rte_errno = ENAMETOOLONG;
+ goto exit_unlock;
+ }
+
+ mz = rte_memzone_reserve(mz_name, mempool_size, socket_id, mz_flags);
+ if (mz == NULL)
+ goto exit_unlock;
+
+ /* init the mempool structure */
+ mp = mz->addr;
+ memset(mp, 0, MEMPOOL_HEADER_SIZE(mp, cache_size));
+ ret = strlcpy(mp->name, name, sizeof(mp->name));
+ if (ret < 0 || ret >= (int)sizeof(mp->name)) {
+ rte_errno = ENAMETOOLONG;
+ goto exit_unlock;
+ }
+ mp->mz = mz;
+ mp->size = n;
+ mp->flags = flags;
+ mp->socket_id = socket_id;
+ mp->elt_size = objsz.elt_size;
+ mp->header_size = objsz.header_size;
+ mp->trailer_size = objsz.trailer_size;
+ /* Size of default caches, zero means disabled. */
+ mp->cache_size = cache_size;
+ mp->private_data_size = private_data_size;
+ STAILQ_INIT(&mp->elt_list);
+ STAILQ_INIT(&mp->mem_list);
+
+ /*
+ * local_cache pointer is set even if cache_size is zero.
+ * The local_cache points to just past the elt_pa[] array.
+ */
+ mp->local_cache = (struct rte_mempool_cache *)
+ RTE_PTR_ADD(mp, MEMPOOL_HEADER_SIZE(mp, 0));
+
+ /* Init all default caches. */
+ if (cache_size != 0) {
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
+ mempool_cache_init(&mp->local_cache[lcore_id],
+ cache_size);
+ }
+
+ te->data = mp;
+
+ rte_mcfg_tailq_write_lock();
+ TAILQ_INSERT_TAIL(mempool_list, te, next);
+ rte_mcfg_tailq_write_unlock();
+ rte_mcfg_mempool_write_unlock();
+
+ rte_mempool_trace_create_empty(name, n, elt_size, cache_size,
+ private_data_size, flags, mp);
+ return mp;
+
+exit_unlock:
+ rte_mcfg_mempool_write_unlock();
+ rte_free(te);
+ rte_mempool_free(mp);
+ return NULL;
+}
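
The value of the empty-create step is that the caller can swap in a different ops backend or memory source before population; rte_mempool_create() below wires these calls together, and an application can do the same by hand. A sketch with placeholder name and sizes:

    static struct rte_mempool *
    make_pool_manually(void)
    {
            struct rte_mempool *mp;

            mp = rte_mempool_create_empty("example_pool", 4096, 128,
                                          32, 0, SOCKET_ID_ANY, 0);
            if (mp == NULL)
                    return NULL;

            /* Choose the driver that manages free objects; "ring_mp_mc"
             * is the multi-producer/multi-consumer default also used by
             * rte_mempool_create(). */
            if (rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL) != 0 ||
                rte_mempool_populate_default(mp) < 0) {
                    rte_mempool_free(mp);
                    return NULL;
            }
            return mp;
    }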
+
+/* create the mempool */
+struct rte_mempool *
+rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
+ unsigned cache_size, unsigned private_data_size,
+ rte_mempool_ctor_t *mp_init, void *mp_init_arg,
+ rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
+ int socket_id, unsigned flags)
+{
+ int ret;
+ struct rte_mempool *mp;
+
+ mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
+ private_data_size, socket_id, flags);
+ if (mp == NULL)
+ return NULL;
+
+ /*
+	 * Since we have 4 combinations of the SP/SC/MP/MC flags, examine them
+	 * to set the correct index into the table of ops structs.
+ */
+ if ((flags & MEMPOOL_F_SP_PUT) && (flags & MEMPOOL_F_SC_GET))
+ ret = rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL);
+ else if (flags & MEMPOOL_F_SP_PUT)
+ ret = rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL);
+ else if (flags & MEMPOOL_F_SC_GET)
+ ret = rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL);
+ else
+ ret = rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
+
+ if (ret)
+ goto fail;
+
+ /* call the mempool priv initializer */
+ if (mp_init)
+ mp_init(mp, mp_init_arg);
+
+ if (rte_mempool_populate_default(mp) < 0)
+ goto fail;
+
+ /* call the object initializers */
+ if (obj_init)
+ rte_mempool_obj_iter(mp, obj_init, obj_init_arg);
+
+ rte_mempool_trace_create(name, n, elt_size, cache_size,
+ private_data_size, mp_init, mp_init_arg, obj_init,
+ obj_init_arg, flags, mp);
+ return mp;
+
+ fail:
+ rte_mempool_free(mp);
+ return NULL;
+}
+
+/* Return the number of entries in the mempool */
+unsigned int
+rte_mempool_avail_count(const struct rte_mempool *mp)
+{
+ unsigned count;
+ unsigned lcore_id;
+
+ count = rte_mempool_ops_get_count(mp);
+
+ if (mp->cache_size == 0)
+ return count;
+
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
+ count += mp->local_cache[lcore_id].len;
+
+ /*
+	 * due to a race condition (access to len is not locked), the
+	 * total can be greater than size... so cap the result
+ */
+ if (count > mp->size)
+ return mp->size;
+ return count;
+}
+
+/* return the number of entries allocated from the mempool */
+unsigned int
+rte_mempool_in_use_count(const struct rte_mempool *mp)
+{
+ return mp->size - rte_mempool_avail_count(mp);
+}
+
+/* dump the cache status */
+static unsigned
+rte_mempool_dump_cache(FILE *f, const struct rte_mempool *mp)
+{
+ unsigned lcore_id;
+ unsigned count = 0;
+ unsigned cache_count;
+
+ fprintf(f, " internal cache infos:\n");
+ fprintf(f, " cache_size=%"PRIu32"\n", mp->cache_size);
+
+ if (mp->cache_size == 0)
+ return count;
+
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+ cache_count = mp->local_cache[lcore_id].len;
+ fprintf(f, " cache_count[%u]=%"PRIu32"\n",
+ lcore_id, cache_count);
+ count += cache_count;
+ }
+ fprintf(f, " total_cache_count=%u\n", count);
+ return count;
+}
+
+#ifndef __INTEL_COMPILER
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
+/* check and update cookies or panic (internal) */
+void rte_mempool_check_cookies(const struct rte_mempool *mp,
+ void * const *obj_table_const, unsigned n, int free)
+{
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ struct rte_mempool_objhdr *hdr;
+ struct rte_mempool_objtlr *tlr;
+ uint64_t cookie;
+ void *tmp;
+ void *obj;
+ void **obj_table;
+
+ /* Force to drop the "const" attribute. This is done only when
+ * DEBUG is enabled */
+ tmp = (void *) obj_table_const;
+ obj_table = tmp;
+
+ while (n--) {
+ obj = obj_table[n];
+
+ if (rte_mempool_from_obj(obj) != mp)
+ rte_panic("MEMPOOL: object is owned by another "
+ "mempool\n");
+
+ hdr = __mempool_get_header(obj);
+ cookie = hdr->cookie;
+
+ if (free == 0) {
+ if (cookie != RTE_MEMPOOL_HEADER_COOKIE1) {
+ RTE_LOG(CRIT, MEMPOOL,
+ "obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
+ obj, (const void *) mp, cookie);
+ rte_panic("MEMPOOL: bad header cookie (put)\n");
+ }
+ hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE2;
+ } else if (free == 1) {
+ if (cookie != RTE_MEMPOOL_HEADER_COOKIE2) {
+ RTE_LOG(CRIT, MEMPOOL,
+ "obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
+ obj, (const void *) mp, cookie);
+ rte_panic("MEMPOOL: bad header cookie (get)\n");
+ }
+ hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE1;
+ } else if (free == 2) {
+ if (cookie != RTE_MEMPOOL_HEADER_COOKIE1 &&
+ cookie != RTE_MEMPOOL_HEADER_COOKIE2) {
+ RTE_LOG(CRIT, MEMPOOL,
+ "obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
+ obj, (const void *) mp, cookie);
+ rte_panic("MEMPOOL: bad header cookie (audit)\n");
+ }
+ }
+ tlr = __mempool_get_trailer(obj);
+ cookie = tlr->cookie;
+ if (cookie != RTE_MEMPOOL_TRAILER_COOKIE) {
+ RTE_LOG(CRIT, MEMPOOL,
+ "obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
+ obj, (const void *) mp, cookie);
+ rte_panic("MEMPOOL: bad trailer cookie\n");
+ }
+ }
+#else
+ RTE_SET_USED(mp);
+ RTE_SET_USED(obj_table_const);
+ RTE_SET_USED(n);
+ RTE_SET_USED(free);
+#endif
+}
+
+void
+rte_mempool_contig_blocks_check_cookies(const struct rte_mempool *mp,
+ void * const *first_obj_table_const, unsigned int n, int free)
+{
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ struct rte_mempool_info info;
+ const size_t total_elt_sz =
+ mp->header_size + mp->elt_size + mp->trailer_size;
+ unsigned int i, j;
+
+ rte_mempool_ops_get_info(mp, &info);
+
+ for (i = 0; i < n; ++i) {
+ void *first_obj = first_obj_table_const[i];
+
+ for (j = 0; j < info.contig_block_size; ++j) {
+ void *obj;
+
+ obj = (void *)((uintptr_t)first_obj + j * total_elt_sz);
+ rte_mempool_check_cookies(mp, &obj, 1, free);
+ }
+ }
+#else
+ RTE_SET_USED(mp);
+ RTE_SET_USED(first_obj_table_const);
+ RTE_SET_USED(n);
+ RTE_SET_USED(free);
+#endif
+}
+
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+static void
+mempool_obj_audit(struct rte_mempool *mp, __rte_unused void *opaque,
+ void *obj, __rte_unused unsigned idx)
+{
+ __mempool_check_cookies(mp, &obj, 1, 2);
+}
+
+static void
+mempool_audit_cookies(struct rte_mempool *mp)
+{
+ unsigned num;
+
+ num = rte_mempool_obj_iter(mp, mempool_obj_audit, NULL);
+ if (num != mp->size) {
+ rte_panic("rte_mempool_obj_iter(mempool=%p, size=%u) "
+ "iterated only over %u elements\n",
+ mp, mp->size, num);
+ }
+}
+#else
+#define mempool_audit_cookies(mp) do {} while(0)
+#endif
+
+#ifndef __INTEL_COMPILER
+#pragma GCC diagnostic error "-Wcast-qual"
+#endif
+
+/* check per-lcore cache consistency */
+static void
+mempool_audit_cache(const struct rte_mempool *mp)
+{
+ /* check cache size consistency */
+ unsigned lcore_id;
+
+ if (mp->cache_size == 0)
+ return;
+
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+ const struct rte_mempool_cache *cache;
+ cache = &mp->local_cache[lcore_id];
+ if (cache->len > cache->flushthresh) {
+ RTE_LOG(CRIT, MEMPOOL, "badness on cache[%u]\n",
+ lcore_id);
+ rte_panic("MEMPOOL: invalid cache len\n");
+ }
+ }
+}
+
+/* check the consistency of mempool (size, cookies, ...) */
+void
+rte_mempool_audit(struct rte_mempool *mp)
+{
+ mempool_audit_cache(mp);
+ mempool_audit_cookies(mp);
+
+ /* For case where mempool DEBUG is not set, and cache size is 0 */
+ RTE_SET_USED(mp);
+}
+
+/* dump the status of the mempool on the console */
+void
+rte_mempool_dump(FILE *f, struct rte_mempool *mp)
+{
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ struct rte_mempool_info info;
+ struct rte_mempool_debug_stats sum;
+ unsigned lcore_id;
+#endif
+ struct rte_mempool_memhdr *memhdr;
+ unsigned common_count;
+ unsigned cache_count;
+ size_t mem_len = 0;
+
+ RTE_ASSERT(f != NULL);
+ RTE_ASSERT(mp != NULL);
+
+ fprintf(f, "mempool <%s>@%p\n", mp->name, mp);
+ fprintf(f, " flags=%x\n", mp->flags);
+ fprintf(f, " pool=%p\n", mp->pool_data);
+ fprintf(f, " iova=0x%" PRIx64 "\n", mp->mz->iova);
+ fprintf(f, " nb_mem_chunks=%u\n", mp->nb_mem_chunks);
+ fprintf(f, " size=%"PRIu32"\n", mp->size);
+ fprintf(f, " populated_size=%"PRIu32"\n", mp->populated_size);
+ fprintf(f, " header_size=%"PRIu32"\n", mp->header_size);
+ fprintf(f, " elt_size=%"PRIu32"\n", mp->elt_size);
+ fprintf(f, " trailer_size=%"PRIu32"\n", mp->trailer_size);
+ fprintf(f, " total_obj_size=%"PRIu32"\n",
+ mp->header_size + mp->elt_size + mp->trailer_size);
+
+ fprintf(f, " private_data_size=%"PRIu32"\n", mp->private_data_size);
+
+ STAILQ_FOREACH(memhdr, &mp->mem_list, next)
+ mem_len += memhdr->len;
+ if (mem_len != 0) {
+ fprintf(f, " avg bytes/object=%#Lf\n",
+ (long double)mem_len / mp->size);
+ }
+
+ cache_count = rte_mempool_dump_cache(f, mp);
+ common_count = rte_mempool_ops_get_count(mp);
+ if ((cache_count + common_count) > mp->size)
+ common_count = mp->size - cache_count;
+ fprintf(f, " common_pool_count=%u\n", common_count);
+
+ /* sum and dump statistics */
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ rte_mempool_ops_get_info(mp, &info);
+ memset(&sum, 0, sizeof(sum));
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+ sum.put_bulk += mp->stats[lcore_id].put_bulk;
+ sum.put_objs += mp->stats[lcore_id].put_objs;
+ sum.get_success_bulk += mp->stats[lcore_id].get_success_bulk;
+ sum.get_success_objs += mp->stats[lcore_id].get_success_objs;
+ sum.get_fail_bulk += mp->stats[lcore_id].get_fail_bulk;
+ sum.get_fail_objs += mp->stats[lcore_id].get_fail_objs;
+ sum.get_success_blks += mp->stats[lcore_id].get_success_blks;
+ sum.get_fail_blks += mp->stats[lcore_id].get_fail_blks;
+ }
+ fprintf(f, " stats:\n");
+ fprintf(f, " put_bulk=%"PRIu64"\n", sum.put_bulk);
+ fprintf(f, " put_objs=%"PRIu64"\n", sum.put_objs);
+ fprintf(f, " get_success_bulk=%"PRIu64"\n", sum.get_success_bulk);
+ fprintf(f, " get_success_objs=%"PRIu64"\n", sum.get_success_objs);
+ fprintf(f, " get_fail_bulk=%"PRIu64"\n", sum.get_fail_bulk);
+ fprintf(f, " get_fail_objs=%"PRIu64"\n", sum.get_fail_objs);
+ if (info.contig_block_size > 0) {
+ fprintf(f, " get_success_blks=%"PRIu64"\n",
+ sum.get_success_blks);
+ fprintf(f, " get_fail_blks=%"PRIu64"\n", sum.get_fail_blks);
+ }
+#else
+ fprintf(f, " no statistics available\n");
+#endif
+
+ rte_mempool_audit(mp);
+}
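
Together with rte_mempool_avail_count() and rte_mempool_in_use_count() above, this dump gives a quick health check. A small illustrative helper, assuming `mp` is a populated mempool:

    static void
    inspect_pool(struct rte_mempool *mp)
    {
            printf("%s: %u available, %u in use\n", mp->name,
                   rte_mempool_avail_count(mp),
                   rte_mempool_in_use_count(mp));

            /* Full dump: flags, sizes, per-lcore cache counts and, in
             * debug builds, the put/get statistics. */
            rte_mempool_dump(stdout, mp);
    }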
+
+/* dump the status of all mempools on the console */
+void
+rte_mempool_list_dump(FILE *f)
+{
+ struct rte_mempool *mp = NULL;
+ struct rte_tailq_entry *te;
+ struct rte_mempool_list *mempool_list;
+
+ mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
+
+ rte_mcfg_mempool_read_lock();
+
+ TAILQ_FOREACH(te, mempool_list, next) {
+ mp = (struct rte_mempool *) te->data;
+ rte_mempool_dump(f, mp);
+ }
+
+ rte_mcfg_mempool_read_unlock();
+}
+
+/* search a mempool from its name */
+struct rte_mempool *
+rte_mempool_lookup(const char *name)
+{
+ struct rte_mempool *mp = NULL;
+ struct rte_tailq_entry *te;
+ struct rte_mempool_list *mempool_list;
+
+ mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
+
+ rte_mcfg_mempool_read_lock();
+
+ TAILQ_FOREACH(te, mempool_list, next) {
+ mp = (struct rte_mempool *) te->data;
+ if (strncmp(name, mp->name, RTE_MEMPOOL_NAMESIZE) == 0)
+ break;
+ }
+
+ rte_mcfg_mempool_read_unlock();
+
+ if (te == NULL) {
+ rte_errno = ENOENT;
+ return NULL;
+ }
+
+ return mp;
+}
+
+void rte_mempool_walk(void (*func)(struct rte_mempool *, void *),
+ void *arg)
+{
+ struct rte_tailq_entry *te = NULL;
+ struct rte_mempool_list *mempool_list;
+ void *tmp_te;
+
+ mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
+
+ rte_mcfg_mempool_read_lock();
+
+ TAILQ_FOREACH_SAFE(te, mempool_list, next, tmp_te) {
+ (*func)((struct rte_mempool *) te->data, arg);
+ }
+
+ rte_mcfg_mempool_read_unlock();
+}
diff --git a/src/spdk/dpdk/lib/librte_mempool/rte_mempool.h b/src/spdk/dpdk/lib/librte_mempool/rte_mempool.h
new file mode 100644
index 000000000..652d19f9f
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_mempool/rte_mempool.h
@@ -0,0 +1,1811 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation.
+ * Copyright(c) 2016 6WIND S.A.
+ */
+
+#ifndef _RTE_MEMPOOL_H_
+#define _RTE_MEMPOOL_H_
+
+/**
+ * @file
+ * RTE Mempool.
+ *
+ * A memory pool is an allocator of fixed-size objects. It is
+ * identified by its name, and uses a ring to store free objects. It
+ * provides some other optional services, like a per-core object
+ * cache, and an alignment helper to ensure that objects are padded
+ * to spread them equally on all RAM channels, ranks, and so on.
+ *
+ * Objects owned by a mempool should never be added in another
+ * mempool. When an object is freed using rte_mempool_put() or
+ * equivalent, the object data is not modified; the user can save some
+ * meta-data in the object data and retrieve them when allocating a
+ * new object.
+ *
+ * Note: the mempool implementation is not preemptible. An lcore must not be
+ * interrupted by another task that uses the same mempool (because it uses a
+ * ring which is not preemptible). Also, usual mempool functions like
+ * rte_mempool_get() or rte_mempool_put() are designed to be called from an EAL
+ * thread due to the internal per-lcore cache. Due to the lack of caching,
+ * rte_mempool_get() or rte_mempool_put() performance will suffer when called
+ * by non-EAL threads. Instead, non-EAL threads should call
+ * rte_mempool_generic_get() or rte_mempool_generic_put() with a user cache
+ * created with rte_mempool_cache_create().
+ */
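
For readers new to the API, the typical flow from an EAL thread looks roughly like the sketch below (an editorial illustration, not part of the header; the pool name, counts and sizes are arbitrary):

    struct rte_mempool *mp;
    void *obj;

    /* 8192 objects of 256 bytes each, with a 64-entry per-lcore cache. */
    mp = rte_mempool_create("msg_pool", 8192, 256, 64, 0,
                            NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
    if (mp == NULL)
            rte_exit(EXIT_FAILURE, "cannot create mempool: %s\n",
                     rte_strerror(rte_errno));

    if (rte_mempool_get(mp, &obj) == 0) {
            /* ... use the 256-byte buffer ... */
            rte_mempool_put(mp, obj);
    }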
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <sys/queue.h>
+
+#include <rte_config.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_lcore.h>
+#include <rte_memory.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_memcpy.h>
+#include <rte_common.h>
+
+#include "rte_mempool_trace_fp.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_MEMPOOL_HEADER_COOKIE1 0xbadbadbadadd2e55ULL /**< Header cookie. */
+#define RTE_MEMPOOL_HEADER_COOKIE2 0xf2eef2eedadd2e55ULL /**< Header cookie. */
+#define RTE_MEMPOOL_TRAILER_COOKIE 0xadd2e55badbadbadULL /**< Trailer cookie.*/
+
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+/**
+ * A structure that stores the mempool statistics (per-lcore).
+ */
+struct rte_mempool_debug_stats {
+ uint64_t put_bulk; /**< Number of puts. */
+ uint64_t put_objs; /**< Number of objects successfully put. */
+ uint64_t get_success_bulk; /**< Successful allocation number. */
+ uint64_t get_success_objs; /**< Objects successfully allocated. */
+ uint64_t get_fail_bulk; /**< Failed allocation number. */
+ uint64_t get_fail_objs; /**< Objects that failed to be allocated. */
+ /** Successful allocation number of contiguous blocks. */
+ uint64_t get_success_blks;
+ /** Failed allocation number of contiguous blocks. */
+ uint64_t get_fail_blks;
+} __rte_cache_aligned;
+#endif
+
+/**
+ * A structure that stores a per-core object cache.
+ */
+struct rte_mempool_cache {
+ uint32_t size; /**< Size of the cache */
+ uint32_t flushthresh; /**< Threshold before we flush excess elements */
+ uint32_t len; /**< Current cache count */
+ /*
+ * Cache is allocated to this size to allow it to overflow in certain
+ * cases to avoid needless emptying of cache.
+ */
+ void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 3]; /**< Cache objects */
+} __rte_cache_aligned;
+
+/**
+ * A structure that stores the size of mempool elements.
+ */
+struct rte_mempool_objsz {
+ uint32_t elt_size; /**< Size of an element. */
+ uint32_t header_size; /**< Size of header (before elt). */
+ uint32_t trailer_size; /**< Size of trailer (after elt). */
+ uint32_t total_size;
+ /**< Total size of an object (header + elt + trailer). */
+};
+
+/**< Maximum length of a memory pool's name. */
+#define RTE_MEMPOOL_NAMESIZE (RTE_RING_NAMESIZE - \
+ sizeof(RTE_MEMPOOL_MZ_PREFIX) + 1)
+#define RTE_MEMPOOL_MZ_PREFIX "MP_"
+
+/* "MP_<name>" */
+#define RTE_MEMPOOL_MZ_FORMAT RTE_MEMPOOL_MZ_PREFIX "%s"
+
+#define MEMPOOL_PG_SHIFT_MAX (sizeof(uintptr_t) * CHAR_BIT - 1)
+
+/** Mempool over one chunk of physically contiguous memory */
+#define MEMPOOL_PG_NUM_DEFAULT 1
+
+#ifndef RTE_MEMPOOL_ALIGN
+/**
+ * Alignment of elements inside mempool.
+ */
+#define RTE_MEMPOOL_ALIGN RTE_CACHE_LINE_SIZE
+#endif
+
+#define RTE_MEMPOOL_ALIGN_MASK (RTE_MEMPOOL_ALIGN - 1)
+
+/**
+ * Mempool object header structure
+ *
+ * Each object stored in a mempool is prefixed by this header structure.
+ * It allows the mempool pointer to be retrieved from the object and
+ * iteration over all objects attached to a mempool. When debug is enabled,
+ * a cookie is also added to this structure to help detect corruption and
+ * double frees.
+ */
+struct rte_mempool_objhdr {
+ STAILQ_ENTRY(rte_mempool_objhdr) next; /**< Next in list. */
+ struct rte_mempool *mp; /**< The mempool owning the object. */
+ RTE_STD_C11
+ union {
+ rte_iova_t iova; /**< IO address of the object. */
+ phys_addr_t physaddr; /**< deprecated - Physical address of the object. */
+ };
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ uint64_t cookie; /**< Debug cookie. */
+#endif
+};
+
+/**
+ * A list of object headers type
+ */
+STAILQ_HEAD(rte_mempool_objhdr_list, rte_mempool_objhdr);
+
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+
+/**
+ * Mempool object trailer structure
+ *
+ * In debug mode, each object stored in a mempool is suffixed by this
+ * trailer structure, whose cookie helps detect memory corruption.
+ */
+struct rte_mempool_objtlr {
+ uint64_t cookie; /**< Debug cookie. */
+};
+
+#endif
+
+/**
+ * A list of memory where objects are stored
+ */
+STAILQ_HEAD(rte_mempool_memhdr_list, rte_mempool_memhdr);
+
+/**
+ * Callback used to free a memory chunk
+ */
+typedef void (rte_mempool_memchunk_free_cb_t)(struct rte_mempool_memhdr *memhdr,
+ void *opaque);
+
+/**
+ * Mempool objects memory header structure
+ *
+ * The memory chunks where objects are stored. Each chunk is virtually
+ * and physically contiguous.
+ */
+struct rte_mempool_memhdr {
+ STAILQ_ENTRY(rte_mempool_memhdr) next; /**< Next in list. */
+ struct rte_mempool *mp; /**< The mempool owning the chunk */
+ void *addr; /**< Virtual address of the chunk */
+ RTE_STD_C11
+ union {
+ rte_iova_t iova; /**< IO address of the chunk */
+ phys_addr_t phys_addr; /**< Physical address of the chunk */
+ };
+ size_t len; /**< length of the chunk */
+ rte_mempool_memchunk_free_cb_t *free_cb; /**< Free callback */
+ void *opaque; /**< Argument passed to the free callback */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Additional information about the mempool
+ *
+ * The structure is cache-line aligned to avoid ABI breakages in
+ * a number of cases when something small is added.
+ */
+struct rte_mempool_info {
+ /** Number of objects in the contiguous block */
+ unsigned int contig_block_size;
+} __rte_cache_aligned;
+
+/**
+ * The RTE mempool structure.
+ */
+struct rte_mempool {
+ /*
+ * Note: this field kept the RTE_MEMZONE_NAMESIZE size due to ABI
+ * compatibility requirements, it could be changed to
+ * RTE_MEMPOOL_NAMESIZE next time the ABI changes
+ */
+ char name[RTE_MEMZONE_NAMESIZE]; /**< Name of mempool. */
+ RTE_STD_C11
+ union {
+ void *pool_data; /**< Ring or pool to store objects. */
+ uint64_t pool_id; /**< External mempool identifier. */
+ };
+ void *pool_config; /**< optional args for ops alloc. */
+ const struct rte_memzone *mz; /**< Memzone where pool is alloc'd. */
+ unsigned int flags; /**< Flags of the mempool. */
+ int socket_id; /**< Socket id passed at create. */
+ uint32_t size; /**< Max size of the mempool. */
+ uint32_t cache_size;
+ /**< Size of per-lcore default local cache. */
+
+ uint32_t elt_size; /**< Size of an element. */
+ uint32_t header_size; /**< Size of header (before elt). */
+ uint32_t trailer_size; /**< Size of trailer (after elt). */
+
+ unsigned private_data_size; /**< Size of private data. */
+ /**
+ * Index into rte_mempool_ops_table array of mempool ops
+ * structs, which contain callback function pointers.
+ * We're using an index here rather than pointers to the callbacks
+ * to facilitate any secondary processes that may want to use
+ * this mempool.
+ */
+ int32_t ops_index;
+
+ struct rte_mempool_cache *local_cache; /**< Per-lcore local cache */
+
+ uint32_t populated_size; /**< Number of populated objects. */
+ struct rte_mempool_objhdr_list elt_list; /**< List of objects in pool */
+ uint32_t nb_mem_chunks; /**< Number of memory chunks */
+ struct rte_mempool_memhdr_list mem_list; /**< List of memory chunks */
+
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ /** Per-lcore statistics. */
+ struct rte_mempool_debug_stats stats[RTE_MAX_LCORE];
+#endif
+} __rte_cache_aligned;
+
+#define MEMPOOL_F_NO_SPREAD 0x0001
+ /**< Spreading among memory channels not required. */
+#define MEMPOOL_F_NO_CACHE_ALIGN 0x0002 /**< Do not align objs on cache lines.*/
+#define MEMPOOL_F_SP_PUT 0x0004 /**< Default put is "single-producer".*/
+#define MEMPOOL_F_SC_GET 0x0008 /**< Default get is "single-consumer".*/
+#define MEMPOOL_F_POOL_CREATED 0x0010 /**< Internal: pool is created. */
+#define MEMPOOL_F_NO_IOVA_CONTIG 0x0020 /**< Don't need IOVA contiguous objs. */
+#define MEMPOOL_F_NO_PHYS_CONTIG MEMPOOL_F_NO_IOVA_CONTIG /* deprecated */
+
+/**
+ * @internal When debug is enabled, store some statistics.
+ *
+ * @param mp
+ * Pointer to the memory pool.
+ * @param name
+ * Name of the statistics field to increment in the memory pool.
+ * @param n
+ * Number to add to the object-oriented statistics.
+ */
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+#define __MEMPOOL_STAT_ADD(mp, name, n) do { \
+ unsigned __lcore_id = rte_lcore_id(); \
+ if (__lcore_id < RTE_MAX_LCORE) { \
+ mp->stats[__lcore_id].name##_objs += n; \
+ mp->stats[__lcore_id].name##_bulk += 1; \
+ } \
+ } while(0)
+#define __MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, name, n) do { \
+ unsigned int __lcore_id = rte_lcore_id(); \
+ if (__lcore_id < RTE_MAX_LCORE) { \
+ mp->stats[__lcore_id].name##_blks += n; \
+ mp->stats[__lcore_id].name##_bulk += 1; \
+ } \
+ } while (0)
+#else
+#define __MEMPOOL_STAT_ADD(mp, name, n) do {} while(0)
+#define __MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, name, n) do {} while (0)
+#endif
+
+/**
+ * Calculate the size of the mempool header.
+ *
+ * @param mp
+ * Pointer to the memory pool.
+ * @param cs
+ * Size of the per-lcore cache.
+ */
+#define MEMPOOL_HEADER_SIZE(mp, cs) \
+ (sizeof(*(mp)) + (((cs) == 0) ? 0 : \
+ (sizeof(struct rte_mempool_cache) * RTE_MAX_LCORE)))
+
+/* return the header of a mempool object (internal) */
+static inline struct rte_mempool_objhdr *__mempool_get_header(void *obj)
+{
+ return (struct rte_mempool_objhdr *)RTE_PTR_SUB(obj,
+ sizeof(struct rte_mempool_objhdr));
+}
+
+/**
+ * Return a pointer to the mempool owning this object.
+ *
+ * @param obj
+ * An object that is owned by a pool. If this is not the case,
+ * the behavior is undefined.
+ * @return
+ * A pointer to the mempool structure.
+ */
+static inline struct rte_mempool *rte_mempool_from_obj(void *obj)
+{
+ struct rte_mempool_objhdr *hdr = __mempool_get_header(obj);
+ return hdr->mp;
+}
+
+/* return the trailer of a mempool object (internal) */
+static inline struct rte_mempool_objtlr *__mempool_get_trailer(void *obj)
+{
+ struct rte_mempool *mp = rte_mempool_from_obj(obj);
+ return (struct rte_mempool_objtlr *)RTE_PTR_ADD(obj, mp->elt_size);
+}
+
+/**
+ * @internal Check and update cookies or panic.
+ *
+ * @param mp
+ * Pointer to the memory pool.
+ * @param obj_table_const
+ * Pointer to a table of void * pointers (objects).
+ * @param n
+ *   Number of objects in the object table.
+ * @param free
+ * - 0: object is supposed to be allocated, mark it as free
+ * - 1: object is supposed to be free, mark it as allocated
+ * - 2: just check that cookie is valid (free or allocated)
+ */
+void rte_mempool_check_cookies(const struct rte_mempool *mp,
+ void * const *obj_table_const, unsigned n, int free);
+
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+#define __mempool_check_cookies(mp, obj_table_const, n, free) \
+ rte_mempool_check_cookies(mp, obj_table_const, n, free)
+#else
+#define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while(0)
+#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * @internal Check contiguous object blocks and update cookies or panic.
+ *
+ * @param mp
+ * Pointer to the memory pool.
+ * @param first_obj_table_const
+ * Pointer to a table of void * pointers (first object of the contiguous
+ * object blocks).
+ * @param n
+ * Number of contiguous object blocks.
+ * @param free
+ * - 0: object is supposed to be allocated, mark it as free
+ * - 1: object is supposed to be free, mark it as allocated
+ * - 2: just check that cookie is valid (free or allocated)
+ */
+void rte_mempool_contig_blocks_check_cookies(const struct rte_mempool *mp,
+ void * const *first_obj_table_const, unsigned int n, int free);
+
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+#define __mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
+ free) \
+ rte_mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
+ free)
+#else
+#define __mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
+ free) \
+ do {} while (0)
+#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
+
+#define RTE_MEMPOOL_OPS_NAMESIZE 32 /**< Max length of ops struct name. */
+
+/**
+ * Prototype for implementation specific data provisioning function.
+ *
+ * The function should provide the implementation specific memory for
+ * use by the other mempool ops functions in a given mempool ops struct.
+ * For example, the default ops provides an instance of rte_ring for this
+ * purpose. Other implementations will most likely use a different data
+ * structure; either way, it is transparent to the application programmer.
+ * This function should set mp->pool_data.
+ */
+typedef int (*rte_mempool_alloc_t)(struct rte_mempool *mp);
+
+/**
+ * Free the opaque private data pointed to by mp->pool_data pointer.
+ */
+typedef void (*rte_mempool_free_t)(struct rte_mempool *mp);
+
+/**
+ * Enqueue an object into the external pool.
+ */
+typedef int (*rte_mempool_enqueue_t)(struct rte_mempool *mp,
+ void * const *obj_table, unsigned int n);
+
+/**
+ * Dequeue an object from the external pool.
+ */
+typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp,
+ void **obj_table, unsigned int n);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Dequeue a number of contiguous object blocks from the external pool.
+ */
+typedef int (*rte_mempool_dequeue_contig_blocks_t)(struct rte_mempool *mp,
+ void **first_obj_table, unsigned int n);
+
+/**
+ * Return the number of available objects in the external pool.
+ */
+typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp);
+
+/**
+ * Calculate memory size required to store given number of objects.
+ *
+ * If mempool objects are not required to be IOVA-contiguous
+ * (the flag MEMPOOL_F_NO_IOVA_CONTIG is set), min_chunk_size defines
+ * virtually contiguous chunk size. Otherwise, if mempool objects must
+ * be IOVA-contiguous (the flag MEMPOOL_F_NO_IOVA_CONTIG is clear),
+ * min_chunk_size defines IOVA-contiguous chunk size.
+ *
+ * @param[in] mp
+ * Pointer to the memory pool.
+ * @param[in] obj_num
+ * Number of objects.
+ * @param[in] pg_shift
+ * LOG2 of the physical pages size. If set to 0, ignore page boundaries.
+ * @param[out] min_chunk_size
+ * Location for minimum size of the memory chunk which may be used to
+ * store memory pool objects.
+ * @param[out] align
+ * Location for required memory chunk alignment.
+ * @return
+ * Required memory size.
+ */
+typedef ssize_t (*rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp,
+ uint32_t obj_num, uint32_t pg_shift,
+ size_t *min_chunk_size, size_t *align);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * @internal Helper to calculate memory size required to store given
+ * number of objects.
+ *
+ * This function is internal to mempool library and mempool drivers.
+ *
+ * If page boundaries may be ignored, it is just a product of total
+ * object size including header and trailer and number of objects.
+ * Otherwise, it is a number of pages required to store given number of
+ * objects without crossing page boundary.
+ *
+ * Note that if the object size is bigger than the page size, then it is
+ * assumed that pages are grouped in subsets of physically contiguous
+ * pages big enough to store at least one object.
+ *
+ * Minimum size of memory chunk is the total element size.
+ * Required memory chunk alignment is the cache line size.
+ *
+ * @param[in] mp
+ * A pointer to the mempool structure.
+ * @param[in] obj_num
+ * Number of objects to be added in mempool.
+ * @param[in] pg_shift
+ * LOG2 of the physical pages size. If set to 0, ignore page boundaries.
+ * @param[in] chunk_reserve
+ * Amount of memory that must be reserved at the beginning of each page,
+ * or at the beginning of the memory area if pg_shift is 0.
+ * @param[out] min_chunk_size
+ * Location for minimum size of the memory chunk which may be used to
+ * store memory pool objects.
+ * @param[out] align
+ * Location for required memory chunk alignment.
+ * @return
+ * Required memory size.
+ */
+__rte_experimental
+ssize_t rte_mempool_op_calc_mem_size_helper(const struct rte_mempool *mp,
+ uint32_t obj_num, uint32_t pg_shift, size_t chunk_reserve,
+ size_t *min_chunk_size, size_t *align);
+
+/**
+ * Default way to calculate memory size required to store given number of
+ * objects.
+ *
+ * Equivalent to rte_mempool_op_calc_mem_size_helper(mp, obj_num, pg_shift,
+ * 0, min_chunk_size, align).
+ */
+ssize_t rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
+ uint32_t obj_num, uint32_t pg_shift,
+ size_t *min_chunk_size, size_t *align);
+
+/**
+ * Function to be called for each populated object.
+ *
+ * @param[in] mp
+ * A pointer to the mempool structure.
+ * @param[in] opaque
+ * An opaque pointer passed to iterator.
+ * @param[in] vaddr
+ * Object virtual address.
+ * @param[in] iova
+ * Input/output virtual address of the object or RTE_BAD_IOVA.
+ */
+typedef void (rte_mempool_populate_obj_cb_t)(struct rte_mempool *mp,
+ void *opaque, void *vaddr, rte_iova_t iova);
+
+/**
+ * Populate memory pool objects using provided memory chunk.
+ *
+ * Populated objects should be enqueued to the pool, e.g. using
+ * rte_mempool_ops_enqueue_bulk().
+ *
+ * If the given IO address is unknown (iova = RTE_BAD_IOVA),
+ * the chunk doesn't need to be physically contiguous (only virtually),
+ * and allocated objects may span two pages.
+ *
+ * @param[in] mp
+ * A pointer to the mempool structure.
+ * @param[in] max_objs
+ * Maximum number of objects to be populated.
+ * @param[in] vaddr
+ * The virtual address of memory that should be used to store objects.
+ * @param[in] iova
+ *   The IO address corresponding to vaddr, or RTE_BAD_IOVA if unknown.
+ * @param[in] len
+ * The length of memory in bytes.
+ * @param[in] obj_cb
+ * Callback function to be executed for each populated object.
+ * @param[in] obj_cb_arg
+ * An opaque pointer passed to the callback function.
+ * @return
+ * The number of objects added on success.
+ * On error, no objects are populated and a negative errno is returned.
+ */
+typedef int (*rte_mempool_populate_t)(struct rte_mempool *mp,
+ unsigned int max_objs,
+ void *vaddr, rte_iova_t iova, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
+
+/**
+ * Align objects on addresses multiple of total_elt_sz.
+ */
+#define RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ 0x0001
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * @internal Helper to populate memory pool object using provided memory
+ * chunk: just slice objects one by one, taking care of not
+ * crossing page boundaries.
+ *
+ * If RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ is set in flags, the addresses
+ * of object headers will be aligned on a multiple of total_elt_sz.
+ * This feature is used by octeontx hardware.
+ *
+ * This function is internal to mempool library and mempool drivers.
+ *
+ * @param[in] mp
+ * A pointer to the mempool structure.
+ * @param[in] flags
+ * Logical OR of following flags:
+ * - RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ: align objects on addresses
+ * multiple of total_elt_sz.
+ * @param[in] max_objs
+ * Maximum number of objects to be added in mempool.
+ * @param[in] vaddr
+ * The virtual address of memory that should be used to store objects.
+ * @param[in] iova
+ * The IO address corresponding to vaddr, or RTE_BAD_IOVA.
+ * @param[in] len
+ * The length of memory in bytes.
+ * @param[in] obj_cb
+ * Callback function to be executed for each populated object.
+ * @param[in] obj_cb_arg
+ * An opaque pointer passed to the callback function.
+ * @return
+ * The number of objects added in mempool.
+ */
+__rte_experimental
+int rte_mempool_op_populate_helper(struct rte_mempool *mp,
+ unsigned int flags, unsigned int max_objs,
+ void *vaddr, rte_iova_t iova, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
+
+/**
+ * Default way to populate memory pool object using provided memory chunk.
+ *
+ * Equivalent to rte_mempool_op_populate_helper(mp, 0, max_objs, vaddr, iova,
+ * len, obj_cb, obj_cb_arg).
+ */
+int rte_mempool_op_populate_default(struct rte_mempool *mp,
+ unsigned int max_objs,
+ void *vaddr, rte_iova_t iova, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get some additional information about a mempool.
+ */
+typedef int (*rte_mempool_get_info_t)(const struct rte_mempool *mp,
+ struct rte_mempool_info *info);
+
+
+/** Structure defining the mempool operations. */
+struct rte_mempool_ops {
+ char name[RTE_MEMPOOL_OPS_NAMESIZE]; /**< Name of mempool ops struct. */
+ rte_mempool_alloc_t alloc; /**< Allocate private data. */
+ rte_mempool_free_t free; /**< Free the external pool. */
+ rte_mempool_enqueue_t enqueue; /**< Enqueue an object. */
+ rte_mempool_dequeue_t dequeue; /**< Dequeue an object. */
+ rte_mempool_get_count get_count; /**< Get qty of available objs. */
+ /**
+ * Optional callback to calculate memory size required to
+ * store specified number of objects.
+ */
+ rte_mempool_calc_mem_size_t calc_mem_size;
+ /**
+ * Optional callback to populate mempool objects using
+ * provided memory chunk.
+ */
+ rte_mempool_populate_t populate;
+ /**
+ * Get mempool info
+ */
+ rte_mempool_get_info_t get_info;
+ /**
+ * Dequeue a number of contiguous object blocks.
+ */
+ rte_mempool_dequeue_contig_blocks_t dequeue_contig_blocks;
+} __rte_cache_aligned;
+
+#define RTE_MEMPOOL_MAX_OPS_IDX 16 /**< Max registered ops structs */
+
+/**
+ * Structure storing the table of registered ops structs, each of which contains
+ * the function pointers for the mempool ops functions.
+ * Each process has its own storage for this ops struct array so that
+ * the mempools can be shared across primary and secondary processes.
+ * The indices used to access the array are valid across processes, whereas
+ * any function pointers stored directly in the mempool struct would not be.
+ * This results in us simply having "ops_index" in the mempool struct.
+ */
+struct rte_mempool_ops_table {
+ rte_spinlock_t sl; /**< Spinlock for add/delete. */
+ uint32_t num_ops; /**< Number of used ops structs in the table. */
+ /**
+ * Storage for all possible ops structs.
+ */
+ struct rte_mempool_ops ops[RTE_MEMPOOL_MAX_OPS_IDX];
+} __rte_cache_aligned;
+
+/** Array of registered ops structs. */
+extern struct rte_mempool_ops_table rte_mempool_ops_table;
+
+/**
+ * @internal Get the mempool ops struct from its index.
+ *
+ * @param ops_index
+ * The index of the ops struct in the ops struct table. It must be a valid
+ * index: (0 <= idx < num_ops).
+ * @return
+ * The pointer to the ops struct in the table.
+ */
+static inline struct rte_mempool_ops *
+rte_mempool_get_ops(int ops_index)
+{
+ RTE_VERIFY((ops_index >= 0) && (ops_index < RTE_MEMPOOL_MAX_OPS_IDX));
+
+ return &rte_mempool_ops_table.ops[ops_index];
+}
+
+/**
+ * @internal Wrapper for mempool_ops alloc callback.
+ *
+ * @param mp
+ * Pointer to the memory pool.
+ * @return
+ * - 0: Success; successfully allocated mempool pool_data.
+ * - <0: Error; code of alloc function.
+ */
+int
+rte_mempool_ops_alloc(struct rte_mempool *mp);
+
+/**
+ * @internal Wrapper for mempool_ops dequeue callback.
+ *
+ * @param mp
+ * Pointer to the memory pool.
+ * @param obj_table
+ * Pointer to a table of void * pointers (objects).
+ * @param n
+ * Number of objects to get.
+ * @return
+ * - 0: Success; got n objects.
+ * - <0: Error; code of dequeue function.
+ */
+static inline int
+rte_mempool_ops_dequeue_bulk(struct rte_mempool *mp,
+ void **obj_table, unsigned n)
+{
+ struct rte_mempool_ops *ops;
+
+ rte_mempool_trace_ops_dequeue_bulk(mp, obj_table, n);
+ ops = rte_mempool_get_ops(mp->ops_index);
+ return ops->dequeue(mp, obj_table, n);
+}
+
+/**
+ * @internal Wrapper for mempool_ops dequeue_contig_blocks callback.
+ *
+ * @param[in] mp
+ * Pointer to the memory pool.
+ * @param[out] first_obj_table
+ * Pointer to a table of void * pointers (first objects).
+ * @param[in] n
+ * Number of blocks to get.
+ * @return
+ * - 0: Success; got n objects.
+ * - <0: Error; code of dequeue function.
+ */
+static inline int
+rte_mempool_ops_dequeue_contig_blocks(struct rte_mempool *mp,
+ void **first_obj_table, unsigned int n)
+{
+ struct rte_mempool_ops *ops;
+
+ ops = rte_mempool_get_ops(mp->ops_index);
+ RTE_ASSERT(ops->dequeue_contig_blocks != NULL);
+ rte_mempool_trace_ops_dequeue_contig_blocks(mp, first_obj_table, n);
+ return ops->dequeue_contig_blocks(mp, first_obj_table, n);
+}
+
+/**
+ * @internal wrapper for mempool_ops enqueue callback.
+ *
+ * @param mp
+ * Pointer to the memory pool.
+ * @param obj_table
+ * Pointer to a table of void * pointers (objects).
+ * @param n
+ * Number of objects to put.
+ * @return
+ * - 0: Success; n objects supplied.
+ * - <0: Error; code of enqueue function.
+ */
+static inline int
+rte_mempool_ops_enqueue_bulk(struct rte_mempool *mp, void * const *obj_table,
+ unsigned n)
+{
+ struct rte_mempool_ops *ops;
+
+ rte_mempool_trace_ops_enqueue_bulk(mp, obj_table, n);
+ ops = rte_mempool_get_ops(mp->ops_index);
+ return ops->enqueue(mp, obj_table, n);
+}
+
+/**
+ * @internal wrapper for mempool_ops get_count callback.
+ *
+ * @param mp
+ * Pointer to the memory pool.
+ * @return
+ * The number of available objects in the external pool.
+ */
+unsigned
+rte_mempool_ops_get_count(const struct rte_mempool *mp);
+
+/**
+ * @internal wrapper for mempool_ops calc_mem_size callback.
+ * API to calculate the size of memory required to store a specified number
+ * of objects.
+ *
+ * @param[in] mp
+ * Pointer to the memory pool.
+ * @param[in] obj_num
+ * Number of objects.
+ * @param[in] pg_shift
+ * LOG2 of the physical pages size. If set to 0, ignore page boundaries.
+ * @param[out] min_chunk_size
+ * Location for minimum size of the memory chunk which may be used to
+ * store memory pool objects.
+ * @param[out] align
+ * Location for required memory chunk alignment.
+ * @return
+ * Required memory size aligned at page boundary.
+ */
+ssize_t rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
+ uint32_t obj_num, uint32_t pg_shift,
+ size_t *min_chunk_size, size_t *align);
+
+/**
+ * @internal wrapper for mempool_ops populate callback.
+ *
+ * Populate memory pool objects using provided memory chunk.
+ *
+ * @param[in] mp
+ * A pointer to the mempool structure.
+ * @param[in] max_objs
+ * Maximum number of objects to be populated.
+ * @param[in] vaddr
+ * The virtual address of memory that should be used to store objects.
+ * @param[in] iova
+ *   The IO address corresponding to vaddr, or RTE_BAD_IOVA if unknown.
+ * @param[in] len
+ * The length of memory in bytes.
+ * @param[in] obj_cb
+ * Callback function to be executed for each populated object.
+ * @param[in] obj_cb_arg
+ * An opaque pointer passed to the callback function.
+ * @return
+ * The number of objects added on success.
+ * On error, no objects are populated and a negative errno is returned.
+ */
+int rte_mempool_ops_populate(struct rte_mempool *mp, unsigned int max_objs,
+ void *vaddr, rte_iova_t iova, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb,
+ void *obj_cb_arg);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Wrapper for mempool_ops get_info callback.
+ *
+ * @param[in] mp
+ * Pointer to the memory pool.
+ * @param[out] info
+ * Pointer to the rte_mempool_info structure
+ * @return
+ *   - 0: Success; the mempool driver supports retrieving supplementary
+ *     mempool information.
+ *   - -ENOTSUP: the driver does not support the get_info op (a valid case).
+ */
+__rte_experimental
+int rte_mempool_ops_get_info(const struct rte_mempool *mp,
+ struct rte_mempool_info *info);
+
+/**
+ * @internal wrapper for mempool_ops free callback.
+ *
+ * @param mp
+ * Pointer to the memory pool.
+ */
+void
+rte_mempool_ops_free(struct rte_mempool *mp);
+
+/**
+ * Set the ops of a mempool.
+ *
+ * This can only be done on a mempool that is not populated, i.e. just after
+ * a call to rte_mempool_create_empty().
+ *
+ * @param mp
+ * Pointer to the memory pool.
+ * @param name
+ * Name of the ops structure to use for this mempool.
+ * @param pool_config
+ * Opaque data that can be passed by the application to the ops functions.
+ * @return
+ * - 0: Success; the mempool is now using the requested ops functions.
+ * - -EINVAL - Invalid ops struct name provided.
+ * - -EEXIST - mempool already has an ops struct assigned.
+ */
+int
+rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name,
+ void *pool_config);
+
+/**
+ * Register mempool operations.
+ *
+ * @param ops
+ * Pointer to an ops structure to register.
+ * @return
+ * - >=0: Success; return the index of the ops struct in the table.
+ * - -EINVAL - some missing callbacks while registering ops struct.
+ * - -ENOSPC - the maximum number of ops structs has been reached.
+ */
+int rte_mempool_register_ops(const struct rte_mempool_ops *ops);
+
+/**
+ * Macro to statically register the ops of a mempool handler.
+ * Note that rte_mempool_register_ops() fails silently here when more
+ * than RTE_MEMPOOL_MAX_OPS_IDX ops structs are registered.
+ */
+#define MEMPOOL_REGISTER_OPS(ops) \
+ RTE_INIT(mp_hdlr_init_##ops) \
+ { \
+ rte_mempool_register_ops(&ops); \
+ }
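+
+/*
+ * Illustrative sketch of a driver-side registration, mirroring the
+ * ring-based ops shipped in drivers/mempool/ring. The ops name
+ * "my_ring_ops", the callback names and the simplified single flavour
+ * (always multi-producer/multi-consumer) are assumptions made for this
+ * example only; <rte_ring.h> is assumed to be included.
+ *
+ *	static int
+ *	my_ops_alloc(struct rte_mempool *mp)
+ *	{
+ *		char name[RTE_RING_NAMESIZE];
+ *		struct rte_ring *r;
+ *
+ *		snprintf(name, sizeof(name), "MYOPS_%s", mp->name);
+ *		r = rte_ring_create(name, rte_align32pow2(mp->size + 1),
+ *			mp->socket_id, 0);
+ *		if (r == NULL)
+ *			return -rte_errno;
+ *		mp->pool_data = r;
+ *		return 0;
+ *	}
+ *
+ *	static void
+ *	my_ops_free(struct rte_mempool *mp)
+ *	{
+ *		rte_ring_free(mp->pool_data);
+ *	}
+ *
+ *	static int
+ *	my_ops_enqueue(struct rte_mempool *mp, void * const *obj_table,
+ *		unsigned int n)
+ *	{
+ *		if (rte_ring_enqueue_bulk(mp->pool_data, obj_table, n,
+ *				NULL) == 0)
+ *			return -ENOBUFS;
+ *		return 0;
+ *	}
+ *
+ *	static int
+ *	my_ops_dequeue(struct rte_mempool *mp, void **obj_table,
+ *		unsigned int n)
+ *	{
+ *		if (rte_ring_dequeue_bulk(mp->pool_data, obj_table, n,
+ *				NULL) == 0)
+ *			return -ENOBUFS;
+ *		return 0;
+ *	}
+ *
+ *	static unsigned int
+ *	my_ops_get_count(const struct rte_mempool *mp)
+ *	{
+ *		return rte_ring_count(mp->pool_data);
+ *	}
+ *
+ *	static const struct rte_mempool_ops my_ring_ops = {
+ *		.name = "my_ring_ops",
+ *		.alloc = my_ops_alloc,
+ *		.free = my_ops_free,
+ *		.enqueue = my_ops_enqueue,
+ *		.dequeue = my_ops_dequeue,
+ *		.get_count = my_ops_get_count,
+ *	};
+ *
+ *	MEMPOOL_REGISTER_OPS(my_ring_ops);
+ *
+ * An application would then select this handler with
+ * rte_mempool_set_ops_byname(mp, "my_ring_ops", NULL) before populating
+ * the mempool.
+ */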
+
+/**
+ * An object callback function for mempool.
+ *
+ * Used by rte_mempool_create() and rte_mempool_obj_iter().
+ */
+typedef void (rte_mempool_obj_cb_t)(struct rte_mempool *mp,
+ void *opaque, void *obj, unsigned obj_idx);
+typedef rte_mempool_obj_cb_t rte_mempool_obj_ctor_t; /* compat */
+
+/**
+ * A memory callback function for mempool.
+ *
+ * Used by rte_mempool_mem_iter().
+ */
+typedef void (rte_mempool_mem_cb_t)(struct rte_mempool *mp,
+ void *opaque, struct rte_mempool_memhdr *memhdr,
+ unsigned mem_idx);
+
+/**
+ * A mempool constructor callback function.
+ *
+ * Arguments are the mempool and the opaque pointer given by the user in
+ * rte_mempool_create().
+ */
+typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
+
+/**
+ * Create a new mempool named *name* in memory.
+ *
+ * This function uses ``rte_memzone_reserve()`` to allocate memory. The
+ * pool contains n elements of elt_size. Its size is set to n.
+ *
+ * @param name
+ * The name of the mempool.
+ * @param n
+ * The number of elements in the mempool. The optimum size (in terms of
+ * memory usage) for a mempool is when n is a power of two minus one:
+ * n = (2^q - 1).
+ * @param elt_size
+ * The size of each element.
+ * @param cache_size
+ * If cache_size is non-zero, the rte_mempool library will try to
+ * limit the accesses to the common lockless pool, by maintaining a
+ *   per-lcore object cache. This argument must be lower than or equal to
+ *   CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE and n / 1.5. It is advised to choose
+ *   cache_size so that "n modulo cache_size == 0": if this is
+ *   not the case, some elements will always stay in the pool and will
+ *   never be used. The access to the per-lcore table is of course
+ *   faster than the multi-producer/consumer pool. The cache can be
+ *   disabled if the cache_size argument is set to 0; this can be useful to
+ *   avoid losing objects in the cache.
+ * @param private_data_size
+ * The size of the private data appended after the mempool
+ * structure. This is useful for storing some private data after the
+ * mempool structure, as is done for rte_mbuf_pool for example.
+ * @param mp_init
+ * A function pointer that is called for initialization of the pool,
+ * before object initialization. The user can initialize the private
+ * data in this function if needed. This parameter can be NULL if
+ * not needed.
+ * @param mp_init_arg
+ * An opaque pointer to data that can be used in the mempool
+ * constructor function.
+ * @param obj_init
+ * A function pointer that is called for each object at
+ * initialization of the pool. The user can set some meta data in
+ * objects if needed. This parameter can be NULL if not needed.
+ * The obj_init() function takes the mempool pointer, the init_arg,
+ * the object pointer and the object number as parameters.
+ * @param obj_init_arg
+ * An opaque pointer to data that can be used as an argument for
+ * each call to the object constructor function.
+ * @param socket_id
+ * The *socket_id* argument is the socket identifier in the case of
+ * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
+ * constraint for the reserved zone.
+ * @param flags
+ * The *flags* arguments is an OR of following flags:
+ *   - MEMPOOL_F_NO_SPREAD: By default, object addresses are spread
+ * between channels in RAM: the pool allocator will add padding
+ * between objects depending on the hardware configuration. See
+ * Memory alignment constraints for details. If this flag is set,
+ * the allocator will just align them to a cache line.
+ * - MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are
+ * cache-aligned. This flag removes this constraint, and no
+ * padding will be present between objects. This flag implies
+ * MEMPOOL_F_NO_SPREAD.
+ * - MEMPOOL_F_SP_PUT: If this flag is set, the default behavior
+ * when using rte_mempool_put() or rte_mempool_put_bulk() is
+ * "single-producer". Otherwise, it is "multi-producers".
+ * - MEMPOOL_F_SC_GET: If this flag is set, the default behavior
+ * when using rte_mempool_get() or rte_mempool_get_bulk() is
+ * "single-consumer". Otherwise, it is "multi-consumers".
+ * - MEMPOOL_F_NO_IOVA_CONTIG: If set, allocated objects won't
+ * necessarily be contiguous in IO memory.
+ * @return
+ * The pointer to the new allocated mempool, on success. NULL on error
+ * with rte_errno set appropriately. Possible rte_errno values include:
+ * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ * - E_RTE_SECONDARY - function was called from a secondary process instance
+ * - EINVAL - cache size provided is too large
+ * - ENOSPC - the maximum number of memzones has already been allocated
+ * - EEXIST - a memzone with the same name already exists
+ * - ENOMEM - no appropriate memory area found in which to create memzone
+ */
+struct rte_mempool *
+rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
+ unsigned cache_size, unsigned private_data_size,
+ rte_mempool_ctor_t *mp_init, void *mp_init_arg,
+ rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
+ int socket_id, unsigned flags);
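+
+/*
+ * Illustrative call sketch: the pool name "example_pool", the element
+ * count 8191 (a power of two minus one), the 256-byte element size and
+ * the per-lcore cache of 128 objects are example values only.
+ *
+ *	struct rte_mempool *mp;
+ *
+ *	mp = rte_mempool_create("example_pool", 8191, 256, 128, 0,
+ *		NULL, NULL, NULL, NULL, rte_socket_id(), 0);
+ *	if (mp == NULL)
+ *		printf("mempool creation failed: %s\n",
+ *			rte_strerror(rte_errno));
+ */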
+
+/**
+ * Create an empty mempool
+ *
+ * The mempool is allocated and initialized, but it is not populated: no
+ * memory is allocated for the mempool elements. The user has to call
+ * rte_mempool_populate_*() to add memory chunks to the pool. Once
+ * populated, the user may also want to initialize each object with
+ * rte_mempool_obj_iter().
+ *
+ * @param name
+ * The name of the mempool.
+ * @param n
+ * The maximum number of elements that can be added in the mempool.
+ * The optimum size (in terms of memory usage) for a mempool is when n
+ * is a power of two minus one: n = (2^q - 1).
+ * @param elt_size
+ * The size of each element.
+ * @param cache_size
+ * Size of the cache. See rte_mempool_create() for details.
+ * @param private_data_size
+ * The size of the private data appended after the mempool
+ * structure. This is useful for storing some private data after the
+ * mempool structure, as is done for rte_mbuf_pool for example.
+ * @param socket_id
+ * The *socket_id* argument is the socket identifier in the case of
+ * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
+ * constraint for the reserved zone.
+ * @param flags
+ * Flags controlling the behavior of the mempool. See
+ * rte_mempool_create() for details.
+ * @return
+ * The pointer to the new allocated mempool, on success. NULL on error
+ * with rte_errno set appropriately. See rte_mempool_create() for details.
+ */
+struct rte_mempool *
+rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
+ unsigned cache_size, unsigned private_data_size,
+ int socket_id, unsigned flags);
+/**
+ * Free a mempool
+ *
+ * Unlink the mempool from global list, free the memory chunks, and all
+ * memory referenced by the mempool. The objects must not be used by
+ * other cores as they will be freed.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ */
+void
+rte_mempool_free(struct rte_mempool *mp);
+
+/**
+ * Add physically contiguous memory for objects in the pool at init
+ *
+ * Add a virtually and physically contiguous memory chunk in the pool
+ * where objects can be instantiated.
+ *
+ * If the given IO address is unknown (iova = RTE_BAD_IOVA),
+ * the chunk doesn't need to be physically contiguous (only virtually),
+ * and allocated objects may span two pages.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param vaddr
+ * The virtual address of memory that should be used to store objects.
+ * @param iova
+ *   The IO address, or RTE_BAD_IOVA if unknown.
+ * @param len
+ * The length of memory in bytes.
+ * @param free_cb
+ * The callback used to free this chunk when destroying the mempool.
+ * @param opaque
+ * An opaque argument passed to free_cb.
+ * @return
+ * The number of objects added on success (strictly positive).
+ *   On error, the chunk is not added to the memory list of the
+ *   mempool and the following code is returned:
+ * (0): not enough room in chunk for one object.
+ * (-ENOSPC): mempool is already populated.
+ * (-ENOMEM): allocation failure.
+ */
+int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
+ rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
+ void *opaque);
+
+/**
+ * Add virtually contiguous memory for objects in the pool at init
+ *
+ * Add a virtually contiguous memory chunk in the pool where objects can
+ * be instantiated.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param addr
+ * The virtual address of memory that should be used to store objects.
+ * @param len
+ * The length of memory in bytes.
+ * @param pg_sz
+ * The size of memory pages in this virtual area.
+ * @param free_cb
+ * The callback used to free this chunk when destroying the mempool.
+ * @param opaque
+ * An opaque argument passed to free_cb.
+ * @return
+ * The number of objects added on success (strictly positive).
+ *   On error, the chunk is not added to the memory list of the
+ *   mempool and the following code is returned:
+ * (0): not enough room in chunk for one object.
+ * (-ENOSPC): mempool is already populated.
+ * (-ENOMEM): allocation failure.
+ */
+int
+rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
+ size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
+ void *opaque);
+
+/**
+ * Add memory for objects in the pool at init
+ *
+ * This is the default function used by rte_mempool_create() to populate
+ * the mempool. It adds memory allocated using rte_memzone_reserve().
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @return
+ * The number of objects added on success.
+ *   On error, the chunk is not added to the memory list of the
+ * mempool and a negative errno is returned.
+ */
+int rte_mempool_populate_default(struct rte_mempool *mp);
+
+/**
+ * Add memory from anonymous mapping for objects in the pool at init
+ *
+ * This function mmaps an anonymous memory zone that is locked in
+ * memory and uses it to store the objects of the mempool.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @return
+ * The number of objects added on success.
+ *   On error, 0 is returned, rte_errno is set, and the chunk is not added to
+ * the memory list of the mempool.
+ */
+int rte_mempool_populate_anon(struct rte_mempool *mp);
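+
+/*
+ * Illustrative sketch of the create-empty/populate sequence described
+ * above. The function and pool names and the sizing values are example
+ * choices; "ring_mp_mc" is the ring-based ops name registered by the
+ * default mempool driver.
+ *
+ *	static struct rte_mempool *
+ *	example_pool_create(void)
+ *	{
+ *		struct rte_mempool *mp;
+ *
+ *		mp = rte_mempool_create_empty("example_pool", 8191, 256,
+ *			128, 0, rte_socket_id(), 0);
+ *		if (mp == NULL)
+ *			return NULL;
+ *
+ *		if (rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL) != 0 ||
+ *		    rte_mempool_populate_default(mp) < 0) {
+ *			rte_mempool_free(mp);
+ *			return NULL;
+ *		}
+ *		return mp;
+ *	}
+ */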
+
+/**
+ * Call a function for each mempool element
+ *
+ * Iterate across all objects attached to a rte_mempool and call the
+ * callback function on it.
+ *
+ * @param mp
+ * A pointer to an initialized mempool.
+ * @param obj_cb
+ * A function pointer that is called for each object.
+ * @param obj_cb_arg
+ * An opaque pointer passed to the callback function.
+ * @return
+ * Number of objects iterated.
+ */
+uint32_t rte_mempool_obj_iter(struct rte_mempool *mp,
+ rte_mempool_obj_cb_t *obj_cb, void *obj_cb_arg);
+
+/**
+ * Call a function for each mempool memory chunk
+ *
+ * Iterate across all memory chunks attached to a rte_mempool and call
+ * the callback function on it.
+ *
+ * @param mp
+ * A pointer to an initialized mempool.
+ * @param mem_cb
+ * A function pointer that is called for each memory chunk.
+ * @param mem_cb_arg
+ * An opaque pointer passed to the callback function.
+ * @return
+ * Number of memory chunks iterated.
+ */
+uint32_t rte_mempool_mem_iter(struct rte_mempool *mp,
+ rte_mempool_mem_cb_t *mem_cb, void *mem_cb_arg);
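+
+/*
+ * Illustrative sketch of an object iteration callback: the callback name
+ * is an example choice and <string.h> is assumed for memset(); it simply
+ * zeroes every populated object.
+ *
+ *	static void
+ *	zero_obj_cb(struct rte_mempool *mp, void *opaque, void *obj,
+ *		unsigned obj_idx)
+ *	{
+ *		RTE_SET_USED(opaque);
+ *		RTE_SET_USED(obj_idx);
+ *		memset(obj, 0, mp->elt_size);
+ *	}
+ *
+ *	uint32_t nb_objs = rte_mempool_obj_iter(mp, zero_obj_cb, NULL);
+ */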
+
+/**
+ * Dump the status of the mempool to a file.
+ *
+ * @param f
+ * A pointer to a file for output
+ * @param mp
+ * A pointer to the mempool structure.
+ */
+void rte_mempool_dump(FILE *f, struct rte_mempool *mp);
+
+/**
+ * Create a user-owned mempool cache.
+ *
+ * This can be used by non-EAL threads to enable caching when they
+ * interact with a mempool.
+ *
+ * @param size
+ * The size of the mempool cache. See rte_mempool_create()'s cache_size
+ * parameter description for more information. The same limits and
+ * considerations apply here too.
+ * @param socket_id
+ * The socket identifier in the case of NUMA. The value can be
+ *   SOCKET_ID_ANY if there is no NUMA constraint for the reserved zone.
+ * @return
+ *   A pointer to the mempool cache on success, or NULL on error with
+ *   rte_errno set appropriately.
+ */
+struct rte_mempool_cache *
+rte_mempool_cache_create(uint32_t size, int socket_id);
+
+/**
+ * Free a user-owned mempool cache.
+ *
+ * @param cache
+ * A pointer to the mempool cache.
+ */
+void
+rte_mempool_cache_free(struct rte_mempool_cache *cache);
+
+/**
+ * Get a pointer to the per-lcore default mempool cache.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param lcore_id
+ * The logical core id.
+ * @return
+ * A pointer to the mempool cache or NULL if disabled or non-EAL thread.
+ */
+static __rte_always_inline struct rte_mempool_cache *
+rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
+{
+ if (mp->cache_size == 0)
+ return NULL;
+
+ if (lcore_id >= RTE_MAX_LCORE)
+ return NULL;
+
+ rte_mempool_trace_default_cache(mp, lcore_id,
+ &mp->local_cache[lcore_id]);
+ return &mp->local_cache[lcore_id];
+}
+
+/**
+ * Flush a user-owned mempool cache to the specified mempool.
+ *
+ * @param cache
+ * A pointer to the mempool cache.
+ * @param mp
+ * A pointer to the mempool.
+ */
+static __rte_always_inline void
+rte_mempool_cache_flush(struct rte_mempool_cache *cache,
+ struct rte_mempool *mp)
+{
+ if (cache == NULL)
+ cache = rte_mempool_default_cache(mp, rte_lcore_id());
+ if (cache == NULL || cache->len == 0)
+ return;
+ rte_mempool_trace_cache_flush(cache, mp);
+ rte_mempool_ops_enqueue_bulk(mp, cache->objs, cache->len);
+ cache->len = 0;
+}
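+
+/*
+ * Illustrative sketch of a user-owned cache as used by a non-EAL thread,
+ * relying on the generic get/put functions declared later in this file.
+ * The cache size of 128 and the burst of 32 objects are example values;
+ * real code would use the objects between the get and the put.
+ *
+ *	struct rte_mempool_cache *cache;
+ *	void *objs[32];
+ *
+ *	cache = rte_mempool_cache_create(128, SOCKET_ID_ANY);
+ *	if (cache != NULL) {
+ *		if (rte_mempool_generic_get(mp, objs, 32, cache) == 0)
+ *			rte_mempool_generic_put(mp, objs, 32, cache);
+ *		rte_mempool_cache_flush(cache, mp);
+ *		rte_mempool_cache_free(cache);
+ *	}
+ */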
+
+/**
+ * @internal Put several objects back in the mempool; used internally.
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects).
+ * @param n
+ * The number of objects to store back in the mempool, must be strictly
+ * positive.
+ * @param cache
+ * A pointer to a mempool cache structure. May be NULL if not needed.
+ */
+static __rte_always_inline void
+__mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
+ unsigned int n, struct rte_mempool_cache *cache)
+{
+ void **cache_objs;
+
+	/* Increment stats now; adding to the mempool always succeeds. */
+ __MEMPOOL_STAT_ADD(mp, put, n);
+
+	/* No cache provided, or the put would overflow the cache memory */
+ if (unlikely(cache == NULL || n > RTE_MEMPOOL_CACHE_MAX_SIZE))
+ goto ring_enqueue;
+
+ cache_objs = &cache->objs[cache->len];
+
+ /*
+	 * The cache follows this algorithm:
+	 *   1. Add the objects to the cache.
+	 *   2. If the cache length crosses the flush threshold, flush
+	 *      everything above the nominal cache size back to the ring.
+ */
+
+ /* Add elements back into the cache */
+ rte_memcpy(&cache_objs[0], obj_table, sizeof(void *) * n);
+
+ cache->len += n;
+
+ if (cache->len >= cache->flushthresh) {
+ rte_mempool_ops_enqueue_bulk(mp, &cache->objs[cache->size],
+ cache->len - cache->size);
+ cache->len = cache->size;
+ }
+
+ return;
+
+ring_enqueue:
+
+ /* push remaining objects in ring */
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ if (rte_mempool_ops_enqueue_bulk(mp, obj_table, n) < 0)
+ rte_panic("cannot put objects in mempool\n");
+#else
+ rte_mempool_ops_enqueue_bulk(mp, obj_table, n);
+#endif
+}
+
+
+/**
+ * Put several objects back in the mempool.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects).
+ * @param n
+ * The number of objects to add in the mempool from the obj_table.
+ * @param cache
+ * A pointer to a mempool cache structure. May be NULL if not needed.
+ */
+static __rte_always_inline void
+rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
+ unsigned int n, struct rte_mempool_cache *cache)
+{
+ rte_mempool_trace_generic_put(mp, obj_table, n, cache);
+ __mempool_check_cookies(mp, obj_table, n, 0);
+ __mempool_generic_put(mp, obj_table, n, cache);
+}
+
+/**
+ * Put several objects back in the mempool.
+ *
+ * This function calls the multi-producer or the single-producer
+ * version depending on the default behavior that was specified at
+ * mempool creation time (see flags).
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects).
+ * @param n
+ * The number of objects to add in the mempool from obj_table.
+ */
+static __rte_always_inline void
+rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
+ unsigned int n)
+{
+ struct rte_mempool_cache *cache;
+ cache = rte_mempool_default_cache(mp, rte_lcore_id());
+ rte_mempool_trace_put_bulk(mp, obj_table, n, cache);
+ rte_mempool_generic_put(mp, obj_table, n, cache);
+}
+
+/**
+ * Put one object back in the mempool.
+ *
+ * This function calls the multi-producer or the single-producer
+ * version depending on the default behavior that was specified at
+ * mempool creation time (see flags).
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj
+ * A pointer to the object to be added.
+ */
+static __rte_always_inline void
+rte_mempool_put(struct rte_mempool *mp, void *obj)
+{
+ rte_mempool_put_bulk(mp, &obj, 1);
+}
+
+/**
+ * @internal Get several objects from the mempool; used internally.
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects).
+ * @param n
+ * The number of objects to get, must be strictly positive.
+ * @param cache
+ * A pointer to a mempool cache structure. May be NULL if not needed.
+ * @return
+ * - >=0: Success; number of objects supplied.
+ * - <0: Error; code of ring dequeue function.
+ */
+static __rte_always_inline int
+__mempool_generic_get(struct rte_mempool *mp, void **obj_table,
+ unsigned int n, struct rte_mempool_cache *cache)
+{
+ int ret;
+ uint32_t index, len;
+ void **cache_objs;
+
+ /* No cache provided or cannot be satisfied from cache */
+ if (unlikely(cache == NULL || n >= cache->size))
+ goto ring_dequeue;
+
+ cache_objs = cache->objs;
+
+ /* Can this be satisfied from the cache? */
+ if (cache->len < n) {
+ /* No. Backfill the cache first, and then fill from it */
+		/* How many do we require, i.e. the number needed to refill
+		 * the cache plus the request itself.
+		 */
+		uint32_t req = n + (cache->size - cache->len);
+
+		ret = rte_mempool_ops_dequeue_bulk(mp,
+			&cache->objs[cache->len], req);
+ if (unlikely(ret < 0)) {
+ /*
+ * In the off chance that we are buffer constrained,
+ * where we are not able to allocate cache + n, go to
+ * the ring directly. If that fails, we are truly out of
+ * buffers.
+ */
+ goto ring_dequeue;
+ }
+
+ cache->len += req;
+ }
+
+ /* Now fill in the response ... */
+ for (index = 0, len = cache->len - 1; index < n; ++index, len--, obj_table++)
+ *obj_table = cache_objs[len];
+
+ cache->len -= n;
+
+ __MEMPOOL_STAT_ADD(mp, get_success, n);
+
+ return 0;
+
+ring_dequeue:
+
+ /* get remaining objects from ring */
+ ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, n);
+
+ if (ret < 0)
+ __MEMPOOL_STAT_ADD(mp, get_fail, n);
+ else
+ __MEMPOOL_STAT_ADD(mp, get_success, n);
+
+ return ret;
+}
+
+/**
+ * Get several objects from the mempool.
+ *
+ * If cache is enabled, objects will be retrieved first from cache,
+ * subsequently from the common pool. Note that it can return -ENOENT when
+ * the local cache and common pool are empty, even if caches from other
+ * lcores are full.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects) that will be filled.
+ * @param n
+ * The number of objects to get from mempool to obj_table.
+ * @param cache
+ * A pointer to a mempool cache structure. May be NULL if not needed.
+ * @return
+ * - 0: Success; objects taken.
+ * - -ENOENT: Not enough entries in the mempool; no object is retrieved.
+ */
+static __rte_always_inline int
+rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table,
+ unsigned int n, struct rte_mempool_cache *cache)
+{
+ int ret;
+ ret = __mempool_generic_get(mp, obj_table, n, cache);
+ if (ret == 0)
+ __mempool_check_cookies(mp, obj_table, n, 1);
+ rte_mempool_trace_generic_get(mp, obj_table, n, cache);
+ return ret;
+}
+
+/**
+ * Get several objects from the mempool.
+ *
+ * This function calls the multi-consumer or the single-consumer
+ * version, depending on the default behavior that was specified at
+ * mempool creation time (see flags).
+ *
+ * If cache is enabled, objects will be retrieved first from cache,
+ * subsequently from the common pool. Note that it can return -ENOENT when
+ * the local cache and common pool are empty, even if caches from other
+ * lcores are full.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects) that will be filled.
+ * @param n
+ * The number of objects to get from the mempool to obj_table.
+ * @return
+ * - 0: Success; objects taken
+ * - -ENOENT: Not enough entries in the mempool; no object is retrieved.
+ */
+static __rte_always_inline int
+rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n)
+{
+ struct rte_mempool_cache *cache;
+ cache = rte_mempool_default_cache(mp, rte_lcore_id());
+ rte_mempool_trace_get_bulk(mp, obj_table, n, cache);
+ return rte_mempool_generic_get(mp, obj_table, n, cache);
+}
+
+/**
+ * Get one object from the mempool.
+ *
+ * This function calls the multi-consumer or the single-consumer
+ * version, depending on the default behavior that was specified at
+ * mempool creation (see flags).
+ *
+ * If cache is enabled, objects will be retrieved first from cache,
+ * subsequently from the common pool. Note that it can return -ENOENT when
+ * the local cache and common pool are empty, even if caches from other
+ * lcores are full.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj_p
+ * A pointer to a void * pointer (object) that will be filled.
+ * @return
+ * - 0: Success; objects taken.
+ * - -ENOENT: Not enough entries in the mempool; no object is retrieved.
+ */
+static __rte_always_inline int
+rte_mempool_get(struct rte_mempool *mp, void **obj_p)
+{
+ return rte_mempool_get_bulk(mp, obj_p, 1);
+}
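+
+/*
+ * Illustrative sketch of a bulk get/put cycle: either the whole burst is
+ * obtained or no object is taken at all. The function name and the burst
+ * size of 32 are example choices; real code would use the objects between
+ * the two calls.
+ *
+ *	static void
+ *	example_burst(struct rte_mempool *mp)
+ *	{
+ *		void *burst[32];
+ *
+ *		if (rte_mempool_get_bulk(mp, burst, 32) < 0)
+ *			return;
+ *		rte_mempool_put_bulk(mp, burst, 32);
+ *	}
+ */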
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get contiguous blocks of objects from the mempool.
+ *
+ * If cache is enabled, consider flushing it first, so that objects
+ * are reused as soon as possible.
+ *
+ * The application should check that the driver supports the operation
+ * by calling rte_mempool_ops_get_info() and checking that `contig_block_size`
+ * is not zero.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param first_obj_table
+ * A pointer to a pointer to the first object in each block.
+ * @param n
+ * The number of blocks to get from mempool.
+ * @return
+ * - 0: Success; blocks taken.
+ * - -ENOBUFS: Not enough entries in the mempool; no object is retrieved.
+ * - -EOPNOTSUPP: The mempool driver does not support block dequeue
+ */
+static __rte_always_inline int
+__rte_experimental
+rte_mempool_get_contig_blocks(struct rte_mempool *mp,
+ void **first_obj_table, unsigned int n)
+{
+ int ret;
+
+ ret = rte_mempool_ops_dequeue_contig_blocks(mp, first_obj_table, n);
+ if (ret == 0) {
+ __MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, get_success, n);
+ __mempool_contig_blocks_check_cookies(mp, first_obj_table, n,
+ 1);
+ } else {
+ __MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, get_fail, n);
+ }
+
+ rte_mempool_trace_get_contig_blocks(mp, first_obj_table, n);
+ return ret;
+}
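+
+/*
+ * Illustrative sketch of the recommended capability check before using
+ * block dequeue; the request of 4 blocks is an example value. On success,
+ * each entry of first_objs points to the first of info.contig_block_size
+ * contiguous objects.
+ *
+ *	struct rte_mempool_info info;
+ *	void *first_objs[4];
+ *	int ret;
+ *
+ *	ret = rte_mempool_ops_get_info(mp, &info);
+ *	if (ret == 0 && info.contig_block_size > 0)
+ *		ret = rte_mempool_get_contig_blocks(mp, first_objs, 4);
+ */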
+
+/**
+ * Return the number of entries in the mempool.
+ *
+ * When cache is enabled, this function has to browse the cache of
+ * every lcore, so it should not be used in a data path, but only for
+ * debug purposes. User-owned mempool caches are not accounted for.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @return
+ * The number of entries in the mempool.
+ */
+unsigned int rte_mempool_avail_count(const struct rte_mempool *mp);
+
+/**
+ * Return the number of elements which have been allocated from the mempool
+ *
+ * When cache is enabled, this function has to browse the cache of
+ * every lcore, so it should not be used in a data path, but only for
+ * debug purposes.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @return
+ *   The number of entries allocated from the mempool (i.e. in use).
+ */
+unsigned int
+rte_mempool_in_use_count(const struct rte_mempool *mp);
+
+/**
+ * Test if the mempool is full.
+ *
+ * When cache is enabled, this function has to browse the cache of every
+ * lcore, so it should not be used in a data path, but only for debug
+ * purposes. User-owned mempool caches are not accounted for.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @return
+ * - 1: The mempool is full.
+ * - 0: The mempool is not full.
+ */
+static inline int
+rte_mempool_full(const struct rte_mempool *mp)
+{
+ return rte_mempool_avail_count(mp) == mp->size;
+}
+
+/**
+ * Test if the mempool is empty.
+ *
+ * When cache is enabled, this function has to browse the cache of every
+ * lcore, so it should not be used in a data path, but only for debug
+ * purposes. User-owned mempool caches are not accounted for.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @return
+ * - 1: The mempool is empty.
+ * - 0: The mempool is not empty.
+ */
+static inline int
+rte_mempool_empty(const struct rte_mempool *mp)
+{
+ return rte_mempool_avail_count(mp) == 0;
+}
+
+/**
+ * Return the IO address of elt, which is an element of the pool mp.
+ *
+ * @param elt
+ * A pointer (virtual address) to the element of the pool.
+ * @return
+ * The IO address of the elt element.
+ * If the mempool was created with MEMPOOL_F_NO_IOVA_CONTIG, the
+ * returned value is RTE_BAD_IOVA.
+ */
+static inline rte_iova_t
+rte_mempool_virt2iova(const void *elt)
+{
+ const struct rte_mempool_objhdr *hdr;
+ hdr = (const struct rte_mempool_objhdr *)RTE_PTR_SUB(elt,
+ sizeof(*hdr));
+ return hdr->iova;
+}
+
+/**
+ * Check the consistency of mempool objects.
+ *
+ * Verify the coherency of fields in the mempool structure. Also check
+ * that the cookies of mempool objects (even the ones that are not
+ * present in pool) have a correct value. If not, a panic will occur.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ */
+void rte_mempool_audit(struct rte_mempool *mp);
+
+/**
+ * Return a pointer to the private data in an mempool structure.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @return
+ * A pointer to the private data.
+ */
+static inline void *rte_mempool_get_priv(struct rte_mempool *mp)
+{
+ return (char *)mp +
+ MEMPOOL_HEADER_SIZE(mp, mp->cache_size);
+}
+
+/**
+ * Dump the status of all mempools on the console
+ *
+ * @param f
+ * A pointer to a file for output
+ */
+void rte_mempool_list_dump(FILE *f);
+
+/**
+ * Search a mempool from its name
+ *
+ * @param name
+ * The name of the mempool.
+ * @return
+ *   The pointer to the mempool matching the name, or NULL if not found,
+ *   in which case rte_errno is set appropriately. Possible rte_errno
+ *   values include:
+ *    - ENOENT - the required entry is not available.
+ *
+ */
+struct rte_mempool *rte_mempool_lookup(const char *name);
+
+/**
+ * Get the header, trailer and total size of a mempool element.
+ *
+ * Given a desired size of the mempool element and mempool flags,
+ * calculates header, trailer, body and total sizes of the mempool object.
+ *
+ * @param elt_size
+ * The size of each element, without header and trailer.
+ * @param flags
+ *   The flags used for the mempool creation.
+ *   Consult rte_mempool_create() for more information about possible values.
+ * @param sz
+ *   The calculated detailed size of the mempool object. May be NULL.
+ * @return
+ * Total size of the mempool object.
+ */
+uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
+ struct rte_mempool_objsz *sz);
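+
+/*
+ * Illustrative sketch: querying the per-object overhead of a 256-byte
+ * element created with default flags (the element size is an example
+ * value). The returned total equals
+ * sz.header_size + sz.elt_size + sz.trailer_size.
+ *
+ *	struct rte_mempool_objsz sz;
+ *	uint32_t total = rte_mempool_calc_obj_size(256, 0, &sz);
+ */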
+
+/**
+ * Walk list of all memory pools
+ *
+ * @param func
+ * Iterator function
+ * @param arg
+ * Argument passed to iterator
+ */
+void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg),
+ void *arg);
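+
+/*
+ * Illustrative sketch of a walk callback (the callback name is an example
+ * choice): dump every mempool in the system to the given file.
+ *
+ *	static void
+ *	dump_one_cb(struct rte_mempool *mp, void *arg)
+ *	{
+ *		rte_mempool_dump((FILE *)arg, mp);
+ *	}
+ *
+ *	rte_mempool_walk(dump_one_cb, stdout);
+ */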
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * @internal Get page size used for mempool object allocation.
+ * This function is internal to mempool library and mempool drivers.
+ */
+__rte_experimental
+int
+rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MEMPOOL_H_ */
diff --git a/src/spdk/dpdk/lib/librte_mempool/rte_mempool_ops.c b/src/spdk/dpdk/lib/librte_mempool/rte_mempool_ops.c
new file mode 100644
index 000000000..5e2266778
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_mempool/rte_mempool_ops.c
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016 6WIND S.A.
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#include <rte_string_fns.h>
+#include <rte_mempool.h>
+#include <rte_errno.h>
+#include <rte_dev.h>
+
+#include "rte_mempool_trace.h"
+
+/* indirect jump table to support external memory pools. */
+struct rte_mempool_ops_table rte_mempool_ops_table = {
+ .sl = RTE_SPINLOCK_INITIALIZER,
+ .num_ops = 0
+};
+
+/* add a new ops struct in rte_mempool_ops_table, return its index. */
+int
+rte_mempool_register_ops(const struct rte_mempool_ops *h)
+{
+ struct rte_mempool_ops *ops;
+ int16_t ops_index;
+
+ rte_spinlock_lock(&rte_mempool_ops_table.sl);
+
+ if (rte_mempool_ops_table.num_ops >=
+ RTE_MEMPOOL_MAX_OPS_IDX) {
+ rte_spinlock_unlock(&rte_mempool_ops_table.sl);
+ RTE_LOG(ERR, MEMPOOL,
+ "Maximum number of mempool ops structs exceeded\n");
+ return -ENOSPC;
+ }
+
+ if (h->alloc == NULL || h->enqueue == NULL ||
+ h->dequeue == NULL || h->get_count == NULL) {
+ rte_spinlock_unlock(&rte_mempool_ops_table.sl);
+ RTE_LOG(ERR, MEMPOOL,
+ "Missing callback while registering mempool ops\n");
+ return -EINVAL;
+ }
+
+ if (strlen(h->name) >= sizeof(ops->name) - 1) {
+ rte_spinlock_unlock(&rte_mempool_ops_table.sl);
+ RTE_LOG(DEBUG, EAL, "%s(): mempool_ops <%s>: name too long\n",
+ __func__, h->name);
+ rte_errno = EEXIST;
+ return -EEXIST;
+ }
+
+ ops_index = rte_mempool_ops_table.num_ops++;
+ ops = &rte_mempool_ops_table.ops[ops_index];
+ strlcpy(ops->name, h->name, sizeof(ops->name));
+ ops->alloc = h->alloc;
+ ops->free = h->free;
+ ops->enqueue = h->enqueue;
+ ops->dequeue = h->dequeue;
+ ops->get_count = h->get_count;
+ ops->calc_mem_size = h->calc_mem_size;
+ ops->populate = h->populate;
+ ops->get_info = h->get_info;
+ ops->dequeue_contig_blocks = h->dequeue_contig_blocks;
+
+ rte_spinlock_unlock(&rte_mempool_ops_table.sl);
+
+ return ops_index;
+}
+
+/* wrapper to allocate an external mempool's private (pool) data. */
+int
+rte_mempool_ops_alloc(struct rte_mempool *mp)
+{
+ struct rte_mempool_ops *ops;
+
+ rte_mempool_trace_ops_alloc(mp);
+ ops = rte_mempool_get_ops(mp->ops_index);
+ return ops->alloc(mp);
+}
+
+/* wrapper to free an external mempool's private (pool) data. */
+void
+rte_mempool_ops_free(struct rte_mempool *mp)
+{
+ struct rte_mempool_ops *ops;
+
+ rte_mempool_trace_ops_free(mp);
+ ops = rte_mempool_get_ops(mp->ops_index);
+ if (ops->free == NULL)
+ return;
+ ops->free(mp);
+}
+
+/* wrapper to get available objects in an external mempool. */
+unsigned int
+rte_mempool_ops_get_count(const struct rte_mempool *mp)
+{
+ struct rte_mempool_ops *ops;
+
+ ops = rte_mempool_get_ops(mp->ops_index);
+ return ops->get_count(mp);
+}
+
+/* wrapper to calculate the memory size required to store a given number
+ * of objects
+ */
+ssize_t
+rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
+ uint32_t obj_num, uint32_t pg_shift,
+ size_t *min_chunk_size, size_t *align)
+{
+ struct rte_mempool_ops *ops;
+
+ ops = rte_mempool_get_ops(mp->ops_index);
+
+ if (ops->calc_mem_size == NULL)
+ return rte_mempool_op_calc_mem_size_default(mp, obj_num,
+ pg_shift, min_chunk_size, align);
+
+ return ops->calc_mem_size(mp, obj_num, pg_shift, min_chunk_size, align);
+}
+
+/* wrapper to populate memory pool objects using provided memory chunk */
+int
+rte_mempool_ops_populate(struct rte_mempool *mp, unsigned int max_objs,
+ void *vaddr, rte_iova_t iova, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb,
+ void *obj_cb_arg)
+{
+ struct rte_mempool_ops *ops;
+
+ ops = rte_mempool_get_ops(mp->ops_index);
+
+ rte_mempool_trace_ops_populate(mp, max_objs, vaddr, iova, len, obj_cb,
+ obj_cb_arg);
+ if (ops->populate == NULL)
+ return rte_mempool_op_populate_default(mp, max_objs, vaddr,
+ iova, len, obj_cb,
+ obj_cb_arg);
+
+ return ops->populate(mp, max_objs, vaddr, iova, len, obj_cb,
+ obj_cb_arg);
+}
+
+/* wrapper to get additional mempool info */
+int
+rte_mempool_ops_get_info(const struct rte_mempool *mp,
+ struct rte_mempool_info *info)
+{
+ struct rte_mempool_ops *ops;
+
+ ops = rte_mempool_get_ops(mp->ops_index);
+
+ RTE_FUNC_PTR_OR_ERR_RET(ops->get_info, -ENOTSUP);
+ return ops->get_info(mp, info);
+}
+
+
+/* sets mempool ops previously registered by rte_mempool_register_ops. */
+int
+rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name,
+ void *pool_config)
+{
+ struct rte_mempool_ops *ops = NULL;
+ unsigned i;
+
+ /* too late, the mempool is already populated. */
+ if (mp->flags & MEMPOOL_F_POOL_CREATED)
+ return -EEXIST;
+
+ for (i = 0; i < rte_mempool_ops_table.num_ops; i++) {
+ if (!strcmp(name,
+ rte_mempool_ops_table.ops[i].name)) {
+ ops = &rte_mempool_ops_table.ops[i];
+ break;
+ }
+ }
+
+ if (ops == NULL)
+ return -EINVAL;
+
+ mp->ops_index = i;
+ mp->pool_config = pool_config;
+ rte_mempool_trace_set_ops_byname(mp, name, pool_config);
+ return 0;
+}
diff --git a/src/spdk/dpdk/lib/librte_mempool/rte_mempool_ops_default.c b/src/spdk/dpdk/lib/librte_mempool/rte_mempool_ops_default.c
new file mode 100644
index 000000000..22fccf9d7
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_mempool/rte_mempool_ops_default.c
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016 6WIND S.A.
+ * Copyright(c) 2018 Solarflare Communications Inc.
+ */
+
+#include <rte_mempool.h>
+
+ssize_t
+rte_mempool_op_calc_mem_size_helper(const struct rte_mempool *mp,
+ uint32_t obj_num, uint32_t pg_shift,
+ size_t chunk_reserve,
+ size_t *min_chunk_size, size_t *align)
+{
+ size_t total_elt_sz;
+ size_t obj_per_page, pg_sz, objs_in_last_page;
+ size_t mem_size;
+
+ total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
+ if (total_elt_sz == 0) {
+ mem_size = 0;
+ } else if (pg_shift == 0) {
+ mem_size = total_elt_sz * obj_num + chunk_reserve;
+ } else {
+ pg_sz = (size_t)1 << pg_shift;
+ if (chunk_reserve >= pg_sz)
+ return -EINVAL;
+ obj_per_page = (pg_sz - chunk_reserve) / total_elt_sz;
+ if (obj_per_page == 0) {
+ /*
+ * Note that if object size is bigger than page size,
+ * then it is assumed that pages are grouped in subsets
+			 * of physically contiguous pages big enough to store
+ * at least one object.
+ */
+ mem_size = RTE_ALIGN_CEIL(total_elt_sz + chunk_reserve,
+ pg_sz) * obj_num;
+ } else {
+ /* In the best case, the allocator will return a
+ * page-aligned address. For example, with 5 objs,
+ * the required space is as below:
+ * | page0 | page1 | page2 (last) |
+ * |obj0 |obj1 |xxx|obj2 |obj3 |xxx|obj4|
+ * <------------- mem_size ------------->
+ */
+ objs_in_last_page = ((obj_num - 1) % obj_per_page) + 1;
+ /* room required for the last page */
+ mem_size = objs_in_last_page * total_elt_sz +
+ chunk_reserve;
+ /* room required for other pages */
+ mem_size += ((obj_num - objs_in_last_page) /
+ obj_per_page) << pg_shift;
+
+ /* In the worst case, the allocator returns a
+ * non-aligned pointer, wasting up to
+ * total_elt_sz. Add a margin for that.
+ */
+ mem_size += total_elt_sz - 1;
+ }
+ }
+
+ *min_chunk_size = total_elt_sz;
+ *align = RTE_MEMPOOL_ALIGN;
+
+ return mem_size;
+}
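+
+/*
+ * Worked example (illustrative values only): with total_elt_sz = 1152,
+ * pg_shift = 12 (4 KiB pages), chunk_reserve = 0 and obj_num = 10,
+ * obj_per_page = 4096 / 1152 = 3 and objs_in_last_page = ((10 - 1) % 3) + 1
+ * = 1, so mem_size = 1 * 1152 + ((10 - 1) / 3) * 4096 + (1152 - 1)
+ * = 1152 + 12288 + 1151 = 14591 bytes, with *min_chunk_size = 1152 and
+ * *align = RTE_MEMPOOL_ALIGN.
+ */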
+
+ssize_t
+rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
+ uint32_t obj_num, uint32_t pg_shift,
+ size_t *min_chunk_size, size_t *align)
+{
+ return rte_mempool_op_calc_mem_size_helper(mp, obj_num, pg_shift,
+ 0, min_chunk_size, align);
+}
+
+/* Returns -1 if object crosses a page boundary, else returns 0 */
+static int
+check_obj_bounds(char *obj, size_t pg_sz, size_t elt_sz)
+{
+ if (pg_sz == 0)
+ return 0;
+ if (elt_sz > pg_sz)
+ return 0;
+ if (RTE_PTR_ALIGN(obj, pg_sz) != RTE_PTR_ALIGN(obj + elt_sz - 1, pg_sz))
+ return -1;
+ return 0;
+}
+
+int
+rte_mempool_op_populate_helper(struct rte_mempool *mp, unsigned int flags,
+ unsigned int max_objs, void *vaddr, rte_iova_t iova,
+ size_t len, rte_mempool_populate_obj_cb_t *obj_cb,
+ void *obj_cb_arg)
+{
+ char *va = vaddr;
+ size_t total_elt_sz, pg_sz;
+ size_t off;
+ unsigned int i;
+ void *obj;
+ int ret;
+
+ ret = rte_mempool_get_page_size(mp, &pg_sz);
+ if (ret < 0)
+ return ret;
+
+ total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
+
+ if (flags & RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ)
+ off = total_elt_sz - (((uintptr_t)(va - 1) % total_elt_sz) + 1);
+ else
+ off = 0;
+ for (i = 0; i < max_objs; i++) {
+ /* avoid objects to cross page boundaries */
+ if (check_obj_bounds(va + off, pg_sz, total_elt_sz) < 0) {
+ off += RTE_PTR_ALIGN_CEIL(va + off, pg_sz) - (va + off);
+ if (flags & RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ)
+ off += total_elt_sz -
+ (((uintptr_t)(va + off - 1) %
+ total_elt_sz) + 1);
+ }
+
+ if (off + total_elt_sz > len)
+ break;
+
+ off += mp->header_size;
+ obj = va + off;
+ obj_cb(mp, obj_cb_arg, obj,
+ (iova == RTE_BAD_IOVA) ? RTE_BAD_IOVA : (iova + off));
+ rte_mempool_ops_enqueue_bulk(mp, &obj, 1);
+ off += mp->elt_size + mp->trailer_size;
+ }
+
+ return i;
+}
+
+int
+rte_mempool_op_populate_default(struct rte_mempool *mp, unsigned int max_objs,
+ void *vaddr, rte_iova_t iova, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb,
+ void *obj_cb_arg)
+{
+ return rte_mempool_op_populate_helper(mp, 0, max_objs, vaddr, iova,
+ len, obj_cb, obj_cb_arg);
+}
diff --git a/src/spdk/dpdk/lib/librte_mempool/rte_mempool_trace.h b/src/spdk/dpdk/lib/librte_mempool/rte_mempool_trace.h
new file mode 100644
index 000000000..e776df0a6
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_mempool/rte_mempool_trace.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+
+#ifndef _RTE_MEMPOOL_TRACE_H_
+#define _RTE_MEMPOOL_TRACE_H_
+
+/**
+ * @file
+ *
+ * APIs for mempool trace support
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "rte_mempool.h"
+
+#include <rte_memzone.h>
+#include <rte_trace_point.h>
+
+RTE_TRACE_POINT(
+ rte_mempool_trace_create,
+ RTE_TRACE_POINT_ARGS(const char *name, uint32_t nb_elts,
+ uint32_t elt_size, uint32_t cache_size,
+ uint32_t private_data_size, void *mp_init, void *mp_init_arg,
+ void *obj_init, void *obj_init_arg, uint32_t flags,
+ struct rte_mempool *mempool),
+ rte_trace_point_emit_string(name);
+ rte_trace_point_emit_u32(nb_elts);
+ rte_trace_point_emit_u32(elt_size);
+ rte_trace_point_emit_u32(cache_size);
+ rte_trace_point_emit_u32(private_data_size);
+ rte_trace_point_emit_ptr(mp_init);
+ rte_trace_point_emit_ptr(mp_init_arg);
+ rte_trace_point_emit_ptr(obj_init);
+ rte_trace_point_emit_ptr(obj_init_arg);
+ rte_trace_point_emit_u32(flags);
+ rte_trace_point_emit_ptr(mempool);
+ rte_trace_point_emit_i32(mempool->ops_index);
+)
+
+RTE_TRACE_POINT(
+ rte_mempool_trace_create_empty,
+ RTE_TRACE_POINT_ARGS(const char *name, uint32_t nb_elts,
+ uint32_t elt_size, uint32_t cache_size,
+ uint32_t private_data_size, uint32_t flags,
+ struct rte_mempool *mempool),
+ rte_trace_point_emit_string(name);
+ rte_trace_point_emit_u32(nb_elts);
+ rte_trace_point_emit_u32(elt_size);
+ rte_trace_point_emit_u32(cache_size);
+ rte_trace_point_emit_u32(private_data_size);
+ rte_trace_point_emit_u32(flags);
+ rte_trace_point_emit_ptr(mempool);
+ rte_trace_point_emit_i32(mempool->ops_index);
+)
+
+RTE_TRACE_POINT(
+ rte_mempool_trace_free,
+ RTE_TRACE_POINT_ARGS(struct rte_mempool *mempool),
+ rte_trace_point_emit_ptr(mempool);
+ rte_trace_point_emit_string(mempool->name);
+)
+
+RTE_TRACE_POINT(
+ rte_mempool_trace_populate_iova,
+ RTE_TRACE_POINT_ARGS(struct rte_mempool *mempool, void *vaddr,
+ rte_iova_t iova, size_t len, void *free_cb, void *opaque),
+ rte_trace_point_emit_ptr(mempool);
+ rte_trace_point_emit_string(mempool->name);
+ rte_trace_point_emit_ptr(vaddr);
+ rte_trace_point_emit_u64(iova);
+ rte_trace_point_emit_long(len);
+ rte_trace_point_emit_ptr(free_cb);
+ rte_trace_point_emit_ptr(opaque);
+)
+
+RTE_TRACE_POINT(
+ rte_mempool_trace_populate_virt,
+ RTE_TRACE_POINT_ARGS(struct rte_mempool *mempool, void *addr,
+ size_t len, size_t pg_sz, void *free_cb, void *opaque),
+ rte_trace_point_emit_ptr(mempool);
+ rte_trace_point_emit_string(mempool->name);
+ rte_trace_point_emit_ptr(addr);
+ rte_trace_point_emit_long(len);
+ rte_trace_point_emit_long(pg_sz);
+ rte_trace_point_emit_ptr(free_cb);
+ rte_trace_point_emit_ptr(opaque);
+)
+
+RTE_TRACE_POINT(
+ rte_mempool_trace_populate_default,
+ RTE_TRACE_POINT_ARGS(struct rte_mempool *mempool),
+ rte_trace_point_emit_ptr(mempool);
+ rte_trace_point_emit_string(mempool->name);
+)
+
+RTE_TRACE_POINT(
+ rte_mempool_trace_populate_anon,
+ RTE_TRACE_POINT_ARGS(struct rte_mempool *mempool),
+ rte_trace_point_emit_ptr(mempool);
+ rte_trace_point_emit_string(mempool->name);
+)
+
+RTE_TRACE_POINT(
+ rte_mempool_trace_cache_create,
+ RTE_TRACE_POINT_ARGS(uint32_t size, int socket_id,
+ struct rte_mempool_cache *cache),
+ rte_trace_point_emit_u32(size);
+ rte_trace_point_emit_i32(socket_id);
+ rte_trace_point_emit_ptr(cache);
+ rte_trace_point_emit_u32(cache->len);
+ rte_trace_point_emit_u32(cache->flushthresh);
+)
+
+RTE_TRACE_POINT(
+ rte_mempool_trace_cache_free,
+ RTE_TRACE_POINT_ARGS(void *cache),
+ rte_trace_point_emit_ptr(cache);
+)
+
+RTE_TRACE_POINT(
+ rte_mempool_trace_get_page_size,
+ RTE_TRACE_POINT_ARGS(struct rte_mempool *mempool, size_t pg_sz),
+ rte_trace_point_emit_ptr(mempool);
+ rte_trace_point_emit_string(mempool->name);
+ rte_trace_point_emit_long(pg_sz);
+)
+
+RTE_TRACE_POINT(
+ rte_mempool_trace_ops_populate,
+ RTE_TRACE_POINT_ARGS(struct rte_mempool *mempool, uint32_t max_objs,
+ void *vaddr, uint64_t iova, size_t len, void *obj_cb,
+ void *obj_cb_arg),
+ rte_trace_point_emit_ptr(mempool);
+ rte_trace_point_emit_string(mempool->name);
+ rte_trace_point_emit_u32(max_objs);
+ rte_trace_point_emit_ptr(vaddr);
+ rte_trace_point_emit_u64(iova);
+ rte_trace_point_emit_long(len);
+ rte_trace_point_emit_ptr(obj_cb);
+ rte_trace_point_emit_ptr(obj_cb_arg);
+)
+
+RTE_TRACE_POINT(
+ rte_mempool_trace_ops_alloc,
+ RTE_TRACE_POINT_ARGS(struct rte_mempool *mempool),
+ rte_trace_point_emit_ptr(mempool);
+ rte_trace_point_emit_string(mempool->name);
+)
+
+RTE_TRACE_POINT(
+ rte_mempool_trace_ops_free,
+ RTE_TRACE_POINT_ARGS(struct rte_mempool *mempool),
+ rte_trace_point_emit_ptr(mempool);
+ rte_trace_point_emit_string(mempool->name);
+)
+
+RTE_TRACE_POINT(
+ rte_mempool_trace_set_ops_byname,
+ RTE_TRACE_POINT_ARGS(struct rte_mempool *mempool, const char *name,
+ void *pool_config),
+ rte_trace_point_emit_ptr(mempool);
+ rte_trace_point_emit_string(mempool->name);
+ rte_trace_point_emit_string(name);
+ rte_trace_point_emit_ptr(pool_config);
+)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MEMPOOL_TRACE_H_ */
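
Each RTE_TRACE_POINT() block above only generates the inline emitter function; the backing rte_trace_point_t object comes from the matching RTE_TRACE_POINT_DEFINE() in mempool_trace_points.c, and the tracepoint is registered under a dotted name from an RTE_INIT() constructor in that same file. Below is a minimal sketch of the same three-step pattern for a new tracepoint, assuming this revision's rte_trace_point_register.h API; the my_example_* and lib.example.alloc names are invented for illustration.

/* example_trace.h (hypothetical) */
#include <rte_trace_point.h>

RTE_TRACE_POINT(
	my_example_trace_alloc,
	RTE_TRACE_POINT_ARGS(void *obj, uint32_t len),
	rte_trace_point_emit_ptr(obj);
	rte_trace_point_emit_u32(len);
)

/* example_trace_points.c (hypothetical) */
#include <rte_common.h>
#include <rte_trace_point_register.h>

#include "example_trace.h"

RTE_TRACE_POINT_DEFINE(my_example_trace_alloc);

RTE_INIT(example_trace_init)
{
	RTE_TRACE_POINT_REGISTER(my_example_trace_alloc, lib.example.alloc);
}

/* call site, e.g. inside the library's allocation path */
static __rte_always_inline void
example_alloc_hook(void *obj, uint32_t len)
{
	my_example_trace_alloc(obj, len);
}

At run time the event is emitted only once tracing is enabled for that tracepoint, typically through the EAL --trace option.
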
diff --git a/src/spdk/dpdk/lib/librte_mempool/rte_mempool_trace_fp.h b/src/spdk/dpdk/lib/librte_mempool/rte_mempool_trace_fp.h
new file mode 100644
index 000000000..ed060e887
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_mempool/rte_mempool_trace_fp.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+
+#ifndef _RTE_MEMPOOL_TRACE_FP_H_
+#define _RTE_MEMPOOL_TRACE_FP_H_
+
+/**
+ * @file
+ *
+ * Mempool fast path API for trace support
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_trace_point.h>
+
+RTE_TRACE_POINT_FP(
+ rte_mempool_trace_ops_dequeue_bulk,
+ RTE_TRACE_POINT_ARGS(void *mempool, void **obj_table,
+ uint32_t nb_objs),
+ rte_trace_point_emit_ptr(mempool);
+ rte_trace_point_emit_ptr(obj_table);
+ rte_trace_point_emit_u32(nb_objs);
+)
+
+RTE_TRACE_POINT_FP(
+ rte_mempool_trace_ops_dequeue_contig_blocks,
+ RTE_TRACE_POINT_ARGS(void *mempool, void **first_obj_table,
+ uint32_t nb_objs),
+ rte_trace_point_emit_ptr(mempool);
+ rte_trace_point_emit_ptr(first_obj_table);
+ rte_trace_point_emit_u32(nb_objs);
+)
+
+RTE_TRACE_POINT_FP(
+ rte_mempool_trace_ops_enqueue_bulk,
+ RTE_TRACE_POINT_ARGS(void *mempool, void * const *obj_table,
+ uint32_t nb_objs),
+ rte_trace_point_emit_ptr(mempool);
+ rte_trace_point_emit_ptr(obj_table);
+ rte_trace_point_emit_u32(nb_objs);
+)
+
+RTE_TRACE_POINT_FP(
+ rte_mempool_trace_generic_put,
+ RTE_TRACE_POINT_ARGS(void *mempool, void * const *obj_table,
+ uint32_t nb_objs, void *cache),
+ rte_trace_point_emit_ptr(mempool);
+ rte_trace_point_emit_ptr(obj_table);
+ rte_trace_point_emit_u32(nb_objs);
+ rte_trace_point_emit_ptr(cache);
+)
+
+RTE_TRACE_POINT_FP(
+ rte_mempool_trace_put_bulk,
+ RTE_TRACE_POINT_ARGS(void *mempool, void * const *obj_table,
+ uint32_t nb_objs, void *cache),
+ rte_trace_point_emit_ptr(mempool);
+ rte_trace_point_emit_ptr(obj_table);
+ rte_trace_point_emit_u32(nb_objs);
+ rte_trace_point_emit_ptr(cache);
+)
+
+RTE_TRACE_POINT_FP(
+ rte_mempool_trace_generic_get,
+ RTE_TRACE_POINT_ARGS(void *mempool, void * const *obj_table,
+ uint32_t nb_objs, void *cache),
+ rte_trace_point_emit_ptr(mempool);
+ rte_trace_point_emit_ptr(obj_table);
+ rte_trace_point_emit_u32(nb_objs);
+ rte_trace_point_emit_ptr(cache);
+)
+
+RTE_TRACE_POINT_FP(
+ rte_mempool_trace_get_bulk,
+ RTE_TRACE_POINT_ARGS(void *mempool, void **obj_table,
+ uint32_t nb_objs, void *cache),
+ rte_trace_point_emit_ptr(mempool);
+ rte_trace_point_emit_ptr(obj_table);
+ rte_trace_point_emit_u32(nb_objs);
+ rte_trace_point_emit_ptr(cache);
+)
+
+RTE_TRACE_POINT_FP(
+ rte_mempool_trace_get_contig_blocks,
+ RTE_TRACE_POINT_ARGS(void *mempool, void **first_obj_table,
+ uint32_t nb_objs),
+ rte_trace_point_emit_ptr(mempool);
+ rte_trace_point_emit_ptr(first_obj_table);
+ rte_trace_point_emit_u32(nb_objs);
+)
+
+RTE_TRACE_POINT_FP(
+ rte_mempool_trace_default_cache,
+ RTE_TRACE_POINT_ARGS(void *mempool, uint32_t lcore_id,
+ void *default_cache),
+ rte_trace_point_emit_ptr(mempool);
+ rte_trace_point_emit_u32(lcore_id);
+ rte_trace_point_emit_ptr(default_cache);
+)
+
+RTE_TRACE_POINT_FP(
+ rte_mempool_trace_cache_flush,
+ RTE_TRACE_POINT_ARGS(void *cache, void *mempool),
+ rte_trace_point_emit_ptr(cache);
+ rte_trace_point_emit_ptr(mempool);
+)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MEMPOOL_TRACE_FP_H_ */
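
The difference from rte_mempool_trace.h is that these RTE_TRACE_POINT_FP() tracepoints sit on per-object fast paths (the inline get/put helpers in this patch's rte_mempool.h call them), so on top of the usual run-time enable check they are compiled out entirely unless fast-path tracing is enabled at build time. A small consumer-side sketch follows, assuming the RTE_ENABLE_TRACE_FP build knob of this DPDK generation and a pool created elsewhere.

#include <rte_mempool.h>

/* Recycle a burst of objects.  With fast-path tracing compiled in and the
 * mempool tracepoints enabled, the two calls below also emit the put/get
 * events declared above (the inline wrappers in rte_mempool.h invoke the
 * tracepoints); otherwise the RTE_TRACE_POINT_FP() bodies cost nothing.
 */
static void
recycle_burst(struct rte_mempool *mp, void **objs, unsigned int n)
{
	rte_mempool_put_bulk(mp, objs, n);

	if (rte_mempool_get_bulk(mp, objs, n) != 0)
		return;	/* pool empty; nothing more to do in this sketch */
}
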
diff --git a/src/spdk/dpdk/lib/librte_mempool/rte_mempool_version.map b/src/spdk/dpdk/lib/librte_mempool/rte_mempool_version.map
new file mode 100644
index 000000000..826a0b882
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_mempool/rte_mempool_version.map
@@ -0,0 +1,77 @@
+DPDK_20.0 {
+ global:
+
+ rte_mempool_audit;
+ rte_mempool_avail_count;
+ rte_mempool_cache_create;
+ rte_mempool_cache_free;
+ rte_mempool_calc_obj_size;
+ rte_mempool_check_cookies;
+ rte_mempool_contig_blocks_check_cookies;
+ rte_mempool_create;
+ rte_mempool_create_empty;
+ rte_mempool_dump;
+ rte_mempool_free;
+ rte_mempool_in_use_count;
+ rte_mempool_list_dump;
+ rte_mempool_lookup;
+ rte_mempool_mem_iter;
+ rte_mempool_obj_iter;
+ rte_mempool_op_calc_mem_size_default;
+ rte_mempool_op_populate_default;
+ rte_mempool_ops_table;
+ rte_mempool_populate_anon;
+ rte_mempool_populate_default;
+ rte_mempool_populate_iova;
+ rte_mempool_populate_virt;
+ rte_mempool_register_ops;
+ rte_mempool_set_ops_byname;
+ rte_mempool_walk;
+
+ local: *;
+};
+
+DPDK_21 {
+ global:
+
+ rte_mempool_populate_iova;
+ rte_mempool_populate_virt;
+} DPDK_20.0;
+
+EXPERIMENTAL {
+ global:
+
+ # added in 18.05
+ rte_mempool_ops_get_info;
+
+ # added in 19.11
+ rte_mempool_get_page_size;
+ rte_mempool_op_calc_mem_size_helper;
+ rte_mempool_op_populate_helper;
+
+ # added in 20.05
+ __rte_mempool_trace_ops_dequeue_bulk;
+ __rte_mempool_trace_ops_dequeue_contig_blocks;
+ __rte_mempool_trace_ops_enqueue_bulk;
+ __rte_mempool_trace_generic_put;
+ __rte_mempool_trace_put_bulk;
+ __rte_mempool_trace_generic_get;
+ __rte_mempool_trace_get_bulk;
+ __rte_mempool_trace_get_contig_blocks;
+ __rte_mempool_trace_create;
+ __rte_mempool_trace_create_empty;
+ __rte_mempool_trace_free;
+ __rte_mempool_trace_populate_iova;
+ __rte_mempool_trace_populate_virt;
+ __rte_mempool_trace_populate_default;
+ __rte_mempool_trace_populate_anon;
+ __rte_mempool_trace_cache_create;
+ __rte_mempool_trace_cache_free;
+ __rte_mempool_trace_default_cache;
+ __rte_mempool_trace_get_page_size;
+ __rte_mempool_trace_cache_flush;
+ __rte_mempool_trace_ops_populate;
+ __rte_mempool_trace_ops_alloc;
+ __rte_mempool_trace_ops_free;
+ __rte_mempool_trace_set_ops_byname;
+};
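
The map keeps the stable DPDK_20.0 node, lists rte_mempool_populate_iova and rte_mempool_populate_virt a second time under DPDK_21 (the usual versioned-symbol arrangement when a function changes behaviour within an ABI-stable series, with both versions staying exported), and gathers the not-yet-stable helpers and trace variables under EXPERIMENTAL. Callers of the EXPERIMENTAL entries must opt in before the prototypes stop carrying a deprecation attribute; a minimal sketch, assuming the standard ALLOW_EXPERIMENTAL_API mechanism:

#define ALLOW_EXPERIMENTAL_API	/* normally passed by the build system */

#include <rte_mempool.h>

/* rte_mempool_get_page_size() sits under the EXPERIMENTAL node above, so
 * without the opt-in define its __rte_experimental prototype turns this
 * call into a deprecation diagnostic at compile time.
 */
static int
query_page_size(struct rte_mempool *mp, size_t *pg_sz)
{
	return rte_mempool_get_page_size(mp, pg_sz);
}
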