summaryrefslogtreecommitdiffstats
path: root/src/spdk/dpdk/lib/librte_stack
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-21 11:54:28 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-21 11:54:28 +0000
commite6918187568dbd01842d8d1d2c808ce16a894239 (patch)
tree64f88b554b444a49f656b6c656111a145cbbaa28 /src/spdk/dpdk/lib/librte_stack
parentInitial commit. (diff)
downloadceph-e6918187568dbd01842d8d1d2c808ce16a894239.tar.xz
ceph-e6918187568dbd01842d8d1d2c808ce16a894239.zip
Adding upstream version 18.2.2.upstream/18.2.2
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/dpdk/lib/librte_stack')
-rw-r--r--src/spdk/dpdk/lib/librte_stack/Makefile27
-rw-r--r--src/spdk/dpdk/lib/librte_stack/meson.build11
-rw-r--r--src/spdk/dpdk/lib/librte_stack/rte_stack.c197
-rw-r--r--src/spdk/dpdk/lib/librte_stack/rte_stack.h269
-rw-r--r--src/spdk/dpdk/lib/librte_stack/rte_stack_lf.c31
-rw-r--r--src/spdk/dpdk/lib/librte_stack/rte_stack_lf.h112
-rw-r--r--src/spdk/dpdk/lib/librte_stack/rte_stack_lf_c11.h159
-rw-r--r--src/spdk/dpdk/lib/librte_stack/rte_stack_lf_generic.h149
-rw-r--r--src/spdk/dpdk/lib/librte_stack/rte_stack_lf_stubs.h44
-rw-r--r--src/spdk/dpdk/lib/librte_stack/rte_stack_std.c26
-rw-r--r--src/spdk/dpdk/lib/librte_stack/rte_stack_std.h124
-rw-r--r--src/spdk/dpdk/lib/librte_stack/rte_stack_version.map9
-rw-r--r--src/spdk/dpdk/lib/librte_stack/stack_pvt.h34
13 files changed, 1192 insertions, 0 deletions
diff --git a/src/spdk/dpdk/lib/librte_stack/Makefile b/src/spdk/dpdk/lib/librte_stack/Makefile
new file mode 100644
index 000000000..020ef102b
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_stack/Makefile
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2019 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_stack.a
+
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
+LDLIBS += -lrte_eal
+
+EXPORT_MAP := rte_stack_version.map
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_STACK) := rte_stack.c \
+ rte_stack_std.c \
+ rte_stack_lf.c
+
+# install includes
+SYMLINK-$(CONFIG_RTE_LIBRTE_STACK)-include := rte_stack.h \
+ rte_stack_std.h \
+ rte_stack_lf.h \
+ rte_stack_lf_generic.h \
+ rte_stack_lf_c11.h \
+ rte_stack_lf_stubs.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/lib/librte_stack/meson.build b/src/spdk/dpdk/lib/librte_stack/meson.build
new file mode 100644
index 000000000..1c447b4c4
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_stack/meson.build
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2019 Intel Corporation
+
+sources = files('rte_stack.c', 'rte_stack_std.c', 'rte_stack_lf.c')
+headers = files('rte_stack.h',
+ 'rte_stack_std.h',
+ 'rte_stack_lf.h',
+ 'rte_stack_lf_generic.h',
+ 'rte_stack_lf_c11.h')
+build = false
+reason = 'not needed by SPDK'
diff --git a/src/spdk/dpdk/lib/librte_stack/rte_stack.c b/src/spdk/dpdk/lib/librte_stack/rte_stack.c
new file mode 100644
index 000000000..d19824f00
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_stack/rte_stack.c
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <string.h>
+
+#include <rte_string_fns.h>
+#include <rte_atomic.h>
+#include <rte_eal.h>
+#include <rte_eal_memconfig.h>
+#include <rte_errno.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+#include <rte_rwlock.h>
+#include <rte_tailq.h>
+
+#include "rte_stack.h"
+#include "stack_pvt.h"
+
+int stack_logtype;
+
+TAILQ_HEAD(rte_stack_list, rte_tailq_entry);
+
+static struct rte_tailq_elem rte_stack_tailq = {
+ .name = RTE_TAILQ_STACK_NAME,
+};
+EAL_REGISTER_TAILQ(rte_stack_tailq)
+
+
+/* Zero the whole stack structure, then perform the variant-specific
+ * setup: lock-free free-list seeding when RTE_STACK_F_LF is set,
+ * otherwise spinlock initialization for the standard variant.
+ */
+static void
+rte_stack_init(struct rte_stack *s, unsigned int count, uint32_t flags)
+{
+	memset(s, 0, sizeof(*s));
+
+	if (flags & RTE_STACK_F_LF)
+		rte_stack_lf_init(s, count);
+	else
+		rte_stack_std_init(s);
+}
+
+/* Total allocation size, in bytes, required for a stack holding *count*
+ * pointers, dispatching on the lock-free flag.
+ */
+static ssize_t
+rte_stack_get_memsize(unsigned int count, uint32_t flags)
+{
+	if (flags & RTE_STACK_F_LF)
+		return rte_stack_lf_get_memsize(count);
+	else
+		return rte_stack_std_get_memsize(count);
+}
+
+/* Allocate and register a named stack: reserve a memzone sized for
+ * *count* entries, initialize it, and insert it into the global stack
+ * tailq so rte_stack_lookup() can find it by name.
+ */
+struct rte_stack *
+rte_stack_create(const char *name, unsigned int count, int socket_id,
+		uint32_t flags)
+{
+	char mz_name[RTE_MEMZONE_NAMESIZE];
+	struct rte_stack_list *stack_list;
+	const struct rte_memzone *mz;
+	struct rte_tailq_entry *te;
+	struct rte_stack *s;
+	unsigned int sz;
+	int ret;
+
+	/* The lock-free variant relies on a 16-byte (128-bit) compare-and-
+	 * swap of struct rte_stack_lf_head; reject the flag on non-64-bit
+	 * builds where that primitive is unavailable.
+	 */
+#ifdef RTE_ARCH_64
+	RTE_BUILD_BUG_ON(sizeof(struct rte_stack_lf_head) != 16);
+#else
+	if (flags & RTE_STACK_F_LF) {
+		STACK_LOG_ERR("Lock-free stack is not supported on your platform\n");
+		return NULL;
+	}
+#endif
+
+	/* NOTE(review): rte_stack_get_memsize() returns ssize_t but sz is
+	 * unsigned int — an extremely large count could truncate here;
+	 * confirm against upstream DPDK.
+	 */
+	sz = rte_stack_get_memsize(count, flags);
+
+	ret = snprintf(mz_name, sizeof(mz_name), "%s%s",
+		       RTE_STACK_MZ_PREFIX, name);
+	if (ret < 0 || ret >= (int)sizeof(mz_name)) {
+		rte_errno = ENAMETOOLONG;
+		return NULL;
+	}
+
+	te = rte_zmalloc("STACK_TAILQ_ENTRY", sizeof(*te), 0);
+	if (te == NULL) {
+		STACK_LOG_ERR("Cannot reserve memory for tailq\n");
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+
+	/* Hold the tailq write lock across the memzone reservation and the
+	 * list insertion so creation is atomic with respect to concurrent
+	 * lookup/free.
+	 */
+	rte_mcfg_tailq_write_lock();
+
+	mz = rte_memzone_reserve_aligned(mz_name, sz, socket_id,
+					 0, __alignof__(*s));
+	if (mz == NULL) {
+		STACK_LOG_ERR("Cannot reserve stack memzone!\n");
+		rte_mcfg_tailq_write_unlock();
+		rte_free(te);
+		return NULL;
+	}
+
+	s = mz->addr;
+
+	rte_stack_init(s, count, flags);
+
+	/* Store the name for later lookups */
+	/* NOTE(review): strlcpy() returns size_t, so the ret < 0 branch can
+	 * never fire; only the truncation (>=) check is live.
+	 */
+	ret = strlcpy(s->name, name, sizeof(s->name));
+	if (ret < 0 || ret >= (int)sizeof(s->name)) {
+		rte_mcfg_tailq_write_unlock();
+
+		rte_errno = ENAMETOOLONG;
+		rte_free(te);
+		rte_memzone_free(mz);
+		return NULL;
+	}
+
+	s->memzone = mz;
+	s->capacity = count;
+	s->flags = flags;
+
+	te->data = s;
+
+	stack_list = RTE_TAILQ_CAST(rte_stack_tailq.head, rte_stack_list);
+
+	TAILQ_INSERT_TAIL(stack_list, te, next);
+
+	rte_mcfg_tailq_write_unlock();
+
+	return s;
+}
+
+/* Unregister the stack from the global tailq (if present) and release
+ * both the tailq entry and the backing memzone. NULL is accepted and
+ * ignored.
+ */
+void
+rte_stack_free(struct rte_stack *s)
+{
+	struct rte_stack_list *stack_list;
+	struct rte_tailq_entry *te;
+
+	if (s == NULL)
+		return;
+
+	stack_list = RTE_TAILQ_CAST(rte_stack_tailq.head, rte_stack_list);
+	rte_mcfg_tailq_write_lock();
+
+	/* find out tailq entry */
+	TAILQ_FOREACH(te, stack_list, next) {
+		if (te->data == s)
+			break;
+	}
+
+	/* te is NULL when the loop ran to completion without a match:
+	 * the stack was never registered (or already freed), so leave the
+	 * memzone alone.
+	 */
+	if (te == NULL) {
+		rte_mcfg_tailq_write_unlock();
+		return;
+	}
+
+	TAILQ_REMOVE(stack_list, te, next);
+
+	rte_mcfg_tailq_write_unlock();
+
+	rte_free(te);
+
+	rte_memzone_free(s->memzone);
+}
+
+/* Find a stack previously registered by rte_stack_create() by name.
+ * Takes the tailq read lock for the traversal; sets rte_errno to EINVAL
+ * for a NULL name and ENOENT when no match is found.
+ */
+struct rte_stack *
+rte_stack_lookup(const char *name)
+{
+	struct rte_stack_list *stack_list;
+	struct rte_tailq_entry *te;
+	struct rte_stack *r = NULL;
+
+	if (name == NULL) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+
+	stack_list = RTE_TAILQ_CAST(rte_stack_tailq.head, rte_stack_list);
+
+	rte_mcfg_tailq_read_lock();
+
+	/* Comparison is bounded by RTE_STACK_NAMESIZE, the size of s->name. */
+	TAILQ_FOREACH(te, stack_list, next) {
+		r = (struct rte_stack *) te->data;
+		if (strncmp(name, r->name, RTE_STACK_NAMESIZE) == 0)
+			break;
+	}
+
+	rte_mcfg_tailq_read_unlock();
+
+	if (te == NULL) {
+		rte_errno = ENOENT;
+		return NULL;
+	}
+
+	return r;
+}
+
+/* Constructor: register the "lib.stack" log type and default its level
+ * to NOTICE.
+ */
+RTE_INIT(librte_stack_init_log)
+{
+	stack_logtype = rte_log_register("lib.stack");
+	if (stack_logtype >= 0)
+		rte_log_set_level(stack_logtype, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/lib/librte_stack/rte_stack.h b/src/spdk/dpdk/lib/librte_stack/rte_stack.h
new file mode 100644
index 000000000..27ddb199e
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_stack/rte_stack.h
@@ -0,0 +1,269 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+/**
+ * @file rte_stack.h
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * RTE Stack
+ *
+ * librte_stack provides an API for configuration and use of a bounded stack of
+ * pointers. Push and pop operations are MT-safe, allowing concurrent access,
+ * and the interface supports pushing and popping multiple pointers at a time.
+ */
+
+#ifndef _RTE_STACK_H_
+#define _RTE_STACK_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_atomic.h>
+#include <rte_compat.h>
+#include <rte_debug.h>
+#include <rte_errno.h>
+#include <rte_memzone.h>
+#include <rte_spinlock.h>
+
+#define RTE_TAILQ_STACK_NAME "RTE_STACK"
+#define RTE_STACK_MZ_PREFIX "STK_"
+/** The maximum length of a stack name. */
+#define RTE_STACK_NAMESIZE (RTE_MEMZONE_NAMESIZE - \
+ sizeof(RTE_STACK_MZ_PREFIX) + 1)
+
+struct rte_stack_lf_elem {
+ void *data; /**< Data pointer */
+ struct rte_stack_lf_elem *next; /**< Next pointer */
+};
+
+struct rte_stack_lf_head {
+ struct rte_stack_lf_elem *top; /**< Stack top */
+ uint64_t cnt; /**< Modification counter for avoiding ABA problem */
+};
+
+struct rte_stack_lf_list {
+ /** List head */
+ struct rte_stack_lf_head head __rte_aligned(16);
+ /** List len */
+ uint64_t len;
+};
+
+/* Structure containing two lock-free LIFO lists: the stack itself and a list
+ * of free linked-list elements.
+ */
+struct rte_stack_lf {
+ /** LIFO list of elements */
+ struct rte_stack_lf_list used __rte_cache_aligned;
+ /** LIFO list of free elements */
+ struct rte_stack_lf_list free __rte_cache_aligned;
+ /** LIFO elements */
+ struct rte_stack_lf_elem elems[] __rte_cache_aligned;
+};
+
+/* Structure containing the LIFO, its current length, and a lock for mutual
+ * exclusion.
+ */
+struct rte_stack_std {
+ rte_spinlock_t lock; /**< LIFO lock */
+ uint32_t len; /**< LIFO len */
+ void *objs[]; /**< LIFO pointer table */
+};
+
+/* The RTE stack structure contains the LIFO structure itself, plus metadata
+ * such as its name and memzone pointer.
+ */
+struct rte_stack {
+ /** Name of the stack. */
+ char name[RTE_STACK_NAMESIZE] __rte_cache_aligned;
+ /** Memzone containing the rte_stack structure. */
+ const struct rte_memzone *memzone;
+ uint32_t capacity; /**< Usable size of the stack. */
+ uint32_t flags; /**< Flags supplied at creation. */
+ RTE_STD_C11
+ union {
+ struct rte_stack_lf stack_lf; /**< Lock-free LIFO structure. */
+ struct rte_stack_std stack_std; /**< LIFO structure. */
+ };
+} __rte_cache_aligned;
+
+/**
+ * The stack uses lock-free push and pop functions. This flag is only
+ * supported on x86_64 platforms, currently.
+ */
+#define RTE_STACK_F_LF 0x0001
+
+#include "rte_stack_std.h"
+#include "rte_stack_lf.h"
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Push several objects on the stack (MT-safe).
+ *
+ * @param s
+ * A pointer to the stack structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects).
+ * @param n
+ * The number of objects to push on the stack from the obj_table.
+ * @return
+ * Actual number of objects pushed (either 0 or *n*).
+ */
+__rte_experimental
+static __rte_always_inline unsigned int
+rte_stack_push(struct rte_stack *s, void * const *obj_table, unsigned int n)
+{
+	RTE_ASSERT(s != NULL);
+	RTE_ASSERT(obj_table != NULL);
+
+	/* Dispatch on the variant fixed at rte_stack_create() time. */
+	if (s->flags & RTE_STACK_F_LF)
+		return __rte_stack_lf_push(s, obj_table, n);
+	else
+		return __rte_stack_std_push(s, obj_table, n);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Pop several objects from the stack (MT-safe).
+ *
+ * @param s
+ * A pointer to the stack structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects).
+ * @param n
+ * The number of objects to pull from the stack.
+ * @return
+ * Actual number of objects popped (either 0 or *n*).
+ */
+__rte_experimental
+static __rte_always_inline unsigned int
+rte_stack_pop(struct rte_stack *s, void **obj_table, unsigned int n)
+{
+	RTE_ASSERT(s != NULL);
+	RTE_ASSERT(obj_table != NULL);
+
+	/* Dispatch on the variant fixed at rte_stack_create() time. */
+	if (s->flags & RTE_STACK_F_LF)
+		return __rte_stack_lf_pop(s, obj_table, n);
+	else
+		return __rte_stack_std_pop(s, obj_table, n);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Return the number of used entries in a stack.
+ *
+ * @param s
+ * A pointer to the stack structure.
+ * @return
+ * The number of used entries in the stack.
+ */
+__rte_experimental
+static __rte_always_inline unsigned int
+rte_stack_count(struct rte_stack *s)
+{
+	RTE_ASSERT(s != NULL);
+
+	/* The lock-free count is approximate (may under-report under
+	 * concurrent modification); the standard count is exact.
+	 */
+	if (s->flags & RTE_STACK_F_LF)
+		return __rte_stack_lf_count(s);
+	else
+		return __rte_stack_std_count(s);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Return the number of free entries in a stack.
+ *
+ * @param s
+ * A pointer to the stack structure.
+ * @return
+ * The number of free entries in the stack.
+ */
+__rte_experimental
+static __rte_always_inline unsigned int
+rte_stack_free_count(struct rte_stack *s)
+{
+	RTE_ASSERT(s != NULL);
+
+	/* capacity is fixed at creation; inherits any approximation of
+	 * rte_stack_count() for the lock-free variant.
+	 */
+	return s->capacity - rte_stack_count(s);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Create a new stack named *name* in memory.
+ *
+ * This function uses ``memzone_reserve()`` to allocate memory for a stack of
+ * size *count*. The behavior of the stack is controlled by the *flags*.
+ *
+ * @param name
+ * The name of the stack.
+ * @param count
+ * The size of the stack.
+ * @param socket_id
+ * The *socket_id* argument is the socket identifier in case of
+ * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
+ * constraint for the reserved zone.
+ * @param flags
+ * An OR of the following:
+ * - RTE_STACK_F_LF: If this flag is set, the stack uses lock-free
+ * variants of the push and pop functions. Otherwise, it achieves
+ * thread-safety using a lock.
+ * @return
+ * On success, the pointer to the new allocated stack. NULL on error with
+ * rte_errno set appropriately. Possible errno values include:
+ * - ENOSPC - the maximum number of memzones has already been allocated
+ * - EEXIST - a stack with the same name already exists
+ * - ENOMEM - insufficient memory to create the stack
+ * - ENAMETOOLONG - name size exceeds RTE_STACK_NAMESIZE
+ */
+__rte_experimental
+struct rte_stack *
+rte_stack_create(const char *name, unsigned int count, int socket_id,
+ uint32_t flags);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Free all memory used by the stack.
+ *
+ * @param s
+ * Stack to free
+ */
+__rte_experimental
+void
+rte_stack_free(struct rte_stack *s);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Lookup a stack by its name.
+ *
+ * @param name
+ * The name of the stack.
+ * @return
+ * The pointer to the stack matching the name, or NULL if not found,
+ * with rte_errno set appropriately. Possible rte_errno values include:
+ * - ENOENT - Stack with name *name* not found.
+ * - EINVAL - *name* pointer is NULL.
+ */
+__rte_experimental
+struct rte_stack *
+rte_stack_lookup(const char *name);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_STACK_H_ */
diff --git a/src/spdk/dpdk/lib/librte_stack/rte_stack_lf.c b/src/spdk/dpdk/lib/librte_stack/rte_stack_lf.c
new file mode 100644
index 000000000..0adcc263e
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_stack/rte_stack_lf.c
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include "rte_stack.h"
+
+/* Seed the lock-free free list with all *count* entries of the
+ * flexible elems[] array, pushed one element at a time.
+ */
+void
+rte_stack_lf_init(struct rte_stack *s, unsigned int count)
+{
+	struct rte_stack_lf_elem *elems = s->stack_lf.elems;
+	unsigned int i;
+
+	for (i = 0; i < count; i++)
+		__rte_stack_lf_push_elems(&s->stack_lf.free,
+					  &elems[i], &elems[i], 1);
+}
+
+/* Bytes needed for a lock-free stack of *count* entries: the rte_stack
+ * header, a cache-line-rounded element array, plus trailing padding.
+ */
+ssize_t
+rte_stack_lf_get_memsize(unsigned int count)
+{
+	ssize_t sz = sizeof(struct rte_stack);
+
+	sz += RTE_CACHE_LINE_ROUNDUP(count * sizeof(struct rte_stack_lf_elem));
+
+	/* Add padding to avoid false sharing conflicts caused by
+	 * next-line hardware prefetchers.
+	 */
+	sz += 2 * RTE_CACHE_LINE_SIZE;
+
+	return sz;
+}
diff --git a/src/spdk/dpdk/lib/librte_stack/rte_stack_lf.h b/src/spdk/dpdk/lib/librte_stack/rte_stack_lf.h
new file mode 100644
index 000000000..e67630c27
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_stack/rte_stack_lf.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _RTE_STACK_LF_H_
+#define _RTE_STACK_LF_H_
+
+#if !(defined(RTE_ARCH_X86_64) || defined(RTE_ARCH_ARM64))
+#include "rte_stack_lf_stubs.h"
+#else
+#ifdef RTE_USE_C11_MEM_MODEL
+#include "rte_stack_lf_c11.h"
+#else
+#include "rte_stack_lf_generic.h"
+#endif
+#endif
+
+/**
+ * @internal Push several objects on the lock-free stack (MT-safe).
+ *
+ * @param s
+ * A pointer to the stack structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects).
+ * @param n
+ * The number of objects to push on the stack from the obj_table.
+ * @return
+ * Actual number of objects enqueued.
+ */
+__rte_experimental
+static __rte_always_inline unsigned int
+__rte_stack_lf_push(struct rte_stack *s,
+		    void * const *obj_table,
+		    unsigned int n)
+{
+	struct rte_stack_lf_elem *tmp, *first, *last = NULL;
+	unsigned int i;
+
+	if (unlikely(n == 0))
+		return 0;
+
+	/* Pop n free elements */
+	first = __rte_stack_lf_pop_elems(&s->stack_lf.free, n, NULL, &last);
+	if (unlikely(first == NULL))
+		return 0;
+
+	/* Construct the list elements */
+	/* Data pointers are filled in reverse (obj_table[n - 1] first) so
+	 * that a subsequent pop returns objects in obj_table order.
+	 */
+	for (tmp = first, i = 0; i < n; i++, tmp = tmp->next)
+		tmp->data = obj_table[n - i - 1];
+
+	/* Push them to the used list */
+	__rte_stack_lf_push_elems(&s->stack_lf.used, first, last, n);
+
+	return n;
+}
+
+/**
+ * @internal Pop several objects from the lock-free stack (MT-safe).
+ *
+ * @param s
+ * A pointer to the stack structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects).
+ * @param n
+ * The number of objects to pull from the stack.
+ * @return
+ * - Actual number of objects popped.
+ */
+__rte_experimental
+static __rte_always_inline unsigned int
+__rte_stack_lf_pop(struct rte_stack *s, void **obj_table, unsigned int n)
+{
+	struct rte_stack_lf_elem *first, *last = NULL;
+
+	/* All-or-nothing: returns either n objects or 0. */
+	if (unlikely(n == 0))
+		return 0;
+
+	/* Pop n used elements; obj_table is filled during the traversal
+	 * inside __rte_stack_lf_pop_elems().
+	 */
+	first = __rte_stack_lf_pop_elems(&s->stack_lf.used,
+					 n, obj_table, &last);
+	if (unlikely(first == NULL))
+		return 0;
+
+	/* Push the list elements to the free list */
+	__rte_stack_lf_push_elems(&s->stack_lf.free, first, last, n);
+
+	return n;
+}
+
+/**
+ * @internal Initialize a lock-free stack.
+ *
+ * @param s
+ * A pointer to the stack structure.
+ * @param count
+ * The size of the stack.
+ */
+void
+rte_stack_lf_init(struct rte_stack *s, unsigned int count);
+
+/**
+ * @internal Return the memory required for a lock-free stack.
+ *
+ * @param count
+ * The size of the stack.
+ * @return
+ * The bytes to allocate for a lock-free stack.
+ */
+ssize_t
+rte_stack_lf_get_memsize(unsigned int count);
+
+#endif /* _RTE_STACK_LF_H_ */
diff --git a/src/spdk/dpdk/lib/librte_stack/rte_stack_lf_c11.h b/src/spdk/dpdk/lib/librte_stack/rte_stack_lf_c11.h
new file mode 100644
index 000000000..999359f08
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_stack/rte_stack_lf_c11.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _RTE_STACK_LF_C11_H_
+#define _RTE_STACK_LF_C11_H_
+
+#include <rte_branch_prediction.h>
+#include <rte_prefetch.h>
+
+/* Approximate number of used elements; see below for why a relaxed
+ * load is sufficient.
+ */
+static __rte_always_inline unsigned int
+__rte_stack_lf_count(struct rte_stack *s)
+{
+	/* stack_lf_push() and stack_lf_pop() do not update the list's contents
+	 * and stack_lf->len atomically, which can cause the list to appear
+	 * shorter than it actually is if this function is called while other
+	 * threads are modifying the list.
+	 *
+	 * However, given the inherently approximate nature of the get_count
+	 * callback -- even if the list and its size were updated atomically,
+	 * the size could change between when get_count executes and when the
+	 * value is returned to the caller -- this is acceptable.
+	 *
+	 * The stack_lf->len updates are placed such that the list may appear to
+	 * have fewer elements than it does, but will never appear to have more
+	 * elements. If the mempool is near-empty to the point that this is a
+	 * concern, the user should consider increasing the mempool size.
+	 */
+	return (unsigned int)__atomic_load_n(&s->stack_lf.used.len,
+					     __ATOMIC_RELAXED);
+}
+
+/* Push the chain first..last (num elements) onto *list via a 128-bit
+ * CAS loop, then publish the new length with a release add.
+ */
+static __rte_always_inline void
+__rte_stack_lf_push_elems(struct rte_stack_lf_list *list,
+			  struct rte_stack_lf_elem *first,
+			  struct rte_stack_lf_elem *last,
+			  unsigned int num)
+{
+	struct rte_stack_lf_head old_head;
+	int success;
+
+	/* Non-atomic head read: a stale or torn value only makes the first
+	 * CAS fail, which refreshes old_head with the latest value.
+	 */
+	old_head = list->head;
+
+	do {
+		struct rte_stack_lf_head new_head;
+
+		/* Use an acquire fence to establish a synchronized-with
+		 * relationship between the list->head load and store-release
+		 * operations (as part of the rte_atomic128_cmp_exchange()).
+		 */
+		__atomic_thread_fence(__ATOMIC_ACQUIRE);
+
+		/* Swing the top pointer to the first element in the list and
+		 * make the last element point to the old top.
+		 */
+		new_head.top = first;
+		new_head.cnt = old_head.cnt + 1;
+
+		last->next = old_head.top;
+
+		/* Use the release memmodel to ensure the writes to the LF LIFO
+		 * elements are visible before the head pointer write.
+		 */
+		success = rte_atomic128_cmp_exchange(
+				(rte_int128_t *)&list->head,
+				(rte_int128_t *)&old_head,
+				(rte_int128_t *)&new_head,
+				1, __ATOMIC_RELEASE,
+				__ATOMIC_RELAXED);
+	} while (success == 0);
+
+	/* Ensure the stack modifications are not reordered with respect
+	 * to the LIFO len update.
+	 */
+	__atomic_add_fetch(&list->len, num, __ATOMIC_RELEASE);
+}
+
+/* Pop num elements from *list. Returns the old head of the popped
+ * chain, stores the last popped element in *last, and (if obj_table is
+ * non-NULL) copies each element's data pointer into obj_table. Returns
+ * NULL without side effects when fewer than num elements are present.
+ */
+static __rte_always_inline struct rte_stack_lf_elem *
+__rte_stack_lf_pop_elems(struct rte_stack_lf_list *list,
+			 unsigned int num,
+			 void **obj_table,
+			 struct rte_stack_lf_elem **last)
+{
+	struct rte_stack_lf_head old_head;
+	uint64_t len;
+	int success;
+
+	/* Reserve num elements, if available */
+	len = __atomic_load_n(&list->len, __ATOMIC_ACQUIRE);
+
+	while (1) {
+		/* Does the list contain enough elements? */
+		if (unlikely(len < num))
+			return NULL;
+
+		/* len is updated on failure */
+		if (__atomic_compare_exchange_n(&list->len,
+						&len, len - num,
+						0, __ATOMIC_ACQUIRE,
+						__ATOMIC_ACQUIRE))
+			break;
+	}
+
+	/* If a torn read occurs, the CAS will fail and set old_head to the
+	 * correct/latest value.
+	 */
+	old_head = list->head;
+
+	/* Pop num elements */
+	do {
+		struct rte_stack_lf_head new_head;
+		struct rte_stack_lf_elem *tmp;
+		unsigned int i;
+
+		/* Use the acquire memmodel to ensure the reads to the LF LIFO
+		 * elements are properly ordered with respect to the head
+		 * pointer read.
+		 */
+		__atomic_thread_fence(__ATOMIC_ACQUIRE);
+
+		rte_prefetch0(old_head.top);
+
+		tmp = old_head.top;
+
+		/* Traverse the list to find the new head. A next pointer will
+		 * either point to another element or NULL; if a thread
+		 * encounters a pointer that has already been popped, the CAS
+		 * will fail.
+		 */
+		for (i = 0; i < num && tmp != NULL; i++) {
+			rte_prefetch0(tmp->next);
+			if (obj_table)
+				obj_table[i] = tmp->data;
+			if (last)
+				*last = tmp;
+			tmp = tmp->next;
+		}
+
+		/* If NULL was encountered, the list was modified while
+		 * traversing it. Retry.
+		 */
+		if (i != num)
+			continue;
+
+		new_head.top = tmp;
+		new_head.cnt = old_head.cnt + 1;
+
+		success = rte_atomic128_cmp_exchange(
+				(rte_int128_t *)&list->head,
+				(rte_int128_t *)&old_head,
+				(rte_int128_t *)&new_head,
+				1, __ATOMIC_RELEASE,
+				__ATOMIC_RELAXED);
+	} while (success == 0);
+
+	return old_head.top;
+}
+
+#endif /* _RTE_STACK_LF_C11_H_ */
diff --git a/src/spdk/dpdk/lib/librte_stack/rte_stack_lf_generic.h b/src/spdk/dpdk/lib/librte_stack/rte_stack_lf_generic.h
new file mode 100644
index 000000000..3abbb5342
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_stack/rte_stack_lf_generic.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _RTE_STACK_LF_GENERIC_H_
+#define _RTE_STACK_LF_GENERIC_H_
+
+#include <rte_branch_prediction.h>
+#include <rte_prefetch.h>
+
+/* Approximate number of used elements (generic/legacy-atomics
+ * implementation); see below for why this is acceptable.
+ */
+static __rte_always_inline unsigned int
+__rte_stack_lf_count(struct rte_stack *s)
+{
+	/* stack_lf_push() and stack_lf_pop() do not update the list's contents
+	 * and stack_lf->len atomically, which can cause the list to appear
+	 * shorter than it actually is if this function is called while other
+	 * threads are modifying the list.
+	 *
+	 * However, given the inherently approximate nature of the get_count
+	 * callback -- even if the list and its size were updated atomically,
+	 * the size could change between when get_count executes and when the
+	 * value is returned to the caller -- this is acceptable.
+	 *
+	 * The stack_lf->len updates are placed such that the list may appear to
+	 * have fewer elements than it does, but will never appear to have more
+	 * elements. If the mempool is near-empty to the point that this is a
+	 * concern, the user should consider increasing the mempool size.
+	 */
+	return (unsigned int)rte_atomic64_read((rte_atomic64_t *)
+			&s->stack_lf.used.len);
+}
+
+/* Push the chain first..last (num elements) onto *list via a 128-bit
+ * CAS loop (generic/legacy-atomics implementation).
+ */
+static __rte_always_inline void
+__rte_stack_lf_push_elems(struct rte_stack_lf_list *list,
+			  struct rte_stack_lf_elem *first,
+			  struct rte_stack_lf_elem *last,
+			  unsigned int num)
+{
+	struct rte_stack_lf_head old_head;
+	int success;
+
+	old_head = list->head;
+
+	do {
+		struct rte_stack_lf_head new_head;
+
+		/* An acquire fence (or stronger) is needed for weak memory
+		 * models to establish a synchronized-with relationship between
+		 * the list->head load and store-release operations (as part of
+		 * the rte_atomic128_cmp_exchange()).
+		 */
+		rte_smp_mb();
+
+		/* Swing the top pointer to the first element in the list and
+		 * make the last element point to the old top.
+		 */
+		new_head.top = first;
+		new_head.cnt = old_head.cnt + 1;
+
+		last->next = old_head.top;
+
+		/* old_head is updated on failure */
+		success = rte_atomic128_cmp_exchange(
+				(rte_int128_t *)&list->head,
+				(rte_int128_t *)&old_head,
+				(rte_int128_t *)&new_head,
+				1, __ATOMIC_RELEASE,
+				__ATOMIC_RELAXED);
+	} while (success == 0);
+
+	/* len is updated only after the push succeeds, so the list never
+	 * appears longer than it is (see __rte_stack_lf_count).
+	 */
+	rte_atomic64_add((rte_atomic64_t *)&list->len, num);
+}
+
+/* Pop num elements from *list (generic/legacy-atomics implementation).
+ * Returns the old head of the popped chain, stores the last popped
+ * element in *last, and (if obj_table is non-NULL) copies each
+ * element's data pointer into obj_table. Returns NULL when fewer than
+ * num elements are present.
+ */
+static __rte_always_inline struct rte_stack_lf_elem *
+__rte_stack_lf_pop_elems(struct rte_stack_lf_list *list,
+			 unsigned int num,
+			 void **obj_table,
+			 struct rte_stack_lf_elem **last)
+{
+	struct rte_stack_lf_head old_head;
+	int success;
+
+	/* Reserve num elements, if available */
+	while (1) {
+		uint64_t len = rte_atomic64_read((rte_atomic64_t *)&list->len);
+
+		/* Does the list contain enough elements? */
+		if (unlikely(len < num))
+			return NULL;
+
+		if (rte_atomic64_cmpset((volatile uint64_t *)&list->len,
+					len, len - num))
+			break;
+	}
+
+	old_head = list->head;
+
+	/* Pop num elements */
+	do {
+		struct rte_stack_lf_head new_head;
+		struct rte_stack_lf_elem *tmp;
+		unsigned int i;
+
+		/* An acquire fence (or stronger) is needed for weak memory
+		 * models to ensure the LF LIFO element reads are properly
+		 * ordered with respect to the head pointer read.
+		 */
+		rte_smp_mb();
+
+		rte_prefetch0(old_head.top);
+
+		tmp = old_head.top;
+
+		/* Traverse the list to find the new head. A next pointer will
+		 * either point to another element or NULL; if a thread
+		 * encounters a pointer that has already been popped, the CAS
+		 * will fail.
+		 */
+		for (i = 0; i < num && tmp != NULL; i++) {
+			rte_prefetch0(tmp->next);
+			if (obj_table)
+				obj_table[i] = tmp->data;
+			if (last)
+				*last = tmp;
+			tmp = tmp->next;
+		}
+
+		/* If NULL was encountered, the list was modified while
+		 * traversing it. Retry.
+		 */
+		if (i != num)
+			continue;
+
+		new_head.top = tmp;
+		new_head.cnt = old_head.cnt + 1;
+
+		/* old_head is updated on failure */
+		success = rte_atomic128_cmp_exchange(
+				(rte_int128_t *)&list->head,
+				(rte_int128_t *)&old_head,
+				(rte_int128_t *)&new_head,
+				1, __ATOMIC_RELEASE,
+				__ATOMIC_RELAXED);
+	} while (success == 0);
+
+	return old_head.top;
+}
+
+#endif /* _RTE_STACK_LF_GENERIC_H_ */
diff --git a/src/spdk/dpdk/lib/librte_stack/rte_stack_lf_stubs.h b/src/spdk/dpdk/lib/librte_stack/rte_stack_lf_stubs.h
new file mode 100644
index 000000000..a05abf1f1
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_stack/rte_stack_lf_stubs.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Arm Limited
+ */
+
+#ifndef _RTE_STACK_LF_STUBS_H_
+#define _RTE_STACK_LF_STUBS_H_
+
+#include <rte_common.h>
+
+/* Stub for platforms without 128-bit CAS: always reports zero used
+ * elements.
+ */
+static __rte_always_inline unsigned int
+__rte_stack_lf_count(struct rte_stack *s)
+{
+	RTE_SET_USED(s);
+
+	return 0;
+}
+
+/* Stub for platforms without 128-bit CAS: no-op, all arguments
+ * ignored.
+ */
+static __rte_always_inline void
+__rte_stack_lf_push_elems(struct rte_stack_lf_list *list,
+			  struct rte_stack_lf_elem *first,
+			  struct rte_stack_lf_elem *last,
+			  unsigned int num)
+{
+	RTE_SET_USED(first);
+	RTE_SET_USED(last);
+	RTE_SET_USED(list);
+	RTE_SET_USED(num);
+}
+
+/* Stub for platforms without 128-bit CAS: always fails (returns NULL),
+ * so lock-free push/pop both yield 0 objects.
+ */
+static __rte_always_inline struct rte_stack_lf_elem *
+__rte_stack_lf_pop_elems(struct rte_stack_lf_list *list,
+			 unsigned int num,
+			 void **obj_table,
+			 struct rte_stack_lf_elem **last)
+{
+	RTE_SET_USED(obj_table);
+	RTE_SET_USED(last);
+	RTE_SET_USED(list);
+	RTE_SET_USED(num);
+
+	return NULL;
+}
+
+#endif /* _RTE_STACK_LF_STUBS_H_ */
diff --git a/src/spdk/dpdk/lib/librte_stack/rte_stack_std.c b/src/spdk/dpdk/lib/librte_stack/rte_stack_std.c
new file mode 100644
index 000000000..0a310d7c6
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_stack/rte_stack_std.c
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include "rte_stack.h"
+
+/* Initialize the protecting spinlock; len and objs[] were already
+ * zeroed by the memset() in rte_stack_init().
+ */
+void
+rte_stack_std_init(struct rte_stack *s)
+{
+	rte_spinlock_init(&s->stack_std.lock);
+}
+
+/* Bytes needed for a standard stack of *count* entries: the rte_stack
+ * header, a cache-line-rounded pointer table, plus trailing padding.
+ */
+ssize_t
+rte_stack_std_get_memsize(unsigned int count)
+{
+	ssize_t sz = sizeof(struct rte_stack);
+
+	sz += RTE_CACHE_LINE_ROUNDUP(count * sizeof(void *));
+
+	/* Add padding to avoid false sharing conflicts caused by
+	 * next-line hardware prefetchers.
+	 */
+	sz += 2 * RTE_CACHE_LINE_SIZE;
+
+	return sz;
+}
diff --git a/src/spdk/dpdk/lib/librte_stack/rte_stack_std.h b/src/spdk/dpdk/lib/librte_stack/rte_stack_std.h
new file mode 100644
index 000000000..7142cbf8e
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_stack/rte_stack_std.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _RTE_STACK_STD_H_
+#define _RTE_STACK_STD_H_
+
+#include <rte_branch_prediction.h>
+
+/**
+ * @internal Push several objects on the stack (MT-safe).
+ *
+ * @param s
+ * A pointer to the stack structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects).
+ * @param n
+ * The number of objects to push on the stack from the obj_table.
+ * @return
+ * Actual number of objects pushed (either 0 or *n*).
+ */
+__rte_experimental
+static __rte_always_inline unsigned int
+__rte_stack_std_push(struct rte_stack *s, void * const *obj_table,
+		     unsigned int n)
+{
+	struct rte_stack_std *stack = &s->stack_std;
+	unsigned int index;
+	void **cache_objs;
+
+	rte_spinlock_lock(&stack->lock);
+	/* cache_objs points one past the current top; it is only written
+	 * after the capacity check below passes, so computing it first is
+	 * safe.
+	 */
+	cache_objs = &stack->objs[stack->len];
+
+	/* Is there sufficient space in the stack? */
+	if ((stack->len + n) > s->capacity) {
+		rte_spinlock_unlock(&stack->lock);
+		return 0;
+	}
+
+	/* Add elements back into the cache */
+	for (index = 0; index < n; ++index, obj_table++)
+		cache_objs[index] = *obj_table;
+
+	stack->len += n;
+
+	rte_spinlock_unlock(&stack->lock);
+	return n;
+}
+
+/**
+ * @internal Pop several objects from the stack (MT-safe).
+ *
+ * @param s
+ * A pointer to the stack structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects).
+ * @param n
+ * The number of objects to pull from the stack.
+ * @return
+ * Actual number of objects popped (either 0 or *n*).
+ */
+__rte_experimental
+static __rte_always_inline unsigned int
+__rte_stack_std_pop(struct rte_stack *s, void **obj_table, unsigned int n)
+{
+	struct rte_stack_std *stack = &s->stack_std;
+	unsigned int index, len;
+	void **cache_objs;
+
+	rte_spinlock_lock(&stack->lock);
+
+	/* All-or-nothing: fail the whole request if n exceeds the current
+	 * depth.
+	 */
+	if (unlikely(n > stack->len)) {
+		rte_spinlock_unlock(&stack->lock);
+		return 0;
+	}
+
+	cache_objs = stack->objs;
+
+	/* Copy from the top of the stack downward (LIFO order). */
+	for (index = 0, len = stack->len - 1; index < n;
+			++index, len--, obj_table++)
+		*obj_table = cache_objs[len];
+
+	stack->len -= n;
+	rte_spinlock_unlock(&stack->lock);
+
+	return n;
+}
+
+/**
+ * @internal Return the number of used entries in a stack.
+ *
+ * @param s
+ * A pointer to the stack structure.
+ * @return
+ * The number of used entries in the stack.
+ */
+__rte_experimental
+static __rte_always_inline unsigned int
+__rte_stack_std_count(struct rte_stack *s)
+{
+	/* Exact: len is only modified under the spinlock in push/pop. */
+	return (unsigned int)s->stack_std.len;
+}
+
+/**
+ * @internal Initialize a standard stack.
+ *
+ * @param s
+ * A pointer to the stack structure.
+ */
+void
+rte_stack_std_init(struct rte_stack *s);
+
+/**
+ * @internal Return the memory required for a standard stack.
+ *
+ * @param count
+ * The size of the stack.
+ * @return
+ * The bytes to allocate for a standard stack.
+ */
+ssize_t
+rte_stack_std_get_memsize(unsigned int count);
+
+#endif /* _RTE_STACK_STD_H_ */
diff --git a/src/spdk/dpdk/lib/librte_stack/rte_stack_version.map b/src/spdk/dpdk/lib/librte_stack/rte_stack_version.map
new file mode 100644
index 000000000..6662679c3
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_stack/rte_stack_version.map
@@ -0,0 +1,9 @@
+EXPERIMENTAL {
+ global:
+
+ rte_stack_create;
+ rte_stack_free;
+ rte_stack_lookup;
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/lib/librte_stack/stack_pvt.h b/src/spdk/dpdk/lib/librte_stack/stack_pvt.h
new file mode 100644
index 000000000..ecf40819c
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_stack/stack_pvt.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _STACK_PVT_H_
+#define _STACK_PVT_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_log.h>
+
+extern int stack_logtype;
+
+#define STACK_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ##level, stack_logtype, "%s(): "fmt "\n", \
+ __func__, ##args)
+
+#define STACK_LOG_ERR(fmt, args...) \
+ STACK_LOG(ERR, fmt, ## args)
+
+#define STACK_LOG_WARN(fmt, args...) \
+ STACK_LOG(WARNING, fmt, ## args)
+
+#define STACK_LOG_INFO(fmt, args...) \
+ STACK_LOG(INFO, fmt, ## args)
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _STACK_PVT_H_ */