author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:45:59 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:45:59 +0000
commit     19fcec84d8d7d21e796c7624e521b60d28ee21ed (patch)
tree       42d26aa27d1e3f7c0b8bd3fd14e7d7082f5008dc /src/spdk/dpdk/lib/librte_eal/include/generic
parent     Initial commit. (diff)
Adding upstream version 16.2.11+ds. (upstream/16.2.11+ds, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/dpdk/lib/librte_eal/include/generic')
-rw-r--r--  src/spdk/dpdk/lib/librte_eal/include/generic/rte_atomic.h      1150
-rw-r--r--  src/spdk/dpdk/lib/librte_eal/include/generic/rte_byteorder.h    247
-rw-r--r--  src/spdk/dpdk/lib/librte_eal/include/generic/rte_cpuflags.h      79
-rw-r--r--  src/spdk/dpdk/lib/librte_eal/include/generic/rte_cycles.h       181
-rw-r--r--  src/spdk/dpdk/lib/librte_eal/include/generic/rte_io.h           350
-rw-r--r--  src/spdk/dpdk/lib/librte_eal/include/generic/rte_mcslock.h      179
-rw-r--r--  src/spdk/dpdk/lib/librte_eal/include/generic/rte_memcpy.h       112
-rw-r--r--  src/spdk/dpdk/lib/librte_eal/include/generic/rte_pause.h        128
-rw-r--r--  src/spdk/dpdk/lib/librte_eal/include/generic/rte_prefetch.h      54
-rw-r--r--  src/spdk/dpdk/lib/librte_eal/include/generic/rte_rwlock.h       239
-rw-r--r--  src/spdk/dpdk/lib/librte_eal/include/generic/rte_spinlock.h     305
-rw-r--r--  src/spdk/dpdk/lib/librte_eal/include/generic/rte_ticketlock.h   223
-rw-r--r--  src/spdk/dpdk/lib/librte_eal/include/generic/rte_vect.h         186
13 files changed, 3433 insertions, 0 deletions
diff --git a/src/spdk/dpdk/lib/librte_eal/include/generic/rte_atomic.h b/src/spdk/dpdk/lib/librte_eal/include/generic/rte_atomic.h
new file mode 100644
index 000000000..e6ab15a97
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eal/include/generic/rte_atomic.h
@@ -0,0 +1,1150 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#ifndef _RTE_ATOMIC_H_
+#define _RTE_ATOMIC_H_
+
+/**
+ * @file
+ * Atomic Operations
+ *
+ * This file defines a generic API for atomic operations.
+ */
+
+#include <stdint.h>
+#include <rte_common.h>
+
+#ifdef __DOXYGEN__
+
+/** @name Memory Barrier
+ */
+///@{
+/**
+ * General memory barrier.
+ *
+ * Guarantees that the LOAD and STORE operations generated before the
+ * barrier occur before the LOAD and STORE operations generated after.
+ */
+static inline void rte_mb(void);
+
+/**
+ * Write memory barrier.
+ *
+ * Guarantees that the STORE operations generated before the barrier
+ * occur before the STORE operations generated after.
+ */
+static inline void rte_wmb(void);
+
+/**
+ * Read memory barrier.
+ *
+ * Guarantees that the LOAD operations generated before the barrier
+ * occur before the LOAD operations generated after.
+ */
+static inline void rte_rmb(void);
+///@}
+
+/** @name SMP Memory Barrier
+ */
+///@{
+/**
+ * General memory barrier between lcores
+ *
+ * Guarantees that the LOAD and STORE operations that precede the
+ * rte_smp_mb() call are globally visible across the lcores
+ * before the LOAD and STORE operations that follow it.
+ */
+static inline void rte_smp_mb(void);
+
+/**
+ * Write memory barrier between lcores
+ *
+ * Guarantees that the STORE operations that precede the
+ * rte_smp_wmb() call are globally visible across the lcores
+ * before the STORE operations that follow it.
+ */
+static inline void rte_smp_wmb(void);
+
+/**
+ * Read memory barrier between lcores
+ *
+ * Guarantees that the LOAD operations that precede the
+ * rte_smp_rmb() call are globally visible across the lcores
+ * before the LOAD operations that follow it.
+ */
+static inline void rte_smp_rmb(void);
+///@}
+
+/** @name I/O Memory Barrier
+ */
+///@{
+/**
+ * General memory barrier for I/O device
+ *
+ * Guarantees that the LOAD and STORE operations that precede the
+ * rte_io_mb() call are visible to I/O device or CPU before the
+ * LOAD and STORE operations that follow it.
+ */
+static inline void rte_io_mb(void);
+
+/**
+ * Write memory barrier for I/O device
+ *
+ * Guarantees that the STORE operations that precede the
+ * rte_io_wmb() call are visible to I/O device before the STORE
+ * operations that follow it.
+ */
+static inline void rte_io_wmb(void);
+
+/**
+ * Read memory barrier for IO device
+ *
+ * Guarantees that the LOAD operations on I/O device that precede the
+ * rte_io_rmb() call are visible to CPU before the LOAD
+ * operations that follow it.
+ */
+static inline void rte_io_rmb(void);
+///@}
+
+/** @name Coherent I/O Memory Barrier
+ *
+ * A coherent I/O memory barrier is a lightweight version of the I/O memory
+ * barriers, which are system-wide data synchronization barriers. It orders
+ * accesses only within the coherent memory domain shared between an lcore
+ * and an I/O device, and on most architectures it is identical to the I/O
+ * memory barriers. Some architectures, however, provide even lighter
+ * barriers that sit between the I/O memory barriers and the SMP memory
+ * barriers. For example, on ARMv8 the DMB (data memory barrier) instruction
+ * supports different shareability domains: an inner-shareable DMB is
+ * suitable for SMP memory barriers, while an outer-shareable DMB, which
+ * acts on coherent memory, is suitable for coherent I/O memory barriers.
+ *
+ * I/O memory barriers are the safer choice in most cases, but when the
+ * accesses target coherent memory rather than the incoherent MMIO region of
+ * a device, the coherent I/O memory barriers can be used instead and may
+ * bring a performance gain depending on the architecture.
+ */
+///@{
+/**
+ * Write memory barrier for coherent memory between lcore and I/O device
+ *
+ * Guarantees that the STORE operations on coherent memory that
+ * precede the rte_cio_wmb() call are visible to I/O device before the
+ * STORE operations that follow it.
+ */
+static inline void rte_cio_wmb(void);
+
+/**
+ * Read memory barrier for coherent memory between lcore and I/O device
+ *
+ * Guarantees that the LOAD operations on coherent memory updated by
+ * I/O device that precede the rte_cio_rmb() call are visible to CPU
+ * before the LOAD operations that follow it.
+ */
+static inline void rte_cio_rmb(void);
+///@}
+
+#endif /* __DOXYGEN__ */
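
As a quick usage sketch (not part of the patch itself), the SMP barriers above are typically used in pairs, for example in a single-producer/single-consumer handshake; the variable and function names below are illustrative.

#include <stdint.h>
#include <rte_atomic.h>
#include <rte_pause.h>

static uint64_t data;           /* payload produced on one lcore */
static volatile uint32_t ready; /* flag raised once 'data' is valid */

static void producer(void)
{
	data = 42;         /* publish the payload */
	rte_smp_wmb();     /* order the payload store before the flag store */
	ready = 1;
}

static void consumer(void)
{
	while (ready == 0)
		rte_pause();   /* rte_pause() is declared in rte_pause.h, also in this patch */
	rte_smp_rmb();     /* pairs with the producer's write barrier */
	/* 'data' is now guaranteed to hold the value stored above. */
}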
+
+/**
+ * Compiler barrier.
+ *
+ * Guarantees that operation reordering does not occur at compile time
+ * for operations directly before and after the barrier.
+ */
+#define rte_compiler_barrier() do { \
+ asm volatile ("" : : : "memory"); \
+} while(0)
+
+/*------------------------- 16 bit atomic operations -------------------------*/
+
+/**
+ * Atomic compare and set.
+ *
+ * (atomic) equivalent to:
+ * if (*dst == exp)
+ * *dst = src (all 16-bit words)
+ *
+ * @param dst
+ * The destination location into which the value will be written.
+ * @param exp
+ * The expected value.
+ * @param src
+ * The new value.
+ * @return
+ * Non-zero on success; 0 on failure.
+ */
+static inline int
+rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline int
+rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
+{
+ return __sync_bool_compare_and_swap(dst, exp, src);
+}
+#endif
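
A common pattern on top of rte_atomic16_cmpset() is a read-modify-write retry loop. The helper below is an illustrative sketch, not part of the header: it atomically records a new maximum into *dst.

#include <stdint.h>
#include <rte_atomic.h>

static void
atomic16_store_max(volatile uint16_t *dst, uint16_t val)
{
	uint16_t cur;

	do {
		cur = *dst;          /* snapshot the current value */
		if (val <= cur)
			return;          /* nothing to update */
		/* retry if another lcore modified *dst since the snapshot */
	} while (rte_atomic16_cmpset(dst, cur, val) == 0);
}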
+
+/**
+ * Atomic exchange.
+ *
+ * (atomic) equivalent to:
+ * ret = *dst
+ * *dst = val;
+ * return ret;
+ *
+ * @param dst
+ * The destination location into which the value will be written.
+ * @param val
+ * The new value.
+ * @return
+ * The original value at that location
+ */
+static inline uint16_t
+rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline uint16_t
+rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val)
+{
+#if defined(__clang__)
+ return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST);
+#else
+ return __atomic_exchange_2(dst, val, __ATOMIC_SEQ_CST);
+#endif
+}
+#endif
+
+/**
+ * The atomic counter structure.
+ */
+typedef struct {
+ volatile int16_t cnt; /**< An internal counter value. */
+} rte_atomic16_t;
+
+/**
+ * Static initializer for an atomic counter.
+ */
+#define RTE_ATOMIC16_INIT(val) { (val) }
+
+/**
+ * Initialize an atomic counter.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic16_init(rte_atomic16_t *v)
+{
+ v->cnt = 0;
+}
+
+/**
+ * Atomically read a 16-bit value from a counter.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @return
+ * The value of the counter.
+ */
+static inline int16_t
+rte_atomic16_read(const rte_atomic16_t *v)
+{
+ return v->cnt;
+}
+
+/**
+ * Atomically set a counter to a 16-bit value.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @param new_value
+ * The new value for the counter.
+ */
+static inline void
+rte_atomic16_set(rte_atomic16_t *v, int16_t new_value)
+{
+ v->cnt = new_value;
+}
+
+/**
+ * Atomically add a 16-bit value to an atomic counter.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @param inc
+ * The value to be added to the counter.
+ */
+static inline void
+rte_atomic16_add(rte_atomic16_t *v, int16_t inc)
+{
+ __sync_fetch_and_add(&v->cnt, inc);
+}
+
+/**
+ * Atomically subtract a 16-bit value from an atomic counter.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @param dec
+ * The value to be subtracted from the counter.
+ */
+static inline void
+rte_atomic16_sub(rte_atomic16_t *v, int16_t dec)
+{
+ __sync_fetch_and_sub(&v->cnt, dec);
+}
+
+/**
+ * Atomically increment a counter by one.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic16_inc(rte_atomic16_t *v);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline void
+rte_atomic16_inc(rte_atomic16_t *v)
+{
+ rte_atomic16_add(v, 1);
+}
+#endif
+
+/**
+ * Atomically decrement a counter by one.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic16_dec(rte_atomic16_t *v);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline void
+rte_atomic16_dec(rte_atomic16_t *v)
+{
+ rte_atomic16_sub(v, 1);
+}
+#endif
+
+/**
+ * Atomically add a 16-bit value to a counter and return the result.
+ *
+ * Atomically adds the 16-bit value (inc) to the atomic counter (v) and
+ * returns the value of v after addition.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @param inc
+ * The value to be added to the counter.
+ * @return
+ * The value of v after the addition.
+ */
+static inline int16_t
+rte_atomic16_add_return(rte_atomic16_t *v, int16_t inc)
+{
+ return __sync_add_and_fetch(&v->cnt, inc);
+}
+
+/**
+ * Atomically subtract a 16-bit value from a counter and return
+ * the result.
+ *
+ * Atomically subtracts the 16-bit value (dec) from the atomic counter
+ * (v) and returns the value of v after the subtraction.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @param dec
+ * The value to be subtracted from the counter.
+ * @return
+ * The value of v after the subtraction.
+ */
+static inline int16_t
+rte_atomic16_sub_return(rte_atomic16_t *v, int16_t dec)
+{
+ return __sync_sub_and_fetch(&v->cnt, dec);
+}
+
+/**
+ * Atomically increment a 16-bit counter by one and test.
+ *
+ * Atomically increments the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @return
+ * True if the result after the increment operation is 0; false otherwise.
+ */
+static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
+{
+ return __sync_add_and_fetch(&v->cnt, 1) == 0;
+}
+#endif
+
+/**
+ * Atomically decrement a 16-bit counter by one and test.
+ *
+ * Atomically decrements the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @return
+ * True if the result after the decrement operation is 0; false otherwise.
+ */
+static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
+{
+ return __sync_sub_and_fetch(&v->cnt, 1) == 0;
+}
+#endif
+
+/**
+ * Atomically test and set a 16-bit atomic counter.
+ *
+ * If the counter value is already set, return 0 (failed). Otherwise, set
+ * the counter value to 1 and return 1 (success).
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @return
+ * 0 if failed; else 1, success.
+ */
+static inline int rte_atomic16_test_and_set(rte_atomic16_t *v);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)
+{
+ return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1);
+}
+#endif
+
+/**
+ * Atomically set a 16-bit counter to 0.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ */
+static inline void rte_atomic16_clear(rte_atomic16_t *v)
+{
+ v->cnt = 0;
+}
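
Taken together, the 16-bit counter helpers support a compact reference count. In the sketch below, the object type and the malloc()/free() handling are assumptions for illustration; only the rte_atomic16_* calls come from this header.

#include <stdlib.h>
#include <rte_atomic.h>

struct small_obj {
	rte_atomic16_t refcnt;
	/* ... payload ... */
};

static struct small_obj *obj_new(void)
{
	struct small_obj *o = malloc(sizeof(*o));

	if (o != NULL)
		rte_atomic16_set(&o->refcnt, 1); /* creator holds one reference */
	return o;
}

static void obj_get(struct small_obj *o)
{
	rte_atomic16_inc(&o->refcnt);
}

static void obj_put(struct small_obj *o)
{
	/* free only when the last reference is dropped */
	if (rte_atomic16_dec_and_test(&o->refcnt))
		free(o);
}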
+
+/*------------------------- 32 bit atomic operations -------------------------*/
+
+/**
+ * Atomic compare and set.
+ *
+ * (atomic) equivalent to:
+ * if (*dst == exp)
+ * *dst = src (all 32-bit words)
+ *
+ * @param dst
+ * The destination location into which the value will be written.
+ * @param exp
+ * The expected value.
+ * @param src
+ * The new value.
+ * @return
+ * Non-zero on success; 0 on failure.
+ */
+static inline int
+rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline int
+rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
+{
+ return __sync_bool_compare_and_swap(dst, exp, src);
+}
+#endif
+
+/**
+ * Atomic exchange.
+ *
+ * (atomic) equivalent to:
+ * ret = *dst
+ * *dst = val;
+ * return ret;
+ *
+ * @param dst
+ * The destination location into which the value will be written.
+ * @param val
+ * The new value.
+ * @return
+ * The original value at that location
+ */
+static inline uint32_t
+rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline uint32_t
+rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val)
+{
+#if defined(__clang__)
+ return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST);
+#else
+ return __atomic_exchange_4(dst, val, __ATOMIC_SEQ_CST);
+#endif
+}
+#endif
+
+/**
+ * The atomic counter structure.
+ */
+typedef struct {
+ volatile int32_t cnt; /**< An internal counter value. */
+} rte_atomic32_t;
+
+/**
+ * Static initializer for an atomic counter.
+ */
+#define RTE_ATOMIC32_INIT(val) { (val) }
+
+/**
+ * Initialize an atomic counter.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic32_init(rte_atomic32_t *v)
+{
+ v->cnt = 0;
+}
+
+/**
+ * Atomically read a 32-bit value from a counter.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @return
+ * The value of the counter.
+ */
+static inline int32_t
+rte_atomic32_read(const rte_atomic32_t *v)
+{
+ return v->cnt;
+}
+
+/**
+ * Atomically set a counter to a 32-bit value.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @param new_value
+ * The new value for the counter.
+ */
+static inline void
+rte_atomic32_set(rte_atomic32_t *v, int32_t new_value)
+{
+ v->cnt = new_value;
+}
+
+/**
+ * Atomically add a 32-bit value to an atomic counter.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @param inc
+ * The value to be added to the counter.
+ */
+static inline void
+rte_atomic32_add(rte_atomic32_t *v, int32_t inc)
+{
+ __sync_fetch_and_add(&v->cnt, inc);
+}
+
+/**
+ * Atomically subtract a 32-bit value from an atomic counter.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @param dec
+ * The value to be subtracted from the counter.
+ */
+static inline void
+rte_atomic32_sub(rte_atomic32_t *v, int32_t dec)
+{
+ __sync_fetch_and_sub(&v->cnt, dec);
+}
+
+/**
+ * Atomically increment a counter by one.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic32_inc(rte_atomic32_t *v);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline void
+rte_atomic32_inc(rte_atomic32_t *v)
+{
+ rte_atomic32_add(v, 1);
+}
+#endif
+
+/**
+ * Atomically decrement a counter by one.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic32_dec(rte_atomic32_t *v);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline void
+rte_atomic32_dec(rte_atomic32_t *v)
+{
+	rte_atomic32_sub(v, 1);
+}
+#endif
+
+/**
+ * Atomically add a 32-bit value to a counter and return the result.
+ *
+ * Atomically adds the 32-bit value (inc) to the atomic counter (v) and
+ * returns the value of v after addition.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @param inc
+ * The value to be added to the counter.
+ * @return
+ * The value of v after the addition.
+ */
+static inline int32_t
+rte_atomic32_add_return(rte_atomic32_t *v, int32_t inc)
+{
+ return __sync_add_and_fetch(&v->cnt, inc);
+}
+
+/**
+ * Atomically subtract a 32-bit value from a counter and return
+ * the result.
+ *
+ * Atomically subtracts the 32-bit value (dec) from the atomic counter
+ * (v) and returns the value of v after the subtraction.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @param dec
+ * The value to be subtracted from the counter.
+ * @return
+ * The value of v after the subtraction.
+ */
+static inline int32_t
+rte_atomic32_sub_return(rte_atomic32_t *v, int32_t dec)
+{
+ return __sync_sub_and_fetch(&v->cnt, dec);
+}
+
+/**
+ * Atomically increment a 32-bit counter by one and test.
+ *
+ * Atomically increments the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @return
+ * True if the result after the increment operation is 0; false otherwise.
+ */
+static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
+{
+ return __sync_add_and_fetch(&v->cnt, 1) == 0;
+}
+#endif
+
+/**
+ * Atomically decrement a 32-bit counter by one and test.
+ *
+ * Atomically decrements the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @return
+ * True if the result after the decrement operation is 0; false otherwise.
+ */
+static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
+{
+ return __sync_sub_and_fetch(&v->cnt, 1) == 0;
+}
+#endif
+
+/**
+ * Atomically test and set a 32-bit atomic counter.
+ *
+ * If the counter value is already set, return 0 (failed). Otherwise, set
+ * the counter value to 1 and return 1 (success).
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @return
+ * 0 if failed; else 1, success.
+ */
+static inline int rte_atomic32_test_and_set(rte_atomic32_t *v);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
+{
+ return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1);
+}
+#endif
+
+/**
+ * Atomically set a 32-bit counter to 0.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ */
+static inline void rte_atomic32_clear(rte_atomic32_t *v)
+{
+ v->cnt = 0;
+}
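
rte_atomic32_test_and_set() and rte_atomic32_clear() are often combined into an advisory busy flag, as in this sketch (the housekeeping work is an assumed placeholder); for real mutual exclusion with ordering guarantees, rte_spinlock_t from this same patch is the appropriate primitive.

#include <rte_atomic.h>

static rte_atomic32_t busy = RTE_ATOMIC32_INIT(0);

/* Returns 1 if the work ran, 0 if another lcore was already doing it. */
static int try_housekeeping(void)
{
	if (!rte_atomic32_test_and_set(&busy))
		return 0;                /* flag already held elsewhere */

	/* ... periodic, idempotent housekeeping work ... */

	rte_atomic32_clear(&busy);       /* drop the flag */
	return 1;
}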
+
+/*------------------------- 64 bit atomic operations -------------------------*/
+
+/**
+ * An atomic compare and set function used by the mutex functions.
+ * (atomic) equivalent to:
+ * if (*dst == exp)
+ * *dst = src (all 64-bit words)
+ *
+ * @param dst
+ * The destination into which the value will be written.
+ * @param exp
+ * The expected value.
+ * @param src
+ * The new value.
+ * @return
+ * Non-zero on success; 0 on failure.
+ */
+static inline int
+rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline int
+rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
+{
+ return __sync_bool_compare_and_swap(dst, exp, src);
+}
+#endif
+
+/**
+ * Atomic exchange.
+ *
+ * (atomic) equivalent to:
+ * ret = *dst
+ * *dst = val;
+ * return ret;
+ *
+ * @param dst
+ * The destination location into which the value will be written.
+ * @param val
+ * The new value.
+ * @return
+ * The original value at that location
+ */
+static inline uint64_t
+rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline uint64_t
+rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val)
+{
+#if defined(__clang__)
+ return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST);
+#else
+ return __atomic_exchange_8(dst, val, __ATOMIC_SEQ_CST);
+#endif
+}
+#endif
+
+/**
+ * The atomic counter structure.
+ */
+typedef struct {
+ volatile int64_t cnt; /**< Internal counter value. */
+} rte_atomic64_t;
+
+/**
+ * Static initializer for an atomic counter.
+ */
+#define RTE_ATOMIC64_INIT(val) { (val) }
+
+/**
+ * Initialize the atomic counter.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic64_init(rte_atomic64_t *v);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline void
+rte_atomic64_init(rte_atomic64_t *v)
+{
+#ifdef __LP64__
+ v->cnt = 0;
+#else
+ int success = 0;
+ uint64_t tmp;
+
+ while (success == 0) {
+ tmp = v->cnt;
+ success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
+ tmp, 0);
+ }
+#endif
+}
+#endif
+
+/**
+ * Atomically read a 64-bit counter.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @return
+ * The value of the counter.
+ */
+static inline int64_t
+rte_atomic64_read(rte_atomic64_t *v);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline int64_t
+rte_atomic64_read(rte_atomic64_t *v)
+{
+#ifdef __LP64__
+ return v->cnt;
+#else
+ int success = 0;
+ uint64_t tmp;
+
+ while (success == 0) {
+ tmp = v->cnt;
+ /* replace the value by itself */
+ success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
+ tmp, tmp);
+ }
+ return tmp;
+#endif
+}
+#endif
+
+/**
+ * Atomically set a 64-bit counter.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @param new_value
+ * The new value of the counter.
+ */
+static inline void
+rte_atomic64_set(rte_atomic64_t *v, int64_t new_value);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline void
+rte_atomic64_set(rte_atomic64_t *v, int64_t new_value)
+{
+#ifdef __LP64__
+ v->cnt = new_value;
+#else
+ int success = 0;
+ uint64_t tmp;
+
+ while (success == 0) {
+ tmp = v->cnt;
+ success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
+ tmp, new_value);
+ }
+#endif
+}
+#endif
+
+/**
+ * Atomically add a 64-bit value to a counter.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @param inc
+ * The value to be added to the counter.
+ */
+static inline void
+rte_atomic64_add(rte_atomic64_t *v, int64_t inc);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline void
+rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
+{
+ __sync_fetch_and_add(&v->cnt, inc);
+}
+#endif
+
+/**
+ * Atomically subtract a 64-bit value from a counter.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @param dec
+ * The value to be subtracted from the counter.
+ */
+static inline void
+rte_atomic64_sub(rte_atomic64_t *v, int64_t dec);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline void
+rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
+{
+ __sync_fetch_and_sub(&v->cnt, dec);
+}
+#endif
+
+/**
+ * Atomically increment a 64-bit counter by one.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic64_inc(rte_atomic64_t *v);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline void
+rte_atomic64_inc(rte_atomic64_t *v)
+{
+ rte_atomic64_add(v, 1);
+}
+#endif
+
+/**
+ * Atomically decrement a 64-bit counter by one.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic64_dec(rte_atomic64_t *v);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline void
+rte_atomic64_dec(rte_atomic64_t *v)
+{
+ rte_atomic64_sub(v, 1);
+}
+#endif
+
+/**
+ * Add a 64-bit value to an atomic counter and return the result.
+ *
+ * Atomically adds the 64-bit value (inc) to the atomic counter (v) and
+ * returns the value of v after the addition.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @param inc
+ * The value to be added to the counter.
+ * @return
+ * The value of v after the addition.
+ */
+static inline int64_t
+rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline int64_t
+rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
+{
+ return __sync_add_and_fetch(&v->cnt, inc);
+}
+#endif
+
+/**
+ * Subtract a 64-bit value from an atomic counter and return the result.
+ *
+ * Atomically subtracts the 64-bit value (dec) from the atomic counter (v)
+ * and returns the value of v after the subtraction.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @param dec
+ * The value to be subtracted from the counter.
+ * @return
+ * The value of v after the subtraction.
+ */
+static inline int64_t
+rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline int64_t
+rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
+{
+ return __sync_sub_and_fetch(&v->cnt, dec);
+}
+#endif
+
+/**
+ * Atomically increment a 64-bit counter by one and test.
+ *
+ * Atomically increments the atomic counter (v) by one and returns
+ * true if the result is 0, or false in all other cases.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @return
+ * True if the result after the addition is 0; false otherwise.
+ */
+static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v)
+{
+ return rte_atomic64_add_return(v, 1) == 0;
+}
+#endif
+
+/**
+ * Atomically decrement a 64-bit counter by one and test.
+ *
+ * Atomically decrements the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @return
+ * True if the result after subtraction is 0; false otherwise.
+ */
+static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
+{
+ return rte_atomic64_sub_return(v, 1) == 0;
+}
+#endif
+
+/**
+ * Atomically test and set a 64-bit atomic counter.
+ *
+ * If the counter value is already set, return 0 (failed). Otherwise, set
+ * the counter value to 1 and return 1 (success).
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @return
+ * 0 if failed; else 1, success.
+ */
+static inline int rte_atomic64_test_and_set(rte_atomic64_t *v);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)
+{
+ return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1);
+}
+#endif
+
+/**
+ * Atomically set a 64-bit counter to 0.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ */
+static inline void rte_atomic64_clear(rte_atomic64_t *v);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline void rte_atomic64_clear(rte_atomic64_t *v)
+{
+ rte_atomic64_set(v, 0);
+}
+#endif
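
The 64-bit counters map naturally onto per-device statistics; the struct layout and field names in this sketch are illustrative only.

#include <stdio.h>
#include <inttypes.h>
#include <rte_atomic.h>

struct rx_stats {
	rte_atomic64_t pkts;
	rte_atomic64_t bytes;
};

static void account_rx(struct rx_stats *s, uint64_t nb_pkts, uint64_t nb_bytes)
{
	rte_atomic64_add(&s->pkts, (int64_t)nb_pkts);
	rte_atomic64_add(&s->bytes, (int64_t)nb_bytes);
}

static void print_rx(struct rx_stats *s)
{
	printf("pkts=%" PRId64 " bytes=%" PRId64 "\n",
	       rte_atomic64_read(&s->pkts), rte_atomic64_read(&s->bytes));
}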
+
+/*------------------------ 128 bit atomic operations -------------------------*/
+
+/**
+ * 128-bit integer structure.
+ */
+RTE_STD_C11
+typedef struct {
+ RTE_STD_C11
+ union {
+ uint64_t val[2];
+#ifdef RTE_ARCH_64
+ __extension__ __int128 int128;
+#endif
+ };
+} __rte_aligned(16) rte_int128_t;
+
+#ifdef __DOXYGEN__
+
+/**
+ * An atomic compare and set function used by the mutex functions.
+ * (Atomically) Equivalent to:
+ * @code
+ * if (*dst == *exp)
+ * *dst = *src
+ * else
+ * *exp = *dst
+ * @endcode
+ *
+ * @note This function is currently available for the x86-64 and aarch64
+ * platforms.
+ *
+ * @note The success and failure arguments must be one of the __ATOMIC_* values
+ * defined in the C++11 standard. For details on their behavior, refer to the
+ * standard.
+ *
+ * @param dst
+ * The destination into which the value will be written.
+ * @param exp
+ * Pointer to the expected value. If the operation fails, this memory is
+ * updated with the actual value.
+ * @param src
+ * Pointer to the new value.
+ * @param weak
+ * A value of true allows the comparison to spuriously fail and allows the
+ * 'exp' update to occur non-atomically (i.e. a torn read may occur).
+ * Implementations may ignore this argument and only implement the strong
+ * variant.
+ * @param success
+ * If successful, the operation's memory behavior conforms to this (or a
+ * stronger) model.
+ * @param failure
+ * If unsuccessful, the operation's memory behavior conforms to this (or a
+ * stronger) model. This argument cannot be __ATOMIC_RELEASE,
+ * __ATOMIC_ACQ_REL, or a stronger model than success.
+ * @return
+ * Non-zero on success; 0 on failure.
+ */
+__rte_experimental
+static inline int
+rte_atomic128_cmp_exchange(rte_int128_t *dst,
+ rte_int128_t *exp,
+ const rte_int128_t *src,
+ unsigned int weak,
+ int success,
+ int failure);
+
+#endif /* __DOXYGEN__ */
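
A rough sketch of how rte_atomic128_cmp_exchange() is usually driven (it needs ALLOW_EXPERIMENTAL_API and an x86-64 or aarch64 build, per the notes above); treating the two halves as a (value, tag) pair is an assumption for illustration.

#include <stdint.h>
#include <rte_atomic.h>

static void
install_pair(rte_int128_t *slot, uint64_t value, uint64_t tag)
{
	rte_int128_t expected = *slot; /* racy snapshot; refreshed on failure */
	rte_int128_t desired;

	desired.val[0] = value;
	desired.val[1] = tag;

	/* On failure, 'expected' is updated with the current contents of
	 * *slot, so the loop simply retries against fresh data.
	 */
	while (!rte_atomic128_cmp_exchange(slot, &expected, &desired, 0,
					   __ATOMIC_ACQ_REL, __ATOMIC_RELAXED))
		;
}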
+
+#endif /* _RTE_ATOMIC_H_ */
diff --git a/src/spdk/dpdk/lib/librte_eal/include/generic/rte_byteorder.h b/src/spdk/dpdk/lib/librte_eal/include/generic/rte_byteorder.h
new file mode 100644
index 000000000..9ca960932
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eal/include/generic/rte_byteorder.h
@@ -0,0 +1,247 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#ifndef _RTE_BYTEORDER_H_
+#define _RTE_BYTEORDER_H_
+
+/**
+ * @file
+ *
+ * Byte Swap Operations
+ *
+ * This file defines a generic API for byte swap operations. Part of
+ * the implementation is architecture-specific.
+ */
+
+#include <stdint.h>
+#ifdef RTE_EXEC_ENV_FREEBSD
+#include <sys/endian.h>
+#else
+#include <endian.h>
+#endif
+
+#include <rte_common.h>
+#include <rte_config.h>
+
+/*
+ * Compile-time endianness detection
+ */
+#define RTE_BIG_ENDIAN 1
+#define RTE_LITTLE_ENDIAN 2
+#if defined __BYTE_ORDER__
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define RTE_BYTE_ORDER RTE_BIG_ENDIAN
+#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define RTE_BYTE_ORDER RTE_LITTLE_ENDIAN
+#endif /* __BYTE_ORDER__ */
+#elif defined __BYTE_ORDER
+#if __BYTE_ORDER == __BIG_ENDIAN
+#define RTE_BYTE_ORDER RTE_BIG_ENDIAN
+#elif __BYTE_ORDER == __LITTLE_ENDIAN
+#define RTE_BYTE_ORDER RTE_LITTLE_ENDIAN
+#endif /* __BYTE_ORDER */
+#elif defined __BIG_ENDIAN__
+#define RTE_BYTE_ORDER RTE_BIG_ENDIAN
+#elif defined __LITTLE_ENDIAN__
+#define RTE_BYTE_ORDER RTE_LITTLE_ENDIAN
+#endif
+#if !defined(RTE_BYTE_ORDER)
+#error Unknown endianness.
+#endif
+
+#define RTE_STATIC_BSWAP16(v) \
+ ((((uint16_t)(v) & UINT16_C(0x00ff)) << 8) | \
+ (((uint16_t)(v) & UINT16_C(0xff00)) >> 8))
+
+#define RTE_STATIC_BSWAP32(v) \
+ ((((uint32_t)(v) & UINT32_C(0x000000ff)) << 24) | \
+ (((uint32_t)(v) & UINT32_C(0x0000ff00)) << 8) | \
+ (((uint32_t)(v) & UINT32_C(0x00ff0000)) >> 8) | \
+ (((uint32_t)(v) & UINT32_C(0xff000000)) >> 24))
+
+#define RTE_STATIC_BSWAP64(v) \
+ ((((uint64_t)(v) & UINT64_C(0x00000000000000ff)) << 56) | \
+ (((uint64_t)(v) & UINT64_C(0x000000000000ff00)) << 40) | \
+ (((uint64_t)(v) & UINT64_C(0x0000000000ff0000)) << 24) | \
+ (((uint64_t)(v) & UINT64_C(0x00000000ff000000)) << 8) | \
+ (((uint64_t)(v) & UINT64_C(0x000000ff00000000)) >> 8) | \
+ (((uint64_t)(v) & UINT64_C(0x0000ff0000000000)) >> 24) | \
+ (((uint64_t)(v) & UINT64_C(0x00ff000000000000)) >> 40) | \
+ (((uint64_t)(v) & UINT64_C(0xff00000000000000)) >> 56))
+
+/*
+ * These macros are functionally similar to rte_cpu_to_(be|le)(16|32|64)():
+ * they take values in host CPU order and return them converted to the
+ * intended endianness.
+ *
+ * They resolve at compilation time to integer constants which can safely be
+ * used with static initializers, since those cannot involve function calls.
+ *
+ * On the other hand, they are not as optimized as their rte_cpu_to_*()
+ * counterparts, therefore applications should refrain from using them on
+ * variable values, particularly inside performance-sensitive code.
+ */
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+#define RTE_BE16(v) (rte_be16_t)(v)
+#define RTE_BE32(v) (rte_be32_t)(v)
+#define RTE_BE64(v) (rte_be64_t)(v)
+#define RTE_LE16(v) (rte_le16_t)(RTE_STATIC_BSWAP16(v))
+#define RTE_LE32(v) (rte_le32_t)(RTE_STATIC_BSWAP32(v))
+#define RTE_LE64(v) (rte_le64_t)(RTE_STATIC_BSWAP64(v))
+#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+#define RTE_BE16(v) (rte_be16_t)(RTE_STATIC_BSWAP16(v))
+#define RTE_BE32(v) (rte_be32_t)(RTE_STATIC_BSWAP32(v))
+#define RTE_BE64(v) (rte_be64_t)(RTE_STATIC_BSWAP64(v))
+#define RTE_LE16(v) (rte_le16_t)(v)
+#define RTE_LE32(v) (rte_le32_t)(v)
+#define RTE_LE64(v) (rte_le64_t)(v)
+#else
+#error Unsupported endianness.
+#endif
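
Because the RTE_BE*/RTE_LE* macros fold to compile-time constants, they are usable in static initializers where the rte_cpu_to_*() functions are not; the match structure in this sketch is illustrative.

#include <rte_byteorder.h>

/* A rule keyed on a TCP destination port, kept in network byte order. */
struct l4_match {
	rte_be16_t dst_port;
};

static const struct l4_match http_match = {
	.dst_port = RTE_BE16(80), /* byte swap resolved at compile time on LE CPUs */
};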
+
+/*
+ * The following types should be used when handling values according to a
+ * specific byte ordering, which may differ from that of the host CPU.
+ *
+ * Libraries, public APIs and applications are encouraged to use them for
+ * documentation purposes.
+ */
+typedef uint16_t rte_be16_t; /**< 16-bit big-endian value. */
+typedef uint32_t rte_be32_t; /**< 32-bit big-endian value. */
+typedef uint64_t rte_be64_t; /**< 64-bit big-endian value. */
+typedef uint16_t rte_le16_t; /**< 16-bit little-endian value. */
+typedef uint32_t rte_le32_t; /**< 32-bit little-endian value. */
+typedef uint64_t rte_le64_t; /**< 64-bit little-endian value. */
+
+/*
+ * An internal function to swap bytes in a 16-bit value.
+ *
+ * It is used by rte_bswap16() when the value is constant. Do not use
+ * this function directly; rte_bswap16() is preferred.
+ */
+static inline uint16_t
+rte_constant_bswap16(uint16_t x)
+{
+ return (uint16_t)RTE_STATIC_BSWAP16(x);
+}
+
+/*
+ * An internal function to swap bytes in a 32-bit value.
+ *
+ * It is used by rte_bswap32() when the value is constant. Do not use
+ * this function directly; rte_bswap32() is preferred.
+ */
+static inline uint32_t
+rte_constant_bswap32(uint32_t x)
+{
+ return (uint32_t)RTE_STATIC_BSWAP32(x);
+}
+
+/*
+ * An internal function to swap bytes of a 64-bit value.
+ *
+ * It is used by rte_bswap64() when the value is constant. Do not use
+ * this function directly; rte_bswap64() is preferred.
+ */
+static inline uint64_t
+rte_constant_bswap64(uint64_t x)
+{
+ return (uint64_t)RTE_STATIC_BSWAP64(x);
+}
+
+
+#ifdef __DOXYGEN__
+
+/**
+ * Swap bytes in a 16-bit value.
+ */
+static uint16_t rte_bswap16(uint16_t _x);
+
+/**
+ * Swap bytes in a 32-bit value.
+ */
+static uint32_t rte_bswap32(uint32_t x);
+
+/**
+ * Swap bytes in a 64-bit value.
+ */
+static uint64_t rte_bswap64(uint64_t x);
+
+/**
+ * Convert a 16-bit value from CPU order to little endian.
+ */
+static rte_le16_t rte_cpu_to_le_16(uint16_t x);
+
+/**
+ * Convert a 32-bit value from CPU order to little endian.
+ */
+static rte_le32_t rte_cpu_to_le_32(uint32_t x);
+
+/**
+ * Convert a 64-bit value from CPU order to little endian.
+ */
+static rte_le64_t rte_cpu_to_le_64(uint64_t x);
+
+
+/**
+ * Convert a 16-bit value from CPU order to big endian.
+ */
+static rte_be16_t rte_cpu_to_be_16(uint16_t x);
+
+/**
+ * Convert a 32-bit value from CPU order to big endian.
+ */
+static rte_be32_t rte_cpu_to_be_32(uint32_t x);
+
+/**
+ * Convert a 64-bit value from CPU order to big endian.
+ */
+static rte_be64_t rte_cpu_to_be_64(uint64_t x);
+
+
+/**
+ * Convert a 16-bit value from little endian to CPU order.
+ */
+static uint16_t rte_le_to_cpu_16(rte_le16_t x);
+
+/**
+ * Convert a 32-bit value from little endian to CPU order.
+ */
+static uint32_t rte_le_to_cpu_32(rte_le32_t x);
+
+/**
+ * Convert a 64-bit value from little endian to CPU order.
+ */
+static uint64_t rte_le_to_cpu_64(rte_le64_t x);
+
+
+/**
+ * Convert a 16-bit value from big endian to CPU order.
+ */
+static uint16_t rte_be_to_cpu_16(rte_be16_t x);
+
+/**
+ * Convert a 32-bit value from big endian to CPU order.
+ */
+static uint32_t rte_be_to_cpu_32(rte_be32_t x);
+
+/**
+ * Convert a 64-bit value from big endian to CPU order.
+ */
+static uint64_t rte_be_to_cpu_64(rte_be64_t x);
+
+#endif /* __DOXYGEN__ */
+
+#ifdef RTE_FORCE_INTRINSICS
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
+#define rte_bswap16(x) __builtin_bswap16(x)
+#endif
+
+#define rte_bswap32(x) __builtin_bswap32(x)
+
+#define rte_bswap64(x) __builtin_bswap64(x)
+
+#endif
+
+#endif /* _RTE_BYTEORDER_H_ */
diff --git a/src/spdk/dpdk/lib/librte_eal/include/generic/rte_cpuflags.h b/src/spdk/dpdk/lib/librte_eal/include/generic/rte_cpuflags.h
new file mode 100644
index 000000000..872f0ebe3
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eal/include/generic/rte_cpuflags.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#ifndef _RTE_CPUFLAGS_H_
+#define _RTE_CPUFLAGS_H_
+
+/**
+ * @file
+ * Architecture specific API to determine available CPU features at runtime.
+ */
+
+#include "rte_common.h"
+#include <errno.h>
+
+/**
+ * Enumeration of all CPU features supported
+ */
+__extension__
+enum rte_cpu_flag_t;
+
+/**
+ * Get name of CPU flag
+ *
+ * @param feature
+ * CPU flag ID
+ * @return
+ * flag name
+ * NULL if flag ID is invalid
+ */
+__extension__
+const char *
+rte_cpu_get_flag_name(enum rte_cpu_flag_t feature);
+
+/**
+ * Function for checking a CPU flag availability
+ *
+ * @param feature
+ * CPU flag to query CPU for
+ * @return
+ * 1 if flag is available
+ * 0 if flag is not available
+ * -ENOENT if flag is invalid
+ */
+__extension__
+int
+rte_cpu_get_flag_enabled(enum rte_cpu_flag_t feature);
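
Sketch of runtime feature reporting with these two calls; RTE_CPUFLAG_AVX2 comes from the x86 flag enumeration, so the check is guarded here as an assumption about the target architecture.

#include <stdio.h>
#include <rte_cpuflags.h>

static void report_avx2(void)
{
#ifdef RTE_ARCH_X86
	int ret = rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2);

	if (ret < 0)
		printf("AVX2 flag not recognised (%d)\n", ret);
	else
		printf("%s: %s\n", rte_cpu_get_flag_name(RTE_CPUFLAG_AVX2),
		       ret ? "available" : "not available");
#endif
}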
+
+/**
+ * This function checks that the currently used CPU supports the CPU features
+ * that were specified at compile time. It is called automatically within the
+ * EAL, so does not need to be used by applications. This version returns a
+ * result so that decisions may be made (for instance, graceful shutdowns).
+ */
+int
+rte_cpu_is_supported(void);
+
+/**
+ * This function attempts to retrieve a value from the auxiliary vector.
+ * If it is unsuccessful, the result will be 0, and errno will be set.
+ *
+ * @return A value from the auxiliary vector. When the value is 0, check
+ * errno to determine if an error occurred.
+ */
+unsigned long
+rte_cpu_getauxval(unsigned long type);
+
+/**
+ * This function retrieves a value from the auxiliary vector and compares it,
+ * as a string, against the string provided in 'str'.
+ *
+ * @return The result of calling strcmp() against the value retrieved from
+ * the auxiliary vector. When the value is 0 (meaning a match is found),
+ * check errno to determine if an error occurred.
+ */
+int
+rte_cpu_strcmp_auxval(unsigned long type, const char *str);
+
+#endif /* _RTE_CPUFLAGS_H_ */
diff --git a/src/spdk/dpdk/lib/librte_eal/include/generic/rte_cycles.h b/src/spdk/dpdk/lib/librte_eal/include/generic/rte_cycles.h
new file mode 100644
index 000000000..73d1fa7b9
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eal/include/generic/rte_cycles.h
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation.
+ * Copyright(c) 2013 6WIND S.A.
+ */
+
+#ifndef _RTE_CYCLES_H_
+#define _RTE_CYCLES_H_
+
+/**
+ * @file
+ *
+ * Simple Time Reference Functions (Cycles and HPET).
+ */
+
+#include <stdint.h>
+#include <rte_compat.h>
+#include <rte_debug.h>
+#include <rte_atomic.h>
+
+#define MS_PER_S 1000
+#define US_PER_S 1000000
+#define NS_PER_S 1000000000
+
+enum timer_source {
+ EAL_TIMER_TSC = 0,
+ EAL_TIMER_HPET
+};
+extern enum timer_source eal_timer_source;
+
+/**
+ * Get the measured frequency of the RDTSC counter
+ *
+ * @return
+ * The TSC frequency for this lcore
+ */
+uint64_t
+rte_get_tsc_hz(void);
+
+/**
+ * Return the number of TSC cycles since boot
+ *
+ * @return
+ * the number of cycles
+ */
+static inline uint64_t
+rte_get_tsc_cycles(void);
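
A minimal timing sketch built on the two TSC helpers declared above; the code under measurement is a placeholder.

#include <stdio.h>
#include <inttypes.h>
#include <rte_cycles.h>

static void measure_once(void)
{
	uint64_t start = rte_get_tsc_cycles();

	/* ... code under measurement ... */

	uint64_t cycles = rte_get_tsc_cycles() - start;
	double us = (double)cycles * US_PER_S / (double)rte_get_tsc_hz();

	printf("took %" PRIu64 " cycles (%.2f us)\n", cycles, us);
}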
+
+#ifdef RTE_LIBEAL_USE_HPET
+/**
+ * Return the number of HPET cycles since boot
+ *
+ * This counter is global for all execution units. The number of
+ * cycles in one second can be retrieved using rte_get_hpet_hz().
+ *
+ * @return
+ * the number of cycles
+ */
+uint64_t
+rte_get_hpet_cycles(void);
+
+/**
+ * Get the number of HPET cycles in one second.
+ *
+ * @return
+ * The number of cycles in one second.
+ */
+uint64_t
+rte_get_hpet_hz(void);
+
+/**
+ * Initialise the HPET for use. This must be called before the rte_get_hpet_hz
+ * and rte_get_hpet_cycles APIs are called. If this function does not succeed,
+ * then the HPET functions are unavailable and should not be called.
+ *
+ * @param make_default
+ * If set, the hpet timer becomes the default timer whose values are
+ * returned by the rte_get_timer_hz/cycles API calls
+ *
+ * @return
+ * 0 on success,
+ * -1 on error, and the make_default parameter is ignored.
+ */
+int rte_eal_hpet_init(int make_default);
+
+#endif
+
+/**
+ * Get the number of cycles since boot from the default timer.
+ *
+ * @return
+ * The number of cycles
+ */
+static inline uint64_t
+rte_get_timer_cycles(void)
+{
+#ifdef RTE_LIBEAL_USE_HPET
+	switch (eal_timer_source) {
+ case EAL_TIMER_TSC:
+#endif
+ return rte_get_tsc_cycles();
+#ifdef RTE_LIBEAL_USE_HPET
+ case EAL_TIMER_HPET:
+ return rte_get_hpet_cycles();
+ default: rte_panic("Invalid timer source specified\n");
+ }
+#endif
+}
+
+/**
+ * Get the number of cycles in one second for the default timer.
+ *
+ * @return
+ * The number of cycles in one second.
+ */
+static inline uint64_t
+rte_get_timer_hz(void)
+{
+#ifdef RTE_LIBEAL_USE_HPET
+	switch (eal_timer_source) {
+ case EAL_TIMER_TSC:
+#endif
+ return rte_get_tsc_hz();
+#ifdef RTE_LIBEAL_USE_HPET
+ case EAL_TIMER_HPET:
+ return rte_get_hpet_hz();
+ default: rte_panic("Invalid timer source specified\n");
+ }
+#endif
+}
+
+/**
+ * Wait at least us microseconds.
+ * This function can be replaced with a user-defined function.
+ * @see rte_delay_us_callback_register
+ *
+ * @param us
+ * The number of microseconds to wait.
+ */
+extern void
+(*rte_delay_us)(unsigned int us);
+
+/**
+ * Wait at least ms milliseconds.
+ *
+ * @param ms
+ * The number of milliseconds to wait.
+ */
+static inline void
+rte_delay_ms(unsigned ms)
+{
+ rte_delay_us(ms * 1000);
+}
+
+/**
+ * Blocking delay function.
+ *
+ * @param us
+ * Number of microseconds to wait.
+ */
+void rte_delay_us_block(unsigned int us);
+
+/**
+ * Delay function that uses system sleep.
+ * Does not block the CPU core.
+ *
+ * @param us
+ * Number of microseconds to wait.
+ */
+__rte_experimental
+void
+rte_delay_us_sleep(unsigned int us);
+
+/**
+ * Replace rte_delay_us with user defined function.
+ *
+ * @param userfunc
+ * User function which replaces rte_delay_us. rte_delay_us_block restores
+ * builtin block delay function.
+ */
+void rte_delay_us_callback_register(void(*userfunc)(unsigned int));
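
Sketch of swapping in a sleeping delay and then restoring the built-in busy-wait; the nanosleep-based callback is an assumption, not the EAL default.

#include <time.h>
#include <rte_cycles.h>

static void sleeping_delay(unsigned int us)
{
	struct timespec ts = {
		.tv_sec = us / US_PER_S,
		.tv_nsec = (long)(us % US_PER_S) * 1000,
	};

	nanosleep(&ts, NULL);
}

static void switch_delay_hooks(void)
{
	rte_delay_us_callback_register(sleeping_delay);
	rte_delay_us(100);  /* now sleeps instead of busy-polling */

	/* Registering rte_delay_us_block restores the built-in behaviour. */
	rte_delay_us_callback_register(rte_delay_us_block);
}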
+
+#endif /* _RTE_CYCLES_H_ */
diff --git a/src/spdk/dpdk/lib/librte_eal/include/generic/rte_io.h b/src/spdk/dpdk/lib/librte_eal/include/generic/rte_io.h
new file mode 100644
index 000000000..da457f7f7
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eal/include/generic/rte_io.h
@@ -0,0 +1,350 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
+ */
+
+#ifndef _RTE_IO_H_
+#define _RTE_IO_H_
+
+/**
+ * @file
+ * I/O device memory operations
+ *
+ * This file defines the generic API for I/O device memory read/write operations
+ */
+
+#include <stdint.h>
+#include <rte_common.h>
+#include <rte_atomic.h>
+
+#ifdef __DOXYGEN__
+
+/**
+ * Read an 8-bit value from I/O device memory address *addr*.
+ *
+ * The relaxed version does not add an extra I/O memory barrier; this is
+ * useful when accessing the device registers of integrated controllers,
+ * which are implicitly strongly ordered with respect to memory access.
+ *
+ * @param addr
+ * I/O memory address to read the value from
+ * @return
+ * read value
+ */
+static inline uint8_t
+rte_read8_relaxed(const volatile void *addr);
+
+/**
+ * Read a 16-bit value from I/O device memory address *addr*.
+ *
+ * The relaxed version does not add an extra I/O memory barrier; this is
+ * useful when accessing the device registers of integrated controllers,
+ * which are implicitly strongly ordered with respect to memory access.
+ *
+ * @param addr
+ * I/O memory address to read the value from
+ * @return
+ * read value
+ */
+static inline uint16_t
+rte_read16_relaxed(const volatile void *addr);
+
+/**
+ * Read a 32-bit value from I/O device memory address *addr*.
+ *
+ * The relaxed version does not add an extra I/O memory barrier; this is
+ * useful when accessing the device registers of integrated controllers,
+ * which are implicitly strongly ordered with respect to memory access.
+ *
+ * @param addr
+ * I/O memory address to read the value from
+ * @return
+ * read value
+ */
+static inline uint32_t
+rte_read32_relaxed(const volatile void *addr);
+
+/**
+ * Read a 64-bit value from I/O device memory address *addr*.
+ *
+ * The relaxed version does not add an extra I/O memory barrier; this is
+ * useful when accessing the device registers of integrated controllers,
+ * which are implicitly strongly ordered with respect to memory access.
+ *
+ * @param addr
+ * I/O memory address to read the value from
+ * @return
+ * read value
+ */
+static inline uint64_t
+rte_read64_relaxed(const volatile void *addr);
+
+/**
+ * Write an 8-bit value to I/O device memory address *addr*.
+ *
+ * The relaxed version does not add an extra I/O memory barrier; this is
+ * useful when accessing the device registers of integrated controllers,
+ * which are implicitly strongly ordered with respect to memory access.
+ *
+ * @param value
+ * Value to write
+ * @param addr
+ * I/O memory address to write the value to
+ */
+
+static inline void
+rte_write8_relaxed(uint8_t value, volatile void *addr);
+
+/**
+ * Write a 16-bit value to I/O device memory address *addr*.
+ *
+ * The relaxed version does not add an extra I/O memory barrier; this is
+ * useful when accessing the device registers of integrated controllers,
+ * which are implicitly strongly ordered with respect to memory access.
+ *
+ * @param value
+ * Value to write
+ * @param addr
+ * I/O memory address to write the value to
+ */
+static inline void
+rte_write16_relaxed(uint16_t value, volatile void *addr);
+
+/**
+ * Write a 32-bit value to I/O device memory address *addr*.
+ *
+ * The relaxed version does not add an extra I/O memory barrier; this is
+ * useful when accessing the device registers of integrated controllers,
+ * which are implicitly strongly ordered with respect to memory access.
+ *
+ * @param value
+ * Value to write
+ * @param addr
+ * I/O memory address to write the value to
+ */
+static inline void
+rte_write32_relaxed(uint32_t value, volatile void *addr);
+
+/**
+ * Write a 64-bit value to I/O device memory address *addr*.
+ *
+ * The relaxed version does not add an extra I/O memory barrier; this is
+ * useful when accessing the device registers of integrated controllers,
+ * which are implicitly strongly ordered with respect to memory access.
+ *
+ * @param value
+ * Value to write
+ * @param addr
+ * I/O memory address to write the value to
+ */
+static inline void
+rte_write64_relaxed(uint64_t value, volatile void *addr);
+
+/**
+ * Read an 8-bit value from I/O device memory address *addr*.
+ *
+ * @param addr
+ * I/O memory address to read the value from
+ * @return
+ * read value
+ */
+static inline uint8_t
+rte_read8(const volatile void *addr);
+
+/**
+ * Read a 16-bit value from I/O device memory address *addr*.
+ *
+ *
+ * @param addr
+ * I/O memory address to read the value from
+ * @return
+ * read value
+ */
+static inline uint16_t
+rte_read16(const volatile void *addr);
+
+/**
+ * Read a 32-bit value from I/O device memory address *addr*.
+ *
+ * @param addr
+ * I/O memory address to read the value from
+ * @return
+ * read value
+ */
+static inline uint32_t
+rte_read32(const volatile void *addr);
+
+/**
+ * Read a 64-bit value from I/O device memory address *addr*.
+ *
+ * @param addr
+ * I/O memory address to read the value from
+ * @return
+ * read value
+ */
+static inline uint64_t
+rte_read64(const volatile void *addr);
+
+/**
+ * Write an 8-bit value to I/O device memory address *addr*.
+ *
+ * @param value
+ * Value to write
+ * @param addr
+ * I/O memory address to write the value to
+ */
+
+static inline void
+rte_write8(uint8_t value, volatile void *addr);
+
+/**
+ * Write a 16-bit value to I/O device memory address *addr*.
+ *
+ * @param value
+ * Value to write
+ * @param addr
+ * I/O memory address to write the value to
+ */
+static inline void
+rte_write16(uint16_t value, volatile void *addr);
+
+/**
+ * Write a 32-bit value to I/O device memory address *addr*.
+ *
+ * @param value
+ * Value to write
+ * @param addr
+ * I/O memory address to write the value to
+ */
+static inline void
+rte_write32(uint32_t value, volatile void *addr);
+
+/**
+ * Write a 64-bit value to I/O device memory address *addr*.
+ *
+ * @param value
+ * Value to write
+ * @param addr
+ * I/O memory address to write the value to
+ */
+static inline void
+rte_write64(uint64_t value, volatile void *addr);
+
+#endif /* __DOXYGEN__ */
+
+#ifndef RTE_OVERRIDE_IO_H
+
+static __rte_always_inline uint8_t
+rte_read8_relaxed(const volatile void *addr)
+{
+ return *(const volatile uint8_t *)addr;
+}
+
+static __rte_always_inline uint16_t
+rte_read16_relaxed(const volatile void *addr)
+{
+ return *(const volatile uint16_t *)addr;
+}
+
+static __rte_always_inline uint32_t
+rte_read32_relaxed(const volatile void *addr)
+{
+ return *(const volatile uint32_t *)addr;
+}
+
+static __rte_always_inline uint64_t
+rte_read64_relaxed(const volatile void *addr)
+{
+ return *(const volatile uint64_t *)addr;
+}
+
+static __rte_always_inline void
+rte_write8_relaxed(uint8_t value, volatile void *addr)
+{
+ *(volatile uint8_t *)addr = value;
+}
+
+static __rte_always_inline void
+rte_write16_relaxed(uint16_t value, volatile void *addr)
+{
+ *(volatile uint16_t *)addr = value;
+}
+
+static __rte_always_inline void
+rte_write32_relaxed(uint32_t value, volatile void *addr)
+{
+ *(volatile uint32_t *)addr = value;
+}
+
+static __rte_always_inline void
+rte_write64_relaxed(uint64_t value, volatile void *addr)
+{
+ *(volatile uint64_t *)addr = value;
+}
+
+static __rte_always_inline uint8_t
+rte_read8(const volatile void *addr)
+{
+ uint8_t val;
+ val = rte_read8_relaxed(addr);
+ rte_io_rmb();
+ return val;
+}
+
+static __rte_always_inline uint16_t
+rte_read16(const volatile void *addr)
+{
+ uint16_t val;
+ val = rte_read16_relaxed(addr);
+ rte_io_rmb();
+ return val;
+}
+
+static __rte_always_inline uint32_t
+rte_read32(const volatile void *addr)
+{
+ uint32_t val;
+ val = rte_read32_relaxed(addr);
+ rte_io_rmb();
+ return val;
+}
+
+static __rte_always_inline uint64_t
+rte_read64(const volatile void *addr)
+{
+ uint64_t val;
+ val = rte_read64_relaxed(addr);
+ rte_io_rmb();
+ return val;
+}
+
+static __rte_always_inline void
+rte_write8(uint8_t value, volatile void *addr)
+{
+ rte_io_wmb();
+ rte_write8_relaxed(value, addr);
+}
+
+static __rte_always_inline void
+rte_write16(uint16_t value, volatile void *addr)
+{
+ rte_io_wmb();
+ rte_write16_relaxed(value, addr);
+}
+
+static __rte_always_inline void
+rte_write32(uint32_t value, volatile void *addr)
+{
+ rte_io_wmb();
+ rte_write32_relaxed(value, addr);
+}
+
+static __rte_always_inline void
+rte_write64(uint64_t value, volatile void *addr)
+{
+ rte_io_wmb();
+ rte_write64_relaxed(value, addr);
+}
+
+#endif /* RTE_OVERRIDE_IO_H */
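
The usual split between the relaxed and ordered writers is sketched below for a transmit doorbell; the register offset and BAR handling are illustrative.

#include <stdint.h>
#include <rte_io.h>

#define TX_TAIL_REG 0x40 /* hypothetical doorbell offset within the BAR */

static void ring_tx_doorbell(volatile uint8_t *bar, uint32_t new_tail)
{
	/* Descriptors are assumed to have been filled in already (plain
	 * stores or the relaxed writers).  rte_write32() issues
	 * rte_io_wmb() first, so the device observes the descriptor
	 * contents before it sees the updated tail pointer.
	 */
	rte_write32(new_tail, bar + TX_TAIL_REG);
}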
+
+#endif /* _RTE_IO_H_ */
diff --git a/src/spdk/dpdk/lib/librte_eal/include/generic/rte_mcslock.h b/src/spdk/dpdk/lib/librte_eal/include/generic/rte_mcslock.h
new file mode 100644
index 000000000..2bef28351
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eal/include/generic/rte_mcslock.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Arm Limited
+ */
+
+#ifndef _RTE_MCSLOCK_H_
+#define _RTE_MCSLOCK_H_
+
+/**
+ * @file
+ *
+ * RTE MCS lock
+ *
+ * This file defines the main data structure and APIs for MCS queued lock.
+ *
+ * The MCS lock (proposed by John M. Mellor-Crummey and Michael L. Scott)
+ * provides scalability by spinning on a CPU/thread local variable which
+ * avoids expensive cache bouncings. It provides fairness by maintaining
+ * a list of acquirers and passing the lock to each CPU/thread in the order
+ * they acquired the lock.
+ */
+
+#include <rte_lcore.h>
+#include <rte_common.h>
+#include <rte_pause.h>
+
+/**
+ * The rte_mcslock_t type.
+ */
+typedef struct rte_mcslock {
+ struct rte_mcslock *next;
+ int locked; /* 1 if the queue locked, 0 otherwise */
+} rte_mcslock_t;
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: This API may change without prior notice
+ *
+ * Take the MCS lock.
+ *
+ * @param msl
+ * A pointer to the pointer of a MCS lock.
+ * When the lock is initialized or declared, the msl pointer should be
+ * set to NULL.
+ * @param me
+ * A pointer to a new node of MCS lock. Each CPU/thread acquiring the
+ * lock should use its 'own node'.
+ */
+__rte_experimental
+static inline void
+rte_mcslock_lock(rte_mcslock_t **msl, rte_mcslock_t *me)
+{
+ rte_mcslock_t *prev;
+
+ /* Init me node */
+ __atomic_store_n(&me->locked, 1, __ATOMIC_RELAXED);
+ __atomic_store_n(&me->next, NULL, __ATOMIC_RELAXED);
+
+ /* If the queue is empty, the exchange operation is enough to acquire
+ * the lock. Hence, the exchange operation requires acquire semantics.
+ * The store to me->next above should complete before the node is
+ * visible to other CPUs/threads. Hence, the exchange operation requires
+ * release semantics as well.
+ */
+ prev = __atomic_exchange_n(msl, me, __ATOMIC_ACQ_REL);
+ if (likely(prev == NULL)) {
+ /* Queue was empty, no further action required,
+ * proceed with lock taken.
+ */
+ return;
+ }
+ __atomic_store_n(&prev->next, me, __ATOMIC_RELAXED);
+
+ /* The while-load of me->locked should not move above the previous
+ * store to prev->next. Otherwise it will cause a deadlock. Need a
+ * store-load barrier.
+ */
+ __atomic_thread_fence(__ATOMIC_ACQ_REL);
+	/* If the lock was already taken, the exchange above has queued this
+	 * node at the end of the list, so spin on me->locked until the
+	 * previous lock holder clears it in rte_mcslock_unlock().
+ */
+ while (__atomic_load_n(&me->locked, __ATOMIC_ACQUIRE))
+ rte_pause();
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: This API may change without prior notice
+ *
+ * Release the MCS lock.
+ *
+ * @param msl
+ * A pointer to the pointer of a MCS lock.
+ * @param me
+ * A pointer to the node of MCS lock passed in rte_mcslock_lock.
+ */
+__rte_experimental
+static inline void
+rte_mcslock_unlock(rte_mcslock_t **msl, rte_mcslock_t *me)
+{
+ /* Check if there are more nodes in the queue. */
+ if (likely(__atomic_load_n(&me->next, __ATOMIC_RELAXED) == NULL)) {
+ /* No, last member in the queue. */
+ rte_mcslock_t *save_me = __atomic_load_n(&me, __ATOMIC_RELAXED);
+
+ /* Release the lock by setting it to NULL */
+ if (likely(__atomic_compare_exchange_n(msl, &save_me, NULL, 0,
+ __ATOMIC_RELEASE, __ATOMIC_RELAXED)))
+ return;
+
+ /* Speculative execution would be allowed to read in the
+ * while-loop first. This has the potential to cause a
+ * deadlock. Need a load barrier.
+ */
+ __atomic_thread_fence(__ATOMIC_ACQUIRE);
+ /* More nodes added to the queue by other CPUs.
+ * Wait until the next pointer is set.
+ */
+ while (__atomic_load_n(&me->next, __ATOMIC_RELAXED) == NULL)
+ rte_pause();
+ }
+
+ /* Pass lock to next waiter. */
+ __atomic_store_n(&me->next->locked, 0, __ATOMIC_RELEASE);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: This API may change without prior notice
+ *
+ * Try to take the lock.
+ *
+ * @param msl
+ * A pointer to the pointer of a MCS lock.
+ * @param me
+ * A pointer to a new node of MCS lock.
+ * @return
+ * 1 if the lock is successfully taken; 0 otherwise.
+ */
+__rte_experimental
+static inline int
+rte_mcslock_trylock(rte_mcslock_t **msl, rte_mcslock_t *me)
+{
+ /* Init me node */
+ __atomic_store_n(&me->next, NULL, __ATOMIC_RELAXED);
+
+ /* Try to lock */
+ rte_mcslock_t *expected = NULL;
+
+ /* The lock can be taken only when the queue is empty. Hence,
+ * the compare-exchange operation requires acquire semantics.
+ * The store to me->next above should complete before the node
+ * is visible to other CPUs/threads. Hence, the compare-exchange
+ * operation requires release semantics as well.
+ */
+ return __atomic_compare_exchange_n(msl, &expected, me, 0,
+ __ATOMIC_ACQ_REL, __ATOMIC_RELAXED);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: This API may change without prior notice
+ *
+ * Test if the lock is taken.
+ *
+ * @param msl
+ * A pointer to a MCS lock node.
+ * @return
+ * 1 if the lock is currently taken; 0 otherwise.
+ */
+__rte_experimental
+static inline int
+rte_mcslock_is_locked(rte_mcslock_t *msl)
+{
+ return (__atomic_load_n(&msl, __ATOMIC_RELAXED) != NULL);
+}
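
Minimal usage sketch: the lock variable starts out NULL (unlocked) and every caller passes its own queue node, here simply kept on the stack; the protected counter is illustrative.

#include <stdint.h>
#include <rte_mcslock.h>

static rte_mcslock_t *counter_lock; /* NULL == unlocked, as required above */
static uint64_t shared_counter;

static void bump_counter(void)
{
	rte_mcslock_t me; /* this thread's own node, valid while the lock is held */

	rte_mcslock_lock(&counter_lock, &me);
	shared_counter++; /* critical section */
	rte_mcslock_unlock(&counter_lock, &me);
}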
+
+#endif /* _RTE_MCSLOCK_H_ */
diff --git a/src/spdk/dpdk/lib/librte_eal/include/generic/rte_memcpy.h b/src/spdk/dpdk/lib/librte_eal/include/generic/rte_memcpy.h
new file mode 100644
index 000000000..701e550c3
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eal/include/generic/rte_memcpy.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#ifndef _RTE_MEMCPY_H_
+#define _RTE_MEMCPY_H_
+
+/**
+ * @file
+ *
+ * Functions for vectorised implementation of memcpy().
+ */
+
+/**
+ * Copy 16 bytes from one location to another using optimised
+ * instructions. The locations should not overlap.
+ *
+ * @param dst
+ * Pointer to the destination of the data.
+ * @param src
+ * Pointer to the source data.
+ */
+static inline void
+rte_mov16(uint8_t *dst, const uint8_t *src);
+
+/**
+ * Copy 32 bytes from one location to another using optimised
+ * instructions. The locations should not overlap.
+ *
+ * @param dst
+ * Pointer to the destination of the data.
+ * @param src
+ * Pointer to the source data.
+ */
+static inline void
+rte_mov32(uint8_t *dst, const uint8_t *src);
+
+#ifdef __DOXYGEN__
+
+/**
+ * Copy 48 bytes from one location to another using optimised
+ * instructions. The locations should not overlap.
+ *
+ * @param dst
+ * Pointer to the destination of the data.
+ * @param src
+ * Pointer to the source data.
+ */
+static inline void
+rte_mov48(uint8_t *dst, const uint8_t *src);
+
+#endif /* __DOXYGEN__ */
+
+/**
+ * Copy 64 bytes from one location to another using optimised
+ * instructions. The locations should not overlap.
+ *
+ * @param dst
+ * Pointer to the destination of the data.
+ * @param src
+ * Pointer to the source data.
+ */
+static inline void
+rte_mov64(uint8_t *dst, const uint8_t *src);
+
+/**
+ * Copy 128 bytes from one location to another using optimised
+ * instructions. The locations should not overlap.
+ *
+ * @param dst
+ * Pointer to the destination of the data.
+ * @param src
+ * Pointer to the source data.
+ */
+static inline void
+rte_mov128(uint8_t *dst, const uint8_t *src);
+
+/**
+ * Copy 256 bytes from one location to another using optimised
+ * instructions. The locations should not overlap.
+ *
+ * @param dst
+ * Pointer to the destination of the data.
+ * @param src
+ * Pointer to the source data.
+ */
+static inline void
+rte_mov256(uint8_t *dst, const uint8_t *src);
+
+#ifdef __DOXYGEN__
+
+/**
+ * Copy bytes from one location to another. The locations must not overlap.
+ *
+ * @note This is implemented as a macro, so its address should not be taken,
+ * and care is needed as parameter expressions may be evaluated multiple times.
+ *
+ * @param dst
+ * Pointer to the destination of the data.
+ * @param src
+ * Pointer to the source data.
+ * @param n
+ * Number of bytes to copy.
+ * @return
+ * Pointer to the destination data.
+ */
+static void *
+rte_memcpy(void *dst, const void *src, size_t n);
+
+#endif /* __DOXYGEN__ */
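+
+/*
+ * Illustrative sketch of the intended use; the buffers below are
+ * hypothetical and the regions passed to rte_memcpy() must not overlap.
+ *
+ *   uint8_t src_buf[64] = { 0 };
+ *   uint8_t dst_buf[64];
+ *
+ *   // Returns dst_buf, like memcpy().
+ *   rte_memcpy(dst_buf, src_buf, sizeof(dst_buf));
+ */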
+
+#endif /* _RTE_MEMCPY_H_ */
diff --git a/src/spdk/dpdk/lib/librte_eal/include/generic/rte_pause.h b/src/spdk/dpdk/lib/librte_eal/include/generic/rte_pause.h
new file mode 100644
index 000000000..7422785f1
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eal/include/generic/rte_pause.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ * Copyright(c) 2019 Arm Limited
+ */
+
+#ifndef _RTE_PAUSE_H_
+#define _RTE_PAUSE_H_
+
+/**
+ * @file
+ *
+ * CPU pause operation.
+ *
+ */
+
+#include <stdint.h>
+#include <assert.h>
+#include <rte_common.h>
+#include <rte_atomic.h>
+#include <rte_compat.h>
+
+/**
+ * Pause CPU execution for a short while
+ *
+ * This call is intended for tight loops which poll a shared resource or wait
+ * for an event. A short pause within the loop may reduce the power consumption.
+ */
+static inline void rte_pause(void);
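+
+/*
+ * Usage sketch (illustrative only): yield the CPU briefly on every
+ * iteration of a polling loop. The shared variable "flag", set by
+ * another thread, is hypothetical.
+ *
+ *   static volatile uint32_t flag;
+ *
+ *   while (__atomic_load_n(&flag, __ATOMIC_ACQUIRE) == 0)
+ *           rte_pause();
+ */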
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ * Wait for *addr to be updated with a 16-bit expected value, with a relaxed
+ * memory ordering model meaning the loads around this API can be reordered.
+ *
+ * @param addr
+ * A pointer to the memory location.
+ * @param expected
+ * A 16-bit expected value to be in the memory location.
+ * @param memorder
+ * Two different memory orders that can be specified:
+ * __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
+ *  C++11 memory orders with the same names; see the C++11 standard or
+ *  the GCC wiki on atomic synchronization for a detailed definition.
+ */
+__rte_experimental
+static __rte_always_inline void
+rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
+ int memorder);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ * Wait for *addr to be updated with a 32-bit expected value, with a relaxed
+ * memory ordering model meaning the loads around this API can be reordered.
+ *
+ * @param addr
+ * A pointer to the memory location.
+ * @param expected
+ * A 32-bit expected value to be in the memory location.
+ * @param memorder
+ * Two different memory orders that can be specified:
+ * __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
+ *  C++11 memory orders with the same names; see the C++11 standard or
+ *  the GCC wiki on atomic synchronization for a detailed definition.
+ */
+__rte_experimental
+static __rte_always_inline void
+rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,
+ int memorder);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ * Wait for *addr to be updated with a 64-bit expected value, with a relaxed
+ * memory ordering model meaning the loads around this API can be reordered.
+ *
+ * @param addr
+ * A pointer to the memory location.
+ * @param expected
+ * A 64-bit expected value to be in the memory location.
+ * @param memorder
+ * Two different memory orders that can be specified:
+ * __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
+ *  C++11 memory orders with the same names; see the C++11 standard or
+ *  the GCC wiki on atomic synchronization for a detailed definition.
+ */
+__rte_experimental
+static __rte_always_inline void
+rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
+ int memorder);
+
+#ifndef RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
+static __rte_always_inline void
+rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
+ int memorder)
+{
+ assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+ while (__atomic_load_n(addr, memorder) != expected)
+ rte_pause();
+}
+
+static __rte_always_inline void
+rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,
+ int memorder)
+{
+ assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+ while (__atomic_load_n(addr, memorder) != expected)
+ rte_pause();
+}
+
+static __rte_always_inline void
+rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
+ int memorder)
+{
+ assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+ while (__atomic_load_n(addr, memorder) != expected)
+ rte_pause();
+}
+#endif
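+
+/*
+ * Usage sketch (illustrative only): a consumer waits until a producer
+ * publishes "ready = 1". The names are hypothetical; acquire ordering on
+ * the waiter side pairs with the producer's release store, so data
+ * written before the flag is visible after the wait returns.
+ *
+ *   static volatile uint32_t ready;
+ *
+ *   // consumer
+ *   rte_wait_until_equal_32(&ready, 1, __ATOMIC_ACQUIRE);
+ *
+ *   // producer
+ *   __atomic_store_n(&ready, 1, __ATOMIC_RELEASE);
+ */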
+
+#endif /* _RTE_PAUSE_H_ */
diff --git a/src/spdk/dpdk/lib/librte_eal/include/generic/rte_prefetch.h b/src/spdk/dpdk/lib/librte_eal/include/generic/rte_prefetch.h
new file mode 100644
index 000000000..6e47bdfba
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eal/include/generic/rte_prefetch.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
+ */
+
+#ifndef _RTE_PREFETCH_H_
+#define _RTE_PREFETCH_H_
+
+/**
+ * @file
+ *
+ * Prefetch operations.
+ *
+ * This file defines an API for prefetch macros / inline-functions,
+ * which are architecture-dependent. Prefetching occurs when a
+ * processor requests an instruction or data from memory to cache
+ * before it is actually needed, potentially speeding up the execution of the
+ * program.
+ */
+
+/**
+ * Prefetch a cache line into all cache levels.
+ * @param p
+ * Address to prefetch
+ */
+static inline void rte_prefetch0(const volatile void *p);
+
+/**
+ * Prefetch a cache line into all cache levels except the 0th cache level.
+ * @param p
+ * Address to prefetch
+ */
+static inline void rte_prefetch1(const volatile void *p);
+
+/**
+ * Prefetch a cache line into all cache levels except the 0th and 1st cache
+ * levels.
+ * @param p
+ * Address to prefetch
+ */
+static inline void rte_prefetch2(const volatile void *p);
+
+/**
+ * Prefetch a cache line into all cache levels (non-temporal/transient version).
+ *
+ * The non-temporal prefetch is a hint that the processor will use the
+ * prefetched data only once or for a short period, unlike rte_prefetch0(),
+ * which implies that the prefetched data will be used repeatedly.
+ *
+ * @param p
+ * Address to prefetch
+ */
+static inline void rte_prefetch_non_temporal(const volatile void *p);
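+
+/*
+ * Illustrative sketch: prefetch a few elements ahead while walking an
+ * array of buffers. PREFETCH_OFFSET, "pkts", "nb_pkts" and
+ * process_packet() are hypothetical; the useful prefetch distance
+ * depends on the CPU and the per-element work.
+ *
+ *   #define PREFETCH_OFFSET 3
+ *
+ *   for (i = 0; i < nb_pkts; i++) {
+ *           if (i + PREFETCH_OFFSET < nb_pkts)
+ *                   rte_prefetch0(pkts[i + PREFETCH_OFFSET]);
+ *           process_packet(pkts[i]);
+ *   }
+ */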
+
+#endif /* _RTE_PREFETCH_H_ */
diff --git a/src/spdk/dpdk/lib/librte_eal/include/generic/rte_rwlock.h b/src/spdk/dpdk/lib/librte_eal/include/generic/rte_rwlock.h
new file mode 100644
index 000000000..da9bc3e9c
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eal/include/generic/rte_rwlock.h
@@ -0,0 +1,239 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#ifndef _RTE_RWLOCK_H_
+#define _RTE_RWLOCK_H_
+
+/**
+ * @file
+ *
+ * RTE Read-Write Locks
+ *
+ * This file defines an API for read-write locks. The lock is used to
+ * protect data that allows multiple readers in parallel, but only
+ * one writer. All readers are blocked until the writer is finished
+ * writing.
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_common.h>
+#include <rte_atomic.h>
+#include <rte_pause.h>
+
+/**
+ * The rte_rwlock_t type.
+ *
+ * cnt is -1 when write lock is held, and > 0 when read locks are held.
+ */
+typedef struct {
+ volatile int32_t cnt; /**< -1 when W lock held, > 0 when R locks held. */
+} rte_rwlock_t;
+
+/**
+ * A static rwlock initializer.
+ */
+#define RTE_RWLOCK_INITIALIZER { 0 }
+
+/**
+ * Initialize the rwlock to an unlocked state.
+ *
+ * @param rwl
+ * A pointer to the rwlock structure.
+ */
+static inline void
+rte_rwlock_init(rte_rwlock_t *rwl)
+{
+ rwl->cnt = 0;
+}
+
+/**
+ * Take a read lock. Loop until the lock is held.
+ *
+ * @param rwl
+ * A pointer to a rwlock structure.
+ */
+static inline void
+rte_rwlock_read_lock(rte_rwlock_t *rwl)
+{
+ int32_t x;
+ int success = 0;
+
+ while (success == 0) {
+ x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
+ /* write lock is held */
+ if (x < 0) {
+ rte_pause();
+ continue;
+ }
+ success = __atomic_compare_exchange_n(&rwl->cnt, &x, x + 1, 1,
+ __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
+ }
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Try to take a read lock.
+ *
+ * @param rwl
+ * A pointer to a rwlock structure.
+ * @return
+ * - zero if the lock is successfully taken
+ * - -EBUSY if lock could not be acquired for reading because a
+ * writer holds the lock
+ */
+__rte_experimental
+static inline int
+rte_rwlock_read_trylock(rte_rwlock_t *rwl)
+{
+ int32_t x;
+ int success = 0;
+
+ while (success == 0) {
+ x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
+ /* write lock is held */
+ if (x < 0)
+ return -EBUSY;
+ success = __atomic_compare_exchange_n(&rwl->cnt, &x, x + 1, 1,
+ __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
+ }
+
+ return 0;
+}
+
+/**
+ * Release a read lock.
+ *
+ * @param rwl
+ * A pointer to the rwlock structure.
+ */
+static inline void
+rte_rwlock_read_unlock(rte_rwlock_t *rwl)
+{
+ __atomic_fetch_sub(&rwl->cnt, 1, __ATOMIC_RELEASE);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Try to take a write lock.
+ *
+ * @param rwl
+ * A pointer to a rwlock structure.
+ * @return
+ * - zero if the lock is successfully taken
+ * - -EBUSY if lock could not be acquired for writing because
+ * it was already locked for reading or writing
+ */
+__rte_experimental
+static inline int
+rte_rwlock_write_trylock(rte_rwlock_t *rwl)
+{
+ int32_t x;
+
+ x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
+ if (x != 0 || __atomic_compare_exchange_n(&rwl->cnt, &x, -1, 1,
+ __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) == 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+/**
+ * Take a write lock. Loop until the lock is held.
+ *
+ * @param rwl
+ * A pointer to a rwlock structure.
+ */
+static inline void
+rte_rwlock_write_lock(rte_rwlock_t *rwl)
+{
+ int32_t x;
+ int success = 0;
+
+ while (success == 0) {
+ x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
+ /* a lock is held */
+ if (x != 0) {
+ rte_pause();
+ continue;
+ }
+ success = __atomic_compare_exchange_n(&rwl->cnt, &x, -1, 1,
+ __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
+ }
+}
+
+/**
+ * Release a write lock.
+ *
+ * @param rwl
+ * A pointer to a rwlock structure.
+ */
+static inline void
+rte_rwlock_write_unlock(rte_rwlock_t *rwl)
+{
+ __atomic_store_n(&rwl->cnt, 0, __ATOMIC_RELEASE);
+}
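+
+/*
+ * Usage sketch (names are illustrative): many readers may hold the lock
+ * concurrently, while a writer excludes both readers and other writers.
+ *
+ *   static rte_rwlock_t tbl_lock = RTE_RWLOCK_INITIALIZER;
+ *   static uint32_t tbl[1024];
+ *
+ *   static uint32_t
+ *   tbl_lookup(uint32_t idx)
+ *   {
+ *           uint32_t v;
+ *
+ *           rte_rwlock_read_lock(&tbl_lock);
+ *           v = tbl[idx];
+ *           rte_rwlock_read_unlock(&tbl_lock);
+ *           return v;
+ *   }
+ *
+ *   static void
+ *   tbl_update(uint32_t idx, uint32_t v)
+ *   {
+ *           rte_rwlock_write_lock(&tbl_lock);
+ *           tbl[idx] = v;
+ *           rte_rwlock_write_unlock(&tbl_lock);
+ *   }
+ */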
+
+/**
+ * Try to execute the critical section in a hardware memory transaction;
+ * if the transaction fails or is not available, take a read lock.
+ *
+ * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
+ * transaction always aborts the transaction since the CPU is not able to
+ * roll-back should the transaction fail. Therefore, hardware transactional
+ * locks are not advised to be used around rte_eth_rx_burst() and
+ * rte_eth_tx_burst() calls.
+ *
+ * @param rwl
+ * A pointer to a rwlock structure.
+ */
+static inline void
+rte_rwlock_read_lock_tm(rte_rwlock_t *rwl);
+
+/**
+ * Commit the hardware memory transaction, or release the read lock if the lock is used as a fall-back.
+ *
+ * @param rwl
+ * A pointer to the rwlock structure.
+ */
+static inline void
+rte_rwlock_read_unlock_tm(rte_rwlock_t *rwl);
+
+/**
+ * Try to execute the critical section in a hardware memory transaction;
+ * if the transaction fails or is not available, take a write lock.
+ *
+ * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
+ * transaction always aborts the transaction since the CPU is not able to
+ * roll-back should the transaction fail. Therefore, hardware transactional
+ * locks are not advised to be used around rte_eth_rx_burst() and
+ * rte_eth_tx_burst() calls.
+ *
+ * @param rwl
+ * A pointer to a rwlock structure.
+ */
+static inline void
+rte_rwlock_write_lock_tm(rte_rwlock_t *rwl);
+
+/**
+ * Commit the hardware memory transaction, or release the write lock if the lock is used as a fall-back.
+ *
+ * @param rwl
+ * A pointer to a rwlock structure.
+ */
+static inline void
+rte_rwlock_write_unlock_tm(rte_rwlock_t *rwl);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_RWLOCK_H_ */
diff --git a/src/spdk/dpdk/lib/librte_eal/include/generic/rte_spinlock.h b/src/spdk/dpdk/lib/librte_eal/include/generic/rte_spinlock.h
new file mode 100644
index 000000000..87ae7a4f1
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eal/include/generic/rte_spinlock.h
@@ -0,0 +1,305 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#ifndef _RTE_SPINLOCK_H_
+#define _RTE_SPINLOCK_H_
+
+/**
+ * @file
+ *
+ * RTE Spinlocks
+ *
+ * This file defines an API for spinlocks, which are implemented
+ * in an architecture-specific way. This kind of lock simply waits in
+ * a loop, repeatedly checking until the lock becomes available.
+ *
+ * All locks must be initialised before use, and only initialised once.
+ *
+ */
+
+#include <rte_lcore.h>
+#ifdef RTE_FORCE_INTRINSICS
+#include <rte_common.h>
+#endif
+#include <rte_pause.h>
+
+/**
+ * The rte_spinlock_t type.
+ */
+typedef struct {
+ volatile int locked; /**< lock status 0 = unlocked, 1 = locked */
+} rte_spinlock_t;
+
+/**
+ * A static spinlock initializer.
+ */
+#define RTE_SPINLOCK_INITIALIZER { 0 }
+
+/**
+ * Initialize the spinlock to an unlocked state.
+ *
+ * @param sl
+ * A pointer to the spinlock.
+ */
+static inline void
+rte_spinlock_init(rte_spinlock_t *sl)
+{
+ sl->locked = 0;
+}
+
+/**
+ * Take the spinlock.
+ *
+ * @param sl
+ * A pointer to the spinlock.
+ */
+static inline void
+rte_spinlock_lock(rte_spinlock_t *sl);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline void
+rte_spinlock_lock(rte_spinlock_t *sl)
+{
+ int exp = 0;
+
+ while (!__atomic_compare_exchange_n(&sl->locked, &exp, 1, 0,
+ __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
+ while (__atomic_load_n(&sl->locked, __ATOMIC_RELAXED))
+ rte_pause();
+ exp = 0;
+ }
+}
+#endif
+
+/**
+ * Release the spinlock.
+ *
+ * @param sl
+ * A pointer to the spinlock.
+ */
+static inline void
+rte_spinlock_unlock (rte_spinlock_t *sl);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline void
+rte_spinlock_unlock (rte_spinlock_t *sl)
+{
+ __atomic_store_n(&sl->locked, 0, __ATOMIC_RELEASE);
+}
+#endif
+
+/**
+ * Try to take the lock.
+ *
+ * @param sl
+ * A pointer to the spinlock.
+ * @return
+ * 1 if the lock is successfully taken; 0 otherwise.
+ */
+static inline int
+rte_spinlock_trylock (rte_spinlock_t *sl);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline int
+rte_spinlock_trylock (rte_spinlock_t *sl)
+{
+ int exp = 0;
+ return __atomic_compare_exchange_n(&sl->locked, &exp, 1,
+ 0, /* disallow spurious failure */
+ __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
+}
+#endif
+
+/**
+ * Test if the lock is taken.
+ *
+ * @param sl
+ * A pointer to the spinlock.
+ * @return
+ * 1 if the lock is currently taken; 0 otherwise.
+ */
+static inline int rte_spinlock_is_locked (rte_spinlock_t *sl)
+{
+ return __atomic_load_n(&sl->locked, __ATOMIC_ACQUIRE);
+}
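+
+/*
+ * Usage sketch (illustrative only): a statically initialised spinlock
+ * guarding a shared counter; rte_spinlock_trylock() lets the caller do
+ * other work instead of spinning when the lock is contended.
+ *
+ *   static rte_spinlock_t sl = RTE_SPINLOCK_INITIALIZER;
+ *   static uint64_t counter;
+ *
+ *   rte_spinlock_lock(&sl);
+ *   counter++;
+ *   rte_spinlock_unlock(&sl);
+ *
+ *   if (rte_spinlock_trylock(&sl)) {
+ *           counter++;
+ *           rte_spinlock_unlock(&sl);
+ *   }
+ */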
+
+/**
+ * Test if hardware transactional memory (lock elision) is supported
+ *
+ * @return
+ * 1 if the hardware transactional memory is supported; 0 otherwise.
+ */
+static inline int rte_tm_supported(void);
+
+/**
+ * Try to execute the critical section in a hardware memory transaction;
+ * if the transaction fails or is not available, take the spinlock.
+ *
+ * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
+ * transaction always aborts the transaction since the CPU is not able to
+ * roll-back should the transaction fail. Therefore, hardware transactional
+ * locks are not advised to be used around rte_eth_rx_burst() and
+ * rte_eth_tx_burst() calls.
+ *
+ * @param sl
+ * A pointer to the spinlock.
+ */
+static inline void
+rte_spinlock_lock_tm(rte_spinlock_t *sl);
+
+/**
+ * Commit the hardware memory transaction, or release the spinlock if
+ * the spinlock is used as a fall-back.
+ *
+ * @param sl
+ * A pointer to the spinlock.
+ */
+static inline void
+rte_spinlock_unlock_tm(rte_spinlock_t *sl);
+
+/**
+ * Try to execute the critical section in a hardware memory transaction;
+ * if the transaction fails or is not available, try to take the lock.
+ *
+ * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
+ * transaction always aborts the transaction since the CPU is not able to
+ * roll-back should the transaction fail. Therefore, hardware transactional
+ * locks are not advised to be used around rte_eth_rx_burst() and
+ * rte_eth_tx_burst() calls.
+ *
+ * @param sl
+ * A pointer to the spinlock.
+ * @return
+ * 1 if the hardware memory transaction is successfully started
+ * or lock is successfully taken; 0 otherwise.
+ */
+static inline int
+rte_spinlock_trylock_tm(rte_spinlock_t *sl);
+
+/**
+ * The rte_spinlock_recursive_t type.
+ */
+typedef struct {
+ rte_spinlock_t sl; /**< the actual spinlock */
+ volatile int user; /**< core id using lock, -1 for unused */
+	volatile int count; /**< count of times this lock has been taken */
+} rte_spinlock_recursive_t;
+
+/**
+ * A static recursive spinlock initializer.
+ */
+#define RTE_SPINLOCK_RECURSIVE_INITIALIZER {RTE_SPINLOCK_INITIALIZER, -1, 0}
+
+/**
+ * Initialize the recursive spinlock to an unlocked state.
+ *
+ * @param slr
+ * A pointer to the recursive spinlock.
+ */
+static inline void rte_spinlock_recursive_init(rte_spinlock_recursive_t *slr)
+{
+ rte_spinlock_init(&slr->sl);
+ slr->user = -1;
+ slr->count = 0;
+}
+
+/**
+ * Take the recursive spinlock.
+ *
+ * @param slr
+ * A pointer to the recursive spinlock.
+ */
+static inline void rte_spinlock_recursive_lock(rte_spinlock_recursive_t *slr)
+{
+ int id = rte_gettid();
+
+ if (slr->user != id) {
+ rte_spinlock_lock(&slr->sl);
+ slr->user = id;
+ }
+ slr->count++;
+}
+
+/**
+ * Release the recursive spinlock.
+ *
+ * @param slr
+ * A pointer to the recursive spinlock.
+ */
+static inline void rte_spinlock_recursive_unlock(rte_spinlock_recursive_t *slr)
+{
+ if (--(slr->count) == 0) {
+ slr->user = -1;
+ rte_spinlock_unlock(&slr->sl);
+ }
+}
+
+/**
+ * Try to take the recursive lock.
+ *
+ * @param slr
+ * A pointer to the recursive spinlock.
+ * @return
+ * 1 if the lock is successfully taken; 0 otherwise.
+ */
+static inline int rte_spinlock_recursive_trylock(rte_spinlock_recursive_t *slr)
+{
+ int id = rte_gettid();
+
+ if (slr->user != id) {
+ if (rte_spinlock_trylock(&slr->sl) == 0)
+ return 0;
+ slr->user = id;
+ }
+ slr->count++;
+ return 1;
+}
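+
+/*
+ * Usage sketch (illustrative only): the owning thread may re-enter the
+ * lock; each rte_spinlock_recursive_lock() must be paired with an
+ * unlock before the lock is actually released.
+ *
+ *   static rte_spinlock_recursive_t rsl = RTE_SPINLOCK_RECURSIVE_INITIALIZER;
+ *
+ *   static void
+ *   inner(void)
+ *   {
+ *           rte_spinlock_recursive_lock(&rsl);   // re-entry by the owner
+ *           rte_spinlock_recursive_unlock(&rsl);
+ *   }
+ *
+ *   static void
+ *   outer(void)
+ *   {
+ *           rte_spinlock_recursive_lock(&rsl);
+ *           inner();
+ *           rte_spinlock_recursive_unlock(&rsl);
+ *   }
+ */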
+
+/**
+ * Try to execute the critical section in a hardware memory transaction;
+ * if the transaction fails or is not available, take the recursive spinlock.
+ *
+ * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
+ * transaction always aborts the transaction since the CPU is not able to
+ * roll-back should the transaction fail. Therefore, hardware transactional
+ * locks are not advised to be used around rte_eth_rx_burst() and
+ * rte_eth_tx_burst() calls.
+ *
+ * @param slr
+ * A pointer to the recursive spinlock.
+ */
+static inline void rte_spinlock_recursive_lock_tm(
+ rte_spinlock_recursive_t *slr);
+
+/**
+ * Commit the hardware memory transaction, or release the recursive spinlock
+ * if the recursive spinlock is used as a fall-back.
+ *
+ * @param slr
+ * A pointer to the recursive spinlock.
+ */
+static inline void rte_spinlock_recursive_unlock_tm(
+ rte_spinlock_recursive_t *slr);
+
+/**
+ * Try to execute the critical section in a hardware memory transaction;
+ * if the transaction fails or is not available, try to take the recursive lock.
+ *
+ * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
+ * transaction always aborts the transaction since the CPU is not able to
+ * roll-back should the transaction fail. Therefore, hardware transactional
+ * locks are not advised to be used around rte_eth_rx_burst() and
+ * rte_eth_tx_burst() calls.
+ *
+ * @param slr
+ * A pointer to the recursive spinlock.
+ * @return
+ * 1 if the hardware memory transaction is successfully started
+ * or lock is successfully taken; 0 otherwise.
+ */
+static inline int rte_spinlock_recursive_trylock_tm(
+ rte_spinlock_recursive_t *slr);
+
+#endif /* _RTE_SPINLOCK_H_ */
diff --git a/src/spdk/dpdk/lib/librte_eal/include/generic/rte_ticketlock.h b/src/spdk/dpdk/lib/librte_eal/include/generic/rte_ticketlock.h
new file mode 100644
index 000000000..c295ae7f7
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eal/include/generic/rte_ticketlock.h
@@ -0,0 +1,223 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Arm Limited
+ */
+
+#ifndef _RTE_TICKETLOCK_H_
+#define _RTE_TICKETLOCK_H_
+
+/**
+ * @file
+ *
+ * RTE ticket locks
+ *
+ * This file defines an API for ticket locks, which give each waiting
+ * thread a ticket and grant the lock in ticket order: first come,
+ * first served.
+ *
+ * All locks must be initialised before use, and only initialised once.
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_common.h>
+#include <rte_lcore.h>
+#include <rte_pause.h>
+
+/**
+ * The rte_ticketlock_t type.
+ */
+typedef union {
+ uint32_t tickets;
+ struct {
+ uint16_t current;
+ uint16_t next;
+ } s;
+} rte_ticketlock_t;
+
+/**
+ * A static ticketlock initializer.
+ */
+#define RTE_TICKETLOCK_INITIALIZER { 0 }
+
+/**
+ * Initialize the ticketlock to an unlocked state.
+ *
+ * @param tl
+ * A pointer to the ticketlock.
+ */
+__rte_experimental
+static inline void
+rte_ticketlock_init(rte_ticketlock_t *tl)
+{
+ __atomic_store_n(&tl->tickets, 0, __ATOMIC_RELAXED);
+}
+
+/**
+ * Take the ticketlock.
+ *
+ * @param tl
+ * A pointer to the ticketlock.
+ */
+__rte_experimental
+static inline void
+rte_ticketlock_lock(rte_ticketlock_t *tl)
+{
+ uint16_t me = __atomic_fetch_add(&tl->s.next, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16(&tl->s.current, me, __ATOMIC_ACQUIRE);
+}
+
+/**
+ * Release the ticketlock.
+ *
+ * @param tl
+ * A pointer to the ticketlock.
+ */
+__rte_experimental
+static inline void
+rte_ticketlock_unlock(rte_ticketlock_t *tl)
+{
+ uint16_t i = __atomic_load_n(&tl->s.current, __ATOMIC_RELAXED);
+ __atomic_store_n(&tl->s.current, i + 1, __ATOMIC_RELEASE);
+}
+
+/**
+ * Try to take the lock.
+ *
+ * @param tl
+ * A pointer to the ticketlock.
+ * @return
+ * 1 if the lock is successfully taken; 0 otherwise.
+ */
+__rte_experimental
+static inline int
+rte_ticketlock_trylock(rte_ticketlock_t *tl)
+{
+ rte_ticketlock_t old, new;
+ old.tickets = __atomic_load_n(&tl->tickets, __ATOMIC_RELAXED);
+ new.tickets = old.tickets;
+ new.s.next++;
+ if (old.s.next == old.s.current) {
+ if (__atomic_compare_exchange_n(&tl->tickets, &old.tickets,
+ new.tickets, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * Test if the lock is taken.
+ *
+ * @param tl
+ * A pointer to the ticketlock.
+ * @return
+ * 1 if the lock is currently taken; 0 otherwise.
+ */
+__rte_experimental
+static inline int
+rte_ticketlock_is_locked(rte_ticketlock_t *tl)
+{
+ rte_ticketlock_t tic;
+ tic.tickets = __atomic_load_n(&tl->tickets, __ATOMIC_ACQUIRE);
+ return (tic.s.current != tic.s.next);
+}
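+
+/*
+ * Usage sketch (illustrative only): waiters are granted the lock in
+ * strict arrival order, avoiding the starvation a plain spinlock can
+ * exhibit under contention.
+ *
+ *   static rte_ticketlock_t tl = RTE_TICKETLOCK_INITIALIZER;
+ *   static uint64_t stats;
+ *
+ *   rte_ticketlock_lock(&tl);
+ *   stats++;
+ *   rte_ticketlock_unlock(&tl);
+ *
+ *   if (rte_ticketlock_trylock(&tl)) {
+ *           stats++;
+ *           rte_ticketlock_unlock(&tl);
+ *   }
+ */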
+
+/**
+ * The rte_ticketlock_recursive_t type.
+ */
+#define TICKET_LOCK_INVALID_ID -1
+
+typedef struct {
+ rte_ticketlock_t tl; /**< the actual ticketlock */
+ int user; /**< core id using lock, TICKET_LOCK_INVALID_ID for unused */
+	unsigned int count; /**< count of times this lock has been taken */
+} rte_ticketlock_recursive_t;
+
+/**
+ * A static recursive ticketlock initializer.
+ */
+#define RTE_TICKETLOCK_RECURSIVE_INITIALIZER {RTE_TICKETLOCK_INITIALIZER, \
+ TICKET_LOCK_INVALID_ID, 0}
+
+/**
+ * Initialize the recursive ticketlock to an unlocked state.
+ *
+ * @param tlr
+ * A pointer to the recursive ticketlock.
+ */
+__rte_experimental
+static inline void
+rte_ticketlock_recursive_init(rte_ticketlock_recursive_t *tlr)
+{
+ rte_ticketlock_init(&tlr->tl);
+ __atomic_store_n(&tlr->user, TICKET_LOCK_INVALID_ID, __ATOMIC_RELAXED);
+ tlr->count = 0;
+}
+
+/**
+ * Take the recursive ticketlock.
+ *
+ * @param tlr
+ * A pointer to the recursive ticketlock.
+ */
+__rte_experimental
+static inline void
+rte_ticketlock_recursive_lock(rte_ticketlock_recursive_t *tlr)
+{
+ int id = rte_gettid();
+
+ if (__atomic_load_n(&tlr->user, __ATOMIC_RELAXED) != id) {
+ rte_ticketlock_lock(&tlr->tl);
+ __atomic_store_n(&tlr->user, id, __ATOMIC_RELAXED);
+ }
+ tlr->count++;
+}
+
+/**
+ * Release the recursive ticketlock.
+ *
+ * @param tlr
+ * A pointer to the recursive ticketlock.
+ */
+__rte_experimental
+static inline void
+rte_ticketlock_recursive_unlock(rte_ticketlock_recursive_t *tlr)
+{
+ if (--(tlr->count) == 0) {
+ __atomic_store_n(&tlr->user, TICKET_LOCK_INVALID_ID,
+ __ATOMIC_RELAXED);
+ rte_ticketlock_unlock(&tlr->tl);
+ }
+}
+
+/**
+ * Try to take the recursive lock.
+ *
+ * @param tlr
+ * A pointer to the recursive ticketlock.
+ * @return
+ * 1 if the lock is successfully taken; 0 otherwise.
+ */
+__rte_experimental
+static inline int
+rte_ticketlock_recursive_trylock(rte_ticketlock_recursive_t *tlr)
+{
+ int id = rte_gettid();
+
+ if (__atomic_load_n(&tlr->user, __ATOMIC_RELAXED) != id) {
+ if (rte_ticketlock_trylock(&tlr->tl) == 0)
+ return 0;
+ __atomic_store_n(&tlr->user, id, __ATOMIC_RELAXED);
+ }
+ tlr->count++;
+ return 1;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_TICKETLOCK_H_ */
diff --git a/src/spdk/dpdk/lib/librte_eal/include/generic/rte_vect.h b/src/spdk/dpdk/lib/librte_eal/include/generic/rte_vect.h
new file mode 100644
index 000000000..3fc47979f
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eal/include/generic/rte_vect.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2016 6WIND S.A.
+ */
+
+#ifndef _RTE_VECT_H_
+#define _RTE_VECT_H_
+
+/**
+ * @file
+ * SIMD vector types
+ *
+ * This file defines types to use vector instructions with generic C code.
+ */
+
+#include <stdint.h>
+
+/* Unsigned vector types */
+
+/**
+ * 64 bits vector size to use with unsigned 8 bits elements.
+ *
+ * a = (rte_v64u8_t){ a0, a1, a2, a3, a4, a5, a6, a7 }
+ */
+typedef uint8_t rte_v64u8_t __attribute__((vector_size(8), aligned(8)));
+
+/**
+ * 64 bits vector size to use with unsigned 16 bits elements.
+ *
+ * a = (rte_v64u16_t){ a0, a1, a2, a3 }
+ */
+typedef uint16_t rte_v64u16_t __attribute__((vector_size(8), aligned(8)));
+
+/**
+ * 64 bits vector size to use with unsigned 32 bits elements.
+ *
+ * a = (rte_v64u32_t){ a0, a1 }
+ */
+typedef uint32_t rte_v64u32_t __attribute__((vector_size(8), aligned(8)));
+
+/**
+ * 128 bits vector size to use with unsigned 8 bits elements.
+ *
+ * a = (rte_v128u8_t){ a00, a01, a02, a03, a04, a05, a06, a07,
+ * a08, a09, a10, a11, a12, a13, a14, a15 }
+ */
+typedef uint8_t rte_v128u8_t __attribute__((vector_size(16), aligned(16)));
+
+/**
+ * 128 bits vector size to use with unsigned 16 bits elements.
+ *
+ * a = (rte_v128u16_t){ a0, a1, a2, a3, a4, a5, a6, a7 }
+ */
+typedef uint16_t rte_v128u16_t __attribute__((vector_size(16), aligned(16)));
+
+/**
+ * 128 bits vector size to use with unsigned 32 bits elements.
+ *
+ * a = (rte_v128u32_t){ a0, a1, a2, a3 }
+ */
+typedef uint32_t rte_v128u32_t __attribute__((vector_size(16), aligned(16)));
+
+/**
+ * 128 bits vector size to use with unsigned 64 bits elements.
+ *
+ * a = (rte_v128u64_t){ a0, a1 }
+ */
+typedef uint64_t rte_v128u64_t __attribute__((vector_size(16), aligned(16)));
+
+/**
+ * 256 bits vector size to use with unsigned 8 bits elements.
+ *
+ * a = (rte_v256u8_t){ a00, a01, a02, a03, a04, a05, a06, a07,
+ * a08, a09, a10, a11, a12, a13, a14, a15,
+ * a16, a17, a18, a19, a20, a21, a22, a23,
+ * a24, a25, a26, a27, a28, a29, a30, a31 }
+ */
+typedef uint8_t rte_v256u8_t __attribute__((vector_size(32), aligned(32)));
+
+/**
+ * 256 bits vector size to use with unsigned 16 bits elements.
+ *
+ * a = (rte_v256u16_t){ a00, a01, a02, a03, a04, a05, a06, a07,
+ * a08, a09, a10, a11, a12, a13, a14, a15 }
+ */
+typedef uint16_t rte_v256u16_t __attribute__((vector_size(32), aligned(32)));
+
+/**
+ * 256 bits vector size to use with unsigned 32 bits elements.
+ *
+ * a = (rte_v256u32_t){ a0, a1, a2, a3, a4, a5, a6, a7 }
+ */
+typedef uint32_t rte_v256u32_t __attribute__((vector_size(32), aligned(32)));
+
+/**
+ * 256 bits vector size to use with unsigned 64 bits elements.
+ *
+ * a = (rte_v256u64_t){ a0, a1, a2, a3 }
+ */
+typedef uint64_t rte_v256u64_t __attribute__((vector_size(32), aligned(32)));
+
+
+/* Signed vector types */
+
+/**
+ * 64 bits vector size to use with 8 bits elements.
+ *
+ * a = (rte_v64s8_t){ a0, a1, a2, a3, a4, a5, a6, a7 }
+ */
+typedef int8_t rte_v64s8_t __attribute__((vector_size(8), aligned(8)));
+
+/**
+ * 64 bits vector size to use with 16 bits elements.
+ *
+ * a = (rte_v64s16_t){ a0, a1, a2, a3 }
+ */
+typedef int16_t rte_v64s16_t __attribute__((vector_size(8), aligned(8)));
+
+/**
+ * 64 bits vector size to use with 32 bits elements.
+ *
+ * a = (rte_v64s32_t){ a0, a1 }
+ */
+typedef int32_t rte_v64s32_t __attribute__((vector_size(8), aligned(8)));
+
+/**
+ * 128 bits vector size to use with 8 bits elements.
+ *
+ * a = (rte_v128s8_t){ a00, a01, a02, a03, a04, a05, a06, a07,
+ * a08, a09, a10, a11, a12, a13, a14, a15 }
+ */
+typedef int8_t rte_v128s8_t __attribute__((vector_size(16), aligned(16)));
+
+/**
+ * 128 bits vector size to use with 16 bits elements.
+ *
+ * a = (rte_v128s16_t){ a0, a1, a2, a3, a4, a5, a6, a7 }
+ */
+typedef int16_t rte_v128s16_t __attribute__((vector_size(16), aligned(16)));
+
+/**
+ * 128 bits vector size to use with 32 bits elements.
+ *
+ * a = (rte_v128s32_t){ a0, a1, a2, a3 }
+ */
+typedef int32_t rte_v128s32_t __attribute__((vector_size(16), aligned(16)));
+
+/**
+ * 128 bits vector size to use with 64 bits elements.
+ *
+ * a = (rte_v128s64_t){ a0, a1 }
+ */
+typedef int64_t rte_v128s64_t __attribute__((vector_size(16), aligned(16)));
+
+/**
+ * 256 bits vector size to use with 8 bits elements.
+ *
+ * a = (rte_v256s8_t){ a00, a01, a02, a03, a04, a05, a06, a07,
+ * a08, a09, a10, a11, a12, a13, a14, a15,
+ * a16, a17, a18, a19, a20, a21, a22, a23,
+ * a24, a25, a26, a27, a28, a29, a30, a31 }
+ */
+typedef int8_t rte_v256s8_t __attribute__((vector_size(32), aligned(32)));
+
+/**
+ * 256 bits vector size to use with 16 bits elements.
+ *
+ * a = (rte_v256s16_t){ a00, a01, a02, a03, a04, a05, a06, a07,
+ * a08, a09, a10, a11, a12, a13, a14, a15 }
+ */
+typedef int16_t rte_v256s16_t __attribute__((vector_size(32), aligned(32)));
+
+/**
+ * 256 bits vector size to use with 32 bits elements.
+ *
+ * a = (rte_v256s32_t){ a0, a1, a2, a3, a4, a5, a6, a7 }
+ */
+typedef int32_t rte_v256s32_t __attribute__((vector_size(32), aligned(32)));
+
+/**
+ * 256 bits vector size to use with 64 bits elements.
+ *
+ * a = (rte_v256s64_t){ a0, a1, a2, a3 }
+ */
+typedef int64_t rte_v256s64_t __attribute__((vector_size(32), aligned(32)));
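+
+/*
+ * Illustrative sketch: GCC/Clang vector extensions allow element-wise
+ * arithmetic directly on these types, letting the compiler select
+ * suitable SIMD instructions. The variable names are arbitrary.
+ *
+ *   rte_v128u32_t a = { 1, 2, 3, 4 };
+ *   rte_v128u32_t b = { 10, 20, 30, 40 };
+ *   rte_v128u32_t sum = a + b;   // element-wise: { 11, 22, 33, 44 }
+ */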
+
+#endif /* _RTE_VECT_H_ */