author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:54:28 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:54:28 +0000
commit    e6918187568dbd01842d8d1d2c808ce16a894239 (patch)
tree      64f88b554b444a49f656b6c656111a145cbbaa28 /src/spdk/dpdk/drivers/mempool
parent    Initial commit. (diff)
Adding upstream version 18.2.2. (upstream/18.2.2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/dpdk/drivers/mempool')
-rw-r--r--  src/spdk/dpdk/drivers/mempool/Makefile | 18
-rw-r--r--  src/spdk/dpdk/drivers/mempool/bucket/Makefile | 25
-rw-r--r--  src/spdk/dpdk/drivers/mempool/bucket/meson.build | 9
-rw-r--r--  src/spdk/dpdk/drivers/mempool/bucket/rte_mempool_bucket.c | 634
-rw-r--r--  src/spdk/dpdk/drivers/mempool/bucket/rte_mempool_bucket_version.map | 3
-rw-r--r--  src/spdk/dpdk/drivers/mempool/dpaa/Makefile | 30
-rw-r--r--  src/spdk/dpdk/drivers/mempool/dpaa/dpaa_mempool.c | 366
-rw-r--r--  src/spdk/dpdk/drivers/mempool/dpaa/dpaa_mempool.h | 80
-rw-r--r--  src/spdk/dpdk/drivers/mempool/dpaa/meson.build | 10
-rw-r--r--  src/spdk/dpdk/drivers/mempool/dpaa/rte_mempool_dpaa_version.map | 10
-rw-r--r--  src/spdk/dpdk/drivers/mempool/dpaa2/Makefile | 31
-rw-r--r--  src/spdk/dpdk/drivers/mempool/dpaa2/dpaa2_hw_mempool.c | 462
-rw-r--r--  src/spdk/dpdk/drivers/mempool/dpaa2/dpaa2_hw_mempool.h | 68
-rw-r--r--  src/spdk/dpdk/drivers/mempool/dpaa2/dpaa2_hw_mempool_logs.h | 37
-rw-r--r--  src/spdk/dpdk/drivers/mempool/dpaa2/meson.build | 12
-rw-r--r--  src/spdk/dpdk/drivers/mempool/dpaa2/rte_dpaa2_mempool.h | 53
-rw-r--r--  src/spdk/dpdk/drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map | 15
-rw-r--r--  src/spdk/dpdk/drivers/mempool/meson.build | 7
-rw-r--r--  src/spdk/dpdk/drivers/mempool/octeontx/Makefile | 39
-rw-r--r--  src/spdk/dpdk/drivers/mempool/octeontx/meson.build | 8
-rw-r--r--  src/spdk/dpdk/drivers/mempool/octeontx/octeontx_fpavf.c | 835
-rw-r--r--  src/spdk/dpdk/drivers/mempool/octeontx/octeontx_fpavf.h | 114
-rw-r--r--  src/spdk/dpdk/drivers/mempool/octeontx/octeontx_pool_logs.h | 22
-rw-r--r--  src/spdk/dpdk/drivers/mempool/octeontx/rte_mempool_octeontx.c | 205
-rw-r--r--  src/spdk/dpdk/drivers/mempool/octeontx/rte_mempool_octeontx_version.map | 3
-rw-r--r--  src/spdk/dpdk/drivers/mempool/octeontx2/Makefile | 40
-rw-r--r--  src/spdk/dpdk/drivers/mempool/octeontx2/meson.build | 23
-rw-r--r--  src/spdk/dpdk/drivers/mempool/octeontx2/otx2_mempool.c | 457
-rw-r--r--  src/spdk/dpdk/drivers/mempool/octeontx2/otx2_mempool.h | 221
-rw-r--r--  src/spdk/dpdk/drivers/mempool/octeontx2/otx2_mempool_debug.c | 135
-rw-r--r--  src/spdk/dpdk/drivers/mempool/octeontx2/otx2_mempool_irq.c | 303
-rw-r--r--  src/spdk/dpdk/drivers/mempool/octeontx2/otx2_mempool_ops.c | 895
-rw-r--r--  src/spdk/dpdk/drivers/mempool/octeontx2/rte_mempool_octeontx2_version.map | 10
-rw-r--r--  src/spdk/dpdk/drivers/mempool/ring/Makefile | 19
-rw-r--r--  src/spdk/dpdk/drivers/mempool/ring/meson.build | 4
-rw-r--r--  src/spdk/dpdk/drivers/mempool/ring/rte_mempool_ring.c | 136
-rw-r--r--  src/spdk/dpdk/drivers/mempool/ring/rte_mempool_ring_version.map | 3
-rw-r--r--  src/spdk/dpdk/drivers/mempool/stack/Makefile | 22
-rw-r--r--  src/spdk/dpdk/drivers/mempool/stack/meson.build | 6
-rw-r--r--  src/spdk/dpdk/drivers/mempool/stack/rte_mempool_stack.c | 97
-rw-r--r--  src/spdk/dpdk/drivers/mempool/stack/rte_mempool_stack_version.map | 3
41 files changed, 5470 insertions, 0 deletions
diff --git a/src/spdk/dpdk/drivers/mempool/Makefile b/src/spdk/dpdk/drivers/mempool/Makefile
new file mode 100644
index 000000000..29ef73bf4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2017 NXP
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+DIRS-$(CONFIG_RTE_DRIVER_MEMPOOL_BUCKET) += bucket
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS),y)
+DIRS-$(CONFIG_RTE_LIBRTE_DPAA_MEMPOOL) += dpaa
+endif
+ifeq ($(CONFIG_RTE_EAL_VFIO)$(CONFIG_RTE_LIBRTE_FSLMC_BUS),yy)
+DIRS-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL) += dpaa2
+endif
+DIRS-$(CONFIG_RTE_DRIVER_MEMPOOL_RING) += ring
+DIRS-$(CONFIG_RTE_DRIVER_MEMPOOL_STACK) += stack
+DIRS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += octeontx
+DIRS-$(CONFIG_RTE_LIBRTE_OCTEONTX2_MEMPOOL) += octeontx2
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/src/spdk/dpdk/drivers/mempool/bucket/Makefile b/src/spdk/dpdk/drivers/mempool/bucket/Makefile
new file mode 100644
index 000000000..c766c35b2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/bucket/Makefile
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Copyright (c) 2017-2018 Solarflare Communications Inc.
+# All rights reserved.
+#
+# This software was jointly developed between OKTET Labs (under contract
+# for Solarflare) and Solarflare Communications, Inc.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_mempool_bucket.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+LDLIBS += -lrte_eal -lrte_mempool -lrte_ring
+
+EXPORT_MAP := rte_mempool_bucket_version.map
+
+SRCS-$(CONFIG_RTE_DRIVER_MEMPOOL_BUCKET) += rte_mempool_bucket.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/mempool/bucket/meson.build b/src/spdk/dpdk/drivers/mempool/bucket/meson.build
new file mode 100644
index 000000000..618d79128
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/bucket/meson.build
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Copyright (c) 2017-2018 Solarflare Communications Inc.
+# All rights reserved.
+#
+# This software was jointly developed between OKTET Labs (under contract
+# for Solarflare) and Solarflare Communications, Inc.
+
+sources = files('rte_mempool_bucket.c')
diff --git a/src/spdk/dpdk/drivers/mempool/bucket/rte_mempool_bucket.c b/src/spdk/dpdk/drivers/mempool/bucket/rte_mempool_bucket.c
new file mode 100644
index 000000000..5ce1ef16f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/bucket/rte_mempool_bucket.c
@@ -0,0 +1,634 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2017-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+#include <stdbool.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <rte_errno.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+
+/*
+ * The general idea of the bucket mempool driver is as follows.
+ * We keep track of physically contiguous groups (buckets) of objects
+ * of a certain size. Every such group has a counter that is
+ * incremented every time an object from that group is enqueued.
+ * Until the bucket is full, no objects from it are eligible for allocation.
+ * If a request is made to dequeue a multiple of the bucket size, it is
+ * satisfied by returning whole buckets instead of separate objects.
+ */
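+
+/*
+ * Illustrative usage sketch (not part of this driver): an application
+ * selects these ops by name before populating the pool.  All names and
+ * sizes below are hypothetical.
+ *
+ *	struct rte_mempool *mp;
+ *
+ *	mp = rte_mempool_create_empty("example_pool", 8192, 2048, 256, 0,
+ *				      rte_socket_id(), 0);
+ *	if (mp != NULL && rte_mempool_set_ops_byname(mp, "bucket", NULL) == 0)
+ *		rte_mempool_populate_default(mp);
+ */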
+
+
+struct bucket_header {
+ unsigned int lcore_id;
+ uint8_t fill_cnt;
+};
+
+struct bucket_stack {
+ unsigned int top;
+ unsigned int limit;
+ void *objects[];
+};
+
+struct bucket_data {
+ unsigned int header_size;
+ unsigned int total_elt_size;
+ unsigned int obj_per_bucket;
+ unsigned int bucket_stack_thresh;
+ uintptr_t bucket_page_mask;
+ struct rte_ring *shared_bucket_ring;
+ struct bucket_stack *buckets[RTE_MAX_LCORE];
+ /*
+	 * Per-lcore multi-producer single-consumer rings that hold objects
+	 * returned to the mempool on a different lcore than the one they
+	 * were originally dequeued on.
+ */
+ struct rte_ring *adoption_buffer_rings[RTE_MAX_LCORE];
+ struct rte_ring *shared_orphan_ring;
+ struct rte_mempool *pool;
+ unsigned int bucket_mem_size;
+};
+
+static struct bucket_stack *
+bucket_stack_create(const struct rte_mempool *mp, unsigned int n_elts)
+{
+ struct bucket_stack *stack;
+
+ stack = rte_zmalloc_socket("bucket_stack",
+ sizeof(struct bucket_stack) +
+ n_elts * sizeof(void *),
+ RTE_CACHE_LINE_SIZE,
+ mp->socket_id);
+ if (stack == NULL)
+ return NULL;
+ stack->limit = n_elts;
+ stack->top = 0;
+
+ return stack;
+}
+
+static void
+bucket_stack_push(struct bucket_stack *stack, void *obj)
+{
+ RTE_ASSERT(stack->top < stack->limit);
+ stack->objects[stack->top++] = obj;
+}
+
+static void *
+bucket_stack_pop_unsafe(struct bucket_stack *stack)
+{
+ RTE_ASSERT(stack->top > 0);
+ return stack->objects[--stack->top];
+}
+
+static void *
+bucket_stack_pop(struct bucket_stack *stack)
+{
+ if (stack->top == 0)
+ return NULL;
+ return bucket_stack_pop_unsafe(stack);
+}
+
+static int
+bucket_enqueue_single(struct bucket_data *bd, void *obj)
+{
+ int rc = 0;
+ uintptr_t addr = (uintptr_t)obj;
+ struct bucket_header *hdr;
+ unsigned int lcore_id = rte_lcore_id();
+
+ addr &= bd->bucket_page_mask;
+ hdr = (struct bucket_header *)addr;
+
+ if (likely(hdr->lcore_id == lcore_id)) {
+ if (hdr->fill_cnt < bd->obj_per_bucket - 1) {
+ hdr->fill_cnt++;
+ } else {
+ hdr->fill_cnt = 0;
+ /* Stack is big enough to put all buckets */
+ bucket_stack_push(bd->buckets[lcore_id], hdr);
+ }
+ } else if (hdr->lcore_id != LCORE_ID_ANY) {
+ struct rte_ring *adopt_ring =
+ bd->adoption_buffer_rings[hdr->lcore_id];
+
+ rc = rte_ring_enqueue(adopt_ring, obj);
+ /* Ring is big enough to put all objects */
+ RTE_ASSERT(rc == 0);
+ } else if (hdr->fill_cnt < bd->obj_per_bucket - 1) {
+ hdr->fill_cnt++;
+ } else {
+ hdr->fill_cnt = 0;
+ rc = rte_ring_enqueue(bd->shared_bucket_ring, hdr);
+ /* Ring is big enough to put all buckets */
+ RTE_ASSERT(rc == 0);
+ }
+
+ return rc;
+}
+
+static int
+bucket_enqueue(struct rte_mempool *mp, void * const *obj_table,
+ unsigned int n)
+{
+ struct bucket_data *bd = mp->pool_data;
+ struct bucket_stack *local_stack = bd->buckets[rte_lcore_id()];
+ unsigned int i;
+ int rc = 0;
+
+ for (i = 0; i < n; i++) {
+ rc = bucket_enqueue_single(bd, obj_table[i]);
+ RTE_ASSERT(rc == 0);
+ }
+ if (local_stack->top > bd->bucket_stack_thresh) {
+ rte_ring_enqueue_bulk(bd->shared_bucket_ring,
+ &local_stack->objects
+ [bd->bucket_stack_thresh],
+ local_stack->top -
+ bd->bucket_stack_thresh,
+ NULL);
+ local_stack->top = bd->bucket_stack_thresh;
+ }
+ return rc;
+}
+
+static void **
+bucket_fill_obj_table(const struct bucket_data *bd, void **pstart,
+ void **obj_table, unsigned int n)
+{
+ unsigned int i;
+ uint8_t *objptr = *pstart;
+
+ for (objptr += bd->header_size, i = 0; i < n;
+ i++, objptr += bd->total_elt_size)
+ *obj_table++ = objptr;
+ *pstart = objptr;
+ return obj_table;
+}
+
+static int
+bucket_dequeue_orphans(struct bucket_data *bd, void **obj_table,
+ unsigned int n_orphans)
+{
+ unsigned int i;
+ int rc;
+ uint8_t *objptr;
+
+ rc = rte_ring_dequeue_bulk(bd->shared_orphan_ring, obj_table,
+ n_orphans, NULL);
+ if (unlikely(rc != (int)n_orphans)) {
+ struct bucket_header *hdr;
+
+ objptr = bucket_stack_pop(bd->buckets[rte_lcore_id()]);
+ hdr = (struct bucket_header *)objptr;
+
+ if (objptr == NULL) {
+ rc = rte_ring_dequeue(bd->shared_bucket_ring,
+ (void **)&objptr);
+ if (rc != 0) {
+ rte_errno = ENOBUFS;
+ return -rte_errno;
+ }
+ hdr = (struct bucket_header *)objptr;
+ hdr->lcore_id = rte_lcore_id();
+ }
+ hdr->fill_cnt = 0;
+ bucket_fill_obj_table(bd, (void **)&objptr, obj_table,
+ n_orphans);
+ for (i = n_orphans; i < bd->obj_per_bucket; i++,
+ objptr += bd->total_elt_size) {
+ rc = rte_ring_enqueue(bd->shared_orphan_ring,
+ objptr);
+ if (rc != 0) {
+ RTE_ASSERT(0);
+ rte_errno = -rc;
+ return rc;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+bucket_dequeue_buckets(struct bucket_data *bd, void **obj_table,
+ unsigned int n_buckets)
+{
+ struct bucket_stack *cur_stack = bd->buckets[rte_lcore_id()];
+ unsigned int n_buckets_from_stack = RTE_MIN(n_buckets, cur_stack->top);
+ void **obj_table_base = obj_table;
+
+ n_buckets -= n_buckets_from_stack;
+ while (n_buckets_from_stack-- > 0) {
+ void *obj = bucket_stack_pop_unsafe(cur_stack);
+
+ obj_table = bucket_fill_obj_table(bd, &obj, obj_table,
+ bd->obj_per_bucket);
+ }
+ while (n_buckets-- > 0) {
+ struct bucket_header *hdr;
+
+ if (unlikely(rte_ring_dequeue(bd->shared_bucket_ring,
+ (void **)&hdr) != 0)) {
+ /*
+ * Return the already-dequeued buffers
+ * back to the mempool
+ */
+ bucket_enqueue(bd->pool, obj_table_base,
+ obj_table - obj_table_base);
+ rte_errno = ENOBUFS;
+ return -rte_errno;
+ }
+ hdr->lcore_id = rte_lcore_id();
+ obj_table = bucket_fill_obj_table(bd, (void **)&hdr,
+ obj_table,
+ bd->obj_per_bucket);
+ }
+
+ return 0;
+}
+
+static int
+bucket_adopt_orphans(struct bucket_data *bd)
+{
+ int rc = 0;
+ struct rte_ring *adopt_ring =
+ bd->adoption_buffer_rings[rte_lcore_id()];
+
+ if (unlikely(!rte_ring_empty(adopt_ring))) {
+ void *orphan;
+
+ while (rte_ring_sc_dequeue(adopt_ring, &orphan) == 0) {
+ rc = bucket_enqueue_single(bd, orphan);
+ RTE_ASSERT(rc == 0);
+ }
+ }
+ return rc;
+}
+
+static int
+bucket_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
+{
+ struct bucket_data *bd = mp->pool_data;
+ unsigned int n_buckets = n / bd->obj_per_bucket;
+ unsigned int n_orphans = n - n_buckets * bd->obj_per_bucket;
+ int rc = 0;
+
+ bucket_adopt_orphans(bd);
+
+ if (unlikely(n_orphans > 0)) {
+ rc = bucket_dequeue_orphans(bd, obj_table +
+ (n_buckets * bd->obj_per_bucket),
+ n_orphans);
+ if (rc != 0)
+ return rc;
+ }
+
+ if (likely(n_buckets > 0)) {
+ rc = bucket_dequeue_buckets(bd, obj_table, n_buckets);
+ if (unlikely(rc != 0) && n_orphans > 0) {
+ rte_ring_enqueue_bulk(bd->shared_orphan_ring,
+ obj_table + (n_buckets *
+ bd->obj_per_bucket),
+ n_orphans, NULL);
+ }
+ }
+
+ return rc;
+}
+
+static int
+bucket_dequeue_contig_blocks(struct rte_mempool *mp, void **first_obj_table,
+ unsigned int n)
+{
+ struct bucket_data *bd = mp->pool_data;
+ const uint32_t header_size = bd->header_size;
+ struct bucket_stack *cur_stack = bd->buckets[rte_lcore_id()];
+ unsigned int n_buckets_from_stack = RTE_MIN(n, cur_stack->top);
+ struct bucket_header *hdr;
+ void **first_objp = first_obj_table;
+
+ bucket_adopt_orphans(bd);
+
+ n -= n_buckets_from_stack;
+ while (n_buckets_from_stack-- > 0) {
+ hdr = bucket_stack_pop_unsafe(cur_stack);
+ *first_objp++ = (uint8_t *)hdr + header_size;
+ }
+ if (n > 0) {
+ if (unlikely(rte_ring_dequeue_bulk(bd->shared_bucket_ring,
+ first_objp, n, NULL) != n)) {
+ /* Return the already dequeued buckets */
+ while (first_objp-- != first_obj_table) {
+ bucket_stack_push(cur_stack,
+ (uint8_t *)*first_objp -
+ header_size);
+ }
+ rte_errno = ENOBUFS;
+ return -rte_errno;
+ }
+ while (n-- > 0) {
+ hdr = (struct bucket_header *)*first_objp;
+ hdr->lcore_id = rte_lcore_id();
+ *first_objp++ = (uint8_t *)hdr + header_size;
+ }
+ }
+
+ return 0;
+}
+
+static void
+count_underfilled_buckets(struct rte_mempool *mp,
+ void *opaque,
+ struct rte_mempool_memhdr *memhdr,
+ __rte_unused unsigned int mem_idx)
+{
+ unsigned int *pcount = opaque;
+ const struct bucket_data *bd = mp->pool_data;
+ unsigned int bucket_page_sz =
+ (unsigned int)(~bd->bucket_page_mask + 1);
+ uintptr_t align;
+ uint8_t *iter;
+
+ align = (uintptr_t)RTE_PTR_ALIGN_CEIL(memhdr->addr, bucket_page_sz) -
+ (uintptr_t)memhdr->addr;
+
+ for (iter = (uint8_t *)memhdr->addr + align;
+ iter < (uint8_t *)memhdr->addr + memhdr->len;
+ iter += bucket_page_sz) {
+ struct bucket_header *hdr = (struct bucket_header *)iter;
+
+ *pcount += hdr->fill_cnt;
+ }
+}
+
+static unsigned int
+bucket_get_count(const struct rte_mempool *mp)
+{
+ const struct bucket_data *bd = mp->pool_data;
+ unsigned int count =
+ bd->obj_per_bucket * rte_ring_count(bd->shared_bucket_ring) +
+ rte_ring_count(bd->shared_orphan_ring);
+ unsigned int i;
+
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ if (!rte_lcore_is_enabled(i))
+ continue;
+ count += bd->obj_per_bucket * bd->buckets[i]->top +
+ rte_ring_count(bd->adoption_buffer_rings[i]);
+ }
+
+ rte_mempool_mem_iter((struct rte_mempool *)(uintptr_t)mp,
+ count_underfilled_buckets, &count);
+
+ return count;
+}
+
+static int
+bucket_alloc(struct rte_mempool *mp)
+{
+ int rg_flags = 0;
+ int rc = 0;
+ char rg_name[RTE_RING_NAMESIZE];
+ struct bucket_data *bd;
+ unsigned int i;
+ unsigned int bucket_header_size;
+ size_t pg_sz;
+
+ rc = rte_mempool_get_page_size(mp, &pg_sz);
+ if (rc < 0)
+ return rc;
+
+ bd = rte_zmalloc_socket("bucket_pool", sizeof(*bd),
+ RTE_CACHE_LINE_SIZE, mp->socket_id);
+ if (bd == NULL) {
+ rc = -ENOMEM;
+ goto no_mem_for_data;
+ }
+ bd->pool = mp;
+ if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN)
+ bucket_header_size = sizeof(struct bucket_header);
+ else
+ bucket_header_size = RTE_CACHE_LINE_SIZE;
+ RTE_BUILD_BUG_ON(sizeof(struct bucket_header) > RTE_CACHE_LINE_SIZE);
+ bd->header_size = mp->header_size + bucket_header_size;
+ bd->total_elt_size = mp->header_size + mp->elt_size + mp->trailer_size;
+ bd->bucket_mem_size = RTE_MIN(pg_sz,
+ (size_t)(RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB * 1024));
+ bd->obj_per_bucket = (bd->bucket_mem_size - bucket_header_size) /
+ bd->total_elt_size;
+ bd->bucket_page_mask = ~(rte_align64pow2(bd->bucket_mem_size) - 1);
+ /* eventually this should be a tunable parameter */
+ bd->bucket_stack_thresh = (mp->size / bd->obj_per_bucket) * 4 / 3;
+
+ if (mp->flags & MEMPOOL_F_SP_PUT)
+ rg_flags |= RING_F_SP_ENQ;
+ if (mp->flags & MEMPOOL_F_SC_GET)
+ rg_flags |= RING_F_SC_DEQ;
+
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ if (!rte_lcore_is_enabled(i))
+ continue;
+ bd->buckets[i] =
+ bucket_stack_create(mp, mp->size / bd->obj_per_bucket);
+ if (bd->buckets[i] == NULL) {
+ rc = -ENOMEM;
+ goto no_mem_for_stacks;
+ }
+ rc = snprintf(rg_name, sizeof(rg_name),
+ RTE_MEMPOOL_MZ_FORMAT ".a%u", mp->name, i);
+ if (rc < 0 || rc >= (int)sizeof(rg_name)) {
+ rc = -ENAMETOOLONG;
+ goto no_mem_for_stacks;
+ }
+ bd->adoption_buffer_rings[i] =
+ rte_ring_create(rg_name, rte_align32pow2(mp->size + 1),
+ mp->socket_id,
+ rg_flags | RING_F_SC_DEQ);
+ if (bd->adoption_buffer_rings[i] == NULL) {
+ rc = -rte_errno;
+ goto no_mem_for_stacks;
+ }
+ }
+
+ rc = snprintf(rg_name, sizeof(rg_name),
+ RTE_MEMPOOL_MZ_FORMAT ".0", mp->name);
+ if (rc < 0 || rc >= (int)sizeof(rg_name)) {
+ rc = -ENAMETOOLONG;
+ goto invalid_shared_orphan_ring;
+ }
+ bd->shared_orphan_ring =
+ rte_ring_create(rg_name, rte_align32pow2(mp->size + 1),
+ mp->socket_id, rg_flags);
+ if (bd->shared_orphan_ring == NULL) {
+ rc = -rte_errno;
+ goto cannot_create_shared_orphan_ring;
+ }
+
+ rc = snprintf(rg_name, sizeof(rg_name),
+ RTE_MEMPOOL_MZ_FORMAT ".1", mp->name);
+ if (rc < 0 || rc >= (int)sizeof(rg_name)) {
+ rc = -ENAMETOOLONG;
+ goto invalid_shared_bucket_ring;
+ }
+ bd->shared_bucket_ring =
+ rte_ring_create(rg_name,
+ rte_align32pow2((mp->size + 1) /
+ bd->obj_per_bucket),
+ mp->socket_id, rg_flags);
+ if (bd->shared_bucket_ring == NULL) {
+ rc = -rte_errno;
+ goto cannot_create_shared_bucket_ring;
+ }
+
+ mp->pool_data = bd;
+
+ return 0;
+
+cannot_create_shared_bucket_ring:
+invalid_shared_bucket_ring:
+ rte_ring_free(bd->shared_orphan_ring);
+cannot_create_shared_orphan_ring:
+invalid_shared_orphan_ring:
+no_mem_for_stacks:
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ rte_free(bd->buckets[i]);
+ rte_ring_free(bd->adoption_buffer_rings[i]);
+ }
+ rte_free(bd);
+no_mem_for_data:
+ rte_errno = -rc;
+ return rc;
+}
+
+static void
+bucket_free(struct rte_mempool *mp)
+{
+ unsigned int i;
+ struct bucket_data *bd = mp->pool_data;
+
+ if (bd == NULL)
+ return;
+
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ rte_free(bd->buckets[i]);
+ rte_ring_free(bd->adoption_buffer_rings[i]);
+ }
+
+ rte_ring_free(bd->shared_orphan_ring);
+ rte_ring_free(bd->shared_bucket_ring);
+
+ rte_free(bd);
+}
+
+static ssize_t
+bucket_calc_mem_size(const struct rte_mempool *mp, uint32_t obj_num,
+ __rte_unused uint32_t pg_shift, size_t *min_total_elt_size,
+ size_t *align)
+{
+ struct bucket_data *bd = mp->pool_data;
+ unsigned int bucket_page_sz;
+
+ if (bd == NULL)
+ return -EINVAL;
+
+ bucket_page_sz = rte_align32pow2(bd->bucket_mem_size);
+ *align = bucket_page_sz;
+ *min_total_elt_size = bucket_page_sz;
+ /*
+ * Each bucket occupies its own block aligned to
+ * bucket_page_sz, so the required amount of memory is
+ * a multiple of bucket_page_sz.
+ * We also need extra space for a bucket header
+ */
+ return ((obj_num + bd->obj_per_bucket - 1) /
+ bd->obj_per_bucket) * bucket_page_sz;
+}
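+
+/*
+ * Worked example (hypothetical numbers, assuming a 64-byte cache-line
+ * bucket header): with total_elt_size = 2176 and bucket_mem_size = 64 KiB,
+ * obj_per_bucket = (65536 - 64) / 2176 = 30 and bucket_page_sz = 64 KiB,
+ * so a request for obj_num = 100 objects needs ceil(100 / 30) = 4 buckets,
+ * i.e. 4 * 64 KiB = 256 KiB of memory.
+ */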
+
+static int
+bucket_populate(struct rte_mempool *mp, unsigned int max_objs,
+ void *vaddr, rte_iova_t iova, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
+{
+ struct bucket_data *bd = mp->pool_data;
+ unsigned int bucket_page_sz;
+ unsigned int bucket_header_sz;
+ unsigned int n_objs;
+ uintptr_t align;
+ uint8_t *iter;
+ int rc;
+
+ if (bd == NULL)
+ return -EINVAL;
+
+ bucket_page_sz = rte_align32pow2(bd->bucket_mem_size);
+ align = RTE_PTR_ALIGN_CEIL((uintptr_t)vaddr, bucket_page_sz) -
+ (uintptr_t)vaddr;
+
+ bucket_header_sz = bd->header_size - mp->header_size;
+ if (iova != RTE_BAD_IOVA)
+ iova += align + bucket_header_sz;
+
+ for (iter = (uint8_t *)vaddr + align, n_objs = 0;
+ iter < (uint8_t *)vaddr + len && n_objs < max_objs;
+ iter += bucket_page_sz) {
+ struct bucket_header *hdr = (struct bucket_header *)iter;
+ unsigned int chunk_len = bd->bucket_mem_size;
+
+ if ((size_t)(iter - (uint8_t *)vaddr) + chunk_len > len)
+ chunk_len = len - (iter - (uint8_t *)vaddr);
+ if (chunk_len <= bucket_header_sz)
+ break;
+ chunk_len -= bucket_header_sz;
+
+ hdr->fill_cnt = 0;
+ hdr->lcore_id = LCORE_ID_ANY;
+ rc = rte_mempool_op_populate_helper(mp, 0,
+ RTE_MIN(bd->obj_per_bucket,
+ max_objs - n_objs),
+ iter + bucket_header_sz,
+ iova, chunk_len,
+ obj_cb, obj_cb_arg);
+ if (rc < 0)
+ return rc;
+ n_objs += rc;
+ if (iova != RTE_BAD_IOVA)
+ iova += bucket_page_sz;
+ }
+
+ return n_objs;
+}
+
+static int
+bucket_get_info(const struct rte_mempool *mp, struct rte_mempool_info *info)
+{
+ struct bucket_data *bd = mp->pool_data;
+
+ info->contig_block_size = bd->obj_per_bucket;
+ return 0;
+}
+
+
+static const struct rte_mempool_ops ops_bucket = {
+ .name = "bucket",
+ .alloc = bucket_alloc,
+ .free = bucket_free,
+ .enqueue = bucket_enqueue,
+ .dequeue = bucket_dequeue,
+ .get_count = bucket_get_count,
+ .calc_mem_size = bucket_calc_mem_size,
+ .populate = bucket_populate,
+ .get_info = bucket_get_info,
+ .dequeue_contig_blocks = bucket_dequeue_contig_blocks,
+};
+
+
+MEMPOOL_REGISTER_OPS(ops_bucket);
diff --git a/src/spdk/dpdk/drivers/mempool/bucket/rte_mempool_bucket_version.map b/src/spdk/dpdk/drivers/mempool/bucket/rte_mempool_bucket_version.map
new file mode 100644
index 000000000..f9f17e4f6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/bucket/rte_mempool_bucket_version.map
@@ -0,0 +1,3 @@
+DPDK_20.0 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/mempool/dpaa/Makefile b/src/spdk/dpdk/drivers/mempool/dpaa/Makefile
new file mode 100644
index 000000000..144969c62
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/dpaa/Makefile
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2016 NXP
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_mempool_dpaa.a
+
+CFLAGS := -I$(SRCDIR) $(CFLAGS)
+CFLAGS += -O3 $(WERROR_FLAGS)
+CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa
+CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/include/
+CFLAGS += -I$(RTE_SDK)/drivers/common/dpaax
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa
+CFLAGS += -I$(RTE_SDK)/lib/librte_mempool
+
+# versioning export map
+EXPORT_MAP := rte_mempool_dpaa_version.map
+
+# all sources are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA_MEMPOOL) += dpaa_mempool.c
+
+LDLIBS += -lrte_bus_dpaa
+LDLIBS += -lrte_eal -lrte_mempool -lrte_ring
+LDLIBS += -lrte_common_dpaax
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/mempool/dpaa/dpaa_mempool.c b/src/spdk/dpdk/drivers/mempool/dpaa/dpaa_mempool.c
new file mode 100644
index 000000000..451e2d5d5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/dpaa/dpaa_mempool.c
@@ -0,0 +1,366 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2017,2019 NXP
+ *
+ */
+
+/* System headers */
+#include <stdio.h>
+#include <inttypes.h>
+#include <unistd.h>
+#include <limits.h>
+#include <sched.h>
+#include <signal.h>
+#include <pthread.h>
+#include <sys/types.h>
+#include <sys/syscall.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_memory.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+
+#include <dpaa_mempool.h>
+#include <dpaax_iova_table.h>
+
+/* List of all the memseg information locally maintained in the dpaa driver.
+ * This is used to optimize PA-to-VA searches until a better mechanism
+ * (algorithm) is available.
+ */
+struct dpaa_memseg_list rte_dpaa_memsegs
+ = TAILQ_HEAD_INITIALIZER(rte_dpaa_memsegs);
+
+struct dpaa_bp_info *rte_dpaa_bpid_info;
+int dpaa_logtype_mempool;
+
+static int
+dpaa_mbuf_create_pool(struct rte_mempool *mp)
+{
+ struct bman_pool *bp;
+ struct bm_buffer bufs[8];
+ struct dpaa_bp_info *bp_info;
+ uint8_t bpid;
+ int num_bufs = 0, ret = 0;
+ struct bman_pool_params params = {
+ .flags = BMAN_POOL_FLAG_DYNAMIC_BPID
+ };
+
+ MEMPOOL_INIT_FUNC_TRACE();
+
+ if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ ret = rte_dpaa_portal_init((void *)0);
+ if (ret) {
+ DPAA_MEMPOOL_ERR(
+ "rte_dpaa_portal_init failed with ret: %d",
+ ret);
+ return -1;
+ }
+ }
+ bp = bman_new_pool(&params);
+ if (!bp) {
+ DPAA_MEMPOOL_ERR("bman_new_pool() failed");
+ return -ENODEV;
+ }
+ bpid = bman_get_params(bp)->bpid;
+
+ /* Drain the pool of anything already in it. */
+ do {
+ /* Acquire is all-or-nothing, so we drain in 8s,
+ * then in 1s for the remainder.
+ */
+ if (ret != 1)
+ ret = bman_acquire(bp, bufs, 8, 0);
+ if (ret < 8)
+ ret = bman_acquire(bp, bufs, 1, 0);
+ if (ret > 0)
+ num_bufs += ret;
+ } while (ret > 0);
+ if (num_bufs)
+ DPAA_MEMPOOL_WARN("drained %u bufs from BPID %d",
+ num_bufs, bpid);
+
+ if (rte_dpaa_bpid_info == NULL) {
+ rte_dpaa_bpid_info = (struct dpaa_bp_info *)rte_zmalloc(NULL,
+ sizeof(struct dpaa_bp_info) * DPAA_MAX_BPOOLS,
+ RTE_CACHE_LINE_SIZE);
+ if (rte_dpaa_bpid_info == NULL) {
+ bman_free_pool(bp);
+ return -ENOMEM;
+ }
+ }
+
+ rte_dpaa_bpid_info[bpid].mp = mp;
+ rte_dpaa_bpid_info[bpid].bpid = bpid;
+ rte_dpaa_bpid_info[bpid].size = mp->elt_size;
+ rte_dpaa_bpid_info[bpid].bp = bp;
+ rte_dpaa_bpid_info[bpid].meta_data_size =
+ sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mp);
+ rte_dpaa_bpid_info[bpid].dpaa_ops_index = mp->ops_index;
+ rte_dpaa_bpid_info[bpid].ptov_off = 0;
+ rte_dpaa_bpid_info[bpid].flags = 0;
+
+ bp_info = rte_malloc(NULL,
+ sizeof(struct dpaa_bp_info),
+ RTE_CACHE_LINE_SIZE);
+ if (!bp_info) {
+ DPAA_MEMPOOL_WARN("Memory allocation failed for bp_info");
+ bman_free_pool(bp);
+ return -ENOMEM;
+ }
+
+ rte_memcpy(bp_info, (void *)&rte_dpaa_bpid_info[bpid],
+ sizeof(struct dpaa_bp_info));
+ mp->pool_data = (void *)bp_info;
+
+ DPAA_MEMPOOL_INFO("BMAN pool created for bpid =%d", bpid);
+ return 0;
+}
+
+static void
+dpaa_mbuf_free_pool(struct rte_mempool *mp)
+{
+ struct dpaa_bp_info *bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
+
+ MEMPOOL_INIT_FUNC_TRACE();
+
+ if (bp_info) {
+ bman_free_pool(bp_info->bp);
+ DPAA_MEMPOOL_INFO("BMAN pool freed for bpid =%d",
+ bp_info->bpid);
+ rte_free(mp->pool_data);
+ mp->pool_data = NULL;
+ }
+}
+
+static void
+dpaa_buf_free(struct dpaa_bp_info *bp_info, uint64_t addr)
+{
+ struct bm_buffer buf;
+ int ret;
+
+ DPAA_MEMPOOL_DPDEBUG("Free 0x%" PRIx64 " to bpid: %d",
+ addr, bp_info->bpid);
+
+ bm_buffer_set64(&buf, addr);
+retry:
+ ret = bman_release(bp_info->bp, &buf, 1, 0);
+ if (ret) {
+ DPAA_MEMPOOL_DEBUG("BMAN busy. Retrying...");
+ cpu_spin(CPU_SPIN_BACKOFF_CYCLES);
+ goto retry;
+ }
+}
+
+static int
+dpaa_mbuf_free_bulk(struct rte_mempool *pool,
+ void *const *obj_table,
+ unsigned int n)
+{
+ struct dpaa_bp_info *bp_info = DPAA_MEMPOOL_TO_POOL_INFO(pool);
+ int ret;
+ unsigned int i = 0;
+
+ DPAA_MEMPOOL_DPDEBUG("Request to free %d buffers in bpid = %d",
+ n, bp_info->bpid);
+
+ if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ ret = rte_dpaa_portal_init((void *)0);
+ if (ret) {
+ DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d",
+ ret);
+ return 0;
+ }
+ }
+
+ while (i < n) {
+ uint64_t phy = rte_mempool_virt2iova(obj_table[i]);
+
+ if (unlikely(!bp_info->ptov_off)) {
+ /* buffers are from single mem segment */
+ if (bp_info->flags & DPAA_MPOOL_SINGLE_SEGMENT) {
+ bp_info->ptov_off = (size_t)obj_table[i] - phy;
+ rte_dpaa_bpid_info[bp_info->bpid].ptov_off
+ = bp_info->ptov_off;
+ }
+ }
+
+ dpaa_buf_free(bp_info,
+ (uint64_t)phy + bp_info->meta_data_size);
+ i = i + 1;
+ }
+
+ DPAA_MEMPOOL_DPDEBUG("freed %d buffers in bpid =%d",
+ n, bp_info->bpid);
+
+ return 0;
+}
+
+static int
+dpaa_mbuf_alloc_bulk(struct rte_mempool *pool,
+ void **obj_table,
+ unsigned int count)
+{
+ struct rte_mbuf **m = (struct rte_mbuf **)obj_table;
+ struct bm_buffer bufs[DPAA_MBUF_MAX_ACQ_REL];
+ struct dpaa_bp_info *bp_info;
+ void *bufaddr;
+ int i, ret;
+ unsigned int n = 0;
+
+ bp_info = DPAA_MEMPOOL_TO_POOL_INFO(pool);
+
+ DPAA_MEMPOOL_DPDEBUG("Request to alloc %d buffers in bpid = %d",
+ count, bp_info->bpid);
+
+ if (unlikely(count >= (RTE_MEMPOOL_CACHE_MAX_SIZE * 2))) {
+ DPAA_MEMPOOL_ERR("Unable to allocate requested (%u) buffers",
+ count);
+ return -1;
+ }
+
+ if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ ret = rte_dpaa_portal_init((void *)0);
+ if (ret) {
+ DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d",
+ ret);
+ return -1;
+ }
+ }
+
+ while (n < count) {
+		/* Acquire is all-or-nothing, so we acquire in chunks of
+		 * DPAA_MBUF_MAX_ACQ_REL (8) buffers, then the remainder.
+		 */
+ if ((count - n) > DPAA_MBUF_MAX_ACQ_REL) {
+ ret = bman_acquire(bp_info->bp, bufs,
+ DPAA_MBUF_MAX_ACQ_REL, 0);
+ } else {
+ ret = bman_acquire(bp_info->bp, bufs, count - n, 0);
+ }
+		/* If fewer buffers than requested are available in the pool,
+		 * bman_acquire() returns 0 or an error.
+		 */
+ if (ret <= 0) {
+ DPAA_MEMPOOL_DPDEBUG("Buffer acquire failed (%d)",
+ ret);
+			/* The API expects the exact number of requested
+			 * buffers, so release all buffers allocated so far.
+			 */
+ dpaa_mbuf_free_bulk(pool, obj_table, n);
+ return -ENOBUFS;
+ }
+ /* assigning mbuf from the acquired objects */
+ for (i = 0; (i < ret) && bufs[i].addr; i++) {
+			/* TODO-errata - observed that bufs may be NULL,
+			 * i.e. the first buffer is valid while the
+			 * remaining buffers may be NULL.
+			 */
+ bufaddr = DPAA_MEMPOOL_PTOV(bp_info, bufs[i].addr);
+ m[n] = (struct rte_mbuf *)((char *)bufaddr
+ - bp_info->meta_data_size);
+ DPAA_MEMPOOL_DPDEBUG("Paddr (%p), FD (%p) from BMAN",
+ (void *)bufaddr, (void *)m[n]);
+ n++;
+ }
+ }
+
+ DPAA_MEMPOOL_DPDEBUG("Allocated %d buffers from bpid=%d",
+ n, bp_info->bpid);
+ return 0;
+}
+
+static unsigned int
+dpaa_mbuf_get_count(const struct rte_mempool *mp)
+{
+ struct dpaa_bp_info *bp_info;
+
+ MEMPOOL_INIT_FUNC_TRACE();
+
+ if (!mp || !mp->pool_data) {
+ DPAA_MEMPOOL_ERR("Invalid mempool provided\n");
+ return 0;
+ }
+
+ bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
+
+ return bman_query_free_buffers(bp_info->bp);
+}
+
+static int
+dpaa_populate(struct rte_mempool *mp, unsigned int max_objs,
+ void *vaddr, rte_iova_t paddr, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
+{
+ struct dpaa_bp_info *bp_info;
+ unsigned int total_elt_sz;
+
+ if (!mp || !mp->pool_data) {
+ DPAA_MEMPOOL_ERR("Invalid mempool provided\n");
+ return 0;
+ }
+
+ /* Update the PA-VA Table */
+ dpaax_iova_table_update(paddr, vaddr, len);
+
+ bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
+ total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
+
+ DPAA_MEMPOOL_DPDEBUG("Req size %" PRIx64 " vs Available %u\n",
+ (uint64_t)len, total_elt_sz * mp->size);
+
+	/* Detect whether the pool area has sufficient space for all elements in this memzone */
+ if (len >= total_elt_sz * mp->size)
+ bp_info->flags |= DPAA_MPOOL_SINGLE_SEGMENT;
+ struct dpaa_memseg *ms;
+
+ /* For each memory chunk pinned to the Mempool, a linked list of the
+ * contained memsegs is created for searching when PA to VA
+ * conversion is required.
+ */
+ ms = rte_zmalloc(NULL, sizeof(struct dpaa_memseg), 0);
+ if (!ms) {
+ DPAA_MEMPOOL_ERR("Unable to allocate internal memory.");
+ DPAA_MEMPOOL_WARN("Fast Physical to Virtual Addr translation would not be available.");
+		/* If the entry is not added, lookups for it simply fail and
+		 * the logic falls back to the traditional DPDK memseg
+		 * traversal code. So this is not a blocking error, but an
+		 * error message is logged.
+ */
+ return 0;
+ }
+
+ ms->vaddr = vaddr;
+ ms->iova = paddr;
+ ms->len = len;
+ /* Head insertions are generally faster than tail insertions as the
+ * buffers pinned are picked from rear end.
+ */
+ TAILQ_INSERT_HEAD(&rte_dpaa_memsegs, ms, next);
+
+ return rte_mempool_op_populate_helper(mp, 0, max_objs, vaddr, paddr,
+ len, obj_cb, obj_cb_arg);
+}
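+
+/*
+ * Illustrative sketch (not part of this file) of how a PA-to-VA lookup over
+ * the rte_dpaa_memsegs list built above might look; the real conversion
+ * helper lives in the DPAA bus driver, and the function name below is
+ * hypothetical.
+ *
+ *	static inline void *example_paddr_to_vaddr(rte_iova_t paddr)
+ *	{
+ *		struct dpaa_memseg *ms;
+ *
+ *		TAILQ_FOREACH(ms, &rte_dpaa_memsegs, next) {
+ *			if (paddr >= ms->iova && paddr < ms->iova + ms->len)
+ *				return (uint8_t *)ms->vaddr +
+ *					(paddr - ms->iova);
+ *		}
+ *		return NULL; (fall back to generic memseg traversal)
+ *	}
+ */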
+
+static const struct rte_mempool_ops dpaa_mpool_ops = {
+ .name = DPAA_MEMPOOL_OPS_NAME,
+ .alloc = dpaa_mbuf_create_pool,
+ .free = dpaa_mbuf_free_pool,
+ .enqueue = dpaa_mbuf_free_bulk,
+ .dequeue = dpaa_mbuf_alloc_bulk,
+ .get_count = dpaa_mbuf_get_count,
+ .populate = dpaa_populate,
+};
+
+MEMPOOL_REGISTER_OPS(dpaa_mpool_ops);
+
+RTE_INIT(dpaa_mp_init_log)
+{
+ dpaa_logtype_mempool = rte_log_register("mempool.dpaa");
+ if (dpaa_logtype_mempool >= 0)
+ rte_log_set_level(dpaa_logtype_mempool, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/mempool/dpaa/dpaa_mempool.h b/src/spdk/dpdk/drivers/mempool/dpaa/dpaa_mempool.h
new file mode 100644
index 000000000..dc0058e6d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/dpaa/dpaa_mempool.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2017,2019 NXP
+ *
+ */
+#ifndef __DPAA_MEMPOOL_H__
+#define __DPAA_MEMPOOL_H__
+
+/* System headers */
+#include <stdio.h>
+#include <stdbool.h>
+#include <inttypes.h>
+#include <unistd.h>
+
+#include <rte_mempool.h>
+
+#include <rte_dpaa_bus.h>
+#include <rte_dpaa_logs.h>
+
+#include <fsl_usd.h>
+#include <fsl_bman.h>
+
+#define CPU_SPIN_BACKOFF_CYCLES 512
+
+/* total number of bpools on SoC */
+#define DPAA_MAX_BPOOLS 256
+
+/* Maximum release/acquire from BMAN */
+#define DPAA_MBUF_MAX_ACQ_REL 8
+
+/* Buffers are allocated from single mem segment i.e. phys contiguous */
+#define DPAA_MPOOL_SINGLE_SEGMENT 0x01
+
+struct dpaa_bp_info {
+ struct rte_mempool *mp;
+ struct bman_pool *bp;
+ uint32_t bpid;
+ uint32_t size;
+ uint32_t meta_data_size;
+ int32_t dpaa_ops_index;
+ int64_t ptov_off;
+ uint8_t flags;
+};
+
+static inline void *
+DPAA_MEMPOOL_PTOV(struct dpaa_bp_info *bp_info __rte_unused, uint64_t addr)
+{
+ return rte_dpaa_mem_ptov(addr);
+}
+
+#define DPAA_MEMPOOL_TO_POOL_INFO(__mp) \
+ ((struct dpaa_bp_info *)__mp->pool_data)
+
+#define DPAA_MEMPOOL_TO_BPID(__mp) \
+ (((struct dpaa_bp_info *)__mp->pool_data)->bpid)
+
+extern struct dpaa_bp_info *rte_dpaa_bpid_info;
+
+#define DPAA_BPID_TO_POOL_INFO(__bpid) (&rte_dpaa_bpid_info[__bpid])
+
+/* Mempool related logs */
+
+#define DPAA_MEMPOOL_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa_logtype_mempool, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+#define MEMPOOL_INIT_FUNC_TRACE() DPAA_MEMPOOL_LOG(DEBUG, " >>")
+
+#define DPAA_MEMPOOL_DPDEBUG(fmt, args...) \
+ RTE_LOG_DP(DEBUG, PMD, fmt, ## args)
+#define DPAA_MEMPOOL_DEBUG(fmt, args...) \
+ DPAA_MEMPOOL_LOG(DEBUG, fmt, ## args)
+#define DPAA_MEMPOOL_ERR(fmt, args...) \
+ DPAA_MEMPOOL_LOG(ERR, fmt, ## args)
+#define DPAA_MEMPOOL_INFO(fmt, args...) \
+ DPAA_MEMPOOL_LOG(INFO, fmt, ## args)
+#define DPAA_MEMPOOL_WARN(fmt, args...) \
+ DPAA_MEMPOOL_LOG(WARNING, fmt, ## args)
+
+#endif
diff --git a/src/spdk/dpdk/drivers/mempool/dpaa/meson.build b/src/spdk/dpdk/drivers/mempool/dpaa/meson.build
new file mode 100644
index 000000000..754e6397f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/dpaa/meson.build
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if not is_linux
+ build = false
+ reason = 'only supported on linux'
+endif
+
+deps += ['bus_dpaa']
+sources = files('dpaa_mempool.c')
diff --git a/src/spdk/dpdk/drivers/mempool/dpaa/rte_mempool_dpaa_version.map b/src/spdk/dpdk/drivers/mempool/dpaa/rte_mempool_dpaa_version.map
new file mode 100644
index 000000000..89d7cf495
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/dpaa/rte_mempool_dpaa_version.map
@@ -0,0 +1,10 @@
+DPDK_20.0 {
+ local: *;
+};
+
+INTERNAL {
+ global:
+
+ rte_dpaa_bpid_info;
+ rte_dpaa_memsegs;
+};
diff --git a/src/spdk/dpdk/drivers/mempool/dpaa2/Makefile b/src/spdk/dpdk/drivers/mempool/dpaa2/Makefile
new file mode 100644
index 000000000..04db6d27d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/dpaa2/Makefile
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2016 NXP
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_mempool_dpaa2.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc
+CFLAGS += -I$(RTE_SDK)/drivers/common/dpaax
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
+
+# versioning export map
+EXPORT_MAP := rte_mempool_dpaa2_version.map
+
+# all sources are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL) += dpaa2_hw_mempool.c
+
+LDLIBS += -lrte_bus_fslmc
+LDLIBS += -lrte_eal -lrte_mempool -lrte_ring
+LDLIBS += -lrte_common_dpaax
+
+SYMLINK-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL)-include := rte_dpaa2_mempool.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/mempool/dpaa2/dpaa2_hw_mempool.c b/src/spdk/dpdk/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
new file mode 100644
index 000000000..fa9b53e64
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
@@ -0,0 +1,462 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2016-2019 NXP
+ *
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <string.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <errno.h>
+
+#include <rte_mbuf.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include "rte_dpaa2_mempool.h"
+
+#include "fslmc_vfio.h"
+#include <fslmc_logs.h>
+#include <mc/fsl_dpbp.h>
+#include <portal/dpaa2_hw_pvt.h>
+#include <portal/dpaa2_hw_dpio.h>
+#include "dpaa2_hw_mempool.h"
+#include "dpaa2_hw_mempool_logs.h"
+
+#include <dpaax_iova_table.h>
+
+struct dpaa2_bp_info *rte_dpaa2_bpid_info;
+static struct dpaa2_bp_list *h_bp_list;
+
+/* Dynamic logging identified for mempool */
+int dpaa2_logtype_mempool;
+
+static int
+rte_hw_mbuf_create_pool(struct rte_mempool *mp)
+{
+ struct dpaa2_bp_list *bp_list;
+ struct dpaa2_dpbp_dev *avail_dpbp;
+ struct dpaa2_bp_info *bp_info;
+ struct dpbp_attr dpbp_attr;
+ uint32_t bpid;
+ int ret;
+
+ avail_dpbp = dpaa2_alloc_dpbp_dev();
+
+ if (rte_dpaa2_bpid_info == NULL) {
+ rte_dpaa2_bpid_info = (struct dpaa2_bp_info *)rte_malloc(NULL,
+ sizeof(struct dpaa2_bp_info) * MAX_BPID,
+ RTE_CACHE_LINE_SIZE);
+ if (rte_dpaa2_bpid_info == NULL)
+ return -ENOMEM;
+ memset(rte_dpaa2_bpid_info, 0,
+ sizeof(struct dpaa2_bp_info) * MAX_BPID);
+ }
+
+ if (!avail_dpbp) {
+ DPAA2_MEMPOOL_ERR("DPAA2 pool not available!");
+ return -ENOENT;
+ }
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_MEMPOOL_ERR(
+ "Failed to allocate IO portal, tid: %d\n",
+ rte_gettid());
+ goto err1;
+ }
+ }
+
+ ret = dpbp_enable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token);
+ if (ret != 0) {
+ DPAA2_MEMPOOL_ERR("Resource enable failure with err code: %d",
+ ret);
+ goto err1;
+ }
+
+ ret = dpbp_get_attributes(&avail_dpbp->dpbp, CMD_PRI_LOW,
+ avail_dpbp->token, &dpbp_attr);
+ if (ret != 0) {
+ DPAA2_MEMPOOL_ERR("Resource read failure with err code: %d",
+ ret);
+ goto err2;
+ }
+
+ bp_info = rte_malloc(NULL,
+ sizeof(struct dpaa2_bp_info),
+ RTE_CACHE_LINE_SIZE);
+ if (!bp_info) {
+ DPAA2_MEMPOOL_ERR("Unable to allocate buffer pool memory");
+ ret = -ENOMEM;
+ goto err2;
+ }
+
+ /* Allocate the bp_list which will be added into global_bp_list */
+ bp_list = rte_malloc(NULL, sizeof(struct dpaa2_bp_list),
+ RTE_CACHE_LINE_SIZE);
+ if (!bp_list) {
+ DPAA2_MEMPOOL_ERR("Unable to allocate buffer pool memory");
+ ret = -ENOMEM;
+ goto err3;
+ }
+
+ /* Set parameters of buffer pool list */
+ bp_list->buf_pool.num_bufs = mp->size;
+ bp_list->buf_pool.size = mp->elt_size
+ - sizeof(struct rte_mbuf) - rte_pktmbuf_priv_size(mp);
+ bp_list->buf_pool.bpid = dpbp_attr.bpid;
+ bp_list->buf_pool.h_bpool_mem = NULL;
+ bp_list->buf_pool.dpbp_node = avail_dpbp;
+ /* Identification for our offloaded pool_data structure */
+ bp_list->dpaa2_ops_index = mp->ops_index;
+ bp_list->next = h_bp_list;
+ bp_list->mp = mp;
+
+ bpid = dpbp_attr.bpid;
+
+ rte_dpaa2_bpid_info[bpid].meta_data_size = sizeof(struct rte_mbuf)
+ + rte_pktmbuf_priv_size(mp);
+ rte_dpaa2_bpid_info[bpid].bp_list = bp_list;
+ rte_dpaa2_bpid_info[bpid].bpid = bpid;
+
+ rte_memcpy(bp_info, (void *)&rte_dpaa2_bpid_info[bpid],
+ sizeof(struct dpaa2_bp_info));
+ mp->pool_data = (void *)bp_info;
+
+ DPAA2_MEMPOOL_DEBUG("BP List created for bpid =%d", dpbp_attr.bpid);
+
+ h_bp_list = bp_list;
+ return 0;
+err3:
+ rte_free(bp_info);
+err2:
+ dpbp_disable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token);
+err1:
+ dpaa2_free_dpbp_dev(avail_dpbp);
+
+ return ret;
+}
+
+static void
+rte_hw_mbuf_free_pool(struct rte_mempool *mp)
+{
+ struct dpaa2_bp_info *bpinfo;
+ struct dpaa2_bp_list *bp;
+ struct dpaa2_dpbp_dev *dpbp_node;
+
+ if (!mp->pool_data) {
+ DPAA2_MEMPOOL_ERR("Not a valid dpaa2 buffer pool");
+ return;
+ }
+
+ bpinfo = (struct dpaa2_bp_info *)mp->pool_data;
+ bp = bpinfo->bp_list;
+ dpbp_node = bp->buf_pool.dpbp_node;
+
+ dpbp_disable(&(dpbp_node->dpbp), CMD_PRI_LOW, dpbp_node->token);
+
+ if (h_bp_list == bp) {
+ h_bp_list = h_bp_list->next;
+ } else { /* if it is not the first node */
+ struct dpaa2_bp_list *prev = h_bp_list, *temp;
+ temp = h_bp_list->next;
+ while (temp) {
+ if (temp == bp) {
+ prev->next = temp->next;
+ rte_free(bp);
+ break;
+ }
+ prev = temp;
+ temp = temp->next;
+ }
+ }
+
+ rte_free(mp->pool_data);
+ dpaa2_free_dpbp_dev(dpbp_node);
+}
+
+static void
+rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
+ void * const *obj_table,
+ uint32_t bpid,
+ uint32_t meta_data_size,
+ int count)
+{
+ struct qbman_release_desc releasedesc;
+ struct qbman_swp *swp;
+ int ret;
+ int i, n, retry_count;
+ uint64_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret != 0) {
+ DPAA2_MEMPOOL_ERR(
+ "Failed to allocate IO portal, tid: %d\n",
+ rte_gettid());
+ return;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ /* Create a release descriptor required for releasing
+ * buffers into QBMAN
+ */
+ qbman_release_desc_clear(&releasedesc);
+ qbman_release_desc_set_bpid(&releasedesc, bpid);
+
+ n = count % DPAA2_MBUF_MAX_ACQ_REL;
+ if (unlikely(!n))
+ goto aligned;
+
+ /* convert mbuf to buffers for the remainder */
+ for (i = 0; i < n ; i++) {
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+ bufs[i] = (uint64_t)rte_mempool_virt2iova(obj_table[i])
+ + meta_data_size;
+#else
+ bufs[i] = (uint64_t)obj_table[i] + meta_data_size;
+#endif
+ }
+
+ /* feed them to bman */
+ retry_count = 0;
+ while ((ret = qbman_swp_release(swp, &releasedesc, bufs, n)) ==
+ -EBUSY) {
+ retry_count++;
+ if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
+ DPAA2_MEMPOOL_ERR("bman release retry exceeded, low fbpr?");
+ return;
+ }
+ }
+
+aligned:
+ /* if there are more buffers to free */
+ while (n < count) {
+ /* convert mbuf to buffers */
+ for (i = 0; i < DPAA2_MBUF_MAX_ACQ_REL; i++) {
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+ bufs[i] = (uint64_t)
+ rte_mempool_virt2iova(obj_table[n + i])
+ + meta_data_size;
+#else
+ bufs[i] = (uint64_t)obj_table[n + i] + meta_data_size;
+#endif
+ }
+
+ retry_count = 0;
+ while ((ret = qbman_swp_release(swp, &releasedesc, bufs,
+ DPAA2_MBUF_MAX_ACQ_REL)) == -EBUSY) {
+ retry_count++;
+ if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
+ DPAA2_MEMPOOL_ERR("bman release retry exceeded, low fbpr?");
+ return;
+ }
+ }
+ n += DPAA2_MBUF_MAX_ACQ_REL;
+ }
+}
+
+uint16_t
+rte_dpaa2_mbuf_pool_bpid(struct rte_mempool *mp)
+{
+ struct dpaa2_bp_info *bp_info;
+
+ bp_info = mempool_to_bpinfo(mp);
+ if (!(bp_info->bp_list)) {
+ RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
+ return -ENOMEM;
+ }
+
+ return bp_info->bpid;
+}
+
+struct rte_mbuf *
+rte_dpaa2_mbuf_from_buf_addr(struct rte_mempool *mp, void *buf_addr)
+{
+ struct dpaa2_bp_info *bp_info;
+
+ bp_info = mempool_to_bpinfo(mp);
+ if (!(bp_info->bp_list)) {
+ RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
+ return NULL;
+ }
+
+ return (struct rte_mbuf *)((uint8_t *)buf_addr -
+ bp_info->meta_data_size);
+}
+
+int
+rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
+ void **obj_table, unsigned int count)
+{
+#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
+ static int alloc;
+#endif
+ struct qbman_swp *swp;
+ uint16_t bpid;
+ size_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
+ int i, ret;
+ unsigned int n = 0;
+ struct dpaa2_bp_info *bp_info;
+
+ bp_info = mempool_to_bpinfo(pool);
+
+ if (!(bp_info->bp_list)) {
+ DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
+ return -ENOENT;
+ }
+
+ bpid = bp_info->bpid;
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret != 0) {
+ DPAA2_MEMPOOL_ERR(
+ "Failed to allocate IO portal, tid: %d\n",
+ rte_gettid());
+ return ret;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ while (n < count) {
+		/* Acquire is all-or-nothing, so we acquire in chunks of
+		 * DPAA2_MBUF_MAX_ACQ_REL (7) buffers, then the remainder.
+		 */
+ if ((count - n) > DPAA2_MBUF_MAX_ACQ_REL) {
+ ret = qbman_swp_acquire(swp, bpid, (void *)bufs,
+ DPAA2_MBUF_MAX_ACQ_REL);
+ } else {
+ ret = qbman_swp_acquire(swp, bpid, (void *)bufs,
+ count - n);
+ }
+		/* If fewer buffers than requested are available in the pool,
+		 * qbman_swp_acquire() returns 0.
+		 */
+ if (ret <= 0) {
+ DPAA2_MEMPOOL_DP_DEBUG(
+ "Buffer acquire failed with err code: %d", ret);
+			/* The API expects the exact number of requested bufs,
+			 * so release all buffers allocated so far. */
+ rte_dpaa2_mbuf_release(pool, obj_table, bpid,
+ bp_info->meta_data_size, n);
+ return -ENOBUFS;
+ }
+ /* assigning mbuf from the acquired objects */
+ for (i = 0; (i < ret) && bufs[i]; i++) {
+ DPAA2_MODIFY_IOVA_TO_VADDR(bufs[i], size_t);
+ obj_table[n] = (struct rte_mbuf *)
+ (bufs[i] - bp_info->meta_data_size);
+ DPAA2_MEMPOOL_DP_DEBUG(
+ "Acquired %p address %p from BMAN\n",
+ (void *)bufs[i], (void *)obj_table[n]);
+ n++;
+ }
+ }
+
+#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
+ alloc += n;
+ DPAA2_MEMPOOL_DP_DEBUG("Total = %d , req = %d done = %d\n",
+ alloc, count, n);
+#endif
+ return 0;
+}
+
+static int
+rte_hw_mbuf_free_bulk(struct rte_mempool *pool,
+ void * const *obj_table, unsigned int n)
+{
+ struct dpaa2_bp_info *bp_info;
+
+ bp_info = mempool_to_bpinfo(pool);
+ if (!(bp_info->bp_list)) {
+ DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
+ return -ENOENT;
+ }
+ rte_dpaa2_mbuf_release(pool, obj_table, bp_info->bpid,
+ bp_info->meta_data_size, n);
+
+ return 0;
+}
+
+static unsigned int
+rte_hw_mbuf_get_count(const struct rte_mempool *mp)
+{
+ int ret;
+ unsigned int num_of_bufs = 0;
+ struct dpaa2_bp_info *bp_info;
+ struct dpaa2_dpbp_dev *dpbp_node;
+
+ if (!mp || !mp->pool_data) {
+ DPAA2_MEMPOOL_ERR("Invalid mempool provided");
+ return 0;
+ }
+
+ bp_info = (struct dpaa2_bp_info *)mp->pool_data;
+ dpbp_node = bp_info->bp_list->buf_pool.dpbp_node;
+
+ ret = dpbp_get_num_free_bufs(&dpbp_node->dpbp, CMD_PRI_LOW,
+ dpbp_node->token, &num_of_bufs);
+ if (ret) {
+ DPAA2_MEMPOOL_ERR("Unable to obtain free buf count (err=%d)",
+ ret);
+ return 0;
+ }
+
+ DPAA2_MEMPOOL_DP_DEBUG("Free bufs = %u\n", num_of_bufs);
+
+ return num_of_bufs;
+}
+
+static int
+dpaa2_populate(struct rte_mempool *mp, unsigned int max_objs,
+ void *vaddr, rte_iova_t paddr, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
+{
+ struct rte_memseg_list *msl;
+	/* A memseg list exists only when the memory is not external, so a
+	 * DMA mapping is required only when the memory is provided by the
+	 * user, i.e. external.
+ */
+ msl = rte_mem_virt2memseg_list(vaddr);
+
+ if (!msl) {
+ DPAA2_MEMPOOL_DEBUG("Memsegment is External.\n");
+ rte_fslmc_vfio_mem_dmamap((size_t)vaddr,
+ (size_t)paddr, (size_t)len);
+ }
+ /* Insert entry into the PA->VA Table */
+ dpaax_iova_table_update(paddr, vaddr, len);
+
+ return rte_mempool_op_populate_helper(mp, 0, max_objs, vaddr, paddr,
+ len, obj_cb, obj_cb_arg);
+}
+
+static const struct rte_mempool_ops dpaa2_mpool_ops = {
+ .name = DPAA2_MEMPOOL_OPS_NAME,
+ .alloc = rte_hw_mbuf_create_pool,
+ .free = rte_hw_mbuf_free_pool,
+ .enqueue = rte_hw_mbuf_free_bulk,
+ .dequeue = rte_dpaa2_mbuf_alloc_bulk,
+ .get_count = rte_hw_mbuf_get_count,
+ .populate = dpaa2_populate,
+};
+
+MEMPOOL_REGISTER_OPS(dpaa2_mpool_ops);
+
+RTE_INIT(dpaa2_mempool_init_log)
+{
+ dpaa2_logtype_mempool = rte_log_register("mempool.dpaa2");
+ if (dpaa2_logtype_mempool >= 0)
+ rte_log_set_level(dpaa2_logtype_mempool, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/mempool/dpaa2/dpaa2_hw_mempool.h b/src/spdk/dpdk/drivers/mempool/dpaa2/dpaa2_hw_mempool.h
new file mode 100644
index 000000000..53fa1552d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/dpaa2/dpaa2_hw_mempool.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2016-2019 NXP
+ *
+ */
+
+#ifndef _DPAA2_HW_DPBP_H_
+#define _DPAA2_HW_DPBP_H_
+
+#define DPAA2_MAX_BUF_POOLS 8
+
+#define DPAA2_INVALID_MBUF_SEQN 0
+
+struct buf_pool_cfg {
+ void *addr;
+ /**< The address from where DPAA2 will carve out the buffers */
+ rte_iova_t phys_addr;
+ /**< Physical address of the memory provided in addr */
+ uint32_t num;
+ /**< Number of buffers */
+ uint32_t size;
+ /**< Size including headroom for each buffer */
+ uint16_t align;
+ /**< Buffer alignment (in bytes) */
+ uint16_t bpid;
+ /**< Autogenerated buffer pool ID for internal use */
+};
+
+struct buf_pool {
+ uint32_t size; /**< Size of the Pool */
+ uint32_t num_bufs; /**< Number of buffers in Pool */
+ uint16_t bpid; /**< Pool ID, from pool configuration */
+ uint8_t *h_bpool_mem; /**< Internal context data */
+ struct dpaa2_dpbp_dev *dpbp_node; /**< Hardware context */
+};
+
+/*!
+ * Buffer pool list configuration structure. The user needs to give DPAA2
+ * a valid number of 'num_buf_pools'.
+ */
+struct dpaa2_bp_list_cfg {
+ struct buf_pool_cfg buf_pool; /* Configuration of each buffer pool*/
+};
+
+struct dpaa2_bp_list {
+ struct dpaa2_bp_list *next;
+ struct rte_mempool *mp; /**< DPDK RTE EAL pool reference */
+ int32_t dpaa2_ops_index; /**< Index into DPDK Mempool ops table */
+ struct buf_pool buf_pool;
+};
+
+struct dpaa2_bp_info {
+ uint32_t meta_data_size;
+ uint32_t bpid;
+ struct dpaa2_bp_list *bp_list;
+};
+
+#define mempool_to_bpinfo(mp) ((struct dpaa2_bp_info *)(mp)->pool_data)
+#define mempool_to_bpid(mp) ((mempool_to_bpinfo(mp))->bpid)
+
+extern struct dpaa2_bp_info *rte_dpaa2_bpid_info;
+
+__rte_internal
+int rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
+ void **obj_table, unsigned int count);
+
+#endif /* _DPAA2_HW_DPBP_H_ */
diff --git a/src/spdk/dpdk/drivers/mempool/dpaa2/dpaa2_hw_mempool_logs.h b/src/spdk/dpdk/drivers/mempool/dpaa2/dpaa2_hw_mempool_logs.h
new file mode 100644
index 000000000..986264319
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/dpaa2/dpaa2_hw_mempool_logs.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 NXP
+ */
+
+#ifndef _DPAA2_HW_MEMPOOL_LOGS_H_
+#define _DPAA2_HW_MEMPOOL_LOGS_H_
+
+extern int dpaa2_logtype_mempool;
+
+#define DPAA2_MEMPOOL_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa2_logtype_mempool, \
+ "mempool/dpaa2: " fmt "\n", ##args)
+
+/* Debug logs include function names */
+#define DPAA2_MEMPOOL_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa2_logtype_mempool, \
+ "mempool/dpaa2: %s(): " fmt "\n", __func__, ##args)
+
+#define DPAA2_MEMPOOL_INFO(fmt, args...) \
+ DPAA2_MEMPOOL_LOG(INFO, fmt, ## args)
+#define DPAA2_MEMPOOL_ERR(fmt, args...) \
+ DPAA2_MEMPOOL_LOG(ERR, fmt, ## args)
+#define DPAA2_MEMPOOL_WARN(fmt, args...) \
+ DPAA2_MEMPOOL_LOG(WARNING, fmt, ## args)
+
+/* Datapath (DP) logs, compiled out if the level is lower than the current log level */
+#define DPAA2_MEMPOOL_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, fmt, ## args)
+
+#define DPAA2_MEMPOOL_DP_DEBUG(fmt, args...) \
+ DPAA2_MEMPOOL_DP_LOG(DEBUG, fmt, ## args)
+#define DPAA2_MEMPOOL_DP_INFO(fmt, args...) \
+ DPAA2_MEMPOOL_DP_LOG(INFO, fmt, ## args)
+#define DPAA2_MEMPOOL_DP_WARN(fmt, args...) \
+ DPAA2_MEMPOOL_DP_LOG(WARNING, fmt, ## args)
+
+#endif /* _DPAA2_HW_MEMPOOL_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/mempool/dpaa2/meson.build b/src/spdk/dpdk/drivers/mempool/dpaa2/meson.build
new file mode 100644
index 000000000..c3f479afa
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/dpaa2/meson.build
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if not is_linux
+ build = false
+ reason = 'only supported on linux'
+endif
+
+deps += ['bus_fslmc']
+sources = files('dpaa2_hw_mempool.c')
+
+install_headers('rte_dpaa2_mempool.h')
diff --git a/src/spdk/dpdk/drivers/mempool/dpaa2/rte_dpaa2_mempool.h b/src/spdk/dpdk/drivers/mempool/dpaa2/rte_dpaa2_mempool.h
new file mode 100644
index 000000000..4a22b7c42
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/dpaa2/rte_dpaa2_mempool.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef __RTE_DPAA2_MEMPOOL_H__
+#define __RTE_DPAA2_MEMPOOL_H__
+
+/**
+ * @file
+ *
+ * NXP specific mempool related functions.
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_mempool.h>
+
+/**
+ * Get BPID corresponding to the packet pool
+ *
+ * @param mp
+ * memory pool
+ *
+ * @return
+ * BPID of the buffer pool
+ */
+uint16_t
+rte_dpaa2_mbuf_pool_bpid(struct rte_mempool *mp);
+
+/**
+ * Get MBUF from the corresponding 'buf_addr'
+ *
+ * @param mp
+ * memory pool
+ * @param buf_addr
+ * The 'buf_addr' of the mbuf. This is the start buffer address
+ * of the packet buffer (mbuf).
+ *
+ * @return
+ * - MBUF pointer for success
+ * - NULL in case of error
+ */
+struct rte_mbuf *
+rte_dpaa2_mbuf_from_buf_addr(struct rte_mempool *mp, void *buf_addr);
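+
+/*
+ * Illustrative usage sketch (hypothetical variables): given a configured
+ * DPAA2-backed packet pool 'mp' and a buffer address 'buf_addr', the
+ * helpers above can be used as follows.
+ *
+ *	uint16_t bpid = rte_dpaa2_mbuf_pool_bpid(mp);
+ *	struct rte_mbuf *mb = rte_dpaa2_mbuf_from_buf_addr(mp, buf_addr);
+ */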
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __RTE_DPAA2_MEMPOOL_H__ */
diff --git a/src/spdk/dpdk/drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map b/src/spdk/dpdk/drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map
new file mode 100644
index 000000000..686b02462
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map
@@ -0,0 +1,15 @@
+DPDK_20.0 {
+ global:
+
+ rte_dpaa2_mbuf_from_buf_addr;
+ rte_dpaa2_mbuf_pool_bpid;
+
+ local: *;
+};
+
+INTERNAL {
+ global:
+
+ rte_dpaa2_bpid_info;
+ rte_dpaa2_mbuf_alloc_bulk;
+};
diff --git a/src/spdk/dpdk/drivers/mempool/meson.build b/src/spdk/dpdk/drivers/mempool/meson.build
new file mode 100644
index 000000000..7520e489f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+drivers = ['bucket', 'dpaa', 'dpaa2', 'octeontx', 'octeontx2', 'ring', 'stack']
+std_deps = ['mempool']
+config_flag_fmt = 'RTE_LIBRTE_@0@_MEMPOOL'
+driver_name_fmt = 'rte_mempool_@0@'
diff --git a/src/spdk/dpdk/drivers/mempool/octeontx/Makefile b/src/spdk/dpdk/drivers/mempool/octeontx/Makefile
new file mode 100644
index 000000000..9c840620d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/octeontx/Makefile
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Cavium, Inc
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_mempool_octeontx.a
+
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx/
+
+EXPORT_MAP := rte_mempool_octeontx_version.map
+
+#
+# all sources are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += octeontx_fpavf.c
+SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += rte_mempool_octeontx.c
+
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS_rte_mempool_octeontx.o += -fno-prefetch-loop-arrays
+
+ifeq ($(shell test $(GCC_VERSION) -ge 46 && echo 1), 1)
+CFLAGS_rte_mempool_octeontx.o += -Ofast
+else
+CFLAGS_rte_mempool_octeontx.o += -O3 -ffast-math
+endif
+
+else
+CFLAGS_rte_mempool_octeontx.o += -Ofast
+endif
+
+LDLIBS += -lrte_eal -lrte_mempool -lrte_ring -lrte_mbuf
+LDLIBS += -lrte_bus_pci -lrte_common_octeontx
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/mempool/octeontx/meson.build b/src/spdk/dpdk/drivers/mempool/octeontx/meson.build
new file mode 100644
index 000000000..3baaf7db2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/octeontx/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Cavium, Inc
+
+sources = files('octeontx_fpavf.c',
+ 'rte_mempool_octeontx.c'
+)
+
+deps += ['mbuf', 'bus_pci', 'common_octeontx']
diff --git a/src/spdk/dpdk/drivers/mempool/octeontx/octeontx_fpavf.c b/src/spdk/dpdk/drivers/mempool/octeontx/octeontx_fpavf.c
new file mode 100644
index 000000000..0ff234913
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/octeontx/octeontx_fpavf.c
@@ -0,0 +1,835 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <sys/mman.h>
+
+#include <rte_atomic.h>
+#include <rte_eal.h>
+#include <rte_bus_pci.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+#include <rte_spinlock.h>
+#include <rte_mbuf.h>
+
+#include "octeontx_mbox.h"
+#include "octeontx_fpavf.h"
+
+/* FPA Mbox Message */
+#define IDENTIFY 0x0
+
+#define FPA_CONFIGSET 0x1
+#define FPA_CONFIGGET 0x2
+#define FPA_START_COUNT 0x3
+#define FPA_STOP_COUNT 0x4
+#define FPA_ATTACHAURA 0x5
+#define FPA_DETACHAURA 0x6
+#define FPA_SETAURALVL 0x7
+#define FPA_GETAURALVL 0x8
+
+#define FPA_COPROC 0x1
+
+/* fpa mbox struct */
+struct octeontx_mbox_fpa_cfg {
+ int aid;
+ uint64_t pool_cfg;
+ uint64_t pool_stack_base;
+ uint64_t pool_stack_end;
+ uint64_t aura_cfg;
+};
+
+struct __rte_packed gen_req {
+ uint32_t value;
+};
+
+struct __rte_packed idn_req {
+ uint8_t domain_id;
+};
+
+struct __rte_packed gen_resp {
+ uint16_t domain_id;
+ uint16_t vfid;
+};
+
+struct __rte_packed dcfg_resp {
+ uint8_t sso_count;
+ uint8_t ssow_count;
+ uint8_t fpa_count;
+ uint8_t pko_count;
+ uint8_t tim_count;
+ uint8_t net_port_count;
+ uint8_t virt_port_count;
+};
+
+#define FPA_MAX_POOL 32
+#define FPA_PF_PAGE_SZ 4096
+
+#define FPA_LN_SIZE 128
+#define FPA_ROUND_UP(x, size) \
+	((((unsigned long)(x)) + (size) - 1) & ~((size) - 1))
+#define FPA_OBJSZ_2_CACHE_LINE(sz) (((sz) + RTE_CACHE_LINE_MASK) >> 7)
+#define FPA_CACHE_LINE_2_OBJSZ(sz) ((sz) << 7)
+
+#define POOL_ENA (0x1 << 0)
+#define POOL_DIS (0x0 << 0)
+#define POOL_SET_NAT_ALIGN (0x1 << 1)
+#define POOL_DIS_NAT_ALIGN (0x0 << 1)
+#define POOL_STYPE(x) (((x) & 0x1) << 2)
+#define POOL_LTYPE(x) (((x) & 0x3) << 3)
+#define POOL_BUF_OFFSET(x) (((x) & 0x7fffULL) << 16)
+#define POOL_BUF_SIZE(x) (((x) & 0x7ffULL) << 32)
+
+struct fpavf_res {
+ void *pool_stack_base;
+ void *bar0;
+ uint64_t stack_ln_ptr;
+ uint16_t domain_id;
+ uint16_t vf_id; /* gpool_id */
+ uint16_t sz128; /* Block size in cache lines */
+ bool is_inuse;
+};
+
+struct octeontx_fpadev {
+ rte_spinlock_t lock;
+ uint8_t total_gpool_cnt;
+ struct fpavf_res pool[FPA_VF_MAX];
+};
+
+static struct octeontx_fpadev fpadev;
+
+int octeontx_logtype_fpavf;
+int octeontx_logtype_fpavf_mbox;
+
+RTE_INIT(otx_pool_init_log)
+{
+ octeontx_logtype_fpavf = rte_log_register("pmd.mempool.octeontx");
+ if (octeontx_logtype_fpavf >= 0)
+ rte_log_set_level(octeontx_logtype_fpavf, RTE_LOG_NOTICE);
+}
+
+/* lock is taken by caller */
+static int
+octeontx_fpa_gpool_alloc(unsigned int object_size)
+{
+ uint16_t global_domain = octeontx_get_global_domain();
+ struct fpavf_res *res = NULL;
+ unsigned int sz128;
+ int i;
+
+ sz128 = FPA_OBJSZ_2_CACHE_LINE(object_size);
+
+ for (i = 0; i < FPA_VF_MAX; i++) {
+
+		/* Skip a VF that is not mapped or is already in use */
+ if ((fpadev.pool[i].bar0 == NULL) ||
+ (fpadev.pool[i].is_inuse == true) ||
+ (fpadev.pool[i].domain_id != global_domain))
+ continue;
+
+ res = &fpadev.pool[i];
+
+ RTE_ASSERT(res->domain_id != (uint16_t)~0);
+ RTE_ASSERT(res->vf_id != (uint16_t)~0);
+ RTE_ASSERT(res->stack_ln_ptr != 0);
+
+ if (res->sz128 == 0) {
+ res->sz128 = sz128;
+ fpavf_log_dbg("gpool %d blk_sz %d\n", res->vf_id,
+ sz128);
+
+ return res->vf_id;
+ }
+ }
+
+ return -ENOSPC;
+}
+
+static __rte_always_inline struct fpavf_res *
+octeontx_get_fpavf(uint16_t gpool)
+{
+ uint16_t global_domain = octeontx_get_global_domain();
+ int i;
+
+ for (i = 0; i < FPA_VF_MAX; i++) {
+ if (fpadev.pool[i].domain_id != global_domain)
+ continue;
+ if (fpadev.pool[i].vf_id != gpool)
+ continue;
+
+ return &fpadev.pool[i];
+ }
+
+ return NULL;
+}
+
+/* lock is taken by caller */
+static __rte_always_inline uintptr_t
+octeontx_fpa_gpool2handle(uint16_t gpool)
+{
+ struct fpavf_res *res = NULL;
+
+ RTE_ASSERT(gpool < FPA_VF_MAX);
+ res = octeontx_get_fpavf(gpool);
+ if (res == NULL)
+ return 0;
+
+ return (uintptr_t)res->bar0 | gpool;
+}
+
+static __rte_always_inline bool
+octeontx_fpa_handle_valid(uintptr_t handle)
+{
+ struct fpavf_res *res = NULL;
+ uint8_t gpool;
+ int i;
+ bool ret = false;
+
+ if (unlikely(!handle))
+ return ret;
+
+ /* get the gpool */
+ gpool = octeontx_fpa_bufpool_gpool(handle);
+
+ /* get the bar address */
+ handle &= ~(uint64_t)FPA_GPOOL_MASK;
+ for (i = 0; i < FPA_VF_MAX; i++) {
+ if ((uintptr_t)fpadev.pool[i].bar0 != handle)
+ continue;
+
+ /* validate gpool */
+ if (gpool != fpadev.pool[i].vf_id)
+ return false;
+
+ res = &fpadev.pool[i];
+
+ if (res->sz128 == 0 || res->domain_id == (uint16_t)~0 ||
+ res->stack_ln_ptr == 0)
+ ret = false;
+ else
+ ret = true;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+octeontx_fpapf_pool_setup(unsigned int gpool, unsigned int buf_size,
+ signed short buf_offset, unsigned int max_buf_count)
+{
+ void *memptr = NULL;
+ rte_iova_t phys_addr;
+ unsigned int memsz;
+ struct fpavf_res *fpa = NULL;
+ uint64_t reg;
+ struct octeontx_mbox_hdr hdr;
+ struct dcfg_resp resp;
+ struct octeontx_mbox_fpa_cfg cfg;
+ int ret = -1;
+
+ fpa = octeontx_get_fpavf(gpool);
+ if (fpa == NULL)
+ return -EINVAL;
+
+ memsz = FPA_ROUND_UP(max_buf_count / fpa->stack_ln_ptr, FPA_LN_SIZE) *
+ FPA_LN_SIZE;
+
+ /* Round-up to page size */
+ memsz = (memsz + FPA_PF_PAGE_SZ - 1) & ~(uintptr_t)(FPA_PF_PAGE_SZ-1);
+ memptr = rte_malloc(NULL, memsz, RTE_CACHE_LINE_SIZE);
+ if (memptr == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /* Configure stack */
+ fpa->pool_stack_base = memptr;
+ phys_addr = rte_malloc_virt2iova(memptr);
+
+ buf_size /= FPA_LN_SIZE;
+
+ /* POOL setup */
+ hdr.coproc = FPA_COPROC;
+ hdr.msg = FPA_CONFIGSET;
+ hdr.vfid = fpa->vf_id;
+ hdr.res_code = 0;
+
+ buf_offset /= FPA_LN_SIZE;
+ reg = POOL_BUF_SIZE(buf_size) | POOL_BUF_OFFSET(buf_offset) |
+ POOL_LTYPE(0x2) | POOL_STYPE(0) | POOL_SET_NAT_ALIGN |
+ POOL_ENA;
+
+ cfg.aid = FPA_AURA_IDX(gpool);
+ cfg.pool_cfg = reg;
+ cfg.pool_stack_base = phys_addr;
+ cfg.pool_stack_end = phys_addr + memsz;
+ cfg.aura_cfg = (1 << 9);
+
+ ret = octeontx_mbox_send(&hdr, &cfg,
+ sizeof(struct octeontx_mbox_fpa_cfg),
+ &resp, sizeof(resp));
+ if (ret < 0) {
+ ret = -EACCES;
+ goto err;
+ }
+
+ fpavf_log_dbg(" vfid %d gpool %d aid %d pool_cfg 0x%x pool_stack_base %" PRIx64 " pool_stack_end %" PRIx64" aura_cfg %" PRIx64 "\n",
+ fpa->vf_id, gpool, cfg.aid, (unsigned int)cfg.pool_cfg,
+ cfg.pool_stack_base, cfg.pool_stack_end, cfg.aura_cfg);
+
+	/* The pool is now in use */
+ fpa->is_inuse = true;
+
+err:
+ if (ret < 0)
+ rte_free(memptr);
+
+ return ret;
+}
+
+static int
+octeontx_fpapf_pool_destroy(unsigned int gpool_index)
+{
+ struct octeontx_mbox_hdr hdr;
+ struct dcfg_resp resp;
+ struct octeontx_mbox_fpa_cfg cfg;
+ struct fpavf_res *fpa = NULL;
+ int ret = -1;
+
+ fpa = octeontx_get_fpavf(gpool_index);
+ if (fpa == NULL)
+ return -EINVAL;
+
+ hdr.coproc = FPA_COPROC;
+ hdr.msg = FPA_CONFIGSET;
+ hdr.vfid = fpa->vf_id;
+ hdr.res_code = 0;
+
+ /* reset and free the pool */
+ cfg.aid = 0;
+ cfg.pool_cfg = 0;
+ cfg.pool_stack_base = 0;
+ cfg.pool_stack_end = 0;
+ cfg.aura_cfg = 0;
+
+ ret = octeontx_mbox_send(&hdr, &cfg,
+ sizeof(struct octeontx_mbox_fpa_cfg),
+ &resp, sizeof(resp));
+ if (ret < 0) {
+ ret = -EACCES;
+ goto err;
+ }
+
+ ret = 0;
+err:
+	/* free the pool stack memory in any case */
+ rte_free(fpa->pool_stack_base);
+ fpa->pool_stack_base = NULL;
+ return ret;
+}
+
+static int
+octeontx_fpapf_aura_attach(unsigned int gpool_index)
+{
+ struct octeontx_mbox_hdr hdr;
+ struct dcfg_resp resp;
+ struct octeontx_mbox_fpa_cfg cfg;
+ int ret = 0;
+
+ if (gpool_index >= FPA_MAX_POOL) {
+ ret = -EINVAL;
+ goto err;
+ }
+ hdr.coproc = FPA_COPROC;
+ hdr.msg = FPA_ATTACHAURA;
+ hdr.vfid = gpool_index;
+ hdr.res_code = 0;
+ memset(&cfg, 0x0, sizeof(struct octeontx_mbox_fpa_cfg));
+ cfg.aid = FPA_AURA_IDX(gpool_index);
+
+ ret = octeontx_mbox_send(&hdr, &cfg,
+ sizeof(struct octeontx_mbox_fpa_cfg),
+ &resp, sizeof(resp));
+ if (ret < 0) {
+ fpavf_log_err("Could not attach fpa ");
+ fpavf_log_err("aura %d to pool %d. Err=%d. FuncErr=%d\n",
+ FPA_AURA_IDX(gpool_index), gpool_index, ret,
+ hdr.res_code);
+ ret = -EACCES;
+ goto err;
+ }
+err:
+ return ret;
+}
+
+static int
+octeontx_fpapf_aura_detach(unsigned int gpool_index)
+{
+ struct octeontx_mbox_fpa_cfg cfg = {0};
+ struct octeontx_mbox_hdr hdr = {0};
+ int ret = 0;
+
+ if (gpool_index >= FPA_MAX_POOL) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ cfg.aid = FPA_AURA_IDX(gpool_index);
+ hdr.coproc = FPA_COPROC;
+ hdr.msg = FPA_DETACHAURA;
+ hdr.vfid = gpool_index;
+ ret = octeontx_mbox_send(&hdr, &cfg, sizeof(cfg), NULL, 0);
+ if (ret < 0) {
+ fpavf_log_err("Couldn't detach FPA aura %d Err=%d FuncErr=%d\n",
+ FPA_AURA_IDX(gpool_index), ret,
+ hdr.res_code);
+ ret = -EINVAL;
+ }
+
+err:
+ return ret;
+}
+
+int
+octeontx_fpavf_pool_set_range(uintptr_t handle, unsigned long memsz,
+ void *memva, uint16_t gpool)
+{
+ uint64_t va_end;
+
+ if (unlikely(!handle))
+ return -ENODEV;
+
+ va_end = (uintptr_t)memva + memsz;
+ va_end &= ~RTE_CACHE_LINE_MASK;
+
+ /* VHPOOL setup */
+ fpavf_write64((uintptr_t)memva,
+ (void *)((uintptr_t)handle +
+ FPA_VF_VHPOOL_START_ADDR(gpool)));
+ fpavf_write64(va_end,
+ (void *)((uintptr_t)handle +
+ FPA_VF_VHPOOL_END_ADDR(gpool)));
+ return 0;
+}
+
+static int
+octeontx_fpapf_start_count(uint16_t gpool_index)
+{
+ int ret = 0;
+ struct octeontx_mbox_hdr hdr = {0};
+
+ if (gpool_index >= FPA_MAX_POOL) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ hdr.coproc = FPA_COPROC;
+ hdr.msg = FPA_START_COUNT;
+ hdr.vfid = gpool_index;
+ ret = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
+ if (ret < 0) {
+ fpavf_log_err("Could not start buffer counting for ");
+ fpavf_log_err("FPA pool %d. Err=%d. FuncErr=%d\n",
+ gpool_index, ret, hdr.res_code);
+ ret = -EINVAL;
+ goto err;
+ }
+
+err:
+ return ret;
+}
+
+static __rte_always_inline int
+octeontx_fpavf_free(unsigned int gpool)
+{
+ struct fpavf_res *res = octeontx_get_fpavf(gpool);
+ int ret = 0;
+
+ if (gpool >= FPA_MAX_POOL) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+	/* Mark the pool as free */
+ if (res != NULL)
+ res->is_inuse = false;
+
+err:
+ return ret;
+}
+
+static __rte_always_inline int
+octeontx_gpool_free(uint16_t gpool)
+{
+ struct fpavf_res *res = octeontx_get_fpavf(gpool);
+
+ if (res && res->sz128 != 0) {
+ res->sz128 = 0;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+/*
+ * Return buffer size for a given pool
+ */
+int
+octeontx_fpa_bufpool_block_size(uintptr_t handle)
+{
+ struct fpavf_res *res = NULL;
+ uint8_t gpool;
+
+ if (unlikely(!octeontx_fpa_handle_valid(handle)))
+ return -EINVAL;
+
+ /* get the gpool */
+ gpool = octeontx_fpa_bufpool_gpool(handle);
+ res = octeontx_get_fpavf(gpool);
+ return res ? FPA_CACHE_LINE_2_OBJSZ(res->sz128) : 0;
+}
+
+int
+octeontx_fpa_bufpool_free_count(uintptr_t handle)
+{
+ uint64_t cnt, limit, avail;
+ uint8_t gpool;
+ uint16_t gaura;
+ uintptr_t pool_bar;
+
+ if (unlikely(!octeontx_fpa_handle_valid(handle)))
+ return -EINVAL;
+
+ /* get the gpool */
+ gpool = octeontx_fpa_bufpool_gpool(handle);
+ /* get the aura */
+ gaura = octeontx_fpa_bufpool_gaura(handle);
+
+ /* Get pool bar address from handle */
+ pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;
+
+ cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT(gaura)));
+ limit = fpavf_read64((void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT_LIMIT(gaura)));
+
+ avail = fpavf_read64((void *)((uintptr_t)pool_bar +
+ FPA_VF_VHPOOL_AVAILABLE(gpool)));
+
+ return RTE_MIN(avail, (limit - cnt));
+}
+
+uintptr_t
+octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
+ unsigned int buf_offset, int node_id)
+{
+ unsigned int gpool;
+ unsigned int gaura;
+ uintptr_t gpool_handle;
+ uintptr_t pool_bar;
+ int res;
+
+ RTE_SET_USED(node_id);
+ RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) > OCTEONTX_FPAVF_BUF_OFFSET);
+
+ octeontx_mbox_init();
+ object_size = RTE_CACHE_LINE_ROUNDUP(object_size);
+ if (object_size > FPA_MAX_OBJ_SIZE) {
+ errno = EINVAL;
+ goto error_end;
+ }
+
+ rte_spinlock_lock(&fpadev.lock);
+ res = octeontx_fpa_gpool_alloc(object_size);
+
+ /* Bail if failed */
+ if (unlikely(res < 0)) {
+ errno = res;
+ goto error_unlock;
+ }
+
+ /* get fpavf */
+ gpool = res;
+
+ /* get pool handle */
+ gpool_handle = octeontx_fpa_gpool2handle(gpool);
+ if (!octeontx_fpa_handle_valid(gpool_handle)) {
+ errno = ENOSPC;
+ goto error_gpool_free;
+ }
+
+ /* Get pool bar address from handle */
+ pool_bar = gpool_handle & ~(uint64_t)FPA_GPOOL_MASK;
+
+ res = octeontx_fpapf_pool_setup(gpool, object_size, buf_offset,
+ object_count);
+ if (res < 0) {
+ errno = res;
+ goto error_gpool_free;
+ }
+
+ /* populate AURA fields */
+ res = octeontx_fpapf_aura_attach(gpool);
+ if (res < 0) {
+ errno = res;
+ goto error_pool_destroy;
+ }
+
+ gaura = FPA_AURA_IDX(gpool);
+
+ /* Release lock */
+ rte_spinlock_unlock(&fpadev.lock);
+
+ /* populate AURA registers */
+ fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT(gaura)));
+ fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT_LIMIT(gaura)));
+ fpavf_write64(object_count + 1, (void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));
+
+ octeontx_fpapf_start_count(gpool);
+
+ return gpool_handle;
+
+error_pool_destroy:
+ octeontx_fpavf_free(gpool);
+ octeontx_fpapf_pool_destroy(gpool);
+error_gpool_free:
+ octeontx_gpool_free(gpool);
+error_unlock:
+ rte_spinlock_unlock(&fpadev.lock);
+error_end:
+ return (uintptr_t)NULL;
+}
+
+/*
+ * Destroy a buffer pool.
+ */
+int
+octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
+{
+ void **node, **curr, *head = NULL;
+ uint64_t sz;
+ uint64_t cnt, avail;
+ uint8_t gpool;
+ uint16_t gaura;
+ uintptr_t pool_bar;
+ int ret;
+
+ RTE_SET_USED(node_id);
+
+ /* Wait for all outstanding writes to be committed */
+ rte_smp_wmb();
+
+ if (unlikely(!octeontx_fpa_handle_valid(handle)))
+ return -EINVAL;
+
+ /* get the pool */
+ gpool = octeontx_fpa_bufpool_gpool(handle);
+ /* get the aura */
+ gaura = octeontx_fpa_bufpool_gaura(handle);
+
+ /* Get pool bar address from handle */
+ pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;
+
+ /* Check for no outstanding buffers */
+ cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT(gaura)));
+ if (cnt) {
+		fpavf_log_dbg("buffers still exist in pool, cnt %" PRId64 "\n", cnt);
+ return -EBUSY;
+ }
+
+ rte_spinlock_lock(&fpadev.lock);
+
+ avail = fpavf_read64((void *)((uintptr_t)pool_bar +
+ FPA_VF_VHPOOL_AVAILABLE(gpool)));
+
+ /* Prepare to empty the entire POOL */
+ fpavf_write64(avail, (void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT_LIMIT(gaura)));
+ fpavf_write64(avail + 1, (void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));
+
+ /* Empty the pool */
+ /* Invalidate the POOL */
+ octeontx_gpool_free(gpool);
+
+ /* Process all buffers in the pool */
+ while (avail--) {
+
+ /* Yank a buffer from the pool */
+ node = (void *)(uintptr_t)
+ fpavf_read64((void *)
+ (pool_bar + FPA_VF_VHAURA_OP_ALLOC(gaura)));
+
+ if (node == NULL) {
+ fpavf_log_err("GAURA[%u] missing %" PRIx64 " buf\n",
+ gaura, avail);
+ break;
+ }
+
+		/* Insert it into an ordered linked list */
+ for (curr = &head; curr[0] != NULL; curr = curr[0]) {
+ if ((uintptr_t)node <= (uintptr_t)curr[0])
+ break;
+ }
+ node[0] = curr[0];
+ curr[0] = node;
+ }
+
+	/* Verify that the linked list forms a perfect series */
+ sz = octeontx_fpa_bufpool_block_size(handle) << 7;
+ for (curr = head; curr != NULL && curr[0] != NULL;
+ curr = curr[0]) {
+ if (curr == curr[0] ||
+ ((uintptr_t)curr != ((uintptr_t)curr[0] - sz))) {
+ fpavf_log_err("POOL# %u buf sequence err (%p vs. %p)\n",
+ gpool, curr, curr[0]);
+ }
+ }
+
+ /* Disable pool operation */
+ fpavf_write64(~0ul, (void *)((uintptr_t)pool_bar +
+ FPA_VF_VHPOOL_START_ADDR(gpool)));
+ fpavf_write64(~0ul, (void *)((uintptr_t)pool_bar +
+ FPA_VF_VHPOOL_END_ADDR(gpool)));
+
+ (void)octeontx_fpapf_pool_destroy(gpool);
+
+ /* Deactivate the AURA */
+ fpavf_write64(0, (void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT_LIMIT(gaura)));
+ fpavf_write64(0, (void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));
+
+ ret = octeontx_fpapf_aura_detach(gpool);
+ if (ret) {
+		fpavf_log_err("Failed to detach gaura of gpool %u, error code=%d\n",
+ gpool, ret);
+ }
+
+ /* Free VF */
+ (void)octeontx_fpavf_free(gpool);
+
+ rte_spinlock_unlock(&fpadev.lock);
+ return 0;
+}
+
+static void
+octeontx_fpavf_setup(void)
+{
+ uint8_t i;
+ static bool init_once;
+
+ if (!init_once) {
+ rte_spinlock_init(&fpadev.lock);
+ fpadev.total_gpool_cnt = 0;
+
+ for (i = 0; i < FPA_VF_MAX; i++) {
+
+ fpadev.pool[i].domain_id = ~0;
+ fpadev.pool[i].stack_ln_ptr = 0;
+ fpadev.pool[i].sz128 = 0;
+ fpadev.pool[i].bar0 = NULL;
+ fpadev.pool[i].pool_stack_base = NULL;
+ fpadev.pool[i].is_inuse = false;
+ }
+ init_once = 1;
+ }
+}
+
+static int
+octeontx_fpavf_identify(void *bar0)
+{
+ uint64_t val;
+ uint16_t domain_id;
+ uint16_t vf_id;
+ uint64_t stack_ln_ptr;
+ static uint16_t vf_idx;
+
+ val = fpavf_read64((void *)((uintptr_t)bar0 +
+ FPA_VF_VHAURA_CNT_THRESHOLD(0)));
+
+ domain_id = (val >> 8) & 0xffff;
+ vf_id = (val >> 24) & 0xffff;
+
+ stack_ln_ptr = fpavf_read64((void *)((uintptr_t)bar0 +
+ FPA_VF_VHPOOL_THRESHOLD(0)));
+ if (vf_idx >= FPA_VF_MAX) {
+ fpavf_log_err("vf_id(%d) greater than max vf (32)\n", vf_id);
+ return -E2BIG;
+ }
+
+ fpadev.pool[vf_idx].domain_id = domain_id;
+ fpadev.pool[vf_idx].vf_id = vf_id;
+ fpadev.pool[vf_idx].bar0 = bar0;
+ fpadev.pool[vf_idx].stack_ln_ptr = stack_ln_ptr;
+
+ /* SUCCESS */
+ return vf_idx++;
+}
+
+/* FPAVF pcie device aka mempool probe */
+static int
+fpavf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+ uint8_t *idreg;
+ int res;
+ struct fpavf_res *fpa = NULL;
+
+ RTE_SET_USED(pci_drv);
+ RTE_SET_USED(fpa);
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ if (pci_dev->mem_resource[0].addr == NULL) {
+ fpavf_log_err("Empty bars %p ", pci_dev->mem_resource[0].addr);
+ return -ENODEV;
+ }
+ idreg = pci_dev->mem_resource[0].addr;
+
+ octeontx_fpavf_setup();
+
+ res = octeontx_fpavf_identify(idreg);
+ if (res < 0)
+ return -1;
+
+ fpa = &fpadev.pool[res];
+ fpadev.total_gpool_cnt++;
+ rte_wmb();
+
+ fpavf_log_dbg("total_fpavfs %d bar0 %p domain %d vf %d stk_ln_ptr 0x%x",
+ fpadev.total_gpool_cnt, fpa->bar0, fpa->domain_id,
+ fpa->vf_id, (unsigned int)fpa->stack_ln_ptr);
+
+ return 0;
+}
+
+static const struct rte_pci_id pci_fpavf_map[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_OCTEONTX_FPA_VF)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static struct rte_pci_driver pci_fpavf = {
+ .id_table = pci_fpavf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
+ .probe = fpavf_probe,
+};
+
+RTE_PMD_REGISTER_PCI(octeontx_fpavf, pci_fpavf);
diff --git a/src/spdk/dpdk/drivers/mempool/octeontx/octeontx_fpavf.h b/src/spdk/dpdk/drivers/mempool/octeontx/octeontx_fpavf.h
new file mode 100644
index 000000000..e27c4377e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/octeontx/octeontx_fpavf.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef __OCTEONTX_FPAVF_H__
+#define __OCTEONTX_FPAVF_H__
+
+#include <rte_io.h>
+#include "octeontx_pool_logs.h"
+
+/* fpa pool Vendor ID and Device ID */
+#define PCI_VENDOR_ID_CAVIUM 0x177D
+#define PCI_DEVICE_ID_OCTEONTX_FPA_VF 0xA053
+
+#define FPA_VF_MAX 32
+#define FPA_GPOOL_MASK (FPA_VF_MAX-1)
+#define FPA_GAURA_SHIFT 4
+
+/* FPA VF register offsets */
+#define FPA_VF_INT(x) (0x200ULL | ((x) << 22))
+#define FPA_VF_INT_W1S(x) (0x210ULL | ((x) << 22))
+#define FPA_VF_INT_ENA_W1S(x) (0x220ULL | ((x) << 22))
+#define FPA_VF_INT_ENA_W1C(x) (0x230ULL | ((x) << 22))
+
+#define FPA_VF_VHPOOL_AVAILABLE(vhpool) (0x04150 | ((vhpool)&0x0))
+#define FPA_VF_VHPOOL_THRESHOLD(vhpool) (0x04160 | ((vhpool)&0x0))
+#define FPA_VF_VHPOOL_START_ADDR(vhpool) (0x04200 | ((vhpool)&0x0))
+#define FPA_VF_VHPOOL_END_ADDR(vhpool) (0x04210 | ((vhpool)&0x0))
+
+#define FPA_VF_VHAURA_CNT(vaura) (0x20120 | ((vaura)&0xf)<<18)
+#define FPA_VF_VHAURA_CNT_ADD(vaura) (0x20128 | ((vaura)&0xf)<<18)
+#define FPA_VF_VHAURA_CNT_LIMIT(vaura) (0x20130 | ((vaura)&0xf)<<18)
+#define FPA_VF_VHAURA_CNT_THRESHOLD(vaura) (0x20140 | ((vaura)&0xf)<<18)
+#define FPA_VF_VHAURA_OP_ALLOC(vaura) (0x30000 | ((vaura)&0xf)<<18)
+#define FPA_VF_VHAURA_OP_FREE(vaura) (0x38000 | ((vaura)&0xf)<<18)
+
+#define FPA_VF_FREE_ADDRS_S(x, y, z) \
+ ((x) | (((y) & 0x1ff) << 3) | ((((z) & 1)) << 14))
+
+#define FPA_AURA_IDX(gpool)	((gpool) << FPA_GAURA_SHIFT)
+/* FPA VF register offsets from VF_BAR4, size 2 MByte */
+#define FPA_VF_MSIX_VEC_ADDR 0x00000
+#define FPA_VF_MSIX_VEC_CTL 0x00008
+#define FPA_VF_MSIX_PBA 0xF0000
+
+#define FPA_VF0_APERTURE_SHIFT 22
+#define FPA_AURA_SET_SIZE 16
+
+#define FPA_MAX_OBJ_SIZE (128 * 1024)
+#define OCTEONTX_FPAVF_BUF_OFFSET 128
+
+/*
+ * In the Cavium OCTEON TX SoC, all accesses to the device registers are
+ * implicitly strongly ordered, so the relaxed versions of the I/O operations
+ * are safe to use without any I/O memory barriers.
+ */
+#define fpavf_read64 rte_read64_relaxed
+#define fpavf_write64 rte_write64_relaxed
+
+/* ARM64 specific functions */
+#if defined(RTE_ARCH_ARM64)
+#define fpavf_load_pair(val0, val1, addr) ({ \
+ asm volatile( \
+ "ldp %x[x0], %x[x1], [%x[p1]]" \
+ :[x0]"=r"(val0), [x1]"=r"(val1) \
+ :[p1]"r"(addr) \
+ ); })
+
+#define fpavf_store_pair(val0, val1, addr) ({ \
+ asm volatile( \
+ "stp %x[x0], %x[x1], [%x[p1]]" \
+ ::[x0]"r"(val0), [x1]"r"(val1), [p1]"r"(addr) \
+ ); })
+#else /* Unoptimized functions for building on non-arm64 architectures */
+
+#define fpavf_load_pair(val0, val1, addr) \
+do { \
+ val0 = rte_read64(addr); \
+ val1 = rte_read64(((uint8_t *)addr) + 8); \
+} while (0)
+
+#define fpavf_store_pair(val0, val1, addr) \
+do { \
+ rte_write64(val0, addr); \
+ rte_write64(val1, (((uint8_t *)addr) + 8)); \
+} while (0)
+#endif
+
+uintptr_t
+octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
+ unsigned int buf_offset, int node);
+int
+octeontx_fpavf_pool_set_range(uintptr_t handle, unsigned long memsz,
+ void *memva, uint16_t gpool);
+int
+octeontx_fpa_bufpool_destroy(uintptr_t handle, int node);
+int
+octeontx_fpa_bufpool_block_size(uintptr_t handle);
+int
+octeontx_fpa_bufpool_free_count(uintptr_t handle);
+
+static __rte_always_inline uint8_t
+octeontx_fpa_bufpool_gpool(uintptr_t handle)
+{
+ return (uint8_t)handle & FPA_GPOOL_MASK;
+}
+
+static __rte_always_inline uint16_t
+octeontx_fpa_bufpool_gaura(uintptr_t handle)
+{
+ return octeontx_fpa_bufpool_gpool(handle) << FPA_GAURA_SHIFT;
+}
+
+#endif /* __OCTEONTX_FPAVF_H__ */
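As the inline helpers above show, an FPA bufpool handle packs the gpool index into the low bits of the VF BAR0 address (FPA_GPOOL_MASK covers indices 0-31) and derives the aura index by shifting the gpool by FPA_GAURA_SHIFT. A hedged sketch of the decode, mirroring those helpers (the handle is assumed to come from octeontx_fpa_bufpool_create()):

	#include <stdint.h>
	#include "octeontx_fpavf.h"

	/* Illustrative decode of a handle built as (bar0 | gpool). */
	static void
	decode_fpa_handle(uintptr_t handle, uint8_t *gpool, uint16_t *gaura,
			  uintptr_t *bar0)
	{
		*gpool = (uint8_t)handle & FPA_GPOOL_MASK;       /* low 5 bits */
		*gaura = (uint16_t)(*gpool << FPA_GAURA_SHIFT);  /* aura set index */
		*bar0  = handle & ~(uintptr_t)FPA_GPOOL_MASK;    /* VF BAR0 base */
	}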
diff --git a/src/spdk/dpdk/drivers/mempool/octeontx/octeontx_pool_logs.h b/src/spdk/dpdk/drivers/mempool/octeontx/octeontx_pool_logs.h
new file mode 100644
index 000000000..7b4e1b387
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/octeontx/octeontx_pool_logs.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef __OCTEONTX_POOL_LOGS_H__
+#define __OCTEONTX_POOL_LOGS_H__
+
+#include <rte_debug.h>
+
+#define FPAVF_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, octeontx_logtype_fpavf,\
+ "%s() line %u: " fmt "\n", __func__, __LINE__, ## args)
+
+#define fpavf_log_info(fmt, ...) FPAVF_LOG(INFO, fmt, ##__VA_ARGS__)
+#define fpavf_log_dbg(fmt, ...) FPAVF_LOG(DEBUG, fmt, ##__VA_ARGS__)
+#define fpavf_log_err(fmt, ...) FPAVF_LOG(ERR, fmt, ##__VA_ARGS__)
+#define fpavf_func_trace fpavf_log_dbg
+
+
+extern int octeontx_logtype_fpavf;
+
+#endif /* __OCTEONTX_POOL_LOGS_H__*/
diff --git a/src/spdk/dpdk/drivers/mempool/octeontx/rte_mempool_octeontx.c b/src/spdk/dpdk/drivers/mempool/octeontx/rte_mempool_octeontx.c
new file mode 100644
index 000000000..bd0070020
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/octeontx/rte_mempool_octeontx.c
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <stdio.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+
+#include "octeontx_fpavf.h"
+
+static int
+octeontx_fpavf_alloc(struct rte_mempool *mp)
+{
+ uintptr_t pool;
+ uint32_t memseg_count = mp->size;
+ uint32_t object_size;
+ int rc = 0;
+
+ object_size = mp->elt_size + mp->header_size + mp->trailer_size;
+
+ pool = octeontx_fpa_bufpool_create(object_size, memseg_count,
+ OCTEONTX_FPAVF_BUF_OFFSET,
+ mp->socket_id);
+ rc = octeontx_fpa_bufpool_block_size(pool);
+ if (rc < 0)
+ goto _end;
+
+ if ((uint32_t)rc != object_size)
+ fpavf_log_err("buffer size mismatch: %d instead of %u\n",
+ rc, object_size);
+
+ fpavf_log_info("Pool created %p with .. ", (void *)pool);
+ fpavf_log_info("obj_sz %d, cnt %d\n", object_size, memseg_count);
+
+ /* assign pool handle to mempool */
+ mp->pool_id = (uint64_t)pool;
+
+ return 0;
+
+_end:
+ return rc;
+}
+
+static void
+octeontx_fpavf_free(struct rte_mempool *mp)
+{
+ uintptr_t pool;
+ pool = (uintptr_t)mp->pool_id;
+
+ octeontx_fpa_bufpool_destroy(pool, mp->socket_id);
+}
+
+static __rte_always_inline void *
+octeontx_fpa_bufpool_alloc(uintptr_t handle)
+{
+ return (void *)(uintptr_t)fpavf_read64((void *)(handle +
+ FPA_VF_VHAURA_OP_ALLOC(0)));
+}
+
+static __rte_always_inline void
+octeontx_fpa_bufpool_free(uintptr_t handle, void *buf)
+{
+ uint64_t free_addr = FPA_VF_FREE_ADDRS_S(FPA_VF_VHAURA_OP_FREE(0),
+ 0 /* DWB */, 1 /* FABS */);
+
+ fpavf_write64((uintptr_t)buf, (void *)(uintptr_t)(handle + free_addr));
+}
+
+static int
+octeontx_fpavf_enqueue(struct rte_mempool *mp, void * const *obj_table,
+ unsigned int n)
+{
+ uintptr_t pool;
+ unsigned int index;
+
+ pool = (uintptr_t)mp->pool_id;
+ /* Get pool bar address from handle */
+ pool &= ~(uint64_t)FPA_GPOOL_MASK;
+ for (index = 0; index < n; index++, obj_table++)
+ octeontx_fpa_bufpool_free(pool, *obj_table);
+
+ return 0;
+}
+
+static int
+octeontx_fpavf_dequeue(struct rte_mempool *mp, void **obj_table,
+ unsigned int n)
+{
+ unsigned int index;
+ uintptr_t pool;
+ void *obj;
+
+ pool = (uintptr_t)mp->pool_id;
+ /* Get pool bar address from handle */
+ pool &= ~(uint64_t)FPA_GPOOL_MASK;
+ for (index = 0; index < n; index++, obj_table++) {
+ obj = octeontx_fpa_bufpool_alloc(pool);
+ if (obj == NULL) {
+			/*
+			 * Failed to allocate the requested number of objects
+			 * from the pool. The current pool implementation
+			 * requires that the entire request either completes
+			 * or fails as a whole.
+			 * Free the already allocated buffers back to the pool.
+			 */
+ for (; index > 0; index--) {
+ obj_table--;
+ octeontx_fpa_bufpool_free(pool, *obj_table);
+ }
+ return -ENOMEM;
+ }
+ *obj_table = obj;
+ }
+
+ return 0;
+}
+
+static unsigned int
+octeontx_fpavf_get_count(const struct rte_mempool *mp)
+{
+ uintptr_t pool;
+
+ pool = (uintptr_t)mp->pool_id;
+
+ return octeontx_fpa_bufpool_free_count(pool);
+}
+
+static ssize_t
+octeontx_fpavf_calc_mem_size(const struct rte_mempool *mp,
+ uint32_t obj_num, uint32_t pg_shift,
+ size_t *min_chunk_size, size_t *align)
+{
+ ssize_t mem_size;
+ size_t total_elt_sz;
+
+ /* Need space for one more obj on each chunk to fulfill
+ * alignment requirements.
+ */
+ total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
+ mem_size = rte_mempool_op_calc_mem_size_helper(mp, obj_num, pg_shift,
+ total_elt_sz, min_chunk_size,
+ align);
+ if (mem_size >= 0) {
+ /*
+ * Memory area which contains objects must be physically
+ * contiguous.
+ */
+ *min_chunk_size = mem_size;
+ }
+
+ return mem_size;
+}
+
+static int
+octeontx_fpavf_populate(struct rte_mempool *mp, unsigned int max_objs,
+ void *vaddr, rte_iova_t iova, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
+{
+ size_t total_elt_sz;
+ size_t off;
+ uint8_t gpool;
+ uintptr_t pool_bar;
+ int ret;
+
+ if (iova == RTE_BAD_IOVA)
+ return -EINVAL;
+
+ total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
+
+ /* align object start address to a multiple of total_elt_sz */
+ off = total_elt_sz - ((((uintptr_t)vaddr - 1) % total_elt_sz) + 1);
+
+ if (len < off)
+ return -EINVAL;
+
+ vaddr = (char *)vaddr + off;
+ iova += off;
+ len -= off;
+
+ gpool = octeontx_fpa_bufpool_gpool(mp->pool_id);
+ pool_bar = mp->pool_id & ~(uint64_t)FPA_GPOOL_MASK;
+
+ ret = octeontx_fpavf_pool_set_range(pool_bar, len, vaddr, gpool);
+ if (ret < 0)
+ return ret;
+
+ return rte_mempool_op_populate_helper(mp,
+ RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ,
+ max_objs, vaddr, iova, len,
+ obj_cb, obj_cb_arg);
+}
+
+static struct rte_mempool_ops octeontx_fpavf_ops = {
+ .name = "octeontx_fpavf",
+ .alloc = octeontx_fpavf_alloc,
+ .free = octeontx_fpavf_free,
+ .enqueue = octeontx_fpavf_enqueue,
+ .dequeue = octeontx_fpavf_dequeue,
+ .get_count = octeontx_fpavf_get_count,
+ .calc_mem_size = octeontx_fpavf_calc_mem_size,
+ .populate = octeontx_fpavf_populate,
+};
+
+MEMPOOL_REGISTER_OPS(octeontx_fpavf_ops);
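MEMPOOL_REGISTER_OPS() only registers the handler under the name "octeontx_fpavf"; a pool still has to be bound to it through the generic mempool API. A hedged sketch of how an application (or a platform default set via rte_mbuf_set_platform_mempool_ops()) could do so, with arbitrary example sizes:

	#include <rte_memory.h>
	#include <rte_mempool.h>

	static struct rte_mempool *
	create_fpavf_pool(void)
	{
		struct rte_mempool *mp;

		/* 8192 elements of 2048 bytes, no per-lcore cache. */
		mp = rte_mempool_create_empty("example_pool", 8192, 2048,
					      0, 0, SOCKET_ID_ANY, 0);
		if (mp == NULL)
			return NULL;

		if (rte_mempool_set_ops_byname(mp, "octeontx_fpavf", NULL) != 0 ||
		    rte_mempool_populate_default(mp) < 0) {
			rte_mempool_free(mp);
			return NULL;
		}
		return mp;
	}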
diff --git a/src/spdk/dpdk/drivers/mempool/octeontx/rte_mempool_octeontx_version.map b/src/spdk/dpdk/drivers/mempool/octeontx/rte_mempool_octeontx_version.map
new file mode 100644
index 000000000..f9f17e4f6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/octeontx/rte_mempool_octeontx_version.map
@@ -0,0 +1,3 @@
+DPDK_20.0 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/mempool/octeontx2/Makefile b/src/spdk/dpdk/drivers/mempool/octeontx2/Makefile
new file mode 100644
index 000000000..3828219b1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/octeontx2/Makefile
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(C) 2019 Marvell International Ltd.
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_mempool_octeontx2.a
+
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx2
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx2
+CFLAGS += -I$(RTE_SDK)/drivers/bus/pci
+CFLAGS += -O3
+
+ifneq ($(CONFIG_RTE_ARCH_64),y)
+CFLAGS += -Wno-int-to-pointer-cast
+CFLAGS += -Wno-pointer-to-int-cast
+ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
+CFLAGS += -diag-disable 2259
+endif
+endif
+
+EXPORT_MAP := rte_mempool_octeontx2_version.map
+
+#
+# all sources are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX2_MEMPOOL) += \
+ otx2_mempool_ops.c \
+ otx2_mempool.c \
+ otx2_mempool_irq.c \
+ otx2_mempool_debug.c
+
+LDLIBS += -lrte_eal -lrte_mempool -lrte_mbuf
+LDLIBS += -lrte_common_octeontx2 -lrte_kvargs -lrte_bus_pci
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/mempool/octeontx2/meson.build b/src/spdk/dpdk/drivers/mempool/octeontx2/meson.build
new file mode 100644
index 000000000..9fde40f0e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/octeontx2/meson.build
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(C) 2019 Marvell International Ltd.
+#
+
+sources = files('otx2_mempool_ops.c',
+ 'otx2_mempool.c',
+ 'otx2_mempool_irq.c',
+ 'otx2_mempool_debug.c'
+ )
+
+extra_flags = []
+# This integrated controller runs only on an arm64 machine, so remove 32-bit warnings
+if not dpdk_conf.get('RTE_ARCH_64')
+ extra_flags += ['-Wno-int-to-pointer-cast', '-Wno-pointer-to-int-cast']
+endif
+
+foreach flag: extra_flags
+ if cc.has_argument(flag)
+ cflags += flag
+ endif
+endforeach
+
+deps += ['eal', 'mbuf', 'kvargs', 'bus_pci', 'common_octeontx2', 'mempool']
diff --git a/src/spdk/dpdk/drivers/mempool/octeontx2/otx2_mempool.c b/src/spdk/dpdk/drivers/mempool/octeontx2/otx2_mempool.c
new file mode 100644
index 000000000..fb630fecf
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/octeontx2/otx2_mempool.c
@@ -0,0 +1,457 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <rte_atomic.h>
+#include <rte_bus_pci.h>
+#include <rte_common.h>
+#include <rte_eal.h>
+#include <rte_io.h>
+#include <rte_kvargs.h>
+#include <rte_malloc.h>
+#include <rte_mbuf_pool_ops.h>
+#include <rte_pci.h>
+
+#include "otx2_common.h"
+#include "otx2_dev.h"
+#include "otx2_mempool.h"
+
+#define OTX2_NPA_DEV_NAME RTE_STR(otx2_npa_dev_)
+#define OTX2_NPA_DEV_NAME_LEN (sizeof(OTX2_NPA_DEV_NAME) + PCI_PRI_STR_SIZE)
+
+static inline int
+npa_lf_alloc(struct otx2_npa_lf *lf)
+{
+ struct otx2_mbox *mbox = lf->mbox;
+ struct npa_lf_alloc_req *req;
+ struct npa_lf_alloc_rsp *rsp;
+ int rc;
+
+ req = otx2_mbox_alloc_msg_npa_lf_alloc(mbox);
+ req->aura_sz = lf->aura_sz;
+ req->nr_pools = lf->nr_pools;
+
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return NPA_LF_ERR_ALLOC;
+
+ lf->stack_pg_ptrs = rsp->stack_pg_ptrs;
+ lf->stack_pg_bytes = rsp->stack_pg_bytes;
+ lf->qints = rsp->qints;
+
+ return 0;
+}
+
+static int
+npa_lf_free(struct otx2_mbox *mbox)
+{
+ otx2_mbox_alloc_msg_npa_lf_free(mbox);
+
+ return otx2_mbox_process(mbox);
+}
+
+static int
+npa_lf_init(struct otx2_npa_lf *lf, uintptr_t base, uint8_t aura_sz,
+ uint32_t nr_pools, struct otx2_mbox *mbox)
+{
+ uint32_t i, bmp_sz;
+ int rc;
+
+ /* Sanity checks */
+ if (!lf || !base || !mbox || !nr_pools)
+ return NPA_LF_ERR_PARAM;
+
+ if (base & AURA_ID_MASK)
+ return NPA_LF_ERR_BASE_INVALID;
+
+ if (aura_sz == NPA_AURA_SZ_0 || aura_sz >= NPA_AURA_SZ_MAX)
+ return NPA_LF_ERR_PARAM;
+
+ memset(lf, 0x0, sizeof(*lf));
+ lf->base = base;
+ lf->aura_sz = aura_sz;
+ lf->nr_pools = nr_pools;
+ lf->mbox = mbox;
+
+ rc = npa_lf_alloc(lf);
+ if (rc)
+ goto exit;
+
+ bmp_sz = rte_bitmap_get_memory_footprint(nr_pools);
+
+ /* Allocate memory for bitmap */
+ lf->npa_bmp_mem = rte_zmalloc("npa_bmp_mem", bmp_sz,
+ RTE_CACHE_LINE_SIZE);
+ if (lf->npa_bmp_mem == NULL) {
+ rc = -ENOMEM;
+ goto lf_free;
+ }
+
+ /* Initialize pool resource bitmap array */
+ lf->npa_bmp = rte_bitmap_init(nr_pools, lf->npa_bmp_mem, bmp_sz);
+ if (lf->npa_bmp == NULL) {
+ rc = -EINVAL;
+ goto bmap_mem_free;
+ }
+
+ /* Mark all pools available */
+ for (i = 0; i < nr_pools; i++)
+ rte_bitmap_set(lf->npa_bmp, i);
+
+ /* Allocate memory for qint context */
+ lf->npa_qint_mem = rte_zmalloc("npa_qint_mem",
+ sizeof(struct otx2_npa_qint) * nr_pools, 0);
+ if (lf->npa_qint_mem == NULL) {
+ rc = -ENOMEM;
+ goto bmap_free;
+ }
+
+	/* Allocate memory for npa_aura_lim */
+ lf->aura_lim = rte_zmalloc("npa_aura_lim_mem",
+ sizeof(struct npa_aura_lim) * nr_pools, 0);
+ if (lf->aura_lim == NULL) {
+ rc = -ENOMEM;
+ goto qint_free;
+ }
+
+ /* Init aura start & end limits */
+ for (i = 0; i < nr_pools; i++) {
+ lf->aura_lim[i].ptr_start = UINT64_MAX;
+ lf->aura_lim[i].ptr_end = 0x0ull;
+ }
+
+ return 0;
+
+qint_free:
+ rte_free(lf->npa_qint_mem);
+bmap_free:
+ rte_bitmap_free(lf->npa_bmp);
+bmap_mem_free:
+ rte_free(lf->npa_bmp_mem);
+lf_free:
+ npa_lf_free(lf->mbox);
+exit:
+ return rc;
+}
+
+static int
+npa_lf_fini(struct otx2_npa_lf *lf)
+{
+ if (!lf)
+ return NPA_LF_ERR_PARAM;
+
+ rte_free(lf->aura_lim);
+ rte_free(lf->npa_qint_mem);
+ rte_bitmap_free(lf->npa_bmp);
+ rte_free(lf->npa_bmp_mem);
+
+ return npa_lf_free(lf->mbox);
+
+}
+
+static inline uint32_t
+otx2_aura_size_to_u32(uint8_t val)
+{
+ if (val == NPA_AURA_SZ_0)
+ return 128;
+ if (val >= NPA_AURA_SZ_MAX)
+ return BIT_ULL(20);
+
+ return 1 << (val + 6);
+}
+
+static int
+parse_max_pools(const char *key, const char *value, void *extra_args)
+{
+ RTE_SET_USED(key);
+ uint32_t val;
+
+ val = atoi(value);
+ if (val < otx2_aura_size_to_u32(NPA_AURA_SZ_128))
+ val = 128;
+ if (val > otx2_aura_size_to_u32(NPA_AURA_SZ_1M))
+ val = BIT_ULL(20);
+
+ *(uint8_t *)extra_args = rte_log2_u32(val) - 6;
+ return 0;
+}
+
+#define OTX2_MAX_POOLS "max_pools"
+
+static uint8_t
+otx2_parse_aura_size(struct rte_devargs *devargs)
+{
+ uint8_t aura_sz = NPA_AURA_SZ_128;
+ struct rte_kvargs *kvlist;
+
+ if (devargs == NULL)
+ goto exit;
+ kvlist = rte_kvargs_parse(devargs->args, NULL);
+ if (kvlist == NULL)
+ goto exit;
+
+ rte_kvargs_process(kvlist, OTX2_MAX_POOLS, &parse_max_pools, &aura_sz);
+ otx2_parse_common_devargs(kvlist);
+ rte_kvargs_free(kvlist);
+exit:
+ return aura_sz;
+}
+
+static inline int
+npa_lf_attach(struct otx2_mbox *mbox)
+{
+ struct rsrc_attach_req *req;
+
+ req = otx2_mbox_alloc_msg_attach_resources(mbox);
+ req->npalf = true;
+
+ return otx2_mbox_process(mbox);
+}
+
+static inline int
+npa_lf_detach(struct otx2_mbox *mbox)
+{
+ struct rsrc_detach_req *req;
+
+ req = otx2_mbox_alloc_msg_detach_resources(mbox);
+ req->npalf = true;
+
+ return otx2_mbox_process(mbox);
+}
+
+static inline int
+npa_lf_get_msix_offset(struct otx2_mbox *mbox, uint16_t *npa_msixoff)
+{
+ struct msix_offset_rsp *msix_rsp;
+ int rc;
+
+	/* Get the NPA MSIX vector offset */
+ otx2_mbox_alloc_msg_msix_offset(mbox);
+
+ rc = otx2_mbox_process_msg(mbox, (void *)&msix_rsp);
+
+ *npa_msixoff = msix_rsp->npa_msixoff;
+
+ return rc;
+}
+
+/**
+ * @internal
+ * Finalize NPA LF.
+ */
+int
+otx2_npa_lf_fini(void)
+{
+ struct otx2_idev_cfg *idev;
+ int rc = 0;
+
+ idev = otx2_intra_dev_get_cfg();
+ if (idev == NULL)
+ return -ENOMEM;
+
+ if (rte_atomic16_add_return(&idev->npa_refcnt, -1) == 0) {
+ otx2_npa_unregister_irqs(idev->npa_lf);
+ rc |= npa_lf_fini(idev->npa_lf);
+ rc |= npa_lf_detach(idev->npa_lf->mbox);
+ otx2_npa_set_defaults(idev);
+ }
+
+ return rc;
+}
+
+/**
+ * @internal
+ * Initialize NPA LF.
+ */
+int
+otx2_npa_lf_init(struct rte_pci_device *pci_dev, void *otx2_dev)
+{
+ struct otx2_dev *dev = otx2_dev;
+ struct otx2_idev_cfg *idev;
+ struct otx2_npa_lf *lf;
+ uint16_t npa_msixoff;
+ uint32_t nr_pools;
+ uint8_t aura_sz;
+ int rc;
+
+ idev = otx2_intra_dev_get_cfg();
+ if (idev == NULL)
+ return -ENOMEM;
+
+	/* Has the NPA LF already been initialized by another driver? */
+ if (rte_atomic16_add_return(&idev->npa_refcnt, 1) == 1) {
+
+ rc = npa_lf_attach(dev->mbox);
+ if (rc)
+ goto fail;
+
+ rc = npa_lf_get_msix_offset(dev->mbox, &npa_msixoff);
+ if (rc)
+ goto npa_detach;
+
+ aura_sz = otx2_parse_aura_size(pci_dev->device.devargs);
+ nr_pools = otx2_aura_size_to_u32(aura_sz);
+
+ lf = &dev->npalf;
+ rc = npa_lf_init(lf, dev->bar2 + (RVU_BLOCK_ADDR_NPA << 20),
+ aura_sz, nr_pools, dev->mbox);
+
+ if (rc)
+ goto npa_detach;
+
+ lf->pf_func = dev->pf_func;
+ lf->npa_msixoff = npa_msixoff;
+ lf->intr_handle = &pci_dev->intr_handle;
+ lf->pci_dev = pci_dev;
+
+ idev->npa_pf_func = dev->pf_func;
+ idev->npa_lf = lf;
+ rte_smp_wmb();
+ rc = otx2_npa_register_irqs(lf);
+ if (rc)
+ goto npa_fini;
+
+ rte_mbuf_set_platform_mempool_ops("octeontx2_npa");
+ otx2_npa_dbg("npa_lf=%p pools=%d sz=%d pf_func=0x%x msix=0x%x",
+ lf, nr_pools, aura_sz, lf->pf_func, npa_msixoff);
+ }
+
+ return 0;
+
+npa_fini:
+ npa_lf_fini(idev->npa_lf);
+npa_detach:
+ npa_lf_detach(dev->mbox);
+fail:
+ rte_atomic16_dec(&idev->npa_refcnt);
+ return rc;
+}
+
+static inline char*
+otx2_npa_dev_to_name(struct rte_pci_device *pci_dev, char *name)
+{
+ snprintf(name, OTX2_NPA_DEV_NAME_LEN,
+ OTX2_NPA_DEV_NAME PCI_PRI_FMT,
+ pci_dev->addr.domain, pci_dev->addr.bus,
+ pci_dev->addr.devid, pci_dev->addr.function);
+
+ return name;
+}
+
+static int
+otx2_npa_init(struct rte_pci_device *pci_dev)
+{
+ char name[OTX2_NPA_DEV_NAME_LEN];
+ const struct rte_memzone *mz;
+ struct otx2_dev *dev;
+ int rc = -ENOMEM;
+
+ mz = rte_memzone_reserve_aligned(otx2_npa_dev_to_name(pci_dev, name),
+ sizeof(*dev), SOCKET_ID_ANY,
+ 0, OTX2_ALIGN);
+ if (mz == NULL)
+ goto error;
+
+ dev = mz->addr;
+
+ /* Initialize the base otx2_dev object */
+ rc = otx2_dev_init(pci_dev, dev);
+ if (rc)
+ goto malloc_fail;
+
+ /* Grab the NPA LF if required */
+ rc = otx2_npa_lf_init(pci_dev, dev);
+ if (rc)
+ goto dev_uninit;
+
+ dev->drv_inited = true;
+ return 0;
+
+dev_uninit:
+ otx2_npa_lf_fini();
+ otx2_dev_fini(pci_dev, dev);
+malloc_fail:
+ rte_memzone_free(mz);
+error:
+ otx2_err("Failed to initialize npa device rc=%d", rc);
+ return rc;
+}
+
+static int
+otx2_npa_fini(struct rte_pci_device *pci_dev)
+{
+ char name[OTX2_NPA_DEV_NAME_LEN];
+ const struct rte_memzone *mz;
+ struct otx2_dev *dev;
+
+ mz = rte_memzone_lookup(otx2_npa_dev_to_name(pci_dev, name));
+ if (mz == NULL)
+ return -EINVAL;
+
+ dev = mz->addr;
+ if (!dev->drv_inited)
+ goto dev_fini;
+
+ dev->drv_inited = false;
+ otx2_npa_lf_fini();
+
+dev_fini:
+ if (otx2_npa_lf_active(dev)) {
+ otx2_info("%s: common resource in use by other devices",
+ pci_dev->name);
+ return -EAGAIN;
+ }
+
+ otx2_dev_fini(pci_dev, dev);
+ rte_memzone_free(mz);
+
+ return 0;
+}
+
+static int
+npa_remove(struct rte_pci_device *pci_dev)
+{
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ return otx2_npa_fini(pci_dev);
+}
+
+static int
+npa_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+ RTE_SET_USED(pci_drv);
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ return otx2_npa_init(pci_dev);
+}
+
+static const struct rte_pci_id pci_npa_map[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_RVU_NPA_PF)
+ },
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_RVU_NPA_VF)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static struct rte_pci_driver pci_npa = {
+ .id_table = pci_npa_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
+ .probe = npa_probe,
+ .remove = npa_remove,
+};
+
+RTE_PMD_REGISTER_PCI(mempool_octeontx2, pci_npa);
+RTE_PMD_REGISTER_PCI_TABLE(mempool_octeontx2, pci_npa_map);
+RTE_PMD_REGISTER_KMOD_DEP(mempool_octeontx2, "vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(mempool_octeontx2,
+ OTX2_MAX_POOLS "=<128-1048576>"
+ OTX2_NPA_LOCK_MASK "=<1-65535>");
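The max_pools device argument registered above is parsed by parse_max_pools() and rounded to a supported aura size (128 to 1M, in powers of two). As a hedged illustration (the application name and PCI address are hypothetical), it can be supplied per device on the EAL command line:

	dpdk-app -w 0002:01:00.1,max_pools=512 -- ...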
diff --git a/src/spdk/dpdk/drivers/mempool/octeontx2/otx2_mempool.h b/src/spdk/dpdk/drivers/mempool/octeontx2/otx2_mempool.h
new file mode 100644
index 000000000..8aa548248
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/octeontx2/otx2_mempool.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __OTX2_MEMPOOL_H__
+#define __OTX2_MEMPOOL_H__
+
+#include <rte_bitmap.h>
+#include <rte_bus_pci.h>
+#include <rte_devargs.h>
+#include <rte_mempool.h>
+
+#include "otx2_common.h"
+#include "otx2_mbox.h"
+
+enum npa_lf_status {
+ NPA_LF_ERR_PARAM = -512,
+ NPA_LF_ERR_ALLOC = -513,
+ NPA_LF_ERR_INVALID_BLOCK_SZ = -514,
+ NPA_LF_ERR_AURA_ID_ALLOC = -515,
+ NPA_LF_ERR_AURA_POOL_INIT = -516,
+ NPA_LF_ERR_AURA_POOL_FINI = -517,
+ NPA_LF_ERR_BASE_INVALID = -518,
+};
+
+struct otx2_npa_lf;
+struct otx2_npa_qint {
+ struct otx2_npa_lf *lf;
+ uint8_t qintx;
+};
+
+struct npa_aura_lim {
+ uint64_t ptr_start;
+ uint64_t ptr_end;
+};
+
+struct otx2_npa_lf {
+ uint16_t qints;
+ uintptr_t base;
+ uint8_t aura_sz;
+ uint16_t pf_func;
+ uint32_t nr_pools;
+ void *npa_bmp_mem;
+ void *npa_qint_mem;
+ uint16_t npa_msixoff;
+ struct otx2_mbox *mbox;
+ uint32_t stack_pg_ptrs;
+ uint32_t stack_pg_bytes;
+ struct rte_bitmap *npa_bmp;
+ struct npa_aura_lim *aura_lim;
+ struct rte_pci_device *pci_dev;
+ struct rte_intr_handle *intr_handle;
+};
+
+#define AURA_ID_MASK (BIT_ULL(16) - 1)
+
+/*
+ * Generate a 64-bit handle for optimized aura alloc and free operations.
+ * Bits 0 - AURA_ID_MASK store the aura_id; the bits above AURA_ID_MASK
+ * store the LF base address.
+ * This scheme is valid only when the OS provides an AURA_ID_MASK-aligned
+ * address for the LF base.
+ */
+static inline uint64_t
+npa_lf_aura_handle_gen(uint32_t aura_id, uintptr_t addr)
+{
+ uint64_t val;
+
+ val = aura_id & AURA_ID_MASK;
+ return (uint64_t)addr | val;
+}
+
+static inline uint64_t
+npa_lf_aura_handle_to_aura(uint64_t aura_handle)
+{
+ return aura_handle & AURA_ID_MASK;
+}
+
+static inline uintptr_t
+npa_lf_aura_handle_to_base(uint64_t aura_handle)
+{
+ return (uintptr_t)(aura_handle & ~AURA_ID_MASK);
+}
+
+static inline uint64_t
+npa_lf_aura_op_alloc(uint64_t aura_handle, const int drop)
+{
+ uint64_t wdata = npa_lf_aura_handle_to_aura(aura_handle);
+
+ if (drop)
+ wdata |= BIT_ULL(63); /* DROP */
+
+ return otx2_atomic64_add_nosync(wdata,
+ (int64_t *)(npa_lf_aura_handle_to_base(aura_handle) +
+ NPA_LF_AURA_OP_ALLOCX(0)));
+}
+
+static inline void
+npa_lf_aura_op_free(uint64_t aura_handle, const int fabs, uint64_t iova)
+{
+ uint64_t reg = npa_lf_aura_handle_to_aura(aura_handle);
+
+ if (fabs)
+ reg |= BIT_ULL(63); /* FABS */
+
+ otx2_store_pair(iova, reg,
+ npa_lf_aura_handle_to_base(aura_handle) + NPA_LF_AURA_OP_FREE0);
+}
+
+static inline uint64_t
+npa_lf_aura_op_cnt_get(uint64_t aura_handle)
+{
+ uint64_t wdata;
+ uint64_t reg;
+
+ wdata = npa_lf_aura_handle_to_aura(aura_handle) << 44;
+
+ reg = otx2_atomic64_add_nosync(wdata,
+ (int64_t *)(npa_lf_aura_handle_to_base(aura_handle) +
+ NPA_LF_AURA_OP_CNT));
+
+ if (reg & BIT_ULL(42) /* OP_ERR */)
+ return 0;
+ else
+ return reg & 0xFFFFFFFFF;
+}
+
+static inline void
+npa_lf_aura_op_cnt_set(uint64_t aura_handle, const int sign, uint64_t count)
+{
+ uint64_t reg = count & (BIT_ULL(36) - 1);
+
+ if (sign)
+ reg |= BIT_ULL(43); /* CNT_ADD */
+
+ reg |= (npa_lf_aura_handle_to_aura(aura_handle) << 44);
+
+ otx2_write64(reg,
+ npa_lf_aura_handle_to_base(aura_handle) + NPA_LF_AURA_OP_CNT);
+}
+
+static inline uint64_t
+npa_lf_aura_op_limit_get(uint64_t aura_handle)
+{
+ uint64_t wdata;
+ uint64_t reg;
+
+ wdata = npa_lf_aura_handle_to_aura(aura_handle) << 44;
+
+ reg = otx2_atomic64_add_nosync(wdata,
+ (int64_t *)(npa_lf_aura_handle_to_base(aura_handle) +
+ NPA_LF_AURA_OP_LIMIT));
+
+ if (reg & BIT_ULL(42) /* OP_ERR */)
+ return 0;
+ else
+ return reg & 0xFFFFFFFFF;
+}
+
+static inline void
+npa_lf_aura_op_limit_set(uint64_t aura_handle, uint64_t limit)
+{
+ uint64_t reg = limit & (BIT_ULL(36) - 1);
+
+ reg |= (npa_lf_aura_handle_to_aura(aura_handle) << 44);
+
+ otx2_write64(reg,
+ npa_lf_aura_handle_to_base(aura_handle) + NPA_LF_AURA_OP_LIMIT);
+}
+
+static inline uint64_t
+npa_lf_aura_op_available(uint64_t aura_handle)
+{
+ uint64_t wdata;
+ uint64_t reg;
+
+ wdata = npa_lf_aura_handle_to_aura(aura_handle) << 44;
+
+ reg = otx2_atomic64_add_nosync(wdata,
+ (int64_t *)(npa_lf_aura_handle_to_base(
+ aura_handle) + NPA_LF_POOL_OP_AVAILABLE));
+
+ if (reg & BIT_ULL(42) /* OP_ERR */)
+ return 0;
+ else
+ return reg & 0xFFFFFFFFF;
+}
+
+static inline void
+npa_lf_aura_op_range_set(uint64_t aura_handle, uint64_t start_iova,
+ uint64_t end_iova)
+{
+ uint64_t reg = npa_lf_aura_handle_to_aura(aura_handle);
+ struct otx2_npa_lf *lf = otx2_npa_lf_obj_get();
+ struct npa_aura_lim *lim = lf->aura_lim;
+
+ lim[reg].ptr_start = RTE_MIN(lim[reg].ptr_start, start_iova);
+ lim[reg].ptr_end = RTE_MAX(lim[reg].ptr_end, end_iova);
+
+ otx2_store_pair(lim[reg].ptr_start, reg,
+ npa_lf_aura_handle_to_base(aura_handle) +
+ NPA_LF_POOL_OP_PTR_START0);
+ otx2_store_pair(lim[reg].ptr_end, reg,
+ npa_lf_aura_handle_to_base(aura_handle) +
+ NPA_LF_POOL_OP_PTR_END0);
+}
+
+/* NPA LF */
+__rte_internal
+int otx2_npa_lf_init(struct rte_pci_device *pci_dev, void *otx2_dev);
+__rte_internal
+int otx2_npa_lf_fini(void);
+
+/* IRQ */
+int otx2_npa_register_irqs(struct otx2_npa_lf *lf);
+void otx2_npa_unregister_irqs(struct otx2_npa_lf *lf);
+
+/* Debug */
+int otx2_mempool_ctx_dump(struct otx2_npa_lf *lf);
+
+#endif /* __OTX2_MEMPOOL_H__ */
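The aura-handle encoding described above can be made concrete with a short worked example; the base address below is hypothetical, the real one comes from the NPA LF BAR mapping:

	#include <assert.h>
	#include <stdint.h>
	#include "otx2_mempool.h"

	static void
	aura_handle_example(void)
	{
		/* AURA_ID_MASK is 0xffff, so a 64 KB-aligned base leaves the
		 * low 16 bits free for the aura id. */
		uintptr_t base = (uintptr_t)0x840200000000ULL;
		uint64_t handle = npa_lf_aura_handle_gen(5, base);

		assert(npa_lf_aura_handle_to_aura(handle) == 5);
		assert(npa_lf_aura_handle_to_base(handle) == base);
	}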
diff --git a/src/spdk/dpdk/drivers/mempool/octeontx2/otx2_mempool_debug.c b/src/spdk/dpdk/drivers/mempool/octeontx2/otx2_mempool_debug.c
new file mode 100644
index 000000000..279ea2e25
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/octeontx2/otx2_mempool_debug.c
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include "otx2_mempool.h"
+
+#define npa_dump(fmt, ...) fprintf(stderr, fmt "\n", ##__VA_ARGS__)
+
+static inline void
+npa_lf_pool_dump(__otx2_io struct npa_pool_s *pool)
+{
+ npa_dump("W0: Stack base\t\t0x%"PRIx64"", pool->stack_base);
+ npa_dump("W1: ena \t\t%d\nW1: nat_align \t\t%d\nW1: stack_caching \t%d",
+ pool->ena, pool->nat_align, pool->stack_caching);
+ npa_dump("W1: stack_way_mask\t%d\nW1: buf_offset\t\t%d",
+ pool->stack_way_mask, pool->buf_offset);
+ npa_dump("W1: buf_size \t\t%d", pool->buf_size);
+
+ npa_dump("W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d",
+ pool->stack_max_pages, pool->stack_pages);
+
+ npa_dump("W3: op_pc \t\t0x%"PRIx64"", (uint64_t)pool->op_pc);
+
+ npa_dump("W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d",
+ pool->stack_offset, pool->shift, pool->avg_level);
+ npa_dump("W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d",
+ pool->avg_con, pool->fc_ena, pool->fc_stype);
+ npa_dump("W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d",
+ pool->fc_hyst_bits, pool->fc_up_crossing);
+ npa_dump("W4: update_time\t\t%d\n", pool->update_time);
+
+ npa_dump("W5: fc_addr\t\t0x%"PRIx64"\n", pool->fc_addr);
+
+ npa_dump("W6: ptr_start\t\t0x%"PRIx64"\n", pool->ptr_start);
+
+ npa_dump("W7: ptr_end\t\t0x%"PRIx64"\n", pool->ptr_end);
+ npa_dump("W8: err_int\t\t%d\nW8: err_int_ena\t\t%d",
+ pool->err_int, pool->err_int_ena);
+ npa_dump("W8: thresh_int\t\t%d", pool->thresh_int);
+
+ npa_dump("W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d",
+ pool->thresh_int_ena, pool->thresh_up);
+ npa_dump("W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d",
+ pool->thresh_qint_idx, pool->err_qint_idx);
+}
+
+static inline void
+npa_lf_aura_dump(__otx2_io struct npa_aura_s *aura)
+{
+ npa_dump("W0: Pool addr\t\t0x%"PRIx64"\n", aura->pool_addr);
+
+ npa_dump("W1: ena\t\t\t%d\nW1: pool caching\t%d\nW1: pool way mask\t%d",
+ aura->ena, aura->pool_caching, aura->pool_way_mask);
+ npa_dump("W1: avg con\t\t%d\nW1: pool drop ena\t%d",
+ aura->avg_con, aura->pool_drop_ena);
+ npa_dump("W1: aura drop ena\t%d", aura->aura_drop_ena);
+ npa_dump("W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\nW1: aura shift\t\t%d",
+ aura->bp_ena, aura->aura_drop, aura->shift);
+ npa_dump("W1: avg_level\t\t%d\n", aura->avg_level);
+
+ npa_dump("W2: count\t\t%"PRIx64"\nW2: nix0_bpid\t\t%d",
+ (uint64_t)aura->count, aura->nix0_bpid);
+ npa_dump("W2: nix1_bpid\t\t%d", aura->nix1_bpid);
+
+ npa_dump("W3: limit\t\t%"PRIx64"\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
+ (uint64_t)aura->limit, aura->bp, aura->fc_ena);
+ npa_dump("W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d",
+ aura->fc_up_crossing, aura->fc_stype);
+
+ npa_dump("W3: fc_hyst_bits\t%d", aura->fc_hyst_bits);
+
+ npa_dump("W4: fc_addr\t\t0x%"PRIx64"\n", aura->fc_addr);
+
+ npa_dump("W5: pool_drop\t\t%d\nW5: update_time\t\t%d",
+ aura->pool_drop, aura->update_time);
+ npa_dump("W5: err_int\t\t%d", aura->err_int);
+ npa_dump("W5: err_int_ena\t\t%d\nW5: thresh_int\t\t%d",
+ aura->err_int_ena, aura->thresh_int);
+ npa_dump("W5: thresh_int_ena\t%d", aura->thresh_int_ena);
+
+ npa_dump("W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d",
+ aura->thresh_up, aura->thresh_qint_idx);
+ npa_dump("W5: err_qint_idx\t%d", aura->err_qint_idx);
+
+ npa_dump("W6: thresh\t\t%"PRIx64"\n", (uint64_t)aura->thresh);
+}
+
+int
+otx2_mempool_ctx_dump(struct otx2_npa_lf *lf)
+{
+ struct npa_aq_enq_req *aq;
+ struct npa_aq_enq_rsp *rsp;
+ uint32_t q;
+ int rc = 0;
+
+ for (q = 0; q < lf->nr_pools; q++) {
+ /* Skip disabled POOL */
+ if (rte_bitmap_get(lf->npa_bmp, q))
+ continue;
+
+ aq = otx2_mbox_alloc_msg_npa_aq_enq(lf->mbox);
+ aq->aura_id = q;
+ aq->ctype = NPA_AQ_CTYPE_POOL;
+ aq->op = NPA_AQ_INSTOP_READ;
+
+ rc = otx2_mbox_process_msg(lf->mbox, (void *)&rsp);
+ if (rc) {
+ otx2_err("Failed to get pool(%d) context", q);
+ return rc;
+ }
+ npa_dump("============== pool=%d ===============\n", q);
+ npa_lf_pool_dump(&rsp->pool);
+ }
+
+ for (q = 0; q < lf->nr_pools; q++) {
+ /* Skip disabled AURA */
+ if (rte_bitmap_get(lf->npa_bmp, q))
+ continue;
+
+ aq = otx2_mbox_alloc_msg_npa_aq_enq(lf->mbox);
+ aq->aura_id = q;
+ aq->ctype = NPA_AQ_CTYPE_AURA;
+ aq->op = NPA_AQ_INSTOP_READ;
+
+ rc = otx2_mbox_process_msg(lf->mbox, (void *)&rsp);
+ if (rc) {
+ otx2_err("Failed to get aura(%d) context", q);
+ return rc;
+ }
+ npa_dump("============== aura=%d ===============\n", q);
+ npa_lf_aura_dump(&rsp->aura);
+ }
+
+ return rc;
+}
diff --git a/src/spdk/dpdk/drivers/mempool/octeontx2/otx2_mempool_irq.c b/src/spdk/dpdk/drivers/mempool/octeontx2/otx2_mempool_irq.c
new file mode 100644
index 000000000..5fa22b961
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/octeontx2/otx2_mempool_irq.c
@@ -0,0 +1,303 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <inttypes.h>
+
+#include <rte_common.h>
+#include <rte_bus_pci.h>
+
+#include "otx2_common.h"
+#include "otx2_irq.h"
+#include "otx2_mempool.h"
+
+static void
+npa_lf_err_irq(void *param)
+{
+ struct otx2_npa_lf *lf = (struct otx2_npa_lf *)param;
+ uint64_t intr;
+
+ intr = otx2_read64(lf->base + NPA_LF_ERR_INT);
+ if (intr == 0)
+ return;
+
+ otx2_err("Err_intr=0x%" PRIx64 "", intr);
+
+ /* Clear interrupt */
+ otx2_write64(intr, lf->base + NPA_LF_ERR_INT);
+}
+
+static int
+npa_lf_register_err_irq(struct otx2_npa_lf *lf)
+{
+ struct rte_intr_handle *handle = lf->intr_handle;
+ int rc, vec;
+
+ vec = lf->npa_msixoff + NPA_LF_INT_VEC_ERR_INT;
+
+ /* Clear err interrupt */
+ otx2_write64(~0ull, lf->base + NPA_LF_ERR_INT_ENA_W1C);
+ /* Register err interrupt vector */
+ rc = otx2_register_irq(handle, npa_lf_err_irq, lf, vec);
+
+ /* Enable hw interrupt */
+ otx2_write64(~0ull, lf->base + NPA_LF_ERR_INT_ENA_W1S);
+
+ return rc;
+}
+
+static void
+npa_lf_unregister_err_irq(struct otx2_npa_lf *lf)
+{
+ struct rte_intr_handle *handle = lf->intr_handle;
+ int vec;
+
+ vec = lf->npa_msixoff + NPA_LF_INT_VEC_ERR_INT;
+
+ /* Clear err interrupt */
+ otx2_write64(~0ull, lf->base + NPA_LF_ERR_INT_ENA_W1C);
+ otx2_unregister_irq(handle, npa_lf_err_irq, lf, vec);
+}
+
+static void
+npa_lf_ras_irq(void *param)
+{
+ struct otx2_npa_lf *lf = (struct otx2_npa_lf *)param;
+ uint64_t intr;
+
+ intr = otx2_read64(lf->base + NPA_LF_RAS);
+ if (intr == 0)
+ return;
+
+ otx2_err("Ras_intr=0x%" PRIx64 "", intr);
+
+ /* Clear interrupt */
+ otx2_write64(intr, lf->base + NPA_LF_RAS);
+}
+
+static int
+npa_lf_register_ras_irq(struct otx2_npa_lf *lf)
+{
+ struct rte_intr_handle *handle = lf->intr_handle;
+ int rc, vec;
+
+ vec = lf->npa_msixoff + NPA_LF_INT_VEC_POISON;
+
+ /* Clear err interrupt */
+ otx2_write64(~0ull, lf->base + NPA_LF_RAS_ENA_W1C);
+	/* Register RAS interrupt vector */
+ rc = otx2_register_irq(handle, npa_lf_ras_irq, lf, vec);
+ /* Enable hw interrupt */
+ otx2_write64(~0ull, lf->base + NPA_LF_RAS_ENA_W1S);
+
+ return rc;
+}
+
+static void
+npa_lf_unregister_ras_irq(struct otx2_npa_lf *lf)
+{
+ int vec;
+ struct rte_intr_handle *handle = lf->intr_handle;
+
+ vec = lf->npa_msixoff + NPA_LF_INT_VEC_POISON;
+
+ /* Clear err interrupt */
+ otx2_write64(~0ull, lf->base + NPA_LF_RAS_ENA_W1C);
+ otx2_unregister_irq(handle, npa_lf_ras_irq, lf, vec);
+}
+
+static inline uint8_t
+npa_lf_q_irq_get_and_clear(struct otx2_npa_lf *lf, uint32_t q,
+ uint32_t off, uint64_t mask)
+{
+ uint64_t reg, wdata;
+ uint8_t qint;
+
+ wdata = (uint64_t)q << 44;
+ reg = otx2_atomic64_add_nosync(wdata, (int64_t *)(lf->base + off));
+
+ if (reg & BIT_ULL(42) /* OP_ERR */) {
+		otx2_err("Failed to execute irq get, off=0x%x", off);
+ return 0;
+ }
+
+ qint = reg & 0xff;
+ wdata &= mask;
+ otx2_write64(wdata | qint, lf->base + off);
+
+ return qint;
+}
+
+static inline uint8_t
+npa_lf_pool_irq_get_and_clear(struct otx2_npa_lf *lf, uint32_t p)
+{
+ return npa_lf_q_irq_get_and_clear(lf, p, NPA_LF_POOL_OP_INT, ~0xff00);
+}
+
+static inline uint8_t
+npa_lf_aura_irq_get_and_clear(struct otx2_npa_lf *lf, uint32_t a)
+{
+ return npa_lf_q_irq_get_and_clear(lf, a, NPA_LF_AURA_OP_INT, ~0xff00);
+}
+
+static void
+npa_lf_q_irq(void *param)
+{
+ struct otx2_npa_qint *qint = (struct otx2_npa_qint *)param;
+ struct otx2_npa_lf *lf = qint->lf;
+ uint8_t irq, qintx = qint->qintx;
+ uint32_t q, pool, aura;
+ uint64_t intr;
+
+ intr = otx2_read64(lf->base + NPA_LF_QINTX_INT(qintx));
+ if (intr == 0)
+ return;
+
+ otx2_err("queue_intr=0x%" PRIx64 " qintx=%d", intr, qintx);
+
+ /* Handle pool queue interrupts */
+ for (q = 0; q < lf->nr_pools; q++) {
+ /* Skip disabled POOL */
+ if (rte_bitmap_get(lf->npa_bmp, q))
+ continue;
+
+ pool = q % lf->qints;
+ irq = npa_lf_pool_irq_get_and_clear(lf, pool);
+
+ if (irq & BIT_ULL(NPA_POOL_ERR_INT_OVFLS))
+ otx2_err("Pool=%d NPA_POOL_ERR_INT_OVFLS", pool);
+
+ if (irq & BIT_ULL(NPA_POOL_ERR_INT_RANGE))
+ otx2_err("Pool=%d NPA_POOL_ERR_INT_RANGE", pool);
+
+ if (irq & BIT_ULL(NPA_POOL_ERR_INT_PERR))
+ otx2_err("Pool=%d NPA_POOL_ERR_INT_PERR", pool);
+ }
+
+ /* Handle aura queue interrupts */
+ for (q = 0; q < lf->nr_pools; q++) {
+
+ /* Skip disabled AURA */
+ if (rte_bitmap_get(lf->npa_bmp, q))
+ continue;
+
+ aura = q % lf->qints;
+ irq = npa_lf_aura_irq_get_and_clear(lf, aura);
+
+ if (irq & BIT_ULL(NPA_AURA_ERR_INT_AURA_ADD_OVER))
+ otx2_err("Aura=%d NPA_AURA_ERR_INT_ADD_OVER", aura);
+
+ if (irq & BIT_ULL(NPA_AURA_ERR_INT_AURA_ADD_UNDER))
+ otx2_err("Aura=%d NPA_AURA_ERR_INT_ADD_UNDER", aura);
+
+ if (irq & BIT_ULL(NPA_AURA_ERR_INT_AURA_FREE_UNDER))
+ otx2_err("Aura=%d NPA_AURA_ERR_INT_FREE_UNDER", aura);
+
+ if (irq & BIT_ULL(NPA_AURA_ERR_INT_POOL_DIS))
+ otx2_err("Aura=%d NPA_AURA_ERR_POOL_DIS", aura);
+ }
+
+ /* Clear interrupt */
+ otx2_write64(intr, lf->base + NPA_LF_QINTX_INT(qintx));
+ otx2_mempool_ctx_dump(lf);
+}
+
+static int
+npa_lf_register_queue_irqs(struct otx2_npa_lf *lf)
+{
+ struct rte_intr_handle *handle = lf->intr_handle;
+ int vec, q, qs, rc = 0;
+
+ /* Figure out max qintx required */
+ qs = RTE_MIN(lf->qints, lf->nr_pools);
+
+ for (q = 0; q < qs; q++) {
+ vec = lf->npa_msixoff + NPA_LF_INT_VEC_QINT_START + q;
+
+ /* Clear QINT CNT */
+ otx2_write64(0, lf->base + NPA_LF_QINTX_CNT(q));
+
+ /* Clear interrupt */
+ otx2_write64(~0ull, lf->base + NPA_LF_QINTX_ENA_W1C(q));
+
+ struct otx2_npa_qint *qintmem = lf->npa_qint_mem;
+ qintmem += q;
+
+ qintmem->lf = lf;
+ qintmem->qintx = q;
+
+ /* Sync qints_mem update */
+ rte_smp_wmb();
+
+ /* Register queue irq vector */
+ rc = otx2_register_irq(handle, npa_lf_q_irq, qintmem, vec);
+ if (rc)
+ break;
+
+ otx2_write64(0, lf->base + NPA_LF_QINTX_CNT(q));
+ otx2_write64(0, lf->base + NPA_LF_QINTX_INT(q));
+ /* Enable QINT interrupt */
+ otx2_write64(~0ull, lf->base + NPA_LF_QINTX_ENA_W1S(q));
+ }
+
+ return rc;
+}
+
+static void
+npa_lf_unregister_queue_irqs(struct otx2_npa_lf *lf)
+{
+ struct rte_intr_handle *handle = lf->intr_handle;
+ int vec, q, qs;
+
+ /* Figure out max qintx required */
+ qs = RTE_MIN(lf->qints, lf->nr_pools);
+
+ for (q = 0; q < qs; q++) {
+ vec = lf->npa_msixoff + NPA_LF_INT_VEC_QINT_START + q;
+
+ /* Clear QINT CNT */
+ otx2_write64(0, lf->base + NPA_LF_QINTX_CNT(q));
+ otx2_write64(0, lf->base + NPA_LF_QINTX_INT(q));
+
+ /* Clear interrupt */
+ otx2_write64(~0ull, lf->base + NPA_LF_QINTX_ENA_W1C(q));
+
+ struct otx2_npa_qint *qintmem = lf->npa_qint_mem;
+ qintmem += q;
+
+ /* Unregister queue irq vector */
+ otx2_unregister_irq(handle, npa_lf_q_irq, qintmem, vec);
+
+ qintmem->lf = NULL;
+ qintmem->qintx = 0;
+ }
+}
+
+int
+otx2_npa_register_irqs(struct otx2_npa_lf *lf)
+{
+ int rc;
+
+ if (lf->npa_msixoff == MSIX_VECTOR_INVALID) {
+ otx2_err("Invalid NPALF MSIX vector offset vector: 0x%x",
+ lf->npa_msixoff);
+ return -EINVAL;
+ }
+
+ /* Register lf err interrupt */
+ rc = npa_lf_register_err_irq(lf);
+ /* Register RAS interrupt */
+ rc |= npa_lf_register_ras_irq(lf);
+ /* Register queue interrupts */
+ rc |= npa_lf_register_queue_irqs(lf);
+
+ return rc;
+}
+
+void
+otx2_npa_unregister_irqs(struct otx2_npa_lf *lf)
+{
+ npa_lf_unregister_err_irq(lf);
+ npa_lf_unregister_ras_irq(lf);
+ npa_lf_unregister_queue_irqs(lf);
+}
diff --git a/src/spdk/dpdk/drivers/mempool/octeontx2/otx2_mempool_ops.c b/src/spdk/dpdk/drivers/mempool/octeontx2/otx2_mempool_ops.c
new file mode 100644
index 000000000..5229a7cfb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/octeontx2/otx2_mempool_ops.c
@@ -0,0 +1,895 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <rte_mempool.h>
+#include <rte_vect.h>
+
+#include "otx2_mempool.h"
+
+static int __rte_hot
+otx2_npa_enq(struct rte_mempool *mp, void * const *obj_table, unsigned int n)
+{
+ unsigned int index;
+ const uint64_t aura_handle = mp->pool_id;
+ const uint64_t reg = npa_lf_aura_handle_to_aura(aura_handle);
+ const uint64_t addr = npa_lf_aura_handle_to_base(aura_handle) +
+ NPA_LF_AURA_OP_FREE0;
+
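+ /* Each free is a 128-bit store of (object pointer, aura id) to the
+ * aura's FREE0 register, handing the buffer back to the NPA pool.
+ */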
+ for (index = 0; index < n; index++)
+ otx2_store_pair((uint64_t)obj_table[index], reg, addr);
+
+ return 0;
+}
+
+static __rte_noinline int
+npa_lf_aura_op_alloc_one(const int64_t wdata, int64_t * const addr,
+ void **obj_table, uint8_t i)
+{
+ uint8_t retry = 4;
+
+ do {
+ obj_table[i] = (void *)otx2_atomic64_add_nosync(wdata, addr);
+ if (obj_table[i] != NULL)
+ return 0;
+
+ } while (retry--);
+
+ return -ENOENT;
+}
+
+#if defined(RTE_ARCH_ARM64)
+static __rte_noinline int
+npa_lf_aura_op_search_alloc(const int64_t wdata, int64_t * const addr,
+ void **obj_table, unsigned int n)
+{
+ uint8_t i;
+
+ for (i = 0; i < n; i++) {
+ if (obj_table[i] != NULL)
+ continue;
+ if (npa_lf_aura_op_alloc_one(wdata, addr, obj_table, i))
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
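+/*
+ * Bulk allocation using LSE "casp" pairs: each casp issues a 128-bit
+ * atomic against the aura ALLOCX(0) register and yields two object
+ * pointers (0 on failure). The results are moved into NEON registers,
+ * stored out to obj_table and ANDed into 'failed' so a single check at
+ * the end can detect whether any allocation came back empty.
+ */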
+static __rte_noinline int
+npa_lf_aura_op_alloc_bulk(const int64_t wdata, int64_t * const addr,
+ unsigned int n, void **obj_table)
+{
+ register const uint64_t wdata64 __asm("x26") = wdata;
+ register const uint64_t wdata128 __asm("x27") = wdata;
+ uint64x2_t failed = vdupq_n_u64(~0);
+
+ switch (n) {
+ case 32:
+ {
+ asm volatile (
+ ".cpu generic+lse\n"
+ "casp x0, x1, %[wdata64], %[wdata128], [%[loc]]\n"
+ "casp x2, x3, %[wdata64], %[wdata128], [%[loc]]\n"
+ "casp x4, x5, %[wdata64], %[wdata128], [%[loc]]\n"
+ "casp x6, x7, %[wdata64], %[wdata128], [%[loc]]\n"
+ "casp x8, x9, %[wdata64], %[wdata128], [%[loc]]\n"
+ "casp x10, x11, %[wdata64], %[wdata128], [%[loc]]\n"
+ "casp x12, x13, %[wdata64], %[wdata128], [%[loc]]\n"
+ "casp x14, x15, %[wdata64], %[wdata128], [%[loc]]\n"
+ "casp x16, x17, %[wdata64], %[wdata128], [%[loc]]\n"
+ "casp x18, x19, %[wdata64], %[wdata128], [%[loc]]\n"
+ "casp x20, x21, %[wdata64], %[wdata128], [%[loc]]\n"
+ "casp x22, x23, %[wdata64], %[wdata128], [%[loc]]\n"
+ "fmov d16, x0\n"
+ "fmov v16.D[1], x1\n"
+ "casp x0, x1, %[wdata64], %[wdata128], [%[loc]]\n"
+ "fmov d17, x2\n"
+ "fmov v17.D[1], x3\n"
+ "casp x2, x3, %[wdata64], %[wdata128], [%[loc]]\n"
+ "fmov d18, x4\n"
+ "fmov v18.D[1], x5\n"
+ "casp x4, x5, %[wdata64], %[wdata128], [%[loc]]\n"
+ "fmov d19, x6\n"
+ "fmov v19.D[1], x7\n"
+ "casp x6, x7, %[wdata64], %[wdata128], [%[loc]]\n"
+ "and %[failed].16B, %[failed].16B, v16.16B\n"
+ "and %[failed].16B, %[failed].16B, v17.16B\n"
+ "and %[failed].16B, %[failed].16B, v18.16B\n"
+ "and %[failed].16B, %[failed].16B, v19.16B\n"
+ "fmov d20, x8\n"
+ "fmov v20.D[1], x9\n"
+ "fmov d21, x10\n"
+ "fmov v21.D[1], x11\n"
+ "fmov d22, x12\n"
+ "fmov v22.D[1], x13\n"
+ "fmov d23, x14\n"
+ "fmov v23.D[1], x15\n"
+ "and %[failed].16B, %[failed].16B, v20.16B\n"
+ "and %[failed].16B, %[failed].16B, v21.16B\n"
+ "and %[failed].16B, %[failed].16B, v22.16B\n"
+ "and %[failed].16B, %[failed].16B, v23.16B\n"
+ "st1 { v16.2d, v17.2d, v18.2d, v19.2d}, [%[dst]], 64\n"
+ "st1 { v20.2d, v21.2d, v22.2d, v23.2d}, [%[dst]], 64\n"
+ "fmov d16, x16\n"
+ "fmov v16.D[1], x17\n"
+ "fmov d17, x18\n"
+ "fmov v17.D[1], x19\n"
+ "fmov d18, x20\n"
+ "fmov v18.D[1], x21\n"
+ "fmov d19, x22\n"
+ "fmov v19.D[1], x23\n"
+ "and %[failed].16B, %[failed].16B, v16.16B\n"
+ "and %[failed].16B, %[failed].16B, v17.16B\n"
+ "and %[failed].16B, %[failed].16B, v18.16B\n"
+ "and %[failed].16B, %[failed].16B, v19.16B\n"
+ "fmov d20, x0\n"
+ "fmov v20.D[1], x1\n"
+ "fmov d21, x2\n"
+ "fmov v21.D[1], x3\n"
+ "fmov d22, x4\n"
+ "fmov v22.D[1], x5\n"
+ "fmov d23, x6\n"
+ "fmov v23.D[1], x7\n"
+ "and %[failed].16B, %[failed].16B, v20.16B\n"
+ "and %[failed].16B, %[failed].16B, v21.16B\n"
+ "and %[failed].16B, %[failed].16B, v22.16B\n"
+ "and %[failed].16B, %[failed].16B, v23.16B\n"
+ "st1 { v16.2d, v17.2d, v18.2d, v19.2d}, [%[dst]], 64\n"
+ "st1 { v20.2d, v21.2d, v22.2d, v23.2d}, [%[dst]], 64\n"
+ : "+Q" (*addr), [failed] "=&w" (failed)
+ : [wdata64] "r" (wdata64), [wdata128] "r" (wdata128),
+ [dst] "r" (obj_table), [loc] "r" (addr)
+ : "memory", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
+ "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16",
+ "x17", "x18", "x19", "x20", "x21", "x22", "x23", "v16", "v17",
+ "v18", "v19", "v20", "v21", "v22", "v23"
+ );
+ break;
+ }
+ case 16:
+ {
+ asm volatile (
+ ".cpu generic+lse\n"
+ "casp x0, x1, %[wdata64], %[wdata128], [%[loc]]\n"
+ "casp x2, x3, %[wdata64], %[wdata128], [%[loc]]\n"
+ "casp x4, x5, %[wdata64], %[wdata128], [%[loc]]\n"
+ "casp x6, x7, %[wdata64], %[wdata128], [%[loc]]\n"
+ "casp x8, x9, %[wdata64], %[wdata128], [%[loc]]\n"
+ "casp x10, x11, %[wdata64], %[wdata128], [%[loc]]\n"
+ "casp x12, x13, %[wdata64], %[wdata128], [%[loc]]\n"
+ "casp x14, x15, %[wdata64], %[wdata128], [%[loc]]\n"
+ "fmov d16, x0\n"
+ "fmov v16.D[1], x1\n"
+ "fmov d17, x2\n"
+ "fmov v17.D[1], x3\n"
+ "fmov d18, x4\n"
+ "fmov v18.D[1], x5\n"
+ "fmov d19, x6\n"
+ "fmov v19.D[1], x7\n"
+ "and %[failed].16B, %[failed].16B, v16.16B\n"
+ "and %[failed].16B, %[failed].16B, v17.16B\n"
+ "and %[failed].16B, %[failed].16B, v18.16B\n"
+ "and %[failed].16B, %[failed].16B, v19.16B\n"
+ "fmov d20, x8\n"
+ "fmov v20.D[1], x9\n"
+ "fmov d21, x10\n"
+ "fmov v21.D[1], x11\n"
+ "fmov d22, x12\n"
+ "fmov v22.D[1], x13\n"
+ "fmov d23, x14\n"
+ "fmov v23.D[1], x15\n"
+ "and %[failed].16B, %[failed].16B, v20.16B\n"
+ "and %[failed].16B, %[failed].16B, v21.16B\n"
+ "and %[failed].16B, %[failed].16B, v22.16B\n"
+ "and %[failed].16B, %[failed].16B, v23.16B\n"
+ "st1 { v16.2d, v17.2d, v18.2d, v19.2d}, [%[dst]], 64\n"
+ "st1 { v20.2d, v21.2d, v22.2d, v23.2d}, [%[dst]], 64\n"
+ : "+Q" (*addr), [failed] "=&w" (failed)
+ : [wdata64] "r" (wdata64), [wdata128] "r" (wdata128),
+ [dst] "r" (obj_table), [loc] "r" (addr)
+ : "memory", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
+ "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "v16",
+ "v17", "v18", "v19", "v20", "v21", "v22", "v23"
+ );
+ break;
+ }
+ case 8:
+ {
+ asm volatile (
+ ".cpu generic+lse\n"
+ "casp x0, x1, %[wdata64], %[wdata128], [%[loc]]\n"
+ "casp x2, x3, %[wdata64], %[wdata128], [%[loc]]\n"
+ "casp x4, x5, %[wdata64], %[wdata128], [%[loc]]\n"
+ "casp x6, x7, %[wdata64], %[wdata128], [%[loc]]\n"
+ "fmov d16, x0\n"
+ "fmov v16.D[1], x1\n"
+ "fmov d17, x2\n"
+ "fmov v17.D[1], x3\n"
+ "fmov d18, x4\n"
+ "fmov v18.D[1], x5\n"
+ "fmov d19, x6\n"
+ "fmov v19.D[1], x7\n"
+ "and %[failed].16B, %[failed].16B, v16.16B\n"
+ "and %[failed].16B, %[failed].16B, v17.16B\n"
+ "and %[failed].16B, %[failed].16B, v18.16B\n"
+ "and %[failed].16B, %[failed].16B, v19.16B\n"
+ "st1 { v16.2d, v17.2d, v18.2d, v19.2d}, [%[dst]], 64\n"
+ : "+Q" (*addr), [failed] "=&w" (failed)
+ : [wdata64] "r" (wdata64), [wdata128] "r" (wdata128),
+ [dst] "r" (obj_table), [loc] "r" (addr)
+ : "memory", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
+ "v16", "v17", "v18", "v19"
+ );
+ break;
+ }
+ case 4:
+ {
+ asm volatile (
+ ".cpu generic+lse\n"
+ "casp x0, x1, %[wdata64], %[wdata128], [%[loc]]\n"
+ "casp x2, x3, %[wdata64], %[wdata128], [%[loc]]\n"
+ "fmov d16, x0\n"
+ "fmov v16.D[1], x1\n"
+ "fmov d17, x2\n"
+ "fmov v17.D[1], x3\n"
+ "and %[failed].16B, %[failed].16B, v16.16B\n"
+ "and %[failed].16B, %[failed].16B, v17.16B\n"
+ "st1 { v16.2d, v17.2d}, [%[dst]], 32\n"
+ : "+Q" (*addr), [failed] "=&w" (failed)
+ : [wdata64] "r" (wdata64), [wdata128] "r" (wdata128),
+ [dst] "r" (obj_table), [loc] "r" (addr)
+ : "memory", "x0", "x1", "x2", "x3", "v16", "v17"
+ );
+ break;
+ }
+ case 2:
+ {
+ asm volatile (
+ ".cpu generic+lse\n"
+ "casp x0, x1, %[wdata64], %[wdata128], [%[loc]]\n"
+ "fmov d16, x0\n"
+ "fmov v16.D[1], x1\n"
+ "and %[failed].16B, %[failed].16B, v16.16B\n"
+ "st1 { v16.2d}, [%[dst]], 16\n"
+ : "+Q" (*addr), [failed] "=&w" (failed)
+ : [wdata64] "r" (wdata64), [wdata128] "r" (wdata128),
+ [dst] "r" (obj_table), [loc] "r" (addr)
+ : "memory", "x0", "x1", "v16"
+ );
+ break;
+ }
+ case 1:
+ return npa_lf_aura_op_alloc_one(wdata, addr, obj_table, 0);
+ }
+
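+ /* If the ANDed results collapse to zero, at least one allocation
+ * likely returned NULL; rewind obj_table and retry the empty slots
+ * one at a time.
+ */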
+ if (unlikely(!(vgetq_lane_u64(failed, 0) & vgetq_lane_u64(failed, 1))))
+ return npa_lf_aura_op_search_alloc(wdata, addr, (void **)
+ ((char *)obj_table - (sizeof(uint64_t) * n)), n);
+
+ return 0;
+}
+
+static __rte_noinline void
+otx2_npa_clear_alloc(struct rte_mempool *mp, void **obj_table, unsigned int n)
+{
+ unsigned int i;
+
+ for (i = 0; i < n; i++) {
+ if (obj_table[i] != NULL) {
+ otx2_npa_enq(mp, &obj_table[i], 1);
+ obj_table[i] = NULL;
+ }
+ }
+}
+
+static __rte_noinline int __rte_hot
+otx2_npa_deq_arm64(struct rte_mempool *mp, void **obj_table, unsigned int n)
+{
+ const int64_t wdata = npa_lf_aura_handle_to_aura(mp->pool_id);
+ void **obj_table_bak = obj_table;
+ const unsigned int nfree = n;
+ unsigned int parts;
+
+ int64_t * const addr = (int64_t * const)
+ (npa_lf_aura_handle_to_base(mp->pool_id) +
+ NPA_LF_AURA_OP_ALLOCX(0));
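+ /* Split the request into the largest batch sizes the asm helper
+ * supports (32, 16, 8, 4, 2, 1), rolling back everything on failure.
+ */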
+ while (n) {
+ parts = n > 31 ? 32 : rte_align32prevpow2(n);
+ n -= parts;
+ if (unlikely(npa_lf_aura_op_alloc_bulk(wdata, addr,
+ parts, obj_table))) {
+ otx2_npa_clear_alloc(mp, obj_table_bak, nfree - n);
+ return -ENOENT;
+ }
+ obj_table += parts;
+ }
+
+ return 0;
+}
+
+#else
+
+static inline int __rte_hot
+otx2_npa_deq(struct rte_mempool *mp, void **obj_table, unsigned int n)
+{
+ const int64_t wdata = npa_lf_aura_handle_to_aura(mp->pool_id);
+ unsigned int index;
+
+ int64_t * const addr = (int64_t *)
+ (npa_lf_aura_handle_to_base(mp->pool_id) +
+ NPA_LF_AURA_OP_ALLOCX(0));
+ for (index = 0; index < n; index++, obj_table++) {
+ /* npa_lf_aura_op_alloc_one() stores the object pointer into
+ * obj_table[0] and returns 0 on success, -ENOENT on failure.
+ */
+ if (npa_lf_aura_op_alloc_one(wdata, addr, obj_table, 0)) {
+ /* Roll back the objects allocated so far */
+ for (; index > 0; index--) {
+ obj_table--;
+ otx2_npa_enq(mp, obj_table, 1);
+ }
+ return -ENOENT;
+ }
+ }
+
+ return 0;
+}
+
+#endif
+
+static unsigned int
+otx2_npa_get_count(const struct rte_mempool *mp)
+{
+ return (unsigned int)npa_lf_aura_op_available(mp->pool_id);
+}
+
+static int
+npa_lf_aura_pool_init(struct otx2_mbox *mbox, uint32_t aura_id,
+ struct npa_aura_s *aura, struct npa_pool_s *pool)
+{
+ struct npa_aq_enq_req *aura_init_req, *pool_init_req;
+ struct npa_aq_enq_rsp *aura_init_rsp, *pool_init_rsp;
+ struct otx2_mbox_dev *mdev = &mbox->dev[0];
+ struct otx2_idev_cfg *idev;
+ int rc, off;
+
+ idev = otx2_intra_dev_get_cfg();
+ if (idev == NULL)
+ return -ENOMEM;
+
+ aura_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+
+ aura_init_req->aura_id = aura_id;
+ aura_init_req->ctype = NPA_AQ_CTYPE_AURA;
+ aura_init_req->op = NPA_AQ_INSTOP_INIT;
+ otx2_mbox_memcpy(&aura_init_req->aura, aura, sizeof(*aura));
+
+ pool_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+
+ pool_init_req->aura_id = aura_id;
+ pool_init_req->ctype = NPA_AQ_CTYPE_POOL;
+ pool_init_req->op = NPA_AQ_INSTOP_INIT;
+ otx2_mbox_memcpy(&pool_init_req->pool, pool, sizeof(*pool));
+
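+ /* Both AQ requests are batched in a single mailbox message; a return
+ * value of 2 below means both responses were received.
+ */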
+ otx2_mbox_msg_send(mbox, 0);
+ rc = otx2_mbox_wait_for_rsp(mbox, 0);
+ if (rc < 0)
+ return rc;
+
+ off = mbox->rx_start +
+ RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+ aura_init_rsp = (struct npa_aq_enq_rsp *)((uintptr_t)mdev->mbase + off);
+ off = mbox->rx_start + aura_init_rsp->hdr.next_msgoff;
+ pool_init_rsp = (struct npa_aq_enq_rsp *)((uintptr_t)mdev->mbase + off);
+
+ if (rc == 2 && aura_init_rsp->hdr.rc == 0 && pool_init_rsp->hdr.rc == 0)
+ return 0;
+ else
+ return NPA_LF_ERR_AURA_POOL_INIT;
+
+ if (!(idev->npa_lock_mask & BIT_ULL(aura_id)))
+ return 0;
+
+ aura_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+ aura_init_req->aura_id = aura_id;
+ aura_init_req->ctype = NPA_AQ_CTYPE_AURA;
+ aura_init_req->op = NPA_AQ_INSTOP_LOCK;
+
+ pool_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+ if (!pool_init_req) {
+ /* The shared memory buffer can be full.
+ * Flush it and retry
+ */
+ otx2_mbox_msg_send(mbox, 0);
+ rc = otx2_mbox_wait_for_rsp(mbox, 0);
+ if (rc < 0) {
+ otx2_err("Failed to LOCK AURA context");
+ return -ENOMEM;
+ }
+
+ pool_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+ if (!pool_init_req) {
+ otx2_err("Failed to LOCK POOL context");
+ return -ENOMEM;
+ }
+ }
+ pool_init_req->aura_id = aura_id;
+ pool_init_req->ctype = NPA_AQ_CTYPE_POOL;
+ pool_init_req->op = NPA_AQ_INSTOP_LOCK;
+
+ rc = otx2_mbox_process(mbox);
+ if (rc < 0) {
+ otx2_err("Failed to lock POOL ctx to NDC");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int
+npa_lf_aura_pool_fini(struct otx2_mbox *mbox,
+ uint32_t aura_id,
+ uint64_t aura_handle)
+{
+ struct npa_aq_enq_req *aura_req, *pool_req;
+ struct npa_aq_enq_rsp *aura_rsp, *pool_rsp;
+ struct otx2_mbox_dev *mdev = &mbox->dev[0];
+ struct ndc_sync_op *ndc_req;
+ struct otx2_idev_cfg *idev;
+ int rc, off;
+
+ idev = otx2_intra_dev_get_cfg();
+ if (idev == NULL)
+ return -EINVAL;
+
+ /* Procedure for disabling an aura/pool */
+ rte_delay_us(10);
+ npa_lf_aura_op_alloc(aura_handle, 0);
+
+ pool_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+ pool_req->aura_id = aura_id;
+ pool_req->ctype = NPA_AQ_CTYPE_POOL;
+ pool_req->op = NPA_AQ_INSTOP_WRITE;
+ pool_req->pool.ena = 0;
+ pool_req->pool_mask.ena = ~pool_req->pool_mask.ena;
+
+ aura_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+ aura_req->aura_id = aura_id;
+ aura_req->ctype = NPA_AQ_CTYPE_AURA;
+ aura_req->op = NPA_AQ_INSTOP_WRITE;
+ aura_req->aura.ena = 0;
+ aura_req->aura_mask.ena = ~aura_req->aura_mask.ena;
+
+ otx2_mbox_msg_send(mbox, 0);
+ rc = otx2_mbox_wait_for_rsp(mbox, 0);
+ if (rc < 0)
+ return rc;
+
+ off = mbox->rx_start +
+ RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+ pool_rsp = (struct npa_aq_enq_rsp *)((uintptr_t)mdev->mbase + off);
+
+ off = mbox->rx_start + pool_rsp->hdr.next_msgoff;
+ aura_rsp = (struct npa_aq_enq_rsp *)((uintptr_t)mdev->mbase + off);
+
+ if (rc != 2 || aura_rsp->hdr.rc != 0 || pool_rsp->hdr.rc != 0)
+ return NPA_LF_ERR_AURA_POOL_FINI;
+
+ /* Sync NDC-NPA for LF */
+ ndc_req = otx2_mbox_alloc_msg_ndc_sync_op(mbox);
+ ndc_req->npa_lf_sync = 1;
+
+ rc = otx2_mbox_process(mbox);
+ if (rc) {
+ otx2_err("Error on NDC-NPA LF sync, rc %d", rc);
+ return NPA_LF_ERR_AURA_POOL_FINI;
+ }
+
+ if (!(idev->npa_lock_mask & BIT_ULL(aura_id)))
+ return 0;
+
+ aura_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+ aura_req->aura_id = aura_id;
+ aura_req->ctype = NPA_AQ_CTYPE_AURA;
+ aura_req->op = NPA_AQ_INSTOP_UNLOCK;
+
+ rc = otx2_mbox_process(mbox);
+ if (rc < 0) {
+ otx2_err("Failed to unlock AURA ctx to NDC");
+ return -EINVAL;
+ }
+
+ pool_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+ pool_req->aura_id = aura_id;
+ pool_req->ctype = NPA_AQ_CTYPE_POOL;
+ pool_req->op = NPA_AQ_INSTOP_UNLOCK;
+
+ rc = otx2_mbox_process(mbox);
+ if (rc < 0) {
+ otx2_err("Failed to unlock POOL ctx to NDC");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline char*
+npa_lf_stack_memzone_name(struct otx2_npa_lf *lf, int pool_id, char *name)
+{
+ snprintf(name, RTE_MEMZONE_NAMESIZE, "otx2_npa_stack_%x_%d",
+ lf->pf_func, pool_id);
+
+ return name;
+}
+
+static inline const struct rte_memzone *
+npa_lf_stack_dma_alloc(struct otx2_npa_lf *lf, char *name,
+ int pool_id, size_t size)
+{
+ return rte_memzone_reserve_aligned(
+ npa_lf_stack_memzone_name(lf, pool_id, name), size, 0,
+ RTE_MEMZONE_IOVA_CONTIG, OTX2_ALIGN);
+}
+
+static inline int
+npa_lf_stack_dma_free(struct otx2_npa_lf *lf, char *name, int pool_id)
+{
+ const struct rte_memzone *mz;
+
+ mz = rte_memzone_lookup(npa_lf_stack_memzone_name(lf, pool_id, name));
+ if (mz == NULL)
+ return -EINVAL;
+
+ return rte_memzone_free(mz);
+}
+
+static inline int
+bitmap_ctzll(uint64_t slab)
+{
+ if (slab == 0)
+ return 0;
+
+ return __builtin_ctzll(slab);
+}
+
+static int
+npa_lf_aura_pool_pair_alloc(struct otx2_npa_lf *lf, const uint32_t block_size,
+ const uint32_t block_count, struct npa_aura_s *aura,
+ struct npa_pool_s *pool, uint64_t *aura_handle)
+{
+ int rc, aura_id, pool_id, stack_size, alloc_size;
+ char name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz;
+ uint64_t slab;
+ uint32_t pos;
+
+ /* Sanity check */
+ if (!lf || !block_size || !block_count ||
+ !pool || !aura || !aura_handle)
+ return NPA_LF_ERR_PARAM;
+
+ /* Block size should be cache line aligned and in range of 128B-128KB */
+ if (block_size % OTX2_ALIGN || block_size < 128 ||
+ block_size > 128 * 1024)
+ return NPA_LF_ERR_INVALID_BLOCK_SZ;
+
+ pos = slab = 0;
+ /* Scan from the beginning */
+ __rte_bitmap_scan_init(lf->npa_bmp);
+ /* Scan bitmap to get the free pool */
+ rc = rte_bitmap_scan(lf->npa_bmp, &pos, &slab);
+ /* Empty bitmap */
+ if (rc == 0) {
+ otx2_err("Mempools exhausted, 'max_pools' devargs to increase");
+ return -ERANGE;
+ }
+
+ /* Get aura_id from resource bitmap */
+ aura_id = pos + bitmap_ctzll(slab);
+ /* Mark pool as reserved */
+ rte_bitmap_clear(lf->npa_bmp, aura_id);
+
+ /* Each aura is configured with its own pool (aura-pool pair) */
+ pool_id = aura_id;
+ rc = (aura_id < 0 || pool_id >= (int)lf->nr_pools || aura_id >=
+ (int)BIT_ULL(6 + lf->aura_sz)) ? NPA_LF_ERR_AURA_ID_ALLOC : 0;
+ if (rc)
+ goto exit;
+
+ /* Allocate stack memory */
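+ /* Number of stack pages needed to hold block_count pointers, rounded up */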
+ stack_size = (block_count + lf->stack_pg_ptrs - 1) / lf->stack_pg_ptrs;
+ alloc_size = stack_size * lf->stack_pg_bytes;
+
+ mz = npa_lf_stack_dma_alloc(lf, name, pool_id, alloc_size);
+ if (mz == NULL) {
+ rc = -ENOMEM;
+ goto aura_res_put;
+ }
+
+ /* Update aura fields */
+ aura->pool_addr = pool_id;/* AF will translate to associated poolctx */
+ aura->ena = 1;
+ aura->shift = __builtin_clz(block_count) - 8;
+ aura->limit = block_count;
+ aura->pool_caching = 1;
+ aura->err_int_ena = BIT(NPA_AURA_ERR_INT_AURA_ADD_OVER);
+ aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_ADD_UNDER);
+ aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_FREE_UNDER);
+ aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_POOL_DIS);
+ /* Many to one reduction */
+ aura->err_qint_idx = aura_id % lf->qints;
+
+ /* Update pool fields */
+ pool->stack_base = mz->iova;
+ pool->ena = 1;
+ pool->buf_size = block_size / OTX2_ALIGN;
+ pool->stack_max_pages = stack_size;
+ pool->shift = __builtin_clz(block_count) - 8;
+ pool->ptr_start = 0;
+ pool->ptr_end = ~0;
+ pool->stack_caching = 1;
+ pool->err_int_ena = BIT(NPA_POOL_ERR_INT_OVFLS);
+ pool->err_int_ena |= BIT(NPA_POOL_ERR_INT_RANGE);
+ pool->err_int_ena |= BIT(NPA_POOL_ERR_INT_PERR);
+
+ /* Many to one reduction */
+ pool->err_qint_idx = pool_id % lf->qints;
+
+ /* Issue AURA_INIT and POOL_INIT op */
+ rc = npa_lf_aura_pool_init(lf->mbox, aura_id, aura, pool);
+ if (rc)
+ goto stack_mem_free;
+
+ *aura_handle = npa_lf_aura_handle_gen(aura_id, lf->base);
+
+ /* Update aura count */
+ npa_lf_aura_op_cnt_set(*aura_handle, 0, block_count);
+ /* Read it back to make sure aura count is updated */
+ npa_lf_aura_op_cnt_get(*aura_handle);
+
+ return 0;
+
+stack_mem_free:
+ rte_memzone_free(mz);
+aura_res_put:
+ rte_bitmap_set(lf->npa_bmp, aura_id);
+exit:
+ return rc;
+}
+
+static int
+npa_lf_aura_pool_pair_free(struct otx2_npa_lf *lf, uint64_t aura_handle)
+{
+ char name[RTE_MEMZONE_NAMESIZE];
+ int aura_id, pool_id, rc;
+
+ if (!lf || !aura_handle)
+ return NPA_LF_ERR_PARAM;
+
+ aura_id = pool_id = npa_lf_aura_handle_to_aura(aura_handle);
+ rc = npa_lf_aura_pool_fini(lf->mbox, aura_id, aura_handle);
+ rc |= npa_lf_stack_dma_free(lf, name, pool_id);
+
+ rte_bitmap_set(lf->npa_bmp, aura_id);
+
+ return rc;
+}
+
+static int
+npa_lf_aura_range_update_check(uint64_t aura_handle)
+{
+ uint64_t aura_id = npa_lf_aura_handle_to_aura(aura_handle);
+ struct otx2_npa_lf *lf = otx2_npa_lf_obj_get();
+ struct npa_aura_lim *lim = lf->aura_lim;
+ __otx2_io struct npa_pool_s *pool;
+ struct npa_aq_enq_req *req;
+ struct npa_aq_enq_rsp *rsp;
+ int rc;
+
+ req = otx2_mbox_alloc_msg_npa_aq_enq(lf->mbox);
+
+ req->aura_id = aura_id;
+ req->ctype = NPA_AQ_CTYPE_POOL;
+ req->op = NPA_AQ_INSTOP_READ;
+
+ rc = otx2_mbox_process_msg(lf->mbox, (void *)&rsp);
+ if (rc) {
+ otx2_err("Failed to get pool(0x%"PRIx64") context", aura_id);
+ return rc;
+ }
+
+ pool = &rsp->pool;
+
+ if (lim[aura_id].ptr_start != pool->ptr_start ||
+ lim[aura_id].ptr_end != pool->ptr_end) {
+ otx2_err("Range update failed on pool(0x%"PRIx64")", aura_id);
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static int
+otx2_npa_alloc(struct rte_mempool *mp)
+{
+ uint32_t block_size, block_count;
+ uint64_t aura_handle = 0;
+ struct otx2_npa_lf *lf;
+ struct npa_aura_s aura;
+ struct npa_pool_s pool;
+ size_t padding;
+ int rc;
+
+ lf = otx2_npa_lf_obj_get();
+ if (lf == NULL) {
+ rc = -EINVAL;
+ goto error;
+ }
+
+ block_size = mp->elt_size + mp->header_size + mp->trailer_size;
+ /*
+ * OCTEON TX2 has 8 sets, 41 ways L1D cache, VA<9:7> bits dictate
+ * the set selection.
+ * Add additional padding to ensure that the element size always
+ * occupies odd number of cachelines to ensure even distribution
+ * of elements among L1D cache sets.
+ */
+ padding = ((block_size / RTE_CACHE_LINE_SIZE) % 2) ? 0 :
+ RTE_CACHE_LINE_SIZE;
+ mp->trailer_size += padding;
+ block_size += padding;
+
+ block_count = mp->size;
+
+ if (block_size % OTX2_ALIGN != 0) {
+ otx2_err("Block size should be multiple of 128B");
+ rc = -ERANGE;
+ goto error;
+ }
+
+ memset(&aura, 0, sizeof(struct npa_aura_s));
+ memset(&pool, 0, sizeof(struct npa_pool_s));
+ pool.nat_align = 1;
+ pool.buf_offset = 1;
+
+ if ((uint32_t)pool.buf_offset * OTX2_ALIGN != mp->header_size) {
+ otx2_err("Unsupported mp->header_size=%d", mp->header_size);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ /* Use driver specific mp->pool_config to override aura config */
+ if (mp->pool_config != NULL)
+ memcpy(&aura, mp->pool_config, sizeof(struct npa_aura_s));
+
+ rc = npa_lf_aura_pool_pair_alloc(lf, block_size, block_count,
+ &aura, &pool, &aura_handle);
+ if (rc) {
+ otx2_err("Failed to alloc pool or aura rc=%d", rc);
+ goto error;
+ }
+
+ /* Store aura_handle for future queue operations */
+ mp->pool_id = aura_handle;
+ otx2_npa_dbg("lf=%p block_sz=%d block_count=%d aura_handle=0x%"PRIx64,
+ lf, block_size, block_count, aura_handle);
+
+ /* Just hold the reference of the object */
+ otx2_npa_lf_obj_ref();
+ return 0;
+error:
+ return rc;
+}
+
+static void
+otx2_npa_free(struct rte_mempool *mp)
+{
+ struct otx2_npa_lf *lf = otx2_npa_lf_obj_get();
+ int rc = 0;
+
+ otx2_npa_dbg("lf=%p aura_handle=0x%"PRIx64, lf, mp->pool_id);
+ if (lf != NULL)
+ rc = npa_lf_aura_pool_pair_free(lf, mp->pool_id);
+
+ if (rc)
+ otx2_err("Failed to free pool or aura rc=%d", rc);
+
+ /* Release the reference of npalf */
+ otx2_npa_lf_fini();
+}
+
+static ssize_t
+otx2_npa_calc_mem_size(const struct rte_mempool *mp, uint32_t obj_num,
+ uint32_t pg_shift, size_t *min_chunk_size, size_t *align)
+{
+ size_t total_elt_sz;
+
+ /* Need space for one more obj on each chunk to fulfill
+ * alignment requirements.
+ */
+ total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
+ return rte_mempool_op_calc_mem_size_helper(mp, obj_num, pg_shift,
+ total_elt_sz, min_chunk_size,
+ align);
+}
+
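+/* L1D set an address maps to: bits <9:7>, i.e. (addr >> log2(cache line)) & 0x7 */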
+static uint8_t
+otx2_npa_l1d_way_set_get(uint64_t iova)
+{
+ return (iova >> rte_log2_u32(RTE_CACHE_LINE_SIZE)) & 0x7;
+}
+
+static int
+otx2_npa_populate(struct rte_mempool *mp, unsigned int max_objs, void *vaddr,
+ rte_iova_t iova, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
+{
+#define OTX2_L1D_NB_SETS 8
+ uint64_t distribution[OTX2_L1D_NB_SETS];
+ rte_iova_t start_iova;
+ size_t total_elt_sz;
+ uint8_t set;
+ size_t off;
+ int i;
+
+ if (iova == RTE_BAD_IOVA)
+ return -EINVAL;
+
+ total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
+
+ /* Align object start address to a multiple of total_elt_sz */
+ off = total_elt_sz - ((((uintptr_t)vaddr - 1) % total_elt_sz) + 1);
+
+ if (len < off)
+ return -EINVAL;
+
+ vaddr = (char *)vaddr + off;
+ iova += off;
+ len -= off;
+
+ memset(distribution, 0, sizeof(uint64_t) * OTX2_L1D_NB_SETS);
+ start_iova = iova;
+ while (start_iova < iova + len) {
+ set = otx2_npa_l1d_way_set_get(start_iova + mp->header_size);
+ distribution[set]++;
+ start_iova += total_elt_sz;
+ }
+
+ otx2_npa_dbg("iova %"PRIx64", aligned iova %"PRIx64"", iova - off,
+ iova);
+ otx2_npa_dbg("length %"PRIu64", aligned length %"PRIu64"",
+ (uint64_t)(len + off), (uint64_t)len);
+ otx2_npa_dbg("element size %"PRIu64"", (uint64_t)total_elt_sz);
+ otx2_npa_dbg("requested objects %"PRIu64", possible objects %"PRIu64"",
+ (uint64_t)max_objs, (uint64_t)(len / total_elt_sz));
+ otx2_npa_dbg("L1D set distribution :");
+ for (i = 0; i < OTX2_L1D_NB_SETS; i++)
+ otx2_npa_dbg("set[%d] : objects : %"PRIu64"", i,
+ distribution[i]);
+
+ npa_lf_aura_op_range_set(mp->pool_id, iova, iova + len);
+
+ if (npa_lf_aura_range_update_check(mp->pool_id) < 0)
+ return -EBUSY;
+
+ return rte_mempool_op_populate_helper(mp,
+ RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ,
+ max_objs, vaddr, iova, len,
+ obj_cb, obj_cb_arg);
+}
+
+static struct rte_mempool_ops otx2_npa_ops = {
+ .name = "octeontx2_npa",
+ .alloc = otx2_npa_alloc,
+ .free = otx2_npa_free,
+ .enqueue = otx2_npa_enq,
+ .get_count = otx2_npa_get_count,
+ .calc_mem_size = otx2_npa_calc_mem_size,
+ .populate = otx2_npa_populate,
+#if defined(RTE_ARCH_ARM64)
+ .dequeue = otx2_npa_deq_arm64,
+#else
+ .dequeue = otx2_npa_deq,
+#endif
+};
+
+MEMPOOL_REGISTER_OPS(otx2_npa_ops);
diff --git a/src/spdk/dpdk/drivers/mempool/octeontx2/rte_mempool_octeontx2_version.map b/src/spdk/dpdk/drivers/mempool/octeontx2/rte_mempool_octeontx2_version.map
new file mode 100644
index 000000000..8691efdfd
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/octeontx2/rte_mempool_octeontx2_version.map
@@ -0,0 +1,10 @@
+DPDK_20.0 {
+ local: *;
+};
+
+INTERNAL {
+ global:
+
+ otx2_npa_lf_fini;
+ otx2_npa_lf_init;
+};
diff --git a/src/spdk/dpdk/drivers/mempool/ring/Makefile b/src/spdk/dpdk/drivers/mempool/ring/Makefile
new file mode 100644
index 000000000..8624502da
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/ring/Makefile
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2017 NXP
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_mempool_ring.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mempool -lrte_ring
+
+EXPORT_MAP := rte_mempool_ring_version.map
+
+SRCS-$(CONFIG_RTE_DRIVER_MEMPOOL_RING) += rte_mempool_ring.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/mempool/ring/meson.build b/src/spdk/dpdk/drivers/mempool/ring/meson.build
new file mode 100644
index 000000000..a021e908c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/ring/meson.build
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+sources = files('rte_mempool_ring.c')
diff --git a/src/spdk/dpdk/drivers/mempool/ring/rte_mempool_ring.c b/src/spdk/dpdk/drivers/mempool/ring/rte_mempool_ring.c
new file mode 100644
index 000000000..bc123fc52
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/ring/rte_mempool_ring.c
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#include <rte_errno.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+
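+/*
+ * rte_ring bulk enqueue/dequeue operate all-or-nothing and return the
+ * number of objects processed, so a zero return maps to -ENOBUFS for
+ * the mempool API.
+ */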
+static int
+common_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
+ unsigned n)
+{
+ return rte_ring_mp_enqueue_bulk(mp->pool_data,
+ obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
+}
+
+static int
+common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
+ unsigned n)
+{
+ return rte_ring_sp_enqueue_bulk(mp->pool_data,
+ obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
+}
+
+static int
+common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
+{
+ return rte_ring_mc_dequeue_bulk(mp->pool_data,
+ obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
+}
+
+static int
+common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
+{
+ return rte_ring_sc_dequeue_bulk(mp->pool_data,
+ obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
+}
+
+static unsigned
+common_ring_get_count(const struct rte_mempool *mp)
+{
+ return rte_ring_count(mp->pool_data);
+}
+
+
+static int
+common_ring_alloc(struct rte_mempool *mp)
+{
+ int rg_flags = 0, ret;
+ char rg_name[RTE_RING_NAMESIZE];
+ struct rte_ring *r;
+
+ ret = snprintf(rg_name, sizeof(rg_name),
+ RTE_MEMPOOL_MZ_FORMAT, mp->name);
+ if (ret < 0 || ret >= (int)sizeof(rg_name)) {
+ rte_errno = ENAMETOOLONG;
+ return -rte_errno;
+ }
+
+ /* ring flags */
+ if (mp->flags & MEMPOOL_F_SP_PUT)
+ rg_flags |= RING_F_SP_ENQ;
+ if (mp->flags & MEMPOOL_F_SC_GET)
+ rg_flags |= RING_F_SC_DEQ;
+
+ /*
+ * Allocate the ring that will be used to store objects.
+ * Ring functions return appropriate errors if we are running as a
+ * secondary process etc., so no checks are made here for that
+ * condition.
+ */
+ r = rte_ring_create(rg_name, rte_align32pow2(mp->size + 1),
+ mp->socket_id, rg_flags);
+ if (r == NULL)
+ return -rte_errno;
+
+ mp->pool_data = r;
+
+ return 0;
+}
+
+static void
+common_ring_free(struct rte_mempool *mp)
+{
+ rte_ring_free(mp->pool_data);
+}
+
+/*
+ * The following 4 declarations of mempool ops structs address
+ * the need for the backward compatible mempool handlers for
+ * single/multi producers and single/multi consumers as dictated by the
+ * flags provided to the rte_mempool_create function
+ */
+static const struct rte_mempool_ops ops_mp_mc = {
+ .name = "ring_mp_mc",
+ .alloc = common_ring_alloc,
+ .free = common_ring_free,
+ .enqueue = common_ring_mp_enqueue,
+ .dequeue = common_ring_mc_dequeue,
+ .get_count = common_ring_get_count,
+};
+
+static const struct rte_mempool_ops ops_sp_sc = {
+ .name = "ring_sp_sc",
+ .alloc = common_ring_alloc,
+ .free = common_ring_free,
+ .enqueue = common_ring_sp_enqueue,
+ .dequeue = common_ring_sc_dequeue,
+ .get_count = common_ring_get_count,
+};
+
+static const struct rte_mempool_ops ops_mp_sc = {
+ .name = "ring_mp_sc",
+ .alloc = common_ring_alloc,
+ .free = common_ring_free,
+ .enqueue = common_ring_mp_enqueue,
+ .dequeue = common_ring_sc_dequeue,
+ .get_count = common_ring_get_count,
+};
+
+static const struct rte_mempool_ops ops_sp_mc = {
+ .name = "ring_sp_mc",
+ .alloc = common_ring_alloc,
+ .free = common_ring_free,
+ .enqueue = common_ring_sp_enqueue,
+ .dequeue = common_ring_mc_dequeue,
+ .get_count = common_ring_get_count,
+};
+
+MEMPOOL_REGISTER_OPS(ops_mp_mc);
+MEMPOOL_REGISTER_OPS(ops_sp_sc);
+MEMPOOL_REGISTER_OPS(ops_mp_sc);
+MEMPOOL_REGISTER_OPS(ops_sp_mc);
diff --git a/src/spdk/dpdk/drivers/mempool/ring/rte_mempool_ring_version.map b/src/spdk/dpdk/drivers/mempool/ring/rte_mempool_ring_version.map
new file mode 100644
index 000000000..f9f17e4f6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/ring/rte_mempool_ring_version.map
@@ -0,0 +1,3 @@
+DPDK_20.0 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/mempool/stack/Makefile b/src/spdk/dpdk/drivers/mempool/stack/Makefile
new file mode 100644
index 000000000..ccfd97040
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/stack/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2017 NXP
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_mempool_stack.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# Headers
+CFLAGS += -I$(RTE_SDK)/lib/librte_mempool
+LDLIBS += -lrte_eal -lrte_mempool -lrte_stack
+
+EXPORT_MAP := rte_mempool_stack_version.map
+
+SRCS-$(CONFIG_RTE_DRIVER_MEMPOOL_STACK) += rte_mempool_stack.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/mempool/stack/meson.build b/src/spdk/dpdk/drivers/mempool/stack/meson.build
new file mode 100644
index 000000000..580dde79e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/stack/meson.build
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017-2019 Intel Corporation
+
+sources = files('rte_mempool_stack.c')
+
+deps += ['stack']
diff --git a/src/spdk/dpdk/drivers/mempool/stack/rte_mempool_stack.c b/src/spdk/dpdk/drivers/mempool/stack/rte_mempool_stack.c
new file mode 100644
index 000000000..7e85c8d6b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/stack/rte_mempool_stack.c
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2019 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <rte_mempool.h>
+#include <rte_stack.h>
+
+static int
+__stack_alloc(struct rte_mempool *mp, uint32_t flags)
+{
+ char name[RTE_STACK_NAMESIZE];
+ struct rte_stack *s;
+ int ret;
+
+ ret = snprintf(name, sizeof(name),
+ RTE_MEMPOOL_MZ_FORMAT, mp->name);
+ if (ret < 0 || ret >= (int)sizeof(name)) {
+ rte_errno = ENAMETOOLONG;
+ return -rte_errno;
+ }
+
+ s = rte_stack_create(name, mp->size, mp->socket_id, flags);
+ if (s == NULL)
+ return -rte_errno;
+
+ mp->pool_data = s;
+
+ return 0;
+}
+
+static int
+stack_alloc(struct rte_mempool *mp)
+{
+ return __stack_alloc(mp, 0);
+}
+
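+/* Lock-free variant: back the mempool with an RTE_STACK_F_LF stack */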
+static int
+lf_stack_alloc(struct rte_mempool *mp)
+{
+ return __stack_alloc(mp, RTE_STACK_F_LF);
+}
+
+static int
+stack_enqueue(struct rte_mempool *mp, void * const *obj_table,
+ unsigned int n)
+{
+ struct rte_stack *s = mp->pool_data;
+
+ return rte_stack_push(s, obj_table, n) == 0 ? -ENOBUFS : 0;
+}
+
+static int
+stack_dequeue(struct rte_mempool *mp, void **obj_table,
+ unsigned int n)
+{
+ struct rte_stack *s = mp->pool_data;
+
+ return rte_stack_pop(s, obj_table, n) == 0 ? -ENOBUFS : 0;
+}
+
+static unsigned
+stack_get_count(const struct rte_mempool *mp)
+{
+ struct rte_stack *s = mp->pool_data;
+
+ return rte_stack_count(s);
+}
+
+static void
+stack_free(struct rte_mempool *mp)
+{
+ struct rte_stack *s = mp->pool_data;
+
+ rte_stack_free(s);
+}
+
+static struct rte_mempool_ops ops_stack = {
+ .name = "stack",
+ .alloc = stack_alloc,
+ .free = stack_free,
+ .enqueue = stack_enqueue,
+ .dequeue = stack_dequeue,
+ .get_count = stack_get_count
+};
+
+static struct rte_mempool_ops ops_lf_stack = {
+ .name = "lf_stack",
+ .alloc = lf_stack_alloc,
+ .free = stack_free,
+ .enqueue = stack_enqueue,
+ .dequeue = stack_dequeue,
+ .get_count = stack_get_count
+};
+
+MEMPOOL_REGISTER_OPS(ops_stack);
+MEMPOOL_REGISTER_OPS(ops_lf_stack);
diff --git a/src/spdk/dpdk/drivers/mempool/stack/rte_mempool_stack_version.map b/src/spdk/dpdk/drivers/mempool/stack/rte_mempool_stack_version.map
new file mode 100644
index 000000000..f9f17e4f6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/mempool/stack/rte_mempool_stack_version.map
@@ -0,0 +1,3 @@
+DPDK_20.0 {
+ local: *;
+};