author	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-04-27 18:24:20 +0000
committer	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-04-27 18:24:20 +0000
commit	483eb2f56657e8e7f419ab1a4fab8dce9ade8609 (patch)
tree	e5d88d25d870d5dedacb6bbdbe2a966086a0a5cf /src/spdk/dpdk/drivers/bus
parent	Initial commit. (diff)
Adding upstream version 14.2.21.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/dpdk/drivers/bus')
-rw-r--r--  src/spdk/dpdk/drivers/bus/Makefile  15
-rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/Makefile  52
-rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/base/fman/fman.c  581
-rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/base/fman/fman_hw.c  611
-rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/base/fman/netcfg_layer.c  160
-rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/base/fman/of.c  587
-rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman.c  361
-rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman.h  541
-rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman_driver.c  290
-rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman_priv.h  92
-rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/base/qbman/dpaa_alloc.c  71
-rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/base/qbman/dpaa_sys.c  103
-rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/base/qbman/dpaa_sys.h  28
-rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/base/qbman/process.c  298
-rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman.c  2755
-rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman.h  913
-rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman_driver.c  362
-rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman_priv.h  278
-rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/dpaa_bus.c  646
-rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/include/compat.h  384
-rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/include/dpaa_bits.h  39
-rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/include/dpaa_list.h  75
-rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/include/dpaa_rbtree.h  117
-rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/include/fman.h  425
-rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/include/fsl_bman.h  342
-rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/include/fsl_fman.h  154
-rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/include/fsl_fman_crc64.h  230
-rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/include/fsl_qman.h  2057
-rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/include/fsl_usd.h  78
-rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/include/netcfg.h  63
-rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/include/of.h  159
-rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/include/process.h  77
-rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/meson.build  29
-rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/rte_bus_dpaa_version.map  104
-rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/rte_dpaa_bus.h  192
-rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/rte_dpaa_logs.h  98
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/Makefile  48
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/fslmc_bus.c  528
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/fslmc_logs.h  41
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/fslmc_vfio.c  789
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/fslmc_vfio.h  54
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/mc/dpbp.c  346
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/mc/dpci.c  440
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/mc/dpcon.c  331
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/mc/dpdmai.c  429
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/mc/dpio.c  470
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/mc/dpmng.c  84
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpbp.h  95
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpbp_cmd.h  98
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpci.h  220
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpci_cmd.h  132
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpcon.h  89
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpcon_cmd.h  75
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpdmai.h  189
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpdmai_cmd.h  107
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpio.h  132
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpio_cmd.h  115
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpmng.h  54
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpmng_cmd.h  35
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_mc_cmd.h  175
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_mc_sys.h  63
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/mc/mc_sys.c  84
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/meson.build  27
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c  123
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c  187
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c  527
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h  52
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h  379
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/qbman/include/compat.h  102
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h  132
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/qbman/include/fsl_qbman_debug.h  30
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h  1186
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_debug.c  66
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_portal.c  1425
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_portal.h  144
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_sys.h  380
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_sys_decl.h  53
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/rte_bus_fslmc_version.map  118
-rw-r--r--  src/spdk/dpdk/drivers/bus/fslmc/rte_fslmc.h  216
-rw-r--r--  src/spdk/dpdk/drivers/bus/ifpga/Makefile  32
-rw-r--r--  src/spdk/dpdk/drivers/bus/ifpga/ifpga_bus.c  467
-rw-r--r--  src/spdk/dpdk/drivers/bus/ifpga/ifpga_common.c  88
-rw-r--r--  src/spdk/dpdk/drivers/bus/ifpga/ifpga_common.h  18
-rw-r--r--  src/spdk/dpdk/drivers/bus/ifpga/ifpga_logs.h  31
-rw-r--r--  src/spdk/dpdk/drivers/bus/ifpga/meson.build  8
-rw-r--r--  src/spdk/dpdk/drivers/bus/ifpga/rte_bus_ifpga.h  149
-rw-r--r--  src/spdk/dpdk/drivers/bus/ifpga/rte_bus_ifpga_version.map  10
-rw-r--r--  src/spdk/dpdk/drivers/bus/meson.build  7
-rw-r--r--  src/spdk/dpdk/drivers/bus/pci/Makefile  38
-rw-r--r--  src/spdk/dpdk/drivers/bus/pci/bsd/Makefile  4
-rw-r--r--  src/spdk/dpdk/drivers/bus/pci/bsd/pci.c  651
-rw-r--r--  src/spdk/dpdk/drivers/bus/pci/linux/Makefile  8
-rw-r--r--  src/spdk/dpdk/drivers/bus/pci/linux/pci.c  926
-rw-r--r--  src/spdk/dpdk/drivers/bus/pci/linux/pci_init.h  88
-rw-r--r--  src/spdk/dpdk/drivers/bus/pci/linux/pci_uio.c  562
-rw-r--r--  src/spdk/dpdk/drivers/bus/pci/linux/pci_vfio.c  794
-rw-r--r--  src/spdk/dpdk/drivers/bus/pci/meson.build  19
-rw-r--r--  src/spdk/dpdk/drivers/bus/pci/pci_common.c  443
-rw-r--r--  src/spdk/dpdk/drivers/bus/pci/pci_common_uio.c  206
-rw-r--r--  src/spdk/dpdk/drivers/bus/pci/private.h  169
-rw-r--r--  src/spdk/dpdk/drivers/bus/pci/rte_bus_pci.h  315
-rw-r--r--  src/spdk/dpdk/drivers/bus/pci/rte_bus_pci_version.map  18
-rw-r--r--  src/spdk/dpdk/drivers/bus/vdev/Makefile  30
-rw-r--r--  src/spdk/dpdk/drivers/bus/vdev/meson.build  7
-rw-r--r--  src/spdk/dpdk/drivers/bus/vdev/rte_bus_vdev.h  162
-rw-r--r--  src/spdk/dpdk/drivers/bus/vdev/rte_bus_vdev_version.map  18
-rw-r--r--  src/spdk/dpdk/drivers/bus/vdev/vdev.c  549
-rw-r--r--  src/spdk/dpdk/drivers/bus/vdev/vdev_logs.h  16
-rw-r--r--  src/spdk/dpdk/drivers/bus/vmbus/Makefile  36
-rw-r--r--  src/spdk/dpdk/drivers/bus/vmbus/linux/Makefile  3
-rw-r--r--  src/spdk/dpdk/drivers/bus/vmbus/linux/vmbus_bus.c  355
-rw-r--r--  src/spdk/dpdk/drivers/bus/vmbus/linux/vmbus_uio.c  398
-rw-r--r--  src/spdk/dpdk/drivers/bus/vmbus/meson.build  18
-rw-r--r--  src/spdk/dpdk/drivers/bus/vmbus/private.h  132
-rw-r--r--  src/spdk/dpdk/drivers/bus/vmbus/rte_bus_vmbus.h  407
-rw-r--r--  src/spdk/dpdk/drivers/bus/vmbus/rte_bus_vmbus_version.map  29
-rw-r--r--  src/spdk/dpdk/drivers/bus/vmbus/rte_vmbus_reg.h  344
-rw-r--r--  src/spdk/dpdk/drivers/bus/vmbus/vmbus_bufring.c  244
-rw-r--r--  src/spdk/dpdk/drivers/bus/vmbus/vmbus_channel.c  405
-rw-r--r--  src/spdk/dpdk/drivers/bus/vmbus/vmbus_common.c  286
-rw-r--r--  src/spdk/dpdk/drivers/bus/vmbus/vmbus_common_uio.c  232
121 files changed, 32459 insertions, 0 deletions
diff --git a/src/spdk/dpdk/drivers/bus/Makefile b/src/spdk/dpdk/drivers/bus/Makefile
new file mode 100644
index 00000000..cea3b55e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2016 NXP
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+DIRS-$(CONFIG_RTE_LIBRTE_DPAA_BUS) += dpaa
+ifeq ($(CONFIG_RTE_EAL_VFIO),y)
+DIRS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += fslmc
+endif
+DIRS-$(CONFIG_RTE_LIBRTE_IFPGA_BUS) += ifpga
+DIRS-$(CONFIG_RTE_LIBRTE_PCI_BUS) += pci
+DIRS-$(CONFIG_RTE_LIBRTE_VDEV_BUS) += vdev
+DIRS-$(CONFIG_RTE_LIBRTE_VMBUS) += vmbus
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/Makefile b/src/spdk/dpdk/drivers/bus/dpaa/Makefile
new file mode 100644
index 00000000..bffaa9d9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/Makefile
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2016 NXP
+
+include $(RTE_SDK)/mk/rte.vars.mk
+RTE_BUS_DPAA=$(RTE_SDK)/drivers/bus/dpaa
+
+#
+# library name
+#
+LIB = librte_bus_dpaa.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS := -I$(SRCDIR) $(CFLAGS)
+CFLAGS += -O3 $(WERROR_FLAGS)
+CFLAGS += -Wno-pointer-arith
+CFLAGS += -Wno-cast-qual
+CFLAGS += -D _GNU_SOURCE
+CFLAGS += -I$(RTE_BUS_DPAA)/
+CFLAGS += -I$(RTE_BUS_DPAA)/include
+CFLAGS += -I$(RTE_BUS_DPAA)/base/qbman
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
+
+# versioning export map
+EXPORT_MAP := rte_bus_dpaa_version.map
+
+LIBABIVER := 1
+
+# all sources are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA_BUS) += \
+ dpaa_bus.c
+
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA_BUS) += \
+ base/fman/fman.c \
+ base/fman/fman_hw.c \
+ base/fman/of.c \
+ base/fman/netcfg_layer.c \
+ base/qbman/process.c \
+ base/qbman/bman.c \
+ base/qbman/bman_driver.c \
+ base/qbman/qman.c \
+ base/qbman/qman_driver.c \
+ base/qbman/dpaa_alloc.c \
+ base/qbman/dpaa_sys.c
+
+# Link Pthread
+LDLIBS += -lpthread
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/fman/fman.c b/src/spdk/dpdk/drivers/bus/dpaa/base/fman/fman.c
new file mode 100644
index 00000000..bdb70042
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/fman/fman.c
@@ -0,0 +1,581 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2010-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <ifaddrs.h>
+
+/* This header declares the driver interface we implement */
+#include <fman.h>
+#include <of.h>
+#include <rte_dpaa_logs.h>
+
+#define QMI_PORT_REGS_OFFSET 0x400
+
+/* CCSR map address to access ccsr based register */
+void *fman_ccsr_map;
+/* fman version info */
+u16 fman_ip_rev;
+static int get_once;
+u32 fman_dealloc_bufs_mask_hi;
+u32 fman_dealloc_bufs_mask_lo;
+
+int fman_ccsr_map_fd = -1;
+static COMPAT_LIST_HEAD(__ifs);
+
+/* This is the (const) global variable that callers have read-only access to.
+ * Internally, we have read-write access directly to __ifs.
+ */
+const struct list_head *fman_if_list = &__ifs;
+
+static void
+if_destructor(struct __fman_if *__if)
+{
+ struct fman_if_bpool *bp, *tmpbp;
+
+ if (!__if)
+ return;
+
+ if (__if->__if.mac_type == fman_offline)
+ goto cleanup;
+
+ list_for_each_entry_safe(bp, tmpbp, &__if->__if.bpool_list, node) {
+ list_del(&bp->node);
+ free(bp);
+ }
+cleanup:
+ free(__if);
+}
+
+static int
+fman_get_ip_rev(const struct device_node *fman_node)
+{
+ const uint32_t *fman_addr;
+ uint64_t phys_addr;
+ uint64_t regs_size;
+ uint32_t ip_rev_1;
+ int _errno;
+
+ fman_addr = of_get_address(fman_node, 0, &regs_size, NULL);
+ if (!fman_addr) {
+ pr_err("of_get_address cannot return fman address\n");
+ return -EINVAL;
+ }
+ phys_addr = of_translate_address(fman_node, fman_addr);
+ if (!phys_addr) {
+ pr_err("of_translate_address failed\n");
+ return -EINVAL;
+ }
+ fman_ccsr_map = mmap(NULL, regs_size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fman_ccsr_map_fd, phys_addr);
+ if (fman_ccsr_map == MAP_FAILED) {
+		pr_err("Cannot map FMan ccsr base");
+ return -EINVAL;
+ }
+
+ ip_rev_1 = in_be32(fman_ccsr_map + FMAN_IP_REV_1);
+ fman_ip_rev = (ip_rev_1 & FMAN_IP_REV_1_MAJOR_MASK) >>
+ FMAN_IP_REV_1_MAJOR_SHIFT;
+
+ _errno = munmap(fman_ccsr_map, regs_size);
+ if (_errno)
+ pr_err("munmap() of FMan ccsr failed");
+
+ return 0;
+}
+
+static int
+fman_get_mac_index(uint64_t regs_addr_host, uint8_t *mac_idx)
+{
+ int ret = 0;
+
+ /*
+ * MAC1 : E_0000h
+ * MAC2 : E_2000h
+ * MAC3 : E_4000h
+ * MAC4 : E_6000h
+ * MAC5 : E_8000h
+ * MAC6 : E_A000h
+ * MAC7 : E_C000h
+ * MAC8 : E_E000h
+ * MAC9 : F_0000h
+ * MAC10: F_2000h
+	 * MAC10: F_2000h
+	 */
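+	/* Note: the valid bases are spaced 0x2000 apart, so this mapping is
+	 * equivalent to (addr - 0xE0000) / 0x2000 + 1; the explicit switch
+	 * below additionally rejects unaligned addresses.
+	 */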
+ switch (regs_addr_host) {
+ case 0xE0000:
+ *mac_idx = 1;
+ break;
+ case 0xE2000:
+ *mac_idx = 2;
+ break;
+ case 0xE4000:
+ *mac_idx = 3;
+ break;
+ case 0xE6000:
+ *mac_idx = 4;
+ break;
+ case 0xE8000:
+ *mac_idx = 5;
+ break;
+ case 0xEA000:
+ *mac_idx = 6;
+ break;
+ case 0xEC000:
+ *mac_idx = 7;
+ break;
+ case 0xEE000:
+ *mac_idx = 8;
+ break;
+ case 0xF0000:
+ *mac_idx = 9;
+ break;
+ case 0xF2000:
+ *mac_idx = 10;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int
+fman_if_init(const struct device_node *dpa_node)
+{
+ const char *rprop, *mprop;
+ uint64_t phys_addr;
+ struct __fman_if *__if;
+ struct fman_if_bpool *bpool;
+
+ const phandle *mac_phandle, *ports_phandle, *pools_phandle;
+ const phandle *tx_channel_id = NULL, *mac_addr, *cell_idx;
+ const phandle *rx_phandle, *tx_phandle;
+ uint64_t tx_phandle_host[4] = {0};
+ uint64_t rx_phandle_host[4] = {0};
+ uint64_t regs_addr_host = 0;
+ uint64_t cell_idx_host = 0;
+
+ const struct device_node *mac_node = NULL, *tx_node;
+ const struct device_node *pool_node, *fman_node, *rx_node;
+ const uint32_t *regs_addr = NULL;
+ const char *mname, *fname;
+ const char *dname = dpa_node->full_name;
+ size_t lenp;
+ int _errno;
+ const char *char_prop;
+ uint32_t na;
+
+ if (of_device_is_available(dpa_node) == false)
+ return 0;
+
+ rprop = "fsl,qman-frame-queues-rx";
+ mprop = "fsl,fman-mac";
+
+ /* Allocate an object for this network interface */
+ __if = malloc(sizeof(*__if));
+ if (!__if) {
+ FMAN_ERR(-ENOMEM, "malloc(%zu)\n", sizeof(*__if));
+ goto err;
+ }
+ memset(__if, 0, sizeof(*__if));
+ INIT_LIST_HEAD(&__if->__if.bpool_list);
+ strncpy(__if->node_path, dpa_node->full_name, PATH_MAX - 1);
+ __if->node_path[PATH_MAX - 1] = '\0';
+
+	/* Obtain the MAC node used by this interface (except for MAC-less) */
+ mac_phandle = of_get_property(dpa_node, mprop, &lenp);
+ if (!mac_phandle) {
+ FMAN_ERR(-EINVAL, "%s: no %s\n", dname, mprop);
+ goto err;
+ }
+ assert(lenp == sizeof(phandle));
+ mac_node = of_find_node_by_phandle(*mac_phandle);
+ if (!mac_node) {
+		FMAN_ERR(-ENXIO, "%s: bad 'fsl,fman-mac'\n", dname);
+ goto err;
+ }
+ mname = mac_node->full_name;
+
+ /* Map the CCSR regs for the MAC node */
+ regs_addr = of_get_address(mac_node, 0, &__if->regs_size, NULL);
+ if (!regs_addr) {
+ FMAN_ERR(-EINVAL, "of_get_address(%s)\n", mname);
+ goto err;
+ }
+ phys_addr = of_translate_address(mac_node, regs_addr);
+ if (!phys_addr) {
+ FMAN_ERR(-EINVAL, "of_translate_address(%s, %p)\n",
+ mname, regs_addr);
+ goto err;
+ }
+ __if->ccsr_map = mmap(NULL, __if->regs_size,
+ PROT_READ | PROT_WRITE, MAP_SHARED,
+ fman_ccsr_map_fd, phys_addr);
+ if (__if->ccsr_map == MAP_FAILED) {
+ FMAN_ERR(-errno, "mmap(0x%"PRIx64")\n", phys_addr);
+ goto err;
+ }
+ na = of_n_addr_cells(mac_node);
+ /* Get rid of endianness (issues). Convert to host byte order */
+ regs_addr_host = of_read_number(regs_addr, na);
+
+
+ /* Get the index of the Fman this i/f belongs to */
+ fman_node = of_get_parent(mac_node);
+ na = of_n_addr_cells(mac_node);
+ if (!fman_node) {
+ FMAN_ERR(-ENXIO, "of_get_parent(%s)\n", mname);
+ goto err;
+ }
+ fname = fman_node->full_name;
+ cell_idx = of_get_property(fman_node, "cell-index", &lenp);
+ if (!cell_idx) {
+		FMAN_ERR(-ENXIO, "%s: no cell-index\n", fname);
+ goto err;
+ }
+ assert(lenp == sizeof(*cell_idx));
+ cell_idx_host = of_read_number(cell_idx, lenp / sizeof(phandle));
+ __if->__if.fman_idx = cell_idx_host;
+ if (!get_once) {
+ _errno = fman_get_ip_rev(fman_node);
+ if (_errno) {
+ FMAN_ERR(-ENXIO, "%s: ip_rev is not available\n",
+ fname);
+ goto err;
+ }
+ }
+
+ if (fman_ip_rev >= FMAN_V3) {
+ /*
+ * Set A2V, OVOM, EBD bits in contextA to allow external
+ * buffer deallocation by fman.
+ */
+ fman_dealloc_bufs_mask_hi = FMAN_V3_CONTEXTA_EN_A2V |
+ FMAN_V3_CONTEXTA_EN_OVOM;
+ fman_dealloc_bufs_mask_lo = FMAN_V3_CONTEXTA_EN_EBD;
+ } else {
+ fman_dealloc_bufs_mask_hi = 0;
+ fman_dealloc_bufs_mask_lo = 0;
+ }
+	/* Is the MAC node 1G or 10G? */
+ __if->__if.is_memac = 0;
+
+ if (of_device_is_compatible(mac_node, "fsl,fman-1g-mac"))
+ __if->__if.mac_type = fman_mac_1g;
+ else if (of_device_is_compatible(mac_node, "fsl,fman-10g-mac"))
+ __if->__if.mac_type = fman_mac_10g;
+ else if (of_device_is_compatible(mac_node, "fsl,fman-memac")) {
+ __if->__if.is_memac = 1;
+ char_prop = of_get_property(mac_node, "phy-connection-type",
+ NULL);
+ if (!char_prop) {
+			printf("memac: unknown MII type, assuming 1G\n");
+			/* Right now forcing memac to 1G in case of error */
+ __if->__if.mac_type = fman_mac_1g;
+ } else {
+ if (strstr(char_prop, "sgmii"))
+ __if->__if.mac_type = fman_mac_1g;
+ else if (strstr(char_prop, "rgmii")) {
+ __if->__if.mac_type = fman_mac_1g;
+ __if->__if.is_rgmii = 1;
+ } else if (strstr(char_prop, "xgmii"))
+ __if->__if.mac_type = fman_mac_10g;
+ }
+ } else {
+ FMAN_ERR(-EINVAL, "%s: unknown MAC type\n", mname);
+ goto err;
+ }
+
+ /*
+ * For MAC ports, we cannot rely on cell-index. In
+ * T2080, two of the 10G ports on single FMAN have same
+ * duplicate cell-indexes as the other two 10G ports on
+ * same FMAN. Hence, we now rely upon addresses of the
+ * ports from device tree to deduce the index.
+ */
+
+ _errno = fman_get_mac_index(regs_addr_host, &__if->__if.mac_idx);
+ if (_errno) {
+ FMAN_ERR(-EINVAL, "Invalid register address: %" PRIx64,
+ regs_addr_host);
+ goto err;
+ }
+
+ /* Extract the MAC address for private and shared interfaces */
+ mac_addr = of_get_property(mac_node, "local-mac-address",
+ &lenp);
+ if (!mac_addr) {
+ FMAN_ERR(-EINVAL, "%s: no local-mac-address\n",
+ mname);
+ goto err;
+ }
+ memcpy(&__if->__if.mac_addr, mac_addr, ETHER_ADDR_LEN);
+
+ /* Extract the Tx port (it's the second of the two port handles)
+ * and get its channel ID
+ */
+ ports_phandle = of_get_property(mac_node, "fsl,port-handles",
+ &lenp);
+ if (!ports_phandle)
+ ports_phandle = of_get_property(mac_node, "fsl,fman-ports",
+ &lenp);
+ if (!ports_phandle) {
+ FMAN_ERR(-EINVAL, "%s: no fsl,port-handles\n",
+ mname);
+ goto err;
+ }
+ assert(lenp == (2 * sizeof(phandle)));
+ tx_node = of_find_node_by_phandle(ports_phandle[1]);
+ if (!tx_node) {
+ FMAN_ERR(-ENXIO, "%s: bad fsl,port-handle[1]\n", mname);
+ goto err;
+ }
+ /* Extract the channel ID (from tx-port-handle) */
+ tx_channel_id = of_get_property(tx_node, "fsl,qman-channel-id",
+ &lenp);
+ if (!tx_channel_id) {
+ FMAN_ERR(-EINVAL, "%s: no fsl-qman-channel-id\n",
+ tx_node->full_name);
+ goto err;
+ }
+
+ rx_node = of_find_node_by_phandle(ports_phandle[0]);
+ if (!rx_node) {
+ FMAN_ERR(-ENXIO, "%s: bad fsl,port-handle[0]\n", mname);
+ goto err;
+ }
+ regs_addr = of_get_address(rx_node, 0, &__if->regs_size, NULL);
+ if (!regs_addr) {
+ FMAN_ERR(-EINVAL, "of_get_address(%s)\n", mname);
+ goto err;
+ }
+ phys_addr = of_translate_address(rx_node, regs_addr);
+ if (!phys_addr) {
+ FMAN_ERR(-EINVAL, "of_translate_address(%s, %p)\n",
+ mname, regs_addr);
+ goto err;
+ }
+ __if->bmi_map = mmap(NULL, __if->regs_size,
+ PROT_READ | PROT_WRITE, MAP_SHARED,
+ fman_ccsr_map_fd, phys_addr);
+ if (__if->bmi_map == MAP_FAILED) {
+ FMAN_ERR(-errno, "mmap(0x%"PRIx64")\n", phys_addr);
+ goto err;
+ }
+
+ /* No channel ID for MAC-less */
+ assert(lenp == sizeof(*tx_channel_id));
+ na = of_n_addr_cells(mac_node);
+ __if->__if.tx_channel_id = of_read_number(tx_channel_id, na);
+
+ /* Extract the Rx FQIDs. (Note, the device representation is silly,
+ * there are "counts" that must always be 1.)
+ */
+ rx_phandle = of_get_property(dpa_node, rprop, &lenp);
+ if (!rx_phandle) {
+ FMAN_ERR(-EINVAL, "%s: no fsl,qman-frame-queues-rx\n", dname);
+ goto err;
+ }
+
+ assert(lenp == (4 * sizeof(phandle)));
+
+ na = of_n_addr_cells(mac_node);
+ /* Get rid of endianness (issues). Convert to host byte order */
+ rx_phandle_host[0] = of_read_number(&rx_phandle[0], na);
+ rx_phandle_host[1] = of_read_number(&rx_phandle[1], na);
+ rx_phandle_host[2] = of_read_number(&rx_phandle[2], na);
+ rx_phandle_host[3] = of_read_number(&rx_phandle[3], na);
+
+ assert((rx_phandle_host[1] == 1) && (rx_phandle_host[3] == 1));
+ __if->__if.fqid_rx_err = rx_phandle_host[0];
+ __if->__if.fqid_rx_def = rx_phandle_host[2];
+
+ /* Extract the Tx FQIDs */
+ tx_phandle = of_get_property(dpa_node,
+ "fsl,qman-frame-queues-tx", &lenp);
+ if (!tx_phandle) {
+ FMAN_ERR(-EINVAL, "%s: no fsl,qman-frame-queues-tx\n", dname);
+ goto err;
+ }
+
+ assert(lenp == (4 * sizeof(phandle)));
+	/* TODO: Fix for other cases also */
+ na = of_n_addr_cells(mac_node);
+ /* Get rid of endianness (issues). Convert to host byte order */
+ tx_phandle_host[0] = of_read_number(&tx_phandle[0], na);
+ tx_phandle_host[1] = of_read_number(&tx_phandle[1], na);
+ tx_phandle_host[2] = of_read_number(&tx_phandle[2], na);
+ tx_phandle_host[3] = of_read_number(&tx_phandle[3], na);
+ assert((tx_phandle_host[1] == 1) && (tx_phandle_host[3] == 1));
+ __if->__if.fqid_tx_err = tx_phandle_host[0];
+ __if->__if.fqid_tx_confirm = tx_phandle_host[2];
+
+ /* Obtain the buffer pool nodes used by this interface */
+ pools_phandle = of_get_property(dpa_node, "fsl,bman-buffer-pools",
+ &lenp);
+ if (!pools_phandle) {
+ FMAN_ERR(-EINVAL, "%s: no fsl,bman-buffer-pools\n", dname);
+ goto err;
+ }
+ /* For each pool, parse the corresponding node and add a pool object
+ * to the interface's "bpool_list"
+ */
+ assert(lenp && !(lenp % sizeof(phandle)));
+ while (lenp) {
+ size_t proplen;
+ const phandle *prop;
+ uint64_t bpid_host = 0;
+ uint64_t bpool_host[6] = {0};
+ const char *pname;
+ /* Allocate an object for the pool */
+ bpool = malloc(sizeof(*bpool));
+ if (!bpool) {
+ FMAN_ERR(-ENOMEM, "malloc(%zu)\n", sizeof(*bpool));
+ goto err;
+ }
+ /* Find the pool node */
+ pool_node = of_find_node_by_phandle(*pools_phandle);
+ if (!pool_node) {
+ FMAN_ERR(-ENXIO, "%s: bad fsl,bman-buffer-pools\n",
+ dname);
+ free(bpool);
+ goto err;
+ }
+ pname = pool_node->full_name;
+ /* Extract the BPID property */
+ prop = of_get_property(pool_node, "fsl,bpid", &proplen);
+ if (!prop) {
+ FMAN_ERR(-EINVAL, "%s: no fsl,bpid\n", pname);
+ free(bpool);
+ goto err;
+ }
+ assert(proplen == sizeof(*prop));
+ na = of_n_addr_cells(mac_node);
+ /* Get rid of endianness (issues).
+ * Convert to host byte-order
+ */
+ bpid_host = of_read_number(prop, na);
+ bpool->bpid = bpid_host;
+ /* Extract the cfg property (count/size/addr). "fsl,bpool-cfg"
+ * indicates for the Bman driver to seed the pool.
+ * "fsl,bpool-ethernet-cfg" is used by the network driver. The
+ * two are mutually exclusive, so check for either of them.
+ */
+ prop = of_get_property(pool_node, "fsl,bpool-cfg",
+ &proplen);
+ if (!prop)
+ prop = of_get_property(pool_node,
+ "fsl,bpool-ethernet-cfg",
+ &proplen);
+ if (!prop) {
+ /* It's OK for there to be no bpool-cfg */
+ bpool->count = bpool->size = bpool->addr = 0;
+ } else {
+ assert(proplen == (6 * sizeof(*prop)));
+ na = of_n_addr_cells(mac_node);
+ /* Get rid of endianness (issues).
+ * Convert to host byte order
+ */
+ bpool_host[0] = of_read_number(&prop[0], na);
+ bpool_host[1] = of_read_number(&prop[1], na);
+ bpool_host[2] = of_read_number(&prop[2], na);
+ bpool_host[3] = of_read_number(&prop[3], na);
+ bpool_host[4] = of_read_number(&prop[4], na);
+ bpool_host[5] = of_read_number(&prop[5], na);
+
+ bpool->count = ((uint64_t)bpool_host[0] << 32) |
+ bpool_host[1];
+ bpool->size = ((uint64_t)bpool_host[2] << 32) |
+ bpool_host[3];
+ bpool->addr = ((uint64_t)bpool_host[4] << 32) |
+ bpool_host[5];
+ }
+ /* Parsing of the pool is complete, add it to the interface
+ * list.
+ */
+ list_add_tail(&bpool->node, &__if->__if.bpool_list);
+ lenp -= sizeof(phandle);
+ pools_phandle++;
+ }
+
+ /* Parsing of the network interface is complete, add it to the list */
+	DPAA_BUS_LOG(DEBUG, "Found %s, Tx Channel = %x, FMAN = %x, "
+ "Port ID = %x",
+ dname, __if->__if.tx_channel_id, __if->__if.fman_idx,
+ __if->__if.mac_idx);
+
+ list_add_tail(&__if->__if.node, &__ifs);
+ return 0;
+err:
+ if_destructor(__if);
+ return _errno;
+}
+
+int
+fman_init(void)
+{
+ const struct device_node *dpa_node;
+ int _errno;
+
+ /* If multiple dependencies try to initialise the Fman driver, don't
+ * panic.
+ */
+ if (fman_ccsr_map_fd != -1)
+ return 0;
+
+ fman_ccsr_map_fd = open(FMAN_DEVICE_PATH, O_RDWR);
+ if (unlikely(fman_ccsr_map_fd < 0)) {
+ DPAA_BUS_LOG(ERR, "Unable to open (/dev/mem)");
+ return fman_ccsr_map_fd;
+ }
+
+ for_each_compatible_node(dpa_node, NULL, "fsl,dpa-ethernet-init") {
+ _errno = fman_if_init(dpa_node);
+ if (_errno) {
+ FMAN_ERR(_errno, "if_init(%s)\n", dpa_node->full_name);
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ fman_finish();
+ return _errno;
+}
+
+void
+fman_finish(void)
+{
+ struct __fman_if *__if, *tmpif;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ list_for_each_entry_safe(__if, tmpif, &__ifs, __if.node) {
+ int _errno;
+
+ /* disable Rx and Tx */
+ if ((__if->__if.mac_type == fman_mac_1g) &&
+ (!__if->__if.is_memac))
+ out_be32(__if->ccsr_map + 0x100,
+ in_be32(__if->ccsr_map + 0x100) & ~(u32)0x5);
+ else
+ out_be32(__if->ccsr_map + 8,
+ in_be32(__if->ccsr_map + 8) & ~(u32)3);
+ /* release the mapping */
+ _errno = munmap(__if->ccsr_map, __if->regs_size);
+ if (unlikely(_errno < 0))
+ fprintf(stderr, "%s:%d:%s(): munmap() = %d (%s)\n",
+ __FILE__, __LINE__, __func__,
+ -errno, strerror(errno));
+ printf("Tearing down %s\n", __if->node_path);
+ list_del(&__if->__if.node);
+ free(__if);
+ }
+
+ close(fman_ccsr_map_fd);
+ fman_ccsr_map_fd = -1;
+}
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/fman/fman_hw.c b/src/spdk/dpdk/drivers/bus/dpaa/base/fman/fman_hw.c
new file mode 100644
index 00000000..4ebbc3d3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/fman/fman_hw.c
@@ -0,0 +1,611 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2017 NXP
+ *
+ */
+
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <ifaddrs.h>
+#include <fman.h>
+/* This header declares things about Fman hardware itself (the format of status
+ * words and an inline implementation of CRC64). We include it only in order to
+ * instantiate the one global variable it depends on.
+ */
+#include <fsl_fman.h>
+#include <fsl_fman_crc64.h>
+#include <fsl_bman.h>
+
+#define FMAN_SP_SG_DISABLE 0x80000000
+#define FMAN_SP_EXT_BUF_MARG_START_SHIFT 16
+
+/* Instantiate the global variable that the inline CRC64 implementation (in
+ * <fsl_fman.h>) depends on.
+ */
+DECLARE_FMAN_CRC64_TABLE();
+
+#define ETH_ADDR_TO_UINT64(eth_addr) \
+ (uint64_t)(((uint64_t)(eth_addr)[0] << 40) | \
+ ((uint64_t)(eth_addr)[1] << 32) | \
+ ((uint64_t)(eth_addr)[2] << 24) | \
+ ((uint64_t)(eth_addr)[3] << 16) | \
+ ((uint64_t)(eth_addr)[4] << 8) | \
+ ((uint64_t)(eth_addr)[5]))
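+/* For example, the address bytes {0x00, 0x04, 0x9f, 0x01, 0x02, 0x03} pack
+ * into the host value 0x00049f010203 (first octet most significant).
+ */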
+
+void
+fman_if_set_mcast_filter_table(struct fman_if *p)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+ void *hashtable_ctrl;
+ uint32_t i;
+
+ hashtable_ctrl = &((struct memac_regs *)__if->ccsr_map)->hashtable_ctrl;
+ for (i = 0; i < 64; i++)
+ out_be32(hashtable_ctrl, i|HASH_CTRL_MCAST_EN);
+}
+
+void
+fman_if_reset_mcast_filter_table(struct fman_if *p)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+ void *hashtable_ctrl;
+ uint32_t i;
+
+ hashtable_ctrl = &((struct memac_regs *)__if->ccsr_map)->hashtable_ctrl;
+ for (i = 0; i < 64; i++)
+ out_be32(hashtable_ctrl, i & ~HASH_CTRL_MCAST_EN);
+}
+
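+/* Fold the 48-bit address into a 6-bit hash: outer iteration i XORs the
+ * eight bits 8i..8i+7 of the LSB-first value into one parity bit and places
+ * it at hash bit (5 - i), i.e. each hash bit is the parity of one octet.
+ */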
+static
+uint32_t get_mac_hash_code(uint64_t eth_addr)
+{
+ uint64_t mask1, mask2;
+ uint32_t xorVal = 0;
+ uint8_t i, j;
+
+ for (i = 0; i < 6; i++) {
+ mask1 = eth_addr & (uint64_t)0x01;
+ eth_addr >>= 1;
+
+ for (j = 0; j < 7; j++) {
+ mask2 = eth_addr & (uint64_t)0x01;
+ mask1 ^= mask2;
+ eth_addr >>= 1;
+ }
+
+ xorVal |= (mask1 << (5 - i));
+ }
+
+ return xorVal;
+}
+
+int
+fman_if_add_hash_mac_addr(struct fman_if *p, uint8_t *eth)
+{
+ uint64_t eth_addr;
+ void *hashtable_ctrl;
+ uint32_t hash;
+
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+
+ eth_addr = ETH_ADDR_TO_UINT64(eth);
+
+ if (!(eth_addr & GROUP_ADDRESS))
+ return -1;
+
+ hash = get_mac_hash_code(eth_addr) & HASH_CTRL_ADDR_MASK;
+ hash = hash | HASH_CTRL_MCAST_EN;
+
+ hashtable_ctrl = &((struct memac_regs *)__if->ccsr_map)->hashtable_ctrl;
+ out_be32(hashtable_ctrl, hash);
+
+ return 0;
+}
+
+int
+fman_if_get_primary_mac_addr(struct fman_if *p, uint8_t *eth)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+ void *mac_reg =
+ &((struct memac_regs *)__if->ccsr_map)->mac_addr0.mac_addr_l;
+ u32 val = in_be32(mac_reg);
+
+ eth[0] = (val & 0x000000ff) >> 0;
+ eth[1] = (val & 0x0000ff00) >> 8;
+ eth[2] = (val & 0x00ff0000) >> 16;
+ eth[3] = (val & 0xff000000) >> 24;
+
+ mac_reg = &((struct memac_regs *)__if->ccsr_map)->mac_addr0.mac_addr_u;
+ val = in_be32(mac_reg);
+
+ eth[4] = (val & 0x000000ff) >> 0;
+ eth[5] = (val & 0x0000ff00) >> 8;
+
+ return 0;
+}
+
+void
+fman_if_clear_mac_addr(struct fman_if *p, uint8_t addr_num)
+{
+ struct __fman_if *m = container_of(p, struct __fman_if, __if);
+ void *reg;
+
+ if (addr_num) {
+ reg = &((struct memac_regs *)m->ccsr_map)->
+ mac_addr[addr_num-1].mac_addr_l;
+ out_be32(reg, 0x0);
+ reg = &((struct memac_regs *)m->ccsr_map)->
+ mac_addr[addr_num-1].mac_addr_u;
+ out_be32(reg, 0x0);
+ } else {
+ reg = &((struct memac_regs *)m->ccsr_map)->mac_addr0.mac_addr_l;
+ out_be32(reg, 0x0);
+ reg = &((struct memac_regs *)m->ccsr_map)->mac_addr0.mac_addr_u;
+ out_be32(reg, 0x0);
+ }
+}
+
+int
+fman_if_add_mac_addr(struct fman_if *p, uint8_t *eth, uint8_t addr_num)
+{
+ struct __fman_if *m = container_of(p, struct __fman_if, __if);
+
+ void *reg;
+ u32 val;
+
+ memcpy(&m->__if.mac_addr, eth, ETHER_ADDR_LEN);
+
+ if (addr_num)
+ reg = &((struct memac_regs *)m->ccsr_map)->
+ mac_addr[addr_num-1].mac_addr_l;
+ else
+ reg = &((struct memac_regs *)m->ccsr_map)->mac_addr0.mac_addr_l;
+
+ val = (m->__if.mac_addr.addr_bytes[0] |
+ (m->__if.mac_addr.addr_bytes[1] << 8) |
+ (m->__if.mac_addr.addr_bytes[2] << 16) |
+ (m->__if.mac_addr.addr_bytes[3] << 24));
+ out_be32(reg, val);
+
+ if (addr_num)
+ reg = &((struct memac_regs *)m->ccsr_map)->
+ mac_addr[addr_num-1].mac_addr_u;
+ else
+ reg = &((struct memac_regs *)m->ccsr_map)->mac_addr0.mac_addr_u;
+
+ val = ((m->__if.mac_addr.addr_bytes[4] << 0) |
+ (m->__if.mac_addr.addr_bytes[5] << 8));
+ out_be32(reg, val);
+
+ return 0;
+}
+
+void
+fman_if_set_rx_ignore_pause_frames(struct fman_if *p, bool enable)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+ u32 value = 0;
+ void *cmdcfg;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ /* Set Rx Ignore Pause Frames */
+ cmdcfg = &((struct memac_regs *)__if->ccsr_map)->command_config;
+ if (enable)
+ value = in_be32(cmdcfg) | CMD_CFG_PAUSE_IGNORE;
+ else
+ value = in_be32(cmdcfg) & ~CMD_CFG_PAUSE_IGNORE;
+
+ out_be32(cmdcfg, value);
+}
+
+void
+fman_if_conf_max_frame_len(struct fman_if *p, unsigned int max_frame_len)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+ unsigned int *maxfrm;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ /* Set Max frame length */
+ maxfrm = &((struct memac_regs *)__if->ccsr_map)->maxfrm;
+ out_be32(maxfrm, (MAXFRM_RX_MASK & max_frame_len));
+}
+
+void
+fman_if_stats_get(struct fman_if *p, struct rte_eth_stats *stats)
+{
+ struct __fman_if *m = container_of(p, struct __fman_if, __if);
+ struct memac_regs *regs = m->ccsr_map;
+
+	/* read received packet count */
+ stats->ipackets = ((u64)in_be32(&regs->rfrm_u)) << 32 |
+ in_be32(&regs->rfrm_l);
+ stats->ibytes = ((u64)in_be32(&regs->roct_u)) << 32 |
+ in_be32(&regs->roct_l);
+ stats->ierrors = ((u64)in_be32(&regs->rerr_u)) << 32 |
+ in_be32(&regs->rerr_l);
+
+	/* read transmitted packet count */
+ stats->opackets = ((u64)in_be32(&regs->tfrm_u)) << 32 |
+ in_be32(&regs->tfrm_l);
+ stats->obytes = ((u64)in_be32(&regs->toct_u)) << 32 |
+ in_be32(&regs->toct_l);
+ stats->oerrors = ((u64)in_be32(&regs->terr_u)) << 32 |
+ in_be32(&regs->terr_l);
+}
+
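+/* Each statistics counter is a 64-bit value split across a low/high pair of
+ * 32-bit registers laid out contiguously from reoct_l, so counter i lives
+ * at byte offset 8 * i, low word first.
+ */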
+void
+fman_if_stats_get_all(struct fman_if *p, uint64_t *value, int n)
+{
+ struct __fman_if *m = container_of(p, struct __fman_if, __if);
+ struct memac_regs *regs = m->ccsr_map;
+ int i;
+ uint64_t base_offset = offsetof(struct memac_regs, reoct_l);
+
+ for (i = 0; i < n; i++)
+ value[i] = ((u64)in_be32((char *)regs
+ + base_offset + 8 * i + 4)) << 32 |
+ ((u64)in_be32((char *)regs
+ + base_offset + 8 * i));
+}
+
+void
+fman_if_stats_reset(struct fman_if *p)
+{
+ struct __fman_if *m = container_of(p, struct __fman_if, __if);
+ struct memac_regs *regs = m->ccsr_map;
+ uint32_t tmp;
+
+ tmp = in_be32(&regs->statn_config);
+
+ tmp |= STATS_CFG_CLR;
+
+ out_be32(&regs->statn_config, tmp);
+
+ while (in_be32(&regs->statn_config) & STATS_CFG_CLR)
+ ;
+}
+
+void
+fman_if_promiscuous_enable(struct fman_if *p)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+ void *cmdcfg;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ /* Enable Rx promiscuous mode */
+ cmdcfg = &((struct memac_regs *)__if->ccsr_map)->command_config;
+ out_be32(cmdcfg, in_be32(cmdcfg) | CMD_CFG_PROMIS_EN);
+}
+
+void
+fman_if_promiscuous_disable(struct fman_if *p)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+ void *cmdcfg;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ /* Disable Rx promiscuous mode */
+ cmdcfg = &((struct memac_regs *)__if->ccsr_map)->command_config;
+ out_be32(cmdcfg, in_be32(cmdcfg) & (~CMD_CFG_PROMIS_EN));
+}
+
+void
+fman_if_enable_rx(struct fman_if *p)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ /* enable Rx and Tx */
+ out_be32(__if->ccsr_map + 8, in_be32(__if->ccsr_map + 8) | 3);
+}
+
+void
+fman_if_disable_rx(struct fman_if *p)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ /* only disable Rx, not Tx */
+ out_be32(__if->ccsr_map + 8, in_be32(__if->ccsr_map + 8) & ~(u32)2);
+}
+
+void
+fman_if_loopback_enable(struct fman_if *p)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ /* Enable loopback mode */
+ if ((__if->__if.is_memac) && (__if->__if.is_rgmii)) {
+ unsigned int *ifmode =
+ &((struct memac_regs *)__if->ccsr_map)->if_mode;
+ out_be32(ifmode, in_be32(ifmode) | IF_MODE_RLP);
+	} else {
+ unsigned int *cmdcfg =
+ &((struct memac_regs *)__if->ccsr_map)->command_config;
+ out_be32(cmdcfg, in_be32(cmdcfg) | CMD_CFG_LOOPBACK_EN);
+ }
+}
+
+void
+fman_if_loopback_disable(struct fman_if *p)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+ /* Disable loopback mode */
+ if ((__if->__if.is_memac) && (__if->__if.is_rgmii)) {
+ unsigned int *ifmode =
+ &((struct memac_regs *)__if->ccsr_map)->if_mode;
+ out_be32(ifmode, in_be32(ifmode) & ~IF_MODE_RLP);
+ } else {
+ unsigned int *cmdcfg =
+ &((struct memac_regs *)__if->ccsr_map)->command_config;
+ out_be32(cmdcfg, in_be32(cmdcfg) & ~CMD_CFG_LOOPBACK_EN);
+ }
+}
+
+void
+fman_if_set_bp(struct fman_if *fm_if, unsigned num __always_unused,
+ int bpid, size_t bufsize)
+{
+ u32 fmbm_ebmpi;
+ u32 ebmpi_val_ace = 0xc0000000;
+ u32 ebmpi_mask = 0xffc00000;
+
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ fmbm_ebmpi =
+ in_be32(&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_ebmpi[0]);
+ fmbm_ebmpi = ebmpi_val_ace | (fmbm_ebmpi & ebmpi_mask) | (bpid << 16) |
+ (bufsize);
+
+ out_be32(&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_ebmpi[0],
+ fmbm_ebmpi);
+}
+
+int
+fman_if_get_fc_threshold(struct fman_if *fm_if)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ unsigned int *fmbm_mpd;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ fmbm_mpd = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_mpd;
+ return in_be32(fmbm_mpd);
+}
+
+int
+fman_if_set_fc_threshold(struct fman_if *fm_if, u32 high_water,
+ u32 low_water, u32 bpid)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ unsigned int *fmbm_mpd;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ fmbm_mpd = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_mpd;
+ out_be32(fmbm_mpd, FMAN_ENABLE_BPOOL_DEPLETION);
+ return bm_pool_set_hw_threshold(bpid, low_water, high_water);
+
+}
+
+int
+fman_if_get_fc_quanta(struct fman_if *fm_if)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ return in_be32(&((struct memac_regs *)__if->ccsr_map)->pause_quanta[0]);
+}
+
+int
+fman_if_set_fc_quanta(struct fman_if *fm_if, u16 pause_quanta)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ out_be32(&((struct memac_regs *)__if->ccsr_map)->pause_quanta[0],
+ pause_quanta);
+ return 0;
+}
+
+int
+fman_if_get_fdoff(struct fman_if *fm_if)
+{
+ u32 fmbm_rebm;
+ int fdoff;
+
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ fmbm_rebm = in_be32(&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rebm);
+
+ fdoff = (fmbm_rebm >> FMAN_SP_EXT_BUF_MARG_START_SHIFT) & 0x1ff;
+
+ return fdoff;
+}
+
+void
+fman_if_set_err_fqid(struct fman_if *fm_if, uint32_t err_fqid)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ unsigned int *fmbm_refqid =
+ &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_refqid;
+ out_be32(fmbm_refqid, err_fqid);
+}
+
+int
+fman_if_get_ic_params(struct fman_if *fm_if, struct fman_if_ic_params *icp)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ int val = 0;
+ int iceof_mask = 0x001f0000;
+ int icsz_mask = 0x0000001f;
+ int iciof_mask = 0x00000f00;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ unsigned int *fmbm_ricp =
+ &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_ricp;
+ val = in_be32(fmbm_ricp);
+
+ icp->iceof = (val & iceof_mask) >> 12;
+ icp->iciof = (val & iciof_mask) >> 4;
+ icp->icsz = (val & icsz_mask) << 4;
+
+ return 0;
+}
+
+int
+fman_if_set_ic_params(struct fman_if *fm_if,
+ const struct fman_if_ic_params *icp)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ int val = 0;
+ int iceof_mask = 0x001f0000;
+ int icsz_mask = 0x0000001f;
+ int iciof_mask = 0x00000f00;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ val |= (icp->iceof << 12) & iceof_mask;
+ val |= (icp->iciof << 4) & iciof_mask;
+ val |= (icp->icsz >> 4) & icsz_mask;
+
+ unsigned int *fmbm_ricp =
+ &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_ricp;
+ out_be32(fmbm_ricp, val);
+
+ return 0;
+}
+
+void
+fman_if_set_fdoff(struct fman_if *fm_if, uint32_t fd_offset)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ unsigned int *fmbm_rebm;
+ int val = 0;
+ int fmbm_mask = 0x01ff0000;
+
+ val = fd_offset << FMAN_SP_EXT_BUF_MARG_START_SHIFT;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ fmbm_rebm = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rebm;
+
+ out_be32(fmbm_rebm, (in_be32(fmbm_rebm) & ~fmbm_mask) | val);
+}
+
+void
+fman_if_set_maxfrm(struct fman_if *fm_if, uint16_t max_frm)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ unsigned int *reg_maxfrm;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ reg_maxfrm = &((struct memac_regs *)__if->ccsr_map)->maxfrm;
+
+ out_be32(reg_maxfrm, (in_be32(reg_maxfrm) & 0xFFFF0000) | max_frm);
+}
+
+uint16_t
+fman_if_get_maxfrm(struct fman_if *fm_if)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ unsigned int *reg_maxfrm;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ reg_maxfrm = &((struct memac_regs *)__if->ccsr_map)->maxfrm;
+
+	/* the max frame length lives in the low 16 bits of the register */
+	return (in_be32(reg_maxfrm) & 0x0000FFFF);
+}
+
+/* MSB in fmbm_rebm register
+ * 0 - If BMI cannot store the frame in a single buffer it may select a buffer
+ * of smaller size and store the frame in scatter gather (S/G) buffers
+ * 1 - Scatter gather format is not enabled for frame storage. If BMI cannot
+ * store the frame in a single buffer, the frame is discarded.
+ */
+
+int
+fman_if_get_sg_enable(struct fman_if *fm_if)
+{
+ u32 fmbm_rebm;
+
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ fmbm_rebm = in_be32(&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rebm);
+
+ return (fmbm_rebm & FMAN_SP_SG_DISABLE) ? 0 : 1;
+}
+
+void
+fman_if_set_sg(struct fman_if *fm_if, int enable)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ unsigned int *fmbm_rebm;
+ int val;
+ int fmbm_mask = FMAN_SP_SG_DISABLE;
+
+ if (enable)
+ val = 0;
+ else
+ val = FMAN_SP_SG_DISABLE;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ fmbm_rebm = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rebm;
+
+ out_be32(fmbm_rebm, (in_be32(fmbm_rebm) & ~fmbm_mask) | val);
+}
+
+void
+fman_if_set_dnia(struct fman_if *fm_if, uint32_t nia)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ unsigned int *fmqm_pndn;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ fmqm_pndn = &((struct fman_port_qmi_regs *)__if->qmi_map)->fmqm_pndn;
+
+ out_be32(fmqm_pndn, nia);
+}
+
+void
+fman_if_discard_rx_errors(struct fman_if *fm_if)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ unsigned int *fmbm_rfsdm, *fmbm_rfsem;
+
+ fmbm_rfsem = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rfsem;
+ out_be32(fmbm_rfsem, 0);
+
+ /* Configure the discard mask to discard the error packets which have
+ * DMA errors, Frame size error, Header error etc. The mask 0x010CE3F0
+	 * is used to discard all the errors that are reported in FD[STATUS].
+ */
+ fmbm_rfsdm = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rfsdm;
+ out_be32(fmbm_rfsdm, 0x010CE3F0);
+}
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/fman/netcfg_layer.c b/src/spdk/dpdk/drivers/bus/dpaa/base/fman/netcfg_layer.c
new file mode 100644
index 00000000..031c6f1a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/fman/netcfg_layer.c
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2010-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+#include <inttypes.h>
+#include <of.h>
+#include <net/if.h>
+#include <sys/ioctl.h>
+#include <error.h>
+#include <net/if_arp.h>
+#include <assert.h>
+#include <unistd.h>
+
+#include <rte_malloc.h>
+
+#include <rte_dpaa_logs.h>
+#include <netcfg.h>
+
+/* This data structure contains all configuration information
+ * related to usage of DPA devices.
+ */
+struct netcfg_info *netcfg;
+/* fd to open a socket for making ioctl request to disable/enable shared
+ * interfaces.
+ */
+static int skfd = -1;
+
+#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
+void
+dump_netcfg(struct netcfg_info *cfg_ptr)
+{
+ int i;
+
+ printf(".......... DPAA Configuration ..........\n\n");
+
+ /* Network interfaces */
+ printf("Network interfaces: %d\n", cfg_ptr->num_ethports);
+ for (i = 0; i < cfg_ptr->num_ethports; i++) {
+ struct fman_if_bpool *bpool;
+ struct fm_eth_port_cfg *p_cfg = &cfg_ptr->port_cfg[i];
+ struct fman_if *__if = p_cfg->fman_if;
+
+ printf("\n+ Fman %d, MAC %d (%s);\n",
+ __if->fman_idx, __if->mac_idx,
+ (__if->mac_type == fman_mac_1g) ? "1G" : "10G");
+
+ printf("\tmac_addr: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ (&__if->mac_addr)->addr_bytes[0],
+ (&__if->mac_addr)->addr_bytes[1],
+ (&__if->mac_addr)->addr_bytes[2],
+ (&__if->mac_addr)->addr_bytes[3],
+ (&__if->mac_addr)->addr_bytes[4],
+ (&__if->mac_addr)->addr_bytes[5]);
+
+ printf("\ttx_channel_id: 0x%02x\n",
+ __if->tx_channel_id);
+
+ printf("\tfqid_rx_def: 0x%x\n", p_cfg->rx_def);
+ printf("\tfqid_rx_err: 0x%x\n", __if->fqid_rx_err);
+
+ printf("\tfqid_tx_err: 0x%x\n", __if->fqid_tx_err);
+ printf("\tfqid_tx_confirm: 0x%x\n", __if->fqid_tx_confirm);
+ fman_if_for_each_bpool(bpool, __if)
+ printf("\tbuffer pool: (bpid=%d, count=%"PRId64
+ " size=%"PRId64", addr=0x%"PRIx64")\n",
+ bpool->bpid, bpool->count, bpool->size,
+ bpool->addr);
+ }
+}
+#endif /* RTE_LIBRTE_DPAA_DEBUG_DRIVER */
+
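+/* Typical usage, as a sketch (use() stands in for caller code):
+ *
+ *	struct netcfg_info *cfg = netcfg_acquire();
+ *	if (cfg) {
+ *		for (int i = 0; i < cfg->num_ethports; i++)
+ *			use(cfg->port_cfg[i].fman_if);
+ *		netcfg_release(cfg);
+ *	}
+ */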
+struct netcfg_info *
+netcfg_acquire(void)
+{
+ struct fman_if *__if;
+ int _errno, idx = 0;
+ uint8_t num_ports = 0;
+ uint8_t num_cfg_ports = 0;
+ size_t size;
+
+ /* Extract dpa configuration from fman driver and FMC configuration
+ * for command-line interfaces.
+ */
+
+ /* Open a basic socket to enable/disable shared
+ * interfaces.
+ */
+ skfd = socket(AF_PACKET, SOCK_RAW, 0);
+ if (unlikely(skfd < 0)) {
+		error(0, errno, "%s(): socket(SOCK_RAW)", __func__);
+ return NULL;
+ }
+
+ /* Initialise the Fman driver */
+ _errno = fman_init();
+ if (_errno) {
+		DPAA_BUS_LOG(ERR, "FMAN driver init failed (%d)", _errno);
+ close(skfd);
+ skfd = -1;
+ return NULL;
+ }
+
+ /* Number of MAC ports */
+ list_for_each_entry(__if, fman_if_list, node)
+ num_ports++;
+
+ if (!num_ports) {
+ DPAA_BUS_LOG(ERR, "FMAN ports not available");
+ return NULL;
+ }
+ /* Allocate space for all enabled mac ports */
+ size = sizeof(*netcfg) +
+ (num_ports * sizeof(struct fm_eth_port_cfg));
+
+ netcfg = calloc(1, size);
+ if (unlikely(netcfg == NULL)) {
+		DPAA_BUS_LOG(ERR, "Unable to allocate memory for netcfg");
+ goto error;
+ }
+
+ netcfg->num_ethports = num_ports;
+
+ list_for_each_entry(__if, fman_if_list, node) {
+ struct fm_eth_port_cfg *cfg = &netcfg->port_cfg[idx];
+ /* Hook in the fman driver interface */
+ cfg->fman_if = __if;
+ cfg->rx_def = __if->fqid_rx_def;
+ num_cfg_ports++;
+ idx++;
+ }
+
+ if (!num_cfg_ports) {
+ DPAA_BUS_LOG(ERR, "No FMAN ports found");
+ goto error;
+ } else if (num_ports != num_cfg_ports)
+ netcfg->num_ethports = num_cfg_ports;
+
+ return netcfg;
+
+error:
+ if (netcfg) {
+ free(netcfg);
+ netcfg = NULL;
+ }
+
+ return NULL;
+}
+
+void
+netcfg_release(struct netcfg_info *cfg_ptr)
+{
+ free(cfg_ptr);
+ /* Close socket for shared interfaces */
+ if (skfd >= 0) {
+ close(skfd);
+ skfd = -1;
+ }
+}
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/fman/of.c b/src/spdk/dpdk/drivers/bus/dpaa/base/fman/of.c
new file mode 100644
index 00000000..a7f3174e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/fman/of.c
@@ -0,0 +1,587 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2010-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#include <of.h>
+#include <rte_dpaa_logs.h>
+
+static int alive;
+static struct dt_dir root_dir;
+static const char *base_dir;
+static COMPAT_LIST_HEAD(linear);
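+
+/* This layer parses a filesystem image of the device tree (typically
+ * /proc/device-tree, or whatever path is handed to of_init_path()) and
+ * mirrors it as dt_dir/dt_file nodes plus the flat "linear" list used for
+ * global lookups such as of_find_node_by_phandle().
+ */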
+
+static int
+of_open_dir(const char *relative_path, struct dirent ***d)
+{
+ int ret;
+ char full_path[PATH_MAX];
+
+ snprintf(full_path, PATH_MAX, "%s/%s", base_dir, relative_path);
+ ret = scandir(full_path, d, 0, versionsort);
+ if (ret < 0)
+ DPAA_BUS_LOG(ERR, "Failed to open directory %s",
+ full_path);
+ return ret;
+}
+
+static void
+of_close_dir(struct dirent **d, int num)
+{
+ while (num--)
+ free(d[num]);
+ free(d);
+}
+
+static int
+of_open_file(const char *relative_path)
+{
+ int ret;
+ char full_path[PATH_MAX];
+
+ snprintf(full_path, PATH_MAX, "%s/%s", base_dir, relative_path);
+ ret = open(full_path, O_RDONLY);
+ if (ret < 0)
+		DPAA_BUS_LOG(ERR, "Failed to open file %s",
+ full_path);
+ return ret;
+}
+
+static void
+process_file(struct dirent *dent, struct dt_dir *parent)
+{
+ int fd;
+ struct dt_file *f = malloc(sizeof(*f));
+
+ if (!f) {
+ DPAA_BUS_LOG(DEBUG, "Unable to allocate memory for file node");
+ return;
+ }
+ f->node.is_file = 1;
+ snprintf(f->node.node.name, NAME_MAX, "%s", dent->d_name);
+ snprintf(f->node.node.full_name, PATH_MAX, "%s/%s",
+ parent->node.node.full_name, dent->d_name);
+ f->parent = parent;
+ fd = of_open_file(f->node.node.full_name);
+ if (fd < 0) {
+ DPAA_BUS_LOG(DEBUG, "Unable to open file node");
+ free(f);
+ return;
+ }
+ f->len = read(fd, f->buf, OF_FILE_BUF_MAX);
+ close(fd);
+ if (f->len < 0) {
+ DPAA_BUS_LOG(DEBUG, "Unable to read file node");
+ free(f);
+ return;
+ }
+ list_add_tail(&f->node.list, &parent->files);
+}
+
+static const struct dt_dir *
+node2dir(const struct device_node *n)
+{
+ struct dt_node *dn = container_of((struct device_node *)n,
+ struct dt_node, node);
+ const struct dt_dir *d = container_of(dn, struct dt_dir, node);
+
+ assert(!dn->is_file);
+ return d;
+}
+
+/* process_dir() calls iterate_dir(), but the latter will also call the former
+ * when recursing into sub-directories, so a predeclaration is needed.
+ */
+static int process_dir(const char *relative_path, struct dt_dir *dt);
+
+static int
+iterate_dir(struct dirent **d, int num, struct dt_dir *dt)
+{
+ int loop;
+ /* Iterate the directory contents */
+ for (loop = 0; loop < num; loop++) {
+ struct dt_dir *subdir;
+ int ret;
+ /* Ignore dot files of all types (especially "..") */
+ if (d[loop]->d_name[0] == '.')
+ continue;
+ switch (d[loop]->d_type) {
+ case DT_REG:
+ process_file(d[loop], dt);
+ break;
+ case DT_DIR:
+ subdir = malloc(sizeof(*subdir));
+ if (!subdir) {
+ perror("malloc");
+ return -ENOMEM;
+ }
+ snprintf(subdir->node.node.name, NAME_MAX, "%s",
+ d[loop]->d_name);
+ snprintf(subdir->node.node.full_name, PATH_MAX,
+ "%s/%s", dt->node.node.full_name,
+ d[loop]->d_name);
+ subdir->parent = dt;
+ ret = process_dir(subdir->node.node.full_name, subdir);
+ if (ret)
+ return ret;
+ list_add_tail(&subdir->node.list, &dt->subdirs);
+ break;
+ default:
+ DPAA_BUS_LOG(DEBUG, "Ignoring invalid dt entry %s/%s",
+ dt->node.node.full_name, d[loop]->d_name);
+ }
+ }
+ return 0;
+}
+
+static int
+process_dir(const char *relative_path, struct dt_dir *dt)
+{
+ struct dirent **d;
+ int ret, num;
+
+ dt->node.is_file = 0;
+ INIT_LIST_HEAD(&dt->subdirs);
+ INIT_LIST_HEAD(&dt->files);
+ ret = of_open_dir(relative_path, &d);
+ if (ret < 0)
+ return ret;
+ num = ret;
+ ret = iterate_dir(d, num, dt);
+ of_close_dir(d, num);
+ return (ret < 0) ? ret : 0;
+}
+
+static void
+linear_dir(struct dt_dir *d)
+{
+ struct dt_file *f;
+ struct dt_dir *dd;
+
+ d->compatible = NULL;
+ d->status = NULL;
+ d->lphandle = NULL;
+ d->a_cells = NULL;
+ d->s_cells = NULL;
+ d->reg = NULL;
+ list_for_each_entry(f, &d->files, node.list) {
+ if (!strcmp(f->node.node.name, "compatible")) {
+ if (d->compatible)
+ DPAA_BUS_LOG(DEBUG, "Duplicate compatible in"
+ " %s", d->node.node.full_name);
+ d->compatible = f;
+ } else if (!strcmp(f->node.node.name, "status")) {
+ if (d->status)
+ DPAA_BUS_LOG(DEBUG, "Duplicate status in %s",
+ d->node.node.full_name);
+ d->status = f;
+ } else if (!strcmp(f->node.node.name, "linux,phandle")) {
+ if (d->lphandle)
+ DPAA_BUS_LOG(DEBUG, "Duplicate lphandle in %s",
+ d->node.node.full_name);
+ d->lphandle = f;
+ } else if (!strcmp(f->node.node.name, "phandle")) {
+ if (d->lphandle)
+ DPAA_BUS_LOG(DEBUG, "Duplicate lphandle in %s",
+ d->node.node.full_name);
+ d->lphandle = f;
+ } else if (!strcmp(f->node.node.name, "#address-cells")) {
+ if (d->a_cells)
+ DPAA_BUS_LOG(DEBUG, "Duplicate a_cells in %s",
+ d->node.node.full_name);
+ d->a_cells = f;
+ } else if (!strcmp(f->node.node.name, "#size-cells")) {
+ if (d->s_cells)
+ DPAA_BUS_LOG(DEBUG, "Duplicate s_cells in %s",
+ d->node.node.full_name);
+ d->s_cells = f;
+ } else if (!strcmp(f->node.node.name, "reg")) {
+ if (d->reg)
+ DPAA_BUS_LOG(DEBUG, "Duplicate reg in %s",
+ d->node.node.full_name);
+ d->reg = f;
+ }
+ }
+
+ list_for_each_entry(dd, &d->subdirs, node.list) {
+ list_add_tail(&dd->linear, &linear);
+ linear_dir(dd);
+ }
+}
+
+int
+of_init_path(const char *dt_path)
+{
+ int ret;
+
+ base_dir = dt_path;
+
+ /* This needs to be singleton initialization */
+ DPAA_BUS_HWWARN(alive, "Double-init of device-tree driver!");
+
+ /* Prepare root node (the remaining fields are set in process_dir()) */
+ root_dir.node.node.name[0] = '\0';
+ root_dir.node.node.full_name[0] = '\0';
+ INIT_LIST_HEAD(&root_dir.node.list);
+ root_dir.parent = NULL;
+
+ /* Kick things off... */
+ ret = process_dir("", &root_dir);
+ if (ret) {
+ DPAA_BUS_LOG(ERR, "Unable to parse device tree");
+ return ret;
+ }
+
+ /* Now make a flat, linear list of directories */
+ linear_dir(&root_dir);
+ alive = 1;
+ return 0;
+}
+
+static void
+destroy_dir(struct dt_dir *d)
+{
+ struct dt_file *f, *tmpf;
+ struct dt_dir *dd, *tmpd;
+
+ list_for_each_entry_safe(f, tmpf, &d->files, node.list) {
+ list_del(&f->node.list);
+ free(f);
+ }
+ list_for_each_entry_safe(dd, tmpd, &d->subdirs, node.list) {
+ destroy_dir(dd);
+ list_del(&dd->node.list);
+ free(dd);
+ }
+}
+
+void
+of_finish(void)
+{
+ DPAA_BUS_HWWARN(!alive, "Double-finish of device-tree driver!");
+
+ destroy_dir(&root_dir);
+ INIT_LIST_HEAD(&linear);
+ alive = 0;
+}
+
+static const struct dt_dir *
+next_linear(const struct dt_dir *f)
+{
+ if (f->linear.next == &linear)
+ return NULL;
+ return list_entry(f->linear.next, struct dt_dir, linear);
+}
+
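+/* A "compatible" property is a packed sequence of NUL-terminated strings;
+ * walk them back to back until a match is found or the buffer is exhausted.
+ */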
+static int
+check_compatible(const struct dt_file *f, const char *compatible)
+{
+ const char *c = (char *)f->buf;
+ unsigned int len, remains = f->len;
+
+ while (remains) {
+ len = strlen(c);
+ if (!strcmp(c, compatible))
+ return 1;
+
+ if (remains < len + 1)
+ break;
+
+ c += (len + 1);
+ remains -= (len + 1);
+ }
+ return 0;
+}
+
+const struct device_node *
+of_find_compatible_node(const struct device_node *from,
+ const char *type __always_unused,
+ const char *compatible)
+{
+ const struct dt_dir *d;
+
+ DPAA_BUS_HWWARN(!alive, "Device-tree driver not initialised!");
+
+ if (list_empty(&linear))
+ return NULL;
+ if (!from)
+ d = list_entry(linear.next, struct dt_dir, linear);
+ else
+ d = node2dir(from);
+ for (d = next_linear(d); d && (!d->compatible ||
+ !check_compatible(d->compatible,
+ compatible));
+ d = next_linear(d))
+ ;
+ if (d)
+ return &d->node.node;
+ return NULL;
+}
+
+const void *
+of_get_property(const struct device_node *from, const char *name,
+ size_t *lenp)
+{
+ const struct dt_dir *d;
+ const struct dt_file *f;
+
+ DPAA_BUS_HWWARN(!alive, "Device-tree driver not initialised!");
+
+ d = node2dir(from);
+ list_for_each_entry(f, &d->files, node.list)
+ if (!strcmp(f->node.node.name, name)) {
+ if (lenp)
+ *lenp = f->len;
+ return f->buf;
+ }
+ return NULL;
+}
+
+bool
+of_device_is_available(const struct device_node *dev_node)
+{
+ const struct dt_dir *d;
+
+ DPAA_BUS_HWWARN(!alive, "Device-tree driver not initialised!");
+ d = node2dir(dev_node);
+ if (!d->status)
+ return true;
+ if (!strcmp((char *)d->status->buf, "okay"))
+ return true;
+ if (!strcmp((char *)d->status->buf, "ok"))
+ return true;
+ return false;
+}
+
+const struct device_node *
+of_find_node_by_phandle(phandle ph)
+{
+ const struct dt_dir *d;
+
+ DPAA_BUS_HWWARN(!alive, "Device-tree driver not initialised!");
+ list_for_each_entry(d, &linear, linear)
+ if (d->lphandle && (d->lphandle->len == 4) &&
+ !memcmp(d->lphandle->buf, &ph, 4))
+ return &d->node.node;
+ return NULL;
+}
+
+const struct device_node *
+of_get_parent(const struct device_node *dev_node)
+{
+ const struct dt_dir *d;
+
+ DPAA_BUS_HWWARN(!alive, "Device-tree driver not initialised!");
+
+ if (!dev_node)
+ return NULL;
+ d = node2dir(dev_node);
+ if (!d->parent)
+ return NULL;
+ return &d->parent->node.node;
+}
+
+const struct device_node *
+of_get_next_child(const struct device_node *dev_node,
+ const struct device_node *prev)
+{
+ const struct dt_dir *p, *c;
+
+ DPAA_BUS_HWWARN(!alive, "Device-tree driver not initialised!");
+
+ if (!dev_node)
+ return NULL;
+ p = node2dir(dev_node);
+ if (prev) {
+ c = node2dir(prev);
+ DPAA_BUS_HWWARN((c->parent != p), "Parent/child mismatch");
+ if (c->parent != p)
+ return NULL;
+ if (c->node.list.next == &p->subdirs)
+ /* prev was the last child */
+ return NULL;
+ c = list_entry(c->node.list.next, struct dt_dir, node.list);
+ return &c->node.node;
+ }
+ /* Return first child */
+ if (list_empty(&p->subdirs))
+ return NULL;
+ c = list_entry(p->subdirs.next, struct dt_dir, node.list);
+ return &c->node.node;
+}
+
+uint32_t
+of_n_addr_cells(const struct device_node *dev_node)
+{
+ const struct dt_dir *d;
+
+ DPAA_BUS_HWWARN(!alive, "Device-tree driver not initialised");
+ if (!dev_node)
+ return OF_DEFAULT_NA;
+ d = node2dir(dev_node);
+ while ((d = d->parent))
+ if (d->a_cells) {
+ unsigned char *buf =
+ (unsigned char *)&d->a_cells->buf[0];
+ assert(d->a_cells->len == 4);
+ return ((uint32_t)buf[0] << 24) |
+ ((uint32_t)buf[1] << 16) |
+ ((uint32_t)buf[2] << 8) |
+ (uint32_t)buf[3];
+ }
+ return OF_DEFAULT_NA;
+}
+
+uint32_t
+of_n_size_cells(const struct device_node *dev_node)
+{
+ const struct dt_dir *d;
+
+ DPAA_BUS_HWWARN(!alive, "Device-tree driver not initialised!");
+ if (!dev_node)
+ return OF_DEFAULT_NS;
+ d = node2dir(dev_node);
+ while ((d = d->parent))
+ if (d->s_cells) {
+ unsigned char *buf =
+ (unsigned char *)&d->s_cells->buf[0];
+ assert(d->s_cells->len == 4);
+ return ((uint32_t)buf[0] << 24) |
+ ((uint32_t)buf[1] << 16) |
+ ((uint32_t)buf[2] << 8) |
+ (uint32_t)buf[3];
+ }
+ return OF_DEFAULT_NS;
+}
+
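+/*
+ * A "reg" property is an array of (address, size) tuples: 'na' address
+ * cells followed by 'ns' size cells, one cell being one big-endian 32-bit
+ * word. Illustrative example with na=2, ns=1 (values made up):
+ *
+ *	reg = <0x0 0xf4000000 0x200000>;
+ *
+ * describes a region at 0xf4000000 of size 0x200000.
+ */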
+const uint32_t *
+of_get_address(const struct device_node *dev_node, size_t idx,
+ uint64_t *size, uint32_t *flags __rte_unused)
+{
+ const struct dt_dir *d;
+ const unsigned char *buf;
+ uint32_t na = of_n_addr_cells(dev_node);
+ uint32_t ns = of_n_size_cells(dev_node);
+
+ if (!dev_node)
+ d = &root_dir;
+ else
+ d = node2dir(dev_node);
+ if (!d->reg)
+ return NULL;
+ assert(d->reg->len % ((na + ns) * 4) == 0);
+ assert(d->reg->len / ((na + ns) * 4) > (unsigned int) idx);
+ buf = (const unsigned char *)&d->reg->buf[0];
+ buf += (na + ns) * idx * 4;
+ if (size)
+ for (*size = 0; ns > 0; ns--, na++)
+ *size = (*size << 32) +
+ (((uint32_t)buf[4 * na] << 24) |
+ ((uint32_t)buf[4 * na + 1] << 16) |
+ ((uint32_t)buf[4 * na + 2] << 8) |
+ (uint32_t)buf[4 * na + 3]);
+ return (const uint32_t *)buf;
+}
+
+uint64_t
+of_translate_address(const struct device_node *dev_node,
+ const uint32_t *addr)
+{
+ uint64_t phys_addr, tmp_addr;
+ const struct device_node *parent;
+ const uint32_t *ranges;
+ size_t rlen;
+ uint32_t na, pna;
+
+ DPAA_BUS_HWWARN(!alive, "Device-tree driver not initialised!");
+ assert(dev_node != NULL);
+
+ na = of_n_addr_cells(dev_node);
+ phys_addr = of_read_number(addr, na);
+
+ dev_node = of_get_parent(dev_node);
+ if (!dev_node)
+ return 0;
+ else if (node2dir(dev_node) == &root_dir)
+ return phys_addr;
+
+ do {
+ pna = of_n_addr_cells(dev_node);
+ parent = of_get_parent(dev_node);
+ if (!parent)
+ return 0;
+
+ ranges = of_get_property(dev_node, "ranges", &rlen);
+ /* "ranges" property is missing. Translation breaks */
+ if (!ranges)
+ return 0;
+ /* "ranges" property is empty. Do 1:1 translation */
+ else if (rlen == 0)
+ continue;
+ else
+ tmp_addr = of_read_number(ranges + na, pna);
+
+ na = pna;
+ dev_node = parent;
+ phys_addr += tmp_addr;
+ } while (node2dir(parent) != &root_dir);
+
+ return phys_addr;
+}
+
+bool
+of_device_is_compatible(const struct device_node *dev_node,
+ const char *compatible)
+{
+ const struct dt_dir *d;
+
+ DPAA_BUS_HWWARN(!alive, "Device-tree driver not initialised!");
+ if (!dev_node)
+ d = &root_dir;
+ else
+ d = node2dir(dev_node);
+ if (d->compatible && check_compatible(d->compatible, compatible))
+ return true;
+ return false;
+}
+
+static const void *of_get_mac_addr(const struct device_node *np,
+ const char *name)
+{
+ return of_get_property(np, name, NULL);
+}
+
+/**
+ * Search the device tree for the best MAC address to use. 'mac-address' is
+ * checked first, because that is supposed to contain the "most recent" MAC
+ * address. If that isn't set, then 'local-mac-address' is checked next,
+ * because that is the default address. If that isn't set, then the obsolete
+ * 'address' is checked, just in case we're using an old device tree.
+ *
+ * Note that the 'address' property is supposed to contain a virtual address of
+ * the register set, but some DTS files have redefined that property to be the
+ * MAC address.
+ *
+ * All-zero MAC addresses are rejected, because those could be properties that
+ * exist in the device tree, but were not set by U-Boot. For example, the
+ * DTS could define 'mac-address' and 'local-mac-address', with zero MAC
+ * addresses. Some older U-Boots only initialized 'local-mac-address'. In
+ * this case, the real MAC is in 'local-mac-address', and 'mac-address' exists
+ * but is all zeros.
+ */
+const void *of_get_mac_address(const struct device_node *np)
+{
+ const void *addr;
+
+ addr = of_get_mac_addr(np, "mac-address");
+ if (addr)
+ return addr;
+
+ addr = of_get_mac_addr(np, "local-mac-address");
+ if (addr)
+ return addr;
+
+ return of_get_mac_addr(np, "address");
+}
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman.c b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman.c
new file mode 100644
index 00000000..8a629073
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman.c
@@ -0,0 +1,361 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#include "bman.h"
+#include <rte_branch_prediction.h>
+
+/* Compilation constants */
+#define RCR_THRESH 2 /* reread h/w CI when running out of space */
+#define IRQNAME "BMan portal %d"
+#define MAX_IRQNAME 16 /* big enough for "BMan portal %d" */
+
+struct bman_portal {
+ struct bm_portal p;
+ /* 2-element array. pools[0] is mask, pools[1] is snapshot. */
+ struct bman_depletion *pools;
+ int thresh_set;
+ unsigned long irq_sources;
+ u32 slowpoll; /* only used when interrupts are off */
+ /* When the cpu-affine portal is activated, this is non-NULL */
+ const struct bm_portal_config *config;
+ char irqname[MAX_IRQNAME];
+};
+
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+static RTE_DEFINE_PER_LCORE(struct bman_portal, bman_affine_portal);
+
+static inline struct bman_portal *get_affine_portal(void)
+{
+ return &RTE_PER_LCORE(bman_affine_portal);
+}
+
+/*
+ * This object type refers to a pool, it isn't *the* pool. There may be
+ * more than one such object per BMan buffer pool, eg. if different users of
+ * the pool are operating via different portals.
+ */
+struct bman_pool {
+ struct bman_pool_params params;
+ /* Used for hash-table admin when using depletion notifications. */
+ struct bman_portal *portal;
+ struct bman_pool *next;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ atomic_t in_use;
+#endif
+};
+
+static inline
+struct bman_portal *bman_create_portal(struct bman_portal *portal,
+ const struct bm_portal_config *c)
+{
+ struct bm_portal *p;
+ const struct bman_depletion *pools = &c->mask;
+ int ret;
+ u8 bpid = 0;
+
+ p = &portal->p;
+ /*
+ * Prep the low-level portal struct with the mapped addresses from
+ * the config; everything that follows depends on it, while "config"
+ * itself is kept mainly for later (de)reference.
+ */
+ p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
+ p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
+ if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) {
+ pr_err("Bman RCR initialisation failed\n");
+ return NULL;
+ }
+ if (bm_mc_init(p)) {
+ pr_err("Bman MC initialisation failed\n");
+ goto fail_mc;
+ }
+ portal->pools = kmalloc(2 * sizeof(*pools), GFP_KERNEL);
+ if (!portal->pools)
+ goto fail_pools;
+ portal->pools[0] = *pools;
+ bman_depletion_init(portal->pools + 1);
+ while (bpid < bman_pool_max) {
+ /*
+ * Default to all BPIDs disabled, we enable as required at
+ * run-time.
+ */
+ bm_isr_bscn_mask(p, bpid, 0);
+ bpid++;
+ }
+ portal->slowpoll = 0;
+ /* Write-to-clear any stale interrupt status bits */
+ bm_isr_disable_write(p, 0xffffffff);
+ portal->irq_sources = 0;
+ bm_isr_enable_write(p, portal->irq_sources);
+ bm_isr_status_clear(p, 0xffffffff);
+ snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
+ if (request_irq(c->irq, NULL, 0, portal->irqname,
+ portal)) {
+ pr_err("request_irq() failed\n");
+ goto fail_irq;
+ }
+
+ /* Need RCR to be empty before continuing */
+ ret = bm_rcr_get_fill(p);
+ if (ret) {
+ pr_err("Bman RCR unclean\n");
+ goto fail_rcr_empty;
+ }
+ /* Success */
+ portal->config = c;
+
+ bm_isr_disable_write(p, 0);
+ bm_isr_uninhibit(p);
+ return portal;
+fail_rcr_empty:
+ free_irq(c->irq, portal);
+fail_irq:
+ kfree(portal->pools);
+fail_pools:
+ bm_mc_finish(p);
+fail_mc:
+ bm_rcr_finish(p);
+ return NULL;
+}
+
+struct bman_portal *
+bman_create_affine_portal(const struct bm_portal_config *c)
+{
+ struct bman_portal *portal = get_affine_portal();
+
+ /* This function is called from a context that is already affine to a
+ * CPU; in other words, it is non-migratable to other CPUs.
+ */
+ portal = bman_create_portal(portal, c);
+ if (portal) {
+ spin_lock(&affine_mask_lock);
+ CPU_SET(c->cpu, &affine_mask);
+ spin_unlock(&affine_mask_lock);
+ }
+ return portal;
+}
+
+static inline
+void bman_destroy_portal(struct bman_portal *bm)
+{
+ const struct bm_portal_config *pcfg;
+
+ pcfg = bm->config;
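+ /* Two consecutive CI updates: the second presumably picks up
+ * completions that land after the first cache refresh (an assumption;
+ * the double call is not documented here).
+ */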
+ bm_rcr_cce_update(&bm->p);
+ bm_rcr_cce_update(&bm->p);
+
+ free_irq(pcfg->irq, bm);
+
+ kfree(bm->pools);
+ bm_mc_finish(&bm->p);
+ bm_rcr_finish(&bm->p);
+ bm->config = NULL;
+}
+
+const struct
+bm_portal_config *bman_destroy_affine_portal(void)
+{
+ struct bman_portal *bm = get_affine_portal();
+ const struct bm_portal_config *pcfg;
+
+ pcfg = bm->config;
+ bman_destroy_portal(bm);
+ spin_lock(&affine_mask_lock);
+ CPU_CLR(pcfg->cpu, &affine_mask);
+ spin_unlock(&affine_mask_lock);
+ return pcfg;
+}
+
+int
+bman_get_portal_index(void)
+{
+ struct bman_portal *p = get_affine_portal();
+ return p->config->index;
+}
+
+static const u32 zero_thresholds[4] = {0, 0, 0, 0};
+
+struct bman_pool *bman_new_pool(const struct bman_pool_params *params)
+{
+ struct bman_pool *pool = NULL;
+ u32 bpid;
+
+ if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID) {
+ int ret = bman_alloc_bpid(&bpid);
+
+ if (ret)
+ return NULL;
+ } else {
+ if (params->bpid >= bman_pool_max)
+ return NULL;
+ bpid = params->bpid;
+ }
+ if (params->flags & BMAN_POOL_FLAG_THRESH) {
+ int ret = bm_pool_set(bpid, params->thresholds);
+
+ if (ret)
+ goto err;
+ }
+
+ pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ goto err;
+ pool->params = *params;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ atomic_set(&pool->in_use, 1);
+#endif
+ if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
+ pool->params.bpid = bpid;
+
+ return pool;
+err:
+ if (params->flags & BMAN_POOL_FLAG_THRESH)
+ bm_pool_set(bpid, zero_thresholds);
+
+ if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
+ bman_release_bpid(bpid);
+ kfree(pool);
+
+ return NULL;
+}
+
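+/*
+ * Illustrative usage sketch (not part of the driver): create a pool with a
+ * dynamically allocated BPID, use it, then free it.
+ *
+ *	struct bman_pool_params params = {
+ *		.flags = BMAN_POOL_FLAG_DYNAMIC_BPID,
+ *	};
+ *	struct bman_pool *pool = bman_new_pool(&params);
+ *
+ *	if (pool) {
+ *		... bman_release()/bman_acquire() on the pool ...
+ *		bman_free_pool(pool);
+ *	}
+ */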
+void bman_free_pool(struct bman_pool *pool)
+{
+ if (pool->params.flags & BMAN_POOL_FLAG_THRESH)
+ bm_pool_set(pool->params.bpid, zero_thresholds);
+ if (pool->params.flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
+ bman_release_bpid(pool->params.bpid);
+ kfree(pool);
+}
+
+const struct bman_pool_params *bman_get_params(const struct bman_pool *pool)
+{
+ return &pool->params;
+}
+
+static void update_rcr_ci(struct bman_portal *p, int avail)
+{
+ if (avail)
+ bm_rcr_cce_prefetch(&p->p);
+ else
+ bm_rcr_cce_update(&p->p);
+}
+
+#define BMAN_BUF_MASK 0x0000fffffffffffful
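+/*
+ * Release up to 8 buffers per RCR entry. The first entry word carries the
+ * BPID in its upper 16 bits, so only the low 48 address bits of each buffer
+ * token are taken from the caller (hence BMAN_BUF_MASK).
+ */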
+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
+ u32 flags __maybe_unused)
+{
+ struct bman_portal *p;
+ struct bm_rcr_entry *r;
+ u32 i = num - 1;
+ u8 avail;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ if (!num || (num > 8))
+ return -EINVAL;
+ if (pool->params.flags & BMAN_POOL_FLAG_NO_RELEASE)
+ return -EINVAL;
+#endif
+
+ p = get_affine_portal();
+ avail = bm_rcr_get_avail(&p->p);
+ if (avail < 2)
+ update_rcr_ci(p, avail);
+ r = bm_rcr_start(&p->p);
+ if (unlikely(!r))
+ return -EBUSY;
+
+ /*
+ * we can copy all but the first entry, as this can trigger badness
+ * with the valid-bit
+ */
+ r->bufs[0].opaque =
+ cpu_to_be64(((u64)pool->params.bpid << 48) |
+ (bufs[0].opaque & BMAN_BUF_MASK));
+ if (i) {
+ for (i = 1; i < num; i++)
+ r->bufs[i].opaque =
+ cpu_to_be64(bufs[i].opaque & BMAN_BUF_MASK);
+ }
+
+ bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
+ (num & BM_RCR_VERB_BUFCOUNT_MASK));
+
+ return 0;
+}
+
+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
+ u32 flags __maybe_unused)
+{
+ struct bman_portal *p = get_affine_portal();
+ struct bm_mc_command *mcc;
+ struct bm_mc_result *mcr;
+ int ret, i;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ if (!num || (num > 8))
+ return -EINVAL;
+ if (pool->params.flags & BMAN_POOL_FLAG_ONLY_RELEASE)
+ return -EINVAL;
+#endif
+
+ mcc = bm_mc_start(&p->p);
+ mcc->acquire.bpid = pool->params.bpid;
+ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
+ (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
+ while (!(mcr = bm_mc_result(&p->p)))
+ cpu_relax();
+ ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
+ if (bufs) {
+ for (i = 0; i < num; i++)
+ bufs[i].opaque =
+ be64_to_cpu(mcr->acquire.bufs[i].opaque);
+ }
+ if (ret != num)
+ ret = -ENOMEM;
+ return ret;
+}
+
+int bman_query_pools(struct bm_pool_state *state)
+{
+ struct bman_portal *p = get_affine_portal();
+ struct bm_mc_result *mcr;
+
+ bm_mc_start(&p->p);
+ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY);
+ while (!(mcr = bm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & BM_MCR_VERB_CMD_MASK) ==
+ BM_MCR_VERB_CMD_QUERY);
+ *state = mcr->query;
+ state->as.state.state[0] = be32_to_cpu(state->as.state.state[0]);
+ state->as.state.state[1] = be32_to_cpu(state->as.state.state[1]);
+ state->ds.state.state[0] = be32_to_cpu(state->ds.state.state[0]);
+ state->ds.state.state[1] = be32_to_cpu(state->ds.state.state[1]);
+ return 0;
+}
+
+u32 bman_query_free_buffers(struct bman_pool *pool)
+{
+ return bm_pool_free_buffers(pool->params.bpid);
+}
+
+int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds)
+{
+ u32 bpid;
+
+ bpid = bman_get_params(pool)->bpid;
+
+ return bm_pool_set(bpid, thresholds);
+}
+
+int bman_shutdown_pool(u32 bpid)
+{
+ struct bman_portal *p = get_affine_portal();
+ return bm_shutdown_pool(&p->p, bpid);
+}
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman.h b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman.h
new file mode 100644
index 00000000..21a6bee7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman.h
@@ -0,0 +1,541 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2010-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#ifndef __BMAN_H
+#define __BMAN_H
+
+#include "bman_priv.h"
+
+/* Cache-inhibited register offsets */
+#define BM_REG_RCR_PI_CINH 0x3000
+#define BM_REG_RCR_CI_CINH 0x3100
+#define BM_REG_RCR_ITR 0x3200
+#define BM_REG_CFG 0x3300
+#define BM_REG_SCN(n) (0x3400 + ((n) << 6))
+#define BM_REG_ISR 0x3e00
+#define BM_REG_IIR 0x3ec0
+
+/* Cache-enabled register offsets */
+#define BM_CL_CR 0x0000
+#define BM_CL_RR0 0x0100
+#define BM_CL_RR1 0x0140
+#define BM_CL_RCR 0x1000
+#define BM_CL_RCR_PI_CENA 0x3000
+#define BM_CL_RCR_CI_CENA 0x3100
+
+/* BTW, the drivers (and h/w programming model) already obtain the required
+ * synchronisation for portal accesses via lwsync(), hwsync(), and
+ * data-dependencies. Use of barrier()s or other order-preserving primitives
+ * simply degrade performance. Hence the use of the __raw_*() interfaces, which
+ * simply ensure that the compiler treats the portal registers as volatile (ie.
+ * non-coherent).
+ */
+
+/* Cache-inhibited register access. */
+#define __bm_in(bm, o) be32_to_cpu(__raw_readl((bm)->ci + (o)))
+#define __bm_out(bm, o, val) __raw_writel(cpu_to_be32(val), \
+ (bm)->ci + (o))
+#define bm_in(reg) __bm_in(&portal->addr, BM_REG_##reg)
+#define bm_out(reg, val) __bm_out(&portal->addr, BM_REG_##reg, val)
+
+/* Cache-enabled (index) register access */
+#define __bm_cl_touch_ro(bm, o) dcbt_ro((bm)->ce + (o))
+#define __bm_cl_touch_rw(bm, o) dcbt_rw((bm)->ce + (o))
+#define __bm_cl_in(bm, o) be32_to_cpu(__raw_readl((bm)->ce + (o)))
+#define __bm_cl_out(bm, o, val) \
+ do { \
+ u32 *__tmpclout = (bm)->ce + (o); \
+ __raw_writel(cpu_to_be32(val), __tmpclout); \
+ dcbf(__tmpclout); \
+ } while (0)
+#define __bm_cl_invalidate(bm, o) dccivac((bm)->ce + (o))
+#define bm_cl_touch_ro(reg) __bm_cl_touch_ro(&portal->addr, BM_CL_##reg##_CENA)
+#define bm_cl_touch_rw(reg) __bm_cl_touch_rw(&portal->addr, BM_CL_##reg##_CENA)
+#define bm_cl_in(reg) __bm_cl_in(&portal->addr, BM_CL_##reg##_CENA)
+#define bm_cl_out(reg, val) __bm_cl_out(&portal->addr, BM_CL_##reg##_CENA, val)
+#define bm_cl_invalidate(reg)\
+ __bm_cl_invalidate(&portal->addr, BM_CL_##reg##_CENA)
+
+/* Cyclic helper for rings. FIXME: once we are able to do fine-grain perf
+ * analysis, look at using the "extra" bit in the ring index registers to avoid
+ * cyclic issues.
+ */
+static inline u8 bm_cyc_diff(u8 ringsize, u8 first, u8 last)
+{
+ /* 'first' is included, 'last' is excluded */
+ if (first <= last)
+ return last - first;
+ return ringsize + last - first;
+}
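+
+/*
+ * Example: with ringsize=8, first=6, last=2 the index has wrapped, so the
+ * distance is 8 + 2 - 6 = 4 entries.
+ */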
+
+/* Portal modes.
+ * Enum types:
+ *   pmode == production mode
+ *   cmode == consumption mode
+ * Enum values use 3 letter codes. First letter matches the portal mode,
+ * remaining two letters indicate:
+ *   ci == cache-inhibited portal register
+ *   ce == cache-enabled portal register
+ *   vb == in-band valid-bit (cache-enabled)
+ */
+enum bm_rcr_pmode { /* matches BCSP_CFG::RPM */
+ bm_rcr_pci = 0, /* PI index, cache-inhibited */
+ bm_rcr_pce = 1, /* PI index, cache-enabled */
+ bm_rcr_pvb = 2 /* valid-bit */
+};
+
+enum bm_rcr_cmode { /* s/w-only */
+ bm_rcr_cci, /* CI index, cache-inhibited */
+ bm_rcr_cce /* CI index, cache-enabled */
+};
+
+/* --- Portal structures --- */
+
+#define BM_RCR_SIZE 8
+
+struct bm_rcr {
+ struct bm_rcr_entry *ring, *cursor;
+ u8 ci, available, ithresh, vbit;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ u32 busy;
+ enum bm_rcr_pmode pmode;
+ enum bm_rcr_cmode cmode;
+#endif
+};
+
+struct bm_mc {
+ struct bm_mc_command *cr;
+ struct bm_mc_result *rr;
+ u8 rridx, vbit;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ enum {
+ /* Can only be _mc_start()ed */
+ mc_idle,
+ /* Can only be _mc_commit()ed or _mc_abort()ed */
+ mc_user,
+ /* Can only be _mc_retry()ed */
+ mc_hw
+ } state;
+#endif
+};
+
+struct bm_addr {
+ void __iomem *ce; /* cache-enabled */
+ void __iomem *ci; /* cache-inhibited */
+};
+
+struct bm_portal {
+ struct bm_addr addr;
+ struct bm_rcr rcr;
+ struct bm_mc mc;
+ struct bm_portal_config config;
+} ____cacheline_aligned;
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+#define RCR_CARRYCLEAR(p) \
+ (void *)((unsigned long)(p) & (~(unsigned long)(BM_RCR_SIZE << 6)))
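+
+/* Each RCR entry is 64 bytes and the BM_RCR_SIZE entries are contiguous,
+ * so the ring occupies (BM_RCR_SIZE << 6) bytes; clearing that bit maps a
+ * just-past-the-end pointer back to the ring base.
+ */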
+
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static inline u8 RCR_PTR2IDX(struct bm_rcr_entry *e)
+{
+ return ((uintptr_t)e >> 6) & (BM_RCR_SIZE - 1);
+}
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void RCR_INC(struct bm_rcr *rcr)
+{
+ /* NB: this is odd-looking, but experiments show that it generates
+ * fast code with essentially no branching overheads. We increment to
+ * the next RCR pointer and handle overflow and 'vbit'.
+ */
+ struct bm_rcr_entry *partial = rcr->cursor + 1;
+
+ rcr->cursor = RCR_CARRYCLEAR(partial);
+ if (partial != rcr->cursor)
+ rcr->vbit ^= BM_RCR_VERB_VBIT;
+}
+
+static inline int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
+ __maybe_unused enum bm_rcr_cmode cmode)
+{
+ /* This use of 'register', as well as all other occurrences, is because
+ * it has been observed to generate much faster code with gcc than is
+ * otherwise the case.
+ */
+ register struct bm_rcr *rcr = &portal->rcr;
+ u32 cfg;
+ u8 pi;
+
+ rcr->ring = portal->addr.ce + BM_CL_RCR;
+ rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+
+ pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+ rcr->cursor = rcr->ring + pi;
+ rcr->vbit = (bm_in(RCR_PI_CINH) & BM_RCR_SIZE) ? BM_RCR_VERB_VBIT : 0;
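+ /* One slot is always left unused so that a completely full ring can
+ * be told apart from an empty one (hence the "- 1").
+ */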
+ rcr->available = BM_RCR_SIZE - 1
+ - bm_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
+ rcr->ithresh = bm_in(RCR_ITR);
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ rcr->busy = 0;
+ rcr->pmode = pmode;
+ rcr->cmode = cmode;
+#endif
+ cfg = (bm_in(CFG) & 0xffffffe0) | (pmode & 0x3); /* BCSP_CFG::RPM */
+ bm_out(CFG, cfg);
+ return 0;
+}
+
+static inline void bm_rcr_finish(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+ u8 pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+ u8 ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(!rcr->busy);
+#endif
+ if (pi != RCR_PTR2IDX(rcr->cursor))
+ pr_crit("losing uncommitted RCR entries\n");
+ if (ci != rcr->ci)
+ pr_crit("missing existing RCR completions\n");
+ if (rcr->ci != RCR_PTR2IDX(rcr->cursor))
+ pr_crit("RCR destroyed unquiesced\n");
+}
+
+static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(!rcr->busy);
+#endif
+ if (!rcr->available)
+ return NULL;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ rcr->busy = 1;
+#endif
+ dcbz_64(rcr->cursor);
+ return rcr->cursor;
+}
+
+static inline void bm_rcr_abort(struct bm_portal *portal)
+{
+ __maybe_unused register struct bm_rcr *rcr = &portal->rcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(rcr->busy);
+ rcr->busy = 0;
+#endif
+}
+
+static inline struct bm_rcr_entry *bm_rcr_pend_and_next(
+ struct bm_portal *portal, u8 myverb)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(rcr->busy);
+ DPAA_ASSERT(rcr->pmode != bm_rcr_pvb);
+#endif
+ if (rcr->available == 1)
+ return NULL;
+ rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
+ dcbf_64(rcr->cursor);
+ RCR_INC(rcr);
+ rcr->available--;
+ dcbz_64(rcr->cursor);
+ return rcr->cursor;
+}
+
+static inline void bm_rcr_pci_commit(struct bm_portal *portal, u8 myverb)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(rcr->busy);
+ DPAA_ASSERT(rcr->pmode == bm_rcr_pci);
+#endif
+ rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
+ RCR_INC(rcr);
+ rcr->available--;
+ hwsync();
+ bm_out(RCR_PI_CINH, RCR_PTR2IDX(rcr->cursor));
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ rcr->busy = 0;
+#endif
+}
+
+static inline void bm_rcr_pce_prefetch(struct bm_portal *portal)
+{
+ __maybe_unused register struct bm_rcr *rcr = &portal->rcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(rcr->pmode == bm_rcr_pce);
+#endif
+ bm_cl_invalidate(RCR_PI);
+ bm_cl_touch_rw(RCR_PI);
+}
+
+static inline void bm_rcr_pce_commit(struct bm_portal *portal, u8 myverb)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(rcr->busy);
+ DPAA_ASSERT(rcr->pmode == bm_rcr_pce);
+#endif
+ rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
+ RCR_INC(rcr);
+ rcr->available--;
+ lwsync();
+ bm_cl_out(RCR_PI, RCR_PTR2IDX(rcr->cursor));
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ rcr->busy = 0;
+#endif
+}
+
+static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+ struct bm_rcr_entry *rcursor;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(rcr->busy);
+ DPAA_ASSERT(rcr->pmode == bm_rcr_pvb);
+#endif
+ lwsync();
+ rcursor = rcr->cursor;
+ rcursor->__dont_write_directly__verb = myverb | rcr->vbit;
+ dcbf_64(rcursor);
+ RCR_INC(rcr);
+ rcr->available--;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ rcr->busy = 0;
+#endif
+}
+
+static inline u8 bm_rcr_cci_update(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+ u8 diff, old_ci = rcr->ci;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(rcr->cmode == bm_rcr_cci);
+#endif
+ rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+ diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
+ rcr->available += diff;
+ return diff;
+}
+
+static inline void bm_rcr_cce_prefetch(struct bm_portal *portal)
+{
+ __maybe_unused register struct bm_rcr *rcr = &portal->rcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
+#endif
+ bm_cl_touch_ro(RCR_CI);
+}
+
+static inline u8 bm_rcr_cce_update(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+ u8 diff, old_ci = rcr->ci;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
+#endif
+ rcr->ci = bm_cl_in(RCR_CI) & (BM_RCR_SIZE - 1);
+ bm_cl_invalidate(RCR_CI);
+ diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
+ rcr->available += diff;
+ return diff;
+}
+
+static inline u8 bm_rcr_get_ithresh(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+ return rcr->ithresh;
+}
+
+static inline void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+ rcr->ithresh = ithresh;
+ bm_out(RCR_ITR, ithresh);
+}
+
+static inline u8 bm_rcr_get_avail(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+ return rcr->available;
+}
+
+static inline u8 bm_rcr_get_fill(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+ return BM_RCR_SIZE - 1 - rcr->available;
+}
+
+/* --- Management command API --- */
+
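+/*
+ * Management commands use one command register (CR) and two response
+ * registers (RR0/RR1) in ping-pong fashion: 'rridx' selects the slot the
+ * next result will land in, and the valid-bit ('vbit') toggles with every
+ * command so a stale response is never mistaken for a fresh one.
+ */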
+static inline int bm_mc_init(struct bm_portal *portal)
+{
+ register struct bm_mc *mc = &portal->mc;
+
+ mc->cr = portal->addr.ce + BM_CL_CR;
+ mc->rr = portal->addr.ce + BM_CL_RR0;
+ mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) &
+ BM_MCC_VERB_VBIT) ? 0 : 1;
+ mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ mc->state = mc_idle;
+#endif
+ return 0;
+}
+
+static inline void bm_mc_finish(struct bm_portal *portal)
+{
+ __maybe_unused register struct bm_mc *mc = &portal->mc;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mc->state == mc_idle);
+ if (mc->state != mc_idle)
+ pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
+{
+ register struct bm_mc *mc = &portal->mc;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mc->state == mc_idle);
+ mc->state = mc_user;
+#endif
+ dcbz_64(mc->cr);
+ return mc->cr;
+}
+
+static inline void bm_mc_abort(struct bm_portal *portal)
+{
+ __maybe_unused register struct bm_mc *mc = &portal->mc;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mc->state == mc_user);
+ mc->state = mc_idle;
+#endif
+}
+
+static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
+{
+ register struct bm_mc *mc = &portal->mc;
+ struct bm_mc_result *rr = mc->rr + mc->rridx;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mc->state == mc_user);
+#endif
+ lwsync();
+ mc->cr->__dont_write_directly__verb = myverb | mc->vbit;
+ dcbf(mc->cr);
+ dcbit_ro(rr);
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ mc->state = mc_hw;
+#endif
+}
+
+static inline struct bm_mc_result *bm_mc_result(struct bm_portal *portal)
+{
+ register struct bm_mc *mc = &portal->mc;
+ struct bm_mc_result *rr = mc->rr + mc->rridx;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mc->state == mc_hw);
+#endif
+ /* The inactive response register's verb byte always returns zero until
+ * its command is submitted and completed. This includes the valid-bit,
+ * in case you were wondering.
+ */
+ if (!__raw_readb(&rr->verb)) {
+ dcbit_ro(rr);
+ return NULL;
+ }
+ mc->rridx ^= 1;
+ mc->vbit ^= BM_MCC_VERB_VBIT;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ mc->state = mc_idle;
+#endif
+ return rr;
+}
+
+#define SCN_REG(bpid) BM_REG_SCN((bpid) / 32)
+#define SCN_BIT(bpid) (0x80000000 >> ((bpid) & 31))
+static inline void bm_isr_bscn_mask(struct bm_portal *portal, u8 bpid,
+ int enable)
+{
+ u32 val;
+
+ DPAA_ASSERT(bpid < bman_pool_max);
+ /* REG_SCN for bpid=0..31, REG_SCN+4 for bpid=32..63 */
+ val = __bm_in(&portal->addr, SCN_REG(bpid));
+ if (enable)
+ val |= SCN_BIT(bpid);
+ else
+ val &= ~SCN_BIT(bpid);
+ __bm_out(&portal->addr, SCN_REG(bpid), val);
+}
+
+static inline u32 __bm_isr_read(struct bm_portal *portal, enum bm_isr_reg n)
+{
+#if defined(RTE_ARCH_ARM64)
+ return __bm_in(&portal->addr, BM_REG_ISR + (n << 6));
+#else
+ return __bm_in(&portal->addr, BM_REG_ISR + (n << 2));
+#endif
+}
+
+static inline void __bm_isr_write(struct bm_portal *portal, enum bm_isr_reg n,
+ u32 val)
+{
+#if defined(RTE_ARCH_ARM64)
+ __bm_out(&portal->addr, BM_REG_ISR + (n << 6), val);
+#else
+ __bm_out(&portal->addr, BM_REG_ISR + (n << 2), val);
+#endif
+}
+
+/* Buffer Pool Cleanup */
+static inline int bm_shutdown_pool(struct bm_portal *p, u32 bpid)
+{
+ struct bm_mc_command *bm_cmd;
+ struct bm_mc_result *bm_res;
+
+ int aq_count = 0;
+ bool stop = false;
+
+ while (!stop) {
+ /* Acquire buffers until empty */
+ bm_cmd = bm_mc_start(p);
+ bm_cmd->acquire.bpid = bpid;
+ bm_mc_commit(p, BM_MCC_VERB_CMD_ACQUIRE | 1);
+ while (!(bm_res = bm_mc_result(p)))
+ cpu_relax();
+ if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
+ /* Pool is empty */
+ stop = true;
+ } else
+ ++aq_count;
+ }
+ return 0;
+}
+
+#endif /* __BMAN_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman_driver.c b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman_driver.c
new file mode 100644
index 00000000..b14b5905
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman_driver.c
@@ -0,0 +1,290 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#include <rte_branch_prediction.h>
+
+#include <fsl_usd.h>
+#include <process.h>
+#include "bman_priv.h"
+#include <sys/ioctl.h>
+
+/*
+ * Global state: the IP revision and the max number of buffer pools that
+ * this BMan version supports.
+ */
+static u16 bman_ip_rev;
+u16 bman_pool_max;
+static void *bman_ccsr_map;
+
+/*****************/
+/* Portal driver */
+/*****************/
+
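+/*
+ * Portal state is thread-local: each thread maps and owns its own portal
+ * (fd, config, mapping), which is why the init path insists that the
+ * calling thread is affine to exactly one CPU.
+ */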
+static __thread int fd = -1;
+static __thread struct bm_portal_config pcfg;
+static __thread struct dpaa_ioctl_portal_map map = {
+ .type = dpaa_portal_bman
+};
+
+static int fsl_bman_portal_init(uint32_t idx, int is_shared)
+{
+ cpu_set_t cpuset;
+ struct bman_portal *portal;
+ int loop, ret;
+ struct dpaa_ioctl_irq_map irq_map;
+
+ /* Verify the thread's cpu-affinity */
+ ret = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
+ &cpuset);
+ if (ret) {
+ error(0, ret, "pthread_getaffinity_np()");
+ return ret;
+ }
+ pcfg.cpu = -1;
+ for (loop = 0; loop < CPU_SETSIZE; loop++)
+ if (CPU_ISSET(loop, &cpuset)) {
+ if (pcfg.cpu != -1) {
+ pr_err("Thread is not affine to 1 cpu");
+ return -EINVAL;
+ }
+ pcfg.cpu = loop;
+ }
+ if (pcfg.cpu == -1) {
+ pr_err("Bug in getaffinity handling!");
+ return -EINVAL;
+ }
+ /* Allocate and map a bman portal */
+ map.index = idx;
+ ret = process_portal_map(&map);
+ if (ret) {
+ error(0, ret, "process_portal_map()");
+ return ret;
+ }
+ /* Make the portal's cache-[enabled|inhibited] regions */
+ pcfg.addr_virt[DPAA_PORTAL_CE] = map.addr.cena;
+ pcfg.addr_virt[DPAA_PORTAL_CI] = map.addr.cinh;
+ pcfg.is_shared = is_shared;
+ pcfg.index = map.index;
+ bman_depletion_fill(&pcfg.mask);
+
+ fd = open(BMAN_PORTAL_IRQ_PATH, O_RDONLY);
+ if (fd == -1) {
+ pr_err("BMan irq init failed");
+ process_portal_unmap(&map.addr);
+ return -EBUSY;
+ }
+ /* Use the IRQ FD as a unique IRQ number */
+ pcfg.irq = fd;
+
+ portal = bman_create_affine_portal(&pcfg);
+ if (!portal) {
+ pr_err("Bman portal initialisation failed (%d)",
+ pcfg.cpu);
+ process_portal_unmap(&map.addr);
+ return -EBUSY;
+ }
+
+ /* Set the IRQ number */
+ irq_map.type = dpaa_portal_bman;
+ irq_map.portal_cinh = map.addr.cinh;
+ process_portal_irq_map(fd, &irq_map);
+ return 0;
+}
+
+static int fsl_bman_portal_finish(void)
+{
+ __maybe_unused const struct bm_portal_config *cfg;
+ int ret;
+
+ process_portal_irq_unmap(fd);
+
+ cfg = bman_destroy_affine_portal();
+ DPAA_BUG_ON(cfg != &pcfg);
+ ret = process_portal_unmap(&map.addr);
+ if (ret)
+ error(0, ret, "process_portal_unmap()");
+ return ret;
+}
+
+int bman_thread_init(void)
+{
+ /* Convert from contiguous/virtual cpu numbering to real cpu when
+ * calling into the code that is dependent on the device naming.
+ */
+ return fsl_bman_portal_init(QBMAN_ANY_PORTAL_IDX, 0);
+}
+
+int bman_thread_finish(void)
+{
+ return fsl_bman_portal_finish();
+}
+
+void bman_thread_irq(void)
+{
+ qbman_invoke_irq(pcfg.irq);
+ /* Now we need to uninhibit interrupts. This is the only code outside
+ * the regular portal driver that manipulates any portal register, so
+ * rather than breaking that encapsulation I am simply hard-coding the
+ * offset to the inhibit register here.
+ */
+ out_be32(pcfg.addr_virt[DPAA_PORTAL_CI] + 0xe0c, 0);
+}
+
+int bman_init_ccsr(const struct device_node *node)
+{
+ static int ccsr_map_fd;
+ uint64_t phys_addr;
+ const uint32_t *bman_addr;
+ uint64_t regs_size;
+
+ bman_addr = of_get_address(node, 0, &regs_size, NULL);
+ if (!bman_addr) {
+ pr_err("of_get_address cannot return BMan address");
+ return -EINVAL;
+ }
+ phys_addr = of_translate_address(node, bman_addr);
+ if (!phys_addr) {
+ pr_err("of_translate_address failed");
+ return -EINVAL;
+ }
+
+ ccsr_map_fd = open(BMAN_CCSR_MAP, O_RDWR);
+ if (unlikely(ccsr_map_fd < 0)) {
+ pr_err("Can not open /dev/mem for BMan CCSR map");
+ return ccsr_map_fd;
+ }
+
+ bman_ccsr_map = mmap(NULL, regs_size, PROT_READ |
+ PROT_WRITE, MAP_SHARED, ccsr_map_fd, phys_addr);
+ if (bman_ccsr_map == MAP_FAILED) {
+ close(ccsr_map_fd);
+ pr_err("Cannot map BMan CCSR base Bman: "
+ "0x%x Phys: 0x%" PRIx64 " size 0x%" PRIx64,
+ *bman_addr, phys_addr, regs_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int bman_global_init(void)
+{
+ const struct device_node *dt_node;
+ static int done;
+
+ if (done)
+ return -EBUSY;
+ /* Use the device-tree to determine IP revision until something better
+ * is devised.
+ */
+ dt_node = of_find_compatible_node(NULL, NULL, "fsl,bman-portal");
+ if (!dt_node) {
+ pr_err("No bman portals available for any CPU\n");
+ return -ENODEV;
+ }
+ if (of_device_is_compatible(dt_node, "fsl,bman-portal-1.0") ||
+ of_device_is_compatible(dt_node, "fsl,bman-portal-1.0.0")) {
+ bman_ip_rev = BMAN_REV10;
+ bman_pool_max = 64;
+ } else if (of_device_is_compatible(dt_node, "fsl,bman-portal-2.0") ||
+ of_device_is_compatible(dt_node, "fsl,bman-portal-2.0.8")) {
+ bman_ip_rev = BMAN_REV20;
+ bman_pool_max = 8;
+ } else if (of_device_is_compatible(dt_node, "fsl,bman-portal-2.1.0") ||
+ of_device_is_compatible(dt_node, "fsl,bman-portal-2.1.1") ||
+ of_device_is_compatible(dt_node, "fsl,bman-portal-2.1.2") ||
+ of_device_is_compatible(dt_node, "fsl,bman-portal-2.1.3")) {
+ bman_ip_rev = BMAN_REV21;
+ bman_pool_max = 64;
+ } else {
+ pr_warn("unknown BMan version in portal node,default "
+ "to rev1.0");
+ bman_ip_rev = BMAN_REV10;
+ bman_pool_max = 64;
+ }
+
+ if (!bman_ip_rev) {
+ pr_err("Unknown bman portal version\n");
+ return -ENODEV;
+ }
+ {
+ const struct device_node *dn = of_find_compatible_node(NULL,
+ NULL, "fsl,bman");
+ if (!dn)
+ pr_err("No bman device node available");
+ else if (bman_init_ccsr(dn))
+ pr_err("BMan CCSR map failed.");
+ }
+
+ done = 1;
+ return 0;
+}
+
+#define BMAN_POOL_CONTENT(n) (0x0600 + ((n) * 0x04))
+u32 bm_pool_free_buffers(u32 bpid)
+{
+ return in_be32(bman_ccsr_map + BMAN_POOL_CONTENT(bpid));
+}
+
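+/*
+ * Depletion thresholds are stored as an 8-bit coefficient and a 4-bit
+ * exponent, (val | e << 8), representing approximately val << e.
+ * Worked example: 0x1234 truncates to coefficient 0x91 with e=5, i.e.
+ * 0x591, which the hardware expands back to 0x91 << 5 = 0x1220; with
+ * 'roundup' the coefficient is biased upward (0x92, giving 0x1240) so the
+ * encoded threshold is never below the requested one.
+ */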
+static u32 __generate_thresh(u32 val, int roundup)
+{
+ u32 e = 0; /* exponent */
+ int oddbit = 0;
+
+ while (val > 0xff) {
+ oddbit = val & 1;
+ val >>= 1;
+ e++;
+ if (roundup && oddbit)
+ val++;
+ }
+ DPAA_ASSERT(e < 0x10);
+ return (val | (e << 8));
+}
+
+#define POOL_SWDET(n) (0x0000 + ((n) * 0x04))
+#define POOL_HWDET(n) (0x0100 + ((n) * 0x04))
+#define POOL_SWDXT(n) (0x0200 + ((n) * 0x04))
+#define POOL_HWDXT(n) (0x0300 + ((n) * 0x04))
+int bm_pool_set(u32 bpid, const u32 *thresholds)
+{
+ if (!bman_ccsr_map)
+ return -ENODEV;
+ if (bpid >= bman_pool_max)
+ return -EINVAL;
+ out_be32(bman_ccsr_map + POOL_SWDET(bpid),
+ __generate_thresh(thresholds[0], 0));
+ out_be32(bman_ccsr_map + POOL_SWDXT(bpid),
+ __generate_thresh(thresholds[1], 1));
+ out_be32(bman_ccsr_map + POOL_HWDET(bpid),
+ __generate_thresh(thresholds[2], 0));
+ out_be32(bman_ccsr_map + POOL_HWDXT(bpid),
+ __generate_thresh(thresholds[3], 1));
+ return 0;
+}
+
+#define BMAN_LOW_DEFAULT_THRESH 0x40
+#define BMAN_HIGH_DEFAULT_THRESH 0x80
+int bm_pool_set_hw_threshold(u32 bpid, const u32 low_thresh,
+ const u32 high_thresh)
+{
+ if (!bman_ccsr_map)
+ return -ENODEV;
+ if (bpid >= bman_pool_max)
+ return -EINVAL;
+ if (low_thresh && high_thresh) {
+ out_be32(bman_ccsr_map + POOL_HWDET(bpid),
+ __generate_thresh(low_thresh, 0));
+ out_be32(bman_ccsr_map + POOL_HWDXT(bpid),
+ __generate_thresh(high_thresh, 1));
+ } else {
+ out_be32(bman_ccsr_map + POOL_HWDET(bpid),
+ __generate_thresh(BMAN_LOW_DEFAULT_THRESH, 0));
+ out_be32(bman_ccsr_map + POOL_HWDXT(bpid),
+ __generate_thresh(BMAN_HIGH_DEFAULT_THRESH, 1));
+ }
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman_priv.h b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman_priv.h
new file mode 100644
index 00000000..5a3e330d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman_priv.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#ifndef __BMAN_PRIV_H
+#define __BMAN_PRIV_H
+
+#include "dpaa_sys.h"
+#include <fsl_bman.h>
+
+/* Revision info (for errata and feature handling) */
+#define BMAN_REV10 0x0100
+#define BMAN_REV20 0x0200
+#define BMAN_REV21 0x0201
+
+#define BMAN_PORTAL_IRQ_PATH "/dev/fsl-usdpaa-irq"
+#define BMAN_CCSR_MAP "/dev/mem"
+
+/* This mask contains all the "irqsource" bits visible to API users */
+#define BM_PIRQ_VISIBLE (BM_PIRQ_RCRI | BM_PIRQ_BSCN)
+
+/* These are bm_<reg>_<verb>(). So for example, bm_isr_disable_write() means
+ * "write the disable register" rather than "disable the ability to write".
+ */
+#define bm_isr_status_read(bm) __bm_isr_read(bm, bm_isr_status)
+#define bm_isr_status_clear(bm, m) __bm_isr_write(bm, bm_isr_status, m)
+#define bm_isr_enable_read(bm) __bm_isr_read(bm, bm_isr_enable)
+#define bm_isr_enable_write(bm, v) __bm_isr_write(bm, bm_isr_enable, v)
+#define bm_isr_disable_read(bm) __bm_isr_read(bm, bm_isr_disable)
+#define bm_isr_disable_write(bm, v) __bm_isr_write(bm, bm_isr_disable, v)
+#define bm_isr_inhibit(bm) __bm_isr_write(bm, bm_isr_inhibit, 1)
+#define bm_isr_uninhibit(bm) __bm_isr_write(bm, bm_isr_inhibit, 0)
+
+/*
+ * Global variable: the max number of buffer pools this BMan version supports
+ */
+extern u16 bman_pool_max;
+
+/* used by CCSR and portal interrupt code */
+enum bm_isr_reg {
+ bm_isr_status = 0,
+ bm_isr_enable = 1,
+ bm_isr_disable = 2,
+ bm_isr_inhibit = 3
+};
+
+struct bm_portal_config {
+ /*
+ * Corenet portal addresses;
+ * [0]==cache-enabled, [1]==cache-inhibited.
+ */
+ void __iomem *addr_virt[2];
+ /* Allow these to be joined in lists */
+ struct list_head list;
+ /* User-visible portal configuration settings */
+ /* This is used for any "core-affine" portals, ie. default portals
+ * associated to the corresponding cpu. -1 implies that there is no
+ * core affinity configured.
+ */
+ int cpu;
+ /* portal interrupt line */
+ int irq;
+ /* the unique index of this portal */
+ u32 index;
+ /* Is this portal shared? (If so, it has coarser locking and demuxes
+ * processing on behalf of other CPUs.)
+ */
+ int is_shared;
+ /* These are the buffer pool IDs that may be used via this portal. */
+ struct bman_depletion mask;
+
+};
+
+int bman_init_ccsr(const struct device_node *node);
+
+struct bman_portal *bman_create_affine_portal(
+ const struct bm_portal_config *config);
+const struct bm_portal_config *bman_destroy_affine_portal(void);
+
+/* Set depletion thresholds associated with a buffer pool. Requires that the
+ * operating system have access to Bman CCSR (ie. compiled in support and
+ * run-time access courtesy of the device-tree).
+ */
+int bm_pool_set(u32 bpid, const u32 *thresholds);
+
+/* Read the free buffer count for a given buffer pool */
+u32 bm_pool_free_buffers(u32 bpid);
+
+#endif /* __BMAN_PRIV_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/dpaa_alloc.c b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/dpaa_alloc.c
new file mode 100644
index 00000000..a05803c2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/dpaa_alloc.c
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2009-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#include "dpaa_sys.h"
+#include <process.h>
+#include <fsl_qman.h>
+#include <fsl_bman.h>
+
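+/*
+ * These are thin wrappers: all ID-range management (BPIDs, FQIDs, pool
+ * channels, CGR IDs) is delegated to the kernel's USDPAA process driver
+ * through the process_alloc()/process_release()/process_reserve() helpers.
+ */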
+int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial)
+{
+ return process_alloc(dpaa_id_bpid, result, count, align, partial);
+}
+
+void bman_release_bpid_range(u32 bpid, u32 count)
+{
+ process_release(dpaa_id_bpid, bpid, count);
+}
+
+int bman_reserve_bpid_range(u32 bpid, u32 count)
+{
+ return process_reserve(dpaa_id_bpid, bpid, count);
+}
+
+int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial)
+{
+ return process_alloc(dpaa_id_fqid, result, count, align, partial);
+}
+
+void qman_release_fqid_range(u32 fqid, u32 count)
+{
+ process_release(dpaa_id_fqid, fqid, count);
+}
+
+int qman_reserve_fqid_range(u32 fqid, unsigned int count)
+{
+ return process_reserve(dpaa_id_fqid, fqid, count);
+}
+
+int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial)
+{
+ return process_alloc(dpaa_id_qpool, result, count, align, partial);
+}
+
+void qman_release_pool_range(u32 pool, u32 count)
+{
+ process_release(dpaa_id_qpool, pool, count);
+}
+
+int qman_reserve_pool_range(u32 pool, u32 count)
+{
+ return process_reserve(dpaa_id_qpool, pool, count);
+}
+
+int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial)
+{
+ return process_alloc(dpaa_id_cgrid, result, count, align, partial);
+}
+
+void qman_release_cgrid_range(u32 cgrid, u32 count)
+{
+ process_release(dpaa_id_cgrid, cgrid, count);
+}
+
+int qman_reserve_cgrid_range(u32 cgrid, u32 count)
+{
+ return process_reserve(dpaa_id_cgrid, cgrid, count);
+}
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/dpaa_sys.c b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/dpaa_sys.c
new file mode 100644
index 00000000..9d6bfd40
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/dpaa_sys.c
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#include <process.h>
+#include "dpaa_sys.h"
+
+struct process_interrupt {
+ int irq;
+ irqreturn_t (*isr)(int irq, void *arg);
+ unsigned long flags;
+ const char *name;
+ void *arg;
+ struct list_head node;
+};
+
+static COMPAT_LIST_HEAD(process_irq_list);
+static pthread_mutex_t process_irq_lock = PTHREAD_MUTEX_INITIALIZER;
+
+static void process_interrupt_install(struct process_interrupt *irq)
+{
+ int ret;
+ /* Add the irq to the end of the list */
+ ret = pthread_mutex_lock(&process_irq_lock);
+ assert(!ret);
+ list_add_tail(&irq->node, &process_irq_list);
+ ret = pthread_mutex_unlock(&process_irq_lock);
+ assert(!ret);
+}
+
+static void process_interrupt_remove(struct process_interrupt *irq)
+{
+ int ret;
+
+ ret = pthread_mutex_lock(&process_irq_lock);
+ assert(!ret);
+ list_del(&irq->node);
+ ret = pthread_mutex_unlock(&process_irq_lock);
+ assert(!ret);
+}
+
+static struct process_interrupt *process_interrupt_find(int irq_num)
+{
+ int ret;
+ struct process_interrupt *i, *found = NULL;
+
+ ret = pthread_mutex_lock(&process_irq_lock);
+ assert(!ret);
+ list_for_each_entry(i, &process_irq_list, node) {
+ if (i->irq == irq_num) {
+ found = i;
+ break;
+ }
+ }
+ ret = pthread_mutex_unlock(&process_irq_lock);
+ assert(!ret);
+ return found;
+}
+
+/* This is the interface from the platform-agnostic driver code to (de)register
+ * interrupt handlers. We simply create/destroy corresponding structs.
+ */
+int qbman_request_irq(int irq, irqreturn_t (*isr)(int irq, void *arg),
+ unsigned long flags, const char *name,
+ void *arg __maybe_unused)
+{
+ struct process_interrupt *irq_node =
+ kmalloc(sizeof(*irq_node), GFP_KERNEL);
+
+ if (!irq_node)
+ return -ENOMEM;
+ irq_node->irq = irq;
+ irq_node->isr = isr;
+ irq_node->flags = flags;
+ irq_node->name = name;
+ irq_node->arg = arg;
+ process_interrupt_install(irq_node);
+ return 0;
+}
+
+int qbman_free_irq(int irq, __maybe_unused void *arg)
+{
+ struct process_interrupt *irq_node = process_interrupt_find(irq);
+
+ if (!irq_node)
+ return -EINVAL;
+ process_interrupt_remove(irq_node);
+ kfree(irq_node);
+ return 0;
+}
+
+/* This is the interface from the platform-specific driver code to obtain
+ * interrupt handlers that have been registered.
+ */
+void qbman_invoke_irq(int irq)
+{
+ struct process_interrupt *irq_node = process_interrupt_find(irq);
+
+ if (irq_node)
+ irq_node->isr(irq, irq_node->arg);
+}
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/dpaa_sys.h b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/dpaa_sys.h
new file mode 100644
index 00000000..034991ba
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/dpaa_sys.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#ifndef __DPAA_SYS_H
+#define __DPAA_SYS_H
+
+#include <of.h>
+
+/* For 2-element tables related to cache-inhibited and cache-enabled mappings */
+#define DPAA_PORTAL_CE 0
+#define DPAA_PORTAL_CI 1
+
+#define DPAA_ASSERT(x) RTE_ASSERT(x)
+
+/* This is the interface from the platform-agnostic driver code to (de)register
+ * interrupt handlers. We simply create/destroy corresponding structs.
+ */
+int qbman_request_irq(int irq, irqreturn_t (*isr)(int irq, void *arg),
+ unsigned long flags, const char *name, void *arg);
+int qbman_free_irq(int irq, void *arg);
+
+void qbman_invoke_irq(int irq);
+
+#endif /* __DPAA_SYS_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/process.c b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/process.c
new file mode 100644
index 00000000..2c23c98d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/process.c
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2011-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+#include <assert.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+
+#include "process.h"
+
+#include <fsl_usd.h>
+
+/* As higher-level drivers will be built on top of this (dma_mem, qbman, ...),
+ * it's preferable that the process driver itself not provide any exported API.
+ * As such, combined with the fact that none of these operations are
+ * performance critical, it is justified to use lazy initialisation, so that's
+ * what the lock is for.
+ */
+static int fd = -1;
+static pthread_mutex_t fd_init_lock = PTHREAD_MUTEX_INITIALIZER;
+
+static int check_fd(void)
+{
+ int ret;
+
+ if (fd >= 0)
+ return 0;
+ ret = pthread_mutex_lock(&fd_init_lock);
+ assert(!ret);
+ /* check again with the lock held */
+ if (fd < 0)
+ fd = open(PROCESS_PATH, O_RDWR);
+ ret = pthread_mutex_unlock(&fd_init_lock);
+ assert(!ret);
+ return (fd >= 0) ? 0 : -ENODEV;
+}
+
+#define DPAA_IOCTL_MAGIC 'u'
+struct dpaa_ioctl_id_alloc {
+ uint32_t base; /* Return value, the start of the allocated range */
+ enum dpaa_id_type id_type; /* what kind of resource(s) to allocate */
+ uint32_t num; /* how many IDs to allocate (and return value) */
+ uint32_t align; /* must be a power of 2, 0 is treated like 1 */
+ int partial; /* whether to allow less than 'num' */
+};
+
+struct dpaa_ioctl_id_release {
+ /* Input; */
+ enum dpaa_id_type id_type;
+ uint32_t base;
+ uint32_t num;
+};
+
+struct dpaa_ioctl_id_reserve {
+ enum dpaa_id_type id_type;
+ uint32_t base;
+ uint32_t num;
+};
+
+#define DPAA_IOCTL_ID_ALLOC \
+ _IOWR(DPAA_IOCTL_MAGIC, 0x01, struct dpaa_ioctl_id_alloc)
+#define DPAA_IOCTL_ID_RELEASE \
+ _IOW(DPAA_IOCTL_MAGIC, 0x02, struct dpaa_ioctl_id_release)
+#define DPAA_IOCTL_ID_RESERVE \
+ _IOW(DPAA_IOCTL_MAGIC, 0x0A, struct dpaa_ioctl_id_reserve)
+
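+/*
+ * Allocate 'num' IDs of the given type. On success the consecutive IDs are
+ * written to 'base' and the count actually allocated is returned, which may
+ * be less than 'num' when 'partial' is non-zero.
+ */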
+int process_alloc(enum dpaa_id_type id_type, uint32_t *base, uint32_t num,
+ uint32_t align, int partial)
+{
+ struct dpaa_ioctl_id_alloc id = {
+ .id_type = id_type,
+ .num = num,
+ .align = align,
+ .partial = partial
+ };
+ int ret = check_fd();
+
+ if (ret)
+ return ret;
+ ret = ioctl(fd, DPAA_IOCTL_ID_ALLOC, &id);
+ if (ret)
+ return ret;
+ for (ret = 0; ret < (int)id.num; ret++)
+ base[ret] = id.base + ret;
+ return id.num;
+}
+
+void process_release(enum dpaa_id_type id_type, uint32_t base, uint32_t num)
+{
+ struct dpaa_ioctl_id_release id = {
+ .id_type = id_type,
+ .base = base,
+ .num = num
+ };
+ int ret = check_fd();
+
+ if (ret) {
+ fprintf(stderr, "Process FD failure\n");
+ return;
+ }
+ ret = ioctl(fd, DPAA_IOCTL_ID_RELEASE, &id);
+ if (ret)
+ fprintf(stderr, "Process FD ioctl failure type %d base 0x%x num %d\n",
+ id_type, base, num);
+}
+
+int process_reserve(enum dpaa_id_type id_type, uint32_t base, uint32_t num)
+{
+ struct dpaa_ioctl_id_reserve id = {
+ .id_type = id_type,
+ .base = base,
+ .num = num
+ };
+ int ret = check_fd();
+
+ if (ret)
+ return ret;
+ return ioctl(fd, DPAA_IOCTL_ID_RESERVE, &id);
+}
+
+/***************************************/
+/* Mapping and using QMan/BMan portals */
+/***************************************/
+
+#define DPAA_IOCTL_PORTAL_MAP \
+ _IOWR(DPAA_IOCTL_MAGIC, 0x07, struct dpaa_ioctl_portal_map)
+#define DPAA_IOCTL_PORTAL_UNMAP \
+ _IOW(DPAA_IOCTL_MAGIC, 0x08, struct dpaa_portal_map)
+
+int process_portal_map(struct dpaa_ioctl_portal_map *params)
+{
+ int ret = check_fd();
+
+ if (ret)
+ return ret;
+
+ ret = ioctl(fd, DPAA_IOCTL_PORTAL_MAP, params);
+ if (ret) {
+ perror("ioctl(DPAA_IOCTL_PORTAL_MAP)");
+ return ret;
+ }
+ return 0;
+}
+
+int process_portal_unmap(struct dpaa_portal_map *map)
+{
+ int ret = check_fd();
+
+ if (ret)
+ return ret;
+
+ ret = ioctl(fd, DPAA_IOCTL_PORTAL_UNMAP, map);
+ if (ret) {
+ perror("ioctl(DPAA_IOCTL_PORTAL_UNMAP)");
+ return ret;
+ }
+ return 0;
+}
+
+#define DPAA_IOCTL_PORTAL_IRQ_MAP \
+ _IOW(DPAA_IOCTL_MAGIC, 0x09, struct dpaa_ioctl_irq_map)
+
+int process_portal_irq_map(int ifd, struct dpaa_ioctl_irq_map *map)
+{
+ map->fd = fd;
+ return ioctl(ifd, DPAA_IOCTL_PORTAL_IRQ_MAP, map);
+}
+
+int process_portal_irq_unmap(int ifd)
+{
+ return close(ifd);
+}
+
+struct dpaa_ioctl_raw_portal {
+ /* inputs */
+ enum dpaa_portal_type type; /* Type of portal to allocate */
+
+ uint8_t enable_stash; /* set to non-zero to turn on stashing */
+ /* Stashing attributes for the portal */
+ uint32_t cpu;
+ uint32_t cache;
+ uint32_t window;
+ /* Specifies the stash request queue this portal should use */
+ uint8_t sdest;
+
+ /* Specifies a specific portal index to map, or QBMAN_ANY_PORTAL_IDX
+ * for don't care. The portal index will be populated by the
+ * driver when the ioctl() successfully completes.
+ */
+ uint32_t index;
+
+ /* outputs */
+ uint64_t cinh;
+ uint64_t cena;
+};
+
+#define DPAA_IOCTL_ALLOC_RAW_PORTAL \
+ _IOWR(DPAA_IOCTL_MAGIC, 0x0C, struct dpaa_ioctl_raw_portal)
+
+#define DPAA_IOCTL_FREE_RAW_PORTAL \
+ _IOR(DPAA_IOCTL_MAGIC, 0x0D, struct dpaa_ioctl_raw_portal)
+
+static int process_portal_allocate(struct dpaa_ioctl_raw_portal *portal)
+{
+ int ret = check_fd();
+
+ if (ret)
+ return ret;
+
+ ret = ioctl(fd, DPAA_IOCTL_ALLOC_RAW_PORTAL, portal);
+ if (ret) {
+ perror("ioctl(DPAA_IOCTL_ALLOC_RAW_PORTAL)");
+ return ret;
+ }
+ return 0;
+}
+
+static int process_portal_free(struct dpaa_ioctl_raw_portal *portal)
+{
+ int ret = check_fd();
+
+ if (ret)
+ return ret;
+
+ ret = ioctl(fd, DPAA_IOCTL_FREE_RAW_PORTAL, portal);
+ if (ret) {
+ perror("ioctl(DPAA_IOCTL_FREE_RAW_PORTAL)");
+ return ret;
+ }
+ return 0;
+}
+
+int qman_allocate_raw_portal(struct dpaa_raw_portal *portal)
+{
+ struct dpaa_ioctl_raw_portal input;
+ int ret;
+
+ input.type = dpaa_portal_qman;
+ input.index = portal->index;
+ input.enable_stash = portal->enable_stash;
+ input.cpu = portal->cpu;
+ input.cache = portal->cache;
+ input.window = portal->window;
+ input.sdest = portal->sdest;
+
+ ret = process_portal_allocate(&input);
+ if (ret)
+ return ret;
+ portal->index = input.index;
+ portal->cinh = input.cinh;
+ portal->cena = input.cena;
+ return 0;
+}
+
+int qman_free_raw_portal(struct dpaa_raw_portal *portal)
+{
+ struct dpaa_ioctl_raw_portal input;
+
+ input.type = dpaa_portal_qman;
+ input.index = portal->index;
+ input.cinh = portal->cinh;
+ input.cena = portal->cena;
+
+ return process_portal_free(&input);
+}
+
+int bman_allocate_raw_portal(struct dpaa_raw_portal *portal)
+{
+ struct dpaa_ioctl_raw_portal input;
+ int ret;
+
+ input.type = dpaa_portal_bman;
+ input.index = portal->index;
+ input.enable_stash = 0;
+
+ ret = process_portal_allocate(&input);
+ if (ret)
+ return ret;
+ portal->index = input.index;
+ portal->cinh = input.cinh;
+ portal->cena = input.cena;
+ return 0;
+}
+
+int bman_free_raw_portal(struct dpaa_raw_portal *portal)
+{
+ struct dpaa_ioctl_raw_portal input;
+
+ input.type = dpaa_portal_bman;
+ input.index = portal->index;
+ input.cinh = portal->cinh;
+ input.cena = portal->cena;
+
+ return process_portal_free(&input);
+}
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman.c b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman.c
new file mode 100644
index 00000000..7c17027f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman.c
@@ -0,0 +1,2755 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#include "qman.h"
+#include <rte_branch_prediction.h>
+#include <rte_dpaa_bus.h>
+#include <rte_eventdev.h>
+#include <rte_byteorder.h>
+
+/* Compilation constants */
+#define DQRR_MAXFILL 15
+#define EQCR_ITHRESH 4 /* if EQCR congests, interrupt threshold */
+#define IRQNAME "QMan portal %d"
+#define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */
+/* maximum number of DQRR entries to process in qman_poll() */
+#define FSL_QMAN_POLL_LIMIT 8
+
+/* Lock/unlock frame queues, subject to the "LOCKED" flag. This is about
+ * inter-processor locking only. Note, FQLOCK() is always called either under a
+ * local_irq_save() or from interrupt context - hence there's no need for irq
+ * protection (and indeed, attempting to nest irq-protection doesn't work, as
+ * the "irq en/disable" machinery isn't recursive...).
+ */
+#define FQLOCK(fq) \
+ do { \
+ struct qman_fq *__fq478 = (fq); \
+ if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
+ spin_lock(&__fq478->fqlock); \
+ } while (0)
+#define FQUNLOCK(fq) \
+ do { \
+ struct qman_fq *__fq478 = (fq); \
+ if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
+ spin_unlock(&__fq478->fqlock); \
+ } while (0)
+
+static inline void fq_set(struct qman_fq *fq, u32 mask)
+{
+ dpaa_set_bits(mask, &fq->flags);
+}
+
+static inline void fq_clear(struct qman_fq *fq, u32 mask)
+{
+ dpaa_clear_bits(mask, &fq->flags);
+}
+
+static inline int fq_isset(struct qman_fq *fq, u32 mask)
+{
+ return fq->flags & mask;
+}
+
+static inline int fq_isclear(struct qman_fq *fq, u32 mask)
+{
+ return !(fq->flags & mask);
+}
+
+struct qman_portal {
+ struct qm_portal p;
+ /* PORTAL_BITS_*** - dynamic, strictly internal */
+ unsigned long bits;
+ /* interrupt sources processed by portal_isr(), configurable */
+ unsigned long irq_sources;
+ u32 use_eqcr_ci_stashing;
+ u32 slowpoll; /* only used when interrupts are off */
+ /* only 1 volatile dequeue at a time */
+ struct qman_fq *vdqcr_owned;
+ u32 sdqcr;
+ int dqrr_disable_ref;
+ /* A portal-specific handler for DCP ERNs. If this is NULL, the global
+ * handler is called instead.
+ */
+ qman_cb_dc_ern cb_dc_ern;
+ /* When the cpu-affine portal is activated, this is non-NULL */
+ const struct qm_portal_config *config;
+ struct dpa_rbtree retire_table;
+ char irqname[MAX_IRQNAME];
+ /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
+ struct qman_cgrs *cgrs;
+ /* linked-list of CSCN handlers. */
+ struct list_head cgr_cbs;
+ /* list lock */
+ spinlock_t cgr_lock;
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ /* Keep a shadow copy of the DQRR on LE systems as the SW needs to
+ * do byte swaps of DQRR read only memory. First entry must be aligned
+ * to 2 ** 10 to ensure DQRR index calculations based shadow copy
+ * address (6 bits for address shift + 4 bits for the DQRR size).
+ */
+ struct qm_dqrr_entry shadow_dqrr[QM_DQRR_SIZE]
+ __attribute__((aligned(1024)));
+#endif
+};
+
+/* Global handler for DCP ERNs. Used when the portal receiving the message does
+ * not have a portal-specific handler.
+ */
+static qman_cb_dc_ern cb_dc_ern;
+
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+static u16 affine_channels[NR_CPUS];
+static RTE_DEFINE_PER_LCORE(struct qman_portal, qman_affine_portal);
+
+static inline struct qman_portal *get_affine_portal(void)
+{
+ return &RTE_PER_LCORE(qman_affine_portal);
+}
+
+/* This gives a FQID->FQ lookup to cover the fact that we can't directly demux
+ * retirement notifications (the fact they are sometimes h/w-consumed means that
+ * contextB isn't always a s/w demux - and as we can't know which case it is
+ * when looking at the notification, we have to use the slow lookup for all of
+ * them). NB, it's possible to have multiple FQ objects refer to the same FQID
+ * (though at most one of them should be the consumer), so this table isn't for
+ * all FQs - FQs are added when retirement commands are issued, and removed when
+ * they complete, which also massively reduces the size of this table.
+ */
+IMPLEMENT_DPAA_RBTREE(fqtree, struct qman_fq, node, fqid);
+/*
+ * This is what everything can wait on, even if it migrates to a different cpu
+ * to the one whose affine portal it is waiting on.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
+
+static inline int table_push_fq(struct qman_portal *p, struct qman_fq *fq)
+{
+ int ret = fqtree_push(&p->retire_table, fq);
+
+ if (ret)
+ pr_err("ERROR: double FQ-retirement %d\n", fq->fqid);
+ return ret;
+}
+
+static inline void table_del_fq(struct qman_portal *p, struct qman_fq *fq)
+{
+ fqtree_del(&p->retire_table, fq);
+}
+
+static inline struct qman_fq *table_find_fq(struct qman_portal *p, u32 fqid)
+{
+ return fqtree_find(&p->retire_table, fqid);
+}
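+/*
+ * Lifecycle sketch for the retirement table above (descriptive, not extra
+ * driver code): qman_retire_fq() calls table_push_fq() before issuing
+ * ALTER_RETIRE; the MR handler in __poll_portal_slow() recovers the object
+ * with table_find_fq() when an FQRN/FQRL message arrives; and
+ * fq_state_change() calls table_del_fq() once retirement completes.
+ */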
+
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+static void **qman_fq_lookup_table;
+static size_t qman_fq_lookup_table_size;
+
+int qman_setup_fq_lookup_table(size_t num_entries)
+{
+ /* Allocate 1 more entry since the first entry is not used */
+ num_entries++;
+ qman_fq_lookup_table = vmalloc((num_entries * sizeof(void *)));
+ if (!qman_fq_lookup_table) {
+ pr_err("QMan: Could not allocate fq lookup table\n");
+ return -ENOMEM;
+ }
+ memset(qman_fq_lookup_table, 0, num_entries * sizeof(void *));
+ qman_fq_lookup_table_size = num_entries;
+ pr_debug("QMan: Allocated lookup table at %p, entry count %lu\n",
+ qman_fq_lookup_table,
+ (unsigned long)qman_fq_lookup_table_size);
+ return 0;
+}
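+/*
+ * Minimal usage sketch (illustrative; "max_fqs" is a caller-chosen bound,
+ * not a driver symbol). The table is sized max_fqs + 1 because entry 0 is
+ * never used:
+ *
+ *	if (qman_setup_fq_lookup_table(max_fqs))
+ *		return -ENOMEM;
+ */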
+
+/* global structure that maintains fq object mapping */
+static DEFINE_SPINLOCK(fq_hash_table_lock);
+
+static int find_empty_fq_table_entry(u32 *entry, struct qman_fq *fq)
+{
+ u32 i;
+
+ spin_lock(&fq_hash_table_lock);
+ /* Can't use index zero because this has special meaning
+ * in context_b field.
+ */
+ for (i = 1; i < qman_fq_lookup_table_size; i++) {
+ if (qman_fq_lookup_table[i] == NULL) {
+ *entry = i;
+ qman_fq_lookup_table[i] = fq;
+ spin_unlock(&fq_hash_table_lock);
+ return 0;
+ }
+ }
+ spin_unlock(&fq_hash_table_lock);
+ return -ENOMEM;
+}
+
+static void clear_fq_table_entry(u32 entry)
+{
+ spin_lock(&fq_hash_table_lock);
+ DPAA_BUG_ON(entry >= qman_fq_lookup_table_size);
+ qman_fq_lookup_table[entry] = NULL;
+ spin_unlock(&fq_hash_table_lock);
+}
+
+static inline struct qman_fq *get_fq_table_entry(u32 entry)
+{
+ DPAA_BUG_ON(entry >= qman_fq_lookup_table_size);
+ return qman_fq_lookup_table[entry];
+}
+#endif
+
+static inline void cpu_to_hw_fqd(struct qm_fqd *fqd)
+{
+ /* Byteswap the FQD to HW format */
+ fqd->fq_ctrl = cpu_to_be16(fqd->fq_ctrl);
+ fqd->dest_wq = cpu_to_be16(fqd->dest_wq);
+ fqd->ics_cred = cpu_to_be16(fqd->ics_cred);
+ fqd->context_b = cpu_to_be32(fqd->context_b);
+ fqd->context_a.opaque = cpu_to_be64(fqd->context_a.opaque);
+ fqd->opaque_td = cpu_to_be16(fqd->opaque_td);
+}
+
+static inline void hw_fqd_to_cpu(struct qm_fqd *fqd)
+{
+ /* Byteswap the FQD to CPU format */
+ fqd->fq_ctrl = be16_to_cpu(fqd->fq_ctrl);
+ fqd->dest_wq = be16_to_cpu(fqd->dest_wq);
+ fqd->ics_cred = be16_to_cpu(fqd->ics_cred);
+ fqd->context_b = be32_to_cpu(fqd->context_b);
+ fqd->context_a.opaque = be64_to_cpu(fqd->context_a.opaque);
+}
+
+static inline void cpu_to_hw_fd(struct qm_fd *fd)
+{
+ fd->addr = cpu_to_be40(fd->addr);
+ fd->status = cpu_to_be32(fd->status);
+ fd->opaque = cpu_to_be32(fd->opaque);
+}
+
+static inline void hw_fd_to_cpu(struct qm_fd *fd)
+{
+ fd->addr = be40_to_cpu(fd->addr);
+ fd->status = be32_to_cpu(fd->status);
+ fd->opaque = be32_to_cpu(fd->opaque);
+}
+
+/* In the case that slow- and fast-path handling are both done by qman_poll()
+ * (ie. because there is no interrupt handling), we ought to balance how often
+ * we do the fast-path poll versus the slow-path poll. We'll use two decrementer
+ * sources, so we call the fast poll 'n' times before calling the slow poll
+ * once. The idle decrementer constant is used when the last slow-poll detected
+ * no work to do, and the busy decrementer constant when the last slow-poll had
+ * work to do.
+ */
+#define SLOW_POLL_IDLE 1000
+#define SLOW_POLL_BUSY 10
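+/*
+ * Pacing sketch (this is what qman_poll() further below does, shown in
+ * isolation): one slow poll per SLOW_POLL_IDLE fast polls while idle,
+ * dropping to one per SLOW_POLL_BUSY once the slow path reports work:
+ *
+ *	if (!(p->slowpoll--)) {
+ *		u32 active = __poll_portal_slow(p, is);
+ *		p->slowpoll = active ? SLOW_POLL_BUSY : SLOW_POLL_IDLE;
+ *	}
+ */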
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+ unsigned int poll_limit);
+
+/* Portal interrupt handler */
+static irqreturn_t portal_isr(__always_unused int irq, void *ptr)
+{
+ struct qman_portal *p = ptr;
+ /*
+ * The CSCI/CCSCI source is cleared inside __poll_portal_slow(), because
+ * it could race against a Query Congestion State command also given
+ * as part of the handling of this interrupt source. We mustn't
+ * clear it a second time in this top-level function.
+ */
+ u32 clear = QM_DQAVAIL_MASK | (p->irq_sources &
+ ~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI));
+ u32 is = qm_isr_status_read(&p->p) & p->irq_sources;
+ /* DQRR-handling if it's interrupt-driven */
+ if (is & QM_PIRQ_DQRI)
+ __poll_portal_fast(p, FSL_QMAN_POLL_LIMIT);
+ /* Handling of anything else that's interrupt-driven */
+ clear |= __poll_portal_slow(p, is);
+ qm_isr_status_clear(&p->p, clear);
+ return IRQ_HANDLED;
+}
+
+/* This inner version is used privately by qman_create_affine_portal(), as well
+ * as by the exported qman_stop_dequeues().
+ */
+static inline void qman_stop_dequeues_ex(struct qman_portal *p)
+{
+ if (!(p->dqrr_disable_ref++))
+ qm_dqrr_set_maxfill(&p->p, 0);
+}
+
+static int drain_mr_fqrni(struct qm_portal *p)
+{
+ const struct qm_mr_entry *msg;
+loop:
+ msg = qm_mr_current(p);
+ if (!msg) {
+ /*
+ * if MR was full and h/w had other FQRNI entries to produce, we
+ * need to allow it time to produce those entries once the
+ * existing entries are consumed. A worst-case situation
+ * (fully-loaded system) means h/w sequencers may have to do 3-4
+ * other things before servicing the portal's MR pump, each of
+ * which (if slow) may take ~50 qman cycles (which is ~200
+ * processor cycles), i.e. roughly 4 * 200 = 800 cycles in total.
+ * Rounding that up to 1,000 and multiplying by a factor of 10,
+ * just to be ultra-paranoid, gives the 10,000-cycle wait used
+ * below. NB, we consume
+ * one entry at a time, so h/w has an opportunity to produce new
+ * entries well before the ring has been fully consumed, so
+ * we're being *really* paranoid here.
+ */
+ u64 now, then = mfatb();
+
+ do {
+ now = mfatb();
+ } while ((then + 10000) > now);
+ msg = qm_mr_current(p);
+ if (!msg)
+ return 0;
+ }
+ if ((msg->ern.verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
+ /* We aren't draining anything but FQRNIs */
+ pr_err("Found verb 0x%x in MR\n", msg->ern.verb);
+ return -1;
+ }
+ qm_mr_next(p);
+ qm_mr_cci_consume(p, 1);
+ goto loop;
+}
+
+static inline int qm_eqcr_init(struct qm_portal *portal,
+ enum qm_eqcr_pmode pmode,
+ unsigned int eq_stash_thresh,
+ int eq_stash_prio)
+{
+ /* This use of 'register', as well as all other occurrences, is because
+ * it has been observed to generate much faster code with gcc than is
+ * otherwise the case.
+ */
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ u32 cfg;
+ u8 pi;
+
+ eqcr->ring = portal->addr.ce + QM_CL_EQCR;
+ eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+ qm_cl_invalidate(EQCR_CI);
+ pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+ eqcr->cursor = eqcr->ring + pi;
+ eqcr->vbit = (qm_in(EQCR_PI_CINH) & QM_EQCR_SIZE) ?
+ QM_EQCR_VERB_VBIT : 0;
+ eqcr->available = QM_EQCR_SIZE - 1 -
+ qm_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
+ eqcr->ithresh = qm_in(EQCR_ITR);
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ eqcr->busy = 0;
+ eqcr->pmode = pmode;
+#endif
+ cfg = (qm_in(CFG) & 0x00ffffff) |
+ (eq_stash_thresh << 28) | /* QCSP_CFG: EST */
+ (eq_stash_prio << 26) | /* QCSP_CFG: EP */
+ ((pmode & 0x3) << 24); /* QCSP_CFG: EPM */
+ qm_out(CFG, cfg);
+ return 0;
+}
+
+static inline void qm_eqcr_finish(struct qm_portal *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 pi, ci;
+ u32 cfg;
+
+ /*
+ * Disable EQCI stashing because the QMan only
+ * presents the value it previously stashed to
+ * maintain coherency. Setting the stash threshold
+ * to 1 then 0 ensures that QMan has resynchronized
+ * its internal copy so that the portal is clean
+ * when it is reinitialized in the future.
+ */
+ cfg = (qm_in(CFG) & 0x0fffffff) |
+ (1 << 28); /* QCSP_CFG: EST */
+ qm_out(CFG, cfg);
+ cfg &= 0x0fffffff; /* stash threshold = 0 */
+ qm_out(CFG, cfg);
+
+ pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+ ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+
+ /* Refresh EQCR CI cache value */
+ qm_cl_invalidate(EQCR_CI);
+ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(!eqcr->busy);
+#endif
+ if (pi != EQCR_PTR2IDX(eqcr->cursor))
+ pr_crit("losing uncommitted EQCR entries\n");
+ if (ci != eqcr->ci)
+ pr_crit("missing existing EQCR completions\n");
+ if (eqcr->ci != EQCR_PTR2IDX(eqcr->cursor))
+ pr_crit("EQCR destroyed unquiesced\n");
+}
+
+static inline int qm_dqrr_init(struct qm_portal *portal,
+ __maybe_unused const struct qm_portal_config *config,
+ enum qm_dqrr_dmode dmode,
+ __maybe_unused enum qm_dqrr_pmode pmode,
+ enum qm_dqrr_cmode cmode, u8 max_fill)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ u32 cfg;
+
+ /* Make sure the DQRR will be idle when we enable */
+ qm_out(DQRR_SDQCR, 0);
+ qm_out(DQRR_VDQCR, 0);
+ qm_out(DQRR_PDQCR, 0);
+ dqrr->ring = portal->addr.ce + QM_CL_DQRR;
+ dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
+ dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
+ dqrr->cursor = dqrr->ring + dqrr->ci;
+ dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
+ dqrr->vbit = (qm_in(DQRR_PI_CINH) & QM_DQRR_SIZE) ?
+ QM_DQRR_VERB_VBIT : 0;
+ dqrr->ithresh = qm_in(DQRR_ITR);
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ dqrr->dmode = dmode;
+ dqrr->pmode = pmode;
+ dqrr->cmode = cmode;
+#endif
+ /* Invalidate every ring entry before beginning */
+ for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
+ dccivac(qm_cl(dqrr->ring, cfg));
+ cfg = (qm_in(CFG) & 0xff000f00) |
+ ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
+ ((dmode & 1) << 18) | /* DP */
+ ((cmode & 3) << 16) | /* DCM */
+ 0xa0 | /* RE+SE */
+ (0 ? 0x40 : 0) | /* Ignore RP */
+ (0 ? 0x10 : 0); /* Ignore SP */
+ qm_out(CFG, cfg);
+ qm_dqrr_set_maxfill(portal, max_fill);
+ return 0;
+}
+
+static inline void qm_dqrr_finish(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ if ((dqrr->cmode != qm_dqrr_cdc) &&
+ (dqrr->ci != DQRR_PTR2IDX(dqrr->cursor)))
+ pr_crit("Ignoring completed DQRR entries\n");
+#endif
+}
+
+static inline int qm_mr_init(struct qm_portal *portal,
+ __maybe_unused enum qm_mr_pmode pmode,
+ enum qm_mr_cmode cmode)
+{
+ register struct qm_mr *mr = &portal->mr;
+ u32 cfg;
+
+ mr->ring = portal->addr.ce + QM_CL_MR;
+ mr->pi = qm_in(MR_PI_CINH) & (QM_MR_SIZE - 1);
+ mr->ci = qm_in(MR_CI_CINH) & (QM_MR_SIZE - 1);
+ mr->cursor = mr->ring + mr->ci;
+ mr->fill = qm_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
+ mr->vbit = (qm_in(MR_PI_CINH) & QM_MR_SIZE) ? QM_MR_VERB_VBIT : 0;
+ mr->ithresh = qm_in(MR_ITR);
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ mr->pmode = pmode;
+ mr->cmode = cmode;
+#endif
+ cfg = (qm_in(CFG) & 0xfffff0ff) |
+ ((cmode & 1) << 8); /* QCSP_CFG:MM */
+ qm_out(CFG, cfg);
+ return 0;
+}
+
+static inline void qm_mr_pvb_update(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+ const struct qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mr->pmode == qm_mr_pvb);
+#endif
+ /* when accessing 'verb', use __raw_readb() to ensure that compiler
+ * inlining doesn't try to optimise out "excess reads".
+ */
+ if ((__raw_readb(&res->ern.verb) & QM_MR_VERB_VBIT) == mr->vbit) {
+ mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
+ if (!mr->pi)
+ mr->vbit ^= QM_MR_VERB_VBIT;
+ mr->fill++;
+ res = MR_INC(res);
+ }
+ dcbit_ro(res);
+}
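+/*
+ * Worked example of the valid-bit check above (illustrative): entries are
+ * produced with one polarity of QM_MR_VERB_VBIT per lap of the ring; when
+ * mr->pi wraps from QM_MR_SIZE - 1 back to 0, mr->vbit is toggled, so an
+ * entry left over from the previous lap can never match and be consumed a
+ * second time.
+ */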
+
+static inline
+struct qman_portal *qman_create_portal(
+ struct qman_portal *portal,
+ const struct qm_portal_config *c,
+ const struct qman_cgrs *cgrs)
+{
+ struct qm_portal *p;
+ char buf[16];
+ int ret;
+ u32 isdr;
+
+ p = &portal->p;
+
+ if (dpaa_svr_family == SVR_LS1043A_FAMILY)
+ portal->use_eqcr_ci_stashing = 3;
+ else
+ portal->use_eqcr_ci_stashing =
+ ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
+
+ /*
+ * prep the low-level portal struct with the mapped addresses from the
+ * config; everything that follows depends on it, and "config" is kept
+ * mainly for (de)reference
+ */
+ p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
+ p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
+ /*
+ * If CI-stashing is used, the current defaults use a threshold of 3,
+ * and stash with higher-than-DQRR priority.
+ */
+ if (qm_eqcr_init(p, qm_eqcr_pvb,
+ portal->use_eqcr_ci_stashing, 1)) {
+ pr_err("Qman EQCR initialisation failed\n");
+ goto fail_eqcr;
+ }
+ if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
+ qm_dqrr_cdc, DQRR_MAXFILL)) {
+ pr_err("Qman DQRR initialisation failed\n");
+ goto fail_dqrr;
+ }
+ if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
+ pr_err("Qman MR initialisation failed\n");
+ goto fail_mr;
+ }
+ if (qm_mc_init(p)) {
+ pr_err("Qman MC initialisation failed\n");
+ goto fail_mc;
+ }
+
+ /* static interrupt-gating controls */
+ qm_dqrr_set_ithresh(p, 0);
+ qm_mr_set_ithresh(p, 0);
+ qm_isr_set_iperiod(p, 0);
+ portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
+ if (!portal->cgrs)
+ goto fail_cgrs;
+ /* initial snapshot is no-depletion */
+ qman_cgrs_init(&portal->cgrs[1]);
+ if (cgrs)
+ portal->cgrs[0] = *cgrs;
+ else
+ /* if the given mask is NULL, assume all CGRs can be seen */
+ qman_cgrs_fill(&portal->cgrs[0]);
+ INIT_LIST_HEAD(&portal->cgr_cbs);
+ spin_lock_init(&portal->cgr_lock);
+ portal->bits = 0;
+ portal->slowpoll = 0;
+ portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
+ QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
+ QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
+ portal->dqrr_disable_ref = 0;
+ portal->cb_dc_ern = NULL;
+ sprintf(buf, "qportal-%d", c->channel);
+ dpa_rbtree_init(&portal->retire_table);
+ isdr = 0xffffffff;
+ qm_isr_disable_write(p, isdr);
+ portal->irq_sources = 0;
+ qm_isr_enable_write(p, portal->irq_sources);
+ qm_isr_status_clear(p, 0xffffffff);
+ snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
+ if (request_irq(c->irq, portal_isr, 0, portal->irqname,
+ portal)) {
+ pr_err("request_irq() failed\n");
+ goto fail_irq;
+ }
+
+ /* Need EQCR to be empty before continuing */
+ isdr &= ~QM_PIRQ_EQCI;
+ qm_isr_disable_write(p, isdr);
+ ret = qm_eqcr_get_fill(p);
+ if (ret) {
+ pr_err("Qman EQCR unclean\n");
+ goto fail_eqcr_empty;
+ }
+ isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
+ qm_isr_disable_write(p, isdr);
+ if (qm_dqrr_current(p)) {
+ pr_err("Qman DQRR unclean\n");
+ qm_dqrr_cdc_consume_n(p, 0xffff);
+ }
+ if (qm_mr_current(p)) {
+ /* special handling, drain just in case it's a few FQRNIs */
+ if (drain_mr_fqrni(p))
+ goto fail_dqrr_mr_empty;
+ }
+ /* Success */
+ portal->config = c;
+ qm_isr_disable_write(p, 0);
+ qm_isr_uninhibit(p);
+ /* Write a sane SDQCR */
+ qm_dqrr_sdqcr_set(p, portal->sdqcr);
+ return portal;
+fail_dqrr_mr_empty:
+fail_eqcr_empty:
+ free_irq(c->irq, portal);
+fail_irq:
+ kfree(portal->cgrs);
+ spin_lock_destroy(&portal->cgr_lock);
+fail_cgrs:
+ qm_mc_finish(p);
+fail_mc:
+ qm_mr_finish(p);
+fail_mr:
+ qm_dqrr_finish(p);
+fail_dqrr:
+ qm_eqcr_finish(p);
+fail_eqcr:
+ return NULL;
+}
+
+#define MAX_GLOBAL_PORTALS 8
+static struct qman_portal global_portals[MAX_GLOBAL_PORTALS];
+static rte_atomic16_t global_portals_used[MAX_GLOBAL_PORTALS];
+
+static struct qman_portal *
+qman_alloc_global_portal(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < MAX_GLOBAL_PORTALS; i++) {
+ if (rte_atomic16_test_and_set(&global_portals_used[i]))
+ return &global_portals[i];
+ }
+ pr_err("No portal available (%x)\n", MAX_GLOBAL_PORTALS);
+
+ return NULL;
+}
+
+static int
+qman_free_global_portal(struct qman_portal *portal)
+{
+ unsigned int i;
+
+ for (i = 0; i < MAX_GLOBAL_PORTALS; i++) {
+ if (&global_portals[i] == portal) {
+ rte_atomic16_clear(&global_portals_used[i]);
+ return 0;
+ }
+ }
+ return -1;
+}
+
+struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
+ const struct qman_cgrs *cgrs,
+ int alloc)
+{
+ struct qman_portal *res;
+ struct qman_portal *portal;
+
+ if (alloc)
+ portal = qman_alloc_global_portal();
+ else
+ portal = get_affine_portal();
+
+ /* A criterion for calling this function (from qman_driver.c) is that
+ * we're already affine to the cpu and won't schedule onto another cpu.
+ */
+
+ res = qman_create_portal(portal, c, cgrs);
+ if (res) {
+ spin_lock(&affine_mask_lock);
+ CPU_SET(c->cpu, &affine_mask);
+ affine_channels[c->cpu] =
+ c->channel;
+ spin_unlock(&affine_mask_lock);
+ }
+ return res;
+}
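+/*
+ * Bring-up sketch (illustrative; obtaining "pcfg" and pinning the thread to
+ * pcfg->cpu beforehand are the caller's responsibility, see qman_driver.c).
+ * A NULL cgrs mask means all CGRs are visible; alloc == 0 uses the
+ * per-lcore portal:
+ *
+ *	struct qman_portal *p;
+ *
+ *	p = qman_create_affine_portal(pcfg, NULL, 0);
+ *	if (!p)
+ *		return -EINVAL;
+ */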
+
+static inline
+void qman_destroy_portal(struct qman_portal *qm)
+{
+ const struct qm_portal_config *pcfg;
+
+ /* Stop dequeues on the portal */
+ qm_dqrr_sdqcr_set(&qm->p, 0);
+
+ /*
+ * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
+ * something related to QM_PIRQ_EQCI, this may need fixing.
+ * Also, due to the prefetching model used for CI updates in the enqueue
+ * path, this update will only invalidate the CI cacheline *after*
+ * working on it, so we need to call this twice to ensure a full update
+ * irrespective of where the enqueue processing was at when the teardown
+ * began.
+ */
+ qm_eqcr_cce_update(&qm->p);
+ qm_eqcr_cce_update(&qm->p);
+ pcfg = qm->config;
+
+ free_irq(pcfg->irq, qm);
+
+ kfree(qm->cgrs);
+ qm_mc_finish(&qm->p);
+ qm_mr_finish(&qm->p);
+ qm_dqrr_finish(&qm->p);
+ qm_eqcr_finish(&qm->p);
+
+ qm->config = NULL;
+
+ spin_lock_destroy(&qm->cgr_lock);
+}
+
+const struct qm_portal_config *
+qman_destroy_affine_portal(struct qman_portal *qp)
+{
+ /* We don't want to redirect if we're a slave, use "raw" */
+ struct qman_portal *qm;
+ const struct qm_portal_config *pcfg;
+ int cpu;
+
+ if (qp == NULL)
+ qm = get_affine_portal();
+ else
+ qm = qp;
+ pcfg = qm->config;
+ cpu = pcfg->cpu;
+
+ qman_destroy_portal(qm);
+
+ spin_lock(&affine_mask_lock);
+ CPU_CLR(cpu, &affine_mask);
+ spin_unlock(&affine_mask_lock);
+
+ qman_free_global_portal(qm);
+
+ return pcfg;
+}
+
+int qman_get_portal_index(void)
+{
+ struct qman_portal *p = get_affine_portal();
+ return p->config->index;
+}
+
+/* Inline helper to reduce nesting in __poll_portal_slow() */
+static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
+ const struct qm_mr_entry *msg, u8 verb)
+{
+ FQLOCK(fq);
+ switch (verb) {
+ case QM_MR_VERB_FQRL:
+ DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
+ fq_clear(fq, QMAN_FQ_STATE_ORL);
+ table_del_fq(p, fq);
+ break;
+ case QM_MR_VERB_FQRN:
+ DPAA_ASSERT((fq->state == qman_fq_state_parked) ||
+ (fq->state == qman_fq_state_sched));
+ DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
+ fq_clear(fq, QMAN_FQ_STATE_CHANGING);
+ if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
+ fq_set(fq, QMAN_FQ_STATE_NE);
+ if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
+ fq_set(fq, QMAN_FQ_STATE_ORL);
+ else
+ table_del_fq(p, fq);
+ fq->state = qman_fq_state_retired;
+ break;
+ case QM_MR_VERB_FQPN:
+ DPAA_ASSERT(fq->state == qman_fq_state_sched);
+ DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
+ fq->state = qman_fq_state_parked;
+ }
+ FQUNLOCK(fq);
+}
+
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
+{
+ const struct qm_mr_entry *msg;
+ struct qm_mr_entry swapped_msg;
+
+ if (is & QM_PIRQ_CSCI) {
+ struct qman_cgrs rr, c;
+ struct qm_mc_result *mcr;
+ struct qman_cgr *cgr;
+
+ spin_lock(&p->cgr_lock);
+ /*
+ * The CSCI bit must be cleared _before_ issuing the
+ * Query Congestion State command, to ensure that a long
+ * CGR State Change callback cannot miss an intervening
+ * state change.
+ */
+ qm_isr_status_clear(&p->p, QM_PIRQ_CSCI);
+ qm_mc_start(&p->p);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ /* mask out the ones I'm not interested in */
+ qman_cgrs_and(&rr, (const struct qman_cgrs *)
+ &mcr->querycongestion.state, &p->cgrs[0]);
+ /* check previous snapshot for delta, enter/exit congestion */
+ qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
+ /* update snapshot */
+ qman_cgrs_cp(&p->cgrs[1], &rr);
+ /* Invoke callback */
+ list_for_each_entry(cgr, &p->cgr_cbs, node)
+ if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
+ cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
+ spin_unlock(&p->cgr_lock);
+ }
+
+ if (is & QM_PIRQ_EQRI) {
+ qm_eqcr_cce_update(&p->p);
+ qm_eqcr_set_ithresh(&p->p, 0);
+ wake_up(&affine_queue);
+ }
+
+ if (is & QM_PIRQ_MRI) {
+ struct qman_fq *fq;
+ u8 verb, num = 0;
+mr_loop:
+ qm_mr_pvb_update(&p->p);
+ msg = qm_mr_current(&p->p);
+ if (!msg)
+ goto mr_done;
+ swapped_msg = *msg;
+ hw_fd_to_cpu(&swapped_msg.ern.fd);
+ verb = msg->ern.verb & QM_MR_VERB_TYPE_MASK;
+ /* The message is a software ERN iff the 0x20 bit is clear */
+ if (verb & 0x20) {
+ switch (verb) {
+ case QM_MR_VERB_FQRNI:
+ /* nada, we drop FQRNIs on the floor */
+ break;
+ case QM_MR_VERB_FQRN:
+ case QM_MR_VERB_FQRL:
+ /* Lookup in the retirement table */
+ fq = table_find_fq(p,
+ be32_to_cpu(msg->fq.fqid));
+ DPAA_BUG_ON(!fq);
+ fq_state_change(p, fq, &swapped_msg, verb);
+ if (fq->cb.fqs)
+ fq->cb.fqs(p, fq, &swapped_msg);
+ break;
+ case QM_MR_VERB_FQPN:
+ /* Parked */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ fq = get_fq_table_entry(
+ be32_to_cpu(msg->fq.contextB));
+#else
+ fq = (void *)(uintptr_t)
+ be32_to_cpu(msg->fq.contextB);
+#endif
+ fq_state_change(p, fq, msg, verb);
+ if (fq->cb.fqs)
+ fq->cb.fqs(p, fq, &swapped_msg);
+ break;
+ case QM_MR_VERB_DC_ERN:
+ /* DCP ERN */
+ if (p->cb_dc_ern)
+ p->cb_dc_ern(p, msg);
+ else if (cb_dc_ern)
+ cb_dc_ern(p, msg);
+ else {
+ static int warn_once;
+
+ if (!warn_once) {
+ pr_crit("Leaking DCP ERNs!\n");
+ warn_once = 1;
+ }
+ }
+ break;
+ default:
+ pr_crit("Invalid MR verb 0x%02x\n", verb);
+ }
+ } else {
+ /* It's a software ERN */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ fq = get_fq_table_entry(be32_to_cpu(msg->ern.tag));
+#else
+ fq = (void *)(uintptr_t)be32_to_cpu(msg->ern.tag);
+#endif
+ fq->cb.ern(p, fq, &swapped_msg);
+ }
+ num++;
+ qm_mr_next(&p->p);
+ goto mr_loop;
+mr_done:
+ qm_mr_cci_consume(&p->p, num);
+ }
+ /*
+ * QM_PIRQ_CSCI/CCSCI has already been cleared, as part of its specific
+ * processing. If that interrupt source has meanwhile been re-asserted,
+ * we mustn't clear it here (or in the top-level interrupt handler).
+ */
+ return is & (QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI);
+}
+
+/*
+ * remove some slowish-path stuff from the "fast path" and make sure it isn't
+ * inlined.
+ */
+static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
+{
+ p->vdqcr_owned = NULL;
+ FQLOCK(fq);
+ fq_clear(fq, QMAN_FQ_STATE_VDQCR);
+ FQUNLOCK(fq);
+ wake_up(&affine_queue);
+}
+
+/*
+ * The only states that would conflict with other things if they ran at the
+ * same time on the same cpu are:
+ *
+ * (i) setting/clearing vdqcr_owned, and
+ * (ii) clearing the NE (Not Empty) flag.
+ *
+ * Both are safe, because:
+ *
+ * (i) this clearing can only occur after qman_set_vdq() has set the
+ * vdqcr_owned field (which it does before setting VDQCR), and
+ * qman_volatile_dequeue() blocks interrupts and preemption while this is
+ * done so that we can't interfere.
+ * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
+ * with (i) that API prevents us from interfering until it's safe.
+ *
+ * The good thing is that qman_set_vdq() and qman_retire_fq() run far
+ * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the net
+ * advantage comes from this function not having to "lock" anything at all.
+ *
+ * Note also that the callbacks are invoked at points which are safe against the
+ * above potential conflicts, but that this function itself is not re-entrant
+ * (this is because the function tracks one end of each FIFO in the portal and
+ * we do *not* want to lock that). So the consequence is that it is safe for
+ * user callbacks to call into any QMan API.
+ */
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+ unsigned int poll_limit)
+{
+ const struct qm_dqrr_entry *dq;
+ struct qman_fq *fq;
+ enum qman_cb_dqrr_result res;
+ unsigned int limit = 0;
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ struct qm_dqrr_entry *shadow;
+#endif
+ do {
+ qm_dqrr_pvb_update(&p->p);
+ dq = qm_dqrr_current(&p->p);
+ if (unlikely(!dq))
+ break;
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ /* If running on an LE system the fields of the
+ * dequeue entry must be swapped. Because the
+ * QMan HW will ignore writes, the DQRR entry is
+ * copied and the index is stored within the copy.
+ */
+ shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
+ *shadow = *dq;
+ dq = shadow;
+ shadow->fqid = be32_to_cpu(shadow->fqid);
+ shadow->contextB = be32_to_cpu(shadow->contextB);
+ shadow->seqnum = be16_to_cpu(shadow->seqnum);
+ hw_fd_to_cpu(&shadow->fd);
+#endif
+
+ if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
+ /*
+ * VDQCR: don't trust context_b as the FQ may have
+ * been configured for h/w consumption and we're
+ * draining it post-retirement.
+ */
+ fq = p->vdqcr_owned;
+ /*
+ * We only set QMAN_FQ_STATE_NE when retiring, so we
+ * only need to check for clearing it when doing
+ * volatile dequeues. It's one less thing to check
+ * in the critical path (SDQCR).
+ */
+ if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
+ fq_clear(fq, QMAN_FQ_STATE_NE);
+ /*
+ * This is duplicated from the SDQCR code, but we
+ * have stuff to do before *and* after this callback,
+ * and we don't want multiple if()s in the critical
+ * path (SDQCR).
+ */
+ res = fq->cb.dqrr(p, fq, dq);
+ if (res == qman_cb_dqrr_stop)
+ break;
+ /* Check for VDQCR completion */
+ if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
+ clear_vdqcr(p, fq);
+ } else {
+ /* SDQCR: context_b points to the FQ */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ fq = get_fq_table_entry(dq->contextB);
+#else
+ fq = (void *)(uintptr_t)dq->contextB;
+#endif
+ /* Now let the callback do its stuff */
+ res = fq->cb.dqrr(p, fq, dq);
+ /*
+ * The callback can request that we exit without
+ * consuming this entry or advancing.
+ */
+ if (res == qman_cb_dqrr_stop)
+ break;
+ }
+ /* Interpret 'dq' from a driver perspective. */
+ /*
+ * Parking isn't possible unless HELDACTIVE was set. NB,
+ * FORCEELIGIBLE implies HELDACTIVE, so we only need to
+ * check for HELDACTIVE to cover both.
+ */
+ DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
+ (res != qman_cb_dqrr_park));
+ /* just means "skip it, I'll consume it myself later on" */
+ if (res != qman_cb_dqrr_defer)
+ qm_dqrr_cdc_consume_1ptr(&p->p, dq,
+ res == qman_cb_dqrr_park);
+ /* Move forward */
+ qm_dqrr_next(&p->p);
+ /*
+ * Entry processed and consumed, increment our counter. The
+ * callback can request that we exit after consuming the
+ * entry, and we also exit if we reach our processing limit,
+ * so loop back only if neither of these conditions is met.
+ */
+ } while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);
+
+ return limit;
+}
+
+u16 qman_affine_channel(int cpu)
+{
+ if (cpu < 0) {
+ struct qman_portal *portal = get_affine_portal();
+
+ cpu = portal->config->cpu;
+ }
+ DPAA_BUG_ON(!CPU_ISSET(cpu, &affine_mask));
+ return affine_channels[cpu];
+}
+
+unsigned int qman_portal_poll_rx(unsigned int poll_limit,
+ void **bufs,
+ struct qman_portal *p)
+{
+ struct qm_portal *portal = &p->p;
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ struct qm_dqrr_entry *dq[QM_DQRR_SIZE], *shadow[QM_DQRR_SIZE];
+ struct qman_fq *fq;
+ unsigned int limit = 0, rx_number = 0;
+ uint32_t consume = 0;
+
+ do {
+ qm_dqrr_pvb_update(&p->p);
+ if (!dqrr->fill)
+ break;
+
+ dq[rx_number] = dqrr->cursor;
+ dqrr->cursor = DQRR_CARRYCLEAR(dqrr->cursor + 1);
+ /* Prefetch the next DQRR entry */
+ rte_prefetch0(dqrr->cursor);
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ /* If running on an LE system the fields of the
+ * dequeue entry must be swapped. Because the
+ * QMan HW will ignore writes, the DQRR entry is
+ * copied and the index is stored within the copy.
+ */
+ shadow[rx_number] =
+ &p->shadow_dqrr[DQRR_PTR2IDX(dq[rx_number])];
+ shadow[rx_number]->fd.opaque_addr =
+ dq[rx_number]->fd.opaque_addr;
+ shadow[rx_number]->fd.addr =
+ be40_to_cpu(dq[rx_number]->fd.addr);
+ shadow[rx_number]->fd.opaque =
+ be32_to_cpu(dq[rx_number]->fd.opaque);
+#else
+ shadow[rx_number] = dq[rx_number];
+#endif
+
+ /* SDQCR: context_b points to the FQ */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ fq = qman_fq_lookup_table[be32_to_cpu(dq[rx_number]->contextB)];
+#else
+ fq = (void *)(uintptr_t)be32_to_cpu(dq[rx_number]->contextB);
+#endif
+ if (fq->cb.dqrr_prepare)
+ fq->cb.dqrr_prepare(shadow[rx_number],
+ &bufs[rx_number]);
+
+ consume |= (1 << (31 - DQRR_PTR2IDX(shadow[rx_number])));
+ rx_number++;
+ --dqrr->fill;
+ } while (++limit < poll_limit);
+
+ if (rx_number)
+ fq->cb.dqrr_dpdk_pull_cb(&fq, shadow, bufs, rx_number);
+
+ /* Consume all the DQRR entries together */
+ qm_out(DQRR_DCAP, (1 << 8) | consume);
+
+ return rx_number;
+}
+
+u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
+ void **bufs)
+{
+ const struct qm_dqrr_entry *dq;
+ struct qman_fq *fq;
+ enum qman_cb_dqrr_result res;
+ unsigned int limit = 0;
+ struct qman_portal *p = get_affine_portal();
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ struct qm_dqrr_entry *shadow;
+#endif
+ unsigned int rx_number = 0;
+
+ do {
+ qm_dqrr_pvb_update(&p->p);
+ dq = qm_dqrr_current(&p->p);
+ if (!dq)
+ break;
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ /*
+ * If running on an LE system the fields of the
+ * dequeue entry must be swapped. Because the
+ * QMan HW will ignore writes, the DQRR entry is
+ * copied and the index is stored within the copy.
+ */
+ shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
+ *shadow = *dq;
+ dq = shadow;
+ shadow->fqid = be32_to_cpu(shadow->fqid);
+ shadow->contextB = be32_to_cpu(shadow->contextB);
+ shadow->seqnum = be16_to_cpu(shadow->seqnum);
+ hw_fd_to_cpu(&shadow->fd);
+#endif
+
+ /* SDQCR: context_b points to the FQ */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ fq = get_fq_table_entry(dq->contextB);
+#else
+ fq = (void *)(uintptr_t)dq->contextB;
+#endif
+ /* Now let the callback do its stuff */
+ res = fq->cb.dqrr_dpdk_cb(&ev[rx_number], p, fq,
+ dq, &bufs[rx_number]);
+ rx_number++;
+ /* Interpret 'dq' from a driver perspective. */
+ /*
+ * Parking isn't possible unless HELDACTIVE was set. NB,
+ * FORCEELIGIBLE implies HELDACTIVE, so we only need to
+ * check for HELDACTIVE to cover both.
+ */
+ DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
+ (res != qman_cb_dqrr_park));
+ if (res != qman_cb_dqrr_defer)
+ qm_dqrr_cdc_consume_1ptr(&p->p, dq,
+ res == qman_cb_dqrr_park);
+ /* Move forward */
+ qm_dqrr_next(&p->p);
+ /*
+ * Entry processed and consumed, increment our counter. The
+ * callback can request that we exit after consuming the
+ * entry, and we also exit if we reach our processing limit,
+ * so loop back only if neither of these conditions is met.
+ */
+ } while (++limit < poll_limit);
+
+ return limit;
+}
+
+struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq)
+{
+ struct qman_portal *p = get_affine_portal();
+ const struct qm_dqrr_entry *dq;
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ struct qm_dqrr_entry *shadow;
+#endif
+
+ qm_dqrr_pvb_update(&p->p);
+ dq = qm_dqrr_current(&p->p);
+ if (!dq)
+ return NULL;
+
+ if (!(dq->stat & QM_DQRR_STAT_FD_VALID)) {
+ /* Invalid DQRR entry - consume it and return NULL to the
+ * user, as no packet is seen.
+ */
+ qman_dqrr_consume(fq, (struct qm_dqrr_entry *)dq);
+ return NULL;
+ }
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
+ *shadow = *dq;
+ dq = shadow;
+ shadow->fqid = be32_to_cpu(shadow->fqid);
+ shadow->contextB = be32_to_cpu(shadow->contextB);
+ shadow->seqnum = be16_to_cpu(shadow->seqnum);
+ hw_fd_to_cpu(&shadow->fd);
+#endif
+
+ if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
+ fq_clear(fq, QMAN_FQ_STATE_NE);
+
+ return (struct qm_dqrr_entry *)dq;
+}
+
+void qman_dqrr_consume(struct qman_fq *fq,
+ struct qm_dqrr_entry *dq)
+{
+ struct qman_portal *p = get_affine_portal();
+
+ if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
+ clear_vdqcr(p, fq);
+
+ qm_dqrr_cdc_consume_1ptr(&p->p, dq, 0);
+ qm_dqrr_next(&p->p);
+}
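+/*
+ * qman_dequeue() and qman_dqrr_consume() are used as a pull-mode pair
+ * (illustrative):
+ *
+ *	struct qm_dqrr_entry *dq = qman_dequeue(fq);
+ *
+ *	if (dq) {
+ *		... process dq->fd ...
+ *		qman_dqrr_consume(fq, dq);
+ *	}
+ */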
+
+int qman_poll_dqrr(unsigned int limit)
+{
+ struct qman_portal *p = get_affine_portal();
+ int ret;
+
+ ret = __poll_portal_fast(p, limit);
+ return ret;
+}
+
+void qman_poll(void)
+{
+ struct qman_portal *p = get_affine_portal();
+
+ if ((~p->irq_sources) & QM_PIRQ_SLOW) {
+ if (!(p->slowpoll--)) {
+ u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources;
+ u32 active = __poll_portal_slow(p, is);
+
+ if (active) {
+ qm_isr_status_clear(&p->p, active);
+ p->slowpoll = SLOW_POLL_BUSY;
+ } else
+ p->slowpoll = SLOW_POLL_IDLE;
+ }
+ }
+ if ((~p->irq_sources) & QM_PIRQ_DQRI)
+ __poll_portal_fast(p, FSL_QMAN_POLL_LIMIT);
+}
+
+void qman_stop_dequeues(void)
+{
+ struct qman_portal *p = get_affine_portal();
+
+ qman_stop_dequeues_ex(p);
+}
+
+void qman_start_dequeues(void)
+{
+ struct qman_portal *p = get_affine_portal();
+
+ DPAA_ASSERT(p->dqrr_disable_ref > 0);
+ if (!(--p->dqrr_disable_ref))
+ qm_dqrr_set_maxfill(&p->p, DQRR_MAXFILL);
+}
+
+void qman_static_dequeue_add(u32 pools, struct qman_portal *qp)
+{
+ struct qman_portal *p = qp ? qp : get_affine_portal();
+
+ pools &= p->config->pools;
+ p->sdqcr |= pools;
+ qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
+}
+
+void qman_static_dequeue_del(u32 pools, struct qman_portal *qp)
+{
+ struct qman_portal *p = qp ? qp : get_affine_portal();
+
+ pools &= p->config->pools;
+ p->sdqcr &= ~pools;
+ qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
+}
+
+u32 qman_static_dequeue_get(struct qman_portal *qp)
+{
+ struct qman_portal *p = qp ? qp : get_affine_portal();
+ return p->sdqcr;
+}
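+/*
+ * Illustrative use of the SDQCR helpers, assuming the
+ * QM_SDQCR_CHANNELS_POOL_CONV() helper from fsl_qman.h. The pool bits here
+ * are examples only, and are masked against p->config->pools in any case;
+ * passing NULL selects the affine portal:
+ *
+ *	u32 pools = QM_SDQCR_CHANNELS_POOL_CONV(pool_channel);
+ *
+ *	qman_static_dequeue_add(pools, NULL);
+ *	...
+ *	qman_static_dequeue_del(pools, NULL);
+ */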
+
+void qman_dca(const struct qm_dqrr_entry *dq, int park_request)
+{
+ struct qman_portal *p = get_affine_portal();
+
+ qm_dqrr_cdc_consume_1ptr(&p->p, dq, park_request);
+}
+
+void qman_dca_index(u8 index, int park_request)
+{
+ struct qman_portal *p = get_affine_portal();
+
+ qm_dqrr_cdc_consume_1(&p->p, index, park_request);
+}
+
+/* Frame queue API */
+static const char *mcr_result_str(u8 result)
+{
+ switch (result) {
+ case QM_MCR_RESULT_NULL:
+ return "QM_MCR_RESULT_NULL";
+ case QM_MCR_RESULT_OK:
+ return "QM_MCR_RESULT_OK";
+ case QM_MCR_RESULT_ERR_FQID:
+ return "QM_MCR_RESULT_ERR_FQID";
+ case QM_MCR_RESULT_ERR_FQSTATE:
+ return "QM_MCR_RESULT_ERR_FQSTATE";
+ case QM_MCR_RESULT_ERR_NOTEMPTY:
+ return "QM_MCR_RESULT_ERR_NOTEMPTY";
+ case QM_MCR_RESULT_PENDING:
+ return "QM_MCR_RESULT_PENDING";
+ case QM_MCR_RESULT_ERR_BADCOMMAND:
+ return "QM_MCR_RESULT_ERR_BADCOMMAND";
+ }
+ return "<unknown MCR result>";
+}
+
+int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
+{
+ struct qm_fqd fqd;
+ struct qm_mcr_queryfq_np np;
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+
+ if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
+ int ret = qman_alloc_fqid(&fqid);
+
+ if (ret)
+ return ret;
+ }
+ spin_lock_init(&fq->fqlock);
+ fq->fqid = fqid;
+ fq->fqid_le = cpu_to_be32(fqid);
+ fq->flags = flags;
+ fq->state = qman_fq_state_oos;
+ fq->cgr_groupid = 0;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ if (unlikely(find_empty_fq_table_entry(&fq->key, fq))) {
+ pr_info("Find empty table entry failed\n");
+ return -ENOMEM;
+ }
+#endif
+ if (!(flags & QMAN_FQ_FLAG_AS_IS) || (flags & QMAN_FQ_FLAG_NO_MODIFY))
+ return 0;
+ /* Everything else is AS_IS support */
+ p = get_affine_portal();
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq.fqid = cpu_to_be32(fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ pr_err("QUERYFQ failed: %s\n", mcr_result_str(mcr->result));
+ goto err;
+ }
+ fqd = mcr->queryfq.fqd;
+ hw_fqd_to_cpu(&fqd);
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq_np.fqid = cpu_to_be32(fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ_NP);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ pr_err("QUERYFQ_NP failed: %s\n", mcr_result_str(mcr->result));
+ goto err;
+ }
+ np = mcr->queryfq_np;
+ /* Phew, have queryfq and queryfq_np results, stitch together
+ * the FQ object from those.
+ */
+ fq->cgr_groupid = fqd.cgid;
+ switch (np.state & QM_MCR_NP_STATE_MASK) {
+ case QM_MCR_NP_STATE_OOS:
+ break;
+ case QM_MCR_NP_STATE_RETIRED:
+ fq->state = qman_fq_state_retired;
+ if (np.frm_cnt)
+ fq_set(fq, QMAN_FQ_STATE_NE);
+ break;
+ case QM_MCR_NP_STATE_TEN_SCHED:
+ case QM_MCR_NP_STATE_TRU_SCHED:
+ case QM_MCR_NP_STATE_ACTIVE:
+ fq->state = qman_fq_state_sched;
+ if (np.state & QM_MCR_NP_STATE_R)
+ fq_set(fq, QMAN_FQ_STATE_CHANGING);
+ break;
+ case QM_MCR_NP_STATE_PARKED:
+ fq->state = qman_fq_state_parked;
+ break;
+ default:
+ DPAA_ASSERT(NULL == "invalid FQ state");
+ }
+ if (fqd.fq_ctrl & QM_FQCTRL_CGE)
+ fq->state |= QMAN_FQ_STATE_CGR_EN;
+ return 0;
+err:
+ if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID)
+ qman_release_fqid(fqid);
+ return -EIO;
+}
+
+void qman_destroy_fq(struct qman_fq *fq, u32 flags __maybe_unused)
+{
+ /*
+ * We don't need to lock the FQ as it is a pre-condition that the FQ be
+ * quiesced. Instead, run some checks.
+ */
+ switch (fq->state) {
+ case qman_fq_state_parked:
+ DPAA_ASSERT(flags & QMAN_FQ_DESTROY_PARKED);
+ /* Fallthrough */
+ case qman_fq_state_oos:
+ if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
+ qman_release_fqid(fq->fqid);
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ clear_fq_table_entry(fq->key);
+#endif
+ return;
+ default:
+ break;
+ }
+ DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
+}
+
+u32 qman_fq_fqid(struct qman_fq *fq)
+{
+ return fq->fqid;
+}
+
+void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags)
+{
+ if (state)
+ *state = fq->state;
+ if (flags)
+ *flags = fq->flags;
+}
+
+int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+
+ u8 res, myverb = (flags & QMAN_INITFQ_FLAG_SCHED) ?
+ QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;
+
+ if ((fq->state != qman_fq_state_oos) &&
+ (fq->state != qman_fq_state_parked))
+ return -EINVAL;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+ return -EINVAL;
+#endif
+ if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
+ /* OAC can't be set at the same time as TDTHRESH */
+ if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
+ return -EINVAL;
+ }
+ /* Issue an INITFQ_[PARKED|SCHED] management command */
+ p = get_affine_portal();
+ FQLOCK(fq);
+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
+ ((fq->state != qman_fq_state_oos) &&
+ (fq->state != qman_fq_state_parked)))) {
+ FQUNLOCK(fq);
+ return -EBUSY;
+ }
+ mcc = qm_mc_start(&p->p);
+ if (opts)
+ mcc->initfq = *opts;
+ mcc->initfq.fqid = cpu_to_be32(fq->fqid);
+ mcc->initfq.count = 0;
+ /*
+ * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a
+ * demux pointer. Otherwise, the caller-provided value is allowed to
+ * stand, don't overwrite it.
+ */
+ if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
+ dma_addr_t phys_fq;
+
+ mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ mcc->initfq.fqd.context_b = fq->key;
+#else
+ mcc->initfq.fqd.context_b = (u32)(uintptr_t)fq;
+#endif
+ /*
+ * and the physical address - NB, if the user wasn't trying to
+ * set CONTEXTA, clear the stashing settings.
+ */
+ if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
+ mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+ memset(&mcc->initfq.fqd.context_a, 0,
+ sizeof(mcc->initfq.fqd.context_a));
+ } else {
+ phys_fq = rte_mem_virt2iova(fq);
+ qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
+ }
+ }
+ if (flags & QMAN_INITFQ_FLAG_LOCAL) {
+ mcc->initfq.fqd.dest.channel = p->config->channel;
+ if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
+ mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
+ mcc->initfq.fqd.dest.wq = 4;
+ }
+ }
+ mcc->initfq.we_mask = cpu_to_be16(mcc->initfq.we_mask);
+ cpu_to_hw_fqd(&mcc->initfq.fqd);
+ qm_mc_commit(&p->p, myverb);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
+ res = mcr->result;
+ if (res != QM_MCR_RESULT_OK) {
+ FQUNLOCK(fq);
+ return -EIO;
+ }
+ if (opts) {
+ if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
+ if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
+ fq_set(fq, QMAN_FQ_STATE_CGR_EN);
+ else
+ fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
+ }
+ if (opts->we_mask & QM_INITFQ_WE_CGID)
+ fq->cgr_groupid = opts->fqd.cgid;
+ }
+ fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
+ qman_fq_state_sched : qman_fq_state_parked;
+ FQUNLOCK(fq);
+ return 0;
+}
+
+int qman_schedule_fq(struct qman_fq *fq)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+
+ int ret = 0;
+ u8 res;
+
+ if (fq->state != qman_fq_state_parked)
+ return -EINVAL;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+ return -EINVAL;
+#endif
+ /* Issue an ALTERFQ_SCHED management command */
+ p = get_affine_portal();
+
+ FQLOCK(fq);
+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
+ (fq->state != qman_fq_state_parked))) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
+ res = mcr->result;
+ if (res != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+ fq->state = qman_fq_state_sched;
+out:
+ FQUNLOCK(fq);
+
+ return ret;
+}
+
+int qman_retire_fq(struct qman_fq *fq, u32 *flags)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+
+ int rval;
+ u8 res;
+
+ if ((fq->state != qman_fq_state_parked) &&
+ (fq->state != qman_fq_state_sched))
+ return -EINVAL;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+ return -EINVAL;
+#endif
+ p = get_affine_portal();
+
+ FQLOCK(fq);
+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
+ (fq->state == qman_fq_state_retired) ||
+ (fq->state == qman_fq_state_oos))) {
+ rval = -EBUSY;
+ goto out;
+ }
+ rval = table_push_fq(p, fq);
+ if (rval)
+ goto out;
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
+ res = mcr->result;
+ /*
+ * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
+ * and defer the flags until FQRNI or FQRN (respectively) show up. But
+ * "Friendly" is to process OK immediately, and not set CHANGING. We do
+ * friendly, otherwise the caller doesn't necessarily have a fully
+ * "retired" FQ on return even if the retirement was immediate. However
+ * this does mean some code duplication between here and
+ * fq_state_change().
+ */
+ if (likely(res == QM_MCR_RESULT_OK)) {
+ rval = 0;
+ /* Process 'fq' right away, we'll ignore FQRNI */
+ if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
+ fq_set(fq, QMAN_FQ_STATE_NE);
+ if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
+ fq_set(fq, QMAN_FQ_STATE_ORL);
+ else
+ table_del_fq(p, fq);
+ if (flags)
+ *flags = fq->flags;
+ fq->state = qman_fq_state_retired;
+ if (fq->cb.fqs) {
+ /*
+ * Another issue with supporting "immediate" retirement
+ * is that we're forced to drop FQRNIs, because by the
+ * time they're seen it may already be "too late" (the
+ * fq may have been OOS'd and free()'d already). But if
+ * the upper layer wants a callback whether it's
+ * immediate or not, we have to fake a "MR" entry to
+ * look like an FQRNI...
+ */
+ struct qm_mr_entry msg;
+
+ msg.ern.verb = QM_MR_VERB_FQRNI;
+ msg.fq.fqs = mcr->alterfq.fqs;
+ msg.fq.fqid = fq->fqid;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ msg.fq.contextB = fq->key;
+#else
+ msg.fq.contextB = (u32)(uintptr_t)fq;
+#endif
+ fq->cb.fqs(p, fq, &msg);
+ }
+ } else if (res == QM_MCR_RESULT_PENDING) {
+ rval = 1;
+ fq_set(fq, QMAN_FQ_STATE_CHANGING);
+ } else {
+ rval = -EIO;
+ table_del_fq(p, fq);
+ }
+out:
+ FQUNLOCK(fq);
+ return rval;
+}
+
+int qman_oos_fq(struct qman_fq *fq)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+
+ int ret = 0;
+ u8 res;
+
+ if (fq->state != qman_fq_state_retired)
+ return -EINVAL;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+ return -EINVAL;
+#endif
+ p = get_affine_portal();
+ FQLOCK(fq);
+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS)) ||
+ (fq->state != qman_fq_state_retired))) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
+ res = mcr->result;
+ if (res != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+ fq->state = qman_fq_state_oos;
+out:
+ FQUNLOCK(fq);
+ return ret;
+}
+
+int qman_fq_flow_control(struct qman_fq *fq, int xon)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+
+ int ret = 0;
+ u8 res;
+ u8 myverb;
+
+ if ((fq->state == qman_fq_state_oos) ||
+ (fq->state == qman_fq_state_retired) ||
+ (fq->state == qman_fq_state_parked))
+ return -EINVAL;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+ return -EINVAL;
+#endif
+ /* Issue an ALTER_FQXON or ALTER_FQXOFF management command */
+ p = get_affine_portal();
+ FQLOCK(fq);
+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
+ (fq->state == qman_fq_state_parked) ||
+ (fq->state == qman_fq_state_oos) ||
+ (fq->state == qman_fq_state_retired))) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = fq->fqid;
+ mcc->alterfq.count = 0;
+ myverb = xon ? QM_MCC_VERB_ALTER_FQXON : QM_MCC_VERB_ALTER_FQXOFF;
+
+ qm_mc_commit(&p->p, myverb);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
+
+ res = mcr->result;
+ if (res != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+out:
+ FQUNLOCK(fq);
+ return ret;
+}
+
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+
+ u8 res;
+
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+ res = mcr->result;
+ if (res != QM_MCR_RESULT_OK)
+ return -EIO;
+ *fqd = mcr->queryfq.fqd;
+ hw_fqd_to_cpu(fqd);
+ return 0;
+}
+
+int qman_query_fq_has_pkts(struct qman_fq *fq)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+
+ int ret = 0;
+ u8 res;
+
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK)
+ ret = !!mcr->queryfq_np.frm_cnt;
+ return ret;
+}
+
+int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+
+ u8 res;
+
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK) {
+ *np = mcr->queryfq_np;
+ np->fqd_link = be24_to_cpu(np->fqd_link);
+ np->odp_seq = be16_to_cpu(np->odp_seq);
+ np->orp_nesn = be16_to_cpu(np->orp_nesn);
+ np->orp_ea_hseq = be16_to_cpu(np->orp_ea_hseq);
+ np->orp_ea_tseq = be16_to_cpu(np->orp_ea_tseq);
+ np->orp_ea_hptr = be24_to_cpu(np->orp_ea_hptr);
+ np->orp_ea_tptr = be24_to_cpu(np->orp_ea_tptr);
+ np->pfdr_hptr = be24_to_cpu(np->pfdr_hptr);
+ np->pfdr_tptr = be24_to_cpu(np->pfdr_tptr);
+ np->ics_surp = be16_to_cpu(np->ics_surp);
+ np->byte_cnt = be32_to_cpu(np->byte_cnt);
+ np->frm_cnt = be24_to_cpu(np->frm_cnt);
+ np->ra1_sfdr = be16_to_cpu(np->ra1_sfdr);
+ np->ra2_sfdr = be16_to_cpu(np->ra2_sfdr);
+ np->od1_sfdr = be16_to_cpu(np->od1_sfdr);
+ np->od2_sfdr = be16_to_cpu(np->od2_sfdr);
+ np->od3_sfdr = be16_to_cpu(np->od3_sfdr);
+ }
+ if (res == QM_MCR_RESULT_ERR_FQID)
+ return -ERANGE;
+ else if (res != QM_MCR_RESULT_OK)
+ return -EIO;
+ return 0;
+}
+
+int qman_query_fq_frm_cnt(struct qman_fq *fq, u32 *frm_cnt)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+
+ if (mcr->result == QM_MCR_RESULT_OK)
+ *frm_cnt = be24_to_cpu(mcr->queryfq_np.frm_cnt);
+ else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
+ return -ERANGE;
+ else if (mcr->result != QM_MCR_RESULT_OK)
+ return -EIO;
+ return 0;
+}
+
+int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+
+ u8 res, myverb;
+
+ myverb = (query_dedicated) ? QM_MCR_VERB_QUERYWQ_DEDICATED :
+ QM_MCR_VERB_QUERYWQ;
+ mcc = qm_mc_start(&p->p);
+ mcc->querywq.channel.id = cpu_to_be16(wq->channel.id);
+ qm_mc_commit(&p->p, myverb);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK) {
+ int i, array_len;
+
+ wq->channel.id = be16_to_cpu(mcr->querywq.channel.id);
+ array_len = ARRAY_SIZE(mcr->querywq.wq_len);
+ for (i = 0; i < array_len; i++)
+ wq->wq_len[i] = be32_to_cpu(mcr->querywq.wq_len[i]);
+ }
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("QUERYWQ failed: %s\n", mcr_result_str(res));
+ return -EIO;
+ }
+ return 0;
+}
+
+int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
+ struct qm_mcr_cgrtestwrite *result)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+
+ u8 res;
+
+ mcc = qm_mc_start(&p->p);
+ mcc->cgrtestwrite.cgid = cgr->cgrid;
+ mcc->cgrtestwrite.i_bcnt_hi = (u8)(i_bcnt >> 32);
+ mcc->cgrtestwrite.i_bcnt_lo = (u32)i_bcnt;
+ qm_mc_commit(&p->p, QM_MCC_VERB_CGRTESTWRITE);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_CGRTESTWRITE);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK)
+ *result = mcr->cgrtestwrite;
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("CGR TEST WRITE failed: %s\n", mcr_result_str(res));
+ return -EIO;
+ }
+ return 0;
+}
+
+int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *cgrd)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ u8 res;
+ unsigned int i;
+
+ mcc = qm_mc_start(&p->p);
+ mcc->querycgr.cgid = cgr->cgrid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK)
+ *cgrd = mcr->querycgr;
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("QUERY_CGR failed: %s\n", mcr_result_str(res));
+ return -EIO;
+ }
+ cgrd->cgr.wr_parm_g.word =
+ be32_to_cpu(cgrd->cgr.wr_parm_g.word);
+ cgrd->cgr.wr_parm_y.word =
+ be32_to_cpu(cgrd->cgr.wr_parm_y.word);
+ cgrd->cgr.wr_parm_r.word =
+ be32_to_cpu(cgrd->cgr.wr_parm_r.word);
+ cgrd->cgr.cscn_targ = be32_to_cpu(cgrd->cgr.cscn_targ);
+ cgrd->cgr.__cs_thres = be16_to_cpu(cgrd->cgr.__cs_thres);
+ for (i = 0; i < ARRAY_SIZE(cgrd->cscn_targ_swp); i++)
+ cgrd->cscn_targ_swp[i] =
+ be32_to_cpu(cgrd->cscn_targ_swp[i]);
+ return 0;
+}
+
+int qman_query_congestion(struct qm_mcr_querycongestion *congestion)
+{
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ u8 res;
+ unsigned int i;
+
+ qm_mc_start(&p->p);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCC_VERB_QUERYCONGESTION);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK)
+ *congestion = mcr->querycongestion;
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("QUERY_CONGESTION failed: %s\n", mcr_result_str(res));
+ return -EIO;
+ }
+ for (i = 0; i < ARRAY_SIZE(congestion->state.state); i++)
+ congestion->state.state[i] =
+ be32_to_cpu(congestion->state.state[i]);
+ return 0;
+}
+
+int qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags)
+{
+ struct qman_portal *p = get_affine_portal();
+ uint32_t vdqcr;
+ int ret = -EBUSY;
+
+ vdqcr = vdqcr_flags;
+ vdqcr |= QM_VDQCR_NUMFRAMES_SET(num);
+
+ if ((fq->state != qman_fq_state_parked) &&
+ (fq->state != qman_fq_state_retired)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
+
+ if (!p->vdqcr_owned) {
+ FQLOCK(fq);
+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+ goto escape;
+ fq_set(fq, QMAN_FQ_STATE_VDQCR);
+ FQUNLOCK(fq);
+ p->vdqcr_owned = fq;
+ ret = 0;
+ }
+escape:
+ if (!ret)
+ qm_dqrr_vdqcr_set(&p->p, vdqcr);
+
+out:
+ return ret;
+}
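+/*
+ * Minimal volatile-dequeue sketch (illustrative; "n" is a caller-chosen
+ * frame count, QM_VDQCR_EXACT is one of the fsl_qman.h VDQCR flag bits, and
+ * the FQ must already be parked or retired). On success, the dequeued
+ * frames are delivered to fq->cb.dqrr with QM_DQRR_STAT_UNSCHEDULED set:
+ *
+ *	if (qman_set_vdq(fq, n, QM_VDQCR_EXACT))
+ *		return -EBUSY;
+ */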
+
+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags __maybe_unused,
+ u32 vdqcr)
+{
+ struct qman_portal *p;
+ int ret = -EBUSY;
+
+ if ((fq->state != qman_fq_state_parked) &&
+ (fq->state != qman_fq_state_retired))
+ return -EINVAL;
+ if (vdqcr & QM_VDQCR_FQID_MASK)
+ return -EINVAL;
+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+ return -EBUSY;
+ vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
+
+ p = get_affine_portal();
+
+ if (!p->vdqcr_owned) {
+ FQLOCK(fq);
+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+ goto escape;
+ fq_set(fq, QMAN_FQ_STATE_VDQCR);
+ FQUNLOCK(fq);
+ p->vdqcr_owned = fq;
+ ret = 0;
+ }
+escape:
+ if (ret)
+ return ret;
+
+ /* VDQCR is set */
+ qm_dqrr_vdqcr_set(&p->p, vdqcr);
+ return 0;
+}
+
+static noinline void update_eqcr_ci(struct qman_portal *p, u8 avail)
+{
+ if (avail)
+ qm_eqcr_cce_prefetch(&p->p);
+ else
+ qm_eqcr_cce_update(&p->p);
+}
+
+int qman_eqcr_is_empty(void)
+{
+ struct qman_portal *p = get_affine_portal();
+ u8 avail;
+
+ update_eqcr_ci(p, 0);
+ avail = qm_eqcr_get_fill(&p->p);
+ return (avail == 0);
+}
+
+void qman_set_dc_ern(qman_cb_dc_ern handler, int affine)
+{
+ if (affine) {
+ struct qman_portal *p = get_affine_portal();
+
+ p->cb_dc_ern = handler;
+ } else
+ cb_dc_ern = handler;
+}
+
+static inline struct qm_eqcr_entry *try_p_eq_start(struct qman_portal *p,
+ struct qman_fq *fq,
+ const struct qm_fd *fd,
+ u32 flags)
+{
+ struct qm_eqcr_entry *eq;
+ u8 avail;
+
+ if (p->use_eqcr_ci_stashing) {
+ /*
+ * The stashing case is easy, only update if we need to in
+ * order to try and liberate ring entries.
+ */
+ eq = qm_eqcr_start_stash(&p->p);
+ } else {
+ /*
+ * The non-stashing case is harder, need to prefetch ahead of
+ * time.
+ */
+ avail = qm_eqcr_get_avail(&p->p);
+ if (avail < 2)
+ update_eqcr_ci(p, avail);
+ eq = qm_eqcr_start_no_stash(&p->p);
+ }
+
+ if (unlikely(!eq))
+ return NULL;
+
+ if (flags & QMAN_ENQUEUE_FLAG_DCA)
+ eq->dca = QM_EQCR_DCA_ENABLE |
+ ((flags & QMAN_ENQUEUE_FLAG_DCA_PARK) ?
+ QM_EQCR_DCA_PARK : 0) |
+ ((flags >> 8) & QM_EQCR_DCA_IDXMASK);
+ eq->fqid = cpu_to_be32(fq->fqid);
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ eq->tag = cpu_to_be32(fq->key);
+#else
+ eq->tag = cpu_to_be32((u32)(uintptr_t)fq);
+#endif
+ eq->fd = *fd;
+ cpu_to_hw_fd(&eq->fd);
+ return eq;
+}
+
+int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags)
+{
+ struct qman_portal *p = get_affine_portal();
+ struct qm_eqcr_entry *eq;
+
+ eq = try_p_eq_start(p, fq, fd, flags);
+ if (!eq)
+ return -EBUSY;
+ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
+ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
+ /* Factor the below out, it's used from qman_enqueue_orp() too */
+ return 0;
+}
+
+int qman_enqueue_multi(struct qman_fq *fq,
+ const struct qm_fd *fd, u32 *flags,
+ int frames_to_send)
+{
+ struct qman_portal *p = get_affine_portal();
+ struct qm_portal *portal = &p->p;
+
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ struct qm_eqcr_entry *eq = eqcr->cursor, *prev_eq;
+
+ u8 i = 0, diff, old_ci, sent = 0;
+
+ /* Update the available entries if no entry is free */
+ if (!eqcr->available) {
+ old_ci = eqcr->ci;
+ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
+ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ if (!diff)
+ return 0;
+ }
+
+ /* try to send as many frames as possible */
+ while (eqcr->available && frames_to_send--) {
+ eq->fqid = fq->fqid_le;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ eq->tag = cpu_to_be32(fq->key);
+#else
+ eq->tag = cpu_to_be32((u32)(uintptr_t)fq);
+#endif
+ eq->fd.opaque_addr = fd->opaque_addr;
+ eq->fd.addr = cpu_to_be40(fd->addr);
+ eq->fd.status = cpu_to_be32(fd->status);
+ eq->fd.opaque = cpu_to_be32(fd->opaque);
+ if (flags && (flags[i] & QMAN_ENQUEUE_FLAG_DCA)) {
+ eq->dca = QM_EQCR_DCA_ENABLE |
+ ((flags[i] >> 8) & QM_EQCR_DCA_IDXMASK);
+ }
+ i++;
+ eq = (void *)((unsigned long)(eq + 1) &
+ (~(unsigned long)(QM_EQCR_SIZE << 6)));
+ eqcr->available--;
+ sent++;
+ fd++;
+ }
+ lwsync();
+
+	/* Write the verb for every sent entry first, so that the subsequent
+	 * cacheline flushes can complete back-to-back.
+	 */
+ eq = eqcr->cursor;
+ for (i = 0; i < sent; i++) {
+ eq->__dont_write_directly__verb =
+ QM_EQCR_VERB_CMD_ENQUEUE | eqcr->vbit;
+ prev_eq = eq;
+ eq = (void *)((unsigned long)(eq + 1) &
+ (~(unsigned long)(QM_EQCR_SIZE << 6)));
+ if (unlikely((prev_eq + 1) != eq))
+ eqcr->vbit ^= QM_EQCR_VERB_VBIT;
+ }
+
+ /* We need to flush all the lines but without load/store operations
+ * between them
+ */
+ eq = eqcr->cursor;
+ for (i = 0; i < sent; i++) {
+ dcbf(eq);
+ eq = (void *)((unsigned long)(eq + 1) &
+ (~(unsigned long)(QM_EQCR_SIZE << 6)));
+ }
+ /* Update cursor for the next call */
+ eqcr->cursor = eq;
+ return sent;
+}
+
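+/*
+ * Illustrative sketch, not part of the driver: qman_enqueue_multi() may send
+ * only part of a burst, so callers loop on the count it returns. The helper
+ * name is hypothetical; per-frame flags are omitted (NULL) for simplicity.
+ */
+static inline int example_enqueue_burst(struct qman_fq *fq,
+					const struct qm_fd *fds, int n)
+{
+	int sent = 0, ret;
+
+	while (sent < n) {
+		ret = qman_enqueue_multi(fq, &fds[sent], NULL, n - sent);
+		if (!ret)
+			cpu_relax(); /* EQCR full; wait for h/w to consume */
+		sent += ret;
+	}
+	return sent;
+}
+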
+int
+qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
+ int frames_to_send)
+{
+ struct qman_portal *p = get_affine_portal();
+ struct qm_portal *portal = &p->p;
+
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ struct qm_eqcr_entry *eq = eqcr->cursor, *prev_eq;
+
+ u8 i, diff, old_ci, sent = 0;
+
+ /* Update the available entries if no entry is free */
+ if (!eqcr->available) {
+ old_ci = eqcr->ci;
+ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
+ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ if (!diff)
+ return 0;
+ }
+
+ /* try to send as many frames as possible */
+ while (eqcr->available && frames_to_send--) {
+ eq->fqid = fq[sent]->fqid_le;
+ eq->fd.opaque_addr = fd->opaque_addr;
+ eq->fd.addr = cpu_to_be40(fd->addr);
+ eq->fd.status = cpu_to_be32(fd->status);
+ eq->fd.opaque = cpu_to_be32(fd->opaque);
+
+ eq = (void *)((unsigned long)(eq + 1) &
+ (~(unsigned long)(QM_EQCR_SIZE << 6)));
+ eqcr->available--;
+ sent++;
+ fd++;
+ }
+ lwsync();
+
+	/* Write the verb for every sent entry first, so that the subsequent
+	 * cacheline flushes can complete back-to-back.
+	 */
+ eq = eqcr->cursor;
+ for (i = 0; i < sent; i++) {
+ eq->__dont_write_directly__verb =
+ QM_EQCR_VERB_CMD_ENQUEUE | eqcr->vbit;
+ prev_eq = eq;
+ eq = (void *)((unsigned long)(eq + 1) &
+ (~(unsigned long)(QM_EQCR_SIZE << 6)));
+ if (unlikely((prev_eq + 1) != eq))
+ eqcr->vbit ^= QM_EQCR_VERB_VBIT;
+ }
+
+ /* We need to flush all the lines but without load/store operations
+ * between them
+ */
+ eq = eqcr->cursor;
+ for (i = 0; i < sent; i++) {
+ dcbf(eq);
+ eq = (void *)((unsigned long)(eq + 1) &
+ (~(unsigned long)(QM_EQCR_SIZE << 6)));
+ }
+ /* Update cursor for the next call */
+ eqcr->cursor = eq;
+ return sent;
+}
+
+int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
+ struct qman_fq *orp, u16 orp_seqnum)
+{
+ struct qman_portal *p = get_affine_portal();
+ struct qm_eqcr_entry *eq;
+
+ eq = try_p_eq_start(p, fq, fd, flags);
+ if (!eq)
+ return -EBUSY;
+ /* Process ORP-specifics here */
+ if (flags & QMAN_ENQUEUE_FLAG_NLIS)
+ orp_seqnum |= QM_EQCR_SEQNUM_NLIS;
+ else {
+ orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS;
+ if (flags & QMAN_ENQUEUE_FLAG_NESN)
+ orp_seqnum |= QM_EQCR_SEQNUM_NESN;
+ else
+			/* No need to check for QMAN_ENQUEUE_FLAG_HOLE */
+ orp_seqnum &= ~QM_EQCR_SEQNUM_NESN;
+ }
+ eq->seqnum = cpu_to_be16(orp_seqnum);
+ eq->orp = cpu_to_be32(orp->fqid);
+ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP |
+ ((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ?
+ 0 : QM_EQCR_VERB_CMD_ENQUEUE) |
+ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
+
+ return 0;
+}
+
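+/*
+ * Illustrative sketch, not part of the driver: enqueueing fragments through
+ * an order-restoration point keeps NLIS set on every fragment except the
+ * last, as handled above. The helper is hypothetical.
+ */
+static inline int example_enqueue_fragment(struct qman_fq *fq,
+					   const struct qm_fd *fd,
+					   struct qman_fq *orp, u16 seqnum,
+					   int last)
+{
+	u32 flags = last ? 0 : QMAN_ENQUEUE_FLAG_NLIS;
+
+	return qman_enqueue_orp(fq, fd, flags, orp, seqnum);
+}
+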
+int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+
+ u8 res;
+ u8 verb = QM_MCC_VERB_MODIFYCGR;
+
+ mcc = qm_mc_start(&p->p);
+ if (opts)
+ mcc->initcgr = *opts;
+ mcc->initcgr.we_mask = cpu_to_be16(mcc->initcgr.we_mask);
+ mcc->initcgr.cgr.wr_parm_g.word =
+ cpu_to_be32(mcc->initcgr.cgr.wr_parm_g.word);
+ mcc->initcgr.cgr.wr_parm_y.word =
+ cpu_to_be32(mcc->initcgr.cgr.wr_parm_y.word);
+ mcc->initcgr.cgr.wr_parm_r.word =
+ cpu_to_be32(mcc->initcgr.cgr.wr_parm_r.word);
+ mcc->initcgr.cgr.cscn_targ = cpu_to_be32(mcc->initcgr.cgr.cscn_targ);
+ mcc->initcgr.cgr.__cs_thres = cpu_to_be16(mcc->initcgr.cgr.__cs_thres);
+
+ mcc->initcgr.cgid = cgr->cgrid;
+ if (flags & QMAN_CGR_FLAG_USE_INIT)
+ verb = QM_MCC_VERB_INITCGR;
+ qm_mc_commit(&p->p, verb);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
+ res = mcr->result;
+ return (res == QM_MCR_RESULT_OK) ? 0 : -EIO;
+}
+
+#define TARG_MASK(n) (0x80000000 >> (n->config->channel - \
+ QM_CHANNEL_SWPORTAL0))
+#define TARG_DCP_MASK(n) (0x80000000 >> (10 + n))
+#define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0)
+
+int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts)
+{
+ struct qm_mcr_querycgr cgr_state;
+ struct qm_mcc_initcgr local_opts;
+ int ret;
+ struct qman_portal *p;
+
+ /* We have to check that the provided CGRID is within the limits of the
+ * data-structures, for obvious reasons. However we'll let h/w take
+ * care of determining whether it's within the limits of what exists on
+ * the SoC.
+ */
+ if (cgr->cgrid >= __CGR_NUM)
+ return -EINVAL;
+
+ p = get_affine_portal();
+
+ memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+ cgr->chan = p->config->channel;
+ spin_lock(&p->cgr_lock);
+
+ /* if no opts specified, just add it to the list */
+ if (!opts)
+ goto add_list;
+
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret)
+ goto release_lock;
+ if (opts)
+ local_opts = *opts;
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ local_opts.cgr.cscn_targ_upd_ctrl =
+ QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
+ else
+ /* Overwrite TARG */
+ local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
+ TARG_MASK(p);
+ local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
+
+ /* send init if flags indicate so */
+ if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
+ ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &local_opts);
+ else
+ ret = qman_modify_cgr(cgr, 0, &local_opts);
+ if (ret)
+ goto release_lock;
+add_list:
+ list_add(&cgr->node, &p->cgr_cbs);
+
+ /* Determine if newly added object requires its callback to be called */
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret) {
+		/* we can't go back, so proceed and return success, but scream
+		 * and wail to the log file.
+		 */
+ pr_crit("CGR HW state partially modified\n");
+ ret = 0;
+ goto release_lock;
+ }
+ if (cgr->cb && cgr_state.cgr.cscn_en && qman_cgrs_get(&p->cgrs[1],
+ cgr->cgrid))
+ cgr->cb(p, cgr, 1);
+release_lock:
+ spin_unlock(&p->cgr_lock);
+ return ret;
+}
+
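+/*
+ * Illustrative sketch, not part of the driver: registering a congestion
+ * group with a state-change callback on the current thread's affine portal.
+ * Passing NULL opts only attaches the object to the portal's callback list.
+ * The helper is hypothetical and assumes qman_cb_cgr is the callback type
+ * declared in fsl_qman.h.
+ */
+static inline int example_track_cgr(struct qman_cgr *cgr, u32 cgrid,
+				    qman_cb_cgr cb)
+{
+	cgr->cgrid = cgrid;
+	cgr->cb = cb;
+	return qman_create_cgr(cgr, 0, NULL);
+}
+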
+int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
+ struct qm_mcc_initcgr *opts)
+{
+ struct qm_mcc_initcgr local_opts;
+ struct qm_mcr_querycgr cgr_state;
+ int ret;
+
+ if ((qman_ip_rev & 0xFF00) < QMAN_REV30) {
+ pr_warn("QMan version doesn't support CSCN => DCP portal\n");
+ return -EINVAL;
+ }
+ /* We have to check that the provided CGRID is within the limits of the
+ * data-structures, for obvious reasons. However we'll let h/w take
+ * care of determining whether it's within the limits of what exists on
+ * the SoC.
+ */
+ if (cgr->cgrid >= __CGR_NUM)
+ return -EINVAL;
+
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret)
+ return ret;
+
+ memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+ if (opts)
+ local_opts = *opts;
+
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ local_opts.cgr.cscn_targ_upd_ctrl =
+ QM_CGR_TARG_UDP_CTRL_WRITE_BIT |
+ QM_CGR_TARG_UDP_CTRL_DCP | dcp_portal;
+ else
+ local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
+ TARG_DCP_MASK(dcp_portal);
+ local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
+
+ /* send init if flags indicate so */
+ if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
+ ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
+ &local_opts);
+ else
+ ret = qman_modify_cgr(cgr, 0, &local_opts);
+
+ return ret;
+}
+
+int qman_delete_cgr(struct qman_cgr *cgr)
+{
+ struct qm_mcr_querycgr cgr_state;
+ struct qm_mcc_initcgr local_opts;
+ int ret = 0;
+ struct qman_cgr *i;
+ struct qman_portal *p = get_affine_portal();
+
+ if (cgr->chan != p->config->channel) {
+		pr_crit("Attempting to delete cgr from different portal than"
+			" it was created on: created 0x%x, delete 0x%x\n",
+			cgr->chan, p->config->channel);
+ ret = -EINVAL;
+ goto put_portal;
+ }
+ memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+ spin_lock(&p->cgr_lock);
+ list_del(&cgr->node);
+ /*
+ * If there are no other CGR objects for this CGRID in the list,
+ * update CSCN_TARG accordingly
+ */
+ list_for_each_entry(i, &p->cgr_cbs, node)
+ if ((i->cgrid == cgr->cgrid) && i->cb)
+ goto release_lock;
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret) {
+ /* add back to the list */
+ list_add(&cgr->node, &p->cgr_cbs);
+ goto release_lock;
+ }
+ /* Overwrite TARG */
+ local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
+ else
+ local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
+ ~(TARG_MASK(p));
+ ret = qman_modify_cgr(cgr, 0, &local_opts);
+ if (ret)
+ /* add back to the list */
+ list_add(&cgr->node, &p->cgr_cbs);
+release_lock:
+ spin_unlock(&p->cgr_lock);
+put_portal:
+ return ret;
+}
+
+int qman_shutdown_fq(u32 fqid)
+{
+ struct qman_portal *p;
+ struct qm_portal *low_p;
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ u8 state;
+ int orl_empty, fq_empty, drain = 0;
+ u32 result;
+ u32 channel, wq;
+ u16 dest_wq;
+
+ p = get_affine_portal();
+ low_p = &p->p;
+
+ /* Determine the state of the FQID */
+ mcc = qm_mc_start(low_p);
+ mcc->queryfq_np.fqid = cpu_to_be32(fqid);
+ qm_mc_commit(low_p, QM_MCC_VERB_QUERYFQ_NP);
+ while (!(mcr = qm_mc_result(low_p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+ state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
+ if (state == QM_MCR_NP_STATE_OOS)
+ return 0; /* Already OOS, no need to do anymore checks */
+
+ /* Query which channel the FQ is using */
+ mcc = qm_mc_start(low_p);
+ mcc->queryfq.fqid = cpu_to_be32(fqid);
+ qm_mc_commit(low_p, QM_MCC_VERB_QUERYFQ);
+ while (!(mcr = qm_mc_result(low_p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+
+ /* Need to store these since the MCR gets reused */
+ dest_wq = be16_to_cpu(mcr->queryfq.fqd.dest_wq);
+ channel = dest_wq & 0x7;
+ wq = dest_wq >> 3;
+
+ switch (state) {
+ case QM_MCR_NP_STATE_TEN_SCHED:
+ case QM_MCR_NP_STATE_TRU_SCHED:
+ case QM_MCR_NP_STATE_ACTIVE:
+ case QM_MCR_NP_STATE_PARKED:
+ orl_empty = 0;
+ mcc = qm_mc_start(low_p);
+ mcc->alterfq.fqid = cpu_to_be32(fqid);
+ qm_mc_commit(low_p, QM_MCC_VERB_ALTER_RETIRE);
+ while (!(mcr = qm_mc_result(low_p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_RETIRE);
+ result = mcr->result; /* Make a copy as we reuse MCR below */
+
+ if (result == QM_MCR_RESULT_PENDING) {
+ /* Need to wait for the FQRN in the message ring, which
+ * will only occur once the FQ has been drained. In
+			 * order for the FQ to drain, the portal needs to be set
+ * to dequeue from the channel the FQ is scheduled on
+ */
+ const struct qm_mr_entry *msg;
+ const struct qm_dqrr_entry *dqrr = NULL;
+ int found_fqrn = 0;
+ __maybe_unused u16 dequeue_wq = 0;
+
+ /* Flag that we need to drain FQ */
+ drain = 1;
+
+ if (channel >= qm_channel_pool1 &&
+ channel < (u16)(qm_channel_pool1 + 15)) {
+ /* Pool channel, enable the bit in the portal */
+ dequeue_wq = (channel -
+ qm_channel_pool1 + 1) << 4 | wq;
+ } else if (channel < qm_channel_pool1) {
+ /* Dedicated channel */
+ dequeue_wq = wq;
+ } else {
+ pr_info("Cannot recover FQ 0x%x,"
+ " it is scheduled on channel 0x%x",
+ fqid, channel);
+ return -EBUSY;
+ }
+ /* Set the sdqcr to drain this channel */
+ if (channel < qm_channel_pool1)
+ qm_dqrr_sdqcr_set(low_p,
+ QM_SDQCR_TYPE_ACTIVE |
+ QM_SDQCR_CHANNELS_DEDICATED);
+ else
+ qm_dqrr_sdqcr_set(low_p,
+ QM_SDQCR_TYPE_ACTIVE |
+ QM_SDQCR_CHANNELS_POOL_CONV
+ (channel));
+ while (!found_fqrn) {
+				/* Keep draining DQRR while checking the MR */
+ qm_dqrr_pvb_update(low_p);
+ dqrr = qm_dqrr_current(low_p);
+ while (dqrr) {
+ qm_dqrr_cdc_consume_1ptr(
+ low_p, dqrr, 0);
+ qm_dqrr_pvb_update(low_p);
+ qm_dqrr_next(low_p);
+ dqrr = qm_dqrr_current(low_p);
+ }
+ /* Process message ring too */
+ qm_mr_pvb_update(low_p);
+ msg = qm_mr_current(low_p);
+ while (msg) {
+ if ((msg->ern.verb &
+ QM_MR_VERB_TYPE_MASK)
+ == QM_MR_VERB_FQRN)
+ found_fqrn = 1;
+ qm_mr_next(low_p);
+ qm_mr_cci_consume_to_current(low_p);
+ qm_mr_pvb_update(low_p);
+ msg = qm_mr_current(low_p);
+ }
+ cpu_relax();
+ }
+ }
+ if (result != QM_MCR_RESULT_OK &&
+ result != QM_MCR_RESULT_PENDING) {
+ /* error */
+ pr_err("qman_retire_fq failed on FQ 0x%x,"
+ " result=0x%x\n", fqid, result);
+ return -1;
+ }
+ if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
+ /* ORL had no entries, no need to wait until the
+ * ERNs come in.
+ */
+ orl_empty = 1;
+ }
+ /* Retirement succeeded, check to see if FQ needs
+ * to be drained.
+ */
+ if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
+ /* FQ is Not Empty, drain using volatile DQ commands */
+ fq_empty = 0;
+ do {
+ const struct qm_dqrr_entry *dqrr = NULL;
+ u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);
+
+ qm_dqrr_vdqcr_set(low_p, vdqcr);
+
+ /* Wait for a dequeue to occur */
+ while (dqrr == NULL) {
+ qm_dqrr_pvb_update(low_p);
+ dqrr = qm_dqrr_current(low_p);
+ if (!dqrr)
+ cpu_relax();
+ }
+ /* Process the dequeues, making sure to
+ * empty the ring completely.
+ */
+ while (dqrr) {
+ if (dqrr->fqid == fqid &&
+ dqrr->stat & QM_DQRR_STAT_FQ_EMPTY)
+ fq_empty = 1;
+ qm_dqrr_cdc_consume_1ptr(low_p,
+ dqrr, 0);
+ qm_dqrr_pvb_update(low_p);
+ qm_dqrr_next(low_p);
+ dqrr = qm_dqrr_current(low_p);
+ }
+ } while (fq_empty == 0);
+ }
+ qm_dqrr_sdqcr_set(low_p, 0);
+
+ /* Wait for the ORL to have been completely drained */
+ while (orl_empty == 0) {
+ const struct qm_mr_entry *msg;
+
+ qm_mr_pvb_update(low_p);
+ msg = qm_mr_current(low_p);
+ while (msg) {
+ if ((msg->ern.verb & QM_MR_VERB_TYPE_MASK) ==
+ QM_MR_VERB_FQRL)
+ orl_empty = 1;
+ qm_mr_next(low_p);
+ qm_mr_cci_consume_to_current(low_p);
+ qm_mr_pvb_update(low_p);
+ msg = qm_mr_current(low_p);
+ }
+ cpu_relax();
+ }
+ mcc = qm_mc_start(low_p);
+ mcc->alterfq.fqid = cpu_to_be32(fqid);
+ qm_mc_commit(low_p, QM_MCC_VERB_ALTER_OOS);
+ while (!(mcr = qm_mc_result(low_p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_OOS);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ pr_err(
+ "OOS after drain Failed on FQID 0x%x, result 0x%x\n",
+ fqid, mcr->result);
+ return -1;
+ }
+ return 0;
+
+ case QM_MCR_NP_STATE_RETIRED:
+ /* Send OOS Command */
+ mcc = qm_mc_start(low_p);
+ mcc->alterfq.fqid = cpu_to_be32(fqid);
+ qm_mc_commit(low_p, QM_MCC_VERB_ALTER_OOS);
+ while (!(mcr = qm_mc_result(low_p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_OOS);
+ if (mcr->result) {
+ pr_err("OOS Failed on FQID 0x%x\n", fqid);
+ return -1;
+ }
+ return 0;
+
+ }
+ return -1;
+}
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman.h b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman.h
new file mode 100644
index 00000000..4346d865
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman.h
@@ -0,0 +1,913 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#include "qman_priv.h"
+
+/***************************/
+/* Portal register assists */
+/***************************/
+#define QM_REG_EQCR_PI_CINH 0x3000
+#define QM_REG_EQCR_CI_CINH 0x3040
+#define QM_REG_EQCR_ITR 0x3080
+#define QM_REG_DQRR_PI_CINH 0x3100
+#define QM_REG_DQRR_CI_CINH 0x3140
+#define QM_REG_DQRR_ITR 0x3180
+#define QM_REG_DQRR_DCAP 0x31C0
+#define QM_REG_DQRR_SDQCR 0x3200
+#define QM_REG_DQRR_VDQCR 0x3240
+#define QM_REG_DQRR_PDQCR 0x3280
+#define QM_REG_MR_PI_CINH 0x3300
+#define QM_REG_MR_CI_CINH 0x3340
+#define QM_REG_MR_ITR 0x3380
+#define QM_REG_CFG 0x3500
+#define QM_REG_ISR 0x3600
+#define QM_REG_IIR 0x36C0
+#define QM_REG_ITPR 0x3740
+
+/* Cache-enabled register offsets */
+#define QM_CL_EQCR 0x0000
+#define QM_CL_DQRR 0x1000
+#define QM_CL_MR 0x2000
+#define QM_CL_EQCR_PI_CENA 0x3000
+#define QM_CL_EQCR_CI_CENA 0x3040
+#define QM_CL_DQRR_PI_CENA 0x3100
+#define QM_CL_DQRR_CI_CENA 0x3140
+#define QM_CL_MR_PI_CENA 0x3300
+#define QM_CL_MR_CI_CENA 0x3340
+#define QM_CL_CR 0x3800
+#define QM_CL_RR0 0x3900
+#define QM_CL_RR1 0x3940
+
+/* BTW, the drivers (and h/w programming model) already obtain the required
+ * synchronisation for portal accesses via lwsync(), hwsync(), and
+ * data-dependencies. Use of barrier()s or other order-preserving primitives
+ * simply degrades performance. Hence the use of the __raw_*() interfaces, which
+ * simply ensure that the compiler treats the portal registers as volatile (ie.
+ * non-coherent).
+ */
+
+/* Cache-inhibited register access. */
+#define __qm_in(qm, o) be32_to_cpu(__raw_readl((qm)->ci + (o)))
+#define __qm_out(qm, o, val) __raw_writel((cpu_to_be32(val)), \
+ (qm)->ci + (o))
+#define qm_in(reg) __qm_in(&portal->addr, QM_REG_##reg)
+#define qm_out(reg, val) __qm_out(&portal->addr, QM_REG_##reg, val)
+
+/* Cache-enabled (index) register access */
+#define __qm_cl_touch_ro(qm, o) dcbt_ro((qm)->ce + (o))
+#define __qm_cl_touch_rw(qm, o) dcbt_rw((qm)->ce + (o))
+#define __qm_cl_in(qm, o) be32_to_cpu(__raw_readl((qm)->ce + (o)))
+#define __qm_cl_out(qm, o, val) \
+ do { \
+ u32 *__tmpclout = (qm)->ce + (o); \
+ __raw_writel(cpu_to_be32(val), __tmpclout); \
+ dcbf(__tmpclout); \
+ } while (0)
+#define __qm_cl_invalidate(qm, o) dccivac((qm)->ce + (o))
+#define qm_cl_touch_ro(reg) __qm_cl_touch_ro(&portal->addr, QM_CL_##reg##_CENA)
+#define qm_cl_touch_rw(reg) __qm_cl_touch_rw(&portal->addr, QM_CL_##reg##_CENA)
+#define qm_cl_in(reg) __qm_cl_in(&portal->addr, QM_CL_##reg##_CENA)
+#define qm_cl_out(reg, val) __qm_cl_out(&portal->addr, QM_CL_##reg##_CENA, val)
+#define qm_cl_invalidate(reg)\
+ __qm_cl_invalidate(&portal->addr, QM_CL_##reg##_CENA)
+
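+/*
+ * Example expansion, for illustration only: in a function that declares
+ * "struct qm_portal *portal", qm_cl_in(EQCR_CI) reads the cache-enabled copy
+ * of the EQCR consumer index, i.e.
+ * be32_to_cpu(__raw_readl(portal->addr.ce + QM_CL_EQCR_CI_CENA)).
+ */
+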
+/* Cache-enabled ring access */
+#define qm_cl(base, idx) ((void *)base + ((idx) << 6))
+
+/* Cyclic helper for rings. FIXME: once we are able to do fine-grained perf
+ * analysis, look at using the "extra" bit in the ring index registers to avoid
+ * cyclic issues.
+ */
+static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
+{
+ /* 'first' is included, 'last' is excluded */
+ if (first <= last)
+ return last - first;
+ return ringsize + last - first;
+}
+
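+/*
+ * Worked example: with ringsize == 8, first == 6 and last == 2 the ring has
+ * wrapped, so the distance is 8 + 2 - 6 == 4 entries (indices 6, 7, 0, 1).
+ */
+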
+/* Portal modes.
+ * Enum types:
+ *   pmode == production mode
+ *   cmode == consumption mode
+ *   dmode == h/w dequeue mode
+ * Enum values use 3 letter codes. First letter matches the portal mode,
+ * remaining two letters indicate:
+ *   ci == cache-inhibited portal register
+ *   ce == cache-enabled portal register
+ *   vb == in-band valid-bit (cache-enabled)
+ *   dc == DCA (Discrete Consumption Acknowledgment), DQRR-only
+ * As for "enum qm_dqrr_dmode", it should be self-explanatory.
+ */
+enum qm_eqcr_pmode { /* matches QCSP_CFG::EPM */
+ qm_eqcr_pci = 0, /* PI index, cache-inhibited */
+ qm_eqcr_pce = 1, /* PI index, cache-enabled */
+ qm_eqcr_pvb = 2 /* valid-bit */
+};
+
+enum qm_dqrr_dmode { /* matches QCSP_CFG::DP */
+ qm_dqrr_dpush = 0, /* SDQCR + VDQCR */
+ qm_dqrr_dpull = 1 /* PDQCR */
+};
+
+enum qm_dqrr_pmode { /* s/w-only */
+ qm_dqrr_pci, /* reads DQRR_PI_CINH */
+ qm_dqrr_pce, /* reads DQRR_PI_CENA */
+ qm_dqrr_pvb /* reads valid-bit */
+};
+
+enum qm_dqrr_cmode { /* matches QCSP_CFG::DCM */
+ qm_dqrr_cci = 0, /* CI index, cache-inhibited */
+ qm_dqrr_cce = 1, /* CI index, cache-enabled */
+ qm_dqrr_cdc = 2 /* Discrete Consumption Acknowledgment */
+};
+
+enum qm_mr_pmode { /* s/w-only */
+ qm_mr_pci, /* reads MR_PI_CINH */
+ qm_mr_pce, /* reads MR_PI_CENA */
+ qm_mr_pvb /* reads valid-bit */
+};
+
+enum qm_mr_cmode { /* matches QCSP_CFG::MM */
+ qm_mr_cci = 0, /* CI index, cache-inhibited */
+ qm_mr_cce = 1 /* CI index, cache-enabled */
+};
+
+/* ------------------------- */
+/* --- Portal structures --- */
+
+#define QM_EQCR_SIZE 8
+#define QM_DQRR_SIZE 16
+#define QM_MR_SIZE 8
+
+struct qm_eqcr {
+ struct qm_eqcr_entry *ring, *cursor;
+ u8 ci, available, ithresh, vbit;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ u32 busy;
+ enum qm_eqcr_pmode pmode;
+#endif
+};
+
+struct qm_dqrr {
+ struct qm_dqrr_entry *ring, *cursor;
+ u8 pi, ci, fill, ithresh, vbit;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ enum qm_dqrr_dmode dmode;
+ enum qm_dqrr_pmode pmode;
+ enum qm_dqrr_cmode cmode;
+#endif
+};
+
+struct qm_mr {
+ const struct qm_mr_entry *ring, *cursor;
+ u8 pi, ci, fill, ithresh, vbit;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ enum qm_mr_pmode pmode;
+ enum qm_mr_cmode cmode;
+#endif
+};
+
+struct qm_mc {
+ struct qm_mc_command *cr;
+ struct qm_mc_result *rr;
+ u8 rridx, vbit;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ enum {
+ /* Can be _mc_start()ed */
+ qman_mc_idle,
+ /* Can be _mc_commit()ed or _mc_abort()ed */
+ qman_mc_user,
+ /* Can only be _mc_retry()ed */
+ qman_mc_hw
+ } state;
+#endif
+};
+
+#define QM_PORTAL_ALIGNMENT ____cacheline_aligned
+
+struct qm_addr {
+ void __iomem *ce; /* cache-enabled */
+ void __iomem *ci; /* cache-inhibited */
+};
+
+struct qm_portal {
+ struct qm_addr addr;
+ struct qm_eqcr eqcr;
+ struct qm_dqrr dqrr;
+ struct qm_mr mr;
+ struct qm_mc mc;
+} QM_PORTAL_ALIGNMENT;
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+#define EQCR_CARRYCLEAR(p) \
+ (void *)((unsigned long)(p) & (~(unsigned long)(QM_EQCR_SIZE << 6)))
+
+extern dma_addr_t rte_mem_virt2iova(const void *addr);
+
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static inline u8 EQCR_PTR2IDX(struct qm_eqcr_entry *e)
+{
+ return ((uintptr_t)e >> 6) & (QM_EQCR_SIZE - 1);
+}
+
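+/*
+ * Worked example: EQCR entries are 64 bytes, so bits [8:6] of an entry's
+ * address select one of the QM_EQCR_SIZE (8) ring slots, and the "carry"
+ * cleared by EQCR_CARRYCLEAR() is bit 9 (QM_EQCR_SIZE << 6 == 0x200), which
+ * appears when cursor + 1 steps past the last slot.
+ */
+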
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void EQCR_INC(struct qm_eqcr *eqcr)
+{
+ /* NB: this is odd-looking, but experiments show that it generates fast
+ * code with essentially no branching overheads. We increment to the
+ * next EQCR pointer and handle overflow and 'vbit'.
+ */
+ struct qm_eqcr_entry *partial = eqcr->cursor + 1;
+
+ eqcr->cursor = EQCR_CARRYCLEAR(partial);
+ if (partial != eqcr->cursor)
+ eqcr->vbit ^= QM_EQCR_VERB_VBIT;
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
+ *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(!eqcr->busy);
+#endif
+ if (!eqcr->available)
+ return NULL;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ eqcr->busy = 1;
+#endif
+
+ return eqcr->cursor;
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
+ *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 diff, old_ci;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(!eqcr->busy);
+#endif
+ if (!eqcr->available) {
+ old_ci = eqcr->ci;
+ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
+ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ if (!diff)
+ return NULL;
+ }
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ eqcr->busy = 1;
+#endif
+ return eqcr->cursor;
+}
+
+static inline void qm_eqcr_abort(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(eqcr->busy);
+ eqcr->busy = 0;
+#endif
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_pend_and_next(
+ struct qm_portal *portal, u8 myverb)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(eqcr->busy);
+ DPAA_ASSERT(eqcr->pmode != qm_eqcr_pvb);
+#endif
+ if (eqcr->available == 1)
+ return NULL;
+ eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
+ dcbf(eqcr->cursor);
+ EQCR_INC(eqcr);
+ eqcr->available--;
+ return eqcr->cursor;
+}
+
+#define EQCR_COMMIT_CHECKS(eqcr) \
+do { \
+ DPAA_ASSERT(eqcr->busy); \
+ DPAA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff)); \
+ DPAA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff)); \
+} while (0)
+
+static inline void qm_eqcr_pci_commit(struct qm_portal *portal, u8 myverb)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ EQCR_COMMIT_CHECKS(eqcr);
+ DPAA_ASSERT(eqcr->pmode == qm_eqcr_pci);
+#endif
+ eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
+ EQCR_INC(eqcr);
+ eqcr->available--;
+ dcbf(eqcr->cursor);
+ hwsync();
+ qm_out(EQCR_PI_CINH, EQCR_PTR2IDX(eqcr->cursor));
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ eqcr->busy = 0;
+#endif
+}
+
+static inline void qm_eqcr_pce_prefetch(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(eqcr->pmode == qm_eqcr_pce);
+#endif
+ qm_cl_invalidate(EQCR_PI);
+ qm_cl_touch_rw(EQCR_PI);
+}
+
+static inline void qm_eqcr_pce_commit(struct qm_portal *portal, u8 myverb)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ EQCR_COMMIT_CHECKS(eqcr);
+ DPAA_ASSERT(eqcr->pmode == qm_eqcr_pce);
+#endif
+ eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
+ EQCR_INC(eqcr);
+ eqcr->available--;
+ dcbf(eqcr->cursor);
+ lwsync();
+ qm_cl_out(EQCR_PI, EQCR_PTR2IDX(eqcr->cursor));
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ eqcr->busy = 0;
+#endif
+}
+
+static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ struct qm_eqcr_entry *eqcursor;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ EQCR_COMMIT_CHECKS(eqcr);
+ DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
+#endif
+ lwsync();
+ eqcursor = eqcr->cursor;
+ eqcursor->__dont_write_directly__verb = myverb | eqcr->vbit;
+ dcbf(eqcursor);
+ EQCR_INC(eqcr);
+ eqcr->available--;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ eqcr->busy = 0;
+#endif
+}
+
+static inline u8 qm_eqcr_cci_update(struct qm_portal *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 diff, old_ci = eqcr->ci;
+
+ eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ return diff;
+}
+
+static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
+
+ qm_cl_touch_ro(EQCR_CI);
+}
+
+static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 diff, old_ci = eqcr->ci;
+
+ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
+ qm_cl_invalidate(EQCR_CI);
+ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ return diff;
+}
+
+static inline u8 qm_eqcr_get_ithresh(struct qm_portal *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+ return eqcr->ithresh;
+}
+
+static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+ eqcr->ithresh = ithresh;
+ qm_out(EQCR_ITR, ithresh);
+}
+
+static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+ return eqcr->available;
+}
+
+static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+ return QM_EQCR_SIZE - 1 - eqcr->available;
+}
+
+#define DQRR_CARRYCLEAR(p) \
+ (void *)((unsigned long)(p) & (~(unsigned long)(QM_DQRR_SIZE << 6)))
+
+static inline u8 DQRR_PTR2IDX(const struct qm_dqrr_entry *e)
+{
+ return ((uintptr_t)e >> 6) & (QM_DQRR_SIZE - 1);
+}
+
+static inline struct qm_dqrr_entry *DQRR_INC(
+ const struct qm_dqrr_entry *e)
+{
+ return DQRR_CARRYCLEAR(e + 1);
+}
+
+static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
+{
+ qm_out(CFG, (qm_in(CFG) & 0xff0fffff) |
+ ((mf & (QM_DQRR_SIZE - 1)) << 20));
+}
+
+static inline const struct qm_dqrr_entry *qm_dqrr_current(
+ struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ if (!dqrr->fill)
+ return NULL;
+ return dqrr->cursor;
+}
+
+static inline u8 qm_dqrr_cursor(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ return DQRR_PTR2IDX(dqrr->cursor);
+}
+
+static inline u8 qm_dqrr_next(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPAA_ASSERT(dqrr->fill);
+ dqrr->cursor = DQRR_INC(dqrr->cursor);
+ return --dqrr->fill;
+}
+
+static inline u8 qm_dqrr_pci_update(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ u8 diff, old_pi = dqrr->pi;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->pmode == qm_dqrr_pci);
+#endif
+ dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
+ diff = qm_cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi);
+ dqrr->fill += diff;
+ return diff;
+}
+
+static inline void qm_dqrr_pce_prefetch(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->pmode == qm_dqrr_pce);
+#endif
+ qm_cl_invalidate(DQRR_PI);
+ qm_cl_touch_ro(DQRR_PI);
+}
+
+static inline u8 qm_dqrr_pce_update(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ u8 diff, old_pi = dqrr->pi;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->pmode == qm_dqrr_pce);
+#endif
+ dqrr->pi = qm_cl_in(DQRR_PI) & (QM_DQRR_SIZE - 1);
+ diff = qm_cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi);
+ dqrr->fill += diff;
+ return diff;
+}
+
+static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ const struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
+#endif
+ /* when accessing 'verb', use __raw_readb() to ensure that compiler
+ * inlining doesn't try to optimise out "excess reads".
+ */
+ if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
+ dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
+ if (!dqrr->pi)
+ dqrr->vbit ^= QM_DQRR_VERB_VBIT;
+ dqrr->fill++;
+ }
+}
+
+static inline void qm_dqrr_cci_consume(struct qm_portal *portal, u8 num)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cci);
+#endif
+ dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1);
+ qm_out(DQRR_CI_CINH, dqrr->ci);
+}
+
+static inline void qm_dqrr_cci_consume_to_current(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cci);
+#endif
+ dqrr->ci = DQRR_PTR2IDX(dqrr->cursor);
+ qm_out(DQRR_CI_CINH, dqrr->ci);
+}
+
+static inline void qm_dqrr_cce_prefetch(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cce);
+#endif
+ qm_cl_invalidate(DQRR_CI);
+ qm_cl_touch_rw(DQRR_CI);
+}
+
+static inline void qm_dqrr_cce_consume(struct qm_portal *portal, u8 num)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cce);
+#endif
+ dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1);
+ qm_cl_out(DQRR_CI, dqrr->ci);
+}
+
+static inline void qm_dqrr_cce_consume_to_current(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cce);
+#endif
+ dqrr->ci = DQRR_PTR2IDX(dqrr->cursor);
+ qm_cl_out(DQRR_CI, dqrr->ci);
+}
+
+static inline void qm_dqrr_cdc_consume_1(struct qm_portal *portal, u8 idx,
+ int park)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+#endif
+ DPAA_ASSERT(idx < QM_DQRR_SIZE);
+ qm_out(DQRR_DCAP, (0 << 8) | /* S */
+ ((park ? 1 : 0) << 6) | /* PK */
+ idx); /* DCAP_CI */
+}
+
+static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
+ const struct qm_dqrr_entry *dq,
+ int park)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+ u8 idx = DQRR_PTR2IDX(dq);
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+#endif
+ DPAA_ASSERT(idx < QM_DQRR_SIZE);
+ qm_out(DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */
+ ((park ? 1 : 0) << 6) | /* DQRR_DCAP::PK */
+ idx); /* DQRR_DCAP::DCAP_CI */
+}
+
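+/*
+ * Illustrative sketch, not part of the driver: the canonical DQRR poll loop
+ * in CDC mode, mirroring the drain loops in qman_shutdown_fq(). The helper
+ * is hypothetical and elides per-frame processing.
+ */
+static inline void example_drain_dqrr(struct qm_portal *portal)
+{
+	const struct qm_dqrr_entry *dq;
+
+	qm_dqrr_pvb_update(portal);
+	dq = qm_dqrr_current(portal);
+	while (dq) {
+		/* ... process the dequeued frame here ... */
+		qm_dqrr_cdc_consume_1ptr(portal, dq, 0);
+		qm_dqrr_pvb_update(portal);
+		qm_dqrr_next(portal);
+		dq = qm_dqrr_current(portal);
+	}
+}
+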
+static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u16 bitmask)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+#endif
+ qm_out(DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */
+ ((u32)bitmask << 16)); /* DQRR_DCAP::DCAP_CI */
+ dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
+ dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
+}
+
+static inline u8 qm_dqrr_cdc_cci(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+#endif
+ return qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
+}
+
+static inline void qm_dqrr_cdc_cce_prefetch(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+#endif
+ qm_cl_invalidate(DQRR_CI);
+ qm_cl_touch_ro(DQRR_CI);
+}
+
+static inline u8 qm_dqrr_cdc_cce(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+#endif
+ return qm_cl_in(DQRR_CI) & (QM_DQRR_SIZE - 1);
+}
+
+static inline u8 qm_dqrr_get_ci(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
+#endif
+ return dqrr->ci;
+}
+
+static inline void qm_dqrr_park(struct qm_portal *portal, u8 idx)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
+#endif
+ qm_out(DQRR_DCAP, (0 << 8) | /* S */
+ (1 << 6) | /* PK */
+ (idx & (QM_DQRR_SIZE - 1))); /* DCAP_CI */
+}
+
+static inline void qm_dqrr_park_current(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
+#endif
+ qm_out(DQRR_DCAP, (0 << 8) | /* S */
+ (1 << 6) | /* PK */
+ DQRR_PTR2IDX(dqrr->cursor)); /* DCAP_CI */
+}
+
+static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
+{
+ qm_out(DQRR_SDQCR, sdqcr);
+}
+
+static inline u32 qm_dqrr_sdqcr_get(struct qm_portal *portal)
+{
+ return qm_in(DQRR_SDQCR);
+}
+
+static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
+{
+ qm_out(DQRR_VDQCR, vdqcr);
+}
+
+static inline u32 qm_dqrr_vdqcr_get(struct qm_portal *portal)
+{
+ return qm_in(DQRR_VDQCR);
+}
+
+static inline u8 qm_dqrr_get_ithresh(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ return dqrr->ithresh;
+}
+
+static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+ qm_out(DQRR_ITR, ithresh);
+}
+
+static inline u8 qm_dqrr_get_maxfill(struct qm_portal *portal)
+{
+ return (qm_in(CFG) & 0x00f00000) >> 20;
+}
+
+/* -------------- */
+/* --- MR API --- */
+
+#define MR_CARRYCLEAR(p) \
+ (void *)((unsigned long)(p) & (~(unsigned long)(QM_MR_SIZE << 6)))
+
+static inline u8 MR_PTR2IDX(const struct qm_mr_entry *e)
+{
+ return ((uintptr_t)e >> 6) & (QM_MR_SIZE - 1);
+}
+
+static inline const struct qm_mr_entry *MR_INC(const struct qm_mr_entry *e)
+{
+ return MR_CARRYCLEAR(e + 1);
+}
+
+static inline void qm_mr_finish(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+
+ if (mr->ci != MR_PTR2IDX(mr->cursor))
+ pr_crit("Ignoring completed MR entries\n");
+}
+
+static inline const struct qm_mr_entry *qm_mr_current(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+
+ if (!mr->fill)
+ return NULL;
+ return mr->cursor;
+}
+
+static inline u8 qm_mr_next(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+
+ DPAA_ASSERT(mr->fill);
+ mr->cursor = MR_INC(mr->cursor);
+ return --mr->fill;
+}
+
+static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
+{
+ register struct qm_mr *mr = &portal->mr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mr->cmode == qm_mr_cci);
+#endif
+ mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
+ qm_out(MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mr->cmode == qm_mr_cci);
+#endif
+ mr->ci = MR_PTR2IDX(mr->cursor);
+ qm_out(MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+ qm_out(MR_ITR, ithresh);
+}
+
+/* ------------------------------ */
+/* --- Management command API --- */
+static inline int qm_mc_init(struct qm_portal *portal)
+{
+ register struct qm_mc *mc = &portal->mc;
+
+ mc->cr = portal->addr.ce + QM_CL_CR;
+ mc->rr = portal->addr.ce + QM_CL_RR0;
+ mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) &
+ QM_MCC_VERB_VBIT) ? 0 : 1;
+ mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ mc->state = qman_mc_idle;
+#endif
+ return 0;
+}
+
+static inline void qm_mc_finish(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_mc *mc = &portal->mc;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mc->state == qman_mc_idle);
+ if (mc->state != qman_mc_idle)
+ pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline struct qm_mc_command *qm_mc_start(struct qm_portal *portal)
+{
+ register struct qm_mc *mc = &portal->mc;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mc->state == qman_mc_idle);
+ mc->state = qman_mc_user;
+#endif
+ dcbz_64(mc->cr);
+ return mc->cr;
+}
+
+static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
+{
+ register struct qm_mc *mc = &portal->mc;
+ struct qm_mc_result *rr = mc->rr + mc->rridx;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mc->state == qman_mc_user);
+#endif
+ lwsync();
+ mc->cr->__dont_write_directly__verb = myverb | mc->vbit;
+ dcbf(mc->cr);
+ dcbit_ro(rr);
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ mc->state = qman_mc_hw;
+#endif
+}
+
+static inline struct qm_mc_result *qm_mc_result(struct qm_portal *portal)
+{
+ register struct qm_mc *mc = &portal->mc;
+ struct qm_mc_result *rr = mc->rr + mc->rridx;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mc->state == qman_mc_hw);
+#endif
+ /* The inactive response register's verb byte always returns zero until
+ * its command is submitted and completed. This includes the valid-bit,
+ * in case you were wondering.
+ */
+ if (!__raw_readb(&rr->verb)) {
+ dcbit_ro(rr);
+ return NULL;
+ }
+ mc->rridx ^= 1;
+ mc->vbit ^= QM_MCC_VERB_VBIT;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ mc->state = qman_mc_idle;
+#endif
+ return rr;
+}
+
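+/*
+ * Illustrative sketch, not part of the driver: the management command
+ * sequence is always start -> fill -> commit -> poll for the result. This
+ * hypothetical helper issues QUERYFQ_NP, mirroring qman_shutdown_fq().
+ */
+static inline u8 example_query_fq_state(struct qm_portal *portal, u32 fqid)
+{
+	struct qm_mc_command *mcc;
+	struct qm_mc_result *mcr;
+
+	mcc = qm_mc_start(portal);
+	mcc->queryfq_np.fqid = cpu_to_be32(fqid);
+	qm_mc_commit(portal, QM_MCC_VERB_QUERYFQ_NP);
+	while (!(mcr = qm_mc_result(portal)))
+		cpu_relax();
+	return mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
+}
+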
+/* Portal interrupt register API */
+static inline void qm_isr_set_iperiod(struct qm_portal *portal, u16 iperiod)
+{
+ qm_out(ITPR, iperiod);
+}
+
+static inline u32 __qm_isr_read(struct qm_portal *portal, enum qm_isr_reg n)
+{
+#if defined(RTE_ARCH_ARM64)
+ return __qm_in(&portal->addr, QM_REG_ISR + (n << 6));
+#else
+ return __qm_in(&portal->addr, QM_REG_ISR + (n << 2));
+#endif
+}
+
+static inline void __qm_isr_write(struct qm_portal *portal, enum qm_isr_reg n,
+ u32 val)
+{
+#if defined(RTE_ARCH_ARM64)
+ __qm_out(&portal->addr, QM_REG_ISR + (n << 6), val);
+#else
+ __qm_out(&portal->addr, QM_REG_ISR + (n << 2), val);
+#endif
+}
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman_driver.c b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman_driver.c
new file mode 100644
index 00000000..f6ecd6b2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman_driver.c
@@ -0,0 +1,362 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#include <fsl_usd.h>
+#include <process.h>
+#include "qman_priv.h"
+#include <sys/ioctl.h>
+#include <rte_branch_prediction.h>
+
+/* Global variable containing revision id (even on non-control plane systems
+ * where CCSR isn't available).
+ */
+u16 qman_ip_rev;
+u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
+u16 qm_channel_caam = QMAN_CHANNEL_CAAM;
+u16 qm_channel_pme = QMAN_CHANNEL_PME;
+
+/* CCSR map address to access CCSR-based registers */
+static void *qman_ccsr_map;
+/* The qman clock frequency */
+static u32 qman_clk;
+
+static __thread int qmfd = -1;
+static __thread struct qm_portal_config qpcfg;
+static __thread struct dpaa_ioctl_portal_map map = {
+ .type = dpaa_portal_qman
+};
+
+static int fsl_qman_portal_init(uint32_t index, int is_shared)
+{
+ cpu_set_t cpuset;
+ struct qman_portal *portal;
+ int loop, ret;
+ struct dpaa_ioctl_irq_map irq_map;
+
+ /* Verify the thread's cpu-affinity */
+ ret = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
+ &cpuset);
+ if (ret) {
+ error(0, ret, "pthread_getaffinity_np()");
+ return ret;
+ }
+ qpcfg.cpu = -1;
+ for (loop = 0; loop < CPU_SETSIZE; loop++)
+ if (CPU_ISSET(loop, &cpuset)) {
+ if (qpcfg.cpu != -1) {
+ pr_err("Thread is not affine to 1 cpu\n");
+ return -EINVAL;
+ }
+ qpcfg.cpu = loop;
+ }
+ if (qpcfg.cpu == -1) {
+ pr_err("Bug in getaffinity handling!\n");
+ return -EINVAL;
+ }
+
+ /* Allocate and map a qman portal */
+ map.index = index;
+ ret = process_portal_map(&map);
+ if (ret) {
+ error(0, ret, "process_portal_map()");
+ return ret;
+ }
+ qpcfg.channel = map.channel;
+ qpcfg.pools = map.pools;
+ qpcfg.index = map.index;
+
+ /* Make the portal's cache-[enabled|inhibited] regions */
+ qpcfg.addr_virt[DPAA_PORTAL_CE] = map.addr.cena;
+ qpcfg.addr_virt[DPAA_PORTAL_CI] = map.addr.cinh;
+
+ qmfd = open(QMAN_PORTAL_IRQ_PATH, O_RDONLY);
+ if (qmfd == -1) {
+ pr_err("QMan irq init failed\n");
+ process_portal_unmap(&map.addr);
+ return -EBUSY;
+ }
+
+ qpcfg.is_shared = is_shared;
+ qpcfg.node = NULL;
+ qpcfg.irq = qmfd;
+
+ portal = qman_create_affine_portal(&qpcfg, NULL, 0);
+ if (!portal) {
+ pr_err("Qman portal initialisation failed (%d)\n",
+ qpcfg.cpu);
+ process_portal_unmap(&map.addr);
+ return -EBUSY;
+ }
+
+ irq_map.type = dpaa_portal_qman;
+ irq_map.portal_cinh = map.addr.cinh;
+ process_portal_irq_map(qmfd, &irq_map);
+ return 0;
+}
+
+static int fsl_qman_portal_finish(void)
+{
+ __maybe_unused const struct qm_portal_config *cfg;
+ int ret;
+
+ process_portal_irq_unmap(qmfd);
+
+ cfg = qman_destroy_affine_portal(NULL);
+ DPAA_BUG_ON(cfg != &qpcfg);
+ ret = process_portal_unmap(&map.addr);
+ if (ret)
+ error(0, ret, "process_portal_unmap()");
+ return ret;
+}
+
+int qman_thread_init(void)
+{
+ /* Convert from contiguous/virtual cpu numbering to real cpu when
+ * calling into the code that is dependent on the device naming.
+ */
+ return fsl_qman_portal_init(QBMAN_ANY_PORTAL_IDX, 0);
+}
+
+int qman_thread_finish(void)
+{
+ return fsl_qman_portal_finish();
+}
+
+void qman_thread_irq(void)
+{
+ qbman_invoke_irq(qpcfg.irq);
+
+ /* Now we need to uninhibit interrupts. This is the only code outside
+ * the regular portal driver that manipulates any portal register, so
+ * rather than breaking that encapsulation I am simply hard-coding the
+ * offset to the inhibit register here.
+ */
+ out_be32(qpcfg.addr_virt[DPAA_PORTAL_CI] + 0xe0c, 0);
+}
+
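+/*
+ * Illustrative sketch, not part of the driver: per-thread portal lifecycle.
+ * A data-path thread must be affine to exactly one CPU before calling
+ * qman_thread_init(), as enforced in fsl_qman_portal_init() above. The
+ * helper is hypothetical.
+ */
+static inline int example_thread_setup(void)
+{
+	int ret = qman_thread_init();
+
+	if (ret)
+		return ret;
+	/* ... run the data path on this thread's affine portal ... */
+	return qman_thread_finish();
+}
+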
+struct qman_portal *fsl_qman_portal_create(void)
+{
+ cpu_set_t cpuset;
+ struct qman_portal *res;
+
+ struct qm_portal_config *q_pcfg;
+ int loop, ret;
+ struct dpaa_ioctl_irq_map irq_map;
+ struct dpaa_ioctl_portal_map q_map = {0};
+ int q_fd;
+
+ q_pcfg = kzalloc((sizeof(struct qm_portal_config)), 0);
+ if (!q_pcfg) {
+ error(0, -1, "q_pcfg kzalloc failed");
+ return NULL;
+ }
+
+ /* Verify the thread's cpu-affinity */
+ ret = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
+ &cpuset);
+ if (ret) {
+ error(0, ret, "pthread_getaffinity_np()");
+ kfree(q_pcfg);
+ return NULL;
+ }
+
+ q_pcfg->cpu = -1;
+ for (loop = 0; loop < CPU_SETSIZE; loop++)
+ if (CPU_ISSET(loop, &cpuset)) {
+ if (q_pcfg->cpu != -1) {
+ pr_err("Thread is not affine to 1 cpu\n");
+ kfree(q_pcfg);
+ return NULL;
+ }
+ q_pcfg->cpu = loop;
+ }
+ if (q_pcfg->cpu == -1) {
+ pr_err("Bug in getaffinity handling!\n");
+ kfree(q_pcfg);
+ return NULL;
+ }
+
+ /* Allocate and map a qman portal */
+ q_map.type = dpaa_portal_qman;
+ q_map.index = QBMAN_ANY_PORTAL_IDX;
+ ret = process_portal_map(&q_map);
+ if (ret) {
+ error(0, ret, "process_portal_map()");
+ kfree(q_pcfg);
+ return NULL;
+ }
+ q_pcfg->channel = q_map.channel;
+ q_pcfg->pools = q_map.pools;
+ q_pcfg->index = q_map.index;
+
+ /* Make the portal's cache-[enabled|inhibited] regions */
+ q_pcfg->addr_virt[DPAA_PORTAL_CE] = q_map.addr.cena;
+ q_pcfg->addr_virt[DPAA_PORTAL_CI] = q_map.addr.cinh;
+
+ q_fd = open(QMAN_PORTAL_IRQ_PATH, O_RDONLY);
+ if (q_fd == -1) {
+ pr_err("QMan irq init failed\n");
+ goto err1;
+ }
+
+ q_pcfg->irq = q_fd;
+
+ res = qman_create_affine_portal(q_pcfg, NULL, true);
+ if (!res) {
+ pr_err("Qman portal initialisation failed (%d)\n",
+ q_pcfg->cpu);
+ goto err2;
+ }
+
+ irq_map.type = dpaa_portal_qman;
+ irq_map.portal_cinh = q_map.addr.cinh;
+ process_portal_irq_map(q_fd, &irq_map);
+
+ return res;
+err2:
+ close(q_fd);
+err1:
+ process_portal_unmap(&q_map.addr);
+ kfree(q_pcfg);
+ return NULL;
+}
+
+int fsl_qman_portal_destroy(struct qman_portal *qp)
+{
+ const struct qm_portal_config *cfg;
+ struct dpaa_portal_map addr;
+ int ret;
+
+ cfg = qman_destroy_affine_portal(qp);
+ kfree(qp);
+
+ process_portal_irq_unmap(cfg->irq);
+
+ addr.cena = cfg->addr_virt[DPAA_PORTAL_CE];
+ addr.cinh = cfg->addr_virt[DPAA_PORTAL_CI];
+
+ ret = process_portal_unmap(&addr);
+ if (ret)
+ pr_err("process_portal_unmap() (%d)\n", ret);
+
+ kfree((void *)cfg);
+
+ return ret;
+}
+
+int qman_global_init(void)
+{
+ const struct device_node *dt_node;
+ size_t lenp;
+ const u32 *chanid;
+ static int ccsr_map_fd;
+ const uint32_t *qman_addr;
+ uint64_t phys_addr;
+ uint64_t regs_size;
+ const u32 *clk;
+
+ static int done;
+
+ if (done)
+ return -EBUSY;
+
+ /* Use the device-tree to determine IP revision until something better
+ * is devised.
+ */
+ dt_node = of_find_compatible_node(NULL, NULL, "fsl,qman-portal");
+ if (!dt_node) {
+ pr_err("No qman portals available for any CPU\n");
+ return -ENODEV;
+ }
+ if (of_device_is_compatible(dt_node, "fsl,qman-portal-1.0") ||
+ of_device_is_compatible(dt_node, "fsl,qman-portal-1.0.0"))
+ pr_err("QMan rev1.0 on P4080 rev1 is not supported!\n");
+ else if (of_device_is_compatible(dt_node, "fsl,qman-portal-1.1") ||
+ of_device_is_compatible(dt_node, "fsl,qman-portal-1.1.0"))
+ qman_ip_rev = QMAN_REV11;
+ else if (of_device_is_compatible(dt_node, "fsl,qman-portal-1.2") ||
+ of_device_is_compatible(dt_node, "fsl,qman-portal-1.2.0"))
+ qman_ip_rev = QMAN_REV12;
+ else if (of_device_is_compatible(dt_node, "fsl,qman-portal-2.0") ||
+ of_device_is_compatible(dt_node, "fsl,qman-portal-2.0.0"))
+ qman_ip_rev = QMAN_REV20;
+ else if (of_device_is_compatible(dt_node, "fsl,qman-portal-3.0.0") ||
+ of_device_is_compatible(dt_node, "fsl,qman-portal-3.0.1"))
+ qman_ip_rev = QMAN_REV30;
+ else if (of_device_is_compatible(dt_node, "fsl,qman-portal-3.1.0") ||
+ of_device_is_compatible(dt_node, "fsl,qman-portal-3.1.1") ||
+ of_device_is_compatible(dt_node, "fsl,qman-portal-3.1.2") ||
+ of_device_is_compatible(dt_node, "fsl,qman-portal-3.1.3"))
+ qman_ip_rev = QMAN_REV31;
+ else if (of_device_is_compatible(dt_node, "fsl,qman-portal-3.2.0") ||
+ of_device_is_compatible(dt_node, "fsl,qman-portal-3.2.1"))
+ qman_ip_rev = QMAN_REV32;
+ else
+ qman_ip_rev = QMAN_REV11;
+
+ if (!qman_ip_rev) {
+ pr_err("Unknown qman portal version\n");
+ return -ENODEV;
+ }
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
+ qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
+ qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
+ qm_channel_pme = QMAN_CHANNEL_PME_REV3;
+ }
+
+ dt_node = of_find_compatible_node(NULL, NULL, "fsl,pool-channel-range");
+ if (!dt_node) {
+ pr_err("No qman pool channel range available\n");
+ return -ENODEV;
+ }
+ chanid = of_get_property(dt_node, "fsl,pool-channel-range", &lenp);
+ if (!chanid) {
+ pr_err("Can not get pool-channel-range property\n");
+ return -EINVAL;
+ }
+
+ /* get ccsr base */
+ dt_node = of_find_compatible_node(NULL, NULL, "fsl,qman");
+ if (!dt_node) {
+ pr_err("No qman device node available\n");
+ return -ENODEV;
+ }
+ qman_addr = of_get_address(dt_node, 0, &regs_size, NULL);
+ if (!qman_addr) {
+ pr_err("of_get_address cannot return qman address\n");
+ return -EINVAL;
+ }
+ phys_addr = of_translate_address(dt_node, qman_addr);
+ if (!phys_addr) {
+ pr_err("of_translate_address failed\n");
+ return -EINVAL;
+ }
+
+ ccsr_map_fd = open("/dev/mem", O_RDWR);
+ if (unlikely(ccsr_map_fd < 0)) {
+ pr_err("Can not open /dev/mem for qman ccsr map\n");
+ return ccsr_map_fd;
+ }
+
+ qman_ccsr_map = mmap(NULL, regs_size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, ccsr_map_fd, phys_addr);
+ if (qman_ccsr_map == MAP_FAILED) {
+ pr_err("Can not map qman ccsr base\n");
+ return -EINVAL;
+ }
+
+ clk = of_get_property(dt_node, "clock-frequency", NULL);
+ if (!clk)
+ pr_warn("Can't find Qman clock frequency\n");
+ else
+ qman_clk = be32_to_cpu(*clk);
+
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ return qman_setup_fq_lookup_table(CONFIG_FSL_QMAN_FQ_LOOKUP_MAX);
+#endif
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman_priv.h b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman_priv.h
new file mode 100644
index 00000000..02f6301f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman_priv.h
@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#ifndef __QMAN_PRIV_H
+#define __QMAN_PRIV_H
+
+#include "dpaa_sys.h"
+#include <fsl_qman.h>
+
+/* Congestion Groups */
+/*
+ * This wrapper represents a bit-array for the state of the 256 QMan congestion
+ * groups. It is also used as a *mask* for congestion groups, e.g. so we ignore
+ * those that don't concern us. We harness the structure and accessor details
+ * already used in the management command to query congestion groups.
+ */
+struct qman_cgrs {
+ struct __qm_mcr_querycongestion q;
+};
+
+static inline void qman_cgrs_init(struct qman_cgrs *c)
+{
+ memset(c, 0, sizeof(*c));
+}
+
+static inline void qman_cgrs_fill(struct qman_cgrs *c)
+{
+ memset(c, 0xff, sizeof(*c));
+}
+
+static inline int qman_cgrs_get(struct qman_cgrs *c, int num)
+{
+ return QM_MCR_QUERYCONGESTION(&c->q, num);
+}
+
+static inline void qman_cgrs_set(struct qman_cgrs *c, int num)
+{
+ c->q.state[__CGR_WORD(num)] |= (0x80000000 >> __CGR_SHIFT(num));
+}
+
+static inline void qman_cgrs_unset(struct qman_cgrs *c, int num)
+{
+ c->q.state[__CGR_WORD(num)] &= ~(0x80000000 >> __CGR_SHIFT(num));
+}
+
+static inline int qman_cgrs_next(struct qman_cgrs *c, int num)
+{
+ while ((++num < (int)__CGR_NUM) && !qman_cgrs_get(c, num))
+ ;
+ return num;
+}
+
+static inline void qman_cgrs_cp(struct qman_cgrs *dest,
+ const struct qman_cgrs *src)
+{
+ memcpy(dest, src, sizeof(*dest));
+}
+
+static inline void qman_cgrs_and(struct qman_cgrs *dest,
+ const struct qman_cgrs *a,
+ const struct qman_cgrs *b)
+{
+ int ret;
+ u32 *_d = dest->q.state;
+ const u32 *_a = a->q.state;
+ const u32 *_b = b->q.state;
+
+ for (ret = 0; ret < 8; ret++)
+ *(_d++) = *(_a++) & *(_b++);
+}
+
+static inline void qman_cgrs_xor(struct qman_cgrs *dest,
+ const struct qman_cgrs *a,
+ const struct qman_cgrs *b)
+{
+ int ret;
+ u32 *_d = dest->q.state;
+ const u32 *_a = a->q.state;
+ const u32 *_b = b->q.state;
+
+ for (ret = 0; ret < 8; ret++)
+ *(_d++) = *(_a++) ^ *(_b++);
+}
+
+/* used by CCSR and portal interrupt code */
+enum qm_isr_reg {
+ qm_isr_status = 0,
+ qm_isr_enable = 1,
+ qm_isr_disable = 2,
+ qm_isr_inhibit = 3
+};
+
+struct qm_portal_config {
+ /*
+ * Corenet portal addresses;
+ * [0]==cache-enabled, [1]==cache-inhibited.
+ */
+ void __iomem *addr_virt[2];
+ struct device_node *node;
+ /* Allow these to be joined in lists */
+ struct list_head list;
+ /* User-visible portal configuration settings */
+ /* If the caller enables DQRR stashing (and thus wishes to operate the
+ * portal from only one cpu), this is the logical CPU that the portal
+ * will stash to. Whether stashing is enabled or not, this setting is
+ * also used for any "core-affine" portals, ie. default portals
+ * associated to the corresponding cpu. -1 implies that there is no
+ * core affinity configured.
+ */
+ int cpu;
+ /* portal interrupt line */
+ int irq;
+ /* the unique index of this portal */
+ u32 index;
+ /* Is this portal shared? (If so, it has coarser locking and demuxes
+ * processing on behalf of other CPUs.).
+ */
+ int is_shared;
+ /* The portal's dedicated channel id, use this value for initialising
+ * frame queues to target this portal when scheduled.
+ */
+ u16 channel;
+ /* A mask of which pool channels this portal has dequeue access to
+ * (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask).
+ */
+ u32 pools;
+
+};
+
+/* Revision info (for errata and feature handling) */
+#define QMAN_REV11 0x0101
+#define QMAN_REV12 0x0102
+#define QMAN_REV20 0x0200
+#define QMAN_REV30 0x0300
+#define QMAN_REV31 0x0301
+#define QMAN_REV32 0x0302
+extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
+
+int qm_set_wpm(int wpm);
+int qm_get_wpm(int *wpm);
+
+struct qman_portal *qman_create_affine_portal(
+ const struct qm_portal_config *config,
+ const struct qman_cgrs *cgrs,
+ int alloc);
+const struct qm_portal_config *
+qman_destroy_affine_portal(struct qman_portal *q);
+
+struct qm_portal_config *qm_get_unused_portal(void);
+struct qm_portal_config *qm_get_unused_portal_idx(uint32_t idx);
+
+void qm_put_unused_portal(struct qm_portal_config *pcfg);
+void qm_set_liodns(struct qm_portal_config *pcfg);
+
+/* This CGR feature is supported by h/w and required by unit-tests and the
+ * debugfs hooks, so is implemented in the driver. However it allows an explicit
+ * corruption of h/w fields by s/w that are usually incorruptible (because the
+ * counters are usually maintained entirely within h/w). As such, we declare
+ * this API internally.
+ */
+int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
+ struct qm_mcr_cgrtestwrite *result);
+
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+/* If the fq object pointer is wider than the context_b field can hold,
+ * then a lookup table is required.
+ */
+int qman_setup_fq_lookup_table(size_t num_entries);
+#endif
+
+/* QMan s/w corenet portal, low-level i/face */
+
+/*
+ * For qm_dqrr_sdqcr_set(): choose one SOURCE. Choose one COUNT. Choose one
+ * dequeue TYPE. Choose TOKEN (8-bit).
+ * If SOURCE == CHANNELS,
+ * Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n).
+ * You can choose DEDICATED_PRECEDENCE if the portal channel should have
+ * priority.
+ * If SOURCE == SPECIFICWQ,
+ * Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
+ * channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
+ * work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
+ * same value.
+ */
+#define QM_SDQCR_SOURCE_CHANNELS 0x0
+#define QM_SDQCR_SOURCE_SPECIFICWQ 0x40000000
+#define QM_SDQCR_COUNT_EXACT1 0x0
+#define QM_SDQCR_COUNT_UPTO3 0x20000000
+#define QM_SDQCR_DEDICATED_PRECEDENCE 0x10000000
+#define QM_SDQCR_TYPE_MASK 0x03000000
+#define QM_SDQCR_TYPE_NULL 0x0
+#define QM_SDQCR_TYPE_PRIO_QOS 0x01000000
+#define QM_SDQCR_TYPE_ACTIVE_QOS 0x02000000
+#define QM_SDQCR_TYPE_ACTIVE 0x03000000
+#define QM_SDQCR_TOKEN_MASK 0x00ff0000
+#define QM_SDQCR_TOKEN_SET(v) (((v) & 0xff) << 16)
+#define QM_SDQCR_TOKEN_GET(v) (((v) >> 16) & 0xff)
+#define QM_SDQCR_CHANNELS_DEDICATED 0x00008000
+#define QM_SDQCR_SPECIFICWQ_MASK 0x000000f7
+#define QM_SDQCR_SPECIFICWQ_DEDICATED 0x00000000
+#define QM_SDQCR_SPECIFICWQ_POOL(n) ((n) << 4)
+#define QM_SDQCR_SPECIFICWQ_WQ(n) (n)
+
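+/*
+ * Illustrative sketch, not part of the driver: composing an SDQCR value that
+ * pulls up to 3 active frames from the portal's dedicated channel, mirroring
+ * the combination used by qman_shutdown_fq(). The macro name is
+ * hypothetical.
+ */
+#define EXAMPLE_SDQCR_DEDICATED_ACTIVE3 \
+	(QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 | \
+	 QM_SDQCR_TYPE_ACTIVE | QM_SDQCR_CHANNELS_DEDICATED)
+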
+#define QM_VDQCR_FQID_MASK 0x00ffffff
+#define QM_VDQCR_FQID(n) ((n) & QM_VDQCR_FQID_MASK)
+
+#define QM_EQCR_VERB_VBIT 0x80
+#define QM_EQCR_VERB_CMD_MASK 0x61 /* but only one value; */
+#define QM_EQCR_VERB_CMD_ENQUEUE 0x01
+#define QM_EQCR_VERB_COLOUR_MASK 0x18 /* 4 possible values; */
+#define QM_EQCR_VERB_COLOUR_GREEN 0x00
+#define QM_EQCR_VERB_COLOUR_YELLOW 0x08
+#define QM_EQCR_VERB_COLOUR_RED 0x10
+#define QM_EQCR_VERB_COLOUR_OVERRIDE 0x18
+#define QM_EQCR_VERB_INTERRUPT 0x04 /* on command consumption */
+#define QM_EQCR_VERB_ORP 0x02 /* enable order restoration */
+#define QM_EQCR_DCA_ENABLE 0x80
+#define QM_EQCR_DCA_PARK 0x40
+#define QM_EQCR_DCA_IDXMASK 0x0f /* "DQRR::idx" goes here */
+#define QM_EQCR_SEQNUM_NESN 0x8000 /* Advance NESN */
+#define QM_EQCR_SEQNUM_NLIS 0x4000 /* More fragments to come */
+#define QM_EQCR_SEQNUM_SEQMASK 0x3fff /* sequence number goes here */
+#define QM_EQCR_FQID_NULL	0	/* e.g. for an ORP seqnum hole */
+
+#define QM_MCC_VERB_VBIT 0x80
+#define QM_MCC_VERB_MASK 0x7f /* where the verb contains; */
+#define QM_MCC_VERB_INITFQ_PARKED 0x40
+#define QM_MCC_VERB_INITFQ_SCHED 0x41
+#define QM_MCC_VERB_QUERYFQ 0x44
+#define QM_MCC_VERB_QUERYFQ_NP 0x45 /* "non-programmable" fields */
+#define QM_MCC_VERB_QUERYWQ 0x46
+#define QM_MCC_VERB_QUERYWQ_DEDICATED 0x47
+#define QM_MCC_VERB_ALTER_SCHED 0x48 /* Schedule FQ */
+#define QM_MCC_VERB_ALTER_FE 0x49 /* Force Eligible FQ */
+#define QM_MCC_VERB_ALTER_RETIRE 0x4a /* Retire FQ */
+#define QM_MCC_VERB_ALTER_OOS 0x4b /* Take FQ out of service */
+#define QM_MCC_VERB_ALTER_FQXON 0x4d /* FQ XON */
+#define QM_MCC_VERB_ALTER_FQXOFF 0x4e /* FQ XOFF */
+#define QM_MCC_VERB_INITCGR 0x50
+#define QM_MCC_VERB_MODIFYCGR 0x51
+#define QM_MCC_VERB_CGRTESTWRITE 0x52
+#define QM_MCC_VERB_QUERYCGR 0x58
+#define QM_MCC_VERB_QUERYCONGESTION 0x59
+
+/*
+ * Used by all portal interrupt registers except 'inhibit'
+ * Channels with frame availability
+ */
+#define QM_PIRQ_DQAVAIL 0x0000ffff
+
+/* The DQAVAIL interrupt fields break down into these bits; */
+#define QM_DQAVAIL_PORTAL 0x8000 /* Portal channel */
+#define QM_DQAVAIL_POOL(n) (0x8000 >> (n)) /* Pool channel, n==[1..15] */
+#define QM_DQAVAIL_MASK 0xffff
+/* This mask contains all the "irqsource" bits visible to API users */
+#define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI)
+
+/* These are qm_<reg>_<verb>(). So for example, qm_disable_write() means "write
+ * the disable register" rather than "disable the ability to write".
+ */
+#define qm_isr_status_read(qm) __qm_isr_read(qm, qm_isr_status)
+#define qm_isr_status_clear(qm, m) __qm_isr_write(qm, qm_isr_status, m)
+#define qm_isr_enable_read(qm) __qm_isr_read(qm, qm_isr_enable)
+#define qm_isr_enable_write(qm, v) __qm_isr_write(qm, qm_isr_enable, v)
+#define qm_isr_disable_read(qm) __qm_isr_read(qm, qm_isr_disable)
+#define qm_isr_disable_write(qm, v) __qm_isr_write(qm, qm_isr_disable, v)
+/* TODO: unfortunate name-clash here, reword? */
+#define qm_isr_inhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 1)
+#define qm_isr_uninhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 0)
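+
+/* Illustrative sketch, assuming a valid struct qm_portal *p and the
+ * QM_PIRQ_DQRI source bit from fsl_qman.h: clear all pending status bits,
+ * then enable only dequeue-ring interrupts. E.g.:
+ *
+ *	qm_isr_status_clear(p, qm_isr_status_read(p));
+ *	qm_isr_enable_write(p, QM_PIRQ_DQRI);
+ */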
+
+#define QMAN_PORTAL_IRQ_PATH "/dev/fsl-usdpaa-irq"
+
+#endif /* _QMAN_PRIV_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/dpaa_bus.c b/src/spdk/dpdk/drivers/bus/dpaa/dpaa_bus.c
new file mode 100644
index 00000000..16fabd1b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/dpaa_bus.c
@@ -0,0 +1,646 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2017 NXP
+ *
+ */
+/* System headers */
+#include <stdio.h>
+#include <inttypes.h>
+#include <unistd.h>
+#include <limits.h>
+#include <sched.h>
+#include <signal.h>
+#include <pthread.h>
+#include <sys/types.h>
+#include <sys/syscall.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+#include <rte_bus.h>
+#include <rte_mbuf_pool_ops.h>
+
+#include <rte_dpaa_bus.h>
+#include <rte_dpaa_logs.h>
+
+#include <fsl_usd.h>
+#include <fsl_qman.h>
+#include <fsl_bman.h>
+#include <of.h>
+#include <netcfg.h>
+
+int dpaa_logtype_bus;
+int dpaa_logtype_mempool;
+int dpaa_logtype_pmd;
+int dpaa_logtype_eventdev;
+
+struct rte_dpaa_bus rte_dpaa_bus;
+struct netcfg_info *dpaa_netcfg;
+
+/* Variable to hold the portal key once it has been created. */
+static pthread_key_t dpaa_portal_key;
+
+unsigned int dpaa_svr_family;
+
+#define FSL_DPAA_BUS_NAME dpaa_bus
+
+RTE_DEFINE_PER_LCORE(bool, dpaa_io);
+RTE_DEFINE_PER_LCORE(struct dpaa_portal_dqrr, held_bufs);
+
+static int
+compare_dpaa_devices(struct rte_dpaa_device *dev1,
+ struct rte_dpaa_device *dev2)
+{
+ int comp = 0;
+
+	/* Segregating ETH from SEC devices */
+ if (dev1->device_type > dev2->device_type)
+ comp = 1;
+ else if (dev1->device_type < dev2->device_type)
+ comp = -1;
+ else
+ comp = 0;
+
+ if ((comp != 0) || (dev1->device_type != FSL_DPAA_ETH))
+ return comp;
+
+ if (dev1->id.fman_id > dev2->id.fman_id) {
+ comp = 1;
+ } else if (dev1->id.fman_id < dev2->id.fman_id) {
+ comp = -1;
+ } else {
+ /* FMAN ids match, check for mac_id */
+ if (dev1->id.mac_id > dev2->id.mac_id)
+ comp = 1;
+ else if (dev1->id.mac_id < dev2->id.mac_id)
+ comp = -1;
+ else
+ comp = 0;
+ }
+
+ return comp;
+}
+
+static inline void
+dpaa_add_to_device_list(struct rte_dpaa_device *newdev)
+{
+ int comp, inserted = 0;
+ struct rte_dpaa_device *dev = NULL;
+ struct rte_dpaa_device *tdev = NULL;
+
+ TAILQ_FOREACH_SAFE(dev, &rte_dpaa_bus.device_list, next, tdev) {
+ comp = compare_dpaa_devices(newdev, dev);
+ if (comp < 0) {
+ TAILQ_INSERT_BEFORE(dev, newdev, next);
+ inserted = 1;
+ break;
+ }
+ }
+
+ if (!inserted)
+ TAILQ_INSERT_TAIL(&rte_dpaa_bus.device_list, newdev, next);
+}
+
+/*
+ * Reads the SEC device from DTS
+ * Returns -1 if SEC devices are not available, 0 otherwise
+ */
+static inline int
+dpaa_sec_available(void)
+{
+ const struct device_node *caam_node;
+
+ for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
+ return 0;
+ }
+
+ return -1;
+}
+
+static void dpaa_clean_device_list(void);
+
+static struct rte_devargs *
+dpaa_devargs_lookup(struct rte_dpaa_device *dev)
+{
+ struct rte_devargs *devargs;
+ char dev_name[32];
+
+ RTE_EAL_DEVARGS_FOREACH("dpaa_bus", devargs) {
+ devargs->bus->parse(devargs->name, &dev_name);
+ if (strcmp(dev_name, dev->device.name) == 0) {
+ DPAA_BUS_INFO("**Devargs matched %s", dev_name);
+ return devargs;
+ }
+ }
+ return NULL;
+}
+
+static int
+dpaa_create_device_list(void)
+{
+ int i;
+ int ret;
+ struct rte_dpaa_device *dev;
+ struct fm_eth_port_cfg *cfg;
+ struct fman_if *fman_intf;
+
+ /* Creating Ethernet Devices */
+ for (i = 0; i < dpaa_netcfg->num_ethports; i++) {
+ dev = calloc(1, sizeof(struct rte_dpaa_device));
+ if (!dev) {
+ DPAA_BUS_LOG(ERR, "Failed to allocate ETH devices");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ cfg = &dpaa_netcfg->port_cfg[i];
+ fman_intf = cfg->fman_if;
+
+ /* Device identifiers */
+ dev->id.fman_id = fman_intf->fman_idx + 1;
+ dev->id.mac_id = fman_intf->mac_idx;
+ dev->device_type = FSL_DPAA_ETH;
+ dev->id.dev_id = i;
+
+ /* Create device name */
+ memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
+ sprintf(dev->name, "fm%d-mac%d", (fman_intf->fman_idx + 1),
+ fman_intf->mac_idx);
+ DPAA_BUS_LOG(INFO, "%s netdev added", dev->name);
+ dev->device.name = dev->name;
+ dev->device.devargs = dpaa_devargs_lookup(dev);
+
+ dpaa_add_to_device_list(dev);
+ }
+
+ rte_dpaa_bus.device_count = i;
+
+	/* Unlike the ETH case, a fixed number of SEC devices
+	 * (RTE_LIBRTE_DPAA_MAX_CRYPTODEV) is created, and only if the "sec"
+	 * property is found in the device tree. Logically there is no limit
+	 * on the number of devices (QI interfaces) that can be created.
+ */
+
+ if (dpaa_sec_available()) {
+ DPAA_BUS_LOG(INFO, "DPAA SEC devices are not available");
+ return 0;
+ }
+
+ /* Creating SEC Devices */
+ for (i = 0; i < RTE_LIBRTE_DPAA_MAX_CRYPTODEV; i++) {
+ dev = calloc(1, sizeof(struct rte_dpaa_device));
+ if (!dev) {
+ DPAA_BUS_LOG(ERR, "Failed to allocate SEC devices");
+ ret = -1;
+ goto cleanup;
+ }
+
+ dev->device_type = FSL_DPAA_CRYPTO;
+ dev->id.dev_id = rte_dpaa_bus.device_count + i;
+
+		/* Even though RTE_CRYPTODEV_NAME_MAX_LEN is the valid length
+		 * of a crypto PMD name, RTE_ETH_NAME_MAX_LEN is used as that
+		 * is the size allocated for dev->name.
+ */
+ memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
+ sprintf(dev->name, "dpaa-sec%d", i);
+ DPAA_BUS_LOG(INFO, "%s cryptodev added", dev->name);
+ dev->device.name = dev->name;
+ dev->device.devargs = dpaa_devargs_lookup(dev);
+
+ dpaa_add_to_device_list(dev);
+ }
+
+ rte_dpaa_bus.device_count += i;
+
+ return 0;
+
+cleanup:
+ dpaa_clean_device_list();
+ return ret;
+}
+
+static void
+dpaa_clean_device_list(void)
+{
+ struct rte_dpaa_device *dev = NULL;
+ struct rte_dpaa_device *tdev = NULL;
+
+ TAILQ_FOREACH_SAFE(dev, &rte_dpaa_bus.device_list, next, tdev) {
+ TAILQ_REMOVE(&rte_dpaa_bus.device_list, dev, next);
+ free(dev);
+ dev = NULL;
+ }
+}
+
+int rte_dpaa_portal_init(void *arg)
+{
+ cpu_set_t cpuset;
+ pthread_t id;
+ uint32_t cpu = rte_lcore_id();
+ int ret;
+ struct dpaa_portal *dpaa_io_portal;
+
+ BUS_INIT_FUNC_TRACE();
+
+ if ((size_t)arg == 1 || cpu == LCORE_ID_ANY)
+ cpu = rte_get_master_lcore();
+	/* if the core id is not supported */
+	else if (cpu >= RTE_MAX_LCORE)
+		return -1;
+
+ /* Set CPU affinity for this thread */
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu, &cpuset);
+ id = pthread_self();
+ ret = pthread_setaffinity_np(id, sizeof(cpu_set_t), &cpuset);
+ if (ret) {
+ DPAA_BUS_LOG(ERR, "pthread_setaffinity_np failed on "
+ "core :%d with ret: %d", cpu, ret);
+ return ret;
+ }
+
+ /* Initialise bman thread portals */
+ ret = bman_thread_init();
+ if (ret) {
+ DPAA_BUS_LOG(ERR, "bman_thread_init failed on "
+ "core %d with ret: %d", cpu, ret);
+ return ret;
+ }
+
+ DPAA_BUS_LOG(DEBUG, "BMAN thread initialized");
+
+ /* Initialise qman thread portals */
+ ret = qman_thread_init();
+ if (ret) {
+		DPAA_BUS_LOG(ERR, "qman_thread_init failed on "
+ "core %d with ret: %d", cpu, ret);
+ bman_thread_finish();
+ return ret;
+ }
+
+ DPAA_BUS_LOG(DEBUG, "QMAN thread initialized");
+
+ dpaa_io_portal = rte_malloc(NULL, sizeof(struct dpaa_portal),
+ RTE_CACHE_LINE_SIZE);
+ if (!dpaa_io_portal) {
+ DPAA_BUS_LOG(ERR, "Unable to allocate memory");
+ bman_thread_finish();
+ qman_thread_finish();
+ return -ENOMEM;
+ }
+
+ dpaa_io_portal->qman_idx = qman_get_portal_index();
+ dpaa_io_portal->bman_idx = bman_get_portal_index();
+ dpaa_io_portal->tid = syscall(SYS_gettid);
+
+ ret = pthread_setspecific(dpaa_portal_key, (void *)dpaa_io_portal);
+ if (ret) {
+ DPAA_BUS_LOG(ERR, "pthread_setspecific failed on "
+ "core %d with ret: %d", cpu, ret);
+ dpaa_portal_finish(NULL);
+
+ return ret;
+ }
+
+ RTE_PER_LCORE(dpaa_io) = true;
+
+	DPAA_BUS_LOG(DEBUG, "DPAA portal initialized");
+
+ return 0;
+}
+
+int
+rte_dpaa_portal_fq_init(void *arg, struct qman_fq *fq)
+{
+ /* Affine above created portal with channel*/
+ u32 sdqcr;
+ struct qman_portal *qp;
+ int ret;
+
+ if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ ret = rte_dpaa_portal_init(arg);
+ if (ret < 0) {
+ DPAA_BUS_LOG(ERR, "portal initialization failure");
+ return ret;
+ }
+ }
+
+ /* Initialise qman specific portals */
+ qp = fsl_qman_portal_create();
+ if (!qp) {
+ DPAA_BUS_LOG(ERR, "Unable to alloc fq portal");
+ return -1;
+ }
+ fq->qp = qp;
+ sdqcr = QM_SDQCR_CHANNELS_POOL_CONV(fq->ch_id);
+ qman_static_dequeue_add(sdqcr, qp);
+
+ return 0;
+}
+
+int rte_dpaa_portal_fq_close(struct qman_fq *fq)
+{
+ return fsl_qman_portal_destroy(fq->qp);
+}
+
+void
+dpaa_portal_finish(void *arg)
+{
+ struct dpaa_portal *dpaa_io_portal = (struct dpaa_portal *)arg;
+
+ if (!dpaa_io_portal) {
+ DPAA_BUS_LOG(DEBUG, "Portal already cleaned");
+ return;
+ }
+
+ bman_thread_finish();
+ qman_thread_finish();
+
+ pthread_setspecific(dpaa_portal_key, NULL);
+
+ rte_free(dpaa_io_portal);
+ dpaa_io_portal = NULL;
+
+ RTE_PER_LCORE(dpaa_io) = false;
+}
+
+static int
+rte_dpaa_bus_parse(const char *name, void *out_name)
+{
+ int i, j;
+ int max_fman = 2, max_macs = 16;
+ char *sep = strchr(name, ':');
+
+ if (strncmp(name, RTE_STR(FSL_DPAA_BUS_NAME),
+ strlen(RTE_STR(FSL_DPAA_BUS_NAME)))) {
+ return -EINVAL;
+ }
+
+ if (!sep) {
+ DPAA_BUS_ERR("Incorrect device name observed");
+ return -EINVAL;
+ }
+
+ sep = (char *) (sep + 1);
+
+ for (i = 0; i < max_fman; i++) {
+ for (j = 0; j < max_macs; j++) {
+ char fm_name[16];
+ snprintf(fm_name, 16, "fm%d-mac%d", i, j);
+ if (strcmp(fm_name, sep) == 0) {
+ if (out_name)
+ strcpy(out_name, sep);
+ return 0;
+ }
+ }
+ }
+
+ for (i = 0; i < RTE_LIBRTE_DPAA_MAX_CRYPTODEV; i++) {
+ char sec_name[16];
+
+ snprintf(sec_name, 16, "dpaa-sec%d", i);
+ if (strcmp(sec_name, sep) == 0) {
+ if (out_name)
+ strcpy(out_name, sep);
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
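+
+/* Illustrative examples of the name format accepted above:
+ * "dpaa_bus:fm1-mac3" and "dpaa_bus:dpaa-sec1" both parse successfully,
+ * while a name lacking the "dpaa_bus:" prefix or a recognised "fmX-macY" /
+ * "dpaa-secN" suffix is rejected with -EINVAL.
+ */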
+
+#define DPAA_DEV_PATH1 "/sys/devices/platform/soc/soc:fsl,dpaa"
+#define DPAA_DEV_PATH2 "/sys/devices/platform/fsl,dpaa"
+
+static int
+rte_dpaa_bus_scan(void)
+{
+ int ret;
+
+ BUS_INIT_FUNC_TRACE();
+
+ if ((access(DPAA_DEV_PATH1, F_OK) != 0) &&
+ (access(DPAA_DEV_PATH2, F_OK) != 0)) {
+ RTE_LOG(DEBUG, EAL, "DPAA Bus not present. Skipping.\n");
+ return 0;
+ }
+
+ /* Load the device-tree driver */
+ ret = of_init();
+ if (ret) {
+ DPAA_BUS_LOG(ERR, "of_init failed with ret: %d", ret);
+ return -1;
+ }
+
+ /* Get the interface configurations from device-tree */
+ dpaa_netcfg = netcfg_acquire();
+ if (!dpaa_netcfg) {
+ DPAA_BUS_LOG(ERR, "netcfg_acquire failed");
+ return -EINVAL;
+ }
+
+ RTE_LOG(NOTICE, EAL, "DPAA Bus Detected\n");
+
+ if (!dpaa_netcfg->num_ethports) {
+ DPAA_BUS_LOG(INFO, "no network interfaces available");
+ /* This is not an error */
+ return 0;
+ }
+
+#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
+ dump_netcfg(dpaa_netcfg);
+#endif
+
+ DPAA_BUS_LOG(DEBUG, "Number of ethernet devices = %d",
+ dpaa_netcfg->num_ethports);
+ ret = dpaa_create_device_list();
+ if (ret) {
+ DPAA_BUS_LOG(ERR, "Unable to create device list. (%d)", ret);
+ return ret;
+ }
+
+	/* Create the key, supplying a destructor that will be invoked
+	 * when a portal-affine thread is deleted.
+ */
+ ret = pthread_key_create(&dpaa_portal_key, dpaa_portal_finish);
+ if (ret) {
+ DPAA_BUS_LOG(DEBUG, "Unable to create pthread key. (%d)", ret);
+ dpaa_clean_device_list();
+ return ret;
+ }
+
+ return 0;
+}
+
+/* register a dpaa bus based dpaa driver */
+void
+rte_dpaa_driver_register(struct rte_dpaa_driver *driver)
+{
+ RTE_VERIFY(driver);
+
+ BUS_INIT_FUNC_TRACE();
+
+ TAILQ_INSERT_TAIL(&rte_dpaa_bus.driver_list, driver, next);
+ /* Update Bus references */
+ driver->dpaa_bus = &rte_dpaa_bus;
+}
+
+/* un-register a dpaa bus based dpaa driver */
+void
+rte_dpaa_driver_unregister(struct rte_dpaa_driver *driver)
+{
+ struct rte_dpaa_bus *dpaa_bus;
+
+ BUS_INIT_FUNC_TRACE();
+
+ dpaa_bus = driver->dpaa_bus;
+
+ TAILQ_REMOVE(&dpaa_bus->driver_list, driver, next);
+ /* Update Bus references */
+ driver->dpaa_bus = NULL;
+}
+
+static int
+rte_dpaa_device_match(struct rte_dpaa_driver *drv,
+ struct rte_dpaa_device *dev)
+{
+ if (!drv || !dev) {
+ DPAA_BUS_DEBUG("Invalid drv or dev received.");
+ return -1;
+ }
+
+ if (drv->drv_type == dev->device_type)
+ return 0;
+
+ return -1;
+}
+
+static int
+rte_dpaa_bus_probe(void)
+{
+ int ret = -1;
+ struct rte_dpaa_device *dev;
+ struct rte_dpaa_driver *drv;
+ FILE *svr_file = NULL;
+ unsigned int svr_ver;
+ int probe_all = rte_dpaa_bus.bus.conf.scan_mode != RTE_BUS_SCAN_WHITELIST;
+
+ svr_file = fopen(DPAA_SOC_ID_FILE, "r");
+ if (svr_file) {
+ if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
+ dpaa_svr_family = svr_ver & SVR_MASK;
+ fclose(svr_file);
+ }
+
+ /* For each registered driver, and device, call the driver->probe */
+ TAILQ_FOREACH(dev, &rte_dpaa_bus.device_list, next) {
+ TAILQ_FOREACH(drv, &rte_dpaa_bus.driver_list, next) {
+ ret = rte_dpaa_device_match(drv, dev);
+ if (ret)
+ continue;
+
+ if (!drv->probe ||
+ (dev->device.devargs &&
+ dev->device.devargs->policy == RTE_DEV_BLACKLISTED))
+ continue;
+
+ if (probe_all ||
+ (dev->device.devargs &&
+ dev->device.devargs->policy ==
+ RTE_DEV_WHITELISTED)) {
+ ret = drv->probe(drv, dev);
+ if (ret)
+ DPAA_BUS_ERR("Unable to probe.\n");
+ }
+ break;
+ }
+ }
+
+ /* Register DPAA mempool ops only if any DPAA device has
+ * been detected.
+ */
+ if (!TAILQ_EMPTY(&rte_dpaa_bus.device_list))
+ rte_mbuf_set_platform_mempool_ops(DPAA_MEMPOOL_OPS_NAME);
+
+ return 0;
+}
+
+static struct rte_device *
+rte_dpaa_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
+ const void *data)
+{
+ struct rte_dpaa_device *dev;
+
+ TAILQ_FOREACH(dev, &rte_dpaa_bus.device_list, next) {
+ if (start && &dev->device == start) {
+ start = NULL; /* starting point found */
+ continue;
+ }
+
+ if (cmp(&dev->device, data) == 0)
+ return &dev->device;
+ }
+
+ return NULL;
+}
+
+/*
+ * Get the IOMMU class of DPAA devices on the bus.
+ */
+static enum rte_iova_mode
+rte_dpaa_get_iommu_class(void)
+{
+ if ((access(DPAA_DEV_PATH1, F_OK) != 0) &&
+ (access(DPAA_DEV_PATH2, F_OK) != 0)) {
+ return RTE_IOVA_DC;
+ }
+ return RTE_IOVA_PA;
+}
+
+struct rte_dpaa_bus rte_dpaa_bus = {
+ .bus = {
+ .scan = rte_dpaa_bus_scan,
+ .probe = rte_dpaa_bus_probe,
+ .parse = rte_dpaa_bus_parse,
+ .find_device = rte_dpaa_find_device,
+ .get_iommu_class = rte_dpaa_get_iommu_class,
+ },
+ .device_list = TAILQ_HEAD_INITIALIZER(rte_dpaa_bus.device_list),
+ .driver_list = TAILQ_HEAD_INITIALIZER(rte_dpaa_bus.driver_list),
+ .device_count = 0,
+};
+
+RTE_REGISTER_BUS(FSL_DPAA_BUS_NAME, rte_dpaa_bus.bus);
+
+RTE_INIT(dpaa_init_log)
+{
+ dpaa_logtype_bus = rte_log_register("bus.dpaa");
+ if (dpaa_logtype_bus >= 0)
+ rte_log_set_level(dpaa_logtype_bus, RTE_LOG_NOTICE);
+
+ dpaa_logtype_mempool = rte_log_register("mempool.dpaa");
+ if (dpaa_logtype_mempool >= 0)
+ rte_log_set_level(dpaa_logtype_mempool, RTE_LOG_NOTICE);
+
+ dpaa_logtype_pmd = rte_log_register("pmd.net.dpaa");
+ if (dpaa_logtype_pmd >= 0)
+ rte_log_set_level(dpaa_logtype_pmd, RTE_LOG_NOTICE);
+
+ dpaa_logtype_eventdev = rte_log_register("pmd.event.dpaa");
+ if (dpaa_logtype_eventdev >= 0)
+ rte_log_set_level(dpaa_logtype_eventdev, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/include/compat.h b/src/spdk/dpdk/drivers/bus/dpaa/include/compat.h
new file mode 100644
index 00000000..92241d23
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/include/compat.h
@@ -0,0 +1,384 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2011 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ */
+
+#ifndef __COMPAT_H
+#define __COMPAT_H
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+
+#include <sched.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <pthread.h>
+#include <linux/types.h>
+#include <stdbool.h>
+#include <ctype.h>
+#include <malloc.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <limits.h>
+#include <assert.h>
+#include <dirent.h>
+#include <inttypes.h>
+#include <error.h>
+#include <rte_byteorder.h>
+#include <rte_atomic.h>
+#include <rte_spinlock.h>
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_cycles.h>
+
+/* The following definitions are primarily to allow the single-source driver
+ * interfaces to be included by arbitrary program code. I.e., for interfaces
+ * are also available in kernel-space, these definitions provide compatibility
+ * with certain attributes and types used in those interfaces.
+ */
+
+/* Required compiler attributes */
+#ifndef __maybe_unused
+#define __maybe_unused __rte_unused
+#endif
+#ifndef __always_unused
+#define __always_unused __rte_unused
+#endif
+#ifndef __packed
+#define __packed __rte_packed
+#endif
+#define noinline __attribute__((noinline))
+
+#define L1_CACHE_BYTES 64
+#define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
+#define __stringify_1(x) #x
+#define __stringify(x) __stringify_1(x)
+
+#ifdef ARRAY_SIZE
+#undef ARRAY_SIZE
+#endif
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+/* Debugging */
+#define prflush(fmt, args...) \
+ do { \
+ printf(fmt, ##args); \
+ fflush(stdout); \
+ } while (0)
+
+#define pr_crit(fmt, args...) prflush("CRIT:" fmt, ##args)
+#define pr_err(fmt, args...) prflush("ERR:" fmt, ##args)
+#define pr_warn(fmt, args...) prflush("WARN:" fmt, ##args)
+#define pr_info(fmt, args...) prflush(fmt, ##args)
+
+#ifdef RTE_LIBRTE_DPAA_DEBUG_BUS
+#ifdef pr_debug
+#undef pr_debug
+#endif
+#define pr_debug(fmt, args...) printf(fmt, ##args)
+#else
+#define pr_debug(fmt, args...) {}
+#endif
+
+#define DPAA_BUG_ON(x) RTE_ASSERT(x)
+
+/* Required types */
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+typedef uint64_t dma_addr_t;
+typedef cpu_set_t cpumask_t;
+typedef uint32_t phandle;
+typedef uint32_t gfp_t;
+typedef uint32_t irqreturn_t;
+
+#define IRQ_HANDLED 0
+#define request_irq qbman_request_irq
+#define free_irq qbman_free_irq
+
+#define __iomem
+#define GFP_KERNEL 0
+#define __raw_readb(p) (*(const volatile unsigned char *)(p))
+#define __raw_readl(p) (*(const volatile unsigned int *)(p))
+#define __raw_writel(v, p) {*(volatile unsigned int *)(p) = (v); }
+
+/* to be used as an upper-limit only */
+#define NR_CPUS 64
+
+/* Waitqueue stuff */
+typedef struct { } wait_queue_head_t;
+#define DECLARE_WAIT_QUEUE_HEAD(x) int dummy_##x __always_unused
+#define wake_up(x) do { } while (0)
+
+/* I/O operations */
+static inline u32 in_be32(volatile void *__p)
+{
+ volatile u32 *p = __p;
+ return rte_be_to_cpu_32(*p);
+}
+
+static inline void out_be32(volatile void *__p, u32 val)
+{
+ volatile u32 *p = __p;
+ *p = rte_cpu_to_be_32(val);
+}
+
+#define hwsync() rte_rmb()
+#define lwsync() rte_wmb()
+
+#define dcbt_ro(p) __builtin_prefetch(p, 0)
+#define dcbt_rw(p) __builtin_prefetch(p, 1)
+
+#if defined(RTE_ARCH_ARM64)
+#define dcbz(p) { asm volatile("dc zva, %0" : : "r" (p) : "memory"); }
+#define dcbz_64(p) dcbz(p)
+#define dcbf(p) { asm volatile("dc cvac, %0" : : "r"(p) : "memory"); }
+#define dcbf_64(p) dcbf(p)
+#define dccivac(p) { asm volatile("dc civac, %0" : : "r"(p) : "memory"); }
+
+#define dcbit_ro(p) \
+ do { \
+ dccivac(p); \
+ asm volatile("prfm pldl1keep, [%0, #64]" : : "r" (p)); \
+ } while (0)
+
+#elif defined(RTE_ARCH_ARM)
+#define dcbz(p) memset((p), 0, 32)
+#define dcbz_64(p) memset((p), 0, 64)
+#define dcbf(p) RTE_SET_USED(p)
+#define dcbf_64(p) dcbf(p)
+#define dccivac(p) RTE_SET_USED(p)
+#define dcbit_ro(p) RTE_SET_USED(p)
+
+#else
+#define dcbz(p) RTE_SET_USED(p)
+#define dcbz_64(p) dcbz(p)
+#define dcbf(p) RTE_SET_USED(p)
+#define dcbf_64(p) dcbf(p)
+#define dccivac(p) RTE_SET_USED(p)
+#define dcbit_ro(p) RTE_SET_USED(p)
+#endif
+
+#define barrier() { asm volatile ("" : : : "memory"); }
+#define cpu_relax barrier
+
+#if defined(RTE_ARCH_ARM64)
+static inline uint64_t mfatb(void)
+{
+ uint64_t ret, ret_new, timeout = 200;
+
+ asm volatile ("mrs %0, cntvct_el0" : "=r" (ret));
+ asm volatile ("mrs %0, cntvct_el0" : "=r" (ret_new));
+ while (ret != ret_new && timeout--) {
+ ret = ret_new;
+ asm volatile ("mrs %0, cntvct_el0" : "=r" (ret_new));
+ }
+ DPAA_BUG_ON(!timeout && (ret != ret_new));
+ return ret * 64;
+}
+#else
+
+#define mfatb rte_rdtsc
+
+#endif
+
+/* Spin for a few cycles without bothering the bus */
+static inline void cpu_spin(int cycles)
+{
+ uint64_t now = mfatb();
+
+ while (mfatb() < (now + cycles))
+ ;
+}
+
+/* Qman/Bman API inlines and macros; */
+#ifdef lower_32_bits
+#undef lower_32_bits
+#endif
+#define lower_32_bits(x) ((u32)(x))
+
+#ifdef upper_32_bits
+#undef upper_32_bits
+#endif
+#define upper_32_bits(x) ((u32)(((x) >> 16) >> 16))
+
+/*
+ * Swap bytes of a 48-bit value.
+ */
+static inline uint64_t
+__bswap_48(uint64_t x)
+{
+ return ((x & 0x0000000000ffULL) << 40) |
+ ((x & 0x00000000ff00ULL) << 24) |
+ ((x & 0x000000ff0000ULL) << 8) |
+ ((x & 0x0000ff000000ULL) >> 8) |
+ ((x & 0x00ff00000000ULL) >> 24) |
+ ((x & 0xff0000000000ULL) >> 40);
+}
+
+/*
+ * Swap bytes of a 40-bit value.
+ */
+static inline uint64_t
+__bswap_40(uint64_t x)
+{
+ return ((x & 0x00000000ffULL) << 32) |
+ ((x & 0x000000ff00ULL) << 16) |
+ ((x & 0x0000ff0000ULL)) |
+ ((x & 0x00ff000000ULL) >> 16) |
+ ((x & 0xff00000000ULL) >> 32);
+}
+
+/*
+ * Swap bytes of a 24-bit value.
+ */
+static inline uint32_t
+__bswap_24(uint32_t x)
+{
+ return ((x & 0x0000ffULL) << 16) |
+ ((x & 0x00ff00ULL)) |
+ ((x & 0xff0000ULL) >> 16);
+}
+
+#define be64_to_cpu(x) rte_be_to_cpu_64(x)
+#define be32_to_cpu(x) rte_be_to_cpu_32(x)
+#define be16_to_cpu(x) rte_be_to_cpu_16(x)
+
+#define cpu_to_be64(x) rte_cpu_to_be_64(x)
+#define cpu_to_be32(x) rte_cpu_to_be_32(x)
+#define cpu_to_be16(x) rte_cpu_to_be_16(x)
+
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+
+#define cpu_to_be48(x) __bswap_48(x)
+#define be48_to_cpu(x) __bswap_48(x)
+
+#define cpu_to_be40(x) __bswap_40(x)
+#define be40_to_cpu(x) __bswap_40(x)
+
+#define cpu_to_be24(x) __bswap_24(x)
+#define be24_to_cpu(x) __bswap_24(x)
+
+#else /* RTE_BIG_ENDIAN */
+
+#define cpu_to_be48(x) (x)
+#define be48_to_cpu(x) (x)
+
+#define cpu_to_be40(x) (x)
+#define be40_to_cpu(x) (x)
+
+#define cpu_to_be24(x) (x)
+#define be24_to_cpu(x) (x)
+
+#endif /* RTE_BIG_ENDIAN */
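+
+/* Sanity check of the 48-bit swap above: on a little-endian host,
+ * cpu_to_be48(0x123456789abcULL) yields 0xbc9a78563412ULL, i.e. the six
+ * significant bytes are mirrored while the upper 16 bits remain clear.
+ */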
+
+/* When copying aligned words or shorts, try to avoid memcpy() */
+/* memcpy() stuff - when you know alignments in advance */
+#define CONFIG_TRY_BETTER_MEMCPY
+
+#ifdef CONFIG_TRY_BETTER_MEMCPY
+static inline void copy_words(void *dest, const void *src, size_t sz)
+{
+ u32 *__dest = dest;
+ const u32 *__src = src;
+ size_t __sz = sz >> 2;
+
+ DPAA_BUG_ON((unsigned long)dest & 0x3);
+ DPAA_BUG_ON((unsigned long)src & 0x3);
+ DPAA_BUG_ON(sz & 0x3);
+ while (__sz--)
+ *(__dest++) = *(__src++);
+}
+
+static inline void copy_shorts(void *dest, const void *src, size_t sz)
+{
+ u16 *__dest = dest;
+ const u16 *__src = src;
+ size_t __sz = sz >> 1;
+
+ DPAA_BUG_ON((unsigned long)dest & 0x1);
+ DPAA_BUG_ON((unsigned long)src & 0x1);
+ DPAA_BUG_ON(sz & 0x1);
+ while (__sz--)
+ *(__dest++) = *(__src++);
+}
+
+static inline void copy_bytes(void *dest, const void *src, size_t sz)
+{
+ u8 *__dest = dest;
+ const u8 *__src = src;
+
+ while (sz--)
+ *(__dest++) = *(__src++);
+}
+#else
+#define copy_words memcpy
+#define copy_shorts memcpy
+#define copy_bytes memcpy
+#endif
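+
+/* Illustrative usage: both pointers and the size must be 4-byte aligned for
+ * copy_words(), otherwise the DPAA_BUG_ON() checks fire in debug builds.
+ * E.g.:
+ *
+ *	u32 dst[16], src[16];
+ *	copy_words(dst, src, sizeof(src));
+ */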
+
+/* Allocator stuff */
+#define kmalloc(sz, t) malloc(sz)
+#define vmalloc(sz) malloc(sz)
+#define kfree(p) { if (p) free(p); }
+static inline void *kzalloc(size_t sz, gfp_t __foo __rte_unused)
+{
+ void *ptr = malloc(sz);
+
+ if (ptr)
+ memset(ptr, 0, sz);
+ return ptr;
+}
+
+static inline unsigned long get_zeroed_page(gfp_t __foo __rte_unused)
+{
+ void *p;
+
+ if (posix_memalign(&p, 4096, 4096))
+ return 0;
+ memset(p, 0, 4096);
+ return (unsigned long)p;
+}
+
+/* Spinlock stuff */
+#define spinlock_t rte_spinlock_t
+#define __SPIN_LOCK_UNLOCKED(x) RTE_SPINLOCK_INITIALIZER
+#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
+#define spin_lock_init(x) rte_spinlock_init(x)
+#define spin_lock_destroy(x)
+#define spin_lock(x) rte_spinlock_lock(x)
+#define spin_unlock(x) rte_spinlock_unlock(x)
+#define spin_lock_irq(x) spin_lock(x)
+#define spin_unlock_irq(x) spin_unlock(x)
+#define spin_lock_irqsave(x, f) spin_lock_irq(x)
+#define spin_unlock_irqrestore(x, f) spin_unlock_irq(x)
+
+#define atomic_t rte_atomic32_t
+#define atomic_read(v) rte_atomic32_read(v)
+#define atomic_set(v, i) rte_atomic32_set(v, i)
+
+#define atomic_inc(v) rte_atomic32_add(v, 1)
+#define atomic_dec(v) rte_atomic32_sub(v, 1)
+
+#define atomic_inc_and_test(v) rte_atomic32_inc_and_test(v)
+#define atomic_dec_and_test(v) rte_atomic32_dec_and_test(v)
+
+#define atomic_inc_return(v) rte_atomic32_add_return(v, 1)
+#define atomic_dec_return(v) rte_atomic32_sub_return(v, 1)
+#define atomic_sub_and_test(i, v) (rte_atomic32_sub_return(v, i) == 0)
+
+#include <dpaa_list.h>
+#include <dpaa_bits.h>
+
+#endif /* __COMPAT_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/include/dpaa_bits.h b/src/spdk/dpdk/drivers/bus/dpaa/include/dpaa_bits.h
new file mode 100644
index 00000000..9bc14d0c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/include/dpaa_bits.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2017 NXP
+ *
+ */
+
+#ifndef __DPAA_BITS_H
+#define __DPAA_BITS_H
+
+/* Bitfield stuff. */
+#define BITS_PER_ULONG (sizeof(unsigned long) << 3)
+#define SHIFT_PER_ULONG (((1 << 5) == BITS_PER_ULONG) ? 5 : 6)
+#define BITS_MASK(idx) (1UL << ((idx) & (BITS_PER_ULONG - 1)))
+#define BITS_IDX(idx) ((idx) >> SHIFT_PER_ULONG)
+
+static inline void dpaa_set_bits(unsigned long mask,
+ volatile unsigned long *p)
+{
+ *p |= mask;
+}
+
+static inline void dpaa_set_bit(int idx, volatile unsigned long *bits)
+{
+ dpaa_set_bits(BITS_MASK(idx), bits + BITS_IDX(idx));
+}
+
+static inline void dpaa_clear_bits(unsigned long mask,
+ volatile unsigned long *p)
+{
+ *p &= ~mask;
+}
+
+static inline void dpaa_clear_bit(int idx,
+ volatile unsigned long *bits)
+{
+ dpaa_clear_bits(BITS_MASK(idx), bits + BITS_IDX(idx));
+}
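+
+/* Illustrative usage, with a hypothetical two-word bitfield: BITS_IDX()
+ * selects the containing word and BITS_MASK() the bit within it, so the same
+ * call works for any unsigned-long width. E.g.:
+ *
+ *	unsigned long bits[2] = { 0, 0 };
+ *	dpaa_set_bit(40, bits);
+ *	dpaa_clear_bit(40, bits);
+ */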
+
+#endif /* __DPAA_BITS_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/include/dpaa_list.h b/src/spdk/dpdk/drivers/bus/dpaa/include/dpaa_list.h
new file mode 100644
index 00000000..e9457598
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/include/dpaa_list.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2017 NXP
+ *
+ */
+
+#ifndef __DPAA_LIST_H
+#define __DPAA_LIST_H
+
+/****************/
+/* Linked-lists */
+/****************/
+
+struct list_head {
+ struct list_head *prev;
+ struct list_head *next;
+};
+
+#define COMPAT_LIST_HEAD(n) \
+struct list_head n = { \
+ .prev = &n, \
+ .next = &n \
+}
+
+#define INIT_LIST_HEAD(p) \
+do { \
+ struct list_head *__p298 = (p); \
+ __p298->next = __p298; \
+ __p298->prev = __p298->next; \
+} while (0)
+#define list_entry(node, type, member) \
+ (type *)((void *)node - offsetof(type, member))
+#define list_empty(p) \
+({ \
+ const struct list_head *__p298 = (p); \
+ ((__p298->next == __p298) && (__p298->prev == __p298)); \
+})
+#define list_add(p, l) \
+do { \
+ struct list_head *__p298 = (p); \
+ struct list_head *__l298 = (l); \
+ __p298->next = __l298->next; \
+ __p298->prev = __l298; \
+ __l298->next->prev = __p298; \
+ __l298->next = __p298; \
+} while (0)
+#define list_add_tail(p, l) \
+do { \
+ struct list_head *__p298 = (p); \
+ struct list_head *__l298 = (l); \
+ __p298->prev = __l298->prev; \
+ __p298->next = __l298; \
+ __l298->prev->next = __p298; \
+ __l298->prev = __p298; \
+} while (0)
+#define list_for_each(i, l) \
+ for (i = (l)->next; i != (l); i = i->next)
+#define list_for_each_safe(i, j, l) \
+ for (i = (l)->next, j = i->next; i != (l); \
+ i = j, j = i->next)
+#define list_for_each_entry(i, l, name) \
+ for (i = list_entry((l)->next, typeof(*i), name); &i->name != (l); \
+ i = list_entry(i->name.next, typeof(*i), name))
+#define list_for_each_entry_safe(i, j, l, name) \
+ for (i = list_entry((l)->next, typeof(*i), name), \
+ j = list_entry(i->name.next, typeof(*j), name); \
+ &i->name != (l); \
+ i = j, j = list_entry(j->name.next, typeof(*j), name))
+#define list_del(i) \
+do { \
+ (i)->next->prev = (i)->prev; \
+ (i)->prev->next = (i)->next; \
+} while (0)
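+
+/* Illustrative usage, with a hypothetical object type embedding a
+ * list_head. E.g.:
+ *
+ *	struct obj { int val; struct list_head node; };
+ *	COMPAT_LIST_HEAD(mylist);
+ *	struct obj a = { .val = 1 };
+ *	struct obj *i;
+ *	list_add_tail(&a.node, &mylist);
+ *	list_for_each_entry(i, &mylist, node)
+ *		printf("%d\n", i->val);
+ */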
+
+#endif /* __DPAA_LIST_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/include/dpaa_rbtree.h b/src/spdk/dpdk/drivers/bus/dpaa/include/dpaa_rbtree.h
new file mode 100644
index 00000000..6c237e70
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/include/dpaa_rbtree.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2017 NXP
+ *
+ */
+
+#ifndef __DPAA_RBTREE_H
+#define __DPAA_RBTREE_H
+
+#include <rte_common.h>
+/************/
+/* RB-trees */
+/************/
+
+/* Linux has a good RB-tree implementation, but we can't use it (GPL). It also
+ * has a flat/hooked-in interface that virtually requires license-contamination
+ * in order to write a caller-compatible implementation. Instead, I've created
+ * an RB-tree encapsulation on top of linux's primitives (it does some of the
+ * work the client logic would normally do), and this gives us something we can
+ * reimplement on LWE. Unfortunately there are no good, free RB-tree
+ * implementations out there that are license-compatible and "flat" (i.e. no
+ * dynamic allocation). I did find a malloc-based one that I could convert, but
+ * that will be a task for later on. For now, LWE's RB-tree is implemented
+ * using an ordered linked-list.
+ *
+ * Note, the only linux-esque type is "struct rb_node", because it's used
+ * statically in the exported header, so it can't be opaque. Our version doesn't
+ * include a "rb_parent_color" field because we're doing linked-list instead of
+ * a true rb-tree.
+ */
+
+struct rb_node {
+ struct rb_node *prev, *next;
+};
+
+struct dpa_rbtree {
+ struct rb_node *head, *tail;
+};
+
+#define DPAA_RBTREE { NULL, NULL }
+static inline void dpa_rbtree_init(struct dpa_rbtree *tree)
+{
+ tree->head = tree->tail = NULL;
+}
+
+#define QMAN_NODE2OBJ(ptr, type, node_field) \
+ (type *)((char *)ptr - offsetof(type, node_field))
+
+#define IMPLEMENT_DPAA_RBTREE(name, type, node_field, val_field) \
+static inline int name##_push(struct dpa_rbtree *tree, type *obj) \
+{ \
+ struct rb_node *node = tree->head; \
+ if (!node) { \
+ tree->head = tree->tail = &obj->node_field; \
+ obj->node_field.prev = obj->node_field.next = NULL; \
+ return 0; \
+ } \
+ while (node) { \
+ type *item = QMAN_NODE2OBJ(node, type, node_field); \
+ if (obj->val_field == item->val_field) \
+ return -EBUSY; \
+ if (obj->val_field < item->val_field) { \
+ if (tree->head == node) \
+ tree->head = &obj->node_field; \
+ else \
+ node->prev->next = &obj->node_field; \
+ obj->node_field.prev = node->prev; \
+ obj->node_field.next = node; \
+ node->prev = &obj->node_field; \
+ return 0; \
+ } \
+ node = node->next; \
+ } \
+ obj->node_field.prev = tree->tail; \
+ obj->node_field.next = NULL; \
+ tree->tail->next = &obj->node_field; \
+ tree->tail = &obj->node_field; \
+ return 0; \
+} \
+static inline void name##_del(struct dpa_rbtree *tree, type *obj) \
+{ \
+ if (tree->head == &obj->node_field) { \
+ if (tree->tail == &obj->node_field) \
+ /* Only item in the list */ \
+ tree->head = tree->tail = NULL; \
+ else { \
+ /* Is the head, next != NULL */ \
+ tree->head = tree->head->next; \
+ tree->head->prev = NULL; \
+ } \
+ } else { \
+ if (tree->tail == &obj->node_field) { \
+ /* Is the tail, prev != NULL */ \
+ tree->tail = tree->tail->prev; \
+ tree->tail->next = NULL; \
+ } else { \
+ /* Is neither the head nor the tail */ \
+ obj->node_field.prev->next = obj->node_field.next; \
+ obj->node_field.next->prev = obj->node_field.prev; \
+ } \
+ } \
+} \
+static inline type *name##_find(struct dpa_rbtree *tree, u32 val) \
+{ \
+ struct rb_node *node = tree->head; \
+ while (node) { \
+ type *item = QMAN_NODE2OBJ(node, type, node_field); \
+ if (val == item->val_field) \
+ return item; \
+ if (val < item->val_field) \
+ return NULL; \
+ node = node->next; \
+ } \
+ return NULL; \
+}
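+
+/* Illustrative instantiation, for a hypothetical object keyed on a u32
+ * "fqid" field. E.g.:
+ *
+ *	struct my_fq { u32 fqid; struct rb_node node; };
+ *	IMPLEMENT_DPAA_RBTREE(fqtree, struct my_fq, node, fqid);
+ *
+ * This generates fqtree_push(), fqtree_del() and fqtree_find(), all operating
+ * on a struct dpa_rbtree initialised via dpa_rbtree_init() or DPAA_RBTREE.
+ */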
+
+#endif /* __DPAA_RBTREE_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/include/fman.h b/src/spdk/dpdk/drivers/bus/dpaa/include/fman.h
new file mode 100644
index 00000000..15bf73a4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/include/fman.h
@@ -0,0 +1,425 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2010-2012 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ */
+
+#ifndef __FMAN_H
+#define __FMAN_H
+
+#include <stdbool.h>
+#include <net/if.h>
+
+#include <rte_ethdev_driver.h>
+#include <rte_ether.h>
+
+#include <compat.h>
+
+#ifndef FMAN_DEVICE_PATH
+#define FMAN_DEVICE_PATH "/dev/mem"
+#endif
+
+#define MEMAC_NUM_OF_PADDRS 7 /* Num of additional exact match MAC addr regs */
+
+/* Control and Configuration Register (COMMAND_CONFIG) for MEMAC */
+#define CMD_CFG_LOOPBACK_EN 0x00000400
+/**< 21 XGMII/GMII loopback enable */
+#define CMD_CFG_PROMIS_EN 0x00000010
+/**< 27 Promiscuous operation enable */
+#define CMD_CFG_PAUSE_IGNORE 0x00000100
+/**< 23 Ignore Pause frame quanta */
+
+/* Statistics Configuration Register (STATN_CONFIG) */
+#define STATS_CFG_CLR 0x00000004
+/**< 29 Reset all counters */
+#define STATS_CFG_CLR_ON_RD 0x00000002
+/**< 30 Clear on read */
+#define STATS_CFG_SATURATE 0x00000001
+/**< 31 Saturate at the maximum val */
+
+/**< Max receive frame length mask */
+#define MAXFRM_SIZE_MEMAC 0x00007fe0
+#define MAXFRM_RX_MASK 0x0000ffff
+
+/**< Interface Mode Register for MEMAC */
+#define IF_MODE_RLP 0x00000820
+
+/**< Pool Limits */
+#define FMAN_PORT_MAX_EXT_POOLS_NUM 8
+#define FMAN_PORT_OBS_EXT_POOLS_NUM 2
+
+#define FMAN_PORT_CG_MAP_NUM 8
+#define FMAN_PORT_PRS_RESULT_WORDS_NUM 8
+#define FMAN_PORT_BMI_FIFO_UNITS 0x100
+#define FMAN_PORT_IC_OFFSET_UNITS 0x10
+
+#define FMAN_ENABLE_BPOOL_DEPLETION 0xF00000F0
+
+#define HASH_CTRL_MCAST_EN 0x00000100
+#define GROUP_ADDRESS 0x0000010000000000LL
+#define HASH_CTRL_ADDR_MASK 0x0000003F
+
+/* Forward declarations of FMAN interface and Bpool structures */
+struct __fman_if;
+struct fman_if_bpool;
+/* Lists of fman interfaces and bpools */
+TAILQ_HEAD(rte_fman_if_list, __fman_if);
+
+/* Represents the different flavours of network interfaces */
+enum fman_mac_type {
+ fman_offline = 0,
+ fman_mac_1g,
+ fman_mac_10g,
+};
+
+struct mac_addr {
+ uint32_t mac_addr_l; /**< Lower 32 bits of 48-bit MAC address */
+ uint32_t mac_addr_u; /**< Upper 16 bits of 48-bit MAC address */
+};
+
+struct memac_regs {
+ /* General Control and Status */
+ uint32_t res0000[2];
+ uint32_t command_config; /**< 0x008 Ctrl and cfg */
+ struct mac_addr mac_addr0; /**< 0x00C-0x010 MAC_ADDR_0...1 */
+ uint32_t maxfrm; /**< 0x014 Max frame length */
+ uint32_t res0018[5];
+ uint32_t hashtable_ctrl; /**< 0x02C Hash table control */
+ uint32_t res0030[4];
+ uint32_t ievent; /**< 0x040 Interrupt event */
+ uint32_t tx_ipg_length;
+ /**< 0x044 Transmitter inter-packet-gap */
+ uint32_t res0048;
+ uint32_t imask; /**< 0x04C Interrupt mask */
+ uint32_t res0050;
+ uint32_t pause_quanta[4]; /**< 0x054 Pause quanta */
+ uint32_t pause_thresh[4]; /**< 0x064 Pause quanta threshold */
+ uint32_t rx_pause_status; /**< 0x074 Receive pause status */
+ uint32_t res0078[2];
+ struct mac_addr mac_addr[MEMAC_NUM_OF_PADDRS];
+ /**< 0x80-0x0B4 mac padr */
+ uint32_t lpwake_timer;
+ /**< 0x0B8 Low Power Wakeup Timer */
+ uint32_t sleep_timer;
+ /**< 0x0BC Transmit EEE Low Power Timer */
+ uint32_t res00c0[8];
+ uint32_t statn_config;
+ /**< 0x0E0 Statistics configuration */
+ uint32_t res00e4[7];
+ /* Rx Statistics Counter */
+ uint32_t reoct_l; /**<Rx Eth Octets Counter */
+ uint32_t reoct_u;
+ uint32_t roct_l; /**<Rx Octet Counters */
+ uint32_t roct_u;
+ uint32_t raln_l; /**<Rx Alignment Error Counter */
+ uint32_t raln_u;
+ uint32_t rxpf_l; /**<Rx valid Pause Frame */
+ uint32_t rxpf_u;
+ uint32_t rfrm_l; /**<Rx Frame counter */
+ uint32_t rfrm_u;
+ uint32_t rfcs_l; /**<Rx frame check seq error */
+ uint32_t rfcs_u;
+ uint32_t rvlan_l; /**<Rx Vlan Frame Counter */
+ uint32_t rvlan_u;
+ uint32_t rerr_l; /**<Rx Frame error */
+ uint32_t rerr_u;
+ uint32_t ruca_l; /**<Rx Unicast */
+ uint32_t ruca_u;
+ uint32_t rmca_l; /**<Rx Multicast */
+ uint32_t rmca_u;
+ uint32_t rbca_l; /**<Rx Broadcast */
+ uint32_t rbca_u;
+	uint32_t rdrp_l;	/**<Rx Dropped Packet */
+ uint32_t rdrp_u;
+ uint32_t rpkt_l; /**<Rx packet */
+ uint32_t rpkt_u;
+ uint32_t rund_l; /**<Rx undersized packets */
+ uint32_t rund_u;
+ uint32_t r64_l; /**<Rx 64 byte */
+ uint32_t r64_u;
+ uint32_t r127_l;
+ uint32_t r127_u;
+ uint32_t r255_l;
+ uint32_t r255_u;
+ uint32_t r511_l;
+ uint32_t r511_u;
+ uint32_t r1023_l;
+ uint32_t r1023_u;
+ uint32_t r1518_l;
+ uint32_t r1518_u;
+ uint32_t r1519x_l;
+ uint32_t r1519x_u;
+ uint32_t rovr_l; /**<Rx oversized but good */
+ uint32_t rovr_u;
+ uint32_t rjbr_l; /**<Rx oversized with bad csum */
+ uint32_t rjbr_u;
+ uint32_t rfrg_l; /**<Rx fragment Packet */
+ uint32_t rfrg_u;
+	uint32_t rcnp_l;	/**<Rx control packets (0x8808) */
+ uint32_t rcnp_u;
+ uint32_t rdrntp_l; /**<Rx dropped due to FIFO overflow */
+ uint32_t rdrntp_u;
+ uint32_t res01d0[12];
+ /* Tx Statistics Counter */
+ uint32_t teoct_l; /**<Tx eth octets */
+ uint32_t teoct_u;
+ uint32_t toct_l; /**<Tx Octets */
+ uint32_t toct_u;
+ uint32_t res0210[2];
+ uint32_t txpf_l; /**<Tx valid pause frame */
+ uint32_t txpf_u;
+ uint32_t tfrm_l; /**<Tx frame counter */
+ uint32_t tfrm_u;
+ uint32_t tfcs_l; /**<Tx FCS error */
+ uint32_t tfcs_u;
+ uint32_t tvlan_l; /**<Tx Vlan Frame */
+ uint32_t tvlan_u;
+ uint32_t terr_l; /**<Tx frame error */
+ uint32_t terr_u;
+ uint32_t tuca_l; /**<Tx Unicast */
+ uint32_t tuca_u;
+ uint32_t tmca_l; /**<Tx Multicast */
+ uint32_t tmca_u;
+ uint32_t tbca_l; /**<Tx Broadcast */
+ uint32_t tbca_u;
+ uint32_t res0258[2];
+ uint32_t tpkt_l; /**<Tx Packet */
+ uint32_t tpkt_u;
+ uint32_t tund_l; /**<Tx Undersized */
+ uint32_t tund_u;
+ uint32_t t64_l;
+ uint32_t t64_u;
+ uint32_t t127_l;
+ uint32_t t127_u;
+ uint32_t t255_l;
+ uint32_t t255_u;
+ uint32_t t511_l;
+ uint32_t t511_u;
+ uint32_t t1023_l;
+ uint32_t t1023_u;
+ uint32_t t1518_l;
+ uint32_t t1518_u;
+ uint32_t t1519x_l;
+ uint32_t t1519x_u;
+ uint32_t res02a8[6];
+ uint32_t tcnp_l; /**<Tx Control Packet type - 0x8808 */
+ uint32_t tcnp_u;
+ uint32_t res02c8[14];
+ /* Line Interface Control */
+ uint32_t if_mode; /**< 0x300 Interface Mode Control */
+ uint32_t if_status; /**< 0x304 Interface Status */
+ uint32_t res0308[14];
+ /* HiGig/2 */
+ uint32_t hg_config; /**< 0x340 Control and cfg */
+ uint32_t res0344[3];
+ uint32_t hg_pause_quanta; /**< 0x350 Pause quanta */
+ uint32_t res0354[3];
+ uint32_t hg_pause_thresh; /**< 0x360 Pause quanta threshold */
+ uint32_t res0364[3];
+ uint32_t hgrx_pause_status; /**< 0x370 Receive pause status */
+ uint32_t hg_fifos_status; /**< 0x374 fifos status */
+ uint32_t rhm; /**< 0x378 rx messages counter */
+ uint32_t thm; /**< 0x37C tx messages counter */
+};
+
+struct rx_bmi_regs {
+ uint32_t fmbm_rcfg; /**< Rx Configuration */
+ uint32_t fmbm_rst; /**< Rx Status */
+ uint32_t fmbm_rda; /**< Rx DMA attributes*/
+ uint32_t fmbm_rfp; /**< Rx FIFO Parameters*/
+ uint32_t fmbm_rfed; /**< Rx Frame End Data*/
+ uint32_t fmbm_ricp; /**< Rx Internal Context Parameters*/
+ uint32_t fmbm_rim; /**< Rx Internal Buffer Margins*/
+ uint32_t fmbm_rebm; /**< Rx External Buffer Margins*/
+ uint32_t fmbm_rfne; /**< Rx Frame Next Engine*/
+ uint32_t fmbm_rfca; /**< Rx Frame Command Attributes.*/
+ uint32_t fmbm_rfpne; /**< Rx Frame Parser Next Engine*/
+ uint32_t fmbm_rpso; /**< Rx Parse Start Offset*/
+ uint32_t fmbm_rpp; /**< Rx Policer Profile */
+ uint32_t fmbm_rccb; /**< Rx Coarse Classification Base */
+ uint32_t fmbm_reth; /**< Rx Excessive Threshold */
+	uint32_t reserved003c[1];	/**< (0x03C-0x03F) */
+ uint32_t fmbm_rprai[FMAN_PORT_PRS_RESULT_WORDS_NUM];
+ /**< Rx Parse Results Array Init*/
+ uint32_t fmbm_rfqid; /**< Rx Frame Queue ID*/
+ uint32_t fmbm_refqid; /**< Rx Error Frame Queue ID*/
+ uint32_t fmbm_rfsdm; /**< Rx Frame Status Discard Mask*/
+ uint32_t fmbm_rfsem; /**< Rx Frame Status Error Mask*/
+ uint32_t fmbm_rfene; /**< Rx Frame Enqueue Next Engine */
+ uint32_t reserved0074[0x2]; /**< (0x074-0x07C) */
+ uint32_t fmbm_rcmne;
+ /**< Rx Frame Continuous Mode Next Engine */
+	uint32_t reserved0080[0x20];/**< (0x080-0x0FF) */
+	uint32_t fmbm_ebmpi[FMAN_PORT_MAX_EXT_POOLS_NUM];
+					/**< Buffer Manager pool Information */
+	uint32_t fmbm_acnt[FMAN_PORT_MAX_EXT_POOLS_NUM];
+					/**< Allocate Counter */
+	uint32_t reserved0130[8];
+					/**< (0x130-0x15F) reserved */
+ uint32_t fmbm_rcgm[FMAN_PORT_CG_MAP_NUM];
+ /**< Congestion Group Map*/
+ uint32_t fmbm_mpd; /**< BM Pool Depletion */
+	uint32_t reserved0184[0x1F]; /**< (0x184-0x1FF) */
+ uint32_t fmbm_rstc; /**< Rx Statistics Counters*/
+ uint32_t fmbm_rfrc; /**< Rx Frame Counter*/
+ uint32_t fmbm_rfbc; /**< Rx Bad Frames Counter*/
+ uint32_t fmbm_rlfc; /**< Rx Large Frames Counter*/
+ uint32_t fmbm_rffc; /**< Rx Filter Frames Counter*/
+ uint32_t fmbm_rfdc; /**< Rx Frame Discard Counter*/
+ uint32_t fmbm_rfldec; /**< Rx Frames List DMA Error Counter*/
+	uint32_t fmbm_rodc;	/**< Rx Out of Buffers Discard Counter*/
+ uint32_t fmbm_rbdc; /**< Rx Buffers Deallocate Counter*/
+	uint32_t reserved0224[0x17]; /**< (0x224-0x27F) */
+ uint32_t fmbm_rpc; /**< Rx Performance Counters*/
+ uint32_t fmbm_rpcp; /**< Rx Performance Count Parameters*/
+ uint32_t fmbm_rccn; /**< Rx Cycle Counter*/
+ uint32_t fmbm_rtuc; /**< Rx Tasks Utilization Counter*/
+ uint32_t fmbm_rrquc;
+ /**< Rx Receive Queue Utilization cntr*/
+ uint32_t fmbm_rduc; /**< Rx DMA Utilization Counter*/
+ uint32_t fmbm_rfuc; /**< Rx FIFO Utilization Counter*/
+ uint32_t fmbm_rpac; /**< Rx Pause Activation Counter*/
+	uint32_t reserved02a0[0x18]; /**< (0x2A0-0x2FF) */
+	uint32_t fmbm_rdbg;	/**< Rx Debug */
+};
+
+struct fman_port_qmi_regs {
+ uint32_t fmqm_pnc; /**< PortID n Configuration Register */
+ uint32_t fmqm_pns; /**< PortID n Status Register */
+ uint32_t fmqm_pnts; /**< PortID n Task Status Register */
+ uint32_t reserved00c[4]; /**< 0xn00C - 0xn01B */
+ uint32_t fmqm_pnen; /**< PortID n Enqueue NIA Register */
+ uint32_t fmqm_pnetfc; /**< PortID n Enq Total Frame Counter */
+ uint32_t reserved024[2]; /**< 0xn024 - 0x02B */
+ uint32_t fmqm_pndn; /**< PortID n Dequeue NIA Register */
+ uint32_t fmqm_pndc; /**< PortID n Dequeue Config Register */
+ uint32_t fmqm_pndtfc; /**< PortID n Dequeue tot Frame cntr */
+ uint32_t fmqm_pndfdc; /**< PortID n Dequeue FQID Dflt Cntr */
+ uint32_t fmqm_pndcc; /**< PortID n Dequeue Confirm Counter */
+};
+
+/* This struct exports parameters about an Fman network interface, determined
+ * from the device-tree.
+ */
+struct fman_if {
+ /* Which Fman this interface belongs to */
+ uint8_t fman_idx;
+ /* The type/speed of the interface */
+ enum fman_mac_type mac_type;
+ /* Boolean, set when mac type is memac */
+ uint8_t is_memac;
+ /* Boolean, set when PHY is RGMII */
+ uint8_t is_rgmii;
+ /* The index of this MAC (within the Fman it belongs to) */
+ uint8_t mac_idx;
+ /* The MAC address */
+ struct ether_addr mac_addr;
+ /* The Qman channel to schedule Tx FQs to */
+ u16 tx_channel_id;
+ /* The hard-coded FQIDs for this interface. Note: this doesn't cover
+ * the PCD nor the "Rx default" FQIDs, which are configured via FMC
+ * and its XML-based configuration.
+ */
+ uint32_t fqid_rx_def;
+ uint32_t fqid_rx_err;
+ uint32_t fqid_tx_err;
+ uint32_t fqid_tx_confirm;
+
+ struct list_head bpool_list;
+ /* The node for linking this interface into "fman_if_list" */
+ struct list_head node;
+};
+
+/* This struct exposes parameters for buffer pools, extracted from the network
+ * interface settings in the device tree.
+ */
+struct fman_if_bpool {
+ uint32_t bpid;
+ uint64_t count;
+ uint64_t size;
+ uint64_t addr;
+ /* The node for linking this bpool into fman_if::bpool_list */
+ struct list_head node;
+};
+
+/* Internal Context transfer params - FMBM_RICP*/
+struct fman_if_ic_params {
+ /*IC offset in the packet buffer */
+ uint16_t iceof;
+ /*IC internal offset */
+ uint16_t iciof;
+ /*IC size to copy */
+ uint16_t icsz;
+};
+
+/* The exported "struct fman_if" type contains the subset of fields we want
+ * exposed. This struct is embedded in a larger "struct __fman_if" which
+ * contains the extra bits we *don't* want exposed.
+ */
+struct __fman_if {
+ struct fman_if __if;
+ char node_path[PATH_MAX];
+ uint64_t regs_size;
+ void *ccsr_map;
+ void *bmi_map;
+ void *qmi_map;
+ struct list_head node;
+};
+
+/* And this is the base list node that the interfaces are added to. (See
+ * fman_if_enable_all_rx() below for an example of its use.)
+ */
+extern const struct list_head *fman_if_list;
+
+extern int fman_ccsr_map_fd;
+
+/* To iterate the "bpool_list" for an interface, e.g.:
+ * struct fman_if *p = get_ptr_to_some_interface();
+ * struct fman_if_bpool *bp;
+ * printf("Interface uses following BPIDs;\n");
+ * fman_if_for_each_bpool(bp, p) {
+ * printf(" %d\n", bp->bpid);
+ * [...]
+ * }
+ */
+#define fman_if_for_each_bpool(bp, __if) \
+ list_for_each_entry(bp, &(__if)->bpool_list, node)
+
+#define FMAN_ERR(rc, fmt, args...) \
+ do { \
+ _errno = (rc); \
+ DPAA_BUS_LOG(ERR, fmt "(%d)", ##args, errno); \
+ } while (0)
+
+#define FMAN_IP_REV_1 0xC30C4
+#define FMAN_IP_REV_1_MAJOR_MASK 0x0000FF00
+#define FMAN_IP_REV_1_MAJOR_SHIFT 8
+#define FMAN_V3 0x06
+#define FMAN_V3_CONTEXTA_EN_A2V 0x10000000
+#define FMAN_V3_CONTEXTA_EN_OVOM 0x02000000
+#define FMAN_V3_CONTEXTA_EN_EBD 0x80000000
+#define FMAN_CONTEXTA_DIS_CHECKSUM 0x7ull
+#define FMAN_CONTEXTA_SET_OPCODE11 0x2000000b00000000
+extern u16 fman_ip_rev;
+extern u32 fman_dealloc_bufs_mask_hi;
+extern u32 fman_dealloc_bufs_mask_lo;
+
+/**
+ * Initialize the FMAN driver
+ *
+ * @args void
+ * @return
+ *	0 on success; error otherwise
+ */
+int fman_init(void);
+
+/**
+ * Teardown the FMAN driver
+ *
+ * @args void
+ * @return void
+ */
+void fman_finish(void);
+
+#endif /* __FMAN_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_bman.h b/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_bman.h
new file mode 100644
index 00000000..0c74aba4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_bman.h
@@ -0,0 +1,342 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2012 Freescale Semiconductor, Inc.
+ *
+ */
+
+#ifndef __FSL_BMAN_H
+#define __FSL_BMAN_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* This wrapper represents a bit-array for the depletion state of the 64 Bman
+ * buffer pools.
+ */
+struct bman_depletion {
+ u32 state[2];
+};
+
+static inline void bman_depletion_init(struct bman_depletion *c)
+{
+ c->state[0] = c->state[1] = 0;
+}
+
+static inline void bman_depletion_fill(struct bman_depletion *c)
+{
+ c->state[0] = c->state[1] = ~0;
+}
+
+/* --- Bman data structures (and associated constants) --- */
+
+/* Represents s/w corenet portal mapped data structures */
+struct bm_rcr_entry; /* RCR (Release Command Ring) entries */
+struct bm_mc_command; /* MC (Management Command) command */
+struct bm_mc_result; /* MC result */
+
+/* Code-reduction, define a wrapper for 48-bit buffers. In cases where a buffer
+ * pool id specific to this buffer is needed (BM_RCR_VERB_CMD_BPID_MULTI,
+ * BM_MCC_VERB_ACQUIRE), the 'bpid' field is used.
+ */
+struct bm_buffer {
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 __reserved1;
+ u8 bpid;
+ u16 hi; /* High 16-bits of 48-bit address */
+ u32 lo; /* Low 32-bits of 48-bit address */
+#else
+ u32 lo;
+ u16 hi;
+ u8 bpid;
+ u8 __reserved;
+#endif
+ };
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u64 __notaddress:16;
+ u64 addr:48;
+#else
+ u64 addr:48;
+ u64 __notaddress:16;
+#endif
+ };
+ u64 opaque;
+ };
+} __attribute__((aligned(8)));
+static inline u64 bm_buffer_get64(const struct bm_buffer *buf)
+{
+ return buf->addr;
+}
+
+static inline dma_addr_t bm_buf_addr(const struct bm_buffer *buf)
+{
+ return (dma_addr_t)buf->addr;
+}
+
+#define bm_buffer_set64(buf, v) \
+ do { \
+ struct bm_buffer *__buf931 = (buf); \
+ __buf931->hi = upper_32_bits(v); \
+ __buf931->lo = lower_32_bits(v); \
+ } while (0)
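+
+/* Illustrative usage: store and read back a 48-bit buffer address;
+ * bm_buffer_set64() splits the value into the hi/lo fields that alias the
+ * 48-bit "addr" bitfield. E.g.:
+ *
+ *	struct bm_buffer buf = { .opaque = 0 };
+ *	bm_buffer_set64(&buf, 0x123456789aULL);
+ *	DPAA_BUG_ON(bm_buffer_get64(&buf) != 0x123456789aULL);
+ */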
+
+/* See 1.5.3.5.4: "Release Command" */
+struct bm_rcr_entry {
+ union {
+ struct {
+ u8 __dont_write_directly__verb;
+ u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
+ u8 __reserved1[62];
+ };
+ struct bm_buffer bufs[8];
+ };
+} __packed;
+#define BM_RCR_VERB_VBIT 0x80
+#define BM_RCR_VERB_CMD_MASK 0x70 /* one of two values; */
+#define BM_RCR_VERB_CMD_BPID_SINGLE 0x20
+#define BM_RCR_VERB_CMD_BPID_MULTI 0x30
+#define BM_RCR_VERB_BUFCOUNT_MASK 0x0f /* values 1..8 */
+
+/* See 1.5.3.1: "Acquire Command" */
+/* See 1.5.3.2: "Query Command" */
+struct bm_mcc_acquire {
+ u8 bpid;
+ u8 __reserved1[62];
+} __packed;
+struct bm_mcc_query {
+ u8 __reserved2[63];
+} __packed;
+struct bm_mc_command {
+ u8 __dont_write_directly__verb;
+ union {
+ struct bm_mcc_acquire acquire;
+ struct bm_mcc_query query;
+ };
+} __packed;
+#define BM_MCC_VERB_VBIT 0x80
+#define BM_MCC_VERB_CMD_MASK 0x70 /* where the verb contains; */
+#define BM_MCC_VERB_CMD_ACQUIRE 0x10
+#define BM_MCC_VERB_CMD_QUERY 0x40
+#define BM_MCC_VERB_ACQUIRE_BUFCOUNT 0x0f /* values 1..8 go here */
+
+/* See 1.5.3.3: "Acquire Response" */
+/* See 1.5.3.4: "Query Response" */
+struct bm_pool_state {
+ u8 __reserved1[32];
+ /* "availability state" and "depletion state" */
+ struct {
+ u8 __reserved1[8];
+ /* Access using bman_depletion_***() */
+ struct bman_depletion state;
+ } as, ds;
+};
+
+struct bm_mc_result {
+ union {
+ struct {
+ u8 verb;
+ u8 __reserved1[63];
+ };
+ union {
+ struct {
+ u8 __reserved1;
+ u8 bpid;
+ u8 __reserved2[62];
+ };
+ struct bm_buffer bufs[8];
+ } acquire;
+ struct bm_pool_state query;
+ };
+} __packed;
+#define BM_MCR_VERB_VBIT 0x80
+#define BM_MCR_VERB_CMD_MASK BM_MCC_VERB_CMD_MASK
+#define BM_MCR_VERB_CMD_ACQUIRE BM_MCC_VERB_CMD_ACQUIRE
+#define BM_MCR_VERB_CMD_QUERY BM_MCC_VERB_CMD_QUERY
+#define BM_MCR_VERB_CMD_ERR_INVALID 0x60
+#define BM_MCR_VERB_CMD_ERR_ECC 0x70
+#define BM_MCR_VERB_ACQUIRE_BUFCOUNT BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
+
+/* Portal and Buffer Pools */
+/* Represents a managed portal */
+struct bman_portal;
+
+/* This object type represents Bman buffer pools. */
+struct bman_pool;
+
+/* This struct specifies parameters for a bman_pool object. */
+struct bman_pool_params {
+ /* index of the buffer pool to encapsulate (0-63), ignored if
+ * BMAN_POOL_FLAG_DYNAMIC_BPID is set.
+ */
+ u32 bpid;
+ /* bit-mask of BMAN_POOL_FLAG_*** options */
+ u32 flags;
+ /* depletion-entry/exit thresholds, if BMAN_POOL_FLAG_THRESH is set. NB:
+ * this is only allowed if BMAN_POOL_FLAG_DYNAMIC_BPID is used *and*
+ * when run in the control plane (which controls Bman CCSR). This array
+ * matches the definition of bm_pool_set().
+ */
+ u32 thresholds[4];
+};
+
+/* Flags to bman_new_pool() */
+#define BMAN_POOL_FLAG_NO_RELEASE 0x00000001 /* can't release to pool */
+#define BMAN_POOL_FLAG_ONLY_RELEASE 0x00000002 /* can only release to pool */
+#define BMAN_POOL_FLAG_DYNAMIC_BPID 0x00000008 /* (de)allocate bpid */
+#define BMAN_POOL_FLAG_THRESH 0x00000010 /* set depletion thresholds */
+
+/* Flags to bman_release() */
+#define BMAN_RELEASE_FLAG_NOW 0x00000008 /* issue immediate release */
+
+
+/**
+ * bman_get_portal_index - get portal configuration index
+ */
+int bman_get_portal_index(void);
+
+/**
+ * bman_rcr_is_empty - Determine if portal's RCR is empty
+ *
+ * For use in situations where a cpu-affine caller needs to determine when all
+ * releases for the local portal have been processed by Bman but can't use the
+ * BMAN_RELEASE_FLAG_WAIT_SYNC flag to do this from the final bman_release().
+ * The function forces tracking of RCR consumption (which normally doesn't
+ * happen until release processing needs to find space to put new release
+ * commands), and returns zero if the ring still has unprocessed entries,
+ * non-zero if it is empty.
+ */
+int bman_rcr_is_empty(void);
+
+/**
+ * bman_alloc_bpid_range - Allocate a contiguous range of BPIDs
+ * @result: is set by the API to the base BPID of the allocated range
+ * @count: the number of BPIDs required
+ * @align: required alignment of the allocated range
+ * @partial: non-zero if the API can return fewer than @count BPIDs
+ *
+ * Returns the number of buffer pools allocated, or a negative error code. If
+ * @partial is non-zero, the allocation request may return a smaller range of
+ * BPs than requested (though alignment will be as requested). If @partial is
+ * zero, the return value will either be 'count' or negative.
+ */
+int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial);
+static inline int bman_alloc_bpid(u32 *result)
+{
+ int ret = bman_alloc_bpid_range(result, 1, 0, 0);
+
+ return (ret > 0) ? 0 : ret;
+}
+
+/**
+ * bman_release_bpid_range - Release the specified range of buffer pool IDs
+ * @bpid: the base BPID of the range to deallocate
+ * @count: the number of BPIDs in the range
+ *
+ * This function can also be used to seed the allocator with ranges of BPIDs
+ * that it can subsequently allocate from.
+ */
+void bman_release_bpid_range(u32 bpid, unsigned int count);
+static inline void bman_release_bpid(u32 bpid)
+{
+ bman_release_bpid_range(bpid, 1);
+}
+
+int bman_reserve_bpid_range(u32 bpid, unsigned int count);
+static inline int bman_reserve_bpid(u32 bpid)
+{
+ return bman_reserve_bpid_range(bpid, 1);
+}
+
+void bman_seed_bpid_range(u32 bpid, unsigned int count);
+
+int bman_shutdown_pool(u32 bpid);
+
+/**
+ * bman_new_pool - Allocates a Buffer Pool object
+ * @params: parameters specifying the buffer pool ID and behaviour
+ *
+ * Creates a pool object for the given @params. A portal and the depletion
+ * callback field of @params are only used if the BMAN_POOL_FLAG_DEPLETION flag
+ * is set. NB, the fields from @params are copied into the new pool object, so
+ * the structure provided by the caller can be released or reused after the
+ * function returns.
+ */
+struct bman_pool *bman_new_pool(const struct bman_pool_params *params);
+
+/**
+ * bman_free_pool - Deallocates a Buffer Pool object
+ * @pool: the pool object to release
+ */
+void bman_free_pool(struct bman_pool *pool);
+
+/**
+ * bman_get_params - Returns a pool object's parameters.
+ * @pool: the pool object
+ *
+ * The returned pointer refers to state within the pool object, so it must not
+ * be modified and becomes invalid once the pool object is destroyed.
+ */
+const struct bman_pool_params *bman_get_params(const struct bman_pool *pool);
+
+/**
+ * bman_release - Release buffer(s) to the buffer pool
+ * @pool: the buffer pool object to release to
+ * @bufs: an array of buffers to release
+ * @num: the number of buffers in @bufs (1-8)
+ * @flags: bit-mask of BMAN_RELEASE_FLAG_*** options
+ */
+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
+ u32 flags);
+
+/**
+ * bman_acquire - Acquire buffer(s) from a buffer pool
+ * @pool: the buffer pool object to acquire from
+ * @bufs: array for storing the acquired buffers
+ * @num: the number of buffers desired (@bufs is at least this big)
+ *
+ * Issues an "Acquire" command via the portal's management command interface.
+ * The return value will be the number of buffers obtained from the pool, or a
+ * negative error code if a h/w error or pool starvation was encountered.
+ */
+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
+ u32 flags);
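+
+/* Illustrative sketch of the release/acquire round-trip (not authoritative;
+ * error handling is elided, 'buf_phys' stands in for the physical address of
+ * a caller-provided buffer, and the bm_buffer_set64() accessor from earlier
+ * in this header is assumed);
+ *
+ *     struct bman_pool_params params = {
+ *         .flags = BMAN_POOL_FLAG_DYNAMIC_BPID,
+ *     };
+ *     struct bman_pool *pool = bman_new_pool(&params);
+ *     struct bm_buffer buf;
+ *     bm_buffer_set64(&buf, buf_phys);
+ *     bman_release(pool, &buf, 1, 0);
+ *     if (bman_acquire(pool, &buf, 1, 0) == 1)
+ *         [... one buffer recovered from the pool ...]
+ *     bman_free_pool(pool);
+ */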
+
+/**
+ * bman_query_pools - Query all buffer pool states
+ * @state: storage for the queried availability and depletion states
+ */
+int bman_query_pools(struct bm_pool_state *state);
+
+/**
+ * bman_query_free_buffers - Query how many free buffers are in the buffer pool
+ * @pool: the buffer pool object to query
+ *
+ * Returns the number of free buffers in the pool.
+ */
+u32 bman_query_free_buffers(struct bman_pool *pool);
+
+/**
+ * bman_update_pool_thresholds - Change the buffer pool's depletion thresholds
+ * @pool: the buffer pool object to which the thresholds will be set
+ * @thresholds: the new thresholds
+ */
+int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds);
+
+/**
+ * bm_pool_set_hw_threshold - Change the buffer pool's hardware thresholds
+ * @bpid: the buffer pool ID
+ * @low_thresh: low threshold
+ * @high_thresh: high threshold
+ */
+int bm_pool_set_hw_threshold(u32 bpid, const u32 low_thresh,
+ const u32 high_thresh);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FSL_BMAN_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_fman.h b/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_fman.h
new file mode 100644
index 00000000..1d1ce867
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_fman.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2017 NXP
+ *
+ */
+
+#ifndef __FSL_FMAN_H
+#define __FSL_FMAN_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* The status field in the FD is updated on the Rx side by FMAN with the
+ * following information. Refer to the field descriptions in the FM Block
+ * Guide (BG).
+ */
+struct fm_status_t {
+ unsigned int reserved0:3;
+ unsigned int dcl4c:1; /* Don't Check L4 Checksum */
+ unsigned int reserved1:1;
+ unsigned int ufd:1; /* Unsupported Format */
+ unsigned int lge:1; /* Length Error */
+ unsigned int dme:1; /* DMA Error */
+
+ unsigned int reserved2:4;
+ unsigned int fpe:1; /* Frame physical Error */
+ unsigned int fse:1; /* Frame Size Error */
+ unsigned int dis:1; /* Discard by Classification */
+ unsigned int reserved3:1;
+
+ unsigned int eof:1; /* Key Extraction goes out of frame */
+ unsigned int nss:1; /* No Scheme selected */
+ unsigned int kso:1; /* Key Size Overflow */
+ unsigned int reserved4:1;
+ unsigned int fcl:2; /* Frame Color */
+ unsigned int ipp:1; /* Illegal Policer Profile Selected */
+ unsigned int flm:1; /* Frame Length Mismatch */
+ unsigned int pte:1; /* Parser Timeout */
+ unsigned int isp:1; /* Invalid Soft Parser Instruction */
+ unsigned int phe:1; /* Header Error during parsing */
+ unsigned int frdr:1; /* Frame Dropped by disabled port */
+ unsigned int reserved5:4;
+} __attribute__ ((__packed__));
+
+/* Set MAC address for a particular interface */
+int fman_if_add_mac_addr(struct fman_if *p, uint8_t *eth, uint8_t addr_num);
+
+/* Remove a MAC address for a particular interface */
+void fman_if_clear_mac_addr(struct fman_if *p, uint8_t addr_num);
+
+/* Get the FMAN statistics */
+void fman_if_stats_get(struct fman_if *p, struct rte_eth_stats *stats);
+
+/* Reset the FMAN statistics */
+void fman_if_stats_reset(struct fman_if *p);
+
+/* Get all of the FMAN statistics */
+void fman_if_stats_get_all(struct fman_if *p, uint64_t *value, int n);
+
+/* Set ignore pause option for a specific interface */
+void fman_if_set_rx_ignore_pause_frames(struct fman_if *p, bool enable);
+
+/* Set max frame length */
+void fman_if_conf_max_frame_len(struct fman_if *p, unsigned int max_frame_len);
+
+/* Enable/disable Rx promiscuous mode on specified interface */
+void fman_if_promiscuous_enable(struct fman_if *p);
+void fman_if_promiscuous_disable(struct fman_if *p);
+
+/* Enable/disable Rx on specific interfaces */
+void fman_if_enable_rx(struct fman_if *p);
+void fman_if_disable_rx(struct fman_if *p);
+
+/* Enable/disable loopback on specific interfaces */
+void fman_if_loopback_enable(struct fman_if *p);
+void fman_if_loopback_disable(struct fman_if *p);
+
+/* Set buffer pool on specific interface */
+void fman_if_set_bp(struct fman_if *fm_if, unsigned int num, int bpid,
+ size_t bufsize);
+
+/* Get Flow Control threshold parameters on specific interface */
+int fman_if_get_fc_threshold(struct fman_if *fm_if);
+
+/* Enable and Set Flow Control threshold parameters on specific interface */
+int fman_if_set_fc_threshold(struct fman_if *fm_if,
+ u32 high_water, u32 low_water, u32 bpid);
+
+/* Get Flow Control pause quanta on specific interface */
+int fman_if_get_fc_quanta(struct fman_if *fm_if);
+
+/* Set Flow Control pause quanta on specific interface */
+int fman_if_set_fc_quanta(struct fman_if *fm_if, u16 pause_quanta);
+
+/* Set default error fqid on specific interface */
+void fman_if_set_err_fqid(struct fman_if *fm_if, uint32_t err_fqid);
+
+/* Get IC transfer params */
+int fman_if_get_ic_params(struct fman_if *fm_if, struct fman_if_ic_params *icp);
+
+/* Set IC transfer params */
+int fman_if_set_ic_params(struct fman_if *fm_if,
+ const struct fman_if_ic_params *icp);
+
+/* Get interface fd->offset value */
+int fman_if_get_fdoff(struct fman_if *fm_if);
+
+/* Set interface fd->offset value */
+void fman_if_set_fdoff(struct fman_if *fm_if, uint32_t fd_offset);
+
+/* Get interface SG enable status value */
+int fman_if_get_sg_enable(struct fman_if *fm_if);
+
+/* Set interface SG support mode */
+void fman_if_set_sg(struct fman_if *fm_if, int enable);
+
+/* Get interface Max Frame length (MTU) */
+uint16_t fman_if_get_maxfrm(struct fman_if *fm_if);
+
+/* Set interface Max Frame length (MTU) */
+void fman_if_set_maxfrm(struct fman_if *fm_if, uint16_t max_frm);
+
+/* Set interface next invoked action for dequeue operation */
+void fman_if_set_dnia(struct fman_if *fm_if, uint32_t nia);
+
+/* discard error packets on rx */
+void fman_if_discard_rx_errors(struct fman_if *fm_if);
+
+void fman_if_set_mcast_filter_table(struct fman_if *p);
+
+void fman_if_reset_mcast_filter_table(struct fman_if *p);
+
+int fman_if_add_hash_mac_addr(struct fman_if *p, uint8_t *eth);
+
+int fman_if_get_primary_mac_addr(struct fman_if *p, uint8_t *eth);
+
+
+/* Enable/disable Rx on all interfaces */
+static inline void fman_if_enable_all_rx(void)
+{
+ struct fman_if *__if;
+
+ list_for_each_entry(__if, fman_if_list, node)
+ fman_if_enable_rx(__if);
+}
+
+static inline void fman_if_disable_all_rx(void)
+{
+ struct fman_if *__if;
+
+ list_for_each_entry(__if, fman_if_list, node)
+ fman_if_disable_rx(__if);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FSL_FMAN_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_fman_crc64.h b/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_fman_crc64.h
new file mode 100644
index 00000000..bf162f3a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_fman_crc64.h
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2011 Freescale Semiconductor, Inc.
+ *
+ */
+
+#ifndef __FSL_FMAN_CRC64_H
+#define __FSL_FMAN_CRC64_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * The following definitions provide a software implementation of the CRC64
+ * algorithm implemented within Fman.
+ *
+ * The following example shows how to compute a CRC64 hash value based on
+ * SRC_IP, DST_IP and ESP_SPI values
+ *
+ * #define compute_hash(saddr,daddr,spi) \
+ * do { \
+ * uint64_t result; \
+ * result = fman_crc64_init(); \
+ * result = fman_crc64_compute_32bit(saddr, result); \
+ * result = fman_crc64_compute_32bit(daddr, result); \
+ * result = fman_crc64_compute_32bit(spi, result); \
+ * return (uint32_t) result & RC_HASH_MASK; \
+ * } while (0);
+ *
+ * If hashing over a different number of fields (or of different types) is
+ * required, this can be implemented using the following primitives.
+ */
+
+/* The following table provides the constants used by the Fman CRC64
+ * implementation. The table is instantiated within the DPAA fman driver.
+ * However if the application is not going to be linked against the DPAA fman
+ * driver but will use this Fman CRC64 implementation, then it will need to
+ * instantiate this table by using the DECLARE_FMAN_CRC64_TABLE() macro.
+ */
+struct fman_crc64_t {
+ uint64_t initial;
+ uint64_t table[1 << 8];
+};
+extern struct fman_crc64_t FMAN_CRC64_ECMA_182;
+#define DECLARE_FMAN_CRC64_TABLE() \
+struct fman_crc64_t FMAN_CRC64_ECMA_182 = { \
+ 0xFFFFFFFFFFFFFFFFULL, \
+ { \
+ 0x0000000000000000ULL, 0xb32e4cbe03a75f6fULL, \
+ 0xf4843657a840a05bULL, 0x47aa7ae9abe7ff34ULL, \
+ 0x7bd0c384ff8f5e33ULL, 0xc8fe8f3afc28015cULL, \
+ 0x8f54f5d357cffe68ULL, 0x3c7ab96d5468a107ULL, \
+ 0xf7a18709ff1ebc66ULL, 0x448fcbb7fcb9e309ULL, \
+ 0x0325b15e575e1c3dULL, 0xb00bfde054f94352ULL, \
+ 0x8c71448d0091e255ULL, 0x3f5f08330336bd3aULL, \
+ 0x78f572daa8d1420eULL, 0xcbdb3e64ab761d61ULL, \
+ 0x7d9ba13851336649ULL, 0xceb5ed8652943926ULL, \
+ 0x891f976ff973c612ULL, 0x3a31dbd1fad4997dULL, \
+ 0x064b62bcaebc387aULL, 0xb5652e02ad1b6715ULL, \
+ 0xf2cf54eb06fc9821ULL, 0x41e11855055bc74eULL, \
+ 0x8a3a2631ae2dda2fULL, 0x39146a8fad8a8540ULL, \
+ 0x7ebe1066066d7a74ULL, 0xcd905cd805ca251bULL, \
+ 0xf1eae5b551a2841cULL, 0x42c4a90b5205db73ULL, \
+ 0x056ed3e2f9e22447ULL, 0xb6409f5cfa457b28ULL, \
+ 0xfb374270a266cc92ULL, 0x48190ecea1c193fdULL, \
+ 0x0fb374270a266cc9ULL, 0xbc9d3899098133a6ULL, \
+ 0x80e781f45de992a1ULL, 0x33c9cd4a5e4ecdceULL, \
+ 0x7463b7a3f5a932faULL, 0xc74dfb1df60e6d95ULL, \
+ 0x0c96c5795d7870f4ULL, 0xbfb889c75edf2f9bULL, \
+ 0xf812f32ef538d0afULL, 0x4b3cbf90f69f8fc0ULL, \
+ 0x774606fda2f72ec7ULL, 0xc4684a43a15071a8ULL, \
+ 0x83c230aa0ab78e9cULL, 0x30ec7c140910d1f3ULL, \
+ 0x86ace348f355aadbULL, 0x3582aff6f0f2f5b4ULL, \
+ 0x7228d51f5b150a80ULL, 0xc10699a158b255efULL, \
+ 0xfd7c20cc0cdaf4e8ULL, 0x4e526c720f7dab87ULL, \
+ 0x09f8169ba49a54b3ULL, 0xbad65a25a73d0bdcULL, \
+ 0x710d64410c4b16bdULL, 0xc22328ff0fec49d2ULL, \
+ 0x85895216a40bb6e6ULL, 0x36a71ea8a7ace989ULL, \
+ 0x0adda7c5f3c4488eULL, 0xb9f3eb7bf06317e1ULL, \
+ 0xfe5991925b84e8d5ULL, 0x4d77dd2c5823b7baULL, \
+ 0x64b62bcaebc387a1ULL, 0xd7986774e864d8ceULL, \
+ 0x90321d9d438327faULL, 0x231c512340247895ULL, \
+ 0x1f66e84e144cd992ULL, 0xac48a4f017eb86fdULL, \
+ 0xebe2de19bc0c79c9ULL, 0x58cc92a7bfab26a6ULL, \
+ 0x9317acc314dd3bc7ULL, 0x2039e07d177a64a8ULL, \
+ 0x67939a94bc9d9b9cULL, 0xd4bdd62abf3ac4f3ULL, \
+ 0xe8c76f47eb5265f4ULL, 0x5be923f9e8f53a9bULL, \
+ 0x1c4359104312c5afULL, 0xaf6d15ae40b59ac0ULL, \
+ 0x192d8af2baf0e1e8ULL, 0xaa03c64cb957be87ULL, \
+ 0xeda9bca512b041b3ULL, 0x5e87f01b11171edcULL, \
+ 0x62fd4976457fbfdbULL, 0xd1d305c846d8e0b4ULL, \
+ 0x96797f21ed3f1f80ULL, 0x2557339fee9840efULL, \
+ 0xee8c0dfb45ee5d8eULL, 0x5da24145464902e1ULL, \
+ 0x1a083bacedaefdd5ULL, 0xa9267712ee09a2baULL, \
+ 0x955cce7fba6103bdULL, 0x267282c1b9c65cd2ULL, \
+ 0x61d8f8281221a3e6ULL, 0xd2f6b4961186fc89ULL, \
+ 0x9f8169ba49a54b33ULL, 0x2caf25044a02145cULL, \
+ 0x6b055fede1e5eb68ULL, 0xd82b1353e242b407ULL, \
+ 0xe451aa3eb62a1500ULL, 0x577fe680b58d4a6fULL, \
+ 0x10d59c691e6ab55bULL, 0xa3fbd0d71dcdea34ULL, \
+ 0x6820eeb3b6bbf755ULL, 0xdb0ea20db51ca83aULL, \
+ 0x9ca4d8e41efb570eULL, 0x2f8a945a1d5c0861ULL, \
+ 0x13f02d374934a966ULL, 0xa0de61894a93f609ULL, \
+ 0xe7741b60e174093dULL, 0x545a57dee2d35652ULL, \
+ 0xe21ac88218962d7aULL, 0x5134843c1b317215ULL, \
+ 0x169efed5b0d68d21ULL, 0xa5b0b26bb371d24eULL, \
+ 0x99ca0b06e7197349ULL, 0x2ae447b8e4be2c26ULL, \
+ 0x6d4e3d514f59d312ULL, 0xde6071ef4cfe8c7dULL, \
+ 0x15bb4f8be788911cULL, 0xa6950335e42fce73ULL, \
+ 0xe13f79dc4fc83147ULL, 0x521135624c6f6e28ULL, \
+ 0x6e6b8c0f1807cf2fULL, 0xdd45c0b11ba09040ULL, \
+ 0x9aefba58b0476f74ULL, 0x29c1f6e6b3e0301bULL, \
+ 0xc96c5795d7870f42ULL, 0x7a421b2bd420502dULL, \
+ 0x3de861c27fc7af19ULL, 0x8ec62d7c7c60f076ULL, \
+ 0xb2bc941128085171ULL, 0x0192d8af2baf0e1eULL, \
+ 0x4638a2468048f12aULL, 0xf516eef883efae45ULL, \
+ 0x3ecdd09c2899b324ULL, 0x8de39c222b3eec4bULL, \
+ 0xca49e6cb80d9137fULL, 0x7967aa75837e4c10ULL, \
+ 0x451d1318d716ed17ULL, 0xf6335fa6d4b1b278ULL, \
+ 0xb199254f7f564d4cULL, 0x02b769f17cf11223ULL, \
+ 0xb4f7f6ad86b4690bULL, 0x07d9ba1385133664ULL, \
+ 0x4073c0fa2ef4c950ULL, 0xf35d8c442d53963fULL, \
+ 0xcf273529793b3738ULL, 0x7c0979977a9c6857ULL, \
+ 0x3ba3037ed17b9763ULL, 0x888d4fc0d2dcc80cULL, \
+ 0x435671a479aad56dULL, 0xf0783d1a7a0d8a02ULL, \
+ 0xb7d247f3d1ea7536ULL, 0x04fc0b4dd24d2a59ULL, \
+ 0x3886b22086258b5eULL, 0x8ba8fe9e8582d431ULL, \
+ 0xcc0284772e652b05ULL, 0x7f2cc8c92dc2746aULL, \
+ 0x325b15e575e1c3d0ULL, 0x8175595b76469cbfULL, \
+ 0xc6df23b2dda1638bULL, 0x75f16f0cde063ce4ULL, \
+ 0x498bd6618a6e9de3ULL, 0xfaa59adf89c9c28cULL, \
+ 0xbd0fe036222e3db8ULL, 0x0e21ac88218962d7ULL, \
+ 0xc5fa92ec8aff7fb6ULL, 0x76d4de52895820d9ULL, \
+ 0x317ea4bb22bfdfedULL, 0x8250e80521188082ULL, \
+ 0xbe2a516875702185ULL, 0x0d041dd676d77eeaULL, \
+ 0x4aae673fdd3081deULL, 0xf9802b81de97deb1ULL, \
+ 0x4fc0b4dd24d2a599ULL, 0xfceef8632775faf6ULL, \
+ 0xbb44828a8c9205c2ULL, 0x086ace348f355aadULL, \
+ 0x34107759db5dfbaaULL, 0x873e3be7d8faa4c5ULL, \
+ 0xc094410e731d5bf1ULL, 0x73ba0db070ba049eULL, \
+ 0xb86133d4dbcc19ffULL, 0x0b4f7f6ad86b4690ULL, \
+ 0x4ce50583738cb9a4ULL, 0xffcb493d702be6cbULL, \
+ 0xc3b1f050244347ccULL, 0x709fbcee27e418a3ULL, \
+ 0x3735c6078c03e797ULL, 0x841b8ab98fa4b8f8ULL, \
+ 0xadda7c5f3c4488e3ULL, 0x1ef430e13fe3d78cULL, \
+ 0x595e4a08940428b8ULL, 0xea7006b697a377d7ULL, \
+ 0xd60abfdbc3cbd6d0ULL, 0x6524f365c06c89bfULL, \
+ 0x228e898c6b8b768bULL, 0x91a0c532682c29e4ULL, \
+ 0x5a7bfb56c35a3485ULL, 0xe955b7e8c0fd6beaULL, \
+ 0xaeffcd016b1a94deULL, 0x1dd181bf68bdcbb1ULL, \
+ 0x21ab38d23cd56ab6ULL, 0x9285746c3f7235d9ULL, \
+ 0xd52f0e859495caedULL, 0x6601423b97329582ULL, \
+ 0xd041dd676d77eeaaULL, 0x636f91d96ed0b1c5ULL, \
+ 0x24c5eb30c5374ef1ULL, 0x97eba78ec690119eULL, \
+ 0xab911ee392f8b099ULL, 0x18bf525d915feff6ULL, \
+ 0x5f1528b43ab810c2ULL, 0xec3b640a391f4fadULL, \
+ 0x27e05a6e926952ccULL, 0x94ce16d091ce0da3ULL, \
+ 0xd3646c393a29f297ULL, 0x604a2087398eadf8ULL, \
+ 0x5c3099ea6de60cffULL, 0xef1ed5546e415390ULL, \
+ 0xa8b4afbdc5a6aca4ULL, 0x1b9ae303c601f3cbULL, \
+ 0x56ed3e2f9e224471ULL, 0xe5c372919d851b1eULL, \
+ 0xa26908783662e42aULL, 0x114744c635c5bb45ULL, \
+ 0x2d3dfdab61ad1a42ULL, 0x9e13b115620a452dULL, \
+ 0xd9b9cbfcc9edba19ULL, 0x6a978742ca4ae576ULL, \
+ 0xa14cb926613cf817ULL, 0x1262f598629ba778ULL, \
+ 0x55c88f71c97c584cULL, 0xe6e6c3cfcadb0723ULL, \
+ 0xda9c7aa29eb3a624ULL, 0x69b2361c9d14f94bULL, \
+ 0x2e184cf536f3067fULL, 0x9d36004b35545910ULL, \
+ 0x2b769f17cf112238ULL, 0x9858d3a9ccb67d57ULL, \
+ 0xdff2a94067518263ULL, 0x6cdce5fe64f6dd0cULL, \
+ 0x50a65c93309e7c0bULL, 0xe388102d33392364ULL, \
+ 0xa4226ac498dedc50ULL, 0x170c267a9b79833fULL, \
+ 0xdcd7181e300f9e5eULL, 0x6ff954a033a8c131ULL, \
+ 0x28532e49984f3e05ULL, 0x9b7d62f79be8616aULL, \
+ 0xa707db9acf80c06dULL, 0x14299724cc279f02ULL, \
+ 0x5383edcd67c06036ULL, 0xe0ada17364673f59ULL} \
+}
+
+/*
+ * Return the initial CRC seed. Use the value returned from this API as the
+ * "crc" parameter to the first call to add data.
+ */
+static inline uint64_t fman_crc64_init(void)
+{
+ return FMAN_CRC64_ECMA_182.initial;
+}
+
+/* Updates the CRC with arbitrary data */
+static inline uint64_t fman_crc64_update(uint64_t crc,
+ void *data, unsigned int len)
+{
+ uint8_t *p = data;
+ while (len--)
+ crc = FMAN_CRC64_ECMA_182.table[(crc ^ *(p++)) & 0xff] ^
+ (crc >> 8);
+ return crc;
+}
+
+/* Shorthands for updating the CRC with 8/16/32 bits of data.
+ * IMPORTANT NOTE: the typed "data" arguments should not be mistaken for
+ * host-endian numerical values; they are assumed to contain big-endian
+ * (ie. network byte order) data.
+ */
+static inline uint64_t fman_crc64_compute_32bit(uint32_t data, uint64_t crc)
+{
+ return fman_crc64_update(crc, &data, sizeof(data));
+}
+static inline uint64_t fman_crc64_compute_16bit(uint16_t data, uint64_t crc)
+{
+ return fman_crc64_update(crc, &data, sizeof(data));
+}
+static inline uint64_t fman_crc64_compute_8bit(uint8_t data, uint64_t crc)
+{
+ return fman_crc64_update(crc, &data, sizeof(data));
+}
+
+/*
+ * Finalise the CRC by inverting the accumulated value (one's complement)
+ */
+static inline uint64_t fman_crc64_finish(uint64_t seed)
+{
+ return ~seed;
+}
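+
+/* Putting the primitives together (illustrative): a full computation over an
+ * arbitrary buffer 'data' of 'len' bytes;
+ *
+ *     uint64_t crc = fman_crc64_init();
+ *     crc = fman_crc64_update(crc, data, len);
+ *     crc = fman_crc64_finish(crc);
+ */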
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FSL_FMAN_CRC64_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_qman.h b/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_qman.h
new file mode 100644
index 00000000..b18cf037
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_qman.h
@@ -0,0 +1,2057 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2012 Freescale Semiconductor, Inc.
+ *
+ */
+
+#ifndef __FSL_QMAN_H
+#define __FSL_QMAN_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <dpaa_rbtree.h>
+#include <rte_eventdev.h>
+
+/* FQ lookups (turn this on for 64bit user-space) */
+#if (__WORDSIZE == 64)
+#define CONFIG_FSL_QMAN_FQ_LOOKUP
+/* if FQ lookups are supported, this controls the number of initialised,
+ * s/w-consumed FQs that can be supported at any one time.
+ */
+#define CONFIG_FSL_QMAN_FQ_LOOKUP_MAX (32 * 1024)
+#endif
+
+/* Last updated for v00.800 of the BG */
+
+/* Hardware constants */
+#define QM_CHANNEL_SWPORTAL0 0
+#define QMAN_CHANNEL_POOL1 0x21
+#define QMAN_CHANNEL_CAAM 0x80
+#define QMAN_CHANNEL_PME 0xa0
+#define QMAN_CHANNEL_POOL1_REV3 0x401
+#define QMAN_CHANNEL_CAAM_REV3 0x840
+#define QMAN_CHANNEL_PME_REV3 0x860
+extern u16 qm_channel_pool1;
+extern u16 qm_channel_caam;
+extern u16 qm_channel_pme;
+enum qm_dc_portal {
+ qm_dc_portal_fman0 = 0,
+ qm_dc_portal_fman1 = 1,
+ qm_dc_portal_caam = 2,
+ qm_dc_portal_pme = 3
+};
+
+/* Portal processing (interrupt) sources */
+#define QM_PIRQ_CCSCI 0x00200000 /* CEETM Congestion State Change */
+#define QM_PIRQ_CSCI 0x00100000 /* Congestion State Change */
+#define QM_PIRQ_EQCI 0x00080000 /* Enqueue Command Committed */
+#define QM_PIRQ_EQRI 0x00040000 /* EQCR Ring (below threshold) */
+#define QM_PIRQ_DQRI 0x00020000 /* DQRR Ring (non-empty) */
+#define QM_PIRQ_MRI 0x00010000 /* MR Ring (non-empty) */
+/*
+ * This mask contains all the interrupt sources that need handling except DQRI,
+ * ie. that if present should trigger slow-path processing.
+ */
+#define QM_PIRQ_SLOW (QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \
+ QM_PIRQ_MRI | QM_PIRQ_CCSCI)
+
+/* For qman_static_dequeue_*** APIs */
+#define QM_SDQCR_CHANNELS_POOL_MASK 0x00007fff
+/* for n in [1,15] */
+#define QM_SDQCR_CHANNELS_POOL(n) (0x00008000 >> (n))
+/* for conversion from n of qm_channel */
+static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel)
+{
+ return QM_SDQCR_CHANNELS_POOL(channel + 1 - qm_channel_pool1);
+}
+
+/* For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
+ * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
+ * FQID(n) to fill in the frame queue ID.
+ */
+#define QM_VDQCR_PRECEDENCE_VDQCR 0x0
+#define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000
+#define QM_VDQCR_EXACT 0x40000000
+#define QM_VDQCR_NUMFRAMES_MASK 0x3f000000
+#define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24)
+#define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f)
+#define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0)
+
+/* --- QMan data structures (and associated constants) --- */
+
+/* Represents s/w corenet portal mapped data structures */
+struct qm_eqcr_entry; /* EQCR (EnQueue Command Ring) entries */
+struct qm_dqrr_entry; /* DQRR (DeQueue Response Ring) entries */
+struct qm_mr_entry; /* MR (Message Ring) entries */
+struct qm_mc_command; /* MC (Management Command) command */
+struct qm_mc_result; /* MC result */
+
+#define QM_FD_FORMAT_SG 0x4
+#define QM_FD_FORMAT_LONG 0x2
+#define QM_FD_FORMAT_COMPOUND 0x1
+enum qm_fd_format {
+ /*
+ * 'contig' implies a contiguous buffer, whereas 'sg' implies a
+ * scatter-gather table. 'big' implies a 29-bit length with no offset
+ * field, otherwise length is 20-bit and offset is 9-bit. 'compound'
+ * implies a s/g-like table, where each entry itself represents a frame
+ * (contiguous or scatter-gather) and the 29-bit "length" is
+ * interpreted purely for congestion calculations, ie. a "congestion
+ * weight".
+ */
+ qm_fd_contig = 0,
+ qm_fd_contig_big = QM_FD_FORMAT_LONG,
+ qm_fd_sg = QM_FD_FORMAT_SG,
+ qm_fd_sg_big = QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG,
+ qm_fd_compound = QM_FD_FORMAT_COMPOUND
+};
+
+/* Capitalised versions are un-typed but can be used in static expressions */
+#define QM_FD_CONTIG 0
+#define QM_FD_CONTIG_BIG QM_FD_FORMAT_LONG
+#define QM_FD_SG QM_FD_FORMAT_SG
+#define QM_FD_SG_BIG (QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG)
+#define QM_FD_COMPOUND QM_FD_FORMAT_COMPOUND
+
+/* "Frame Descriptor (FD)" */
+struct qm_fd {
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 dd:2; /* dynamic debug */
+ u8 liodn_offset:6;
+ u8 bpid:8; /* Buffer Pool ID */
+ u8 eliodn_offset:4;
+ u8 __reserved:4;
+ u8 addr_hi; /* high 8-bits of 40-bit address */
+ u32 addr_lo; /* low 32-bits of 40-bit address */
+#else
+ u8 liodn_offset:6;
+ u8 dd:2; /* dynamic debug */
+ u8 bpid:8; /* Buffer Pool ID */
+ u8 __reserved:4;
+ u8 eliodn_offset:4;
+ u8 addr_hi; /* high 8-bits of 40-bit address */
+ u32 addr_lo; /* low 32-bits of 40-bit address */
+#endif
+ };
+ struct {
+ u64 __notaddress:24;
+ /* More efficient address accessor */
+ u64 addr:40;
+ };
+ u64 opaque_addr;
+ };
+ /* The 'format' field indicates the interpretation of the remaining 29
+ * bits of the 32-bit word. For packing reasons, it is duplicated in the
+ * other union elements. Note, union'd structs are difficult to use with
+ * static initialisation under gcc, in which case use the "opaque" form
+ * with one of the macros.
+ */
+ union {
+ /* For easier/faster copying of this part of the fd (eg. from a
+ * DQRR entry to an EQCR entry) copy 'opaque'
+ */
+ u32 opaque;
+ /* If 'format' is _contig or _sg, 20b length and 9b offset */
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ enum qm_fd_format format:3;
+ u16 offset:9;
+ u32 length20:20;
+#else
+ u32 length20:20;
+ u16 offset:9;
+ enum qm_fd_format format:3;
+#endif
+ };
+ /* If 'format' is _contig_big or _sg_big, 29b length */
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ enum qm_fd_format _format1:3;
+ u32 length29:29;
+#else
+ u32 length29:29;
+ enum qm_fd_format _format1:3;
+#endif
+ };
+ /* If 'format' is _compound, 29b "congestion weight" */
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ enum qm_fd_format _format2:3;
+ u32 cong_weight:29;
+#else
+ u32 cong_weight:29;
+ enum qm_fd_format _format2:3;
+#endif
+ };
+ };
+ union {
+ u32 cmd;
+ u32 status;
+ };
+} __attribute__((aligned(8)));
+#define QM_FD_DD_NULL 0x00
+#define QM_FD_PID_MASK 0x3f
+static inline u64 qm_fd_addr_get64(const struct qm_fd *fd)
+{
+ return fd->addr;
+}
+
+static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd)
+{
+ return (dma_addr_t)fd->addr;
+}
+
+/* Macro, so we compile better if 'v' isn't always 64-bit */
+#define qm_fd_addr_set64(fd, v) \
+ do { \
+ struct qm_fd *__fd931 = (fd); \
+ __fd931->addr = v; \
+ } while (0)
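+
+/* Illustrative sketch: describe a contiguous frame of 'len' bytes (len must
+ * fit in 20 bits for this format) at caller-provided physical address 'phys',
+ * drawn from buffer pool 'bpid';
+ *
+ *     struct qm_fd fd;
+ *     memset(&fd, 0, sizeof(fd));
+ *     qm_fd_addr_set64(&fd, phys);
+ *     fd.bpid = bpid;
+ *     fd.format = qm_fd_contig;
+ *     fd.offset = 0;
+ *     fd.length20 = len;
+ */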
+
+/* Scatter/Gather table entry */
+struct qm_sg_entry {
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 __reserved1[3];
+ u8 addr_hi; /* high 8-bits of 40-bit address */
+ u32 addr_lo; /* low 32-bits of 40-bit address */
+#else
+ u32 addr_lo; /* low 32-bits of 40-bit address */
+ u8 addr_hi; /* high 8-bits of 40-bit address */
+ u8 __reserved1[3];
+#endif
+ };
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u64 __notaddress:24;
+ u64 addr:40;
+#else
+ u64 addr:40;
+ u64 __notaddress:24;
+#endif
+ };
+ u64 opaque;
+ };
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u32 extension:1; /* Extension bit */
+ u32 final:1; /* Final bit */
+ u32 length:30;
+#else
+ u32 length:30;
+ u32 final:1; /* Final bit */
+ u32 extension:1; /* Extension bit */
+#endif
+ };
+ u32 val;
+ };
+ u8 __reserved2;
+ u8 bpid;
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 __reserved3:3;
+ u16 offset:13;
+#else
+ u16 offset:13;
+ u16 __reserved3:3;
+#endif
+ };
+ u16 val_off;
+ };
+} __packed;
+static inline u64 qm_sg_entry_get64(const struct qm_sg_entry *sg)
+{
+ return sg->addr;
+}
+
+static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg)
+{
+ return (dma_addr_t)sg->addr;
+}
+
+/* Macro, so we compile better if 'v' isn't always 64-bit */
+#define qm_sg_entry_set64(sg, v) \
+ do { \
+ struct qm_sg_entry *__sg931 = (sg); \
+ __sg931->addr = v; \
+ } while (0)
+
+/* See 1.5.8.1: "Enqueue Command" */
+struct __rte_aligned(8) qm_eqcr_entry {
+ u8 __dont_write_directly__verb;
+ u8 dca;
+ u16 seqnum;
+ u32 orp; /* 24-bit */
+ u32 fqid; /* 24-bit */
+ u32 tag;
+ struct qm_fd fd; /* this has alignment 8 */
+ u8 __reserved3[32];
+} __packed;
+
+
+/* "Frame Dequeue Response" */
+struct __rte_aligned(8) qm_dqrr_entry {
+ u8 verb;
+ u8 stat;
+ u16 seqnum; /* 15-bit */
+ u8 tok;
+ u8 __reserved2[3];
+ u32 fqid; /* 24-bit */
+ u32 contextB;
+ struct qm_fd fd; /* this has alignment 8 */
+ u8 __reserved4[32];
+};
+
+#define QM_DQRR_VERB_VBIT 0x80
+#define QM_DQRR_VERB_MASK 0x7f /* where the verb contains; */
+#define QM_DQRR_VERB_FRAME_DEQUEUE 0x60 /* "this format" */
+#define QM_DQRR_STAT_FQ_EMPTY 0x80 /* FQ empty */
+#define QM_DQRR_STAT_FQ_HELDACTIVE 0x40 /* FQ held active */
+#define QM_DQRR_STAT_FQ_FORCEELIGIBLE 0x20 /* FQ was force-eligible'd */
+#define QM_DQRR_STAT_FD_VALID 0x10 /* has a non-NULL FD */
+#define QM_DQRR_STAT_UNSCHEDULED 0x02 /* Unscheduled dequeue */
+#define QM_DQRR_STAT_DQCR_EXPIRED 0x01 /* VDQCR or PDQCR expired*/
+
+
+/* "ERN Message Response" */
+/* "FQ State Change Notification" */
+struct qm_mr_entry {
+ union {
+ struct {
+ u8 verb;
+ u8 dca;
+ u16 seqnum;
+ u8 rc; /* Rejection Code */
+ u32 orp:24;
+ u32 fqid; /* 24-bit */
+ u32 tag;
+ struct qm_fd fd; /* this has alignment 8 */
+ } __packed __rte_aligned(8) ern;
+ struct {
+ u8 verb;
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 colour:2; /* See QM_MR_DCERN_COLOUR_* */
+ u8 __reserved1:4;
+ enum qm_dc_portal portal:2;
+#else
+ enum qm_dc_portal portal:2;
+ u8 __reserved1:4;
+ u8 colour:2; /* See QM_MR_DCERN_COLOUR_* */
+#endif
+ u16 __reserved2;
+ u8 rc; /* Rejection Code */
+ u32 __reserved3:24;
+ u32 fqid; /* 24-bit */
+ u32 tag;
+ struct qm_fd fd; /* this has alignment 8 */
+ } __packed __rte_aligned(8) dcern;
+ struct {
+ u8 verb;
+ u8 fqs; /* Frame Queue Status */
+ u8 __reserved1[6];
+ u32 fqid; /* 24-bit */
+ u32 contextB;
+ u8 __reserved2[16];
+ } __packed __rte_aligned(8) fq; /* FQRN/FQRNI/FQRL/FQPN */
+ };
+ u8 __reserved2[32];
+} __packed __rte_aligned(8);
+#define QM_MR_VERB_VBIT 0x80
+/*
+ * ERNs originating from direct-connect portals ("dcern") use 0x20 as a verb
+ * which would be invalid as a s/w enqueue verb. A s/w ERN can be distinguished
+ * from the other MR types by noting if the 0x20 bit is unset.
+ */
+#define QM_MR_VERB_TYPE_MASK 0x27
+#define QM_MR_VERB_DC_ERN 0x20
+#define QM_MR_VERB_FQRN 0x21
+#define QM_MR_VERB_FQRNI 0x22
+#define QM_MR_VERB_FQRL 0x23
+#define QM_MR_VERB_FQPN 0x24
+#define QM_MR_RC_MASK 0xf0 /* contains one of; */
+#define QM_MR_RC_CGR_TAILDROP 0x00
+#define QM_MR_RC_WRED 0x10
+#define QM_MR_RC_ERROR 0x20
+#define QM_MR_RC_ORPWINDOW_EARLY 0x30
+#define QM_MR_RC_ORPWINDOW_LATE 0x40
+#define QM_MR_RC_FQ_TAILDROP 0x50
+#define QM_MR_RC_ORPWINDOW_RETIRED 0x60
+#define QM_MR_RC_ORP_ZERO 0x70
+#define QM_MR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
+#define QM_MR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
+#define QM_MR_DCERN_COLOUR_GREEN 0x00
+#define QM_MR_DCERN_COLOUR_YELLOW 0x01
+#define QM_MR_DCERN_COLOUR_RED 0x02
+#define QM_MR_DCERN_COLOUR_OVERRIDE 0x03
+/*
+ * An identical structure of FQD fields is present in the "Init FQ" command and
+ * the "Query FQ" result, it's suctioned out into the "struct qm_fqd" type.
+ * Within that, the 'stashing' and 'taildrop' pieces are also factored out, the
+ * latter has two inlines to assist with converting to/from the mant+exp
+ * representation.
+ */
+struct qm_fqd_stashing {
+ /* See QM_STASHING_EXCL_<...> */
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 exclusive;
+ u8 __reserved1:2;
+ /* Numbers of cachelines */
+ u8 annotation_cl:2;
+ u8 data_cl:2;
+ u8 context_cl:2;
+#else
+ u8 context_cl:2;
+ u8 data_cl:2;
+ u8 annotation_cl:2;
+ u8 __reserved1:2;
+ u8 exclusive;
+#endif
+} __packed;
+struct qm_fqd_taildrop {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 __reserved1:3;
+ u16 mant:8;
+ u16 exp:5;
+#else
+ u16 exp:5;
+ u16 mant:8;
+ u16 __reserved1:3;
+#endif
+} __packed;
+struct qm_fqd_oac {
+ /* "Overhead Accounting Control", see QM_OAC_<...> */
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 oac:2; /* "Overhead Accounting Control" */
+ u8 __reserved1:6;
+#else
+ u8 __reserved1:6;
+ u8 oac:2; /* "Overhead Accounting Control" */
+#endif
+ /* Two's-complement value (-128 to +127) */
+ signed char oal; /* "Overhead Accounting Length" */
+} __packed;
+struct qm_fqd {
+ union {
+ u8 orpc;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 __reserved1:2;
+ u8 orprws:3;
+ u8 oa:1;
+ u8 olws:2;
+#else
+ u8 olws:2;
+ u8 oa:1;
+ u8 orprws:3;
+ u8 __reserved1:2;
+#endif
+ } __packed;
+ };
+ u8 cgid;
+ u16 fq_ctrl; /* See QM_FQCTRL_<...> */
+ union {
+ u16 dest_wq;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 channel:13; /* qm_channel */
+ u16 wq:3;
+#else
+ u16 wq:3;
+ u16 channel:13; /* qm_channel */
+#endif
+ } __packed dest;
+ };
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 __reserved2:1;
+ u16 ics_cred:15;
+#else
+ u16 ics_cred:15;
+ u16 __reserved2:1;
+#endif
+ /*
+ * For "Initialize Frame Queue" commands, the write-enable mask
+ * determines whether 'td' or 'oac_init' is observed. For query
+ * commands, this field is always 'td', and 'oac_query' (below) reflects
+ * the Overhead ACcounting values.
+ */
+ union {
+ uint16_t opaque_td;
+ struct qm_fqd_taildrop td;
+ struct qm_fqd_oac oac_init;
+ };
+ u32 context_b;
+ union {
+ /* Treat it as 64-bit opaque */
+ u64 opaque;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u32 hi;
+ u32 lo;
+#else
+ u32 lo;
+ u32 hi;
+#endif
+ };
+ /* Treat it as s/w portal stashing config */
+ /* see "FQD Context_A field used for [...]" */
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ struct qm_fqd_stashing stashing;
+ /*
+ * 48-bit address of FQ context to
+ * stash, must be cacheline-aligned
+ */
+ u16 context_hi;
+ u32 context_lo;
+#else
+ u32 context_lo;
+ u16 context_hi;
+ struct qm_fqd_stashing stashing;
+#endif
+ } __packed;
+ } context_a;
+ struct qm_fqd_oac oac_query;
+} __packed;
+/* 64-bit converters for context_hi/lo */
+static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd)
+{
+ return ((u64)fqd->context_a.context_hi << 32) |
+ (u64)fqd->context_a.context_lo;
+}
+
+static inline dma_addr_t qm_fqd_stashing_addr(const struct qm_fqd *fqd)
+{
+ return (dma_addr_t)qm_fqd_stashing_get64(fqd);
+}
+
+static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd)
+{
+ return ((u64)fqd->context_a.hi << 32) |
+ (u64)fqd->context_a.lo;
+}
+
+static inline void qm_fqd_stashing_set64(struct qm_fqd *fqd, u64 addr)
+{
+ fqd->context_a.context_hi = upper_32_bits(addr);
+ fqd->context_a.context_lo = lower_32_bits(addr);
+}
+
+static inline void qm_fqd_context_a_set64(struct qm_fqd *fqd, u64 addr)
+{
+ fqd->context_a.hi = upper_32_bits(addr);
+ fqd->context_a.lo = lower_32_bits(addr);
+}
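+
+/* Illustrative sketch: enable stashing of one cacheline each of annotation,
+ * frame data and FQ context, with the context stashed from the caller's
+ * cacheline-aligned physical address 'ctx_phys';
+ *
+ *     struct qm_fqd fqd;
+ *     memset(&fqd, 0, sizeof(fqd));
+ *     fqd.fq_ctrl |= QM_FQCTRL_CTXASTASHING;
+ *     fqd.context_a.stashing.annotation_cl = 1;
+ *     fqd.context_a.stashing.data_cl = 1;
+ *     fqd.context_a.stashing.context_cl = 1;
+ *     qm_fqd_stashing_set64(&fqd, ctx_phys);
+ */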
+
+/* convert a threshold value into mant+exp representation */
+static inline int qm_fqd_taildrop_set(struct qm_fqd_taildrop *td, u32 val,
+ int roundup)
+{
+ u32 e = 0;
+ int oddbit = 0;
+
+ if (val > 0xe0000000)
+ return -ERANGE;
+ while (val > 0xff) {
+ oddbit = val & 1;
+ val >>= 1;
+ e++;
+ if (roundup && oddbit)
+ val++;
+ }
+ td->exp = e;
+ td->mant = val;
+ return 0;
+}
+
+/* and the other direction */
+static inline u32 qm_fqd_taildrop_get(const struct qm_fqd_taildrop *td)
+{
+ return (u32)td->mant << td->exp;
+}
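+
+/* Worked example: a threshold of 12288 is encoded as mant=192, exp=6 (since
+ * 192 << 6 == 12288), and qm_fqd_taildrop_get() recovers the original value;
+ *
+ *     struct qm_fqd_taildrop td;
+ *     qm_fqd_taildrop_set(&td, 12288, 0);    [td.mant == 192, td.exp == 6]
+ *     u32 thresh = qm_fqd_taildrop_get(&td); [thresh == 12288]
+ */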
+
+
+/* See "Frame Queue Descriptor (FQD)" */
+/* Frame Queue Descriptor (FQD) field 'fq_ctrl' uses these constants */
+#define QM_FQCTRL_MASK 0x07ff /* 'fq_ctrl' flags; */
+#define QM_FQCTRL_CGE 0x0400 /* Congestion Group Enable */
+#define QM_FQCTRL_TDE 0x0200 /* Tail-Drop Enable */
+#define QM_FQCTRL_ORP 0x0100 /* ORP Enable */
+#define QM_FQCTRL_CTXASTASHING 0x0080 /* Context-A stashing */
+#define QM_FQCTRL_CPCSTASH 0x0040 /* CPC Stash Enable */
+#define QM_FQCTRL_FORCESFDR 0x0008 /* High-priority SFDRs */
+#define QM_FQCTRL_AVOIDBLOCK 0x0004 /* Don't block active */
+#define QM_FQCTRL_HOLDACTIVE 0x0002 /* Hold active in portal */
+#define QM_FQCTRL_PREFERINCACHE 0x0001 /* Aggressively cache FQD */
+#define QM_FQCTRL_LOCKINCACHE QM_FQCTRL_PREFERINCACHE /* older naming */
+
+/* See "FQD Context_A field used for [...] */
+/* Frame Queue Descriptor (FQD) field 'CONTEXT_A' uses these constants */
+#define QM_STASHING_EXCL_ANNOTATION 0x04
+#define QM_STASHING_EXCL_DATA 0x02
+#define QM_STASHING_EXCL_CTX 0x01
+
+/* See "Intra Class Scheduling" */
+/* FQD field 'OAC' (Overhead ACcounting) uses these constants */
+#define QM_OAC_ICS 0x2 /* Accounting for Intra-Class Scheduling */
+#define QM_OAC_CG 0x1 /* Accounting for Congestion Groups */
+
+/*
+ * This struct represents the 32-bit "WR_PARM_[GYR]" parameters in CGR fields
+ * and associated commands/responses. The WRED parameters are calculated from
+ * these fields as follows;
+ * MaxTH = MA * (2 ^ Mn)
+ * Slope = SA / (2 ^ Sn)
+ * MaxP = 4 * (Pn + 1)
+ */
+struct qm_cgr_wr_parm {
+ union {
+ u32 word;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u32 MA:8;
+ u32 Mn:5;
+ u32 SA:7; /* must be between 64-127 */
+ u32 Sn:6;
+ u32 Pn:6;
+#else
+ u32 Pn:6;
+ u32 Sn:6;
+ u32 SA:7; /* must be between 64-127 */
+ u32 Mn:5;
+ u32 MA:8;
+#endif
+ } __packed;
+ };
+} __packed;
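+/* Worked example: MA=64, Mn=4, SA=64, Sn=8 and Pn=3 yield
+ *     MaxTH = 64 * (2 ^ 4) = 1024
+ *     Slope = 64 / (2 ^ 8) = 0.25
+ *     MaxP  = 4 * (3 + 1)  = 16
+ */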
+/*
+ * This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding
+ * management commands, this is padded to a 16-bit structure field, so that's
+ * how we represent it here. The congestion state threshold is calculated from
+ * these fields as follows;
+ * CS threshold = TA * (2 ^ Tn)
+ */
+struct qm_cgr_cs_thres {
+ union {
+ u16 hword;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 __reserved:3;
+ u16 TA:8;
+ u16 Tn:5;
+#else
+ u16 Tn:5;
+ u16 TA:8;
+ u16 __reserved:3;
+#endif
+ } __packed;
+ };
+} __packed;
+/*
+ * This identical structure of CGR fields is present in the "Init/Modify CGR"
+ * commands and the "Query CGR" result, so it is factored out here into its
+ * own struct.
+ */
+struct __qm_mc_cgr {
+ struct qm_cgr_wr_parm wr_parm_g;
+ struct qm_cgr_wr_parm wr_parm_y;
+ struct qm_cgr_wr_parm wr_parm_r;
+ u8 wr_en_g; /* boolean, use QM_CGR_EN */
+ u8 wr_en_y; /* boolean, use QM_CGR_EN */
+ u8 wr_en_r; /* boolean, use QM_CGR_EN */
+ u8 cscn_en; /* boolean, use QM_CGR_EN */
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */
+ u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */
+#else
+ u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */
+ u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */
+#endif
+ };
+ u32 cscn_targ; /* use QM_CGR_TARG_* */
+ };
+ u8 cstd_en; /* boolean, use QM_CGR_EN */
+ u8 cs; /* boolean, only used in query response */
+ union {
+ struct qm_cgr_cs_thres cs_thres;
+ /* use qm_cgr_cs_thres_set64() */
+ u16 __cs_thres;
+ };
+ u8 mode; /* QMAN_CGR_MODE_FRAME not supported in rev1.0 */
+} __packed;
+#define QM_CGR_EN 0x01 /* For wr_en_*, cscn_en, cstd_en */
+#define QM_CGR_TARG_UDP_CTRL_WRITE_BIT 0x8000 /* value written to portal bit*/
+#define QM_CGR_TARG_UDP_CTRL_DCP 0x4000 /* 0: SWP, 1: DCP */
+#define QM_CGR_TARG_PORTAL(n) (0x80000000 >> (n)) /* s/w portal, 0-9 */
+#define QM_CGR_TARG_FMAN0 0x00200000 /* direct-connect portal: fman0 */
+#define QM_CGR_TARG_FMAN1 0x00100000 /* : fman1 */
+/* Convert CGR thresholds to/from "cs_thres" format */
+static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th)
+{
+ return (u64)th->TA << th->Tn;
+}
+
+static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
+ int roundup)
+{
+ u32 e = 0;
+ int oddbit = 0;
+
+ while (val > 0xff) {
+ oddbit = val & 1;
+ val >>= 1;
+ e++;
+ if (roundup && oddbit)
+ val++;
+ }
+ th->Tn = e;
+ th->TA = val;
+ return 0;
+}
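+
+/* Worked example: qm_cgr_cs_thres_set64() encodes a threshold of 65536 as
+ * TA=128, Tn=9 (128 << 9 == 65536), which qm_cgr_cs_thres_get64() recovers.
+ */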
+
+/* See 1.5.8.5.1: "Initialize FQ" */
+/* See 1.5.8.5.2: "Query FQ" */
+/* See 1.5.8.5.3: "Query FQ Non-Programmable Fields" */
+/* See 1.5.8.5.4: "Alter FQ State Commands" */
+/* See 1.5.8.6.1: "Initialize/Modify CGR" */
+/* See 1.5.8.6.2: "CGR Test Write" */
+/* See 1.5.8.6.3: "Query CGR" */
+/* See 1.5.8.6.4: "Query Congestion Group State" */
+struct qm_mcc_initfq {
+ u8 __reserved1;
+ u16 we_mask; /* Write Enable Mask */
+ u32 fqid; /* 24-bit */
+ u16 count; /* Initialises 'count+1' FQDs */
+ struct qm_fqd fqd; /* the FQD fields go here */
+ u8 __reserved3[30];
+} __packed;
+struct qm_mcc_queryfq {
+ u8 __reserved1[3];
+ u32 fqid; /* 24-bit */
+ u8 __reserved2[56];
+} __packed;
+struct qm_mcc_queryfq_np {
+ u8 __reserved1[3];
+ u32 fqid; /* 24-bit */
+ u8 __reserved2[56];
+} __packed;
+struct qm_mcc_alterfq {
+ u8 __reserved1[3];
+ u32 fqid; /* 24-bit */
+ u8 __reserved2;
+ u8 count; /* number of consecutive FQID */
+ u8 __reserved3[10];
+ u32 context_b; /* frame queue context b */
+ u8 __reserved4[40];
+} __packed;
+struct qm_mcc_initcgr {
+ u8 __reserved1;
+ u16 we_mask; /* Write Enable Mask */
+ struct __qm_mc_cgr cgr; /* CGR fields */
+ u8 __reserved2[2];
+ u8 cgid;
+ u8 __reserved4[32];
+} __packed;
+struct qm_mcc_cgrtestwrite {
+ u8 __reserved1[2];
+ u8 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
+ u32 i_bcnt_lo; /* low 32-bits of 40-bit */
+ u8 __reserved2[23];
+ u8 cgid;
+ u8 __reserved3[32];
+} __packed;
+struct qm_mcc_querycgr {
+ u8 __reserved1[30];
+ u8 cgid;
+ u8 __reserved2[32];
+} __packed;
+struct qm_mcc_querycongestion {
+ u8 __reserved[63];
+} __packed;
+struct qm_mcc_querywq {
+ u8 __reserved;
+ /* select channel if verb != QUERYWQ_DEDICATED */
+ union {
+ u16 channel_wq; /* ignores wq (3 lsbits) */
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 id:13; /* qm_channel */
+ u16 __reserved1:3;
+#else
+ u16 __reserved1:3;
+ u16 id:13; /* qm_channel */
+#endif
+ } __packed channel;
+ };
+ u8 __reserved2[60];
+} __packed;
+
+struct qm_mc_command {
+ u8 __dont_write_directly__verb;
+ union {
+ struct qm_mcc_initfq initfq;
+ struct qm_mcc_queryfq queryfq;
+ struct qm_mcc_queryfq_np queryfq_np;
+ struct qm_mcc_alterfq alterfq;
+ struct qm_mcc_initcgr initcgr;
+ struct qm_mcc_cgrtestwrite cgrtestwrite;
+ struct qm_mcc_querycgr querycgr;
+ struct qm_mcc_querycongestion querycongestion;
+ struct qm_mcc_querywq querywq;
+ };
+} __packed;
+
+/* INITFQ-specific flags */
+#define QM_INITFQ_WE_MASK 0x01ff /* 'Write Enable' flags; */
+#define QM_INITFQ_WE_OAC 0x0100
+#define QM_INITFQ_WE_ORPC 0x0080
+#define QM_INITFQ_WE_CGID 0x0040
+#define QM_INITFQ_WE_FQCTRL 0x0020
+#define QM_INITFQ_WE_DESTWQ 0x0010
+#define QM_INITFQ_WE_ICSCRED 0x0008
+#define QM_INITFQ_WE_TDTHRESH 0x0004
+#define QM_INITFQ_WE_CONTEXTB 0x0002
+#define QM_INITFQ_WE_CONTEXTA 0x0001
+/* INITCGR/MODIFYCGR-specific flags */
+#define QM_CGR_WE_MASK 0x07ff /* 'Write Enable Mask'; */
+#define QM_CGR_WE_WR_PARM_G 0x0400
+#define QM_CGR_WE_WR_PARM_Y 0x0200
+#define QM_CGR_WE_WR_PARM_R 0x0100
+#define QM_CGR_WE_WR_EN_G 0x0080
+#define QM_CGR_WE_WR_EN_Y 0x0040
+#define QM_CGR_WE_WR_EN_R 0x0020
+#define QM_CGR_WE_CSCN_EN 0x0010
+#define QM_CGR_WE_CSCN_TARG 0x0008
+#define QM_CGR_WE_CSTD_EN 0x0004
+#define QM_CGR_WE_CS_THRES 0x0002
+#define QM_CGR_WE_MODE 0x0001
+
+struct qm_mcr_initfq {
+ u8 __reserved1[62];
+} __packed;
+struct qm_mcr_queryfq {
+ u8 __reserved1[8];
+ struct qm_fqd fqd; /* the FQD fields are here */
+ u8 __reserved2[30];
+} __packed;
+struct qm_mcr_queryfq_np {
+ u8 __reserved1;
+ u8 state; /* QM_MCR_NP_STATE_*** */
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 __reserved2;
+ u32 fqd_link:24;
+ u16 __reserved3:2;
+ u16 odp_seq:14;
+ u16 __reserved4:2;
+ u16 orp_nesn:14;
+ u16 __reserved5:1;
+ u16 orp_ea_hseq:15;
+ u16 __reserved6:1;
+ u16 orp_ea_tseq:15;
+ u8 __reserved7;
+ u32 orp_ea_hptr:24;
+ u8 __reserved8;
+ u32 orp_ea_tptr:24;
+ u8 __reserved9;
+ u32 pfdr_hptr:24;
+ u8 __reserved10;
+ u32 pfdr_tptr:24;
+ u8 __reserved11[5];
+ u8 __reserved12:7;
+ u8 is:1;
+ u16 ics_surp;
+ u32 byte_cnt;
+ u8 __reserved13;
+ u32 frm_cnt:24;
+ u32 __reserved14;
+ u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
+ u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
+ u16 __reserved15;
+ u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
+ u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
+ u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
+#else
+ u8 __reserved2;
+ u32 fqd_link:24;
+
+ u16 odp_seq:14;
+ u16 __reserved3:2;
+
+ u16 orp_nesn:14;
+ u16 __reserved4:2;
+
+ u16 orp_ea_hseq:15;
+ u16 __reserved5:1;
+
+ u16 orp_ea_tseq:15;
+ u16 __reserved6:1;
+
+ u8 __reserved7;
+ u32 orp_ea_hptr:24;
+
+ u8 __reserved8;
+ u32 orp_ea_tptr:24;
+
+ u8 __reserved9;
+ u32 pfdr_hptr:24;
+
+ u8 __reserved10;
+ u32 pfdr_tptr:24;
+
+ u8 __reserved11[5];
+ u8 is:1;
+ u8 __reserved12:7;
+ u16 ics_surp;
+ u32 byte_cnt;
+ u8 __reserved13;
+ u32 frm_cnt:24;
+ u32 __reserved14;
+ u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
+ u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
+ u16 __reserved15;
+ u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
+ u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
+ u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
+#endif
+} __packed;
+
+struct qm_mcr_alterfq {
+ u8 fqs; /* Frame Queue Status */
+ u8 __reserved1[61];
+} __packed;
+struct qm_mcr_initcgr {
+ u8 __reserved1[62];
+} __packed;
+struct qm_mcr_cgrtestwrite {
+ u16 __reserved1;
+ struct __qm_mc_cgr cgr; /* CGR fields */
+ u8 __reserved2[3];
+ u32 __reserved3:24;
+ u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
+ u32 i_bcnt_lo; /* low 32-bits of 40-bit */
+ u32 __reserved4:24;
+ u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
+ u32 a_bcnt_lo; /* low 32-bits of 40-bit */
+ u16 lgt; /* Last Group Tick */
+ u16 wr_prob_g;
+ u16 wr_prob_y;
+ u16 wr_prob_r;
+ u8 __reserved5[8];
+} __packed;
+struct qm_mcr_querycgr {
+ u16 __reserved1;
+ struct __qm_mc_cgr cgr; /* CGR fields */
+ u8 __reserved2[3];
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u32 __reserved3:24;
+ u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
+ u32 i_bcnt_lo; /* low 32-bits of 40-bit */
+#else
+ u32 i_bcnt_lo; /* low 32-bits of 40-bit */
+ u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
+ u32 __reserved3:24;
+#endif
+ };
+ u64 i_bcnt;
+ };
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u32 __reserved4:24;
+ u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
+ u32 a_bcnt_lo; /* low 32-bits of 40-bit */
+#else
+ u32 a_bcnt_lo; /* low 32-bits of 40-bit */
+ u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
+ u32 __reserved4:24;
+#endif
+ };
+ u64 a_bcnt;
+ };
+ union {
+ u32 cscn_targ_swp[4];
+ u8 __reserved5[16];
+ };
+} __packed;
+
+struct __qm_mcr_querycongestion {
+ u32 state[8];
+};
+
+struct qm_mcr_querycongestion {
+ u8 __reserved[30];
+ /* Access this struct using QM_MCR_QUERYCONGESTION() */
+ struct __qm_mcr_querycongestion state;
+} __packed;
+struct qm_mcr_querywq {
+ union {
+ u16 channel_wq; /* ignores wq (3 lsbits) */
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 id:13; /* qm_channel */
+ u16 __reserved:3;
+#else
+ u16 __reserved:3;
+ u16 id:13; /* qm_channel */
+#endif
+ } __packed channel;
+ };
+ u8 __reserved[28];
+ u32 wq_len[8];
+} __packed;
+
+struct qm_mc_result {
+ u8 verb;
+ u8 result;
+ union {
+ struct qm_mcr_initfq initfq;
+ struct qm_mcr_queryfq queryfq;
+ struct qm_mcr_queryfq_np queryfq_np;
+ struct qm_mcr_alterfq alterfq;
+ struct qm_mcr_initcgr initcgr;
+ struct qm_mcr_cgrtestwrite cgrtestwrite;
+ struct qm_mcr_querycgr querycgr;
+ struct qm_mcr_querycongestion querycongestion;
+ struct qm_mcr_querywq querywq;
+ };
+} __packed;
+
+#define QM_MCR_VERB_RRID 0x80
+#define QM_MCR_VERB_MASK QM_MCC_VERB_MASK
+#define QM_MCR_VERB_INITFQ_PARKED QM_MCC_VERB_INITFQ_PARKED
+#define QM_MCR_VERB_INITFQ_SCHED QM_MCC_VERB_INITFQ_SCHED
+#define QM_MCR_VERB_QUERYFQ QM_MCC_VERB_QUERYFQ
+#define QM_MCR_VERB_QUERYFQ_NP QM_MCC_VERB_QUERYFQ_NP
+#define QM_MCR_VERB_QUERYWQ QM_MCC_VERB_QUERYWQ
+#define QM_MCR_VERB_QUERYWQ_DEDICATED QM_MCC_VERB_QUERYWQ_DEDICATED
+#define QM_MCR_VERB_ALTER_SCHED QM_MCC_VERB_ALTER_SCHED
+#define QM_MCR_VERB_ALTER_FE QM_MCC_VERB_ALTER_FE
+#define QM_MCR_VERB_ALTER_RETIRE QM_MCC_VERB_ALTER_RETIRE
+#define QM_MCR_VERB_ALTER_OOS QM_MCC_VERB_ALTER_OOS
+#define QM_MCR_RESULT_NULL 0x00
+#define QM_MCR_RESULT_OK 0xf0
+#define QM_MCR_RESULT_ERR_FQID 0xf1
+#define QM_MCR_RESULT_ERR_FQSTATE 0xf2
+#define QM_MCR_RESULT_ERR_NOTEMPTY 0xf3 /* OOS fails if FQ is !empty */
+#define QM_MCR_RESULT_ERR_BADCHANNEL 0xf4
+#define QM_MCR_RESULT_PENDING 0xf8
+#define QM_MCR_RESULT_ERR_BADCOMMAND 0xff
+#define QM_MCR_NP_STATE_FE 0x10
+#define QM_MCR_NP_STATE_R 0x08
+#define QM_MCR_NP_STATE_MASK 0x07 /* Reads FQD::STATE; */
+#define QM_MCR_NP_STATE_OOS 0x00
+#define QM_MCR_NP_STATE_RETIRED 0x01
+#define QM_MCR_NP_STATE_TEN_SCHED 0x02
+#define QM_MCR_NP_STATE_TRU_SCHED 0x03
+#define QM_MCR_NP_STATE_PARKED 0x04
+#define QM_MCR_NP_STATE_ACTIVE 0x05
+#define QM_MCR_NP_PTR_MASK 0x07ff /* for RA[12] & OD[123] */
+#define QM_MCR_NP_RA1_NRA(v) (((v) >> 14) & 0x3) /* FQD::NRA */
+#define QM_MCR_NP_RA2_IT(v) (((v) >> 14) & 0x1) /* FQD::IT */
+#define QM_MCR_NP_OD1_NOD(v) (((v) >> 14) & 0x3) /* FQD::NOD */
+#define QM_MCR_NP_OD3_NPC(v) (((v) >> 14) & 0x3) /* FQD::NPC */
+#define QM_MCR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
+#define QM_MCR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
+/* This extracts the state for congestion group 'n' from a query response.
+ * Eg.
+ * u8 cgr = [...];
+ * struct qm_mc_result *res = [...];
+ * printf("congestion group %d congestion state: %d\n", cgr,
+ * QM_MCR_QUERYCONGESTION(&res->querycongestion.state, cgr));
+ */
+#define __CGR_WORD(num) ((num) >> 5)
+#define __CGR_SHIFT(num) ((num) & 0x1f)
+#define __CGR_NUM (sizeof(struct __qm_mcr_querycongestion) << 3)
+static inline int QM_MCR_QUERYCONGESTION(struct __qm_mcr_querycongestion *p,
+ u8 cgr)
+{
+ return p->state[__CGR_WORD(cgr)] & (0x80000000 >> __CGR_SHIFT(cgr));
+}
+
+/* Portal and Frame Queues */
+/* Represents a managed portal */
+struct qman_portal;
+
+/*
+ * This object type represents QMan frame queue descriptors (FQD), it is
+ * cacheline-aligned, and initialised by qman_create_fq(). The structure is
+ * defined further down.
+ */
+struct qman_fq;
+
+/*
+ * This object type represents a QMan congestion group, it is defined further
+ * down.
+ */
+struct qman_cgr;
+
+/*
+ * This enum, and the callback type that returns it, are used when handling
+ * dequeued frames via DQRR. Note that for "null" callbacks registered with the
+ * portal object (for handling dequeues that do not demux because context_b is
+ * NULL), the return value *MUST* be qman_cb_dqrr_consume.
+ */
+enum qman_cb_dqrr_result {
+ /* DQRR entry can be consumed */
+ qman_cb_dqrr_consume,
+ /* Like _consume, but requests parking - FQ must be held-active */
+ qman_cb_dqrr_park,
+ /* Does not consume, for DCA mode only. This allows out-of-order
+ * consumes by explicit calls to qman_dca() and/or the use of implicit
+ * DCA via EQCR entries.
+ */
+ qman_cb_dqrr_defer,
+ /*
+ * Stop processing without consuming this ring entry. Exits the current
+ * qman_p_poll_dqrr() or interrupt-handling, as appropriate. If within
+ * an interrupt handler, the callback would typically call
+ * qman_irqsource_remove(QM_PIRQ_DQRI) before returning this value,
+ * otherwise the interrupt will reassert immediately.
+ */
+ qman_cb_dqrr_stop,
+ /* Like qman_cb_dqrr_stop, but consumes the current entry. */
+ qman_cb_dqrr_consume_stop
+};
+
+typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr);
+
+typedef enum qman_cb_dqrr_result (*qman_dpdk_cb_dqrr)(void *event,
+ struct qman_portal *qm,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr,
+ void **bd);
+
+/* This callback type is used when handling buffers in dpdk pull mode */
+typedef void (*qman_dpdk_pull_cb_dqrr)(struct qman_fq **fq,
+ struct qm_dqrr_entry **dqrr,
+ void **bufs,
+ int num_bufs);
+
+typedef void (*qman_dpdk_cb_prepare)(struct qm_dqrr_entry *dq, void **bufs);
+
+/*
+ * This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
+ * are always consumed after the callback returns.
+ */
+typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
+ const struct qm_mr_entry *msg);
+
+/* This callback type is used when handling DCP ERNs */
+typedef void (*qman_cb_dc_ern)(struct qman_portal *qm,
+ const struct qm_mr_entry *msg);
+/*
+ * s/w-visible states. Ie. tentatively scheduled + truly scheduled + active +
+ * held-active + held-suspended are just "sched". Things like "retired" will not
+ * be assumed until it is complete (ie. QMAN_FQ_STATE_CHANGING is set until
+ * then, to indicate it's completing and to gate attempts to retry the retire
+ * command). Note, park commands do not set QMAN_FQ_STATE_CHANGING because it's
+ * technically impossible in the case of enqueue DCAs (which refer to DQRR ring
+ * index rather than the FQ that ring entry corresponds to), so repeated park
+ * commands are allowed (if you're silly enough to try) but won't change FQ
+ * state, and the resulting park notifications move FQs from "sched" to
+ * "parked".
+ */
+enum qman_fq_state {
+ qman_fq_state_oos,
+ qman_fq_state_parked,
+ qman_fq_state_sched,
+ qman_fq_state_retired
+};
+
+
+/*
+ * Frame queue objects (struct qman_fq) are stored within memory passed to
+ * qman_create_fq(), as this allows stashing of caller-provided demux callback
+ * pointers at no extra cost to stashing of (driver-internal) FQ state. If the
+ * caller wishes to add per-FQ state and have it benefit from dequeue-stashing,
+ * they should;
+ *
+ * (a) extend the qman_fq structure with their state; eg.
+ *
+ * // myfq is allocated and driver_fq callbacks filled in;
+ * struct my_fq {
+ * struct qman_fq base;
+ * int an_extra_field;
+ * [ ... add other fields to be associated with each FQ ...]
+ * } *myfq = some_my_fq_allocator();
+ * struct qman_fq *fq = qman_create_fq(fqid, flags, &myfq->base);
+ *
+ * // in a dequeue callback, access extra fields from 'fq' via a cast;
+ * struct my_fq *myfq = (struct my_fq *)fq;
+ * do_something_with(myfq->an_extra_field);
+ * [...]
+ *
+ * (b) when and if configuring the FQ for context stashing, specify however
+ * many cachelines are required to stash 'struct my_fq', to accelerate not
+ * only the QMan driver but the callback as well.
+ */
+
+struct qman_fq_cb {
+ union { /* for dequeued frames */
+ qman_dpdk_cb_dqrr dqrr_dpdk_cb;
+ qman_dpdk_pull_cb_dqrr dqrr_dpdk_pull_cb;
+ qman_cb_dqrr dqrr;
+ };
+ qman_dpdk_cb_prepare dqrr_prepare;
+ qman_cb_mr ern; /* for s/w ERNs */
+ qman_cb_mr fqs; /* frame-queue state changes*/
+};
+
+struct qman_fq {
+ /* Caller of qman_create_fq() provides these demux callbacks */
+ struct qman_fq_cb cb;
+
+ u32 fqid_le;
+ u16 ch_id;
+ u8 cgr_groupid;
+ u8 is_static;
+
+ /* DPDK Interface */
+ void *dpaa_intf;
+
+ struct rte_event ev;
+ /* affined portal in case of static queue */
+ struct qman_portal *qp;
+
+ volatile unsigned long flags;
+
+ enum qman_fq_state state;
+ u32 fqid;
+ spinlock_t fqlock;
+
+ struct rb_node node;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ u32 key;
+#endif
+};
+
+/*
+ * This callback type is used when handling congestion group entry/exit.
+ * 'congested' is non-zero on congestion-entry, and zero on congestion-exit.
+ */
+typedef void (*qman_cb_cgr)(struct qman_portal *qm,
+ struct qman_cgr *cgr, int congested);
+
+struct qman_cgr {
+ /* Set these prior to qman_create_cgr() */
+ u32 cgrid; /* 0..255, but u32 to allow specials like -1, 256, etc.*/
+ qman_cb_cgr cb;
+ /* These are private to the driver */
+ u16 chan; /* portal channel this object is created on */
+ struct list_head node;
+};
+
+/* Flags to qman_create_fq() */
+#define QMAN_FQ_FLAG_NO_ENQUEUE 0x00000001 /* can't enqueue */
+#define QMAN_FQ_FLAG_NO_MODIFY 0x00000002 /* can only enqueue */
+#define QMAN_FQ_FLAG_TO_DCPORTAL 0x00000004 /* consumed by CAAM/PME/Fman */
+#define QMAN_FQ_FLAG_LOCKED 0x00000008 /* multi-core locking */
+#define QMAN_FQ_FLAG_AS_IS 0x00000010 /* query h/w state */
+#define QMAN_FQ_FLAG_DYNAMIC_FQID 0x00000020 /* (de)allocate fqid */
+
+/* Flags to qman_destroy_fq() */
+#define QMAN_FQ_DESTROY_PARKED 0x00000001 /* FQ can be parked or OOS */
+
+/* Flags from qman_fq_state() */
+#define QMAN_FQ_STATE_CHANGING 0x80000000 /* 'state' is changing */
+#define QMAN_FQ_STATE_NE 0x40000000 /* retired FQ isn't empty */
+#define QMAN_FQ_STATE_ORL 0x20000000 /* retired FQ has ORL */
+#define QMAN_FQ_STATE_BLOCKOOS 0xe0000000 /* if any are set, no OOS */
+#define QMAN_FQ_STATE_CGR_EN 0x10000000 /* CGR enabled */
+#define QMAN_FQ_STATE_VDQCR 0x08000000 /* being volatile dequeued */
+
+/* Flags to qman_init_fq() */
+#define QMAN_INITFQ_FLAG_SCHED 0x00000001 /* schedule rather than park */
+#define QMAN_INITFQ_FLAG_LOCAL 0x00000004 /* set dest portal */
+
+/* Flags to qman_enqueue(). NB, the strange numbering is to align with hardware,
+ * bit-wise. (NB: the PME API is sensitive to these precise numberings too, so
+ * any change here should be audited in PME.)
+ */
+#define QMAN_ENQUEUE_FLAG_WATCH_CGR 0x00080000 /* watch congestion state */
+#define QMAN_ENQUEUE_FLAG_DCA 0x00008000 /* perform enqueue-DCA */
+#define QMAN_ENQUEUE_FLAG_DCA_PARK 0x00004000 /* If DCA, requests park */
+#define QMAN_ENQUEUE_FLAG_DCA_PTR(p) /* If DCA, p is DQRR entry */ \
+ (((u32)(p) << 2) & 0x00000f00)
+#define QMAN_ENQUEUE_FLAG_C_GREEN 0x00000000 /* choose one C_*** flag */
+#define QMAN_ENQUEUE_FLAG_C_YELLOW 0x00000008
+#define QMAN_ENQUEUE_FLAG_C_RED 0x00000010
+#define QMAN_ENQUEUE_FLAG_C_OVERRIDE 0x00000018
+/* For the ORP-specific qman_enqueue_orp() variant;
+ * - this flag indicates "Not Last In Sequence", ie. all but the final fragment
+ * of a frame.
+ */
+#define QMAN_ENQUEUE_FLAG_NLIS 0x01000000
+/* - this flag performs no enqueue but fills in an ORP sequence number that
+ * would otherwise block it (eg. if a frame has been dropped).
+ */
+#define QMAN_ENQUEUE_FLAG_HOLE 0x02000000
+/* - this flag performs no enqueue but advances NESN to the given sequence
+ * number.
+ */
+#define QMAN_ENQUEUE_FLAG_NESN 0x04000000
+
+/* Flags to qman_modify_cgr() */
+#define QMAN_CGR_FLAG_USE_INIT 0x00000001
+#define QMAN_CGR_MODE_FRAME 0x00000001
+
+/**
+ * qman_get_portal_index - get portal configuration index
+ */
+int qman_get_portal_index(void);
+
+u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
+ void **bufs);
+
+/**
+ * qman_affine_channel - return the channel ID of a portal
+ * @cpu: the cpu whose affine portal is the subject of the query
+ *
+ * If @cpu is -1, the affine portal for the current CPU will be used. It is a
+ * bug to call this function for any value of @cpu (other than -1) that is not a
+ * member of the cpu mask.
+ */
+u16 qman_affine_channel(int cpu);
+
+unsigned int qman_portal_poll_rx(unsigned int poll_limit,
+ void **bufs, struct qman_portal *q);
+
+/**
+ * qman_set_vdq - Issue a volatile dequeue command
+ * @fq: Frame Queue on which the volatile dequeue command is issued
+ * @num: Number of Frames requested for volatile dequeue
+ * @vdqcr_flags: QM_VDQCR_EXACT flag for the VDQCR command
+ *
+ * This function will issue a volatile dequeue command to the QMAN.
+ */
+int qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags);
+
+/**
+ * qman_dequeue - Get the DQRR entry after volatile dequeue command
+ * @fq: Frame Queue on which the volatile dequeue command is issued
+ *
+ * This function returns the next DQRR entry produced after a volatile dequeue
+ * command is issued. It returns NULL when no further packet is available on
+ * the DQRR.
+ */
+struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq);
+
+/**
+ * qman_dqrr_consume - Consume the DQRR entry after volatile dequeue
+ * @fq: Frame Queue on which the volatile dequeue command is issued
+ * @dq: DQRR entry to consume. This is the one which is provided by the
+ *    'qman_dequeue' command.
+ *
+ * This will consume the DQRR entry and make it available for the next
+ * volatile dequeue.
+ */
+void qman_dqrr_consume(struct qman_fq *fq,
+ struct qm_dqrr_entry *dq);
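+
+/* A minimal volatile-dequeue sketch (compiled out; "example_volatile_dequeue"
+ * is a hypothetical caller, and error handling is elided): qman_set_vdq()
+ * issues the command, qman_dequeue() walks the resulting DQRR entries, and
+ * qman_dqrr_consume() retires each one.
+ */
+#if 0
+static void example_volatile_dequeue(struct qman_fq *fq)
+{
+ struct qm_dqrr_entry *dq;
+
+ if (qman_set_vdq(fq, 4, 0)) /* request up to 4 frames */
+  return;
+ while ((dq = qman_dequeue(fq)) != NULL) {
+  /* ... process dq->fd here ... */
+  qman_dqrr_consume(fq, dq);
+ }
+}
+#endif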
+
+/**
+ * qman_poll_dqrr - process DQRR (fast-path) entries
+ * @limit: the maximum number of DQRR entries to process
+ *
+ * Use of this function requires that DQRR processing not be interrupt-driven.
+ * Ie. the value returned by qman_irqsource_get() should not include
+ * QM_PIRQ_DQRI. If the current CPU is sharing a portal hosted on another CPU,
+ * this function will return -EINVAL, otherwise the return value is >=0 and
+ * represents the number of DQRR entries processed.
+ */
+int qman_poll_dqrr(unsigned int limit);
+
+/**
+ * qman_poll
+ *
+ * Dispatcher logic on a cpu can use this to trigger any maintenance of the
+ * affine portal. There are two classes of portal processing in question;
+ * fast-path (which involves demuxing dequeue ring (DQRR) entries and tracking
+ * enqueue ring (EQCR) consumption), and slow-path (which involves EQCR
+ * thresholds, congestion state changes, etc). This function does whatever
+ * processing is not triggered by interrupts.
+ *
+ * Note, if DQRR and some slow-path processing are poll-driven (rather than
+ * interrupt-driven) then this function uses a heuristic to determine how often
+ * to run slow-path processing - as slow-path processing introduces at least a
+ * minimum latency each time it is run, whereas fast-path (DQRR) processing is
+ * close to zero-cost if there is no work to be done.
+ */
+void qman_poll(void);
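+
+/* A minimal poll-mode dispatcher sketch (compiled out; names are
+ * illustrative): fast-path DQRR processing via qman_poll_dqrr() combined
+ * with the heuristic slow-path maintenance of qman_poll().
+ */
+#if 0
+static void example_dispatcher(volatile int *running)
+{
+ while (*running) {
+  qman_poll_dqrr(16); /* up to 16 fast-path entries per iteration */
+  qman_poll();        /* occasional slow-path maintenance */
+ }
+}
+#endif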
+
+/**
+ * qman_stop_dequeues - Stop h/w dequeuing to the s/w portal
+ *
+ * Disables DQRR processing of the portal. This is reference-counted, so
+ * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
+ * truly re-enable dequeuing.
+ */
+void qman_stop_dequeues(void);
+
+/**
+ * qman_start_dequeues - (Re)start h/w dequeuing to the s/w portal
+ *
+ * Enables DQRR processing of the portal. This is reference-counted, so
+ * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
+ * truly re-enable dequeuing.
+ */
+void qman_start_dequeues(void);
+
+/**
+ * qman_static_dequeue_add - Add pool channels to the portal SDQCR
+ * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
+ *
+ * Adds a set of pool channels to the portal's static dequeue command register
+ * (SDQCR). The requested pools are limited to those the portal has dequeue
+ * access to.
+ */
+void qman_static_dequeue_add(u32 pools, struct qman_portal *qm);
+
+/**
+ * qman_static_dequeue_del - Remove pool channels from the portal SDQCR
+ * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
+ *
+ * Removes a set of pool channels from the portal's static dequeue command
+ * register (SDQCR). The requested pools are limited to those the portal has
+ * dequeue access to.
+ */
+void qman_static_dequeue_del(u32 pools, struct qman_portal *qp);
+
+/**
+ * qman_static_dequeue_get - return the portal's current SDQCR
+ *
+ * Returns the portal's current static dequeue command register (SDQCR). The
+ * entire register is returned, so if only the currently-enabled pool channels
+ * are desired, mask the return value with QM_SDQCR_CHANNELS_POOL_MASK.
+ */
+u32 qman_static_dequeue_get(struct qman_portal *qp);
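+
+/* A sketch of SDQCR manipulation (compiled out; the pool-channel numbers are
+ * arbitrary): add two pool channels to a portal's SDQCR, then remove one.
+ */
+#if 0
+static void example_sdqcr(struct qman_portal *p)
+{
+ qman_static_dequeue_add(QM_SDQCR_CHANNELS_POOL(3) |
+    QM_SDQCR_CHANNELS_POOL(5), p);
+ /* ... traffic runs ... */
+ qman_static_dequeue_del(QM_SDQCR_CHANNELS_POOL(5), p);
+}
+#endif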
+
+/**
+ * qman_dca - Perform a Discrete Consumption Acknowledgment
+ * @dq: the DQRR entry to be consumed
+ * @park_request: indicates whether the held-active @fq should be parked
+ *
+ * Only allowed in DCA-mode portals, for DQRR entries whose handler callback had
+ * previously returned 'qman_cb_dqrr_defer'. NB, as with the other APIs, this
+ * does not take a 'portal' argument but implies the core affine portal from the
+ * cpu that is currently executing the function. For reasons of locking, this
+ * function must be called from the same CPU as that which processed the DQRR
+ * entry in the first place.
+ */
+void qman_dca(const struct qm_dqrr_entry *dq, int park_request);
+
+/**
+ * qman_dca_index - Perform a Discrete Consumption Acknowledgment
+ * @index: the DQRR index to be consumed
+ * @park_request: indicates whether the held-active @fq should be parked
+ *
+ * Only allowed in DCA-mode portals, for DQRR entries whose handler callback had
+ * previously returned 'qman_cb_dqrr_defer'. NB, as with the other APIs, this
+ * does not take a 'portal' argument but implies the core affine portal from the
+ * cpu that is currently executing the function. For reasons of locking, this
+ * function must be called from the same CPU as that which processed the DQRR
+ * entry in the first place.
+ */
+void qman_dca_index(u8 index, int park_request);
+
+/**
+ * qman_eqcr_is_empty - Determine if portal's EQCR is empty
+ *
+ * For use in situations where a cpu-affine caller needs to determine when all
+ * enqueues for the local portal have been processed by Qman but can't use the
+ * QMAN_ENQUEUE_FLAG_WAIT_SYNC flag to do this from the final qman_enqueue().
+ * The function forces tracking of EQCR consumption (which normally doesn't
+ * happen until enqueue processing needs to find space to put new enqueue
+ * commands), and returns zero if the ring still has unprocessed entries,
+ * non-zero if it is empty.
+ */
+int qman_eqcr_is_empty(void);
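+
+/* A sketch (compiled out): busy-wait until the local portal's EQCR drains,
+ * eg. before tearing the portal down. A real caller would bound or yield.
+ */
+#if 0
+static void example_wait_eqcr_empty(void)
+{
+ while (!qman_eqcr_is_empty())
+  ; /* spin until Qman has consumed every enqueue command */
+}
+#endif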
+
+/**
+ * qman_set_dc_ern - Set the handler for DCP enqueue rejection notifications
+ * @handler: callback for processing DCP ERNs
+ * @affine: whether this handler is specific to the locally affine portal
+ *
+ * If a hardware block's interface to Qman (ie. its direct-connect portal, or
+ * DCP) is configured not to receive enqueue rejections, then any enqueues
+ * through that DCP that are rejected will be sent to a given software portal.
+ * If @affine is non-zero, then this handler will only be used for DCP ERNs
+ * received on the portal affine to the current CPU. If multiple CPUs share a
+ * portal and they all call this function, they will be setting the handler for
+ * the same portal! If @affine is zero, then this handler will be global to all
+ * portals handled by this instance of the driver. Only those portals that do
+ * not have their own affine handler will use the global handler.
+ */
+void qman_set_dc_ern(qman_cb_dc_ern handler, int affine);
+
+ /* FQ management */
+ /* ------------- */
+/**
+ * qman_create_fq - Allocates a FQ
+ * @fqid: the index of the FQD to encapsulate, must be "Out of Service"
+ * @flags: bit-mask of QMAN_FQ_FLAG_*** options
+ * @fq: memory for storing the 'fq', with callbacks filled in
+ *
+ * Creates a frame queue object for the given @fqid, unless the
+ * QMAN_FQ_FLAG_DYNAMIC_FQID flag is set in @flags, in which case a FQID is
+ * dynamically allocated (or the function fails if none are available). Once
+ * created, the caller should not touch the memory at 'fq' except as extended to
+ * adjacent memory for user-defined fields (see the definition of "struct
+ * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to
+ * pre-existing frame-queues that aren't to be otherwise interfered with, it
+ * prevents all other modifications to the frame queue. The TO_DCPORTAL flag
+ * causes the driver to honour any contextB modifications requested in the
+ * qm_init_fq() API, as this indicates the frame queue will be consumed by a
+ * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by
+ * software portals, the contextB field is controlled by the driver and can't be
+ * modified by the caller. If the AS_IS flag is specified, management commands
+ * will be used on portal @p to query state for frame queue @fqid and construct
+ * a frame queue object based on that, rather than assuming/requiring that it be
+ * Out of Service.
+ */
+int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
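+
+/* A sketch (compiled out): create an FQ object with a dynamically allocated
+ * FQID. The caller is assumed to have filled in fq->cb beforehand.
+ */
+#if 0
+static int example_create_fq(struct qman_fq *fq)
+{
+ /* @fqid is ignored when QMAN_FQ_FLAG_DYNAMIC_FQID is passed */
+ return qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
+}
+#endif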
+
+/**
+ * qman_destroy_fq - Deallocates a FQ
+ * @fq: the frame queue object to release
+ * @flags: bit-mask of QMAN_FQ_FREE_*** options
+ *
+ * The memory for this frame queue object ('fq' provided in qman_create_fq()) is
+ * not deallocated but the caller regains ownership, to do with as desired. The
+ * FQ must be in the 'out-of-service' state unless the QMAN_FQ_FREE_PARKED flag
+ * is specified, in which case it may also be in the 'parked' state.
+ */
+void qman_destroy_fq(struct qman_fq *fq, u32 flags);
+
+/**
+ * qman_fq_fqid - Queries the frame queue ID of a FQ object
+ * @fq: the frame queue object to query
+ */
+u32 qman_fq_fqid(struct qman_fq *fq);
+
+/**
+ * qman_fq_state - Queries the state of a FQ object
+ * @fq: the frame queue object to query
+ * @state: pointer to state enum to return the FQ scheduling state
+ * @flags: pointer to state flags to receive QMAN_FQ_STATE_*** bitmask
+ *
+ * Queries the state of the FQ object, without performing any h/w commands.
+ * This captures the state, as seen by the driver, at the time the function
+ * executes.
+ */
+void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags);
+
+/**
+ * qman_init_fq - Initialises FQ fields, leaves the FQ "parked" or "scheduled"
+ * @fq: the frame queue object to modify, must be 'parked' or new.
+ * @flags: bit-mask of QMAN_INITFQ_FLAG_*** options
+ * @opts: the FQ-modification settings, as defined in the low-level API
+ *
+ * The @opts parameter comes from the low-level portal API. Select
+ * QMAN_INITFQ_FLAG_SCHED in @flags to cause the frame queue to be scheduled
+ * rather than parked. NB, @opts can be NULL.
+ *
+ * Note that some fields and options within @opts may be ignored or overwritten
+ * by the driver;
+ * 1. the 'count' and 'fqid' fields are always ignored (this operation only
+ * affects one frame queue: @fq).
+ * 2. the QM_INITFQ_WE_CONTEXTB option of the 'we_mask' field and the associated
+ * 'fqd' structure's 'context_b' field are sometimes overwritten;
+ * - if @fq was not created with QMAN_FQ_FLAG_TO_DCPORTAL, then context_b is
+ * initialised to a value used by the driver for demux.
+ * - if context_b is initialised for demux, so is context_a in case stashing
+ * is requested (see item 4).
+ * (So caller control of context_b is only possible for TO_DCPORTAL frame queue
+ * objects.)
+ * 3. if @flags contains QMAN_INITFQ_FLAG_LOCAL, the 'fqd' structure's
+ * 'dest::channel' field will be overwritten to match the portal used to issue
+ * the command. If the WE_DESTWQ write-enable bit had already been set by the
+ * caller, the channel workqueue will be left as-is, otherwise the write-enable
+ * bit is set and the workqueue is set to a default of 4. If the "LOCAL" flag
+ * isn't set, the destination channel/workqueue fields and the write-enable bit
+ * are left as-is.
+ * 4. if the driver overwrites context_a/b for demux, then if
+ * QM_INITFQ_WE_CONTEXTA is set, the driver will only overwrite
+ * context_a.address fields and will leave the stashing fields provided by the
+ * user alone, otherwise it will zero out the context_a.stashing fields.
+ */
+int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
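+
+/* A sketch (compiled out, assuming memset() is available via compat.h):
+ * initialise and schedule an FQ on the local portal, taking the FQD
+ * defaults by leaving all write-enable bits in @opts clear.
+ */
+#if 0
+static int example_init_and_schedule(struct qman_fq *fq)
+{
+ struct qm_mcc_initfq opts;
+
+ memset(&opts, 0, sizeof(opts));
+ return qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED |
+    QMAN_INITFQ_FLAG_LOCAL, &opts);
+}
+#endif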
+
+/**
+ * qman_schedule_fq - Schedules a FQ
+ * @fq: the frame queue object to schedule, must be 'parked'
+ *
+ * Schedules the frame queue, which must be Parked, which takes it to
+ * Tentatively-Scheduled or Truly-Scheduled depending on its fill-level.
+ */
+int qman_schedule_fq(struct qman_fq *fq);
+
+/**
+ * qman_retire_fq - Retires a FQ
+ * @fq: the frame queue object to retire
+ * @flags: FQ flags (as per qman_fq_state) if retirement completes immediately
+ *
+ * Retires the frame queue. This returns zero if it succeeds immediately, +1 if
+ * the retirement was started asynchronously, otherwise it returns negative for
+ * failure. When this function returns zero, @flags is set to indicate whether
+ * the retired FQ is empty and/or whether it has any ORL fragments (to show up
+ * as ERNs). Otherwise the corresponding flags will be known when a subsequent
+ * FQRN message shows up on the portal's message ring.
+ *
+ * NB, if the retirement is asynchronous (the FQ was in the Truly Scheduled or
+ * Active state), the completion will be via the message ring as a FQRN - but
+ * the corresponding callback may occur before this function returns!! Ie. the
+ * caller should be prepared to accept the callback as the function is called,
+ * not only once it has returned.
+ */
+int qman_retire_fq(struct qman_fq *fq, u32 *flags);
+
+/**
+ * qman_oos_fq - Puts a FQ "out of service"
+ * @fq: the frame queue object to be put out-of-service, must be 'retired'
+ *
+ * The frame queue must be retired and empty, and if any order restoration list
+ * was released as ERNs at the time of retirement, they must all be consumed.
+ */
+int qman_oos_fq(struct qman_fq *fq);
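+
+/* A sketch (compiled out) of a full teardown: retire, take out of service,
+ * then release the object. Handling of asynchronous retirement (a +1 return,
+ * completed later by an FQRN message) and of frame draining is elided.
+ */
+#if 0
+static int example_teardown(struct qman_fq *fq)
+{
+ u32 flags;
+ int err = qman_retire_fq(fq, &flags);
+
+ if (err < 0)
+  return err;
+ err = qman_oos_fq(fq);
+ if (err)
+  return err;
+ qman_destroy_fq(fq, 0);
+ return 0;
+}
+#endif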
+
+/**
+ * qman_fq_flow_control - Set the XON/XOFF state of a FQ
+ * @fq: the frame queue object to be set to XON/XOFF state, must not be 'oos',
+ * or 'retired' or 'parked' state
+ * @xon: boolean to set fq in XON or XOFF state
+ *
+ * The frame queue should be in the Tentatively Scheduled or Truly Scheduled
+ * state, otherwise the IFSI interrupt will be asserted.
+ */
+int qman_fq_flow_control(struct qman_fq *fq, int xon);
+
+/**
+ * qman_query_fq - Queries FQD fields (via h/w query command)
+ * @fq: the frame queue object to be queried
+ * @fqd: storage for the queried FQD fields
+ */
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
+
+/**
+ * qman_query_fq_has_pkts - Queries non-programmable FQD fields and returns
+ * '1' if packets are present in the frame queue, or '0' if there are none.
+ * @fq: the frame queue object to be queried
+ */
+int qman_query_fq_has_pkts(struct qman_fq *fq);
+
+/**
+ * qman_query_fq_np - Queries non-programmable FQD fields
+ * @fq: the frame queue object to be queried
+ * @np: storage for the queried FQD fields
+ */
+int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);
+
+/**
+ * qman_query_fq_frm_cnt - Queries fq frame count
+ * @fq: the frame queue object to be queried
+ * @frm_cnt: number of frames in the queue
+ */
+int qman_query_fq_frm_cnt(struct qman_fq *fq, u32 *frm_cnt);
+
+/**
+ * qman_query_wq - Queries work queue lengths
+ * @query_dedicated: If non-zero, query length of WQs in the channel dedicated
+ * to this software portal. Otherwise, query length of WQs in a
+ * channel specified in wq.
+ * @wq: storage for the queried WQ lengths. Also specifies the channel to
+ * query if query_dedicated is zero.
+ */
+int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq);
+
+/**
+ * qman_volatile_dequeue - Issue a volatile dequeue command
+ * @fq: the frame queue object to dequeue from
+ * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
+ * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
+ *
+ * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
+ * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
+ * the VDQCR is already in use, otherwise returns non-zero for failure. If
+ * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
+ * the VDQCR command has finished executing (ie. once the callback for the last
+ * DQRR entry resulting from the VDQCR command has been called). If not using
+ * the FINISH flag, completion can be determined either by detecting the
+ * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
+ * in the "stat" field of the "struct qm_dqrr_entry" passed to the FQ's dequeue
+ * callback, or by waiting for the QMAN_FQ_STATE_VDQCR bit to disappear from the
+ * "flags" retrieved from qman_fq_state().
+ */
+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
+
+/**
+ * qman_enqueue - Enqueue a frame to a frame queue
+ * @fq: the frame queue object to enqueue to
+ * @fd: a descriptor of the frame to be enqueued
+ * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
+ *
+ * Fills an entry in the EQCR of the cpu-affine portal to enqueue the frame
+ * described by @fd. The descriptor details are copied from @fd to the EQCR
+ * entry; the 'pid' field is ignored. The return value is non-zero on error,
+ * such as ring full
+ * (and FLAG_WAIT not specified), congestion avoidance (FLAG_WATCH_CGR
+ * specified), etc. If the ring is full and FLAG_WAIT is specified, this
+ * function will block. If FLAG_INTERRUPT is set, the EQCI bit of the portal
+ * interrupt will assert when Qman consumes the EQCR entry (subject to "status
+ * disable", "enable", and "inhibit" registers). If FLAG_DCA is set, Qman will
+ * perform an implied "discrete consumption acknowledgment" on the dequeue
+ * ring's (DQRR) entry, at the ring index specified by the FLAG_DCA_IDX(x)
+ * macro. (As an alternative to issuing explicit DCA actions on DQRR entries,
+ * this implicit DCA can delay the release of a "held active" frame queue
+ * corresponding to a DQRR entry until Qman consumes the EQCR entry - providing
+ * order-preservation semantics in packet-forwarding scenarios.) If FLAG_DCA is
+ * set, then FLAG_DCA_PARK can also be set to imply that the DQRR consumption
+ * acknowledgment should "park request" the "held active" frame queue. Ie.
+ * when the portal eventually releases that frame queue, it will be left in the
+ * Parked state rather than Tentatively Scheduled or Truly Scheduled. If the
+ * portal is watching congestion groups, the QMAN_ENQUEUE_FLAG_WATCH_CGR flag
+ * is requested, and the FQ is a member of a congestion group, then this
+ * function returns -EAGAIN if the congestion group is currently congested.
+ * Note, this does not eliminate ERNs, as the async interface means we can be
+ * sending enqueue commands to an un-congested FQ that becomes congested before
+ * the enqueue commands are processed, but it does minimise needless thrashing
+ * of an already busy hardware resource by throttling many of the to-be-dropped
+ * enqueues "at the source".
+ */
+int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags);
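+
+/* A sketch (compiled out): enqueue with congestion watching, retrying while
+ * the congestion group is congested (-EAGAIN, as described above). FLAG_WAIT
+ * is not used, so ring-full errors would also surface here.
+ */
+#if 0
+static int example_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
+{
+ int err;
+
+ do {
+  err = qman_enqueue(fq, fd, QMAN_ENQUEUE_FLAG_WATCH_CGR);
+ } while (err == -EAGAIN);
+ return err;
+}
+#endif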
+
+int qman_enqueue_multi(struct qman_fq *fq, const struct qm_fd *fd, u32 *flags,
+ int frames_to_send);
+
+/**
+ * qman_enqueue_multi_fq - Enqueue multiple frames to their respective frame
+ * queues.
+ * @fq[]: Array of frame queue objects to enqueue to
+ * @fd: pointer to first descriptor of frame to be enqueued
+ * @frames_to_send: number of frames to be sent.
+ *
+ * This API is similar to qman_enqueue_multi(), but each frame descriptor in
+ * @fd is enqueued to its respective frame queue in @fq[].
+ */
+int
+qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
+ int frames_to_send);
+
+typedef int (*qman_cb_precommit) (void *arg);
+
+/**
+ * qman_enqueue_orp - Enqueue a frame to a frame queue using an ORP
+ * @fq: the frame queue object to enqueue to
+ * @fd: a descriptor of the frame to be enqueued
+ * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
+ * @orp: the frame queue object used as an order restoration point.
+ * @orp_seqnum: the sequence number of this frame in the order restoration path
+ *
+ * Similar to qman_enqueue(), but with the addition of an Order Restoration
+ * Point (@orp) and corresponding sequence number (@orp_seqnum) for this
+ * enqueue operation to employ order restoration. Each frame queue object acts
+ * as an Order Definition Point (ODP) by providing each frame dequeued from it
+ * with an incrementing sequence number; this value is generally ignored unless
+ * that sequence of dequeued frames will need order restoration later. Each
+ * frame queue object also encapsulates an Order Restoration Point (ORP), which
+ * is a re-assembly context for re-ordering frames relative to their sequence
+ * numbers as they are enqueued. The ORP does not have to be within the frame
+ * queue that receives the enqueued frame, in fact it is usually the frame
+ * queue from which the frames were originally dequeued. For the purposes of
+ * order restoration, multiple frames (or "fragments") can be enqueued for a
+ * single sequence number by setting the QMAN_ENQUEUE_FLAG_NLIS flag for all
+ * enqueues except the final fragment of a given sequence number. Ordering
+ * between sequence numbers is guaranteed, even if fragments of different
+ * sequence numbers are interlaced with one another. Fragments of the same
+ * sequence number will retain the order in which they are enqueued. If no
+ * enqueue is to be performed, QMAN_ENQUEUE_FLAG_HOLE indicates that the given
+ * sequence number is to be "skipped" by the ORP logic (eg. if a frame has been
+ * dropped from a sequence), or QMAN_ENQUEUE_FLAG_NESN indicates that the given
+ * sequence number should become the ORP's "Next Expected Sequence Number".
+ *
+ * Side note: a frame queue object can be used purely as an ORP, without
+ * carrying any frames at all. Care should be taken not to deallocate a frame
+ * queue object that is being actively used as an ORP, as a future allocation
+ * of the frame queue object may start using the internal ORP before the
+ * previous use has finished.
+ */
+int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
+ struct qman_fq *orp, u16 orp_seqnum);
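+
+/* A sketch (compiled out): enqueue all fragments of one sequence number
+ * through an ORP, setting NLIS on every fragment except the last, as
+ * described above. All names are illustrative.
+ */
+#if 0
+static int example_orp_fragments(struct qman_fq *tx, struct qman_fq *orp,
+    const struct qm_fd *frag, int n, u16 seqnum)
+{
+ int i, err = 0;
+
+ for (i = 0; i < n && !err; i++)
+  err = qman_enqueue_orp(tx, &frag[i],
+    (i < n - 1) ? QMAN_ENQUEUE_FLAG_NLIS : 0,
+    orp, seqnum);
+ return err;
+}
+#endif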
+
+/**
+ * qman_alloc_fqid_range - Allocate a contiguous range of FQIDs
+ * @result: is set by the API to the base FQID of the allocated range
+ * @count: the number of FQIDs required
+ * @align: required alignment of the allocated range
+ * @partial: non-zero if the API can return fewer than @count FQIDs
+ *
+ * Returns the number of frame queues allocated, or a negative error code. If
+ * @partial is non-zero, the allocation request may return a smaller range of
+ * FQs than requested (though alignment will be as requested). If @partial is
+ * zero, the return value will either be 'count' or negative.
+ */
+int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial);
+static inline int qman_alloc_fqid(u32 *result)
+{
+ int ret = qman_alloc_fqid_range(result, 1, 0, 0);
+
+ return (ret > 0) ? 0 : ret;
+}
+
+/**
+ * qman_release_fqid_range - Release the specified range of frame queue IDs
+ * @fqid: the base FQID of the range to deallocate
+ * @count: the number of FQIDs in the range
+ *
+ * This function can also be used to seed the allocator with ranges of FQIDs
+ * that it can subsequently allocate from.
+ */
+void qman_release_fqid_range(u32 fqid, unsigned int count);
+static inline void qman_release_fqid(u32 fqid)
+{
+ qman_release_fqid_range(fqid, 1);
+}
+
+void qman_seed_fqid_range(u32 fqid, unsigned int count);
+
+int qman_shutdown_fq(u32 fqid);
+
+/**
+ * qman_reserve_fqid_range - Reserve the specified range of frame queue IDs
+ * @fqid: the base FQID of the range to reserve
+ * @count: the number of FQIDs in the range
+ */
+int qman_reserve_fqid_range(u32 fqid, unsigned int count);
+static inline int qman_reserve_fqid(u32 fqid)
+{
+ return qman_reserve_fqid_range(fqid, 1);
+}
+
+/* Pool-channel management */
+/**
+ * qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs
+ * @result: is set by the API to the base pool-channel ID of the allocated range
+ * @count: the number of pool-channel IDs required
+ * @align: required alignment of the allocated range
+ * @partial: non-zero if the API can return fewer than @count
+ *
+ * Returns the number of pool-channel IDs allocated, or a negative error code.
+ * If @partial is non-zero, the allocation request may return a smaller range
+ * than requested (though alignment will be as requested). If @partial is zero,
+ * the return value will either be 'count' or negative.
+ */
+int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial);
+static inline int qman_alloc_pool(u32 *result)
+{
+ int ret = qman_alloc_pool_range(result, 1, 0, 0);
+
+ return (ret > 0) ? 0 : ret;
+}
+
+/**
+ * qman_release_pool_range - Release the specified range of pool-channel IDs
+ * @id: the base pool-channel ID of the range to deallocate
+ * @count: the number of pool-channel IDs in the range
+ */
+void qman_release_pool_range(u32 id, unsigned int count);
+static inline void qman_release_pool(u32 id)
+{
+ qman_release_pool_range(id, 1);
+}
+
+/**
+ * qman_reserve_pool_range - Reserve the specified range of pool-channel IDs
+ * @id: the base pool-channel ID of the range to reserve
+ * @count: the number of pool-channel IDs in the range
+ */
+int qman_reserve_pool_range(u32 id, unsigned int count);
+static inline int qman_reserve_pool(u32 id)
+{
+ return qman_reserve_pool_range(id, 1);
+}
+
+void qman_seed_pool_range(u32 id, unsigned int count);
+
+ /* CGR management */
+ /* -------------- */
+/**
+ * qman_create_cgr - Register a congestion group object
+ * @cgr: the 'cgr' object, with fields filled in
+ * @flags: QMAN_CGR_FLAG_* values
+ * @opts: optional state of CGR settings
+ *
+ * Registers this object to receive congestion entry/exit callbacks on the
+ * portal affine to the cpu on which this API is executed. If opts is
+ * NULL then only the callback (cgr->cb) function is registered. If @flags
+ * contains QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset
+ * any unspecified parameters) will be used rather than a modify hw command
+ * (which only modifies the specified parameters).
+ */
+int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts);
+
+/**
+ * qman_create_cgr_to_dcp - Register a congestion group object to DCP portal
+ * @cgr: the 'cgr' object, with fields filled in
+ * @flags: QMAN_CGR_FLAG_* values
+ * @dcp_portal: the DCP portal to which the cgr object is registered.
+ * @opts: optional state of CGR settings
+ */
+int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
+ struct qm_mcc_initcgr *opts);
+
+/**
+ * qman_delete_cgr - Deregisters a congestion group object
+ * @cgr: the 'cgr' object to deregister
+ *
+ * "Unplugs" this CGR object from the portal affine to the cpu on which this API
+ * is executed. This must be executed on the same affine portal on which it was
+ * created.
+ */
+int qman_delete_cgr(struct qman_cgr *cgr);
+
+/**
+ * qman_modify_cgr - Modify CGR fields
+ * @cgr: the 'cgr' object to modify
+ * @flags: QMAN_CGR_FLAG_* values
+ * @opts: the CGR-modification settings
+ *
+ * The @opts parameter comes from the low-level portal API, and can be NULL.
+ * Note that some fields and options within @opts may be ignored or overwritten
+ * by the driver, in particular the 'cgrid' field is ignored (this operation
+ * only affects the given CGR object). If @flags contains
+ * QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset any
+ * unspecified parameters) will be used rather than a modify hw command (which
+ * only modifies the specified parameters).
+ */
+int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts);
+
+/**
+ * qman_query_cgr - Queries CGR fields
+ * @cgr: the 'cgr' object to query
+ * @result: storage for the queried congestion group record
+ */
+int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *result);
+
+/**
+ * qman_query_congestion - Queries the state of all congestion groups
+ * @congestion: storage for the queried state of all congestion groups
+ */
+int qman_query_congestion(struct qm_mcr_querycongestion *congestion);
+
+/**
+ * qman_alloc_cgrid_range - Allocate a contiguous range of CGR IDs
+ * @result: is set by the API to the base CGR ID of the allocated range
+ * @count: the number of CGR IDs required
+ * @align: required alignment of the allocated range
+ * @partial: non-zero if the API can return fewer than @count
+ *
+ * Returns the number of CGR IDs allocated, or a negative error code.
+ * If @partial is non-zero, the allocation request may return a smaller range
+ * than requested (though alignment will be as requested). If @partial is zero,
+ * the return value will either be 'count' or negative.
+ */
+int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial);
+static inline int qman_alloc_cgrid(u32 *result)
+{
+ int ret = qman_alloc_cgrid_range(result, 1, 0, 0);
+
+ return (ret > 0) ? 0 : ret;
+}
+
+/**
+ * qman_release_cgrid_range - Release the specified range of CGR IDs
+ * @id: the base CGR ID of the range to deallocate
+ * @count: the number of CGR IDs in the range
+ */
+void qman_release_cgrid_range(u32 id, unsigned int count);
+static inline void qman_release_cgrid(u32 id)
+{
+ qman_release_cgrid_range(id, 1);
+}
+
+/**
+ * qman_reserve_cgrid_range - Reserve the specified range of CGR IDs
+ * @id: the base CGR ID of the range to reserve
+ * @count: the number of CGR IDs in the range
+ */
+int qman_reserve_cgrid_range(u32 id, unsigned int count);
+static inline int qman_reserve_cgrid(u32 id)
+{
+ return qman_reserve_cgrid_range(id, 1);
+}
+
+void qman_seed_cgrid_range(u32 id, unsigned int count);
+
+ /* Helpers */
+ /* ------- */
+/**
+ * qman_poll_fq_for_init - Check if an FQ has been initialised from OOS
+ * @fq: the FQ object wrapping the FQID that will be initialised by other s/w
+ *
+ * In many situations, a FQID is provided for communication between s/w
+ * entities, and whilst the consumer is responsible for initialising and
+ * scheduling the FQ, the producer(s) generally create a wrapper FQ object and
+ * only call qman_enqueue() on it (no FQ initialisation, scheduling, etc). Ie;
+ * qman_create_fq(..., QMAN_FQ_FLAG_NO_MODIFY, ...);
+ * However, data cannot be enqueued to the FQ until it is initialised out of
+ * the OOS state - this function polls for that condition. It is particularly
+ * useful for users of IPC functions - each endpoint's Rx FQ is the other
+ * endpoint's Tx FQ, so each side can initialise and schedule their Rx FQ object
+ * and then use this API on the (NO_MODIFY) Tx FQ object in order to
+ * synchronise. The function returns zero for success, +1 if the FQ is still in
+ * the OOS state, or negative if there was an error.
+ */
+static inline int qman_poll_fq_for_init(struct qman_fq *fq)
+{
+ struct qm_mcr_queryfq_np np;
+ int err;
+
+ err = qman_query_fq_np(fq, &np);
+ if (err)
+ return err;
+ if ((np.state & QM_MCR_NP_STATE_MASK) == QM_MCR_NP_STATE_OOS)
+ return 1;
+ return 0;
+}
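+
+/* A sketch (compiled out): the synchronisation loop described above - spin
+ * until the peer initialises the shared (NO_MODIFY) Tx FQ out of OOS.
+ */
+#if 0
+static int example_wait_for_peer(struct qman_fq *tx_fq)
+{
+ int ret;
+
+ do {
+  ret = qman_poll_fq_for_init(tx_fq);
+ } while (ret == 1);
+ return ret; /* 0 on success, negative on query error */
+}
+#endif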
+
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define cpu_to_hw_sg(x)
+#define hw_sg_to_cpu(x)
+#else
+#define cpu_to_hw_sg(x) __cpu_to_hw_sg(x)
+#define hw_sg_to_cpu(x) __hw_sg_to_cpu(x)
+
+static inline void __cpu_to_hw_sg(struct qm_sg_entry *sgentry)
+{
+ sgentry->opaque = cpu_to_be64(sgentry->opaque);
+ sgentry->val = cpu_to_be32(sgentry->val);
+ sgentry->val_off = cpu_to_be16(sgentry->val_off);
+}
+
+static inline void __hw_sg_to_cpu(struct qm_sg_entry *sgentry)
+{
+ sgentry->opaque = be64_to_cpu(sgentry->opaque);
+ sgentry->val = be32_to_cpu(sgentry->val);
+ sgentry->val_off = be16_to_cpu(sgentry->val_off);
+}
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FSL_QMAN_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_usd.h b/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_usd.h
new file mode 100644
index 00000000..e1836175
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_usd.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2010-2011 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ */
+
+#ifndef __FSL_USD_H
+#define __FSL_USD_H
+
+#include <compat.h>
+#include <fsl_qman.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Thread-entry/exit hooks; */
+int qman_thread_init(void);
+int bman_thread_init(void);
+int qman_thread_finish(void);
+int bman_thread_finish(void);
+
+#define QBMAN_ANY_PORTAL_IDX 0xffffffff
+
+/* Obtain and free raw (uninitialized) portals */
+
+struct dpaa_raw_portal {
+ /* inputs */
+
+ /* set to non zero to turn on stashing */
+ uint8_t enable_stash;
+ /* Stashing attributes for the portal */
+ uint32_t cpu;
+ uint32_t cache;
+ uint32_t window;
+
+ /* Specifies the stash request queue this portal should use */
+ uint8_t sdest;
+
+ /* Specifies a specific portal index to map, or QBMAN_ANY_PORTAL_IDX
+ * for don't care. The portal index will be populated by the
+ * driver when the ioctl() successfully completes.
+ */
+ uint32_t index;
+
+ /* outputs */
+ uint64_t cinh;
+ uint64_t cena;
+};
+
+int qman_allocate_raw_portal(struct dpaa_raw_portal *portal);
+int qman_free_raw_portal(struct dpaa_raw_portal *portal);
+
+int bman_allocate_raw_portal(struct dpaa_raw_portal *portal);
+int bman_free_raw_portal(struct dpaa_raw_portal *portal);
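+
+/* A sketch (compiled out, assuming memset() is available via compat.h):
+ * allocate any free raw qman portal without stashing, then free it again.
+ * The driver populates index/cinh/cena on success.
+ */
+#if 0
+static int example_raw_portal(void)
+{
+ struct dpaa_raw_portal p;
+
+ memset(&p, 0, sizeof(p));
+ p.index = QBMAN_ANY_PORTAL_IDX;
+ if (qman_allocate_raw_portal(&p))
+  return -1;
+ /* ... map/use p.cinh and p.cena ... */
+ return qman_free_raw_portal(&p);
+}
+#endif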
+
+/* Post-process interrupts. NB, the kernel IRQ handler disables the interrupt
+ * line before notifying us, and this post-processing re-enables it once
+ * processing is complete. As such, it is essential to call this before going
+ * into another blocking read/select/poll.
+ */
+void qman_thread_irq(void);
+void bman_thread_irq(void);
+
+/* Global setup */
+int qman_global_init(void);
+int bman_global_init(void);
+
+/* Direct portal create and destroy */
+struct qman_portal *fsl_qman_portal_create(void);
+int fsl_qman_portal_destroy(struct qman_portal *qp);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FSL_USD_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/include/netcfg.h b/src/spdk/dpdk/drivers/bus/dpaa/include/netcfg.h
new file mode 100644
index 00000000..7818de68
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/include/netcfg.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2010-2012 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ */
+
+#ifndef __NETCFG_H
+#define __NETCFG_H
+
+#include <fman.h>
+#include <argp.h>
+
+/* Configuration information related to a specific ethernet port */
+struct fm_eth_port_cfg {
+ /**< A list of PCD FQ ranges, obtained from FMC configuration */
+ struct list_head *list;
+ /**< The "Rx default" FQID, obtained from FMC configuration */
+ uint32_t rx_def;
+ /**< Other interface details are in the fman driver interface */
+ struct fman_if *fman_if;
+};
+
+struct netcfg_info {
+ uint8_t num_ethports;
+ /**< Number of ports */
+ struct fm_eth_port_cfg port_cfg[0];
+ /**< Variable structure array of size num_ethports */
+};
+
+struct interface_info {
+ char *name;
+ struct ether_addr mac_addr;
+ struct ether_addr peer_mac;
+ int mac_present;
+ int fman_enabled_mac_interface;
+};
+
+struct netcfg_interface {
+ uint8_t numof_netcfg_interface;
+ uint8_t numof_fman_enabled_macless;
+ struct interface_info interface_info[0];
+};
+
+/* Parses the FMC configuration - the netpcd XML ("policy") file that contains
+ * the PCD information, and the FMC config XML file - and returns the
+ * configuration information in newly allocated memory.
+ */
+struct netcfg_info *netcfg_acquire(void);
+
+/* cfg_ptr: configuration information pointer.
+ * Frees the resources allocated by the configuration layer.
+ */
+void netcfg_release(struct netcfg_info *cfg_ptr);
+
+#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
+/* cfg_ptr: configuration information pointer.
+ * This function dumps configuration data to stdout.
+ */
+void dump_netcfg(struct netcfg_info *cfg_ptr);
+#endif
+
+#endif /* __NETCFG_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/include/of.h b/src/spdk/dpdk/drivers/bus/dpaa/include/of.h
new file mode 100644
index 00000000..7ea7608f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/include/of.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2010-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#ifndef __OF_H
+#define __OF_H
+
+#include <compat.h>
+
+#ifndef OF_INIT_DEFAULT_PATH
+#define OF_INIT_DEFAULT_PATH "/proc/device-tree"
+#endif
+
+#define OF_DEFAULT_NA 1
+#define OF_DEFAULT_NS 1
+
+#define OF_FILE_BUF_MAX 256
+
+/**
+ * Layout of Device Tree:
+ * dt_dir
+ * |- dt_dir
+ * | |- dt_dir
+ * | | |- dt_dir
+ * | | | |- dt_file
+ * | | | ``- dt_file
+ * | | ``- dt_file
+ * | ``- dt_file
+ * ``- dt_file
+ *
+ * +------------------+
+ * |dt_dir |
+ * |+----------------+|
+ * ||dt_node ||
+ * ||+--------------+||
+ * |||device_node |||
+ * ||+--------------+||
+ * || list_dt_nodes ||
+ * |+----------------+|
+ * | list of subdir |
+ * | list of files |
+ * +------------------+
+ */
+
+/**
+ * Description of a device node in the device tree.
+ */
+struct device_node {
+ char name[NAME_MAX];
+ char full_name[PATH_MAX];
+};
+
+/**
+ * List of device nodes available in a device tree layout
+ */
+struct dt_node {
+ struct device_node node; /**< Property of node */
+ int is_file; /**< FALSE==dir, TRUE==file */
+ struct list_head list; /**< Nodes within a parent subdir */
+};
+
+/**
+ * Types we use to represent directories and files
+ */
+struct dt_file;
+struct dt_dir {
+ struct dt_node node;
+ struct list_head subdirs;
+ struct list_head files;
+ struct list_head linear;
+ struct dt_dir *parent;
+ struct dt_file *compatible;
+ struct dt_file *status;
+ struct dt_file *lphandle;
+ struct dt_file *a_cells;
+ struct dt_file *s_cells;
+ struct dt_file *reg;
+};
+
+struct dt_file {
+ struct dt_node node;
+ struct dt_dir *parent;
+ ssize_t len;
+ uint64_t buf[OF_FILE_BUF_MAX >> 3];
+};
+
+const struct device_node *of_find_compatible_node(
+ const struct device_node *from,
+ const char *type __always_unused,
+ const char *compatible)
+ __attribute__((nonnull(3)));
+
+#define for_each_compatible_node(dev_node, type, compatible) \
+ for (dev_node = of_find_compatible_node(NULL, type, compatible); \
+ dev_node != NULL; \
+ dev_node = of_find_compatible_node(dev_node, type, compatible))
+
+const void *of_get_property(const struct device_node *from, const char *name,
+ size_t *lenp) __attribute__((nonnull(2)));
+bool of_device_is_available(const struct device_node *dev_node);
+
+const struct device_node *of_find_node_by_phandle(phandle ph);
+
+const struct device_node *of_get_parent(const struct device_node *dev_node);
+
+const struct device_node *of_get_next_child(const struct device_node *dev_node,
+ const struct device_node *prev);
+
+const void *of_get_mac_address(const struct device_node *np);
+
+#define for_each_child_node(parent, child) \
+ for (child = of_get_next_child(parent, NULL); child != NULL; \
+ child = of_get_next_child(parent, child))
+
+uint32_t of_n_addr_cells(const struct device_node *dev_node);
+uint32_t of_n_size_cells(const struct device_node *dev_node);
+
+const uint32_t *of_get_address(const struct device_node *dev_node, size_t idx,
+ uint64_t *size, uint32_t *flags);
+
+uint64_t of_translate_address(const struct device_node *dev_node,
+ const u32 *addr) __attribute__((nonnull));
+
+bool of_device_is_compatible(const struct device_node *dev_node,
+ const char *compatible);
+
+/* of_init() must be called prior to initialisation or use of any driver
+ * subsystem that is device-tree-dependent. Eg. Qman/Bman, config layers, etc.
+ * The path should usually be "/proc/device-tree".
+ */
+int of_init_path(const char *dt_path);
+
+/* of_finish() allows a controlled tear-down of the device-tree layer, eg. if a
+ * full reload is desired without a process exit.
+ */
+void of_finish(void);
+
+/* Use of this wrapper is recommended. */
+static inline int of_init(void)
+{
+ return of_init_path(OF_INIT_DEFAULT_PATH);
+}
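+
+/* A sketch (compiled out): initialise the device-tree layer and walk all
+ * nodes matching a compatible string. "fsl,fman" is illustrative only.
+ */
+#if 0
+static void example_scan(void)
+{
+ const struct device_node *dn;
+ size_t len;
+
+ if (of_init())
+  return;
+ for_each_compatible_node(dn, NULL, "fsl,fman") {
+  const void *reg = of_get_property(dn, "reg", &len);
+  /* ... use dn->full_name and reg ... */
+ }
+ of_finish();
+}
+#endif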
+
+/* Read a numeric property according to its size and return it as a 64-bit
+ * value.
+ */
+static inline uint64_t of_read_number(const __be32 *cell, int size)
+{
+ uint64_t r = 0;
+
+ while (size--)
+ r = (r << 32) | be32toh(*(cell++));
+ return r;
+}
+
+#endif /* __OF_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/include/process.h b/src/spdk/dpdk/drivers/bus/dpaa/include/process.h
new file mode 100644
index 00000000..d9ec94ee
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/include/process.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2010-2011 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ */
+
+#ifndef __PROCESS_H
+#define __PROCESS_H
+
+#include <compat.h>
+
+/* The process device underlies process-wide user/kernel interactions, such as
+ * mapping dma_mem memory and providing accompanying ioctl()s. (This isn't used
+ * for portals, which use one UIO device each.)
+ */
+#define PROCESS_PATH "/dev/fsl-usdpaa"
+
+/* Allocation of resource IDs uses a generic interface. This enum is used to
+ * distinguish between the type of underlying object being manipulated.
+ */
+enum dpaa_id_type {
+ dpaa_id_fqid,
+ dpaa_id_bpid,
+ dpaa_id_qpool,
+ dpaa_id_cgrid,
+ dpaa_id_max /* <-- not a valid type, represents the number of types */
+};
+
+int process_alloc(enum dpaa_id_type id_type, uint32_t *base, uint32_t num,
+ uint32_t align, int partial);
+void process_release(enum dpaa_id_type id_type, uint32_t base, uint32_t num);
+
+int process_reserve(enum dpaa_id_type id_type, uint32_t base, uint32_t num);
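+
+/* A sketch (compiled out): allocate a contiguous range of 8 FQIDs through
+ * the process device, then hand them back. The return semantics are assumed
+ * to mirror the qman/bman range allocators (count on success, negative on
+ * failure).
+ */
+#if 0
+static int example_id_alloc(void)
+{
+ uint32_t base;
+ int ret = process_alloc(dpaa_id_fqid, &base, 8, 0, 0);
+
+ if (ret > 0)
+  process_release(dpaa_id_fqid, base, ret);
+ return ret;
+}
+#endif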
+
+/* Mapping and using QMan/BMan portals */
+enum dpaa_portal_type {
+ dpaa_portal_qman,
+ dpaa_portal_bman,
+};
+
+struct dpaa_portal_map {
+ void *cinh;
+ void *cena;
+};
+
+struct dpaa_ioctl_portal_map {
+ /* Input parameter: whether a qman or a bman portal is required. */
+ enum dpaa_portal_type type;
+ /* Specifies a specific portal index to map, or 0xffffffff
+ * for don't care.
+ */
+ uint32_t index;
+
+ /* Return value if the map succeeds, this gives the mapped
+ * cache-inhibited (cinh) and cache-enabled (cena) addresses.
+ */
+ struct dpaa_portal_map addr;
+
+ /* Qman-specific return values */
+ u16 channel;
+ uint32_t pools;
+};
+
+int process_portal_map(struct dpaa_ioctl_portal_map *params);
+int process_portal_unmap(struct dpaa_portal_map *map);
+
+struct dpaa_ioctl_irq_map {
+ enum dpaa_portal_type type; /* Type of portal to map */
+ int fd; /* File descriptor that contains the portal */
+ void *portal_cinh; /* Cache inhibited area to identify the portal */
+};
+
+int process_portal_irq_map(int fd, struct dpaa_ioctl_irq_map *irq);
+int process_portal_irq_unmap(int fd);
+
+#endif /* __PROCESS_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/meson.build b/src/spdk/dpdk/drivers/bus/dpaa/meson.build
new file mode 100644
index 00000000..d10b62c0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/meson.build
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+deps += ['eventdev']
+sources = files('base/fman/fman.c',
+ 'base/fman/fman_hw.c',
+ 'base/fman/netcfg_layer.c',
+ 'base/fman/of.c',
+ 'base/qbman/bman.c',
+ 'base/qbman/bman_driver.c',
+ 'base/qbman/dpaa_alloc.c',
+ 'base/qbman/dpaa_sys.c',
+ 'base/qbman/process.c',
+ 'base/qbman/qman.c',
+ 'base/qbman/qman_driver.c',
+ 'dpaa_bus.c')
+
+allow_experimental_apis = true
+
+if cc.has_argument('-Wno-cast-qual')
+ cflags += '-Wno-cast-qual'
+endif
+
+includes += include_directories('include', 'base/qbman')
+cflags += ['-D_GNU_SOURCE']
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/rte_bus_dpaa_version.map b/src/spdk/dpdk/drivers/bus/dpaa/rte_bus_dpaa_version.map
new file mode 100644
index 00000000..7d6d6243
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/rte_bus_dpaa_version.map
@@ -0,0 +1,104 @@
+DPDK_17.11 {
+ global:
+
+ bman_acquire;
+ bman_free_pool;
+ bman_get_params;
+ bman_global_init;
+ bman_new_pool;
+ bman_query_free_buffers;
+ bman_release;
+ dpaa_logtype_mempool;
+ dpaa_logtype_pmd;
+ dpaa_netcfg;
+ fman_ccsr_map_fd;
+ fman_dealloc_bufs_mask_hi;
+ fman_dealloc_bufs_mask_lo;
+ fman_if_add_mac_addr;
+ fman_if_clear_mac_addr;
+ fman_if_disable_rx;
+ fman_if_enable_rx;
+ fman_if_discard_rx_errors;
+ fman_if_get_fc_threshold;
+ fman_if_get_fc_quanta;
+ fman_if_get_fdoff;
+ fman_if_loopback_disable;
+ fman_if_loopback_enable;
+ fman_if_promiscuous_disable;
+ fman_if_promiscuous_enable;
+ fman_if_reset_mcast_filter_table;
+ fman_if_set_bp;
+ fman_if_set_fc_threshold;
+ fman_if_set_fc_quanta;
+ fman_if_set_fdoff;
+ fman_if_set_ic_params;
+ fman_if_set_maxfrm;
+ fman_if_set_mcast_filter_table;
+ fman_if_stats_get;
+ fman_if_stats_get_all;
+ fman_if_stats_reset;
+ fman_ip_rev;
+ netcfg_acquire;
+ netcfg_release;
+ of_find_compatible_node;
+ of_get_property;
+ qm_channel_caam;
+ qman_create_fq;
+ qman_dequeue;
+ qman_dqrr_consume;
+ qman_enqueue;
+ qman_enqueue_multi;
+ qman_fq_fqid;
+ qman_fq_state;
+ qman_global_init;
+ qman_init_fq;
+ qman_poll_dqrr;
+ qman_query_fq_np;
+ qman_set_vdq;
+ qman_reserve_fqid_range;
+ qman_volatile_dequeue;
+ rte_dpaa_driver_register;
+ rte_dpaa_driver_unregister;
+ rte_dpaa_mem_ptov;
+ rte_dpaa_portal_init;
+
+ local: *;
+};
+
+DPDK_18.02 {
+ global:
+
+ dpaa_logtype_eventdev;
+ dpaa_svr_family;
+ per_lcore_dpaa_io;
+ per_lcore_held_bufs;
+ qm_channel_pool1;
+ qman_alloc_cgrid_range;
+ qman_alloc_pool_range;
+ qman_create_cgr;
+ qman_dca_index;
+ qman_delete_cgr;
+ qman_enqueue_multi_fq;
+ qman_modify_cgr;
+ qman_oos_fq;
+ qman_portal_dequeue;
+ qman_portal_poll_rx;
+ qman_query_fq_frm_cnt;
+ qman_release_cgrid_range;
+ qman_retire_fq;
+ qman_static_dequeue_add;
+ rte_dpaa_portal_fq_close;
+ rte_dpaa_portal_fq_init;
+
+ local: *;
+} DPDK_17.11;
+
+DPDK_18.08 {
+ global:
+
+ fman_if_get_sg_enable;
+ fman_if_set_sg;
+ of_get_mac_address;
+
+ local: *;
+} DPDK_18.02;
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/rte_dpaa_bus.h b/src/spdk/dpdk/drivers/bus/dpaa/rte_dpaa_bus.h
new file mode 100644
index 00000000..15dc6a4a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/rte_dpaa_bus.h
@@ -0,0 +1,192 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2017 NXP
+ *
+ */
+#ifndef __RTE_DPAA_BUS_H__
+#define __RTE_DPAA_BUS_H__
+
+#include <rte_bus.h>
+#include <rte_mempool.h>
+
+#include <fsl_usd.h>
+#include <fsl_qman.h>
+#include <fsl_bman.h>
+#include <of.h>
+#include <netcfg.h>
+
+#define DPAA_MEMPOOL_OPS_NAME "dpaa"
+
+#define DEV_TO_DPAA_DEVICE(ptr) \
+ container_of(ptr, struct rte_dpaa_device, device)
+
+/* DPAA SoC identifier; If this is not available, it can be concluded
+ * that the board is non-DPAA. A single slot is currently supported.
+ */
+#define DPAA_SOC_ID_FILE "/sys/devices/soc0/soc_id"
+
+#define SVR_LS1043A_FAMILY 0x87920000
+#define SVR_LS1046A_FAMILY 0x87070000
+#define SVR_MASK 0xffff0000
+
+extern unsigned int dpaa_svr_family;
+
+extern RTE_DEFINE_PER_LCORE(bool, dpaa_io);
+
+struct rte_dpaa_device;
+struct rte_dpaa_driver;
+
+/* DPAA Device and Driver lists for DPAA bus */
+TAILQ_HEAD(rte_dpaa_device_list, rte_dpaa_device);
+TAILQ_HEAD(rte_dpaa_driver_list, rte_dpaa_driver);
+
+/* Configuration variables exported from DPAA bus */
+extern struct netcfg_info *dpaa_netcfg;
+
+enum rte_dpaa_type {
+ FSL_DPAA_ETH = 1,
+ FSL_DPAA_CRYPTO,
+};
+
+struct rte_dpaa_bus {
+ struct rte_bus bus;
+ struct rte_dpaa_device_list device_list;
+ struct rte_dpaa_driver_list driver_list;
+ int device_count;
+};
+
+struct dpaa_device_id {
+ uint8_t fman_id; /**< Fman interface ID, for ETH type device */
+ uint8_t mac_id; /**< Fman MAC interface ID, for ETH type device */
+ uint16_t dev_id; /**< Device Identifier from DPDK */
+};
+
+struct rte_dpaa_device {
+ TAILQ_ENTRY(rte_dpaa_device) next;
+ struct rte_device device;
+ union {
+ struct rte_eth_dev *eth_dev;
+ struct rte_cryptodev *crypto_dev;
+ };
+ struct rte_dpaa_driver *driver;
+ struct dpaa_device_id id;
+ enum rte_dpaa_type device_type; /**< Ethernet or crypto type device */
+ char name[RTE_ETH_NAME_MAX_LEN];
+};
+
+typedef int (*rte_dpaa_probe_t)(struct rte_dpaa_driver *dpaa_drv,
+ struct rte_dpaa_device *dpaa_dev);
+typedef int (*rte_dpaa_remove_t)(struct rte_dpaa_device *dpaa_dev);
+
+struct rte_dpaa_driver {
+ TAILQ_ENTRY(rte_dpaa_driver) next;
+ struct rte_driver driver;
+ struct rte_dpaa_bus *dpaa_bus;
+ enum rte_dpaa_type drv_type;
+ rte_dpaa_probe_t probe;
+ rte_dpaa_remove_t remove;
+};
+
+struct dpaa_portal {
+ uint32_t bman_idx; /**< BMAN Portal ID*/
+ uint32_t qman_idx; /**< QMAN Portal ID*/
+ uint64_t tid; /**< Parent thread ID for this portal */
+};
+
+/* Various structures representing contiguous memory maps */
+struct dpaa_memseg {
+ TAILQ_ENTRY(dpaa_memseg) next;
+ char *vaddr;
+ rte_iova_t iova;
+ size_t len;
+};
+
+TAILQ_HEAD(dpaa_memseg_list, dpaa_memseg);
+extern struct dpaa_memseg_list rte_dpaa_memsegs;
+
+/* Either iterate over the list of internal memseg references or fall back to
+ * the EAL memseg-based iova-to-virt translation.
+ */
+static inline void *rte_dpaa_mem_ptov(phys_addr_t paddr)
+{
+ struct dpaa_memseg *ms;
+
+ /* Check if the address is already part of the memseg list internally
+ * maintained by the dpaa driver.
+ */
+ TAILQ_FOREACH(ms, &rte_dpaa_memsegs, next) {
+ if (paddr >= ms->iova && paddr <
+ ms->iova + ms->len)
+ return RTE_PTR_ADD(ms->vaddr, (uintptr_t)(paddr - ms->iova));
+ }
+
+ /* If not, fall back to the full memseg list search */
+ return rte_mem_iova2virt(paddr);
+}
+
+/**
+ * Register a DPAA driver.
+ *
+ * @param driver
+ * A pointer to a rte_dpaa_driver structure describing the driver
+ * to be registered.
+ */
+void rte_dpaa_driver_register(struct rte_dpaa_driver *driver);
+
+/**
+ * Unregister a DPAA driver.
+ *
+ * @param driver
+ * A pointer to a rte_dpaa_driver structure describing the driver
+ * to be unregistered.
+ */
+void rte_dpaa_driver_unregister(struct rte_dpaa_driver *driver);
+
+/**
+ * Initialize a DPAA portal
+ *
+ * @param arg
+ * Per thread ID
+ *
+ * @return
+ * 0 in case of success, error otherwise
+ */
+int rte_dpaa_portal_init(void *arg);
+
+int rte_dpaa_portal_fq_init(void *arg, struct qman_fq *fq);
+
+int rte_dpaa_portal_fq_close(struct qman_fq *fq);
+
+/**
+ * Cleanup a DPAA Portal
+ */
+void dpaa_portal_finish(void *arg);
+
+/** Helper for DPAA device registration from driver (eth, crypto) instance */
+#define RTE_PMD_REGISTER_DPAA(nm, dpaa_drv) \
+RTE_INIT(dpaainitfn_ ##nm) \
+{\
+ (dpaa_drv).driver.name = RTE_STR(nm);\
+ rte_dpaa_driver_register(&dpaa_drv); \
+} \
+RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
+
+/* Create storage for dqrr entries per lcore */
+#define DPAA_PORTAL_DEQUEUE_DEPTH 16
+struct dpaa_portal_dqrr {
+ void *mbuf[DPAA_PORTAL_DEQUEUE_DEPTH];
+ uint64_t dqrr_held;
+ uint8_t dqrr_size;
+};
+
+RTE_DECLARE_PER_LCORE(struct dpaa_portal_dqrr, held_bufs);
+
+#define DPAA_PER_LCORE_DQRR_SIZE RTE_PER_LCORE(held_bufs).dqrr_size
+#define DPAA_PER_LCORE_DQRR_HELD RTE_PER_LCORE(held_bufs).dqrr_held
+#define DPAA_PER_LCORE_DQRR_MBUF(i) RTE_PER_LCORE(held_bufs).mbuf[i]
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __RTE_DPAA_BUS_H__ */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/rte_dpaa_logs.h b/src/spdk/dpdk/drivers/bus/dpaa/rte_dpaa_logs.h
new file mode 100644
index 00000000..e4143543
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/rte_dpaa_logs.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2017 NXP
+ *
+ */
+
+#ifndef _DPAA_LOGS_H_
+#define _DPAA_LOGS_H_
+
+#include <rte_log.h>
+
+extern int dpaa_logtype_bus;
+extern int dpaa_logtype_mempool;
+extern int dpaa_logtype_pmd;
+extern int dpaa_logtype_eventdev;
+
+#define DPAA_BUS_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa_logtype_bus, "dpaa: " fmt "\n", ##args)
+
+#ifdef RTE_LIBRTE_DPAA_DEBUG_BUS
+#define DPAA_BUS_HWWARN(cond, fmt, args...) \
+ do {\
+ if (cond) \
+ DPAA_BUS_LOG(DEBUG, "WARN: " fmt, ##args); \
+ } while (0)
+#else
+#define DPAA_BUS_HWWARN(cond, fmt, args...) do { } while (0)
+#endif
+
+#define DPAA_BUS_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa_logtype_bus, "dpaa: %s(): " fmt "\n", \
+ __func__, ##args)
+
+#define BUS_INIT_FUNC_TRACE() DPAA_BUS_DEBUG(" >>")
+
+#define DPAA_BUS_INFO(fmt, args...) \
+ DPAA_BUS_LOG(INFO, fmt, ## args)
+#define DPAA_BUS_ERR(fmt, args...) \
+ DPAA_BUS_LOG(ERR, fmt, ## args)
+#define DPAA_BUS_WARN(fmt, args...) \
+ DPAA_BUS_LOG(WARNING, fmt, ## args)
+
+/* Mempool related logs */
+
+#define DPAA_MEMPOOL_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa_logtype_mempool, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+#define MEMPOOL_INIT_FUNC_TRACE() DPAA_MEMPOOL_LOG(DEBUG, " >>")
+
+#define DPAA_MEMPOOL_DPDEBUG(fmt, args...) \
+ RTE_LOG_DP(DEBUG, PMD, fmt, ## args)
+#define DPAA_MEMPOOL_DEBUG(fmt, args...) \
+ DPAA_MEMPOOL_LOG(DEBUG, fmt, ## args)
+#define DPAA_MEMPOOL_ERR(fmt, args...) \
+ DPAA_MEMPOOL_LOG(ERR, fmt, ## args)
+#define DPAA_MEMPOOL_INFO(fmt, args...) \
+ DPAA_MEMPOOL_LOG(INFO, fmt, ## args)
+#define DPAA_MEMPOOL_WARN(fmt, args...) \
+ DPAA_MEMPOOL_LOG(WARNING, fmt, ## args)
+
+/* PMD related logs */
+
+#define DPAA_PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa_logtype_pmd, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+#define PMD_INIT_FUNC_TRACE() DPAA_PMD_LOG(DEBUG, " >>")
+
+#define DPAA_PMD_DEBUG(fmt, args...) \
+ DPAA_PMD_LOG(DEBUG, fmt, ## args)
+#define DPAA_PMD_ERR(fmt, args...) \
+ DPAA_PMD_LOG(ERR, fmt, ## args)
+#define DPAA_PMD_INFO(fmt, args...) \
+ DPAA_PMD_LOG(INFO, fmt, ## args)
+#define DPAA_PMD_WARN(fmt, args...) \
+ DPAA_PMD_LOG(WARNING, fmt, ## args)
+
+#define DPAA_EVENTDEV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa_logtype_eventdev, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+#define EVENTDEV_INIT_FUNC_TRACE() DPAA_EVENTDEV_LOG(DEBUG, " >>")
+
+#define DPAA_EVENTDEV_DEBUG(fmt, args...) \
+ DPAA_EVENTDEV_LOG(DEBUG, fmt, ## args)
+#define DPAA_EVENTDEV_ERR(fmt, args...) \
+ DPAA_EVENTDEV_LOG(ERR, fmt, ## args)
+#define DPAA_EVENTDEV_INFO(fmt, args...) \
+ DPAA_EVENTDEV_LOG(INFO, fmt, ## args)
+#define DPAA_EVENTDEV_WARN(fmt, args...) \
+ DPAA_EVENTDEV_LOG(WARNING, fmt, ## args)
+
+/* DP Logs, toggled out at compile time if level lower than current level */
+#define DPAA_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, fmt, ## args)
+
+#endif /* _DPAA_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/Makefile b/src/spdk/dpdk/drivers/bus/fslmc/Makefile
new file mode 100644
index 00000000..515d0f53
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/Makefile
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2016 NXP
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_bus_fslmc.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/mc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev
+
+# versioning export map
+EXPORT_MAP := rte_bus_fslmc_version.map
+
+# library version
+LIBABIVER := 1
+
+SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += \
+ qbman/qbman_portal.c \
+ qbman/qbman_debug.c
+
+SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += \
+ mc/dpmng.c \
+ mc/dpbp.c \
+ mc/dpio.c \
+ mc/mc_sys.c \
+ mc/dpcon.c \
+ mc/dpci.c \
+ mc/dpdmai.c
+
+SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += portal/dpaa2_hw_dpio.c
+SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += portal/dpaa2_hw_dpbp.c
+SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += portal/dpaa2_hw_dpci.c
+SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += fslmc_vfio.c
+SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += fslmc_bus.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/fslmc_bus.c b/src/spdk/dpdk/drivers/bus/fslmc/fslmc_bus.c
new file mode 100644
index 00000000..d2900edc
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/fslmc_bus.c
@@ -0,0 +1,528 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2016 NXP
+ *
+ */
+
+#include <string.h>
+#include <dirent.h>
+#include <stdbool.h>
+
+#include <rte_log.h>
+#include <rte_bus.h>
+#include <rte_eal_memconfig.h>
+#include <rte_malloc.h>
+#include <rte_devargs.h>
+#include <rte_memcpy.h>
+#include <rte_ethdev_driver.h>
+
+#include <rte_fslmc.h>
+#include <fslmc_vfio.h>
+#include "fslmc_logs.h"
+
+int dpaa2_logtype_bus;
+
+#define VFIO_IOMMU_GROUP_PATH "/sys/kernel/iommu_groups"
+#define FSLMC_BUS_NAME fslmc
+
+struct rte_fslmc_bus rte_fslmc_bus;
+uint8_t dpaa2_virt_mode;
+
+uint32_t
+rte_fslmc_get_device_count(enum rte_dpaa2_dev_type device_type)
+{
+ if (device_type > DPAA2_DEVTYPE_MAX)
+ return 0;
+ return rte_fslmc_bus.device_count[device_type];
+}
+
+RTE_DEFINE_PER_LCORE(struct dpaa2_portal_dqrr, dpaa2_held_bufs);
+
+static void
+cleanup_fslmc_device_list(void)
+{
+ struct rte_dpaa2_device *dev;
+ struct rte_dpaa2_device *t_dev;
+
+ TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, t_dev) {
+ TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next);
+ free(dev);
+ dev = NULL;
+ }
+}
+
+static int
+compare_dpaa2_devname(struct rte_dpaa2_device *dev1,
+ struct rte_dpaa2_device *dev2)
+{
+ int comp;
+
+ if (dev1->dev_type > dev2->dev_type) {
+ comp = 1;
+ } else if (dev1->dev_type < dev2->dev_type) {
+ comp = -1;
+ } else {
+ /* Check the ID as types match */
+ if (dev1->object_id > dev2->object_id)
+ comp = 1;
+ else if (dev1->object_id < dev2->object_id)
+ comp = -1;
+ else
+ comp = 0; /* Duplicate device name */
+ }
+
+ return comp;
+}
+
+static void
+insert_in_device_list(struct rte_dpaa2_device *newdev)
+{
+ int comp, inserted = 0;
+ struct rte_dpaa2_device *dev = NULL;
+ struct rte_dpaa2_device *tdev = NULL;
+
+ TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, tdev) {
+ comp = compare_dpaa2_devname(newdev, dev);
+ if (comp < 0) {
+ TAILQ_INSERT_BEFORE(dev, newdev, next);
+ inserted = 1;
+ break;
+ }
+ }
+
+ if (!inserted)
+ TAILQ_INSERT_TAIL(&rte_fslmc_bus.device_list, newdev, next);
+}
+
+static struct rte_devargs *
+fslmc_devargs_lookup(struct rte_dpaa2_device *dev)
+{
+ struct rte_devargs *devargs;
+ char dev_name[32];
+
+ RTE_EAL_DEVARGS_FOREACH("fslmc", devargs) {
+ devargs->bus->parse(devargs->name, &dev_name);
+ if (strcmp(dev_name, dev->device.name) == 0) {
+ DPAA2_BUS_INFO("**Devargs matched %s", dev_name);
+ return devargs;
+ }
+ }
+ return NULL;
+}
+
+static void
+dump_device_list(void)
+{
+ struct rte_dpaa2_device *dev;
+ uint32_t global_log_level;
+ int local_log_level;
+
+ /* Print the list only if the log level has been set to debug */
+ global_log_level = rte_log_get_global_level();
+ local_log_level = rte_log_get_level(dpaa2_logtype_bus);
+ if (global_log_level == RTE_LOG_DEBUG ||
+ local_log_level == RTE_LOG_DEBUG) {
+ DPAA2_BUS_LOG(DEBUG, "List of devices scanned on bus:");
+ TAILQ_FOREACH(dev, &rte_fslmc_bus.device_list, next) {
+ DPAA2_BUS_LOG(DEBUG, "\t\t%s", dev->device.name);
+ }
+ }
+}
+
+static int
+scan_one_fslmc_device(char *dev_name)
+{
+ char *dup_dev_name, *t_ptr;
+ struct rte_dpaa2_device *dev;
+
+ if (!dev_name)
+ return -1;
+
+ /* Ignore the Container name itself */
+ if (!strncmp("dprc", dev_name, 4))
+ return 0;
+
+ /* Create a temporary copy to tokenize the string */
+ dup_dev_name = strdup(dev_name);
+ if (!dup_dev_name) {
+ DPAA2_BUS_ERR("Unable to allocate device name memory");
+ return -ENOMEM;
+ }
+
+ /* For all other devices, we allocate rte_dpaa2_device.
+ * For those devices where there is no driver, probe would release
+ * the memory associated with the rte_dpaa2_device after necessary
+ * initialization.
+ */
+ dev = calloc(1, sizeof(struct rte_dpaa2_device));
+ if (!dev) {
+ DPAA2_BUS_ERR("Unable to allocate device object");
+ free(dup_dev_name);
+ return -ENOMEM;
+ }
+
+ /* Parse the device name and ID */
+ t_ptr = strtok(dup_dev_name, ".");
+ if (!t_ptr) {
+ DPAA2_BUS_ERR("Incorrect device name observed");
+ goto cleanup;
+ }
+ if (!strncmp("dpni", t_ptr, 4))
+ dev->dev_type = DPAA2_ETH;
+ else if (!strncmp("dpseci", t_ptr, 6))
+ dev->dev_type = DPAA2_CRYPTO;
+ else if (!strncmp("dpcon", t_ptr, 5))
+ dev->dev_type = DPAA2_CON;
+ else if (!strncmp("dpbp", t_ptr, 4))
+ dev->dev_type = DPAA2_BPOOL;
+ else if (!strncmp("dpio", t_ptr, 4))
+ dev->dev_type = DPAA2_IO;
+ else if (!strncmp("dpci", t_ptr, 4))
+ dev->dev_type = DPAA2_CI;
+ else if (!strncmp("dpmcp", t_ptr, 5))
+ dev->dev_type = DPAA2_MPORTAL;
+ else if (!strncmp("dpdmai", t_ptr, 6))
+ dev->dev_type = DPAA2_QDMA;
+ else
+ dev->dev_type = DPAA2_UNKNOWN;
+
+ /* Update the device found into the device_count table */
+ rte_fslmc_bus.device_count[dev->dev_type]++;
+
+ t_ptr = strtok(NULL, ".");
+ if (!t_ptr) {
+ DPAA2_BUS_ERR("Incorrect device string observed (%s)", t_ptr);
+ goto cleanup;
+ }
+
+ sscanf(t_ptr, "%hu", &dev->object_id);
+ dev->device.name = strdup(dev_name);
+ if (!dev->device.name) {
+ DPAA2_BUS_ERR("Unable to clone device name. Out of memory");
+ goto cleanup;
+ }
+ dev->device.devargs = fslmc_devargs_lookup(dev);
+
+ /* Add device in the fslmc device list */
+ insert_in_device_list(dev);
+
+ /* Don't need the duplicated device filesystem entry anymore */
+ free(dup_dev_name);
+
+ return 0;
+cleanup:
+ free(dup_dev_name);
+ free(dev);
+ return -1;
+}
+
+static int
+rte_fslmc_parse(const char *name, void *addr)
+{
+ uint16_t dev_id;
+ char *t_ptr;
+ char *sep = strchr(name, ':');
+
+ if (strncmp(name, RTE_STR(FSLMC_BUS_NAME),
+ strlen(RTE_STR(FSLMC_BUS_NAME)))) {
+ return -EINVAL;
+ }
+
+ if (!sep) {
+ DPAA2_BUS_ERR("Incorrect device name observed");
+ return -EINVAL;
+ }
+
+ t_ptr = (char *)(sep + 1);
+
+ if (strncmp("dpni", t_ptr, 4) &&
+ strncmp("dpseci", t_ptr, 6) &&
+ strncmp("dpcon", t_ptr, 5) &&
+ strncmp("dpbp", t_ptr, 4) &&
+ strncmp("dpio", t_ptr, 4) &&
+ strncmp("dpci", t_ptr, 4) &&
+ strncmp("dpmcp", t_ptr, 5) &&
+ strncmp("dpdmai", t_ptr, 6)) {
+ DPAA2_BUS_ERR("Unknown or unsupported device");
+ return -EINVAL;
+ }
+
+ t_ptr = strchr(name, '.');
+ if (!t_ptr) {
+ DPAA2_BUS_ERR("Incorrect device string observed (%s)", t_ptr);
+ return -EINVAL;
+ }
+
+ t_ptr = (char *)(t_ptr + 1);
+ if (sscanf(t_ptr, "%hu", &dev_id) <= 0) {
+ DPAA2_BUS_ERR("Incorrect device string observed (%s)", t_ptr);
+ return -EINVAL;
+ }
+
+ if (addr)
+ strcpy(addr, (char *)(sep + 1));
+ return 0;
+}
+
+static int
+rte_fslmc_scan(void)
+{
+ int ret;
+ int device_count = 0;
+ char fslmc_dirpath[PATH_MAX];
+ DIR *dir;
+ struct dirent *entry;
+ static int process_once;
+ int groupid;
+
+ if (process_once) {
+ DPAA2_BUS_DEBUG("Fslmc bus already scanned. Not rescanning");
+ return 0;
+ }
+ process_once = 1;
+
+ ret = fslmc_get_container_group(&groupid);
+ if (ret != 0)
+ goto scan_fail;
+
+ /* Scan devices on the group */
+ sprintf(fslmc_dirpath, "%s/%d/devices", VFIO_IOMMU_GROUP_PATH,
+ groupid);
+ dir = opendir(fslmc_dirpath);
+ if (!dir) {
+ DPAA2_BUS_ERR("Unable to open VFIO group directory");
+ goto scan_fail;
+ }
+
+ while ((entry = readdir(dir)) != NULL) {
+ if (entry->d_name[0] == '.' || entry->d_type != DT_LNK)
+ continue;
+
+ ret = scan_one_fslmc_device(entry->d_name);
+ if (ret != 0) {
+ /* Error in parsing directory - exit gracefully */
+ goto scan_fail_cleanup;
+ }
+ device_count += 1;
+ }
+
+ closedir(dir);
+
+ DPAA2_BUS_INFO("FSLMC Bus scan completed");
+ /* If debugging is enabled, device list is dumped to log output */
+ dump_device_list();
+
+ return 0;
+
+scan_fail_cleanup:
+ closedir(dir);
+
+ /* Remove all devices in the list */
+ cleanup_fslmc_device_list();
+scan_fail:
+ DPAA2_BUS_DEBUG("FSLMC Bus Not Available. Skipping");
+ /* Irrespective of failure, scan only returns success */
+ return 0;
+}
+
+static int
+rte_fslmc_match(struct rte_dpaa2_driver *dpaa2_drv,
+ struct rte_dpaa2_device *dpaa2_dev)
+{
+ if (dpaa2_drv->drv_type == dpaa2_dev->dev_type)
+ return 0;
+
+ return 1;
+}
+
+static int
+rte_fslmc_probe(void)
+{
+ int ret = 0;
+ int probe_all;
+
+ struct rte_dpaa2_device *dev;
+ struct rte_dpaa2_driver *drv;
+
+ if (TAILQ_EMPTY(&rte_fslmc_bus.device_list))
+ return 0;
+
+ ret = fslmc_vfio_setup_group();
+ if (ret) {
+ DPAA2_BUS_ERR("Unable to setup VFIO %d", ret);
+ return 0;
+ }
+
+ /* Map existing segments and, in case of hotpluggable memory,
+ * install a callback handler.
+ */
+ ret = rte_fslmc_vfio_dmamap();
+ if (ret) {
+ DPAA2_BUS_ERR("Unable to DMA map existing VAs: (%d)", ret);
+ /* Not continuing ahead */
+ DPAA2_BUS_ERR("FSLMC VFIO Mapping failed");
+ return 0;
+ }
+
+ ret = fslmc_vfio_process_group();
+ if (ret) {
+ DPAA2_BUS_ERR("Unable to setup devices %d", ret);
+ return 0;
+ }
+
+ probe_all = rte_fslmc_bus.bus.conf.scan_mode != RTE_BUS_SCAN_WHITELIST;
+
+ TAILQ_FOREACH(dev, &rte_fslmc_bus.device_list, next) {
+ TAILQ_FOREACH(drv, &rte_fslmc_bus.driver_list, next) {
+ ret = rte_fslmc_match(drv, dev);
+ if (ret)
+ continue;
+
+ if (!drv->probe)
+ continue;
+
+ if (dev->device.devargs &&
+ dev->device.devargs->policy == RTE_DEV_BLACKLISTED) {
+ DPAA2_BUS_LOG(DEBUG, "%s Blacklisted, skipping",
+ dev->device.name);
+ continue;
+ }
+
+ if (probe_all ||
+ (dev->device.devargs &&
+ dev->device.devargs->policy ==
+ RTE_DEV_WHITELISTED)) {
+ ret = drv->probe(drv, dev);
+ if (ret)
+ DPAA2_BUS_ERR("Unable to probe");
+ }
+ break;
+ }
+ }
+
+ if (rte_eal_iova_mode() == RTE_IOVA_VA)
+ dpaa2_virt_mode = 1;
+
+ return 0;
+}
+
+static struct rte_device *
+rte_fslmc_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
+ const void *data)
+{
+ const struct rte_dpaa2_device *dstart;
+ struct rte_dpaa2_device *dev;
+
+ if (start != NULL) {
+ dstart = RTE_DEV_TO_FSLMC_CONST(start);
+ dev = TAILQ_NEXT(dstart, next);
+ } else {
+ dev = TAILQ_FIRST(&rte_fslmc_bus.device_list);
+ }
+ while (dev != NULL) {
+ if (cmp(&dev->device, data) == 0)
+ return &dev->device;
+ dev = TAILQ_NEXT(dev, next);
+ }
+
+ return NULL;
+}
+
+/* Register a DPAA2 driver with the fslmc bus */
+void
+rte_fslmc_driver_register(struct rte_dpaa2_driver *driver)
+{
+ RTE_VERIFY(driver);
+
+ TAILQ_INSERT_TAIL(&rte_fslmc_bus.driver_list, driver, next);
+ /* Update Bus references */
+ driver->fslmc_bus = &rte_fslmc_bus;
+}
+
+/* Unregister a DPAA2 driver from the fslmc bus */
+void
+rte_fslmc_driver_unregister(struct rte_dpaa2_driver *driver)
+{
+ struct rte_fslmc_bus *fslmc_bus;
+
+ fslmc_bus = driver->fslmc_bus;
+
+ TAILQ_REMOVE(&fslmc_bus->driver_list, driver, next);
+ /* Update Bus references */
+ driver->fslmc_bus = NULL;
+}
+
+/*
+ * Check whether all devices on the bus support IOVA as VA
+ */
+static inline int
+fslmc_all_device_support_iova(void)
+{
+ int ret = 0;
+ struct rte_dpaa2_device *dev;
+ struct rte_dpaa2_driver *drv;
+
+ TAILQ_FOREACH(dev, &rte_fslmc_bus.device_list, next) {
+ TAILQ_FOREACH(drv, &rte_fslmc_bus.driver_list, next) {
+ ret = rte_fslmc_match(drv, dev);
+ if (ret)
+ continue;
+ /* if the driver does not support IOVA as VA */
+ if (!(drv->drv_flags & RTE_DPAA2_DRV_IOVA_AS_VA))
+ return 0;
+ }
+ }
+ return 1;
+}
+
+/*
+ * Get iommu class of DPAA2 devices on the bus.
+ */
+static enum rte_iova_mode
+rte_dpaa2_get_iommu_class(void)
+{
+ bool is_vfio_noiommu_enabled = true;
+ bool has_iova_va;
+
+ if (TAILQ_EMPTY(&rte_fslmc_bus.device_list))
+ return RTE_IOVA_DC;
+
+ /* check if all devices on the bus support Virtual addressing or not */
+ has_iova_va = fslmc_all_device_support_iova();
+
+#ifdef VFIO_PRESENT
+ is_vfio_noiommu_enabled = rte_vfio_noiommu_is_enabled() == 1;
+#endif
+
+ if (has_iova_va && !is_vfio_noiommu_enabled)
+ return RTE_IOVA_VA;
+
+ return RTE_IOVA_PA;
+}
+
+struct rte_fslmc_bus rte_fslmc_bus = {
+ .bus = {
+ .scan = rte_fslmc_scan,
+ .probe = rte_fslmc_probe,
+ .parse = rte_fslmc_parse,
+ .find_device = rte_fslmc_find_device,
+ .get_iommu_class = rte_dpaa2_get_iommu_class,
+ },
+ .device_list = TAILQ_HEAD_INITIALIZER(rte_fslmc_bus.device_list),
+ .driver_list = TAILQ_HEAD_INITIALIZER(rte_fslmc_bus.driver_list),
+ .device_count = {0},
+};
+
+RTE_REGISTER_BUS(FSLMC_BUS_NAME, rte_fslmc_bus.bus);
+
+RTE_INIT(fslmc_init_log)
+{
+ /* Bus level logs */
+ dpaa2_logtype_bus = rte_log_register("bus.fslmc");
+ if (dpaa2_logtype_bus >= 0)
+ rte_log_set_level(dpaa2_logtype_bus, RTE_LOG_NOTICE);
+}
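To show how the probe loop above is reached from a driver's side, here is a hedged sketch of a PMD registering itself on this bus. The driver name, probe body and header providing RTE_INIT are assumptions; the structure fields mirror how rte_fslmc_probe() calls drv->probe(drv, dev) after rte_fslmc_match() compares drv_type with the scanned device's dev_type.

    #include <stdio.h>
    #include <rte_common.h>    /* RTE_INIT; its home header varies by DPDK version */
    #include <rte_fslmc.h>

    /* Hypothetical PMD claiming DPAA2_ETH devices found during scan */
    static int
    example_dpaa2_probe(struct rte_dpaa2_driver *drv,
                        struct rte_dpaa2_device *dev)
    {
        (void)drv;
        printf("probed DPAA2 device %s\n", dev->device.name);
        return 0;
    }

    static struct rte_dpaa2_driver example_dpaa2_drv = {
        .drv_type = DPAA2_ETH,        /* compared by rte_fslmc_match() */
        .probe = example_dpaa2_probe,
    };

    /* Constructor registration, same pattern as RTE_INIT(fslmc_init_log) */
    RTE_INIT(example_dpaa2_drv_init)
    {
        rte_fslmc_driver_register(&example_dpaa2_drv);
    }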
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/fslmc_logs.h b/src/spdk/dpdk/drivers/bus/fslmc/fslmc_logs.h
new file mode 100644
index 00000000..dd74cb7d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/fslmc_logs.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef _FSLMC_LOGS_H_
+#define _FSLMC_LOGS_H_
+
+extern int dpaa2_logtype_bus;
+
+#define DPAA2_BUS_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa2_logtype_bus, "fslmc: " fmt "\n", \
+ ##args)
+
+/* Debug logs are with Function names */
+#define DPAA2_BUS_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa2_logtype_bus, "fslmc: %s(): " fmt "\n", \
+ __func__, ##args)
+
+#define BUS_INIT_FUNC_TRACE() DPAA2_BUS_DEBUG(" >>")
+
+#define DPAA2_BUS_INFO(fmt, args...) \
+ DPAA2_BUS_LOG(INFO, fmt, ## args)
+#define DPAA2_BUS_ERR(fmt, args...) \
+ DPAA2_BUS_LOG(ERR, fmt, ## args)
+#define DPAA2_BUS_WARN(fmt, args...) \
+ DPAA2_BUS_LOG(WARNING, fmt, ## args)
+
+/* DP logs, compiled out at build time if the level is lower than the configured level */
+#define DPAA2_BUS_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, fmt, ## args)
+
+#define DPAA2_BUS_DP_DEBUG(fmt, args...) \
+ DPAA2_BUS_DP_LOG(DEBUG, fmt, ## args)
+#define DPAA2_BUS_DP_INFO(fmt, args...) \
+ DPAA2_BUS_DP_LOG(INFO, fmt, ## args)
+#define DPAA2_BUS_DP_WARN(fmt, args...) \
+ DPAA2_BUS_DP_LOG(WARNING, fmt, ## args)
+
+#endif /* _FSLMC_LOGS_H_ */
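One distinction worth spelling out: the DPAA2_BUS_* macros above are always compiled in and filtered at runtime via rte_log_set_level(), whereas the DPAA2_BUS_DP_* family wraps RTE_LOG_DP(), which the preprocessor drops entirely when the message level is below the compile-time RTE_LOG_DP_LEVEL. A small sketch of the intended split (names illustrative):

    #include <rte_log.h>
    #include "fslmc_logs.h"

    static void
    rx_poll_sketch(int nb_pkts)
    {
        /* hot path: compiles to nothing in release builds */
        DPAA2_BUS_DP_DEBUG("dequeued %d frames\n", nb_pkts);

        /* slow path: always present, runtime-filtered */
        if (nb_pkts < 0)
            DPAA2_BUS_ERR("dequeue returned %d", nb_pkts);
    }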
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/fslmc_vfio.c b/src/spdk/dpdk/drivers/bus/fslmc/fslmc_vfio.c
new file mode 100644
index 00000000..4c2cd2a8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/fslmc_vfio.c
@@ -0,0 +1,789 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2016 NXP
+ *
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <string.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <sys/vfs.h>
+#include <libgen.h>
+#include <dirent.h>
+#include <sys/eventfd.h>
+
+#include <eal_filesystem.h>
+#include <rte_mbuf.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_bus.h>
+#include <rte_eal_memconfig.h>
+
+#include "rte_fslmc.h"
+#include "fslmc_vfio.h"
+#include "fslmc_logs.h"
+#include <mc/fsl_dpmng.h>
+
+#include "portal/dpaa2_hw_pvt.h"
+#include "portal/dpaa2_hw_dpio.h"
+
+/** Pathname of FSL-MC devices directory. */
+#define SYSFS_FSL_MC_DEVICES "/sys/bus/fsl-mc/devices"
+
+#define FSLMC_CONTAINER_MAX_LEN 8 /**< Of the format dprc.XX */
+
+/* The VFIO container and group used within this bus driver */
+static struct fslmc_vfio_group vfio_group;
+static struct fslmc_vfio_container vfio_container;
+static int container_device_fd;
+static char *g_container;
+static uint32_t *msi_intr_vaddr;
+void *(*rte_mcp_ptr_list);
+
+static struct rte_dpaa2_object_list dpaa2_obj_list =
+ TAILQ_HEAD_INITIALIZER(dpaa2_obj_list);
+
+/* Register a DPAA2 object with the fslmc bus */
+void
+rte_fslmc_object_register(struct rte_dpaa2_object *object)
+{
+ RTE_VERIFY(object);
+
+ TAILQ_INSERT_TAIL(&dpaa2_obj_list, object, next);
+}
+
+int
+fslmc_get_container_group(int *groupid)
+{
+ int ret;
+ char *container;
+
+ if (!g_container) {
+ container = getenv("DPRC");
+ if (container == NULL) {
+ DPAA2_BUS_DEBUG("DPAA2: DPRC not available");
+ return -EINVAL;
+ }
+
+ if (strlen(container) >= FSLMC_CONTAINER_MAX_LEN) {
+ DPAA2_BUS_ERR("Invalid container name: %s", container);
+ return -1;
+ }
+
+ g_container = strdup(container);
+ if (!g_container) {
+ DPAA2_BUS_ERR("Mem alloc failure; Container name");
+ return -ENOMEM;
+ }
+ }
+
+ /* get group number */
+ ret = rte_vfio_get_group_num(SYSFS_FSL_MC_DEVICES,
+ g_container, groupid);
+ if (ret <= 0) {
+ DPAA2_BUS_ERR("Unable to find %s IOMMU group", g_container);
+ return -1;
+ }
+
+ DPAA2_BUS_DEBUG("Container: %s has VFIO iommu group id = %d",
+ g_container, *groupid);
+
+ return 0;
+}
+
+static int
+vfio_connect_container(void)
+{
+ int fd, ret;
+
+ if (vfio_container.used) {
+ DPAA2_BUS_DEBUG("No container available");
+ return -1;
+ }
+
+ /* Try connecting to vfio container if already created */
+ if (!ioctl(vfio_group.fd, VFIO_GROUP_SET_CONTAINER,
+ &vfio_container.fd)) {
+ DPAA2_BUS_DEBUG(
+ "Container pre-exists with FD[0x%x] for this group",
+ vfio_container.fd);
+ vfio_group.container = &vfio_container;
+ return 0;
+ }
+
+ /* Open the main vfio file descriptor, which represents the "container" */
+ fd = rte_vfio_get_container_fd();
+ if (fd < 0) {
+ DPAA2_BUS_ERR("Failed to open VFIO container");
+ return -errno;
+ }
+
+ /* Check whether support for SMMU type IOMMU is present or not */
+ if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU)) {
+ /* Connect group to container */
+ ret = ioctl(vfio_group.fd, VFIO_GROUP_SET_CONTAINER, &fd);
+ if (ret) {
+ DPAA2_BUS_ERR("Failed to setup group container");
+ close(fd);
+ return -errno;
+ }
+
+ ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
+ if (ret) {
+ DPAA2_BUS_ERR("Failed to setup VFIO iommu");
+ close(fd);
+ return -errno;
+ }
+ } else {
+ DPAA2_BUS_ERR("No supported IOMMU available");
+ close(fd);
+ return -EINVAL;
+ }
+
+ vfio_container.used = 1;
+ vfio_container.fd = fd;
+ vfio_container.group = &vfio_group;
+ vfio_group.container = &vfio_container;
+
+ return 0;
+}
+
+static int vfio_map_irq_region(struct fslmc_vfio_group *group)
+{
+ int ret;
+ unsigned long *vaddr = NULL;
+ struct vfio_iommu_type1_dma_map map = {
+ .argsz = sizeof(map),
+ .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
+ .vaddr = 0x6030000,
+ .iova = 0x6030000,
+ .size = 0x1000,
+ };
+
+ vaddr = (unsigned long *)mmap(NULL, 0x1000, PROT_WRITE |
+ PROT_READ, MAP_SHARED, container_device_fd, 0x6030000);
+ if (vaddr == MAP_FAILED) {
+ DPAA2_BUS_ERR("Unable to map region (errno = %d)", errno);
+ return -errno;
+ }
+
+ msi_intr_vaddr = (uint32_t *)((char *)(vaddr) + 64);
+ map.vaddr = (unsigned long)vaddr;
+ ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &map);
+ if (ret == 0)
+ return 0;
+
+ DPAA2_BUS_ERR("Unable to map DMA address (errno = %d)", errno);
+ return -errno;
+}
+
+static int fslmc_map_dma(uint64_t vaddr, rte_iova_t iovaddr, size_t len);
+static int fslmc_unmap_dma(uint64_t vaddr, rte_iova_t iovaddr, size_t len);
+
+static void
+fslmc_memevent_cb(enum rte_mem_event type, const void *addr, size_t len,
+ void *arg __rte_unused)
+{
+ struct rte_memseg_list *msl;
+ struct rte_memseg *ms;
+ size_t cur_len = 0, map_len = 0;
+ uint64_t virt_addr;
+ rte_iova_t iova_addr;
+ int ret;
+
+ msl = rte_mem_virt2memseg_list(addr);
+
+ while (cur_len < len) {
+ const void *va = RTE_PTR_ADD(addr, cur_len);
+
+ ms = rte_mem_virt2memseg(va, msl);
+ iova_addr = ms->iova;
+ virt_addr = ms->addr_64;
+ map_len = ms->len;
+
+ DPAA2_BUS_DEBUG("Request for %s, va=%p, "
+ "virt_addr=0x%" PRIx64 ", "
+ "iova=0x%" PRIx64 ", map_len=%zu",
+ type == RTE_MEM_EVENT_ALLOC ?
+ "alloc" : "dealloc",
+ va, virt_addr, iova_addr, map_len);
+
+ if (type == RTE_MEM_EVENT_ALLOC)
+ ret = fslmc_map_dma(virt_addr, iova_addr, map_len);
+ else
+ ret = fslmc_unmap_dma(virt_addr, iova_addr, map_len);
+
+ if (ret != 0) {
+ DPAA2_BUS_ERR("DMA Mapping/Unmapping failed. "
+ "Map=%d, addr=%p, len=%zu, err:(%d)",
+ type, va, map_len, ret);
+ return;
+ }
+
+ cur_len += map_len;
+ }
+
+ if (type == RTE_MEM_EVENT_ALLOC)
+ DPAA2_BUS_DEBUG("Total Mapped: addr=%p, len=%zu",
+ addr, len);
+ else
+ DPAA2_BUS_DEBUG("Total Unmapped: addr=%p, len=%zu",
+ addr, len);
+}
+
+static int
+fslmc_map_dma(uint64_t vaddr, rte_iova_t iovaddr __rte_unused, size_t len)
+{
+ struct fslmc_vfio_group *group;
+ struct vfio_iommu_type1_dma_map dma_map = {
+ .argsz = sizeof(struct vfio_iommu_type1_dma_map),
+ .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
+ };
+ int ret;
+
+ dma_map.size = len;
+ dma_map.vaddr = vaddr;
+
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+ dma_map.iova = iovaddr;
+#else
+ dma_map.iova = dma_map.vaddr;
+#endif
+
+ /* SET DMA MAP for IOMMU */
+ group = &vfio_group;
+
+ if (!group->container) {
+ DPAA2_BUS_ERR("Container is not connected ");
+ return -1;
+ }
+
+ DPAA2_BUS_DEBUG("--> Map address: 0x%"PRIx64", size: %"PRIu64"",
+ (uint64_t)dma_map.vaddr, (uint64_t)dma_map.size);
+ ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &dma_map);
+ if (ret) {
+ DPAA2_BUS_ERR("VFIO_IOMMU_MAP_DMA API(errno = %d)",
+ errno);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+fslmc_unmap_dma(uint64_t vaddr, uint64_t iovaddr __rte_unused, size_t len)
+{
+ struct fslmc_vfio_group *group;
+ struct vfio_iommu_type1_dma_unmap dma_unmap = {
+ .argsz = sizeof(struct vfio_iommu_type1_dma_unmap),
+ .flags = 0,
+ };
+ int ret;
+
+ dma_unmap.size = len;
+ dma_unmap.iova = vaddr;
+
+ /* SET DMA MAP for IOMMU */
+ group = &vfio_group;
+
+ if (!group->container) {
+ DPAA2_BUS_ERR("Container is not connected ");
+ return -1;
+ }
+
+ DPAA2_BUS_DEBUG("--> Unmap address: 0x%"PRIx64", size: %"PRIu64"",
+ (uint64_t)dma_unmap.iova, (uint64_t)dma_unmap.size);
+ ret = ioctl(group->container->fd, VFIO_IOMMU_UNMAP_DMA, &dma_unmap);
+ if (ret) {
+ DPAA2_BUS_ERR("VFIO_IOMMU_UNMAP_DMA API(errno = %d)",
+ errno);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+fslmc_dmamap_seg(const struct rte_memseg_list *msl __rte_unused,
+ const struct rte_memseg *ms, void *arg)
+{
+ int *n_segs = arg;
+ int ret;
+
+ ret = fslmc_map_dma(ms->addr_64, ms->iova, ms->len);
+ if (ret)
+ DPAA2_BUS_ERR("Unable to VFIO map (addr=%p, len=%zu)",
+ ms->addr, ms->len);
+ else
+ (*n_segs)++;
+
+ return ret;
+}
+
+int rte_fslmc_vfio_dmamap(void)
+{
+ int i = 0, ret;
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ rte_rwlock_t *mem_lock = &mcfg->memory_hotplug_lock;
+
+ /* Lock before parsing and registering callback to memory subsystem */
+ rte_rwlock_read_lock(mem_lock);
+
+ if (rte_memseg_walk(fslmc_dmamap_seg, &i) < 0) {
+ rte_rwlock_read_unlock(mem_lock);
+ return -1;
+ }
+
+ ret = rte_mem_event_callback_register("fslmc_memevent_clb",
+ fslmc_memevent_cb, NULL);
+ if (ret && rte_errno == ENOTSUP)
+ DPAA2_BUS_DEBUG("Memory event callbacks not supported");
+ else if (ret)
+ DPAA2_BUS_DEBUG("Unable to install memory handler");
+ else
+ DPAA2_BUS_DEBUG("Installed memory callback handler");
+
+ DPAA2_BUS_DEBUG("Total %d segments found.", i);
+
+ /* TODO - This is a workaround as VFIO currently does not add the
+ * mapping of the interrupt region to SMMU. This should be removed
+ * once the support is added in the kernel.
+ */
+ vfio_map_irq_region(&vfio_group);
+
+ /* Existing segments have been mapped and memory callback for hotplug
+ * has been installed.
+ */
+ rte_rwlock_read_unlock(mem_lock);
+
+ return 0;
+}
+
+static int64_t vfio_map_mcp_obj(struct fslmc_vfio_group *group, char *mcp_obj)
+{
+ intptr_t v_addr = (intptr_t)MAP_FAILED;
+ int32_t ret, mc_fd;
+
+ struct vfio_device_info d_info = { .argsz = sizeof(d_info) };
+ struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) };
+
+ /* get the mcp object's fd */
+ mc_fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, mcp_obj);
+ if (mc_fd < 0) {
+ DPAA2_BUS_ERR("Error in VFIO get dev %s fd from group %d",
+ mcp_obj, group->fd);
+ return v_addr;
+ }
+
+ /* get device info */
+ ret = ioctl(mc_fd, VFIO_DEVICE_GET_INFO, &d_info);
+ if (ret < 0) {
+ DPAA2_BUS_ERR("Error in VFIO getting DEVICE_INFO");
+ goto MC_FAILURE;
+ }
+
+ /* get device region info */
+ ret = ioctl(mc_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
+ if (ret < 0) {
+ DPAA2_BUS_ERR("Error in VFIO getting REGION_INFO");
+ goto MC_FAILURE;
+ }
+
+ v_addr = (size_t)mmap(NULL, reg_info.size,
+ PROT_WRITE | PROT_READ, MAP_SHARED,
+ mc_fd, reg_info.offset);
+
+MC_FAILURE:
+ close(mc_fd);
+
+ return v_addr;
+}
+
+#define IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + sizeof(int))
+
+int rte_dpaa2_intr_enable(struct rte_intr_handle *intr_handle, int index)
+{
+ int len, ret;
+ char irq_set_buf[IRQ_SET_BUF_LEN];
+ struct vfio_irq_set *irq_set;
+ int *fd_ptr;
+
+ len = sizeof(irq_set_buf);
+
+ irq_set = (struct vfio_irq_set *)irq_set_buf;
+ irq_set->argsz = len;
+ irq_set->count = 1;
+ irq_set->flags =
+ VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = index;
+ irq_set->start = 0;
+ fd_ptr = (int *)&irq_set->data;
+ *fd_ptr = intr_handle->fd;
+
+ ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ if (ret) {
+ DPAA2_BUS_ERR("Error:dpaa2 SET IRQs fd=%d, err = %d(%s)",
+ intr_handle->fd, errno, strerror(errno));
+ return ret;
+ }
+
+ return ret;
+}
+
+int rte_dpaa2_intr_disable(struct rte_intr_handle *intr_handle, int index)
+{
+ struct vfio_irq_set *irq_set;
+ char irq_set_buf[IRQ_SET_BUF_LEN];
+ int len, ret;
+
+ len = sizeof(struct vfio_irq_set);
+
+ irq_set = (struct vfio_irq_set *)irq_set_buf;
+ irq_set->argsz = len;
+ irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = index;
+ irq_set->start = 0;
+ irq_set->count = 0;
+
+ ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ if (ret)
+ DPAA2_BUS_ERR(
+ "Error disabling dpaa2 interrupts for fd %d",
+ intr_handle->fd);
+
+ return ret;
+}
+
+/* set up interrupt support (but do not enable interrupts) */
+int
+rte_dpaa2_vfio_setup_intr(struct rte_intr_handle *intr_handle,
+ int vfio_dev_fd,
+ int num_irqs)
+{
+ int i, ret;
+
+ /* start from MSI-X interrupt type */
+ for (i = 0; i < num_irqs; i++) {
+ struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) };
+ int fd = -1;
+
+ irq_info.index = i;
+
+ ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
+ if (ret < 0) {
+ DPAA2_BUS_ERR("Cannot get IRQ(%d) info, error %i (%s)",
+ i, errno, strerror(errno));
+ return -1;
+ }
+
+ /* if this vector cannot be used with eventfd,
+ * skip it and try the next one
+ */
+ if ((irq_info.flags & VFIO_IRQ_INFO_EVENTFD) == 0)
+ continue;
+
+ /* set up an eventfd for interrupts */
+ fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
+ if (fd < 0) {
+ DPAA2_BUS_ERR("Cannot set up eventfd, error %i (%s)",
+ errno, strerror(errno));
+ return -1;
+ }
+
+ intr_handle->fd = fd;
+ intr_handle->type = RTE_INTR_HANDLE_VFIO_MSI;
+ intr_handle->vfio_dev_fd = vfio_dev_fd;
+
+ return 0;
+ }
+
+ /* if we're here, we haven't found a suitable interrupt vector */
+ return -1;
+}
+
+/*
+ * fslmc_process_iodevices for processing only IO (ETH, CRYPTO, and possibly
+ * EVENT) devices.
+ */
+static int
+fslmc_process_iodevices(struct rte_dpaa2_device *dev)
+{
+ int dev_fd;
+ struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
+ struct rte_dpaa2_object *object = NULL;
+
+ dev_fd = ioctl(vfio_group.fd, VFIO_GROUP_GET_DEVICE_FD,
+ dev->device.name);
+ if (dev_fd <= 0) {
+ DPAA2_BUS_ERR("Unable to obtain device FD for device:%s",
+ dev->device.name);
+ return -1;
+ }
+
+ if (ioctl(dev_fd, VFIO_DEVICE_GET_INFO, &device_info)) {
+ DPAA2_BUS_ERR("Unable to obtain information for device:%s",
+ dev->device.name);
+ return -1;
+ }
+
+ switch (dev->dev_type) {
+ case DPAA2_ETH:
+ rte_dpaa2_vfio_setup_intr(&dev->intr_handle, dev_fd,
+ device_info.num_irqs);
+ break;
+ case DPAA2_CON:
+ case DPAA2_IO:
+ case DPAA2_CI:
+ case DPAA2_BPOOL:
+ TAILQ_FOREACH(object, &dpaa2_obj_list, next) {
+ if (dev->dev_type == object->dev_type)
+ object->create(dev_fd, &device_info,
+ dev->object_id);
+ else
+ continue;
+ }
+ break;
+ default:
+ break;
+ }
+
+ DPAA2_BUS_LOG(DEBUG, "Device (%s) abstracted from VFIO",
+ dev->device.name);
+ return 0;
+}
+
+static int
+fslmc_process_mcp(struct rte_dpaa2_device *dev)
+{
+ int ret;
+ intptr_t v_addr;
+ char *dev_name = NULL;
+ struct fsl_mc_io dpmng = {0};
+ struct mc_version mc_ver_info = {0};
+
+ rte_mcp_ptr_list = malloc(sizeof(void *) * 1);
+ if (!rte_mcp_ptr_list) {
+ DPAA2_BUS_ERR("Unable to allocate MC portal memory");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ dev_name = strdup(dev->device.name);
+ if (!dev_name) {
+ DPAA2_BUS_ERR("Unable to allocate MC device name memory");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ v_addr = vfio_map_mcp_obj(&vfio_group, dev_name);
+ if (v_addr == (intptr_t)MAP_FAILED) {
+ DPAA2_BUS_ERR("Error mapping region (errno = %d)", errno);
+ ret = -1;
+ goto cleanup;
+ }
+
+ /* check the MC version compatibility */
+ dpmng.regs = (void *)v_addr;
+ if (mc_get_version(&dpmng, CMD_PRI_LOW, &mc_ver_info)) {
+ DPAA2_BUS_ERR("Unable to obtain MC version");
+ ret = -1;
+ goto cleanup;
+ }
+
+ if ((mc_ver_info.major != MC_VER_MAJOR) ||
+ (mc_ver_info.minor < MC_VER_MINOR)) {
+ DPAA2_BUS_ERR("DPAA2 MC version not compatible!"
+ " Expected %d.%d.x, Detected %d.%d.%d",
+ MC_VER_MAJOR, MC_VER_MINOR,
+ mc_ver_info.major, mc_ver_info.minor,
+ mc_ver_info.revision);
+ ret = -1;
+ goto cleanup;
+ }
+ rte_mcp_ptr_list[0] = (void *)v_addr;
+
+ free(dev_name);
+ return 0;
+
+cleanup:
+ if (dev_name)
+ free(dev_name);
+
+ if (rte_mcp_ptr_list) {
+ free(rte_mcp_ptr_list);
+ rte_mcp_ptr_list = NULL;
+ }
+
+ return ret;
+}
+
+int
+fslmc_vfio_process_group(void)
+{
+ int ret;
+ int found_mportal = 0;
+ struct rte_dpaa2_device *dev, *dev_temp;
+
+ /* Search the MCP as that should be initialized first. */
+ TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, dev_temp) {
+ if (dev->dev_type == DPAA2_MPORTAL) {
+ ret = fslmc_process_mcp(dev);
+ if (ret) {
+ DPAA2_BUS_ERR("Unable to map MC Portal");
+ return -1;
+ }
+ if (!found_mportal)
+ found_mportal = 1;
+
+ TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next);
+ free(dev);
+ dev = NULL;
+ /* Ideally there is only a single dpmcp, but in case
+ * multiple exist, keep looping over the remaining devices.
+ */
+ }
+ }
+
+ /* Cannot continue if there is not even a single mportal */
+ if (!found_mportal) {
+ DPAA2_BUS_ERR("No MC Portal device found. Not continuing");
+ return -1;
+ }
+
+ TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, dev_temp) {
+ switch (dev->dev_type) {
+ case DPAA2_ETH:
+ case DPAA2_CRYPTO:
+ case DPAA2_QDMA:
+ ret = fslmc_process_iodevices(dev);
+ if (ret) {
+ DPAA2_BUS_DEBUG("Dev (%s) init failed",
+ dev->device.name);
+ return ret;
+ }
+ break;
+ case DPAA2_CON:
+ case DPAA2_IO:
+ case DPAA2_CI:
+ case DPAA2_BPOOL:
+ /* Call the object creation routine and remove the
+ * device entry from device list
+ */
+ ret = fslmc_process_iodevices(dev);
+ if (ret) {
+ DPAA2_BUS_DEBUG("Dev (%s) init failed",
+ dev->device.name);
+ return -1;
+ }
+
+ /* This device is not required to be in the DPDK
+ * exposed device list.
+ */
+ TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next);
+ free(dev);
+ dev = NULL;
+ break;
+ case DPAA2_UNKNOWN:
+ default:
+ /* Unknown - ignore */
+ DPAA2_BUS_DEBUG("Found unknown device (%s)",
+ dev->device.name);
+ TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next);
+ free(dev);
+ dev = NULL;
+ }
+ }
+
+ return 0;
+}
+
+int
+fslmc_vfio_setup_group(void)
+{
+ int groupid;
+ int ret;
+ struct vfio_group_status status = { .argsz = sizeof(status) };
+
+ /* if already done once */
+ if (container_device_fd)
+ return 0;
+
+ ret = fslmc_get_container_group(&groupid);
+ if (ret)
+ return ret;
+
+ /* In case this group was already opened, continue without any
+ * processing.
+ */
+ if (vfio_group.groupid == groupid) {
+ DPAA2_BUS_ERR("groupid already exists %d", groupid);
+ return 0;
+ }
+
+ /* Get the actual group fd */
+ ret = rte_vfio_get_group_fd(groupid);
+ if (ret < 0)
+ return ret;
+ vfio_group.fd = ret;
+
+ /* Check group viability */
+ ret = ioctl(vfio_group.fd, VFIO_GROUP_GET_STATUS, &status);
+ if (ret) {
+ DPAA2_BUS_ERR("VFIO error getting group status");
+ close(vfio_group.fd);
+ rte_vfio_clear_group(vfio_group.fd);
+ return ret;
+ }
+
+ if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
+ DPAA2_BUS_ERR("VFIO group not viable");
+ close(vfio_group.fd);
+ rte_vfio_clear_group(vfio_group.fd);
+ return -EPERM;
+ }
+ /* Since Group is VIABLE, Store the groupid */
+ vfio_group.groupid = groupid;
+
+ /* check if group does not have a container yet */
+ if (!(status.flags & VFIO_GROUP_FLAGS_CONTAINER_SET)) {
+ /* Now connect this IOMMU group to given container */
+ ret = vfio_connect_container();
+ if (ret) {
+ DPAA2_BUS_ERR(
+ "Error connecting container with groupid %d",
+ groupid);
+ close(vfio_group.fd);
+ rte_vfio_clear_group(vfio_group.fd);
+ return ret;
+ }
+ }
+
+ /* Get Device information */
+ ret = ioctl(vfio_group.fd, VFIO_GROUP_GET_DEVICE_FD, g_container);
+ if (ret < 0) {
+ DPAA2_BUS_ERR("Error getting device %s fd from group %d",
+ g_container, vfio_group.groupid);
+ close(vfio_group.fd);
+ rte_vfio_clear_group(vfio_group.fd);
+ return ret;
+ }
+ container_device_fd = ret;
+ DPAA2_BUS_DEBUG("VFIO Container FD is [0x%X]",
+ container_device_fd);
+
+ return 0;
+}
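For orientation, a sketch of the order in which rte_fslmc_probe() (fslmc_bus.c above) drives these VFIO helpers; the wrapper function is hypothetical and error handling is trimmed.

    #include "fslmc_vfio.h"

    static int
    fslmc_vfio_bringup_sketch(void)
    {
        /* open the VFIO group of the container named in $DPRC */
        if (fslmc_vfio_setup_group())
            return -1;

        /* DMA-map existing memsegs, install the hotplug callback and
         * apply the interrupt-region mapping workaround
         */
        if (rte_fslmc_vfio_dmamap())
            return -1;

        /* map the MC portal first, then create the remaining objects */
        return fslmc_vfio_process_group();
    }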
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/fslmc_vfio.h b/src/spdk/dpdk/drivers/bus/fslmc/fslmc_vfio.h
new file mode 100644
index 00000000..9e2c4fee
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/fslmc_vfio.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef _FSLMC_VFIO_H_
+#define _FSLMC_VFIO_H_
+
+#include <rte_vfio.h>
+
+#define DPAA2_MC_DPNI_DEVID 7
+#define DPAA2_MC_DPSECI_DEVID 3
+#define DPAA2_MC_DPCON_DEVID 5
+#define DPAA2_MC_DPIO_DEVID 9
+#define DPAA2_MC_DPBP_DEVID 10
+#define DPAA2_MC_DPCI_DEVID 11
+
+typedef struct fslmc_vfio_device {
+ int fd; /* fslmc root container device fd */
+ int index; /* index of child object */
+ struct fslmc_vfio_device *child; /* child object */
+} fslmc_vfio_device;
+
+typedef struct fslmc_vfio_group {
+ int fd; /* /dev/vfio/"groupid" */
+ int groupid;
+ struct fslmc_vfio_container *container;
+ int object_index;
+ struct fslmc_vfio_device *vfio_device;
+} fslmc_vfio_group;
+
+typedef struct fslmc_vfio_container {
+ int fd; /* /dev/vfio/vfio */
+ int used;
+ int index; /* index in group list */
+ struct fslmc_vfio_group *group;
+} fslmc_vfio_container;
+
+int rte_dpaa2_intr_enable(struct rte_intr_handle *intr_handle, int index);
+int rte_dpaa2_intr_disable(struct rte_intr_handle *intr_handle, int index);
+
+int rte_dpaa2_vfio_setup_intr(struct rte_intr_handle *intr_handle,
+ int vfio_dev_fd,
+ int num_irqs);
+
+int fslmc_vfio_setup_group(void);
+int fslmc_vfio_process_group(void);
+char *fslmc_get_container(void);
+int fslmc_get_container_group(int *groupid);
+int rte_fslmc_vfio_dmamap(void);
+
+#endif /* _FSLMC_VFIO_H_ */
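A sketch of the interrupt helpers declared above, following how fslmc_process_iodevices() in fslmc_vfio.c uses them for DPAA2_ETH devices. The caller, the VFIO device fd and IRQ index 0 are assumptions for illustration.

    #include <rte_fslmc.h>
    #include "fslmc_vfio.h"

    static int
    enable_dev_irq_sketch(struct rte_dpaa2_device *dev, int vfio_dev_fd,
                          int num_irqs)
    {
        int ret;

        /* pick the first eventfd-capable vector and create its eventfd */
        ret = rte_dpaa2_vfio_setup_intr(&dev->intr_handle, vfio_dev_fd,
                                        num_irqs);
        if (ret)
            return ret;

        /* route IRQ index 0 to that eventfd via VFIO_DEVICE_SET_IRQS */
        return rte_dpaa2_intr_enable(&dev->intr_handle, 0);
    }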
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/dpbp.c b/src/spdk/dpdk/drivers/bus/fslmc/mc/dpbp.c
new file mode 100644
index 00000000..0215d22d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/dpbp.c
@@ -0,0 +1,346 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2017 NXP
+ *
+ */
+#include <fsl_mc_sys.h>
+#include <fsl_mc_cmd.h>
+#include <fsl_dpbp.h>
+#include <fsl_dpbp_cmd.h>
+
+/**
+ * dpbp_open() - Open a control session for the specified object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpbp_id: DPBP unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpbp_create function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpbp_id,
+ uint16_t *token)
+{
+ struct dpbp_cmd_open *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_OPEN,
+ cmd_flags, 0);
+ cmd_params = (struct dpbp_cmd_open *)cmd.params;
+ cmd_params->dpbp_id = cpu_to_le32(dpbp_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return err;
+}
+
+/**
+ * dpbp_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_CLOSE, cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpbp_create() - Create the DPBP object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg: Configuration structure
+ * @obj_id: Returned object id; use in subsequent API calls
+ *
+ * Create the DPBP object, allocate required resources and
+ * perform required initialization.
+ *
+ * This function accepts an authentication token of a parent
+ * container that this object should be assigned to and returns
+ * an object id. This object_id will be used in all subsequent calls to
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpbp_cfg *cfg,
+ uint32_t *obj_id)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ (void)(cfg); /* unused */
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_CREATE,
+ cmd_flags, dprc_token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *obj_id = mc_cmd_read_object_id(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpbp_destroy() - Destroy the DPBP object and release all its resources.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @obj_id: ID of DPBP object
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpbp_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t obj_id)
+{
+ struct dpbp_cmd_destroy *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_DESTROY,
+ cmd_flags, dprc_token);
+
+ cmd_params = (struct dpbp_cmd_destroy *)cmd.params;
+ cmd_params->object_id = cpu_to_le32(obj_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpbp_enable() - Enable the DPBP.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_ENABLE, cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpbp_disable() - Disable the DPBP.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_DISABLE,
+ cmd_flags, token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpbp_is_enabled() - Check if the DPBP is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en)
+{
+ struct dpbp_rsp_is_enabled *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_IS_ENABLED, cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpbp_rsp_is_enabled *)cmd.params;
+ *en = rsp_params->enabled & DPBP_ENABLE;
+
+ return 0;
+}
+
+/**
+ * dpbp_reset() - Reset the DPBP, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_RESET,
+ cmd_flags, token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
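+
+/**
+ * dpbp_get_attributes() - Retrieve the attributes of the DPBP object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */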
+int dpbp_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpbp_attr *attr)
+{
+ struct dpbp_rsp_get_attributes *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_ATTR,
+ cmd_flags, token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpbp_rsp_get_attributes *)cmd.params;
+ attr->bpid = le16_to_cpu(rsp_params->bpid);
+ attr->id = le32_to_cpu(rsp_params->id);
+
+ return 0;
+}
+
+/**
+ * dpbp_get_api_version - Get Data Path Buffer Pool API version
+ * @mc_io: Pointer to Mc portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of Buffer Pool API
+ * @minor_ver: Minor version of Buffer Pool API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver)
+{
+ struct dpbp_rsp_get_api_version *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_API_VERSION,
+ cmd_flags, 0);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpbp_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
+
+ return 0;
+}
+
+/**
+ * dpbp_get_num_free_bufs() - Get number of free buffers in the buffer pool
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ * @num_free_bufs: Number of free buffers
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_get_num_free_bufs(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint32_t *num_free_bufs)
+{
+ struct dpbp_rsp_get_num_free_bufs *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_FREE_BUFFERS_NUM,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpbp_rsp_get_num_free_bufs *)cmd.params;
+ *num_free_bufs = le32_to_cpu(rsp_params->num_free_bufs);
+
+ return 0;
+}
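Putting the DPBP commands above together, a hedged sketch of a typical open/enable/query sequence. 'mc_io' is assumed to be an already-initialized MC portal and 'dpbp_id' an object id discovered during bus scan; CMD_PRI_LOW is the same priority flag used elsewhere in this commit.

    #include <fsl_mc_sys.h>
    #include <fsl_mc_cmd.h>
    #include <fsl_dpbp.h>

    static int
    dpbp_bringup_sketch(struct fsl_mc_io *mc_io, int dpbp_id)
    {
        struct dpbp_attr attr;
        uint16_t token;
        int err;

        err = dpbp_open(mc_io, CMD_PRI_LOW, dpbp_id, &token);
        if (err)
            return err;

        err = dpbp_enable(mc_io, CMD_PRI_LOW, token);
        if (err)
            goto out;

        /* attr.bpid is the hardware pool id used on the datapath */
        err = dpbp_get_attributes(mc_io, CMD_PRI_LOW, token, &attr);

    out:
        dpbp_close(mc_io, CMD_PRI_LOW, token);
        return err;
    }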
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/dpci.c b/src/spdk/dpdk/drivers/bus/fslmc/mc/dpci.c
new file mode 100644
index 00000000..ff366bfa
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/dpci.c
@@ -0,0 +1,440 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ */
+#include <fsl_mc_sys.h>
+#include <fsl_mc_cmd.h>
+#include <fsl_dpci.h>
+#include <fsl_dpci_cmd.h>
+
+/**
+ * dpci_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpci_id: DPCI unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpci_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpci_id,
+ uint16_t *token)
+{
+ struct dpci_cmd_open *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ cmd_params = (struct dpci_cmd_open *)cmd.params;
+ cmd_params->dpci_id = cpu_to_le32(dpci_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpci_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_CLOSE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpci_create() - Create the DPCI object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg: Configuration structure
+ * @obj_id: Returned object id
+ *
+ * Create the DPCI object, allocate required resources and perform required
+ * initialization.
+ *
+ * The object can be created either by declaring it in the
+ * DPL file, or by calling this function.
+ *
+ * The function accepts an authentication token of a parent
+ * container that this object should be assigned to. The token
+ * can be '0' so the object will be assigned to the default container.
+ * The newly created object can be opened with the returned
+ * object id and using the container's associated tokens and MC portals.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpci_cfg *cfg,
+ uint32_t *obj_id)
+{
+ struct dpci_cmd_create *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_CREATE,
+ cmd_flags,
+ dprc_token);
+ cmd_params = (struct dpci_cmd_create *)cmd.params;
+ cmd_params->num_of_priorities = cfg->num_of_priorities;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *obj_id = mc_cmd_read_object_id(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpci_destroy() - Destroy the DPCI object and release all its resources.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @object_id: The object id; it must be a valid id within the container that
+ * created this object;
+ *
+ * The function accepts the authentication token of the parent container that
+ * created the object (not the one that currently owns the object). The object
+ * is searched within parent using the provided 'object_id'.
+ * All tokens to the object must be closed before calling destroy.
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpci_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id)
+{
+ struct dpci_cmd_destroy *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_DESTROY,
+ cmd_flags,
+ dprc_token);
+ cmd_params = (struct dpci_cmd_destroy *)cmd.params;
+ cmd_params->dpci_id = cpu_to_le32(object_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpci_enable() - Enable the DPCI, allow sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpci_disable() - Disable the DPCI, stop sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpci_is_enabled() - Check if the DPCI is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en)
+{
+ struct dpci_rsp_is_enabled *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_IS_ENABLED, cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpci_rsp_is_enabled *)cmd.params;
+ *en = dpci_get_field(rsp_params->en, ENABLE);
+
+ return 0;
+}
+
+/**
+ * dpci_reset() - Reset the DPCI, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_RESET,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
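+/**
+ * dpci_get_attributes() - Retrieve the attributes of the DPCI object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */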
+int dpci_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpci_attr *attr)
+{
+ struct dpci_rsp_get_attr *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpci_rsp_get_attr *)cmd.params;
+ attr->id = le32_to_cpu(rsp_params->id);
+ attr->num_of_priorities = rsp_params->num_of_priorities;
+
+ return 0;
+}
+
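+/**
+ * dpci_set_rx_queue() - Set Rx queue configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ * @priority: Select the queue relative to number of
+ * priorities configured at DPCI creation
+ * @cfg: Rx queue configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */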
+int dpci_set_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ const struct dpci_rx_queue_cfg *cfg)
+{
+ struct dpci_cmd_set_rx_queue *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_SET_RX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpci_cmd_set_rx_queue *)cmd.params;
+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+ cmd_params->dest_priority = cfg->dest_cfg.priority;
+ cmd_params->priority = priority;
+ cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
+ cmd_params->options = cpu_to_le32(cfg->options);
+ dpci_set_field(cmd_params->dest_type,
+ DEST_TYPE,
+ cfg->dest_cfg.dest_type);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpci_get_rx_queue() - Retrieve Rx queue attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ * @priority: Select the queue relative to number of
+ * priorities configured at DPCI creation
+ * @attr: Returned Rx queue attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_get_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ struct dpci_rx_queue_attr *attr)
+{
+ struct dpci_cmd_get_queue *cmd_params;
+ struct dpci_rsp_get_rx_queue *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_RX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpci_cmd_get_queue *)cmd.params;
+ cmd_params->priority = priority;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpci_rsp_get_rx_queue *)cmd.params;
+ attr->user_ctx = le64_to_cpu(rsp_params->user_ctx);
+ attr->fqid = le32_to_cpu(rsp_params->fqid);
+ attr->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
+ attr->dest_cfg.priority = rsp_params->dest_priority;
+ attr->dest_cfg.dest_type = dpci_get_field(rsp_params->dest_type,
+ DEST_TYPE);
+
+ return 0;
+}
+
+/**
+ * dpci_get_tx_queue() - Retrieve Tx queue attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ * @priority: Select the queue relative to number of
+ * priorities of the peer DPCI object
+ * @attr: Returned Tx queue attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_get_tx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ struct dpci_tx_queue_attr *attr)
+{
+ struct dpci_cmd_get_queue *cmd_params;
+ struct dpci_rsp_get_tx_queue *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_TX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpci_cmd_get_queue *)cmd.params;
+ cmd_params->priority = priority;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpci_rsp_get_tx_queue *)cmd.params;
+ attr->fqid = le32_to_cpu(rsp_params->fqid);
+
+ return 0;
+}
+
+/**
+ * dpci_get_api_version() - Get communication interface API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path communication interface API
+ * @minor_ver: Minor version of data path communication interface API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver)
+{
+ struct dpci_rsp_get_api_version *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_API_VERSION,
+ cmd_flags,
+ 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpci_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
+
+ return 0;
+}
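Similarly, a hedged sketch of configuring a DPCI Rx queue with dpci_set_rx_queue() above. The DPCI_QUEUE_OPT_USER_CTX and DPCI_DEST_NONE names are assumptions about fsl_dpci.h, which is not part of this hunk; the cfg fields themselves match what dpci_set_rx_queue() serializes.

    #include <fsl_mc_sys.h>
    #include <fsl_mc_cmd.h>
    #include <fsl_dpci.h>

    static int
    dpci_rx_setup_sketch(struct fsl_mc_io *mc_io, uint16_t token,
                         uint8_t priority, uint64_t user_ctx)
    {
        struct dpci_rx_queue_cfg cfg = {0};

        cfg.options = DPCI_QUEUE_OPT_USER_CTX;   /* assumed flag name */
        cfg.user_ctx = user_ctx;                 /* echoed with each frame */
        cfg.dest_cfg.dest_type = DPCI_DEST_NONE; /* poll mode, no DPIO/DPCON */

        return dpci_set_rx_queue(mc_io, CMD_PRI_LOW, token, priority, &cfg);
    }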
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/dpcon.c b/src/spdk/dpdk/drivers/bus/fslmc/mc/dpcon.c
new file mode 100644
index 00000000..3f6e04b9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/dpcon.c
@@ -0,0 +1,331 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ */
+#include <fsl_mc_sys.h>
+#include <fsl_mc_cmd.h>
+#include <fsl_dpcon.h>
+#include <fsl_dpcon_cmd.h>
+
+/**
+ * dpcon_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpcon_id: DPCON unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpcon_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpcon_id,
+ uint16_t *token)
+{
+ struct mc_command cmd = { 0 };
+ struct dpcon_cmd_open *dpcon_cmd;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ dpcon_cmd = (struct dpcon_cmd_open *)cmd.params;
+ dpcon_cmd->dpcon_id = cpu_to_le32(dpcon_id);
+
+	/* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpcon_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLOSE,
+ cmd_flags,
+ token);
+
+	/* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpcon_create() - Create the DPCON object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg: Configuration structure
+ * @obj_id: Returned object id; use in subsequent API calls
+ *
+ * Create the DPCON object, allocate required resources and
+ * perform required initialization.
+ *
+ * The object can be created either by declaring it in the
+ * DPL file, or by calling this function.
+ *
+ * This function accepts an authentication token of a parent
+ * container that this object should be assigned to and returns
+ * an object id. The newly created object can then be opened with
+ * the returned object id via dpcon_open().
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpcon_cfg *cfg,
+ uint32_t *obj_id)
+{
+ struct dpcon_cmd_create *dpcon_cmd;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CREATE,
+ cmd_flags,
+ dprc_token);
+ dpcon_cmd = (struct dpcon_cmd_create *)cmd.params;
+ dpcon_cmd->num_priorities = cfg->num_priorities;
+
+	/* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *obj_id = mc_cmd_read_object_id(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpcon_destroy() - Destroy the DPCON object and release all its resources.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @obj_id: ID of DPCON object
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpcon_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t obj_id)
+{
+ struct dpcon_cmd_destroy *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_DESTROY,
+ cmd_flags,
+ dprc_token);
+ cmd_params = (struct dpcon_cmd_destroy *)cmd.params;
+ cmd_params->object_id = cpu_to_le32(obj_id);
+
+	/* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpcon_enable() - Enable the DPCON
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpcon_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+	/* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpcon_disable() - Disable the DPCON
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpcon_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+	/* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpcon_is_enabled() - Check if the DPCON is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en)
+{
+ struct dpcon_rsp_is_enabled *dpcon_rsp;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_IS_ENABLED,
+ cmd_flags,
+ token);
+
+	/* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ dpcon_rsp = (struct dpcon_rsp_is_enabled *)cmd.params;
+ *en = dpcon_rsp->enabled & DPCON_ENABLE;
+
+ return 0;
+}
+
+/**
+ * dpcon_reset() - Reset the DPCON, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_RESET,
+ cmd_flags, token);
+
+	/* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpcon_get_attributes() - Retrieve DPCON attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ * @attr: Object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpcon_attr *attr)
+{
+ struct dpcon_rsp_get_attr *dpcon_rsp;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+	/* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ dpcon_rsp = (struct dpcon_rsp_get_attr *)cmd.params;
+ attr->id = le32_to_cpu(dpcon_rsp->id);
+ attr->qbman_ch_id = le16_to_cpu(dpcon_rsp->qbman_ch_id);
+ attr->num_priorities = dpcon_rsp->num_priorities;
+
+ return 0;
+}
+
+/**
+ * dpcon_get_api_version() - Get Data Path Concentrator API version
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of DPCON API
+ * @minor_ver: Minor version of DPCON API
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpcon_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver)
+{
+ struct dpcon_rsp_get_api_version *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_API_VERSION,
+ cmd_flags, 0);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpcon_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
+
+ return 0;
+}
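+
+/*
+ * Illustrative usage sketch (not part of the upstream API): typical DPCON
+ * bring-up as a scheduling channel.  'mc_io', 'dpcon_id' and CMD_PRI_LOW
+ * are assumed from the caller's environment; error paths are abbreviated.
+ *
+ *	uint16_t token;
+ *	struct dpcon_attr attr;
+ *
+ *	if (dpcon_open(mc_io, CMD_PRI_LOW, dpcon_id, &token))
+ *		return -1;
+ *	dpcon_get_attributes(mc_io, CMD_PRI_LOW, token, &attr);
+ *	dpcon_enable(mc_io, CMD_PRI_LOW, token);
+ *	... use attr.qbman_ch_id as the channel for subsequent dequeues ...
+ */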
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/dpdmai.c b/src/spdk/dpdk/drivers/bus/fslmc/mc/dpdmai.c
new file mode 100644
index 00000000..528889df
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/dpdmai.c
@@ -0,0 +1,429 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#include <fsl_mc_sys.h>
+#include <fsl_mc_cmd.h>
+#include <fsl_dpdmai.h>
+#include <fsl_dpdmai_cmd.h>
+
+/**
+ * dpdmai_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpdmai_id: DPDMAI unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpdmai_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpdmai_id,
+ uint16_t *token)
+{
+ struct dpdmai_cmd_open *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ cmd_params = (struct dpdmai_cmd_open *)cmd.params;
+ cmd_params->dpdmai_id = cpu_to_le32(dpdmai_id);
+
+	/* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpdmai_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLOSE,
+ cmd_flags, token);
+
+	/* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmai_create() - Create the DPDMAI object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg: Configuration structure
+ * @obj_id: Returned object id
+ *
+ * Create the DPDMAI object, allocate required resources and
+ * perform required initialization.
+ *
+ * The object can be created either by declaring it in the
+ * DPL file, or by calling this function.
+ *
+ * The function accepts an authentication token of a parent
+ * container that this object should be assigned to. The token
+ * can be '0' so the object will be assigned to the default container.
+ * The newly created object can be opened with the returned
+ * object id and using the container's associated tokens and MC portals.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpdmai_cfg *cfg,
+ uint32_t *obj_id)
+{
+ struct dpdmai_cmd_create *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE,
+ cmd_flags,
+ dprc_token);
+ cmd_params = (struct dpdmai_cmd_create *)cmd.params;
+ cmd_params->priorities[0] = cfg->priorities[0];
+ cmd_params->priorities[1] = cfg->priorities[1];
+
+	/* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *obj_id = mc_cmd_read_object_id(&cmd);
+
+ return 0;
+}
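+
+/*
+ * Illustrative example: per the dpdmai_cfg contract in fsl_dpdmai.h
+ * (valid priorities are 1-8, the entry after the last valid one is 0),
+ * a single-priority DPDMAI in the default container could be created as
+ * sketched below.  'mc_io' and CMD_PRI_LOW are assumed from the caller.
+ *
+ *	struct dpdmai_cfg cfg = { .priorities = { 1, 0 } };
+ *	uint32_t obj_id;
+ *
+ *	err = dpdmai_create(mc_io, 0, CMD_PRI_LOW, &cfg, &obj_id);
+ */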
+
+/**
+ * dpdmai_destroy() - Destroy the DPDMAI object and release all its resources.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @object_id: The object id; it must be a valid id within the container that
+ *		created this object.
+ *
+ * The function accepts the authentication token of the parent container that
+ * created the object (not the one that currently owns the object). The object
+ * is searched within parent using the provided 'object_id'.
+ * All tokens to the object must be closed before calling destroy.
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpdmai_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id)
+{
+ struct dpdmai_cmd_destroy *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY,
+ cmd_flags,
+ dprc_token);
+ cmd_params = (struct dpdmai_cmd_destroy *)cmd.params;
+ cmd_params->dpdmai_id = cpu_to_le32(object_id);
+
+	/* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmai_enable() - Enable the DPDMAI, allow sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+	/* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmai_disable() - Disable the DPDMAI, stop sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+	/* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmai_is_enabled() - Check if the DPDMAI is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en)
+{
+ struct dpdmai_rsp_is_enabled *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_IS_ENABLED,
+ cmd_flags,
+ token);
+
+	/* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpdmai_rsp_is_enabled *)cmd.params;
+ *en = dpdmai_get_field(rsp_params->en, ENABLE);
+
+ return 0;
+}
+
+/**
+ * dpdmai_reset() - Reset the DPDMAI, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_RESET,
+ cmd_flags,
+ token);
+
+	/* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmai_get_attributes() - Retrieve DPDMAI attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpdmai_attr *attr)
+{
+ struct dpdmai_rsp_get_attr *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+	/* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpdmai_rsp_get_attr *)cmd.params;
+ attr->id = le32_to_cpu(rsp_params->id);
+ attr->num_of_priorities = rsp_params->num_of_priorities;
+
+ return 0;
+}
+
+/**
+ * dpdmai_set_rx_queue() - Set Rx queue configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ * @priority: Select the queue relative to number of
+ * priorities configured at DPDMAI creation; use
+ * DPDMAI_ALL_QUEUES to configure all Rx queues
+ * identically.
+ * @cfg: Rx queue configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ const struct dpdmai_rx_queue_cfg *cfg)
+{
+ struct dpdmai_cmd_set_rx_queue *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_RX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpdmai_cmd_set_rx_queue *)cmd.params;
+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+ cmd_params->dest_priority = cfg->dest_cfg.priority;
+ cmd_params->priority = priority;
+ cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
+ cmd_params->options = cpu_to_le32(cfg->options);
+ dpdmai_set_field(cmd_params->dest_type,
+ DEST_TYPE,
+ cfg->dest_cfg.dest_type);
+
+	/* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
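+
+/*
+ * Illustrative usage sketch (not part of the upstream API): parking every
+ * Rx queue of a DPDMAI for polled operation with a caller-supplied context.
+ * DPDMAI_ALL_QUEUES selects all priorities at once; 'my_ctx' is a
+ * hypothetical caller-owned pointer and CMD_PRI_LOW an assumed flag.
+ *
+ *	struct dpdmai_rx_queue_cfg rx_cfg = {
+ *		.options = DPDMAI_QUEUE_OPT_USER_CTX | DPDMAI_QUEUE_OPT_DEST,
+ *		.user_ctx = (uint64_t)(uintptr_t)my_ctx,
+ *		.dest_cfg = { .dest_type = DPDMAI_DEST_NONE },
+ *	};
+ *
+ *	err = dpdmai_set_rx_queue(mc_io, CMD_PRI_LOW, token,
+ *				  DPDMAI_ALL_QUEUES, &rx_cfg);
+ */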
+
+/**
+ * dpdmai_get_rx_queue() - Retrieve Rx queue attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ * @priority: Select the queue relative to number of
+ * priorities configured at DPDMAI creation
+ * @attr: Returned Rx queue attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ struct dpdmai_rx_queue_attr *attr)
+{
+ struct dpdmai_cmd_get_queue *cmd_params;
+ struct dpdmai_rsp_get_rx_queue *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_RX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpdmai_cmd_get_queue *)cmd.params;
+ cmd_params->priority = priority;
+
+	/* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpdmai_rsp_get_rx_queue *)cmd.params;
+ attr->user_ctx = le64_to_cpu(rsp_params->user_ctx);
+ attr->fqid = le32_to_cpu(rsp_params->fqid);
+ attr->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
+	attr->dest_cfg.priority = rsp_params->dest_priority;
+ attr->dest_cfg.dest_type = dpdmai_get_field(rsp_params->dest_type,
+ DEST_TYPE);
+
+ return 0;
+}
+
+/**
+ * dpdmai_get_tx_queue() - Retrieve Tx queue attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ * @priority: Select the queue relative to number of
+ * priorities configured at DPDMAI creation
+ * @attr: Returned Tx queue attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ struct dpdmai_tx_queue_attr *attr)
+{
+ struct dpdmai_cmd_get_queue *cmd_params;
+ struct dpdmai_rsp_get_tx_queue *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_TX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpdmai_cmd_get_queue *)cmd.params;
+ cmd_params->priority = priority;
+
+	/* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpdmai_rsp_get_tx_queue *)cmd.params;
+ attr->fqid = le32_to_cpu(rsp_params->fqid);
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/dpio.c b/src/spdk/dpdk/drivers/bus/fslmc/mc/dpio.c
new file mode 100644
index 00000000..966277cc
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/dpio.c
@@ -0,0 +1,470 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2017 NXP
+ *
+ */
+#include <fsl_mc_sys.h>
+#include <fsl_mc_cmd.h>
+#include <fsl_dpio.h>
+#include <fsl_dpio_cmd.h>
+
+/**
+ * dpio_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpio_id: DPIO unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpio_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and any MC portals
+ * assigned to the parent container; this token must be used in
+ * all subsequent commands for this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpio_id,
+ uint16_t *token)
+{
+ struct dpio_cmd_open *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ cmd_params = (struct dpio_cmd_open *)cmd.params;
+ cmd_params->dpio_id = cpu_to_le32(dpio_id);
+
+	/* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpio_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CLOSE,
+ cmd_flags,
+ token);
+
+	/* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpio_create() - Create the DPIO object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg: Configuration structure
+ * @obj_id: Returned object id
+ *
+ * Create the DPIO object, allocate required resources and
+ * perform required initialization.
+ *
+ * The object can be created either by declaring it in the
+ * DPL file, or by calling this function.
+ *
+ * The function accepts an authentication token of a parent
+ * container that this object should be assigned to. The token
+ * can be '0' so the object will be assigned to the default container.
+ * The newly created object can be opened with the returned
+ * object id and using the container's associated tokens and MC portals.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpio_cfg *cfg,
+ uint32_t *obj_id)
+{
+ struct dpio_cmd_create *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CREATE,
+ cmd_flags,
+ dprc_token);
+ cmd_params = (struct dpio_cmd_create *)cmd.params;
+ cmd_params->num_priorities = cfg->num_priorities;
+ dpio_set_field(cmd_params->channel_mode,
+ CHANNEL_MODE,
+ cfg->channel_mode);
+
+	/* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *obj_id = mc_cmd_read_object_id(&cmd);
+
+ return 0;
+}
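+
+/*
+ * Illustrative example: creating a DPIO that owns a local scheduling
+ * channel.  DPIO_LOCAL_CHANNEL and the dpio_cfg field names are assumed
+ * from fsl_dpio.h; error handling is abbreviated.
+ *
+ *	struct dpio_cfg cfg = {
+ *		.channel_mode = DPIO_LOCAL_CHANNEL,
+ *		.num_priorities = 8,
+ *	};
+ *	uint32_t obj_id;
+ *
+ *	err = dpio_create(mc_io, 0, CMD_PRI_LOW, &cfg, &obj_id);
+ */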
+
+/**
+ * dpio_destroy() - Destroy the DPIO object and release all its resources.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @object_id: The object id; it must be a valid id within the container that
+ *		created this object.
+ *
+ * The function accepts the authentication token of the parent container that
+ * created the object (not the one that currently owns the object). The object
+ * is searched within parent using the provided 'object_id'.
+ * All tokens to the object must be closed before calling destroy.
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpio_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id)
+{
+ struct dpio_cmd_destroy *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_DESTROY,
+ cmd_flags,
+ dprc_token);
+
+ /* set object id to destroy */
+ cmd_params = (struct dpio_cmd_destroy *)cmd.params;
+ cmd_params->dpio_id = cpu_to_le32(object_id);
+
+	/* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpio_enable() - Enable the DPIO, allow I/O portal operations.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpio_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+	/* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpio_disable() - Disable the DPIO, stop any I/O portal operation.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpio_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+	/* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpio_is_enabled() - Check if the DPIO is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en)
+{
+ struct dpio_rsp_is_enabled *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_IS_ENABLED, cmd_flags,
+ token);
+
+	/* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpio_rsp_is_enabled *)cmd.params;
+ *en = dpio_get_field(rsp_params->en, ENABLE);
+
+ return 0;
+}
+
+/**
+ * dpio_reset() - Reset the DPIO, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_RESET,
+ cmd_flags,
+ token);
+
+	/* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
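+/**
+ * dpio_get_attributes() - Retrieve DPIO attributes.
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPIO object
+ * @attr:	Returned object's attributes
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */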
+int dpio_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpio_attr *attr)
+{
+ struct dpio_rsp_get_attr *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+	/* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpio_rsp_get_attr *)cmd.params;
+ attr->id = le32_to_cpu(rsp_params->id);
+ attr->qbman_portal_id = le16_to_cpu(rsp_params->qbman_portal_id);
+ attr->num_priorities = rsp_params->num_priorities;
+ attr->qbman_portal_ce_offset =
+ le64_to_cpu(rsp_params->qbman_portal_ce_offset);
+ attr->qbman_portal_ci_offset =
+ le64_to_cpu(rsp_params->qbman_portal_ci_offset);
+ attr->qbman_version = le32_to_cpu(rsp_params->qbman_version);
+ attr->clk = le32_to_cpu(rsp_params->clk);
+ attr->channel_mode = dpio_get_field(rsp_params->channel_mode,
+ ATTR_CHANNEL_MODE);
+
+ return 0;
+}
+
+/**
+ * dpio_set_stashing_destination() - Set the stashing destination.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ * @sdest: Stashing destination value
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_set_stashing_destination(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t sdest)
+{
+ struct dpio_stashing_dest *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_STASHING_DEST,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpio_stashing_dest *)cmd.params;
+ cmd_params->sdest = sdest;
+
+	/* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpio_get_stashing_destination() - Get the stashing destination.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ * @sdest: Returns the stashing destination value
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_get_stashing_destination(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t *sdest)
+{
+ struct dpio_stashing_dest *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_STASHING_DEST,
+ cmd_flags,
+ token);
+
+	/* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpio_stashing_dest *)cmd.params;
+ *sdest = rsp_params->sdest;
+
+ return 0;
+}
+
+/**
+ * dpio_add_static_dequeue_channel() - Add a static dequeue channel.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ * @dpcon_id: DPCON object ID
+ * @channel_index: Returned channel index to be used in qbman API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_add_static_dequeue_channel(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int dpcon_id,
+ uint8_t *channel_index)
+{
+ struct dpio_rsp_add_static_dequeue_channel *rsp_params;
+ struct dpio_cmd_static_dequeue_channel *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_ADD_STATIC_DEQUEUE_CHANNEL,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpio_cmd_static_dequeue_channel *)cmd.params;
+ cmd_params->dpcon_id = cpu_to_le32(dpcon_id);
+
+	/* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpio_rsp_add_static_dequeue_channel *)cmd.params;
+ *channel_index = rsp_params->channel_index;
+
+ return 0;
+}
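+
+/*
+ * Illustrative usage sketch (not part of the upstream API): binding a
+ * DPCON to a DPIO as a static dequeue channel.  The returned channel
+ * index is what a caller would hand to the QBMAN dequeue-setup API
+ * (the exact consumer is not shown here).
+ *
+ *	uint8_t ch_idx;
+ *
+ *	err = dpio_add_static_dequeue_channel(mc_io, CMD_PRI_LOW,
+ *					      dpio_token, dpcon_id, &ch_idx);
+ *	... dequeue via the channel identified by ch_idx ...
+ *	dpio_remove_static_dequeue_channel(mc_io, CMD_PRI_LOW,
+ *					   dpio_token, dpcon_id);
+ */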
+
+/**
+ * dpio_remove_static_dequeue_channel() - Remove a static dequeue channel.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ * @dpcon_id: DPCON object ID
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_remove_static_dequeue_channel(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int dpcon_id)
+{
+ struct dpio_cmd_static_dequeue_channel *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(
+ DPIO_CMDID_REMOVE_STATIC_DEQUEUE_CHANNEL,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpio_cmd_static_dequeue_channel *)cmd.params;
+ cmd_params->dpcon_id = cpu_to_le32(dpcon_id);
+
+	/* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpio_get_api_version() - Get Data Path I/O API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path i/o API
+ * @minor_ver: Minor version of data path i/o API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver)
+{
+ struct dpio_rsp_get_api_version *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_API_VERSION,
+ cmd_flags,
+ 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpio_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/dpmng.c b/src/spdk/dpdk/drivers/bus/fslmc/mc/dpmng.c
new file mode 100644
index 00000000..27708087
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/dpmng.c
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2015 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+#include <fsl_mc_sys.h>
+#include <fsl_mc_cmd.h>
+#include <fsl_dpmng.h>
+#include <fsl_dpmng_cmd.h>
+
+/**
+ * mc_get_version() - Retrieves the Management Complex firmware
+ * version information
+ * @mc_io: Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @mc_ver_info: Returned version information structure
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int mc_get_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ struct mc_version *mc_ver_info)
+{
+ struct mc_command cmd = { 0 };
+ struct dpmng_rsp_get_version *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_VERSION,
+ cmd_flags,
+ 0);
+
+	/* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpmng_rsp_get_version *)cmd.params;
+ mc_ver_info->revision = le32_to_cpu(rsp_params->revision);
+ mc_ver_info->major = le32_to_cpu(rsp_params->version_major);
+ mc_ver_info->minor = le32_to_cpu(rsp_params->version_minor);
+
+ return 0;
+}
+
+/**
+ * mc_get_soc_version() - Retrieves the Management Complex SoC version
+ *			information
+ * @mc_io:	Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @mc_platform_info: Returned version information structure. The structure
+ * contains the values of SVR and PVR registers.
+ *			Please consult the platform-specific reference manual
+ * for detailed information.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int mc_get_soc_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ struct mc_soc_version *mc_platform_info)
+{
+ struct dpmng_rsp_get_soc_version *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_SOC_VERSION,
+ cmd_flags,
+ 0);
+
+	/* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpmng_rsp_get_soc_version *)cmd.params;
+ mc_platform_info->svr = le32_to_cpu(rsp_params->svr);
+ mc_platform_info->pvr = le32_to_cpu(rsp_params->pvr);
+
+ return 0;
+}
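+
+/*
+ * Illustrative example: reporting the MC firmware and SoC versions.  The
+ * printf formats are an assumption for demonstration only; SVR/PVR
+ * decoding is platform specific, per the comment above.
+ *
+ *	struct mc_version ver;
+ *	struct mc_soc_version soc;
+ *
+ *	if (!mc_get_version(mc_io, CMD_PRI_LOW, &ver))
+ *		printf("MC firmware %u.%u (rev %u)\n",
+ *		       ver.major, ver.minor, ver.revision);
+ *	if (!mc_get_soc_version(mc_io, CMD_PRI_LOW, &soc))
+ *		printf("SVR 0x%08x PVR 0x%08x\n", soc.svr, soc.pvr);
+ */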
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpbp.h b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpbp.h
new file mode 100644
index 00000000..11183626
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpbp.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2017 NXP
+ *
+ */
+#ifndef __FSL_DPBP_H
+#define __FSL_DPBP_H
+
+/*
+ * Data Path Buffer Pool API
+ * Contains initialization APIs and runtime control APIs for DPBP
+ */
+
+struct fsl_mc_io;
+
+int dpbp_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpbp_id,
+ uint16_t *token);
+
+int dpbp_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dpbp_cfg - Structure representing DPBP configuration
+ * @options:	placeholder
+ */
+struct dpbp_cfg {
+ uint32_t options;
+};
+
+int dpbp_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpbp_cfg *cfg,
+ uint32_t *obj_id);
+
+int dpbp_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t obj_id);
+
+int dpbp_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+int dpbp_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+int dpbp_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
+
+int dpbp_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dpbp_attr - Structure representing DPBP attributes
+ * @id: DPBP object ID
+ * @bpid: Hardware buffer pool ID; should be used as an argument in
+ * acquire/release operations on buffers
+ */
+struct dpbp_attr {
+ int id;
+ uint16_t bpid;
+};
+
+int dpbp_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpbp_attr *attr);
+
+int dpbp_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver);
+
+int dpbp_get_num_free_bufs(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint32_t *num_free_bufs);
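+
+/*
+ * Illustrative usage sketch (not part of the upstream API): obtaining the
+ * hardware buffer pool ID needed by QBMAN acquire/release operations.
+ * Portal setup and error handling are assumed.
+ *
+ *	struct dpbp_attr attr;
+ *	uint16_t token;
+ *
+ *	dpbp_open(mc_io, CMD_PRI_LOW, dpbp_id, &token);
+ *	dpbp_enable(mc_io, CMD_PRI_LOW, token);
+ *	dpbp_get_attributes(mc_io, CMD_PRI_LOW, token, &attr);
+ *	... attr.bpid now identifies the pool for buffer acquire/release ...
+ */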
+
+#endif /* __FSL_DPBP_H */
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpbp_cmd.h b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpbp_cmd.h
new file mode 100644
index 00000000..18402ced
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpbp_cmd.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2017 NXP
+ *
+ */
+#ifndef _FSL_DPBP_CMD_H
+#define _FSL_DPBP_CMD_H
+
+/* DPBP Version */
+#define DPBP_VER_MAJOR 3
+#define DPBP_VER_MINOR 3
+
+/* Command versioning */
+#define DPBP_CMD_BASE_VERSION 1
+#define DPBP_CMD_ID_OFFSET 4
+
+#define DPBP_CMD(id) ((id << DPBP_CMD_ID_OFFSET) | DPBP_CMD_BASE_VERSION)
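+
+/*
+ * For example, DPBP_CMDID_OPEN below expands to (0x804 << 4) | 1 = 0x8041:
+ * the command ID sits in the upper bits and the command version in the
+ * low nibble.
+ */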
+
+/* Command IDs */
+#define DPBP_CMDID_CLOSE DPBP_CMD(0x800)
+#define DPBP_CMDID_OPEN DPBP_CMD(0x804)
+#define DPBP_CMDID_CREATE DPBP_CMD(0x904)
+#define DPBP_CMDID_DESTROY DPBP_CMD(0x984)
+#define DPBP_CMDID_GET_API_VERSION DPBP_CMD(0xa04)
+
+#define DPBP_CMDID_ENABLE DPBP_CMD(0x002)
+#define DPBP_CMDID_DISABLE DPBP_CMD(0x003)
+#define DPBP_CMDID_GET_ATTR DPBP_CMD(0x004)
+#define DPBP_CMDID_RESET DPBP_CMD(0x005)
+#define DPBP_CMDID_IS_ENABLED DPBP_CMD(0x006)
+
+#define DPBP_CMDID_SET_IRQ_ENABLE DPBP_CMD(0x012)
+#define DPBP_CMDID_GET_IRQ_ENABLE DPBP_CMD(0x013)
+#define DPBP_CMDID_SET_IRQ_MASK DPBP_CMD(0x014)
+#define DPBP_CMDID_GET_IRQ_MASK DPBP_CMD(0x015)
+#define DPBP_CMDID_GET_IRQ_STATUS DPBP_CMD(0x016)
+#define DPBP_CMDID_CLEAR_IRQ_STATUS DPBP_CMD(0x017)
+
+#define DPBP_CMDID_SET_NOTIFICATIONS DPBP_CMD(0x1b0)
+#define DPBP_CMDID_GET_NOTIFICATIONS DPBP_CMD(0x1b1)
+
+#define DPBP_CMDID_GET_FREE_BUFFERS_NUM DPBP_CMD(0x1b2)
+
+#pragma pack(push, 1)
+struct dpbp_cmd_open {
+ uint32_t dpbp_id;
+};
+
+struct dpbp_cmd_destroy {
+ uint32_t object_id;
+};
+
+#define DPBP_ENABLE 0x1
+
+struct dpbp_rsp_is_enabled {
+ uint8_t enabled;
+};
+
+struct dpbp_rsp_get_attributes {
+ uint16_t pad;
+ uint16_t bpid;
+ uint32_t id;
+};
+
+struct dpbp_cmd_set_notifications {
+ uint32_t depletion_entry;
+ uint32_t depletion_exit;
+ uint32_t surplus_entry;
+ uint32_t surplus_exit;
+ uint16_t options;
+ uint16_t pad[3];
+ uint64_t message_ctx;
+ uint64_t message_iova;
+};
+
+struct dpbp_rsp_get_notifications {
+ uint32_t depletion_entry;
+ uint32_t depletion_exit;
+ uint32_t surplus_entry;
+ uint32_t surplus_exit;
+ uint16_t options;
+ uint16_t pad[3];
+ uint64_t message_ctx;
+ uint64_t message_iova;
+};
+
+struct dpbp_rsp_get_api_version {
+ uint16_t major;
+ uint16_t minor;
+};
+
+struct dpbp_rsp_get_num_free_bufs {
+ uint32_t num_free_bufs;
+};
+
+#pragma pack(pop)
+#endif /* _FSL_DPBP_CMD_H */
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpci.h b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpci.h
new file mode 100644
index 00000000..f69ed3f3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpci.h
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ */
+#ifndef __FSL_DPCI_H
+#define __FSL_DPCI_H
+
+/* Data Path Communication Interface API
+ * Contains initialization APIs and runtime control APIs for DPCI
+ */
+
+struct fsl_mc_io;
+
+/** General DPCI macros */
+
+/**
+ * Maximum number of Tx/Rx priorities per DPCI object
+ */
+#define DPCI_PRIO_NUM 2
+
+/**
+ * Indicates an invalid frame queue
+ */
+#define DPCI_FQID_NOT_VALID (uint32_t)(-1)
+
+/**
+ * All queues considered; see dpci_set_rx_queue()
+ */
+#define DPCI_ALL_QUEUES (uint8_t)(-1)
+
+int dpci_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpci_id,
+ uint16_t *token);
+
+int dpci_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * Enable the Order Restoration support
+ */
+#define DPCI_OPT_HAS_OPR 0x000040
+
+/**
+ * Order Point Records are shared for the entire DPCI
+ */
+#define DPCI_OPT_OPR_SHARED 0x000080
+
+/**
+ * struct dpci_cfg - Structure representing DPCI configuration
+ * @options: Any combination of the following options:
+ * DPCI_OPT_HAS_OPR
+ * DPCI_OPT_OPR_SHARED
+ * @num_of_priorities: Number of receive priorities (queues) for the DPCI;
+ * note, that the number of transmit priorities (queues)
+ * is determined by the number of receive priorities of
+ * the peer DPCI object
+ */
+struct dpci_cfg {
+ uint32_t options;
+ uint8_t num_of_priorities;
+};
+
+int dpci_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpci_cfg *cfg,
+ uint32_t *obj_id);
+
+int dpci_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id);
+
+int dpci_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+int dpci_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+int dpci_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
+
+int dpci_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dpci_attr - Structure representing DPCI attributes
+ * @id: DPCI object ID
+ * @num_of_priorities: Number of receive priorities
+ */
+struct dpci_attr {
+ int id;
+ uint8_t num_of_priorities;
+};
+
+int dpci_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpci_attr *attr);
+
+/**
+ * enum dpci_dest - DPCI destination types
+ * @DPCI_DEST_NONE: Unassigned destination; The queue is set in parked mode
+ * and does not generate FQDAN notifications; user is
+ * expected to dequeue from the queue based on polling or
+ * other user-defined method
+ * @DPCI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
+ * notifications to the specified DPIO; user is expected
+ * to dequeue from the queue only after notification is
+ * received
+ * @DPCI_DEST_DPCON: The queue is set in schedule mode and does not generate
+ * FQDAN notifications, but is connected to the specified
+ * DPCON object;
+ * user is expected to dequeue from the DPCON channel
+ */
+enum dpci_dest {
+ DPCI_DEST_NONE = 0,
+ DPCI_DEST_DPIO = 1,
+ DPCI_DEST_DPCON = 2
+};
+
+/**
+ * struct dpci_dest_cfg - Structure representing DPCI destination configuration
+ * @dest_type: Destination type
+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
+ * @priority: Priority selection within the DPIO or DPCON channel; valid
+ * values are 0-1 or 0-7, depending on the number of priorities
+ * in that channel; not relevant for 'DPCI_DEST_NONE' option
+ */
+struct dpci_dest_cfg {
+ enum dpci_dest dest_type;
+ int dest_id;
+ uint8_t priority;
+};
+
+/** DPCI queue modification options */
+
+/**
+ * Select to modify the user's context associated with the queue
+ */
+#define DPCI_QUEUE_OPT_USER_CTX 0x00000001
+
+/**
+ * Select to modify the queue's destination
+ */
+#define DPCI_QUEUE_OPT_DEST 0x00000002
+
+/**
+ * struct dpci_rx_queue_cfg - Structure representing RX queue configuration
+ * @options: Flags representing the suggested modifications to the queue;
+ * Use any combination of 'DPCI_QUEUE_OPT_<X>' flags
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame;
+ * valid only if 'DPCI_QUEUE_OPT_USER_CTX' is contained in
+ * 'options'
+ * @dest_cfg: Queue destination parameters;
+ * valid only if 'DPCI_QUEUE_OPT_DEST' is contained in 'options'
+ */
+struct dpci_rx_queue_cfg {
+ uint32_t options;
+ uint64_t user_ctx;
+ struct dpci_dest_cfg dest_cfg;
+};
+
+int dpci_set_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ const struct dpci_rx_queue_cfg *cfg);
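+
+/*
+ * Illustrative usage sketch (not part of the upstream API): steering DPCI
+ * Rx priority 0 to a DPIO so that FQDAN notifications are generated.
+ * 'dpio_id' and 'token' are assumed from the caller's setup.
+ *
+ *	struct dpci_rx_queue_cfg rx_cfg = {
+ *		.options = DPCI_QUEUE_OPT_DEST,
+ *		.dest_cfg = {
+ *			.dest_type = DPCI_DEST_DPIO,
+ *			.dest_id = dpio_id,
+ *			.priority = 0,
+ *		},
+ *	};
+ *
+ *	err = dpci_set_rx_queue(mc_io, CMD_PRI_LOW, token, 0, &rx_cfg);
+ */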
+
+/**
+ * struct dpci_rx_queue_attr - Structure representing Rx queue attributes
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame
+ * @dest_cfg: Queue destination configuration
+ * @fqid: Virtual FQID value to be used for dequeue operations
+ */
+struct dpci_rx_queue_attr {
+ uint64_t user_ctx;
+ struct dpci_dest_cfg dest_cfg;
+ uint32_t fqid;
+};
+
+int dpci_get_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ struct dpci_rx_queue_attr *attr);
+
+/**
+ * struct dpci_tx_queue_attr - Structure representing attributes of Tx queues
+ * @fqid: Virtual FQID to be used for sending frames to peer DPCI;
+ *		returns 'DPCI_FQID_NOT_VALID' if no peer is connected or if
+ * the selected priority exceeds the number of priorities of the
+ * peer DPCI object
+ */
+struct dpci_tx_queue_attr {
+ uint32_t fqid;
+};
+
+int dpci_get_tx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ struct dpci_tx_queue_attr *attr);
+
+int dpci_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver);
+
+#endif /* __FSL_DPCI_H */
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpci_cmd.h b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpci_cmd.h
new file mode 100644
index 00000000..634248ac
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpci_cmd.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ */
+#ifndef _FSL_DPCI_CMD_H
+#define _FSL_DPCI_CMD_H
+
+/* DPCI Version */
+#define DPCI_VER_MAJOR 3
+#define DPCI_VER_MINOR 3
+
+#define DPCI_CMD_BASE_VERSION 1
+#define DPCI_CMD_BASE_VERSION_V2 2
+#define DPCI_CMD_ID_OFFSET 4
+
+#define DPCI_CMD_V1(id) ((id << DPCI_CMD_ID_OFFSET) | DPCI_CMD_BASE_VERSION)
+#define DPCI_CMD_V2(id) ((id << DPCI_CMD_ID_OFFSET) | DPCI_CMD_BASE_VERSION_V2)
+
+/* Command IDs */
+#define DPCI_CMDID_CLOSE DPCI_CMD_V1(0x800)
+#define DPCI_CMDID_OPEN DPCI_CMD_V1(0x807)
+#define DPCI_CMDID_CREATE DPCI_CMD_V2(0x907)
+#define DPCI_CMDID_DESTROY DPCI_CMD_V1(0x987)
+#define DPCI_CMDID_GET_API_VERSION DPCI_CMD_V1(0xa07)
+
+#define DPCI_CMDID_ENABLE DPCI_CMD_V1(0x002)
+#define DPCI_CMDID_DISABLE DPCI_CMD_V1(0x003)
+#define DPCI_CMDID_GET_ATTR DPCI_CMD_V1(0x004)
+#define DPCI_CMDID_RESET DPCI_CMD_V1(0x005)
+#define DPCI_CMDID_IS_ENABLED DPCI_CMD_V1(0x006)
+
+#define DPCI_CMDID_SET_RX_QUEUE DPCI_CMD_V1(0x0e0)
+#define DPCI_CMDID_GET_LINK_STATE DPCI_CMD_V1(0x0e1)
+#define DPCI_CMDID_GET_PEER_ATTR DPCI_CMD_V1(0x0e2)
+#define DPCI_CMDID_GET_RX_QUEUE DPCI_CMD_V1(0x0e3)
+#define DPCI_CMDID_GET_TX_QUEUE DPCI_CMD_V1(0x0e4)
+
+/* Macros for accessing command fields smaller than 1 byte */
+#define DPCI_MASK(field) \
+ GENMASK(DPCI_##field##_SHIFT + DPCI_##field##_SIZE - 1, \
+ DPCI_##field##_SHIFT)
+#define dpci_set_field(var, field, val) \
+ ((var) |= (((val) << DPCI_##field##_SHIFT) & DPCI_MASK(field)))
+#define dpci_get_field(var, field) \
+ (((var) & DPCI_MASK(field)) >> DPCI_##field##_SHIFT)
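+
+/*
+ * For example, with DPCI_DEST_TYPE_SHIFT = 0 and DPCI_DEST_TYPE_SIZE = 4
+ * (defined below), dpci_set_field(var, DEST_TYPE, v) packs 'v' into the
+ * low four bits of 'var', and dpci_get_field(var, DEST_TYPE) extracts
+ * them again.
+ */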
+
+#pragma pack(push, 1)
+struct dpci_cmd_open {
+ uint32_t dpci_id;
+};
+
+struct dpci_cmd_create {
+ uint8_t num_of_priorities;
+ uint8_t pad[15];
+ uint32_t options;
+};
+
+struct dpci_cmd_destroy {
+ uint32_t dpci_id;
+};
+
+#define DPCI_ENABLE_SHIFT 0
+#define DPCI_ENABLE_SIZE 1
+
+struct dpci_rsp_is_enabled {
+	/* only the LSB */
+ uint8_t en;
+};
+
+struct dpci_rsp_get_attr {
+ uint32_t id;
+ uint16_t pad;
+ uint8_t num_of_priorities;
+};
+
+struct dpci_rsp_get_peer_attr {
+ uint32_t id;
+ uint32_t pad;
+ uint8_t num_of_priorities;
+};
+
+#define DPCI_UP_SHIFT 0
+#define DPCI_UP_SIZE 1
+
+struct dpci_rsp_get_link_state {
+	/* only the LSB */
+ uint8_t up;
+};
+
+#define DPCI_DEST_TYPE_SHIFT 0
+#define DPCI_DEST_TYPE_SIZE 4
+
+struct dpci_cmd_set_rx_queue {
+ uint32_t dest_id;
+ uint8_t dest_priority;
+ uint8_t priority;
+ /* from LSB: dest_type:4 */
+ uint8_t dest_type;
+ uint8_t pad;
+ uint64_t user_ctx;
+ uint32_t options;
+};
+
+struct dpci_cmd_get_queue {
+ uint8_t pad[5];
+ uint8_t priority;
+};
+
+struct dpci_rsp_get_rx_queue {
+ uint32_t dest_id;
+ uint8_t dest_priority;
+ uint8_t pad;
+ /* from LSB: dest_type:4 */
+ uint8_t dest_type;
+ uint8_t pad1;
+ uint64_t user_ctx;
+ uint32_t fqid;
+};
+
+struct dpci_rsp_get_tx_queue {
+ uint32_t pad;
+ uint32_t fqid;
+};
+
+struct dpci_rsp_get_api_version {
+ uint16_t major;
+ uint16_t minor;
+};
+
+#pragma pack(pop)
+#endif /* _FSL_DPCI_CMD_H */
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpcon.h b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpcon.h
new file mode 100644
index 00000000..36dd5f3c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpcon.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ */
+#ifndef __FSL_DPCON_H
+#define __FSL_DPCON_H
+
+/* Data Path Concentrator API
+ * Contains initialization APIs and runtime control APIs for DPCON
+ */
+
+struct fsl_mc_io;
+
+/** General DPCON macros */
+
+/**
+ * Use it to disable notifications; see dpcon_set_notification()
+ */
+#define DPCON_INVALID_DPIO_ID (int)(-1)
+
+int dpcon_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpcon_id,
+ uint16_t *token);
+
+int dpcon_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dpcon_cfg - Structure representing DPCON configuration
+ * @num_priorities: Number of priorities for the DPCON channel (1-8)
+ */
+struct dpcon_cfg {
+ uint8_t num_priorities;
+};
+
+int dpcon_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpcon_cfg *cfg,
+ uint32_t *obj_id);
+
+int dpcon_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t obj_id);
+
+int dpcon_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+int dpcon_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+int dpcon_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
+
+int dpcon_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dpcon_attr - Structure representing DPCON attributes
+ * @id: DPCON object ID
+ * @qbman_ch_id: Channel ID to be used by dequeue operation
+ * @num_priorities: Number of priorities for the DPCON channel (1-8)
+ */
+struct dpcon_attr {
+ int id;
+ uint16_t qbman_ch_id;
+ uint8_t num_priorities;
+};
+
+int dpcon_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpcon_attr *attr);
+
+int dpcon_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver);
+
+#endif /* __FSL_DPCON_H */
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpcon_cmd.h b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpcon_cmd.h
new file mode 100644
index 00000000..1641e320
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpcon_cmd.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2017 NXP
+ *
+ */
+#ifndef _FSL_DPCON_CMD_H
+#define _FSL_DPCON_CMD_H
+
+/* DPCON Version */
+#define DPCON_VER_MAJOR 3
+#define DPCON_VER_MINOR 3
+
+/* Command versioning */
+#define DPCON_CMD_BASE_VERSION 1
+#define DPCON_CMD_ID_OFFSET 4
+
+#define DPCON_CMD(id) ((id << DPCON_CMD_ID_OFFSET) | DPCON_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPCON_CMDID_CLOSE DPCON_CMD(0x800)
+#define DPCON_CMDID_OPEN DPCON_CMD(0x808)
+#define DPCON_CMDID_CREATE DPCON_CMD(0x908)
+#define DPCON_CMDID_DESTROY DPCON_CMD(0x988)
+#define DPCON_CMDID_GET_API_VERSION DPCON_CMD(0xa08)
+
+#define DPCON_CMDID_ENABLE DPCON_CMD(0x002)
+#define DPCON_CMDID_DISABLE DPCON_CMD(0x003)
+#define DPCON_CMDID_GET_ATTR DPCON_CMD(0x004)
+#define DPCON_CMDID_RESET DPCON_CMD(0x005)
+#define DPCON_CMDID_IS_ENABLED DPCON_CMD(0x006)
+
+#define DPCON_CMDID_SET_NOTIFICATION DPCON_CMD(0x100)
+
+#pragma pack(push, 1)
+struct dpcon_cmd_open {
+ uint32_t dpcon_id;
+};
+
+struct dpcon_cmd_create {
+ uint8_t num_priorities;
+};
+
+struct dpcon_cmd_destroy {
+ uint32_t object_id;
+};
+
+#define DPCON_ENABLE 1
+
+struct dpcon_rsp_is_enabled {
+ uint8_t enabled;
+};
+
+struct dpcon_rsp_get_attr {
+ uint32_t id;
+ uint16_t qbman_ch_id;
+ uint8_t num_priorities;
+ uint8_t pad;
+};
+
+struct dpcon_cmd_set_notification {
+ uint32_t dpio_id;
+ uint8_t priority;
+ uint8_t pad[3];
+ uint64_t user_ctx;
+};
+
+struct dpcon_rsp_get_api_version {
+ uint16_t major;
+ uint16_t minor;
+};
+
+#pragma pack(pop)
+#endif /* _FSL_DPCON_CMD_H */
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpdmai.h b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpdmai.h
new file mode 100644
index 00000000..03e46ec1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpdmai.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef __FSL_DPDMAI_H
+#define __FSL_DPDMAI_H
+
+struct fsl_mc_io;
+
+/* Data Path DMA Interface API
+ * Contains initialization APIs and runtime control APIs for DPDMAI
+ */
+
+/* General DPDMAI macros */
+
+/**
+ * Maximum number of Tx/Rx priorities per DPDMAI object
+ */
+#define DPDMAI_PRIO_NUM 2
+
+/**
+ * All queues considered; see dpdmai_set_rx_queue()
+ */
+#define DPDMAI_ALL_QUEUES (uint8_t)(-1)
+
+int dpdmai_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpdmai_id,
+ uint16_t *token);
+
+int dpdmai_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dpdmai_cfg - Structure representing DPDMAI configuration
+ * @priorities: Priorities for the DMA hardware processing; valid priorities are
+ *	configured with values 1-8; the entry following the last valid entry
+ * should be configured with 0
+ */
+struct dpdmai_cfg {
+ uint8_t priorities[DPDMAI_PRIO_NUM];
+};
+
+int dpdmai_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpdmai_cfg *cfg,
+ uint32_t *obj_id);
+
+int dpdmai_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id);
+
+int dpdmai_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+int dpdmai_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
+
+int dpdmai_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dpdmai_attr - Structure representing DPDMAI attributes
+ * @id: DPDMAI object ID
+ * @num_of_priorities: number of priorities
+ */
+struct dpdmai_attr {
+ int id;
+ uint8_t num_of_priorities;
+};
+
+int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpdmai_attr *attr);
+
+/**
+ * enum dpdmai_dest - DPDMAI destination types
+ * @DPDMAI_DEST_NONE: Unassigned destination; The queue is set in parked mode
+ * and does not generate FQDAN notifications; user is expected to dequeue
+ * from the queue based on polling or other user-defined method
+ * @DPDMAI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
+ * notifications to the specified DPIO; user is expected to dequeue
+ * from the queue only after notification is received
+ * @DPDMAI_DEST_DPCON: The queue is set in schedule mode and does not generate
+ * FQDAN notifications, but is connected to the specified DPCON object;
+ * user is expected to dequeue from the DPCON channel
+ */
+enum dpdmai_dest {
+ DPDMAI_DEST_NONE = 0,
+ DPDMAI_DEST_DPIO = 1,
+ DPDMAI_DEST_DPCON = 2
+};
+
+/**
+ * struct dpdmai_dest_cfg - Structure representing DPDMAI destination parameters
+ * @dest_type: Destination type
+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
+ * @priority: Priority selection within the DPIO or DPCON channel; valid values
+ * are 0-1 or 0-7, depending on the number of priorities in that
+ * channel; not relevant for 'DPDMAI_DEST_NONE' option
+ */
+struct dpdmai_dest_cfg {
+ enum dpdmai_dest dest_type;
+ int dest_id;
+ uint8_t priority;
+};
+
+/* DPDMAI queue modification options */
+
+/**
+ * Select to modify the user's context associated with the queue
+ */
+#define DPDMAI_QUEUE_OPT_USER_CTX 0x00000001
+
+/**
+ * Select to modify the queue's destination
+ */
+#define DPDMAI_QUEUE_OPT_DEST 0x00000002
+
+/**
+ * struct dpdmai_rx_queue_cfg - DPDMAI RX queue configuration
+ * @options: Flags representing the suggested modifications to the queue;
+ * Use any combination of 'DPDMAI_QUEUE_OPT_<X>' flags
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame;
+ * valid only if 'DPDMAI_QUEUE_OPT_USER_CTX' is contained in 'options'
+ * @dest_cfg: Queue destination parameters;
+ * valid only if 'DPDMAI_QUEUE_OPT_DEST' is contained in 'options'
+ */
+struct dpdmai_rx_queue_cfg {
+ uint32_t options;
+ uint64_t user_ctx;
+ struct dpdmai_dest_cfg dest_cfg;
+};
+
+int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ const struct dpdmai_rx_queue_cfg *cfg);
+
+/**
+ * struct dpdmai_rx_queue_attr - Structure representing attributes of Rx queues
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame
+ * @dest_cfg: Queue destination configuration
+ * @fqid: Virtual FQID value to be used for dequeue operations
+ */
+struct dpdmai_rx_queue_attr {
+ uint64_t user_ctx;
+ struct dpdmai_dest_cfg dest_cfg;
+ uint32_t fqid;
+};
+
+int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ struct dpdmai_rx_queue_attr *attr);
+
+/**
+ * struct dpdmai_tx_queue_attr - Structure representing attributes of Tx queues
+ * @fqid: Virtual FQID to be used for sending frames to DMA hardware
+ */
+struct dpdmai_tx_queue_attr {
+ uint32_t fqid;
+};
+
+int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ struct dpdmai_tx_queue_attr *attr);
+
+#endif /* __FSL_DPDMAI_H */
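A hedged usage sketch of the Rx-queue API above: both option flags are set so
the user context and the destination take effect in one dpdmai_set_rx_queue()
call. The mc_io handle, token, and DPCON ID here are placeholders, not values
from this patch:

	#include <fsl_dpdmai.h>

	/* Sketch: route DPDMAI Rx priority 0 to a DPCON channel. */
	static int route_rx_to_dpcon(struct fsl_mc_io *mc_io, uint16_t token,
				     int dpcon_id)
	{
		struct dpdmai_rx_queue_cfg cfg = {
			.options = DPDMAI_QUEUE_OPT_USER_CTX |
				   DPDMAI_QUEUE_OPT_DEST,
			.user_ctx = 0xdeadbeefULL, /* echoed in dequeued FDs */
			.dest_cfg = {
				.dest_type = DPDMAI_DEST_DPCON,
				.dest_id = dpcon_id, /* placeholder object ID */
				.priority = 0,
			},
		};

		return dpdmai_set_rx_queue(mc_io, 0 /* cmd_flags */, token,
					   0 /* priority */, &cfg);
	}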
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpdmai_cmd.h b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpdmai_cmd.h
new file mode 100644
index 00000000..618e19ea
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpdmai_cmd.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef _FSL_DPDMAI_CMD_H
+#define _FSL_DPDMAI_CMD_H
+
+/* DPDMAI Version */
+#define DPDMAI_VER_MAJOR 3
+#define DPDMAI_VER_MINOR 2
+
+/* Command versioning */
+#define DPDMAI_CMD_BASE_VERSION 1
+#define DPDMAI_CMD_ID_OFFSET 4
+
+#define DPDMAI_CMD(id)	(((id) << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPDMAI_CMDID_CLOSE DPDMAI_CMD(0x800)
+#define DPDMAI_CMDID_OPEN DPDMAI_CMD(0x80E)
+#define DPDMAI_CMDID_CREATE DPDMAI_CMD(0x90E)
+#define DPDMAI_CMDID_DESTROY DPDMAI_CMD(0x98E)
+#define DPDMAI_CMDID_GET_API_VERSION DPDMAI_CMD(0xa0E)
+
+#define DPDMAI_CMDID_ENABLE DPDMAI_CMD(0x002)
+#define DPDMAI_CMDID_DISABLE DPDMAI_CMD(0x003)
+#define DPDMAI_CMDID_GET_ATTR DPDMAI_CMD(0x004)
+#define DPDMAI_CMDID_RESET DPDMAI_CMD(0x005)
+#define DPDMAI_CMDID_IS_ENABLED DPDMAI_CMD(0x006)
+
+#define DPDMAI_CMDID_SET_RX_QUEUE DPDMAI_CMD(0x1A0)
+#define DPDMAI_CMDID_GET_RX_QUEUE DPDMAI_CMD(0x1A1)
+#define DPDMAI_CMDID_GET_TX_QUEUE DPDMAI_CMD(0x1A2)
+
+/* Macros for accessing command fields smaller than 1 byte */
+#define DPDMAI_MASK(field) \
+ GENMASK(DPDMAI_##field##_SHIFT + DPDMAI_##field##_SIZE - 1, \
+ DPDMAI_##field##_SHIFT)
+#define dpdmai_set_field(var, field, val) \
+ ((var) |= (((val) << DPDMAI_##field##_SHIFT) & DPDMAI_MASK(field)))
+#define dpdmai_get_field(var, field) \
+ (((var) & DPDMAI_MASK(field)) >> DPDMAI_##field##_SHIFT)
+
+#pragma pack(push, 1)
+struct dpdmai_cmd_open {
+ uint32_t dpdmai_id;
+};
+
+struct dpdmai_cmd_create {
+ uint8_t pad;
+ uint8_t priorities[2];
+};
+
+struct dpdmai_cmd_destroy {
+ uint32_t dpdmai_id;
+};
+
+#define DPDMAI_ENABLE_SHIFT 0
+#define DPDMAI_ENABLE_SIZE 1
+
+struct dpdmai_rsp_is_enabled {
+	/* only the LSB */
+ uint8_t en;
+};
+
+struct dpdmai_rsp_get_attr {
+ uint32_t id;
+ uint8_t num_of_priorities;
+};
+
+#define DPDMAI_DEST_TYPE_SHIFT 0
+#define DPDMAI_DEST_TYPE_SIZE 4
+
+struct dpdmai_cmd_set_rx_queue {
+ uint32_t dest_id;
+ uint8_t dest_priority;
+ uint8_t priority;
+ /* from LSB: dest_type:4 */
+ uint8_t dest_type;
+ uint8_t pad;
+ uint64_t user_ctx;
+ uint32_t options;
+};
+
+struct dpdmai_cmd_get_queue {
+ uint8_t pad[5];
+ uint8_t priority;
+};
+
+struct dpdmai_rsp_get_rx_queue {
+ uint32_t dest_id;
+ uint8_t dest_priority;
+ uint8_t pad1;
+ /* from LSB: dest_type:4 */
+ uint8_t dest_type;
+ uint8_t pad2;
+ uint64_t user_ctx;
+ uint32_t fqid;
+};
+
+struct dpdmai_rsp_get_tx_queue {
+ uint64_t pad;
+ uint32_t fqid;
+};
+
+#pragma pack(pop)
+#endif /* _FSL_DPDMAI_CMD_H */
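A worked example of the sub-byte field helpers above, for the 4-bit DEST_TYPE
field (shift 0, size 4). This is a sketch that assumes GENMASK from
fsl_mc_cmd.h and the dpdmai_dest enum from fsl_dpdmai.h are in scope:

	#include <fsl_dpdmai.h>		/* enum dpdmai_dest */
	#include <fsl_dpdmai_cmd.h>

	/* GENMASK(3, 0) == 0x0F, so dest_type occupies the low nibble;
	 * dpdmai_set_field(b, DEST_TYPE, DPDMAI_DEST_DPCON) yields b == 0x02.
	 */
	static enum dpdmai_dest decode_dest_type(uint8_t b)
	{
		return (enum dpdmai_dest)dpdmai_get_field(b, DEST_TYPE);
	}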
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpio.h b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpio.h
new file mode 100644
index 00000000..3158f531
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpio.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2017 NXP
+ *
+ */
+#ifndef __FSL_DPIO_H
+#define __FSL_DPIO_H
+
+/* Data Path I/O Portal API
+ * Contains initialization APIs and runtime control APIs for DPIO
+ */
+
+struct fsl_mc_io;
+
+int dpio_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpio_id,
+ uint16_t *token);
+
+int dpio_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * enum dpio_channel_mode - DPIO notification channel mode
+ * @DPIO_NO_CHANNEL: No support for notification channel
+ * @DPIO_LOCAL_CHANNEL: Notifications on data availability can be received by a
+ * dedicated channel in the DPIO; user should point the queue's
+ * destination in the relevant interface to this DPIO
+ */
+enum dpio_channel_mode {
+ DPIO_NO_CHANNEL = 0,
+ DPIO_LOCAL_CHANNEL = 1,
+};
+
+/**
+ * struct dpio_cfg - Structure representing DPIO configuration
+ * @channel_mode: Notification channel mode
+ * @num_priorities: Number of priorities for the notification channel (1-8);
+ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL'
+ */
+struct dpio_cfg {
+ enum dpio_channel_mode channel_mode;
+ uint8_t num_priorities;
+};
+
+int dpio_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpio_cfg *cfg,
+ uint32_t *obj_id);
+
+int dpio_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id);
+
+int dpio_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+int dpio_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+int dpio_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
+
+int dpio_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+int dpio_set_stashing_destination(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t sdest);
+
+int dpio_get_stashing_destination(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t *sdest);
+
+int dpio_add_static_dequeue_channel(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int dpcon_id,
+ uint8_t *channel_index);
+
+int dpio_remove_static_dequeue_channel(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int dpcon_id);
+
+/**
+ * struct dpio_attr - Structure representing DPIO attributes
+ * @id: DPIO object ID
+ * @qbman_portal_ce_offset: Offset of the software portal cache-enabled area
+ * @qbman_portal_ci_offset: Offset of the software portal
+ * cache-inhibited area
+ * @qbman_portal_id: Software portal ID
+ * @channel_mode: Notification channel mode
+ * @num_priorities: Number of priorities for the notification
+ * channel (1-8); relevant only if
+ * 'channel_mode = DPIO_LOCAL_CHANNEL'
+ * @qbman_version: QBMAN version
+ */
+struct dpio_attr {
+ int id;
+ uint64_t qbman_portal_ce_offset;
+ uint64_t qbman_portal_ci_offset;
+ uint16_t qbman_portal_id;
+ enum dpio_channel_mode channel_mode;
+ uint8_t num_priorities;
+ uint32_t qbman_version;
+ uint32_t clk;
+};
+
+int dpio_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpio_attr *attr);
+
+int dpio_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver);
+
+#endif /* __FSL_DPIO_H */
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpio_cmd.h b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpio_cmd.h
new file mode 100644
index 00000000..16a9bc41
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpio_cmd.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2017 NXP
+ *
+ */
+#ifndef _FSL_DPIO_CMD_H
+#define _FSL_DPIO_CMD_H
+
+/* DPIO Version */
+#define DPIO_VER_MAJOR 4
+#define DPIO_VER_MINOR 2
+
+#define DPIO_CMD_BASE_VERSION 1
+#define DPIO_CMD_ID_OFFSET 4
+
+#define DPIO_CMD(id) (((id) << DPIO_CMD_ID_OFFSET) | DPIO_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPIO_CMDID_CLOSE DPIO_CMD(0x800)
+#define DPIO_CMDID_OPEN DPIO_CMD(0x803)
+#define DPIO_CMDID_CREATE DPIO_CMD(0x903)
+#define DPIO_CMDID_DESTROY DPIO_CMD(0x983)
+#define DPIO_CMDID_GET_API_VERSION DPIO_CMD(0xa03)
+
+#define DPIO_CMDID_ENABLE DPIO_CMD(0x002)
+#define DPIO_CMDID_DISABLE DPIO_CMD(0x003)
+#define DPIO_CMDID_GET_ATTR DPIO_CMD(0x004)
+#define DPIO_CMDID_RESET DPIO_CMD(0x005)
+#define DPIO_CMDID_IS_ENABLED DPIO_CMD(0x006)
+
+#define DPIO_CMDID_SET_IRQ_ENABLE DPIO_CMD(0x012)
+#define DPIO_CMDID_GET_IRQ_ENABLE DPIO_CMD(0x013)
+#define DPIO_CMDID_SET_IRQ_MASK DPIO_CMD(0x014)
+#define DPIO_CMDID_GET_IRQ_MASK DPIO_CMD(0x015)
+#define DPIO_CMDID_GET_IRQ_STATUS DPIO_CMD(0x016)
+#define DPIO_CMDID_CLEAR_IRQ_STATUS DPIO_CMD(0x017)
+
+#define DPIO_CMDID_SET_STASHING_DEST DPIO_CMD(0x120)
+#define DPIO_CMDID_GET_STASHING_DEST DPIO_CMD(0x121)
+#define DPIO_CMDID_ADD_STATIC_DEQUEUE_CHANNEL DPIO_CMD(0x122)
+#define DPIO_CMDID_REMOVE_STATIC_DEQUEUE_CHANNEL DPIO_CMD(0x123)
+
+/* Macros for accessing command fields smaller than 1 byte */
+#define DPIO_MASK(field) \
+ GENMASK(DPIO_##field##_SHIFT + DPIO_##field##_SIZE - 1, \
+ DPIO_##field##_SHIFT)
+#define dpio_set_field(var, field, val) \
+ ((var) |= (((val) << DPIO_##field##_SHIFT) & DPIO_MASK(field)))
+#define dpio_get_field(var, field) \
+ (((var) & DPIO_MASK(field)) >> DPIO_##field##_SHIFT)
+
+#pragma pack(push, 1)
+struct dpio_cmd_open {
+ uint32_t dpio_id;
+};
+
+#define DPIO_CHANNEL_MODE_SHIFT 0
+#define DPIO_CHANNEL_MODE_SIZE 2
+
+struct dpio_cmd_create {
+ uint16_t pad1;
+ /* from LSB: channel_mode:2 */
+ uint8_t channel_mode;
+ uint8_t pad2;
+ uint8_t num_priorities;
+};
+
+struct dpio_cmd_destroy {
+ uint32_t dpio_id;
+};
+
+#define DPIO_ENABLE_SHIFT 0
+#define DPIO_ENABLE_SIZE 1
+
+struct dpio_rsp_is_enabled {
+ /* only the LSB */
+ uint8_t en;
+};
+
+#define DPIO_ATTR_CHANNEL_MODE_SHIFT 0
+#define DPIO_ATTR_CHANNEL_MODE_SIZE 4
+
+struct dpio_rsp_get_attr {
+ uint32_t id;
+ uint16_t qbman_portal_id;
+ uint8_t num_priorities;
+ /* from LSB: channel_mode:4 */
+ uint8_t channel_mode;
+ uint64_t qbman_portal_ce_offset;
+ uint64_t qbman_portal_ci_offset;
+ uint32_t qbman_version;
+ uint32_t pad;
+ uint32_t clk;
+};
+
+struct dpio_stashing_dest {
+ uint8_t sdest;
+};
+
+struct dpio_cmd_static_dequeue_channel {
+ uint32_t dpcon_id;
+};
+
+struct dpio_rsp_add_static_dequeue_channel {
+ uint8_t channel_index;
+};
+
+struct dpio_rsp_get_api_version {
+ uint16_t major;
+ uint16_t minor;
+};
+
+#pragma pack(pop)
+#endif /* _FSL_DPIO_CMD_H */
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpmng.h b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpmng.h
new file mode 100644
index 00000000..afaf9b71
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpmng.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2015 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+#ifndef __FSL_DPMNG_H
+#define __FSL_DPMNG_H
+
+/*
+ * Management Complex General API
+ * Contains general API for the Management Complex firmware
+ */
+
+struct fsl_mc_io;
+
+/**
+ * Management Complex firmware version information
+ */
+#define MC_VER_MAJOR 10
+#define MC_VER_MINOR 3
+
+/**
+ * struct mc_version
+ * @major: Major version number: incremented on API compatibility changes
+ * @minor: Minor version number: incremented on API additions (that are
+ * backward compatible); reset when major version is incremented
+ * @revision: Internal revision number: incremented on implementation changes
+ * and/or bug fixes that have no impact on API
+ */
+struct mc_version {
+ uint32_t major;
+ uint32_t minor;
+ uint32_t revision;
+};
+
+int mc_get_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ struct mc_version *mc_ver_info);
+
+/**
+ * struct mc_platform
+ * @svr: System version (content of platform SVR register)
+ * @pvr: Processor version (content of platform PVR register)
+ */
+struct mc_soc_version {
+ uint32_t svr;
+ uint32_t pvr;
+};
+
+int mc_get_soc_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ struct mc_soc_version *mc_platform_info);
+#endif /* __FSL_DPMNG_H */
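A small, hedged sketch of how a caller might use mc_get_version() against the
MC_VER_* constants above; the strictness of the check is this editor's choice,
not mandated by the header:

	#include <errno.h>
	#include <fsl_dpmng.h>

	static int mc_check_firmware(struct fsl_mc_io *mc_io)
	{
		struct mc_version ver;
		int err = mc_get_version(mc_io, 0 /* cmd_flags */, &ver);

		if (err)
			return err;
		/* Major must match; a newer minor is backward compatible. */
		if (ver.major != MC_VER_MAJOR || ver.minor < MC_VER_MINOR)
			return -ENOTSUP;
		return 0;
	}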
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpmng_cmd.h b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpmng_cmd.h
new file mode 100644
index 00000000..ac380be1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_dpmng_cmd.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#ifndef __FSL_DPMNG_CMD_H
+#define __FSL_DPMNG_CMD_H
+
+/* Command versioning */
+#define DPMNG_CMD_BASE_VERSION 1
+#define DPMNG_CMD_ID_OFFSET 4
+
+#define DPMNG_CMD(id) (((id) << DPMNG_CMD_ID_OFFSET) | DPMNG_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPMNG_CMDID_GET_VERSION DPMNG_CMD(0x831)
+#define DPMNG_CMDID_GET_SOC_VERSION DPMNG_CMD(0x832)
+
+#pragma pack(push, 1)
+struct dpmng_rsp_get_version {
+ uint32_t revision;
+ uint32_t version_major;
+ uint32_t version_minor;
+};
+
+struct dpmng_rsp_get_soc_version {
+ uint32_t svr;
+ uint32_t pvr;
+};
+
+#pragma pack(pop)
+
+#endif /* __FSL_DPMNG_CMD_H */
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_mc_cmd.h b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_mc_cmd.h
new file mode 100644
index 00000000..ac919610
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_mc_cmd.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2017 NXP
+ *
+ */
+#ifndef __FSL_MC_CMD_H
+#define __FSL_MC_CMD_H
+
+#include <rte_byteorder.h>
+#include <stdint.h>
+
+#define MC_CMD_NUM_OF_PARAMS 7
+
+#define phys_addr_t uint64_t
+
+#define u64 uint64_t
+#define u32 uint32_t
+#define u16 uint16_t
+#define u8 uint8_t
+
+#define cpu_to_le64 rte_cpu_to_le_64
+#define cpu_to_le32 rte_cpu_to_le_32
+#define cpu_to_le16 rte_cpu_to_le_16
+
+#define le64_to_cpu rte_le_to_cpu_64
+#define le32_to_cpu rte_le_to_cpu_32
+#define le16_to_cpu rte_le_to_cpu_16
+
+#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
+#define GENMASK(h, l) \
+ (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+
+struct mc_cmd_header {
+ union {
+ struct {
+ uint8_t src_id;
+ uint8_t flags_hw;
+ uint8_t status;
+ uint8_t flags_sw;
+ uint16_t token;
+ uint16_t cmd_id;
+ };
+ uint32_t word[2];
+ };
+};
+
+struct mc_command {
+ uint64_t header;
+ uint64_t params[MC_CMD_NUM_OF_PARAMS];
+};
+
+struct mc_rsp_create {
+ uint32_t object_id;
+};
+
+enum mc_cmd_status {
+ MC_CMD_STATUS_OK = 0x0, /* Completed successfully */
+ MC_CMD_STATUS_READY = 0x1, /* Ready to be processed */
+ MC_CMD_STATUS_AUTH_ERR = 0x3, /* Authentication error */
+ MC_CMD_STATUS_NO_PRIVILEGE = 0x4, /* No privilege */
+ MC_CMD_STATUS_DMA_ERR = 0x5, /* DMA or I/O error */
+ MC_CMD_STATUS_CONFIG_ERR = 0x6, /* Configuration error */
+ MC_CMD_STATUS_TIMEOUT = 0x7, /* Operation timed out */
+ MC_CMD_STATUS_NO_RESOURCE = 0x8, /* No resources */
+ MC_CMD_STATUS_NO_MEMORY = 0x9, /* No memory available */
+ MC_CMD_STATUS_BUSY = 0xA, /* Device is busy */
+ MC_CMD_STATUS_UNSUPPORTED_OP = 0xB, /* Unsupported operation */
+ MC_CMD_STATUS_INVALID_STATE = 0xC /* Invalid state */
+};
+
+/*
+ * MC command flags
+ */
+
+/* High priority flag */
+#define MC_CMD_FLAG_PRI 0x80
+/* Disable interrupt on command completion */
+#define MC_CMD_FLAG_INTR_DIS 0x01
+
+#define MC_CMD_HDR_FLAGS_MASK 0xFF00FF00
+
+int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd);
+
+static inline uint64_t mc_encode_cmd_header(uint16_t cmd_id,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ uint64_t header = 0;
+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&header;
+
+ hdr->cmd_id = cpu_to_le16(cmd_id);
+ hdr->token = cpu_to_le16(token);
+ hdr->status = MC_CMD_STATUS_READY;
+ hdr->word[0] |= cpu_to_le32(cmd_flags & MC_CMD_HDR_FLAGS_MASK);
+
+ return header;
+}
+
+static inline uint16_t mc_cmd_hdr_read_token(struct mc_command *cmd)
+{
+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
+ uint16_t token = le16_to_cpu(hdr->token);
+
+ return token;
+}
+
+static inline uint32_t mc_cmd_read_object_id(struct mc_command *cmd)
+{
+ struct mc_rsp_create *rsp_params;
+
+ rsp_params = (struct mc_rsp_create *)cmd->params;
+ return le32_to_cpu(rsp_params->object_id);
+}
+
+static inline enum mc_cmd_status mc_cmd_read_status(struct mc_command *cmd)
+{
+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
+ uint8_t status = hdr->status;
+
+ return (enum mc_cmd_status)status;
+}
+
+/**
+ * mc_write_command - writes a command to a Management Complex (MC) portal
+ *
+ * @portal: pointer to an MC portal
+ * @cmd: pointer to a filled command
+ */
+static inline void mc_write_command(struct mc_command __iomem *portal,
+ struct mc_command *cmd)
+{
+ struct mc_cmd_header *cmd_header = (struct mc_cmd_header *)&cmd->header;
+ char *header = (char *)&portal->header;
+ int i;
+
+ /* copy command parameters into the portal */
+ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
+ iowrite64(cmd->params[i], &portal->params[i]);
+
+ /* submit the command by writing the header */
+ iowrite32(le32_to_cpu(cmd_header->word[1]), (((uint32_t *)header) + 1));
+ iowrite32(le32_to_cpu(cmd_header->word[0]), (uint32_t *)header);
+}
+
+/**
+ * mc_read_response - reads the response for the last MC command from a
+ * Management Complex (MC) portal
+ *
+ * @portal: pointer to an MC portal
+ * @resp: pointer to command response buffer
+ *
+ * Returns MC_CMD_STATUS_OK on Success; Error code otherwise.
+ */
+static inline enum mc_cmd_status mc_read_response(
+ struct mc_command __iomem *portal,
+ struct mc_command *resp)
+{
+ int i;
+ enum mc_cmd_status status;
+
+ /* Copy command response header from MC portal: */
+ resp->header = ioread64(&portal->header);
+ status = mc_cmd_read_status(resp);
+ if (status != MC_CMD_STATUS_OK)
+ return status;
+
+ /* Copy command response data from MC portal: */
+ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
+ resp->params[i] = ioread64(&portal->params[i]);
+
+ return status;
+}
+
+#endif /* __FSL_MC_CMD_H */
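Putting the helpers above together: a hedged sketch that mirrors the pattern
the dp*_open() wrappers elsewhere in this patch use to drive a command through
the portal. The DPCON open command and its parameter struct are borrowed from
fsl_dpcon_cmd.h purely for illustration:

	#include <fsl_mc_cmd.h>
	#include <fsl_mc_sys.h>
	#include <fsl_dpcon_cmd.h>

	static int sketch_dpcon_open(struct fsl_mc_io *mc_io,
				     uint32_t cmd_flags,
				     int dpcon_id, uint16_t *token)
	{
		struct mc_command cmd = { 0 };
		struct dpcon_cmd_open *cmd_params;
		int err;

		/* Header carries the command ID, flags and (here) no token. */
		cmd.header = mc_encode_cmd_header(DPCON_CMDID_OPEN,
						  cmd_flags, 0);
		cmd_params = (struct dpcon_cmd_open *)cmd.params;
		cmd_params->dpcon_id = cpu_to_le32(dpcon_id);

		err = mc_send_command(mc_io, &cmd);
		if (err)
			return err;

		/* The MC returns the object token in the response header. */
		*token = mc_cmd_hdr_read_token(&cmd);
		return 0;
	}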
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_mc_sys.h b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_mc_sys.h
new file mode 100644
index 00000000..d0c7b39f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/fsl_mc_sys.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2015 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+#ifndef _FSL_MC_SYS_H
+#define _FSL_MC_SYS_H
+
+#ifdef __linux_driver__
+
+#include <linux/errno.h>
+#include <asm/io.h>
+#include <linux/slab.h>
+
+struct fsl_mc_io {
+ void *regs;
+};
+
+#ifndef ENOTSUP
+#define ENOTSUP 95
+#endif
+
+#define ioread64(_p) readq(_p)
+#define iowrite64(_v, _p) writeq(_v, _p)
+
+#else /* __linux_driver__ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <errno.h>
+#include <sys/uio.h>
+#include <linux/byteorder/little_endian.h>
+
+#ifndef dmb
+#define dmb() {__asm__ __volatile__("" : : : "memory"); }
+#endif
+#define __iormb() dmb()
+#define __iowmb() dmb()
+#define __arch_getq(a) (*(volatile uint64_t *)(a))
+#define __arch_putq(v, a) (*(volatile uint64_t *)(a) = (v))
+#define __arch_putq32(v, a) (*(volatile uint32_t *)(a) = (v))
+#define readq(c) \
+ ({ uint64_t __v = __arch_getq(c); __iormb(); __v; })
+#define writeq(v, c) \
+ ({ uint64_t __v = v; __iowmb(); __arch_putq(__v, c); __v; })
+#define writeq32(v, c) \
+ ({ uint32_t __v = v; __iowmb(); __arch_putq32(__v, c); __v; })
+#define ioread64(_p) readq(_p)
+#define iowrite64(_v, _p) writeq(_v, _p)
+#define iowrite32(_v, _p) writeq32(_v, _p)
+#define __iomem
+
+/* GPP is supposed to use MC commands with low priority */
+#define CMD_PRI_LOW 0 /*!< Low Priority command indication */
+
+struct fsl_mc_io {
+ void *regs;
+};
+
+#endif /* __linux_driver__ */
+
+#endif /* _FSL_MC_SYS_H */
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/mc/mc_sys.c b/src/spdk/dpdk/drivers/bus/fslmc/mc/mc_sys.c
new file mode 100644
index 00000000..efafdc31
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/mc/mc_sys.c
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2015 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+#include <fsl_mc_sys.h>
+#include <fsl_mc_cmd.h>
+
+#include <rte_spinlock.h>
+
+/** The user-space framework uses the MC portal in shared mode, so a
+ * lock is introduced into the MC FLIB.
+ */
+
+/**
+ * A static spinlock initializer.
+ */
+static rte_spinlock_t mc_portal_lock = RTE_SPINLOCK_INITIALIZER;
+
+static int mc_status_to_error(enum mc_cmd_status status)
+{
+ switch (status) {
+ case MC_CMD_STATUS_OK:
+ return 0;
+ case MC_CMD_STATUS_AUTH_ERR:
+ return -EACCES; /* Token error */
+ case MC_CMD_STATUS_NO_PRIVILEGE:
+ return -EPERM; /* Permission denied */
+ case MC_CMD_STATUS_DMA_ERR:
+ return -EIO; /* Input/Output error */
+ case MC_CMD_STATUS_CONFIG_ERR:
+ return -EINVAL; /* Device not configured */
+ case MC_CMD_STATUS_TIMEOUT:
+ return -ETIMEDOUT; /* Operation timed out */
+ case MC_CMD_STATUS_NO_RESOURCE:
+ return -ENAVAIL; /* Resource temporarily unavailable */
+ case MC_CMD_STATUS_NO_MEMORY:
+ return -ENOMEM; /* Cannot allocate memory */
+ case MC_CMD_STATUS_BUSY:
+ return -EBUSY; /* Device busy */
+ case MC_CMD_STATUS_UNSUPPORTED_OP:
+ return -ENOTSUP; /* Operation not supported by device */
+ case MC_CMD_STATUS_INVALID_STATE:
+ return -ENODEV; /* Invalid device state */
+ default:
+ break;
+ }
+
+ /* Not expected to reach here */
+ return -EINVAL;
+}
+
+int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd)
+{
+ enum mc_cmd_status status;
+ uint64_t response;
+
+ if (!mc_io || !mc_io->regs)
+ return -EACCES;
+
+ /* --- Call lock function here in case portal is shared --- */
+ rte_spinlock_lock(&mc_portal_lock);
+
+ mc_write_command(mc_io->regs, cmd);
+
+ /* Spin until status changes */
+ do {
+ response = ioread64(mc_io->regs);
+ status = mc_cmd_read_status((struct mc_command *)&response);
+
+ /* --- Call wait function here to prevent blocking ---
+ * Change the loop condition accordingly to exit on timeout.
+ */
+ } while (status == MC_CMD_STATUS_READY);
+
+ /* Read the response back into the command buffer */
+ mc_read_response(mc_io->regs, cmd);
+
+ /* --- Call unlock function here in case portal is shared --- */
+ rte_spinlock_unlock(&mc_portal_lock);
+
+ return mc_status_to_error(status);
+}
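The loop above spins indefinitely while the status stays MC_CMD_STATUS_READY;
the inline comment invites a timeout. A hedged sketch of a bounded variant
(the retry budget is arbitrary, and the lock must still be released on the
timeout path):

	/* Sketch only: bounded polling instead of spinning forever. */
	unsigned int budget = 1000000;

	do {
		response = ioread64(mc_io->regs);
		status = mc_cmd_read_status((struct mc_command *)&response);
	} while (status == MC_CMD_STATUS_READY && --budget);

	if (status == MC_CMD_STATUS_READY) {	/* budget exhausted */
		rte_spinlock_unlock(&mc_portal_lock);
		return -ETIMEDOUT;
	}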
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/meson.build b/src/spdk/dpdk/drivers/bus/fslmc/meson.build
new file mode 100644
index 00000000..22a56a6f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/meson.build
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+deps += ['eventdev', 'kvargs']
+sources = files('fslmc_bus.c',
+ 'fslmc_vfio.c',
+ 'mc/dpbp.c',
+ 'mc/dpci.c',
+ 'mc/dpcon.c',
+ 'mc/dpdmai.c',
+ 'mc/dpio.c',
+ 'mc/dpmng.c',
+ 'mc/mc_sys.c',
+ 'portal/dpaa2_hw_dpbp.c',
+ 'portal/dpaa2_hw_dpci.c',
+ 'portal/dpaa2_hw_dpio.c',
+ 'qbman/qbman_portal.c',
+ 'qbman/qbman_debug.c')
+
+allow_experimental_apis = true
+
+includes += include_directories('mc', 'qbman/include', 'portal')
+cflags += ['-D_GNU_SOURCE']
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c b/src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
new file mode 100644
index 00000000..39c5adf9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2016 NXP
+ *
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <string.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <errno.h>
+
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_ethdev_driver.h>
+#include <rte_mbuf_pool_ops.h>
+
+#include <fslmc_logs.h>
+#include <rte_fslmc.h>
+#include <mc/fsl_dpbp.h>
+#include "portal/dpaa2_hw_pvt.h"
+#include "portal/dpaa2_hw_dpio.h"
+
+TAILQ_HEAD(dpbp_dev_list, dpaa2_dpbp_dev);
+static struct dpbp_dev_list dpbp_dev_list
+ = TAILQ_HEAD_INITIALIZER(dpbp_dev_list); /*!< DPBP device list */
+
+static int
+dpaa2_create_dpbp_device(int vdev_fd __rte_unused,
+ struct vfio_device_info *obj_info __rte_unused,
+ int dpbp_id)
+{
+ struct dpaa2_dpbp_dev *dpbp_node;
+ int ret;
+ static int register_once;
+
+ /* Allocate DPAA2 dpbp handle */
+ dpbp_node = rte_malloc(NULL, sizeof(struct dpaa2_dpbp_dev), 0);
+ if (!dpbp_node) {
+ DPAA2_BUS_ERR("Memory allocation failed for DPBP Device");
+ return -1;
+ }
+
+ /* Open the dpbp object */
+ dpbp_node->dpbp.regs = rte_mcp_ptr_list[MC_PORTAL_INDEX];
+ ret = dpbp_open(&dpbp_node->dpbp,
+ CMD_PRI_LOW, dpbp_id, &dpbp_node->token);
+ if (ret) {
+ DPAA2_BUS_ERR("Unable to open buffer pool object: err(%d)",
+ ret);
+ rte_free(dpbp_node);
+ return -1;
+ }
+
+ /* Clean the device first */
+ ret = dpbp_reset(&dpbp_node->dpbp, CMD_PRI_LOW, dpbp_node->token);
+ if (ret) {
+ DPAA2_BUS_ERR("Unable to reset buffer pool device. err(%d)",
+ ret);
+ dpbp_close(&dpbp_node->dpbp, CMD_PRI_LOW, dpbp_node->token);
+ rte_free(dpbp_node);
+ return -1;
+ }
+
+ dpbp_node->dpbp_id = dpbp_id;
+ rte_atomic16_init(&dpbp_node->in_use);
+
+ TAILQ_INSERT_TAIL(&dpbp_dev_list, dpbp_node, next);
+
+ if (!register_once) {
+ rte_mbuf_set_platform_mempool_ops(DPAA2_MEMPOOL_OPS_NAME);
+ register_once = 1;
+ }
+
+ return 0;
+}
+
+struct dpaa2_dpbp_dev *dpaa2_alloc_dpbp_dev(void)
+{
+ struct dpaa2_dpbp_dev *dpbp_dev = NULL;
+
+ /* Get DPBP dev handle from list using index */
+ TAILQ_FOREACH(dpbp_dev, &dpbp_dev_list, next) {
+ if (dpbp_dev && rte_atomic16_test_and_set(&dpbp_dev->in_use))
+ break;
+ }
+
+ return dpbp_dev;
+}
+
+void dpaa2_free_dpbp_dev(struct dpaa2_dpbp_dev *dpbp)
+{
+ struct dpaa2_dpbp_dev *dpbp_dev = NULL;
+
+ /* Match DPBP handle and mark it free */
+ TAILQ_FOREACH(dpbp_dev, &dpbp_dev_list, next) {
+ if (dpbp_dev == dpbp) {
+ rte_atomic16_dec(&dpbp_dev->in_use);
+ return;
+ }
+ }
+}
+
+int dpaa2_dpbp_supported(void)
+{
+ if (TAILQ_EMPTY(&dpbp_dev_list))
+ return -1;
+ return 0;
+}
+
+static struct rte_dpaa2_object rte_dpaa2_dpbp_obj = {
+ .dev_type = DPAA2_BPOOL,
+ .create = dpaa2_create_dpbp_device,
+};
+
+RTE_PMD_REGISTER_DPAA2_OBJECT(dpbp, rte_dpaa2_dpbp_obj);
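The allocator above hands out list entries guarded by an atomic in_use flag; a
hedged sketch of the intended acquire/release pairing (the error value is this
editor's choice):

	struct dpaa2_dpbp_dev *dpbp = dpaa2_alloc_dpbp_dev();

	if (!dpbp)
		return -ENOENT;	/* all DPBP objects already claimed */

	/* ... attach the buffer pool to a mempool and use it ... */

	dpaa2_free_dpbp_dev(dpbp); /* clears in_use so others may claim it */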
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c b/src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
new file mode 100644
index 00000000..5ad0374d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
@@ -0,0 +1,187 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2017 NXP
+ *
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <string.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <errno.h>
+
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_ethdev_driver.h>
+
+#include <fslmc_logs.h>
+#include <rte_fslmc.h>
+#include <mc/fsl_dpci.h>
+#include "portal/dpaa2_hw_pvt.h"
+#include "portal/dpaa2_hw_dpio.h"
+
+TAILQ_HEAD(dpci_dev_list, dpaa2_dpci_dev);
+static struct dpci_dev_list dpci_dev_list
+ = TAILQ_HEAD_INITIALIZER(dpci_dev_list); /*!< DPCI device list */
+
+static int
+rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused,
+ struct vfio_device_info *obj_info __rte_unused,
+ int dpci_id)
+{
+ struct dpaa2_dpci_dev *dpci_node;
+ struct dpci_attr attr;
+ struct dpci_rx_queue_cfg rx_queue_cfg;
+ struct dpci_rx_queue_attr rx_attr;
+ struct dpci_tx_queue_attr tx_attr;
+ int ret, i;
+
+ /* Allocate DPAA2 dpci handle */
+ dpci_node = rte_malloc(NULL, sizeof(struct dpaa2_dpci_dev), 0);
+ if (!dpci_node) {
+ DPAA2_BUS_ERR("Memory allocation failed for DPCI Device");
+ return -ENOMEM;
+ }
+
+ /* Open the dpci object */
+ dpci_node->dpci.regs = rte_mcp_ptr_list[MC_PORTAL_INDEX];
+ ret = dpci_open(&dpci_node->dpci,
+ CMD_PRI_LOW, dpci_id, &dpci_node->token);
+ if (ret) {
+ DPAA2_BUS_ERR("Resource alloc failure with err code: %d", ret);
+ goto err;
+ }
+
+ /* Get the device attributes */
+ ret = dpci_get_attributes(&dpci_node->dpci,
+ CMD_PRI_LOW, dpci_node->token, &attr);
+ if (ret != 0) {
+ DPAA2_BUS_ERR("Reading device failed with err code: %d", ret);
+ goto err;
+ }
+
+ for (i = 0; i < DPAA2_DPCI_MAX_QUEUES; i++) {
+ struct dpaa2_queue *rxq;
+
+ memset(&rx_queue_cfg, 0, sizeof(struct dpci_rx_queue_cfg));
+ ret = dpci_set_rx_queue(&dpci_node->dpci,
+ CMD_PRI_LOW,
+ dpci_node->token,
+ i, &rx_queue_cfg);
+ if (ret) {
+ DPAA2_BUS_ERR("Setting Rx queue failed with err code: %d",
+ ret);
+ goto err;
+ }
+
+ /* Allocate DQ storage for the DPCI Rx queues */
+ rxq = &(dpci_node->rx_queue[i]);
+ rxq->q_storage = rte_malloc("dq_storage",
+ sizeof(struct queue_storage_info_t),
+ RTE_CACHE_LINE_SIZE);
+ if (!rxq->q_storage) {
+			DPAA2_BUS_ERR("q_storage allocation failed");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
+ ret = dpaa2_alloc_dq_storage(rxq->q_storage);
+ if (ret) {
+			DPAA2_BUS_ERR("dpaa2_alloc_dq_storage failed");
+ goto err;
+ }
+ }
+
+ /* Enable the device */
+ ret = dpci_enable(&dpci_node->dpci,
+ CMD_PRI_LOW, dpci_node->token);
+ if (ret != 0) {
+ DPAA2_BUS_ERR("Enabling device failed with err code: %d", ret);
+ goto err;
+ }
+
+ for (i = 0; i < DPAA2_DPCI_MAX_QUEUES; i++) {
+ /* Get the Rx FQID's */
+ ret = dpci_get_rx_queue(&dpci_node->dpci,
+ CMD_PRI_LOW,
+ dpci_node->token, i,
+ &rx_attr);
+ if (ret != 0) {
+ DPAA2_BUS_ERR("Rx queue fetch failed with err code: %d",
+ ret);
+ goto err;
+ }
+ dpci_node->rx_queue[i].fqid = rx_attr.fqid;
+
+ ret = dpci_get_tx_queue(&dpci_node->dpci,
+ CMD_PRI_LOW,
+ dpci_node->token, i,
+ &tx_attr);
+ if (ret != 0) {
+ DPAA2_BUS_ERR("Reading device failed with err code: %d",
+ ret);
+ goto err;
+ }
+ dpci_node->tx_queue[i].fqid = tx_attr.fqid;
+ }
+
+ dpci_node->dpci_id = dpci_id;
+ rte_atomic16_init(&dpci_node->in_use);
+
+ TAILQ_INSERT_TAIL(&dpci_dev_list, dpci_node, next);
+
+ return 0;
+
+err:
+ for (i = 0; i < DPAA2_DPCI_MAX_QUEUES; i++) {
+ struct dpaa2_queue *rxq = &(dpci_node->rx_queue[i]);
+
+ if (rxq->q_storage) {
+ dpaa2_free_dq_storage(rxq->q_storage);
+ rte_free(rxq->q_storage);
+ }
+ }
+ rte_free(dpci_node);
+
+ return ret;
+}
+
+struct dpaa2_dpci_dev *rte_dpaa2_alloc_dpci_dev(void)
+{
+ struct dpaa2_dpci_dev *dpci_dev = NULL;
+
+ /* Get DPCI dev handle from list using index */
+ TAILQ_FOREACH(dpci_dev, &dpci_dev_list, next) {
+ if (dpci_dev && rte_atomic16_test_and_set(&dpci_dev->in_use))
+ break;
+ }
+
+ return dpci_dev;
+}
+
+void rte_dpaa2_free_dpci_dev(struct dpaa2_dpci_dev *dpci)
+{
+ struct dpaa2_dpci_dev *dpci_dev = NULL;
+
+ /* Match DPCI handle and mark it free */
+ TAILQ_FOREACH(dpci_dev, &dpci_dev_list, next) {
+ if (dpci_dev == dpci) {
+ rte_atomic16_dec(&dpci_dev->in_use);
+ return;
+ }
+ }
+}
+
+static struct rte_dpaa2_object rte_dpaa2_dpci_obj = {
+ .dev_type = DPAA2_CI,
+ .create = rte_dpaa2_create_dpci_device,
+};
+
+RTE_PMD_REGISTER_DPAA2_OBJECT(dpci, rte_dpaa2_dpci_obj);
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c b/src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
new file mode 100644
index 00000000..99f70be1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
@@ -0,0 +1,527 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2016 NXP
+ *
+ */
+#include <unistd.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <signal.h>
+#include <pthread.h>
+#include <sys/types.h>
+#include <sys/queue.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <sys/epoll.h>
+#include <sys/eventfd.h>
+
+#include <rte_mbuf.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+
+#include <fslmc_logs.h>
+#include <rte_fslmc.h>
+#include "dpaa2_hw_pvt.h"
+#include "dpaa2_hw_dpio.h"
+#include <mc/fsl_dpmng.h>
+
+#define NUM_HOST_CPUS RTE_MAX_LCORE
+
+struct dpaa2_io_portal_t dpaa2_io_portal[RTE_MAX_LCORE];
+RTE_DEFINE_PER_LCORE(struct dpaa2_io_portal_t, _dpaa2_io);
+
+struct swp_active_dqs rte_global_active_dqs_list[NUM_MAX_SWP];
+
+TAILQ_HEAD(dpio_dev_list, dpaa2_dpio_dev);
+static struct dpio_dev_list dpio_dev_list
+ = TAILQ_HEAD_INITIALIZER(dpio_dev_list); /*!< DPIO device list */
+static uint32_t io_space_count;
+
+/* Variable to store DPAA2 platform type */
+uint32_t dpaa2_svr_family;
+
+/* Stashing macro defaults for LS208x */
+static int dpaa2_core_cluster_base = 0x04;
+static int dpaa2_cluster_sz = 2;
+
+/* On the LS208x platform there are four clusters with the following mapping:
+ * Cluster 1 (ID = x04) : CPU0, CPU1;
+ * Cluster 2 (ID = x05) : CPU2, CPU3;
+ * Cluster 3 (ID = x06) : CPU4, CPU5;
+ * Cluster 4 (ID = x07) : CPU6, CPU7;
+ */
+/* On the LS108x platform there are two clusters with the following mapping:
+ * Cluster 1 (ID = x02) : CPU0, CPU1, CPU2, CPU3;
+ * Cluster 2 (ID = x03) : CPU4, CPU5, CPU6, CPU7;
+ */
+/* On the LX2160 platform there are eight clusters with the following mapping:
+ * Cluster 1 (ID = x00) : CPU0, CPU1;
+ * Cluster 2 (ID = x01) : CPU2, CPU3;
+ * Cluster 3 (ID = x02) : CPU4, CPU5;
+ * Cluster 4 (ID = x03) : CPU6, CPU7;
+ * Cluster 5 (ID = x04) : CPU8, CPU9;
+ * Cluster 6 (ID = x05) : CPU10, CPU11;
+ * Cluster 7 (ID = x06) : CPU12, CPU13;
+ * Cluster 8 (ID = x07) : CPU14, CPU15;
+ */
+
+static int
+dpaa2_core_cluster_sdest(int cpu_id)
+{
+ int x = cpu_id / dpaa2_cluster_sz;
+
+ return dpaa2_core_cluster_base + x;
+}
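Worked example of the mapping above with the LS208x defaults (base 0x04,
cluster size 2): CPU 5 sits in cluster 5 / 2 = 2, so its stash destination is
0x04 + 2 = 0x06, matching cluster 3 (CPU4, CPU5) in the table above.

	/* LS208x: dpaa2_core_cluster_sdest(5) == 0x04 + 5 / 2 == 0x06 */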
+
+#ifdef RTE_LIBRTE_PMD_DPAA2_EVENTDEV
+static void dpaa2_affine_dpio_intr_to_respective_core(int32_t dpio_id)
+{
+#define STRING_LEN 28
+#define COMMAND_LEN 50
+ uint32_t cpu_mask = 1;
+ int ret;
+ size_t len = 0;
+ char *temp = NULL, *token = NULL;
+ char string[STRING_LEN], command[COMMAND_LEN];
+ FILE *file;
+
+ snprintf(string, STRING_LEN, "dpio.%d", dpio_id);
+ file = fopen("/proc/interrupts", "r");
+ if (!file) {
+ DPAA2_BUS_WARN("Failed to open /proc/interrupts file");
+ return;
+ }
+ while (getline(&temp, &len, file) != -1) {
+ if ((strstr(temp, string)) != NULL) {
+ token = strtok(temp, ":");
+ break;
+ }
+ }
+
+ if (!token) {
+ DPAA2_BUS_WARN("Failed to get interrupt id for dpio.%d",
+ dpio_id);
+ if (temp)
+ free(temp);
+ fclose(file);
+ return;
+ }
+
+ cpu_mask = cpu_mask << rte_lcore_id();
+ snprintf(command, COMMAND_LEN, "echo %X > /proc/irq/%s/smp_affinity",
+ cpu_mask, token);
+ ret = system(command);
+ if (ret < 0)
+ DPAA2_BUS_WARN(
+ "Failed to affine interrupts on respective core");
+ else
+ DPAA2_BUS_DEBUG(" %s command is executed", command);
+
+ free(temp);
+ fclose(file);
+}
+
+static int dpaa2_dpio_intr_init(struct dpaa2_dpio_dev *dpio_dev)
+{
+ struct epoll_event epoll_ev;
+ int eventfd, dpio_epoll_fd, ret;
+ int threshold = 0x3, timeout = 0xFF;
+
+	dpio_epoll_fd = epoll_create(1);
+	if (dpio_epoll_fd < 0) {
+		DPAA2_BUS_ERR("epoll_create failed");
+		return -1;
+	}
+ ret = rte_dpaa2_intr_enable(&dpio_dev->intr_handle, 0);
+ if (ret) {
+		DPAA2_BUS_ERR("Interrupt registration failed");
+ return -1;
+ }
+
+ if (getenv("DPAA2_PORTAL_INTR_THRESHOLD"))
+ threshold = atoi(getenv("DPAA2_PORTAL_INTR_THRESHOLD"));
+
+ if (getenv("DPAA2_PORTAL_INTR_TIMEOUT"))
+ sscanf(getenv("DPAA2_PORTAL_INTR_TIMEOUT"), "%x", &timeout);
+
+ qbman_swp_interrupt_set_trigger(dpio_dev->sw_portal,
+ QBMAN_SWP_INTERRUPT_DQRI);
+ qbman_swp_interrupt_clear_status(dpio_dev->sw_portal, 0xffffffff);
+ qbman_swp_interrupt_set_inhibit(dpio_dev->sw_portal, 0);
+ qbman_swp_dqrr_thrshld_write(dpio_dev->sw_portal, threshold);
+ qbman_swp_intr_timeout_write(dpio_dev->sw_portal, timeout);
+
+ eventfd = dpio_dev->intr_handle.fd;
+ epoll_ev.events = EPOLLIN | EPOLLPRI | EPOLLET;
+ epoll_ev.data.fd = eventfd;
+
+ ret = epoll_ctl(dpio_epoll_fd, EPOLL_CTL_ADD, eventfd, &epoll_ev);
+ if (ret < 0) {
+ DPAA2_BUS_ERR("epoll_ctl failed");
+ return -1;
+ }
+ dpio_dev->epoll_fd = dpio_epoll_fd;
+
+ dpaa2_affine_dpio_intr_to_respective_core(dpio_dev->hw_id);
+
+ return 0;
+}
+#endif
+
+static int
+configure_dpio_qbman_swp(struct dpaa2_dpio_dev *dpio_dev)
+{
+ struct qbman_swp_desc p_des;
+ struct dpio_attr attr;
+
+ dpio_dev->dpio = malloc(sizeof(struct fsl_mc_io));
+ if (!dpio_dev->dpio) {
+ DPAA2_BUS_ERR("Memory allocation failure");
+ return -1;
+ }
+
+ dpio_dev->dpio->regs = dpio_dev->mc_portal;
+ if (dpio_open(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->hw_id,
+ &dpio_dev->token)) {
+ DPAA2_BUS_ERR("Failed to allocate IO space");
+ free(dpio_dev->dpio);
+ return -1;
+ }
+
+ if (dpio_reset(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) {
+ DPAA2_BUS_ERR("Failed to reset dpio");
+ dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
+ free(dpio_dev->dpio);
+ return -1;
+ }
+
+ if (dpio_enable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) {
+ DPAA2_BUS_ERR("Failed to Enable dpio");
+ dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
+ free(dpio_dev->dpio);
+ return -1;
+ }
+
+ if (dpio_get_attributes(dpio_dev->dpio, CMD_PRI_LOW,
+ dpio_dev->token, &attr)) {
+ DPAA2_BUS_ERR("DPIO Get attribute failed");
+ dpio_disable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
+ dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
+ free(dpio_dev->dpio);
+ return -1;
+ }
+
+ /* Configure & setup SW portal */
+ p_des.block = NULL;
+ p_des.idx = attr.qbman_portal_id;
+ p_des.cena_bar = (void *)(dpio_dev->qbman_portal_ce_paddr);
+ p_des.cinh_bar = (void *)(dpio_dev->qbman_portal_ci_paddr);
+ p_des.irq = -1;
+ p_des.qman_version = attr.qbman_version;
+
+ dpio_dev->sw_portal = qbman_swp_init(&p_des);
+ if (dpio_dev->sw_portal == NULL) {
+ DPAA2_BUS_ERR("QBMan SW Portal Init failed");
+ dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
+ free(dpio_dev->dpio);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev, int cpu_id)
+{
+ int sdest, ret;
+
+ /* Set the Stashing Destination */
+ if (cpu_id < 0) {
+ cpu_id = rte_get_master_lcore();
+ if (cpu_id < 0) {
+ DPAA2_BUS_ERR("Getting CPU Index failed");
+ return -1;
+ }
+ }
+	/* Set the stashing destination (SDEST) based on the current CPU ID.
+	 * With the LS208x defaults, valid SDEST values are 4, 5, 6 and 7,
+	 * one per core cluster.
+	 */
+
+ sdest = dpaa2_core_cluster_sdest(cpu_id);
+ DPAA2_BUS_DEBUG("Portal= %d CPU= %u SDEST= %d",
+ dpio_dev->index, cpu_id, sdest);
+
+ ret = dpio_set_stashing_destination(dpio_dev->dpio, CMD_PRI_LOW,
+ dpio_dev->token, sdest);
+ if (ret) {
+ DPAA2_BUS_ERR("%d ERROR in SDEST", ret);
+ return -1;
+ }
+
+#ifdef RTE_LIBRTE_PMD_DPAA2_EVENTDEV
+ if (dpaa2_dpio_intr_init(dpio_dev)) {
+ DPAA2_BUS_ERR("Interrupt registration failed for dpio");
+ return -1;
+ }
+#endif
+
+ return 0;
+}
+
+struct dpaa2_dpio_dev *dpaa2_get_qbman_swp(int cpu_id)
+{
+ struct dpaa2_dpio_dev *dpio_dev = NULL;
+ int ret;
+
+ /* Get DPIO dev handle from list using index */
+ TAILQ_FOREACH(dpio_dev, &dpio_dev_list, next) {
+ if (dpio_dev && rte_atomic16_test_and_set(&dpio_dev->ref_count))
+ break;
+ }
+ if (!dpio_dev)
+ return NULL;
+
+ DPAA2_BUS_DEBUG("New Portal %p (%d) affined thread - %lu",
+ dpio_dev, dpio_dev->index, syscall(SYS_gettid));
+
+ ret = dpaa2_configure_stashing(dpio_dev, cpu_id);
+ if (ret)
+ DPAA2_BUS_ERR("dpaa2_configure_stashing failed");
+
+ return dpio_dev;
+}
+
+int
+dpaa2_affine_qbman_swp(void)
+{
+ unsigned int lcore_id = rte_lcore_id();
+ uint64_t tid = syscall(SYS_gettid);
+
+ if (lcore_id == LCORE_ID_ANY)
+ lcore_id = rte_get_master_lcore();
+ /* if the core id is not supported */
+ else if (lcore_id >= RTE_MAX_LCORE)
+ return -1;
+
+ if (dpaa2_io_portal[lcore_id].dpio_dev) {
+ DPAA2_BUS_DP_INFO("DPAA Portal=%p (%d) is being shared"
+ " between thread %" PRIu64 " and current "
+ "%" PRIu64 "\n",
+ dpaa2_io_portal[lcore_id].dpio_dev,
+ dpaa2_io_portal[lcore_id].dpio_dev->index,
+ dpaa2_io_portal[lcore_id].net_tid,
+ tid);
+ RTE_PER_LCORE(_dpaa2_io).dpio_dev
+ = dpaa2_io_portal[lcore_id].dpio_dev;
+ rte_atomic16_inc(&dpaa2_io_portal
+ [lcore_id].dpio_dev->ref_count);
+ dpaa2_io_portal[lcore_id].net_tid = tid;
+
+ DPAA2_BUS_DP_DEBUG("Old Portal=%p (%d) affined thread - "
+ "%" PRIu64 "\n",
+ dpaa2_io_portal[lcore_id].dpio_dev,
+ dpaa2_io_portal[lcore_id].dpio_dev->index,
+ tid);
+ return 0;
+ }
+
+ /* Populate the dpaa2_io_portal structure */
+ dpaa2_io_portal[lcore_id].dpio_dev = dpaa2_get_qbman_swp(lcore_id);
+
+ if (dpaa2_io_portal[lcore_id].dpio_dev) {
+ RTE_PER_LCORE(_dpaa2_io).dpio_dev
+ = dpaa2_io_portal[lcore_id].dpio_dev;
+ dpaa2_io_portal[lcore_id].net_tid = tid;
+
+ return 0;
+ } else {
+ return -1;
+ }
+}
+
+int
+dpaa2_affine_qbman_ethrx_swp(void)
+{
+ unsigned int lcore_id = rte_lcore_id();
+ uint64_t tid = syscall(SYS_gettid);
+
+ if (lcore_id == LCORE_ID_ANY)
+ lcore_id = rte_get_master_lcore();
+ /* if the core id is not supported */
+ else if (lcore_id >= RTE_MAX_LCORE)
+ return -1;
+
+ if (dpaa2_io_portal[lcore_id].ethrx_dpio_dev) {
+ DPAA2_BUS_DP_INFO(
+ "DPAA Portal=%p (%d) is being shared between thread"
+ " %" PRIu64 " and current %" PRIu64 "\n",
+ dpaa2_io_portal[lcore_id].ethrx_dpio_dev,
+ dpaa2_io_portal[lcore_id].ethrx_dpio_dev->index,
+ dpaa2_io_portal[lcore_id].sec_tid,
+ tid);
+ RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev
+ = dpaa2_io_portal[lcore_id].ethrx_dpio_dev;
+ rte_atomic16_inc(&dpaa2_io_portal
+ [lcore_id].ethrx_dpio_dev->ref_count);
+ dpaa2_io_portal[lcore_id].sec_tid = tid;
+
+ DPAA2_BUS_DP_DEBUG(
+ "Old Portal=%p (%d) affined thread"
+ " - %" PRIu64 "\n",
+ dpaa2_io_portal[lcore_id].ethrx_dpio_dev,
+ dpaa2_io_portal[lcore_id].ethrx_dpio_dev->index,
+ tid);
+ return 0;
+ }
+
+ /* Populate the dpaa2_io_portal structure */
+ dpaa2_io_portal[lcore_id].ethrx_dpio_dev =
+ dpaa2_get_qbman_swp(lcore_id);
+
+ if (dpaa2_io_portal[lcore_id].ethrx_dpio_dev) {
+ RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev
+ = dpaa2_io_portal[lcore_id].ethrx_dpio_dev;
+ dpaa2_io_portal[lcore_id].sec_tid = tid;
+ return 0;
+ } else {
+ return -1;
+ }
+}
+
+static int
+dpaa2_create_dpio_device(int vdev_fd,
+ struct vfio_device_info *obj_info,
+ int object_id)
+{
+ struct dpaa2_dpio_dev *dpio_dev;
+ struct vfio_region_info reg_info = { .argsz = sizeof(reg_info)};
+
+ if (obj_info->num_regions < NUM_DPIO_REGIONS) {
+ DPAA2_BUS_ERR("Not sufficient number of DPIO regions");
+ return -1;
+ }
+
+ dpio_dev = rte_malloc(NULL, sizeof(struct dpaa2_dpio_dev),
+ RTE_CACHE_LINE_SIZE);
+ if (!dpio_dev) {
+ DPAA2_BUS_ERR("Memory allocation failed for DPIO Device");
+ return -1;
+ }
+
+ dpio_dev->dpio = NULL;
+ dpio_dev->hw_id = object_id;
+ rte_atomic16_init(&dpio_dev->ref_count);
+ /* Using single portal for all devices */
+ dpio_dev->mc_portal = rte_mcp_ptr_list[MC_PORTAL_INDEX];
+
+ reg_info.index = 0;
+ if (ioctl(vdev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
+ DPAA2_BUS_ERR("vfio: error getting region info");
+ rte_free(dpio_dev);
+ return -1;
+ }
+
+ dpio_dev->ce_size = reg_info.size;
+ dpio_dev->qbman_portal_ce_paddr = (size_t)mmap(NULL, reg_info.size,
+ PROT_WRITE | PROT_READ, MAP_SHARED,
+ vdev_fd, reg_info.offset);
+
+ reg_info.index = 1;
+ if (ioctl(vdev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
+ DPAA2_BUS_ERR("vfio: error getting region info");
+ rte_free(dpio_dev);
+ return -1;
+ }
+
+ dpio_dev->ci_size = reg_info.size;
+ dpio_dev->qbman_portal_ci_paddr = (size_t)mmap(NULL, reg_info.size,
+ PROT_WRITE | PROT_READ, MAP_SHARED,
+ vdev_fd, reg_info.offset);
+
+ if (configure_dpio_qbman_swp(dpio_dev)) {
+ DPAA2_BUS_ERR(
+ "Fail to configure the dpio qbman portal for %d",
+ dpio_dev->hw_id);
+ rte_free(dpio_dev);
+ return -1;
+ }
+
+ io_space_count++;
+ dpio_dev->index = io_space_count;
+
+	if (rte_dpaa2_vfio_setup_intr(&dpio_dev->intr_handle, vdev_fd, 1)) {
+		DPAA2_BUS_ERR("Failed to set up interrupt for %d",
+			      dpio_dev->hw_id);
+		rte_free(dpio_dev);
+		return -1;
+	}
+
+ /* find the SoC type for the first time */
+ if (!dpaa2_svr_family) {
+ struct mc_soc_version mc_plat_info = {0};
+
+ if (mc_get_soc_version(dpio_dev->dpio,
+ CMD_PRI_LOW, &mc_plat_info)) {
+ DPAA2_BUS_ERR("Unable to get SoC version information");
+ } else if ((mc_plat_info.svr & 0xffff0000) == SVR_LS1080A) {
+ dpaa2_core_cluster_base = 0x02;
+ dpaa2_cluster_sz = 4;
+ DPAA2_BUS_DEBUG("LS108x (A53) Platform Detected");
+ } else if ((mc_plat_info.svr & 0xffff0000) == SVR_LX2160A) {
+ dpaa2_core_cluster_base = 0x00;
+ dpaa2_cluster_sz = 2;
+ DPAA2_BUS_DEBUG("LX2160 Platform Detected");
+ }
+ dpaa2_svr_family = (mc_plat_info.svr & 0xffff0000);
+ }
+
+ TAILQ_INSERT_TAIL(&dpio_dev_list, dpio_dev, next);
+
+ return 0;
+}
+
+void
+dpaa2_free_dq_storage(struct queue_storage_info_t *q_storage)
+{
+ int i = 0;
+
+ for (i = 0; i < NUM_DQS_PER_QUEUE; i++) {
+ if (q_storage->dq_storage[i])
+ rte_free(q_storage->dq_storage[i]);
+ }
+}
+
+int
+dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage)
+{
+ int i = 0;
+
+ for (i = 0; i < NUM_DQS_PER_QUEUE; i++) {
+ q_storage->dq_storage[i] = rte_malloc(NULL,
+ DPAA2_DQRR_RING_SIZE * sizeof(struct qbman_result),
+ RTE_CACHE_LINE_SIZE);
+ if (!q_storage->dq_storage[i])
+ goto fail;
+ }
+ return 0;
+fail:
+ while (--i >= 0)
+ rte_free(q_storage->dq_storage[i]);
+
+ return -1;
+}
+
+static struct rte_dpaa2_object rte_dpaa2_dpio_obj = {
+ .dev_type = DPAA2_IO,
+ .create = dpaa2_create_dpio_device,
+};
+
+RTE_PMD_REGISTER_DPAA2_OBJECT(dpio, rte_dpaa2_dpio_obj);
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h b/src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
new file mode 100644
index 00000000..d593eea7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef _DPAA2_HW_DPIO_H_
+#define _DPAA2_HW_DPIO_H_
+
+#include <mc/fsl_dpio.h>
+#include <mc/fsl_mc_sys.h>
+
+struct dpaa2_io_portal_t {
+ struct dpaa2_dpio_dev *dpio_dev;
+ struct dpaa2_dpio_dev *ethrx_dpio_dev;
+ uint64_t net_tid;
+ uint64_t sec_tid;
+ void *eventdev;
+};
+
+/*! Global per thread DPIO portal */
+RTE_DECLARE_PER_LCORE(struct dpaa2_io_portal_t, _dpaa2_io);
+
+#define DPAA2_PER_LCORE_DPIO RTE_PER_LCORE(_dpaa2_io).dpio_dev
+#define DPAA2_PER_LCORE_PORTAL DPAA2_PER_LCORE_DPIO->sw_portal
+
+#define DPAA2_PER_LCORE_ETHRX_DPIO RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev
+#define DPAA2_PER_LCORE_ETHRX_PORTAL DPAA2_PER_LCORE_ETHRX_DPIO->sw_portal
+
+/* Variable to store DPAA2 platform type */
+extern uint32_t dpaa2_svr_family;
+
+extern struct dpaa2_io_portal_t dpaa2_io_portal[RTE_MAX_LCORE];
+
+struct dpaa2_dpio_dev *dpaa2_get_qbman_swp(int cpu_id);
+
+/* Affine a DPIO portal to current processing thread */
+int dpaa2_affine_qbman_swp(void);
+
+/* Affine additional DPIO portal to current crypto processing thread */
+int dpaa2_affine_qbman_ethrx_swp(void);
+
+/* allocate memory for FQ - dq storage */
+int
+dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage);
+
+/* free memory for FQ- dq storage */
+void
+dpaa2_free_dq_storage(struct queue_storage_info_t *q_storage);
+
+#endif /* _DPAA2_HW_DPIO_H_ */
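A hedged sketch of the intended calling pattern for the affinity helpers
declared above: each I/O lcore affines a portal once, then uses the per-lcore
macros on the fast path. The wrapper function is illustrative, not part of
this patch:

	#include "portal/dpaa2_hw_dpio.h"

	static int lcore_rx_setup(void)
	{
		/* Once per thread: bind (or share) a DPIO portal. */
		if (!DPAA2_PER_LCORE_DPIO && dpaa2_affine_qbman_swp())
			return -1;

		/* The fast path then dequeues through this portal. */
		struct qbman_swp *swp = DPAA2_PER_LCORE_PORTAL;
		(void)swp;
		return 0;
	}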
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
new file mode 100644
index 00000000..82075936
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -0,0 +1,379 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef _DPAA2_HW_PVT_H_
+#define _DPAA2_HW_PVT_H_
+
+#include <rte_eventdev.h>
+
+#include <mc/fsl_mc_sys.h>
+#include <fsl_qbman_portal.h>
+
+#ifndef false
+#define false 0
+#endif
+#ifndef true
+#define true 1
+#endif
+#define lower_32_bits(x) ((uint32_t)(x))
+#define upper_32_bits(x) ((uint32_t)(((x) >> 16) >> 16))
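+/* The double 16-bit shift above avoids undefined behaviour when (x) is only
+ * 32 bits wide (a single shift by 32 would be UB).
+ */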
+
+#define SVR_LS1080A 0x87030000
+#define SVR_LS2080A 0x87010000
+#define SVR_LS2088A 0x87090000
+#define SVR_LX2160A 0x87360000
+
+#ifndef VLAN_TAG_SIZE
+#define VLAN_TAG_SIZE   4 /**< VLAN header length */
+#endif
+
+#define MAX_TX_RING_SLOTS	8
+	/**< Maximum number of slots available in the TX ring */
+
+#define DPAA2_DQRR_RING_SIZE	16
+	/**< Maximum number of slots available in the RX ring */
+
+#define MC_PORTAL_INDEX 0
+#define NUM_DPIO_REGIONS 2
+#define NUM_DQS_PER_QUEUE 2
+
+/* Maximum release/acquire from QBMAN */
+#define DPAA2_MBUF_MAX_ACQ_REL 7
+
+#define DPAA2_MEMPOOL_OPS_NAME "dpaa2"
+
+#define MAX_BPID 256
+#define DPAA2_MBUF_HW_ANNOTATION 64
+#define DPAA2_FD_PTA_SIZE 0
+
+#if (DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE) > RTE_PKTMBUF_HEADROOM
+#error "Annotation requirement is more than RTE_PKTMBUF_HEADROOM"
+#endif
+
+/* we will re-use the HEADROOM for annotation in RX */
+#define DPAA2_HW_BUF_RESERVE 0
+#define DPAA2_PACKET_LAYOUT_ALIGN 64 /*changing from 256 */
+
+#define DPAA2_DPCI_MAX_QUEUES 2
+
+struct dpaa2_dpio_dev {
+ TAILQ_ENTRY(dpaa2_dpio_dev) next;
+ /**< Pointer to Next device instance */
+	uint16_t index; /**< Index of an instance in the list */
+ rte_atomic16_t ref_count;
+ /**< How many thread contexts are sharing this.*/
+ struct fsl_mc_io *dpio; /** handle to DPIO portal object */
+ uint16_t token;
+ struct qbman_swp *sw_portal; /** SW portal object */
+ const struct qbman_result *dqrr[4];
+ /**< DQRR Entry for this SW portal */
+ void *mc_portal; /**< MC Portal for configuring this device */
+ uintptr_t qbman_portal_ce_paddr;
+ /**< Physical address of Cache Enabled Area */
+ uintptr_t ce_size; /**< Size of the CE region */
+ uintptr_t qbman_portal_ci_paddr;
+ /**< Physical address of Cache Inhibit Area */
+ uintptr_t ci_size; /**< Size of the CI region */
+ struct rte_intr_handle intr_handle; /* Interrupt related info */
+ int32_t epoll_fd; /**< File descriptor created for interrupt polling */
+	int32_t hw_id; /**< A unique ID of this DPIO device instance */
+};
+
+struct dpaa2_dpbp_dev {
+ TAILQ_ENTRY(dpaa2_dpbp_dev) next;
+ /**< Pointer to Next device instance */
+ struct fsl_mc_io dpbp; /** handle to DPBP portal object */
+ uint16_t token;
+ rte_atomic16_t in_use;
+ uint32_t dpbp_id; /*HW ID for DPBP object */
+};
+
+struct queue_storage_info_t {
+ struct qbman_result *dq_storage[NUM_DQS_PER_QUEUE];
+ struct qbman_result *active_dqs;
+ uint8_t active_dpio_id;
+ uint8_t toggle;
+ uint8_t last_num_pkts;
+};
+
+struct dpaa2_queue;
+
+typedef void (dpaa2_queue_cb_dqrr_t)(struct qbman_swp *swp,
+ const struct qbman_fd *fd,
+ const struct qbman_result *dq,
+ struct dpaa2_queue *rxq,
+ struct rte_event *ev);
+
+struct dpaa2_queue {
+ struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
+ void *dev;
+ int32_t eventfd; /*!< Event Fd of this queue */
+ uint32_t fqid; /*!< Unique ID of this queue */
+ uint8_t tc_index; /*!< traffic class identifier */
+	uint16_t flow_id; /*!< To be used by DPAA2 framework */
+ uint64_t rx_pkts;
+ uint64_t tx_pkts;
+ uint64_t err_pkts;
+ union {
+ struct queue_storage_info_t *q_storage;
+ struct qbman_result *cscn;
+ };
+ struct rte_event ev;
+ dpaa2_queue_cb_dqrr_t *cb;
+};
+
+struct swp_active_dqs {
+ struct qbman_result *global_active_dqs;
+ uint64_t reserved[7];
+};
+
+#define NUM_MAX_SWP 64
+
+extern struct swp_active_dqs rte_global_active_dqs_list[NUM_MAX_SWP];
+
+struct dpaa2_dpci_dev {
+ TAILQ_ENTRY(dpaa2_dpci_dev) next;
+ /**< Pointer to Next device instance */
+ struct fsl_mc_io dpci; /** handle to DPCI portal object */
+ uint16_t token;
+ rte_atomic16_t in_use;
+ uint32_t dpci_id; /*HW ID for DPCI object */
+ struct dpaa2_queue rx_queue[DPAA2_DPCI_MAX_QUEUES];
+ struct dpaa2_queue tx_queue[DPAA2_DPCI_MAX_QUEUES];
+};
+
+/*! Global MCP list */
+extern void *(*rte_mcp_ptr_list);
+
+/* Refer to Table 7-3 in SEC BG */
+struct qbman_fle {
+ uint32_t addr_lo;
+ uint32_t addr_hi;
+ uint32_t length;
+ /* FMT must be 00, MSB is final bit */
+ uint32_t fin_bpid_offset;
+ uint32_t frc;
+ uint32_t reserved[3]; /* Not used currently */
+};
+
+struct qbman_sge {
+ uint32_t addr_lo;
+ uint32_t addr_hi;
+ uint32_t length;
+ uint32_t fin_bpid_offset;
+};
+
+/* There are three types of frames: Single, Scatter Gather and Frame Lists */
+enum qbman_fd_format {
+ qbman_fd_single = 0,
+ qbman_fd_list,
+ qbman_fd_sg
+};
+/* Macros to define operations on an FD */
+#define DPAA2_SET_FD_ADDR(fd, addr) do { \
+ (fd)->simple.addr_lo = lower_32_bits((size_t)(addr)); \
+ (fd)->simple.addr_hi = upper_32_bits((uint64_t)(addr)); \
+} while (0)
+#define DPAA2_SET_FD_LEN(fd, length) ((fd)->simple.len = length)
+#define DPAA2_SET_FD_BPID(fd, bpid) ((fd)->simple.bpid_offset |= bpid)
+#define DPAA2_SET_ONLY_FD_BPID(fd, bpid) \
+ ((fd)->simple.bpid_offset = bpid)
+#define DPAA2_SET_FD_IVP(fd) (((fd)->simple.bpid_offset |= 0x00004000))
+#define DPAA2_SET_FD_OFFSET(fd, offset) \
+ (((fd)->simple.bpid_offset |= (uint32_t)(offset) << 16))
+#define DPAA2_SET_FD_INTERNAL_JD(fd, len) \
+ ((fd)->simple.frc = (0x80000000 | (len)))
+#define DPAA2_GET_FD_FRC_PARSE_SUM(fd) \
+ ((uint16_t)(((fd)->simple.frc & 0xffff0000) >> 16))
+#define DPAA2_SET_FD_FRC(fd, _frc) ((fd)->simple.frc = _frc)
+#define DPAA2_RESET_FD_CTRL(fd) ((fd)->simple.ctrl = 0)
+
+#define DPAA2_SET_FD_ASAL(fd, asal) ((fd)->simple.ctrl |= (asal << 16))
+#define DPAA2_SET_FD_FLC(fd, addr) do { \
+ (fd)->simple.flc_lo = lower_32_bits((size_t)(addr)); \
+ (fd)->simple.flc_hi = upper_32_bits((uint64_t)(addr)); \
+} while (0)
+#define DPAA2_SET_FLE_INTERNAL_JD(fle, len) ((fle)->frc = (0x80000000 | (len)))
+#define DPAA2_GET_FLE_ADDR(fle) \
+ (size_t)((((uint64_t)((fle)->addr_hi)) << 32) + (fle)->addr_lo)
+#define DPAA2_SET_FLE_ADDR(fle, addr) do { \
+ (fle)->addr_lo = lower_32_bits((size_t)addr); \
+ (fle)->addr_hi = upper_32_bits((uint64_t)addr); \
+} while (0)
+#define DPAA2_GET_FLE_CTXT(fle) \
+ ((((uint64_t)((fle)->reserved[1])) << 32) + (fle)->reserved[0])
+#define DPAA2_FLE_SAVE_CTXT(fle, addr) do { \
+ (fle)->reserved[0] = lower_32_bits((size_t)addr); \
+ (fle)->reserved[1] = upper_32_bits((uint64_t)addr); \
+} while (0)
+#define DPAA2_SET_FLE_OFFSET(fle, offset) \
+ ((fle)->fin_bpid_offset |= (uint32_t)(offset) << 16)
+#define DPAA2_SET_FLE_LEN(fle, len) ((fle)->length = len)
+#define DPAA2_SET_FLE_BPID(fle, bpid) ((fle)->fin_bpid_offset |= (size_t)bpid)
+#define DPAA2_GET_FLE_BPID(fle) ((fle)->fin_bpid_offset & 0x000000ff)
+#define DPAA2_SET_FLE_FIN(fle) ((fle)->fin_bpid_offset |= 1 << 31)
+#define DPAA2_SET_FLE_IVP(fle) (((fle)->fin_bpid_offset |= 0x00004000))
+#define DPAA2_SET_FLE_BMT(fle) (((fle)->fin_bpid_offset |= 0x00008000))
+#define DPAA2_SET_FD_COMPOUND_FMT(fd) \
+ ((fd)->simple.bpid_offset |= (uint32_t)1 << 28)
+#define DPAA2_GET_FD_ADDR(fd) \
+(((((uint64_t)((fd)->simple.addr_hi)) << 32) + (fd)->simple.addr_lo))
+
+#define DPAA2_GET_FD_LEN(fd) ((fd)->simple.len)
+#define DPAA2_GET_FD_BPID(fd) (((fd)->simple.bpid_offset & 0x00003FFF))
+#define DPAA2_GET_FD_IVP(fd) (((fd)->simple.bpid_offset & 0x00004000) >> 14)
+#define DPAA2_GET_FD_OFFSET(fd) (((fd)->simple.bpid_offset & 0x0FFF0000) >> 16)
+#define DPAA2_GET_FD_FRC(fd) ((fd)->simple.frc)
+#define DPAA2_GET_FD_FLC(fd) \
+ (((uint64_t)((fd)->simple.flc_hi) << 32) + (fd)->simple.flc_lo)
+#define DPAA2_GET_FD_ERR(fd) ((fd)->simple.bpid_offset & 0x000000FF)
+#define DPAA2_GET_FLE_OFFSET(fle) (((fle)->fin_bpid_offset & 0x0FFF0000) >> 16)
+#define DPAA2_SET_FLE_SG_EXT(fle) ((fle)->fin_bpid_offset |= (uint64_t)1 << 29)
+#define DPAA2_IS_SET_FLE_SG_EXT(fle) \
+ (((fle)->fin_bpid_offset & ((uint64_t)1 << 29)) ? 1 : 0)
+
+#define DPAA2_INLINE_MBUF_FROM_BUF(buf, meta_data_size) \
+ ((struct rte_mbuf *)((size_t)(buf) - (meta_data_size)))
+
+#define DPAA2_ASAL_VAL (DPAA2_MBUF_HW_ANNOTATION / 64)
+
+#define DPAA2_FD_SET_FORMAT(fd, format) do { \
+ (fd)->simple.bpid_offset &= 0xCFFFFFFF; \
+ (fd)->simple.bpid_offset |= (uint32_t)format << 28; \
+} while (0)
+#define DPAA2_FD_GET_FORMAT(fd) (((fd)->simple.bpid_offset >> 28) & 0x3)
+
+#define DPAA2_SG_SET_FINAL(sg, fin) do { \
+ (sg)->fin_bpid_offset &= 0x7FFFFFFF; \
+ (sg)->fin_bpid_offset |= (uint32_t)fin << 31; \
+} while (0)
+#define DPAA2_SG_IS_FINAL(sg) (!!((sg)->fin_bpid_offset >> 31))
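+
+/* Illustrative sketch (editor's example, not part of the original header):
+ * composing a single-buffer FD with the macros above. The buf_iova, len,
+ * offset and bpid values are hypothetical; real callers derive them from an
+ * rte_mbuf and its mempool.
+ *
+ *     struct qbman_fd fd;
+ *
+ *     memset(&fd, 0, sizeof(fd));
+ *     DPAA2_SET_FD_ADDR(&fd, buf_iova);   // 64-bit IOVA, split into lo/hi
+ *     DPAA2_SET_FD_LEN(&fd, len);
+ *     DPAA2_SET_FD_OFFSET(&fd, offset);   // offset sits in bits 16..27
+ *     DPAA2_SET_FD_BPID(&fd, bpid);       // BPID in the low 14 bits
+ *     DPAA2_FD_SET_FORMAT(&fd, qbman_fd_single);
+ */
+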
+/* Only Enqueue Error responses will be
+ * pushed on FQID_ERR of Enqueue FQ
+ */
+#define DPAA2_EQ_RESP_ERR_FQ 0
+/* All Enqueue responses will be pushed on address
+ * set with qbman_eq_desc_set_response
+ */
+#define DPAA2_EQ_RESP_ALWAYS 1
+
+/* Various structures representing contiguous memory maps */
+struct dpaa2_memseg {
+ TAILQ_ENTRY(dpaa2_memseg) next;
+ char *vaddr;
+ rte_iova_t iova;
+ size_t len;
+};
+
+TAILQ_HEAD(dpaa2_memseg_list, dpaa2_memseg);
+extern struct dpaa2_memseg_list rte_dpaa2_memsegs;
+
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+extern uint8_t dpaa2_virt_mode;
+static void *dpaa2_mem_ptov(phys_addr_t paddr) __attribute__((unused));
+/* TODO: this is costly; a faster conversion routine is needed */
+static void *dpaa2_mem_ptov(phys_addr_t paddr)
+{
+ struct dpaa2_memseg *ms;
+
+ if (dpaa2_virt_mode)
+ return (void *)(size_t)paddr;
+
+ /* Check if the address is already part of the memseg list internally
+ * maintained by the dpaa2 driver.
+ */
+ TAILQ_FOREACH(ms, &rte_dpaa2_memsegs, next) {
+ if (paddr >= ms->iova && paddr <
+ ms->iova + ms->len)
+ return RTE_PTR_ADD(ms->vaddr, (uintptr_t)(paddr - ms->iova));
+ }
+
+ /* If not, fall back to searching the full memseg list */
+ return rte_mem_iova2virt(paddr);
+}
+
+static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr) __attribute__((unused));
+static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr)
+{
+ const struct rte_memseg *memseg;
+
+ if (dpaa2_virt_mode)
+ return vaddr;
+
+ memseg = rte_mem_virt2memseg((void *)(uintptr_t)vaddr, NULL);
+ if (memseg)
+ return memseg->phys_addr + RTE_PTR_DIFF(vaddr, memseg->addr);
+ return (size_t)NULL;
+}
+
+/**
+ * When physical addresses are used as IO virtual addresses, the conversion
+ * routines dpaa2_mem_vtop and dpaa2_mem_ptov must be called wherever a
+ * translation is required. These routines are invoked through the macros
+ * below.
+ */
+
+#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_iova)
+
+/**
+ * macro to convert Virtual address to IOVA
+ */
+#define DPAA2_VADDR_TO_IOVA(_vaddr) dpaa2_mem_vtop((size_t)(_vaddr))
+
+/**
+ * macro to convert IOVA to Virtual address
+ */
+#define DPAA2_IOVA_TO_VADDR(_iova) dpaa2_mem_ptov((size_t)(_iova))
+
+/**
+ * macro to convert, in place, the memory containing an IOVA to a virtual address
+ */
+#define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type) \
+ {_mem = (_type)(dpaa2_mem_ptov((size_t)(_mem))); }
+
+#else /* RTE_LIBRTE_DPAA2_USE_PHYS_IOVA */
+
+#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_addr)
+#define DPAA2_VADDR_TO_IOVA(_vaddr) (_vaddr)
+#define DPAA2_IOVA_TO_VADDR(_iova) (_iova)
+#define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type)
+
+#endif /* RTE_LIBRTE_DPAA2_USE_PHYS_IOVA */
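+
+/* Illustrative sketch: the same call site works in either IOVA mode. In
+ * physical-IOVA builds the macros route through dpaa2_mem_vtop()/ptov();
+ * otherwise they collapse to identity conversions. 'mbuf' is hypothetical.
+ *
+ *     uint64_t iova = (uint64_t)DPAA2_VADDR_TO_IOVA(mbuf->buf_addr);
+ *     void *va = (void *)DPAA2_IOVA_TO_VADDR(iova);
+ */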
+
+static inline
+int check_swp_active_dqs(uint16_t dpio_index)
+{
+ if (rte_global_active_dqs_list[dpio_index].global_active_dqs != NULL)
+ return 1;
+ return 0;
+}
+
+static inline
+void clear_swp_active_dqs(uint16_t dpio_index)
+{
+ rte_global_active_dqs_list[dpio_index].global_active_dqs = NULL;
+}
+
+static inline
+struct qbman_result *get_swp_active_dqs(uint16_t dpio_index)
+{
+ return rte_global_active_dqs_list[dpio_index].global_active_dqs;
+}
+
+static inline
+void set_swp_active_dqs(uint16_t dpio_index, struct qbman_result *dqs)
+{
+ rte_global_active_dqs_list[dpio_index].global_active_dqs = dqs;
+}
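+
+/* Illustrative sketch: the helpers above implement a one-slot record of the
+ * outstanding pull storage per DPIO portal. A typical dequeue path checks
+ * for (and drains) a stale entry before issuing a new pull, then records
+ * its own storage. 'dpio_index' and 'storage' are hypothetical.
+ *
+ *     if (check_swp_active_dqs(dpio_index))
+ *         ...wait for get_swp_active_dqs(dpio_index) to complete...
+ *     ...issue qbman_swp_pull() into 'storage'...
+ *     set_swp_active_dqs(dpio_index, storage);
+ */
+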
+struct dpaa2_dpbp_dev *dpaa2_alloc_dpbp_dev(void);
+void dpaa2_free_dpbp_dev(struct dpaa2_dpbp_dev *dpbp);
+int dpaa2_dpbp_supported(void);
+
+struct dpaa2_dpci_dev *rte_dpaa2_alloc_dpci_dev(void);
+void rte_dpaa2_free_dpci_dev(struct dpaa2_dpci_dev *dpci);
+
+#endif
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/qbman/include/compat.h b/src/spdk/dpdk/drivers/bus/fslmc/qbman/include/compat.h
new file mode 100644
index 00000000..7be8f54c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/qbman/include/compat.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2008-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#ifndef HEADER_COMPAT_H
+#define HEADER_COMPAT_H
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <stdint.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <string.h>
+#include <malloc.h>
+#include <unistd.h>
+#include <error.h>
+#include <linux/types.h>
+#include <rte_atomic.h>
+
+/* The following definitions are primarily to allow the single-source driver
+ * interfaces to be included by arbitrary program code. Ie. for interfaces that
+ * are also available in kernel-space, these definitions provide compatibility
+ * with certain attributes and types used in those interfaces.
+ */
+
+/* Required compiler attributes */
+#define likely(x) __builtin_expect(!!(x), 1)
+#define unlikely(x) __builtin_expect(!!(x), 0)
+
+/* Required types */
+typedef uint64_t dma_addr_t;
+
+/* Debugging */
+#define prflush(fmt, args...) \
+ do { \
+ printf(fmt, ##args); \
+ fflush(stdout); \
+ } while (0)
+#define pr_crit(fmt, args...) prflush("CRIT:" fmt, ##args)
+#define pr_err(fmt, args...) prflush("ERR:" fmt, ##args)
+#define pr_warn(fmt, args...) prflush("WARN:" fmt, ##args)
+#define pr_info(fmt, args...) prflush(fmt, ##args)
+
+#ifdef RTE_LIBRTE_DPAA2_DEBUG_BUS
+
+/* Trace the 3 different classes of read/write access to QBMan. #undef as
+ * required.
+ */
+#define QBMAN_CCSR_TRACE
+#define QBMAN_CINH_TRACE
+#define QBMAN_CENA_TRACE
+
+#define QBMAN_CHECKING
+
+#ifdef pr_debug
+#undef pr_debug
+#endif
+#define pr_debug(fmt, args...) printf(fmt, ##args)
+#define QBMAN_BUG_ON(c) \
+do { \
+ static int warned_##__LINE__; \
+ if ((c) && !warned_##__LINE__) { \
+ pr_warn("(%s:%d)\n", __FILE__, __LINE__); \
+ warned_##__LINE__ = 1; \
+ } \
+} while (0)
+#else
+#define QBMAN_BUG_ON(c) {}
+#define pr_debug(fmt, args...) {}
+#endif
+
+/* Other miscellaneous interfaces our APIs depend on. */
+
+#define lower_32_bits(x) ((uint32_t)(x))
+#define upper_32_bits(x) ((uint32_t)(((x) >> 16) >> 16))
+
+
+#define __iomem
+
+#define __raw_readb(p) (*(const volatile unsigned char *)(p))
+#define __raw_readl(p) (*(const volatile unsigned int *)(p))
+#define __raw_writel(v, p) {*(volatile unsigned int *)(p) = (v); }
+
+#define atomic_t rte_atomic32_t
+#define atomic_read(v) rte_atomic32_read(v)
+#define atomic_set(v, i) rte_atomic32_set(v, i)
+
+#define atomic_inc(v) rte_atomic32_add(v, 1)
+#define atomic_dec(v) rte_atomic32_sub(v, 1)
+
+#define atomic_inc_and_test(v) rte_atomic32_inc_and_test(v)
+#define atomic_dec_and_test(v) rte_atomic32_dec_and_test(v)
+
+#define atomic_inc_return(v) rte_atomic32_add_return(v, 1)
+#define atomic_dec_return(v) rte_atomic32_sub_return(v, 1)
+#define atomic_sub_and_test(i, v) (rte_atomic32_sub_return(v, i) == 0)
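+
+/* Illustrative sketch: the wrappers above give kernel-style names to DPDK's
+ * atomics, e.g. a plain reference count ('refs' is hypothetical):
+ *
+ *     atomic_t refs;
+ *
+ *     atomic_set(&refs, 1);
+ *     atomic_inc(&refs);
+ *     if (atomic_dec_and_test(&refs))
+ *         ...last reference dropped...
+ */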
+
+#endif /* HEADER_COMPAT_H */
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h b/src/spdk/dpdk/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
new file mode 100644
index 00000000..bb60a98f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (C) 2014 Freescale Semiconductor, Inc.
+ *
+ */
+#ifndef _FSL_QBMAN_BASE_H
+#define _FSL_QBMAN_BASE_H
+
+/**
+ * DOC: QBMan basic structures
+ *
+ * The QBMan block descriptor, software portal descriptor and Frame descriptor
+ * are defined here.
+ *
+ */
+
+/**
+ * struct qbman_block_desc - qbman block descriptor structure
+ * @ccsr_reg_bar: CCSR register map.
+ * @irq_rerr: Recoverable error interrupt line.
+ * @irq_nrerr: Non-recoverable error interrupt line
+ *
+ * Descriptor for a QBMan instance on the SoC. On partitions/targets that do not
+ * control this QBMan instance, these values may simply be place-holders. The
+ * idea is simply that we be able to distinguish between them, eg. so that SWP
+ * descriptors can identify which QBMan instance they belong to.
+ */
+struct qbman_block_desc {
+ void *ccsr_reg_bar;
+ int irq_rerr;
+ int irq_nrerr;
+};
+
+enum qbman_eqcr_mode {
+ qman_eqcr_vb_ring = 2, /* Valid bit, with eqcr in ring mode */
+ qman_eqcr_vb_array, /* Valid bit, with eqcr in array mode */
+};
+
+/**
+ * struct qbman_swp_desc - qbman software portal descriptor structure
+ * @block: The QBMan instance.
+ * @cena_bar: Cache-enabled portal register map.
+ * @cinh_bar: Cache-inhibited portal register map.
+ * @irq: -1 if unused (or unassigned)
+ * @idx: SWPs within a QBMan are indexed. -1 if opaque to the user.
+ * @qman_version: the qman version.
+ * @eqcr_mode: Select the eqcr mode, currently only valid bit ring mode and
+ * valid bit array mode are supported.
+ *
+ * Descriptor for a QBMan software portal, expressed in terms that make sense to
+ * the user context. Ie. on MC, this information is likely to be true-physical,
+ * and instantiated statically at compile-time. On GPP, this information is
+ * likely to be obtained via "discovery" over a partition's "MC bus"
+ * (ie. in response to a MC portal command), and would take into account any
+ * virtualisation of the GPP user's address space and/or interrupt numbering.
+ */
+struct qbman_swp_desc {
+ const struct qbman_block_desc *block;
+ uint8_t *cena_bar;
+ uint8_t *cinh_bar;
+ int irq;
+ int idx;
+ uint32_t qman_version;
+ enum qbman_eqcr_mode eqcr_mode;
+};
+
+/* Driver object for managing a QBMan portal */
+struct qbman_swp;
+
+/**
+ * struct qbman_fd - basic structure for qbman frame descriptor
+ * @words: for easier/faster copying the whole FD structure.
+ * @addr_lo: the lower 32 bits of the address in FD.
+ * @addr_hi: the upper 32 bits of the address in FD.
+ * @len: the length field in FD.
+ * @bpid_offset: represent the bpid and offset fields in FD. offset in
+ * the MS 16 bits, BPID in the LS 16 bits.
+ * @frc: frame context
+ * @ctrl: the 32bit control bits including dd, sc,... va, err.
+ * @flc_lo: the lower 32bit of flow context.
+ * @flc_hi: the upper 32bits of flow context.
+ *
+ * Place-holder for FDs, we represent it via the simplest form that we need for
+ * now. Different overlays may be needed to support different options, etc. (It
+ * is impractical to define One True Struct, because the resulting encoding
+ * routines (lots of read-modify-writes) would be worst-case performance whether
+ * or not circumstances required them.)
+ *
+ * Note, as with all data-structures exchanged between software and hardware (be
+ * they located in the portal register map or DMA'd to and from main-memory),
+ * the driver ensures that the caller of the driver API sees the data-structures
+ * in host-endianness. "struct qbman_fd" is no exception. The 32-bit words
+ * contained within this structure are represented in host-endianness, even if
+ * hardware always treats them as little-endian. As such, if any of these fields
+ * are interpreted in a binary (rather than numerical) fashion by hardware
+ * blocks (eg. accelerators), then the user should be careful. We illustrate
+ * with an example;
+ *
+ * Suppose the desired behaviour of an accelerator is controlled by the "frc"
+ * field of the FDs that are sent to it. Suppose also that the behaviour desired
+ * by the user corresponds to an "frc" value which is expressed as the literal
+ * sequence of bytes 0xfe, 0xed, 0xab, and 0xba. So "frc" should be the 32-bit
+ * value in which 0xfe is the first byte and 0xba is the last byte, and as
+ * hardware is little-endian, this amounts to a 32-bit "value" of 0xbaabedfe. If
+ * the software is little-endian also, this can simply be achieved by setting
+ * frc=0xbaabedfe. On the other hand, if software is big-endian, it should set
+ * frc=0xfeedabba! The best way of avoiding trouble with this sort of thing is
+ * to treat the 32-bit words as numerical values, in which the offset of a field
+ * from the beginning of the first byte (as required or generated by hardware)
+ * is numerically encoded by a left-shift (ie. by raising the field to a
+ * corresponding power of 2). Ie. in the current example, software could set
+ * "frc" in the following way, and it would work correctly on both little-endian
+ * and big-endian operation;
+ * fd.frc = (0xfe << 0) | (0xed << 8) | (0xab << 16) | (0xba << 24);
+ */
+struct qbman_fd {
+ union {
+ uint32_t words[8];
+ struct qbman_fd_simple {
+ uint32_t addr_lo;
+ uint32_t addr_hi;
+ uint32_t len;
+ uint32_t bpid_offset;
+ uint32_t frc;
+ uint32_t ctrl;
+ uint32_t flc_lo;
+ uint32_t flc_hi;
+ } simple;
+ };
+};
+
+#endif /* !_FSL_QBMAN_BASE_H */
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/qbman/include/fsl_qbman_debug.h b/src/spdk/dpdk/drivers/bus/fslmc/qbman/include/fsl_qbman_debug.h
new file mode 100644
index 00000000..072ad551
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/qbman/include/fsl_qbman_debug.h
@@ -0,0 +1,30 @@
+/* Copyright (C) 2015 Freescale Semiconductor, Inc.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+struct qbman_swp;
+
+struct qbman_fq_query_np_rslt {
+ uint8_t verb;
+ uint8_t rslt;
+ uint8_t st1;
+ uint8_t st2;
+ uint8_t reserved[2];
+ uint16_t od1_sfdr;
+ uint16_t od2_sfdr;
+ uint16_t od3_sfdr;
+ uint16_t ra1_sfdr;
+ uint16_t ra2_sfdr;
+ uint32_t pfdr_hptr;
+ uint32_t pfdr_tptr;
+ uint32_t frm_cnt;
+ uint32_t byte_cnt;
+ uint16_t ics_surp;
+ uint8_t is;
+ uint8_t reserved2[29];
+};
+
+int qbman_fq_query_state(struct qbman_swp *s, uint32_t fqid,
+ struct qbman_fq_query_np_rslt *r);
+uint32_t qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r);
+uint32_t qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r);
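+
+/* Illustrative sketch: querying a frame queue's fill level. 'swp' and 'fqid'
+ * are assumed to come from an already initialised portal and queue.
+ *
+ *     struct qbman_fq_query_np_rslt st;
+ *
+ *     if (qbman_fq_query_state(swp, fqid, &st) == 0)
+ *         printf("FQ %u: %u frames, %u bytes\n", fqid,
+ *                qbman_fq_state_frame_count(&st),
+ *                qbman_fq_state_byte_count(&st));
+ */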
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h b/src/spdk/dpdk/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
new file mode 100644
index 00000000..3e63db3a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
@@ -0,0 +1,1186 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (C) 2014 Freescale Semiconductor, Inc.
+ *
+ */
+#ifndef _FSL_QBMAN_PORTAL_H
+#define _FSL_QBMAN_PORTAL_H
+
+#include <fsl_qbman_base.h>
+
+/**
+ * DOC: QBMan portal APIs to implement the following functions:
+ * - Initialize and destroy Software portal object.
+ * - Read and write Software portal interrupt registers.
+ * - Enqueue, including setting the enqueue descriptor, and issuing enqueue
+ * command etc.
+ * - Dequeue, including setting the dequeue descriptor, issuing dequeue command,
+ * parsing the dequeue response in DQRR and memory, parsing the state change
+ * notifications etc.
+ * - Release, including setting the release descriptor, and issuing the buffer
+ * release command.
+ * - Acquire, acquire the buffer from the given buffer pool.
+ * - FQ management.
+ * - Channel management, enable/disable CDAN with or without context.
+ */
+
+/**
+ * qbman_swp_init() - Create a functional object representing the given
+ * QBMan portal descriptor.
+ * @d: the given qbman swp descriptor
+ *
+ * Return qbman_swp portal object for success, NULL if the object cannot
+ * be created.
+ */
+struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
+
+/**
+ * qbman_swp_finish() - Destroy a functional object representing the given
+ * QBMan portal descriptor.
+ * @p: the qbman_swp object to be destroyed.
+ *
+ */
+void qbman_swp_finish(struct qbman_swp *p);
+
+/**
+ * qbman_swp_get_desc() - Get the descriptor of the given portal object.
+ * @p: the given portal object.
+ *
+ * Return the descriptor for this portal.
+ */
+const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p);
+
+ /**************/
+ /* Interrupts */
+ /**************/
+
+/* EQCR ring interrupt */
+#define QBMAN_SWP_INTERRUPT_EQRI ((uint32_t)0x00000001)
+/* Enqueue command dispatched interrupt */
+#define QBMAN_SWP_INTERRUPT_EQDI ((uint32_t)0x00000002)
+/* DQRR non-empty interrupt */
+#define QBMAN_SWP_INTERRUPT_DQRI ((uint32_t)0x00000004)
+/* RCR ring interrupt */
+#define QBMAN_SWP_INTERRUPT_RCRI ((uint32_t)0x00000008)
+/* Release command dispatched interrupt */
+#define QBMAN_SWP_INTERRUPT_RCDI ((uint32_t)0x00000010)
+/* Volatile dequeue command interrupt */
+#define QBMAN_SWP_INTERRUPT_VDCI ((uint32_t)0x00000020)
+
+/**
+ * qbman_swp_interrupt_get_vanish() - Get the data in software portal
+ * interrupt status disable register.
+ * @p: the given software portal object.
+ *
+ * Return the settings in SWP_ISDR register.
+ */
+uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p);
+
+/**
+ * qbman_swp_interrupt_set_vanish() - Set the data in software portal
+ * interrupt status disable register.
+ * @p: the given software portal object.
+ * @mask: The value to set in SWP_ISDR register.
+ */
+void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask);
+
+/**
+ * qbman_swp_interrupt_read_status() - Get the data in software portal
+ * interrupt status register.
+ * @p: the given software portal object.
+ *
+ * Return the settings in SWP_ISR register.
+ */
+uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p);
+
+/**
+ * qbman_swp_interrupt_clear_status() - Set the data in software portal
+ * interrupt status register.
+ * @p: the given software portal object.
+ * @mask: The value to set in SWP_ISR register.
+ */
+void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask);
+
+/**
+ * qbman_swp_dqrr_thrshld_read_status() - Get the data in software portal
+ * DQRR interrupt threshold register.
+ * @p: the given software portal object.
+ */
+uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p);
+
+/**
+ * qbman_swp_dqrr_thrshld_write() - Set the data in software portal
+ * DQRR interrupt threshold register.
+ * @p: the given software portal object.
+ * @mask: The value to set in SWP_DQRR_ITR register.
+ */
+void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask);
+
+/**
+ * qbman_swp_intr_timeout_read_status() - Get the data in software portal
+ * Interrupt Time-Out period register.
+ * @p: the given software portal object.
+ */
+uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p);
+
+/**
+ * qbman_swp_intr_timeout_write() - Set the data in software portal
+ * Interrupt Time-Out period register.
+ * @p: the given software portal object.
+ * @mask: The value to set in SWP_ITPR register.
+ */
+void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask);
+
+/**
+ * qbman_swp_interrupt_get_trigger() - Get the data in software portal
+ * interrupt enable register.
+ * @p: the given software portal object.
+ *
+ * Return the settings in SWP_IER register.
+ */
+uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
+
+/**
+ * qbman_swp_interrupt_set_trigger() - Set the data in software portal
+ * interrupt enable register.
+ * @p: the given software portal object.
+ * @mask: The value to set in SWP_IER register.
+ */
+void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask);
+
+/**
+ * qbman_swp_interrupt_get_inhibit() - Get the data in software portal
+ * interrupt inhibit register.
+ * @p: the given software portal object.
+ *
+ * Return the settings in SWP_IIR register.
+ */
+int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);
+
+/**
+ * qbman_swp_interrupt_set_inhibit() - Set the data in software portal
+ * interrupt inhibit register.
+ * @p: the given software portal object.
+ * @mask: The value to set in SWP_IIR register.
+ */
+void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);
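+
+/* Illustrative sketch: a polled-mode portal setup that masks every interrupt
+ * source and acknowledges anything already latched. 'swp' is an assumed
+ * initialised portal.
+ *
+ *     qbman_swp_interrupt_set_trigger(swp, 0);           // IER: nothing fires
+ *     qbman_swp_interrupt_clear_status(swp, 0xffffffff); // ack stale ISR bits
+ *     qbman_swp_interrupt_set_inhibit(swp, 1);           // gate the IRQ line
+ */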
+
+ /************/
+ /* Dequeues */
+ /************/
+
+/**
+ * struct qbman_result - structure for qbman dequeue response and/or
+ * notification.
+ * @common: generic view of the 64-byte result, valid for any response type.
+ * @dq: view for dequeue responses.
+ * @scn: view for state-change notifications.
+ */
+struct qbman_result {
+ union {
+ struct common {
+ uint8_t verb;
+ uint8_t reserved[63];
+ } common;
+ struct dq {
+ uint8_t verb;
+ uint8_t stat;
+ __le16 seqnum;
+ __le16 oprid;
+ uint8_t reserved;
+ uint8_t tok;
+ __le32 fqid;
+ uint32_t reserved2;
+ __le32 fq_byte_cnt;
+ __le32 fq_frm_cnt;
+ __le64 fqd_ctx;
+ uint8_t fd[32];
+ } dq;
+ struct scn {
+ uint8_t verb;
+ uint8_t stat;
+ uint8_t state;
+ uint8_t reserved;
+ __le32 rid_tok;
+ __le64 ctx;
+ } scn;
+ };
+};
+
+/* TODO:
+ * A DQRI interrupt can be generated when there are dequeue results on the
+ * portal's DQRR (this mechanism does not deal with "pull" dequeues to
+ * user-supplied 'storage' addresses). There are two parameters to this
+ * interrupt source, one is a threshold and the other is a timeout. The
+ * interrupt will fire if either the fill-level of the ring exceeds 'thresh', or
+ * if the ring has been non-empty for longer than 'timeout' nanoseconds.
+ * For timeout, an approximation to the desired nanosecond-granularity value is
+ * made, so there are get and set APIs to allow the user to see what actual
+ * timeout is set (compared to the timeout that was requested).
+ */
+int qbman_swp_dequeue_thresh(struct qbman_swp *s, unsigned int thresh);
+int qbman_swp_dequeue_set_timeout(struct qbman_swp *s, unsigned int timeout);
+int qbman_swp_dequeue_get_timeout(struct qbman_swp *s, unsigned int *timeout);
+
+/* ------------------- */
+/* Push-mode dequeuing */
+/* ------------------- */
+
+/* The user of a portal can enable and disable push-mode dequeuing of up to 16
+ * channels independently. It does not specify this toggling by channel IDs, but
+ * rather by specifying the index (from 0 to 15) that has been mapped to the
+ * desired channel.
+ */
+
+/**
+ * qbman_swp_push_get() - Get the push dequeue setup.
+ * @s: the software portal object.
+ * @channel_idx: the channel index to query.
+ * @enabled: returned boolean to show whether the push dequeue is enabled for
+ * the given channel.
+ */
+void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled);
+
+/**
+ * qbman_swp_push_set() - Enable or disable push dequeue.
+ * @s: the software portal object.
+ * @channel_idx: the channel index.
+ * @enable: enable or disable push dequeue.
+ *
+ * The user of a portal can enable and disable push-mode dequeuing of up to 16
+ * channels independently. It does not specify this toggling by channel IDs, but
+ * rather by specifying the index (from 0 to 15) that has been mapped to the
+ * desired channel.
+ */
+void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable);
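+
+/* Illustrative sketch: toggling push dequeue on mapped channel index 0.
+ * 'swp' is an assumed initialised portal.
+ *
+ *     int enabled;
+ *
+ *     qbman_swp_push_set(swp, 0, 1);
+ *     qbman_swp_push_get(swp, 0, &enabled);   // enabled is now 1
+ *     qbman_swp_push_set(swp, 0, 0);
+ */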
+
+/* ------------------- */
+/* Pull-mode dequeuing */
+/* ------------------- */
+
+/**
+ * struct qbman_pull_desc - the structure for pull dequeue descriptor
+ */
+struct qbman_pull_desc {
+ union {
+ uint32_t donot_manipulate_directly[16];
+ struct pull {
+ uint8_t verb;
+ uint8_t numf;
+ uint8_t tok;
+ uint8_t reserved;
+ uint32_t dq_src;
+ uint64_t rsp_addr;
+ uint64_t rsp_addr_virt;
+ uint8_t padding[40];
+ } pull;
+ };
+};
+
+enum qbman_pull_type_e {
+ /* dequeue with priority precedence, respect intra-class scheduling */
+ qbman_pull_type_prio = 1,
+ /* dequeue with active FQ precedence, respect ICS */
+ qbman_pull_type_active,
+ /* dequeue with active FQ precedence, no ICS */
+ qbman_pull_type_active_noics
+};
+
+/**
+ * qbman_pull_desc_clear() - Clear the contents of a descriptor to
+ * default/starting state.
+ * @d: the pull dequeue descriptor to be cleared.
+ */
+void qbman_pull_desc_clear(struct qbman_pull_desc *d);
+
+/**
+ * qbman_pull_desc_set_storage()- Set the pull dequeue storage
+ * @d: the pull dequeue descriptor to be set.
+ * @storage: the pointer of the memory to store the dequeue result.
+ * @storage_phys: the physical address of the storage memory.
+ * @stash: to indicate whether write allocate is enabled.
+ *
+ * If not called, or if called with 'storage' as NULL, the resulting pull dequeues
+ * will produce results to DQRR. If 'storage' is non-NULL, then results are
+ * produced to the given memory location (using the physical/DMA address which
+ * the caller provides in 'storage_phys'), and 'stash' controls whether or not
+ * those writes to main-memory express a cache-warming attribute.
+ */
+void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
+ struct qbman_result *storage,
+ uint64_t storage_phys,
+ int stash);
+/**
+ * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued.
+ * @d: the pull dequeue descriptor to be set.
+ * @numframes: number of frames to be set, must be between 1 and 16, inclusive.
+ */
+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d,
+ uint8_t numframes);
+/**
+ * qbman_pull_desc_set_token() - Set dequeue token for pull command
+ * @d: the dequeue descriptor
+ * @token: the token to be set
+ *
+ * token is the value that shows up in the dequeue response that can be used to
+ * detect when the results have been published. The easiest technique is to zero
+ * result "storage" before issuing a dequeue, and use any non-zero 'token' value
+ */
+void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token);
+
+/* Exactly one of the following descriptor "actions" should be set. (Calling any
+ * one of these will replace the effect of any prior call to one of these.)
+ * - pull dequeue from the given frame queue (FQ)
+ * - pull dequeue from any FQ in the given work queue (WQ)
+ * - pull dequeue from any FQ in any WQ in the given channel
+ */
+/**
+ * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues.
+ * @fqid: the frame queue index of the given FQ.
+ */
+void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid);
+
+/**
+ * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues.
+ * @wqid: composed of channel id and wqid within the channel.
+ * @dct: the dequeue command type.
+ */
+void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
+ enum qbman_pull_type_e dct);
+
+/**
+ * qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
+ * dequeues.
+ * @chid: the channel id to be dequeued.
+ * @dct: the dequeue command type.
+ */
+void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
+ enum qbman_pull_type_e dct);
+
+/**
+ * qbman_swp_pull() - Issue the pull dequeue command
+ * @s: the software portal object.
+ * @d: the software portal descriptor which has been configured with
+ * the set of qbman_pull_desc_set_*() calls.
+ *
+ * Return 0 for success, and -EBUSY if the software portal is not ready
+ * to do pull dequeue.
+ */
+int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d);
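+
+/* Illustrative sketch: one pull dequeue of up to 16 frames from a single FQ
+ * into caller-provided storage. 'swp', 'fqid', 'storage' and 'storage_iova'
+ * are assumed to be prepared by the caller; the zeroed storage plus non-zero
+ * token follows the technique described above.
+ *
+ *     struct qbman_pull_desc pd;
+ *
+ *     memset(storage, 0, 16 * sizeof(*storage));
+ *     qbman_pull_desc_clear(&pd);
+ *     qbman_pull_desc_set_numframes(&pd, 16);
+ *     qbman_pull_desc_set_token(&pd, 1);
+ *     qbman_pull_desc_set_fq(&pd, fqid);
+ *     qbman_pull_desc_set_storage(&pd, storage, storage_iova, 1);
+ *     while (qbman_swp_pull(swp, &pd) == -EBUSY)
+ *         ;   // portal still busy with a previous volatile dequeue
+ */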
+
+/* -------------------------------- */
+/* Polling DQRR for dequeue results */
+/* -------------------------------- */
+
+/**
+ * qbman_swp_dqrr_next() - Get a valid DQRR entry.
+ * @s: the software portal object.
+ *
+ * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
+ * only once, so repeated calls can return a sequence of DQRR entries, without
+ * requiring they be consumed immediately or in any particular order.
+ */
+const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *p);
+
+/**
+ * qbman_swp_prefetch_dqrr_next() - prefetch the next DQRR entry.
+ * @s: the software portal object.
+ */
+void qbman_swp_prefetch_dqrr_next(struct qbman_swp *s);
+
+/**
+ * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from
+ * qbman_swp_dqrr_next().
+ * @s: the software portal object.
+ * @dq: the DQRR entry to be consumed.
+ */
+void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct qbman_result *dq);
+
+/**
+ * qbman_swp_dqrr_idx_consume() - Given the DQRR index consume the DQRR entry
+ * @s: the software portal object.
+ * @dqrr_index: the DQRR index entry to be consumed.
+ */
+void qbman_swp_dqrr_idx_consume(struct qbman_swp *s, uint8_t dqrr_index);
+
+/**
+ * qbman_get_dqrr_idx() - Get dqrr index from the given dqrr
+ * @dqrr: the given dqrr object.
+ *
+ * Return dqrr index.
+ */
+uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr);
+
+/**
+ * qbman_get_dqrr_from_idx() - Use index to get the dqrr entry from the
+ * given portal
+ * @s: the given portal.
+ * @idx: the dqrr index.
+ *
+ * Return dqrr entry object.
+ */
+struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx);
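+
+/* Illustrative sketch: draining the DQRR. Each entry is returned once and
+ * may be consumed as soon as its contents have been handled. 'swp' is an
+ * assumed initialised portal.
+ *
+ *     const struct qbman_result *dq;
+ *
+ *     while ((dq = qbman_swp_dqrr_next(swp)) != NULL) {
+ *         if (qbman_result_is_DQ(dq))
+ *             ...process qbman_result_DQ_fd(dq)...
+ *         qbman_swp_dqrr_consume(swp, dq);
+ *     }
+ */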
+
+/* ------------------------------------------------- */
+/* Polling user-provided storage for dequeue results */
+/* ------------------------------------------------- */
+
+/**
+ * qbman_result_has_new_result() - Check and get the dequeue response from the
+ * dq storage memory set in pull dequeue command
+ * @s: the software portal object.
+ * @dq: the dequeue result read from the memory.
+ *
+ * Only used for user-provided storage of dequeue results, not DQRR. For
+ * efficiency purposes, the driver will perform any required endianness
+ * conversion to ensure that the user's dequeue result storage is in host-endian
+ * format (whether or not that is the same as the little-endian format that
+ * hardware DMA'd to the user's storage). As such, once the user has called
+ * qbman_result_has_new_result() and been returned a valid dequeue result,
+ * they should not call it again on the same memory location (except of course
+ * if another dequeue command has been executed to produce a new result to that
+ * location).
+ *
+ * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
+ * dequeue result.
+ */
+int qbman_result_has_new_result(struct qbman_swp *s,
+ struct qbman_result *dq);
+
+/**
+ * qbman_check_command_complete() - Check if the previously issued dq command
+ * is completed and results are available in memory.
+ * @s: the software portal object.
+ * @dq: the dequeue result read from the memory.
+ *
+ * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
+ * dequeue result.
+ */
+int qbman_check_command_complete(struct qbman_result *dq);
+
+int qbman_check_new_result(struct qbman_result *dq);
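+
+/* Illustrative sketch: polling user-provided storage after a pull command,
+ * paired with the zeroed-storage/non-zero-token convention noted above.
+ * 'swp' and 'storage' are assumed from the preceding pull sequence.
+ *
+ *     while (!qbman_check_command_complete(storage))
+ *         ;   // hardware has not yet DMA'd the result
+ *     if (qbman_result_has_new_result(swp, storage))
+ *         ...storage now holds a host-endian dequeue result...
+ */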
+
+/* -------------------------------------------------------- */
+/* Parsing dequeue entries (DQRR and user-provided storage) */
+/* -------------------------------------------------------- */
+
+/**
+ * qbman_result_is_DQ() - check the dequeue result is a dequeue response or not
+ * @dq: the dequeue result to be checked.
+ *
+ * DQRR entries may contain non-dequeue results, ie. notifications
+ */
+int qbman_result_is_DQ(const struct qbman_result *dq);
+
+/**
+ * qbman_result_is_SCN() - Check the dequeue result is notification or not
+ * @dq: the dequeue result to be checked.
+ *
+ * All the non-dequeue results (FQDAN/CDAN/CSCN/...) are "state change
+ * notifications" of one type or another. Some APIs apply to all of them, of the
+ * form qbman_result_SCN_***().
+ */
+static inline int qbman_result_is_SCN(const struct qbman_result *dq)
+{
+ return !qbman_result_is_DQ(dq);
+}
+
+/* Recognise different notification types, only required if the user allows for
+ * these to occur, and cares about them when they do.
+ */
+
+/**
+ * qbman_result_is_FQDAN() - Check for FQ Data Availability
+ * @dq: the qbman_result object.
+ *
+ * Return 1 if this is FQDAN.
+ */
+int qbman_result_is_FQDAN(const struct qbman_result *dq);
+
+/**
+ * qbman_result_is_CDAN() - Check for Channel Data Availability
+ * @dq: the qbman_result object to check.
+ *
+ * Return 1 if this is CDAN.
+ */
+int qbman_result_is_CDAN(const struct qbman_result *dq);
+
+/**
+ * qbman_result_is_CSCN() - Check for Congestion State Change
+ * @dq: the qbman_result object to check.
+ *
+ * Return 1 if this is CSCN.
+ */
+int qbman_result_is_CSCN(const struct qbman_result *dq);
+
+/**
+ * qbman_result_is_BPSCN() - Check for Buffer Pool State Change.
+ * @dq: the qbman_result object to check.
+ *
+ * Return 1 if this is BPSCN.
+ */
+int qbman_result_is_BPSCN(const struct qbman_result *dq);
+
+/**
+ * qbman_result_is_CGCU() - Check for Congestion Group Count Update.
+ * @dq: the qbman_result object to check.
+ *
+ * Return 1 if this is CGCU.
+ */
+int qbman_result_is_CGCU(const struct qbman_result *dq);
+
+/* Frame queue state change notifications; (FQDAN in theory counts too as it
+ * leaves a FQ parked, but it is primarily a data availability notification)
+ */
+
+/**
+ * qbman_result_is_FQRN() - Check for FQ Retirement Notification.
+ * @dq: the qbman_result object to check.
+ *
+ * Return 1 if this is FQRN.
+ */
+int qbman_result_is_FQRN(const struct qbman_result *dq);
+
+/**
+ * qbman_result_is_FQRNI() - Check for FQ Retirement Immediate
+ * @dq: the qbman_result object to check.
+ *
+ * Return 1 if this is FQRNI.
+ */
+int qbman_result_is_FQRNI(const struct qbman_result *dq);
+
+/**
+ * qbman_result_is_FQPN() - Check for FQ Park Notification
+ * @dq: the qbman_result object to check.
+ *
+ * Return 1 if this is FQPN.
+ */
+int qbman_result_is_FQPN(const struct qbman_result *dq);
+
+/* Parsing frame dequeue results (qbman_result_is_DQ() must be TRUE)
+ */
+/* FQ empty */
+#define QBMAN_DQ_STAT_FQEMPTY 0x80
+/* FQ held active */
+#define QBMAN_DQ_STAT_HELDACTIVE 0x40
+/* FQ force eligible */
+#define QBMAN_DQ_STAT_FORCEELIGIBLE 0x20
+/* Valid frame */
+#define QBMAN_DQ_STAT_VALIDFRAME 0x10
+/* FQ ODP enable */
+#define QBMAN_DQ_STAT_ODPVALID 0x04
+/* Volatile dequeue */
+#define QBMAN_DQ_STAT_VOLATILE 0x02
+/* volatile dequeue command is expired */
+#define QBMAN_DQ_STAT_EXPIRED 0x01
+
+#define QBMAN_EQCR_DCA_IDXMASK 0x0f
+#define QBMAN_ENQUEUE_FLAG_DCA (1ULL << 31)
+
+/**
+ * qbman_result_DQ_flags() - Get the STAT field of dequeue response
+ * @dq: the dequeue result.
+ *
+ * Return the state field.
+ */
+uint8_t qbman_result_DQ_flags(const struct qbman_result *dq);
+
+/**
+ * qbman_result_DQ_is_pull() - Check whether the dq response is from a pull
+ * command.
+ * @dq: the dequeue result.
+ *
+ * Return 1 for volatile(pull) dequeue, 0 for static dequeue.
+ */
+static inline int qbman_result_DQ_is_pull(const struct qbman_result *dq)
+{
+ return (int)(qbman_result_DQ_flags(dq) & QBMAN_DQ_STAT_VOLATILE);
+}
+
+/**
+ * qbman_result_DQ_is_pull_complete() - Check whether the pull command is
+ * completed.
+ * @dq: the dequeue result.
+ *
+ * Return boolean.
+ */
+static inline int qbman_result_DQ_is_pull_complete(
+ const struct qbman_result *dq)
+{
+ return (int)(qbman_result_DQ_flags(dq) & QBMAN_DQ_STAT_EXPIRED);
+}
+
+/**
+ * qbman_result_DQ_seqnum() - Get the seqnum field in dequeue response
+ * seqnum is valid only if VALIDFRAME flag is TRUE
+ * @dq: the dequeue result.
+ *
+ * Return seqnum.
+ */
+uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq);
+
+/**
+ * qbman_result_DQ_odpid() - Get the odpid field in dequeue response
+ * odpid is valid only if ODPVALID flag is TRUE.
+ * @dq: the dequeue result.
+ *
+ * Return odpid.
+ */
+uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq);
+
+/**
+ * qbman_result_DQ_fqid() - Get the fqid in dequeue response
+ * @dq: the dequeue result.
+ *
+ * Return fqid.
+ */
+uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq);
+
+/**
+ * qbman_result_DQ_byte_count() - Get the byte count in dequeue response
+ * @dq: the dequeue result.
+ *
+ * Return the byte count remaining in the FQ.
+ */
+uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq);
+
+/**
+ * qbman_result_DQ_frame_count - Get the frame count in dequeue response
+ * @dq: the dequeue result.
+ *
+ * Return the frame count remaining in the FQ.
+ */
+uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq);
+
+/**
+ * qbman_result_DQ_fqd_ctx() - Get the frame queue context in dequeue response
+ * @dq: the dequeue result.
+ *
+ * Return the frame queue context.
+ */
+uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq);
+
+/**
+ * qbman_result_DQ_fd() - Get the frame descriptor in dequeue response
+ * @dq: the dequeue result.
+ *
+ * Return the frame descriptor.
+ */
+const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq);
+
+/* State-change notifications (FQDAN/CDAN/CSCN/...). */
+
+/**
+ * qbman_result_SCN_state() - Get the state field in State-change notification
+ * @scn: the state change notification.
+ *
+ * Return the state in the notification.
+ */
+uint8_t qbman_result_SCN_state(const struct qbman_result *scn);
+
+/**
+ * qbman_result_SCN_rid() - Get the resource id from the notification
+ * @scn: the state change notification.
+ *
+ * Return the resource id.
+ */
+uint32_t qbman_result_SCN_rid(const struct qbman_result *scn);
+
+/**
+ * qbman_result_SCN_ctx() - get the context from the notification
+ * @scn: the state change notification.
+ *
+ * Return the context.
+ */
+uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn);
+
+/* Type-specific "resource IDs". Mainly for illustration purposes, though it
+ * also gives the appropriate type widths.
+ */
+/* Get the FQID from the FQDAN */
+#define qbman_result_FQDAN_fqid(dq) qbman_result_SCN_rid(dq)
+/* Get the FQID from the FQRN */
+#define qbman_result_FQRN_fqid(dq) qbman_result_SCN_rid(dq)
+/* Get the FQID from the FQRNI */
+#define qbman_result_FQRNI_fqid(dq) qbman_result_SCN_rid(dq)
+/* Get the FQID from the FQPN */
+#define qbman_result_FQPN_fqid(dq) qbman_result_SCN_rid(dq)
+/* Get the channel ID from the CDAN */
+#define qbman_result_CDAN_cid(dq) ((uint16_t)qbman_result_SCN_rid(dq))
+/* Get the CGID from the CSCN */
+#define qbman_result_CSCN_cgid(dq) ((uint16_t)qbman_result_SCN_rid(dq))
+
+/**
+ * qbman_result_bpscn_bpid() - Get the bpid from BPSCN
+ * @scn: the state change notification.
+ *
+ * Return the buffer pool id.
+ */
+uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn);
+
+/**
+ * qbman_result_bpscn_has_free_bufs() - Check whether there are free
+ * buffers in the pool from BPSCN.
+ * @scn: the state change notification.
+ *
+ * Return the number of free buffers.
+ */
+int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn);
+
+/**
+ * qbman_result_bpscn_is_depleted() - Check BPSCN to see whether the
+ * buffer pool is depleted.
+ * @scn: the state change notification.
+ *
+ * Return the status of buffer pool depletion.
+ */
+int qbman_result_bpscn_is_depleted(const struct qbman_result *scn);
+
+/**
+ * qbman_result_bpscn_is_surplus() - Check BPSCN to see whether the buffer
+ * pool is surplus or not.
+ * @scn: the state change notification.
+ *
+ * Return the status of buffer pool surplus.
+ */
+int qbman_result_bpscn_is_surplus(const struct qbman_result *scn);
+
+/**
+ * qbman_result_bpscn_ctx() - Get the BPSCN CTX from BPSCN message
+ * @scn: the state change notification.
+ *
+ * Return the BPSCN context.
+ */
+uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn);
+
+/* Parsing CGCU */
+/**
+ * qbman_result_cgcu_cgid() - Check CGCU resource id, i.e. cgid
+ * @scn: the state change notification.
+ *
+ * Return the CGCU resource id.
+ */
+uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn);
+
+/**
+ * qbman_result_cgcu_icnt() - Get the I_CNT from CGCU
+ * @scn: the state change notification.
+ *
+ * Return instantaneous count in the CGCU notification.
+ */
+uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn);
+
+ /************/
+ /* Enqueues */
+ /************/
+
+/* struct qbman_eq_desc - structure of enqueue descriptor */
+struct qbman_eq_desc {
+ union {
+ uint32_t donot_manipulate_directly[8];
+ struct eq {
+ uint8_t verb;
+ uint8_t dca;
+ uint16_t seqnum;
+ uint16_t orpid;
+ uint16_t reserved1;
+ uint32_t tgtid;
+ uint32_t tag;
+ uint16_t qdbin;
+ uint8_t qpri;
+ uint8_t reserved[3];
+ uint8_t wae;
+ uint8_t rspid;
+ uint64_t rsp_addr;
+ } eq;
+ };
+};
+
+/**
+ * struct qbman_eq_response - structure of enqueue response
+ * @donot_manipulate_directly: the 16 32bit data to represent the whole
+ * enqueue response.
+ */
+struct qbman_eq_response {
+ uint32_t donot_manipulate_directly[16];
+};
+
+/**
+ * qbman_eq_desc_clear() - Clear the contents of a descriptor to
+ * default/starting state.
+ * @d: the given enqueue descriptor.
+ */
+void qbman_eq_desc_clear(struct qbman_eq_desc *d);
+
+/* Exactly one of the following descriptor "actions" should be set. (Calling
+ * any one of these will replace the effect of any prior call to one of these.)
+ * - enqueue without order-restoration
+ * - enqueue with order-restoration
+ * - fill a hole in the order-restoration sequence, without any enqueue
+ * - advance NESN (Next Expected Sequence Number), without any enqueue
+ * 'respond_success' indicates whether an enqueue response should be DMA'd
+ * after success (otherwise a response is DMA'd only after failure).
+ * 'incomplete' indicates that other fragments of the same 'seqnum' are yet to
+ * be enqueued.
+ */
+
+/**
+ * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
+ * @d: the enqueue descriptor.
+ * @respond_success: 1 = enqueue with response always; 0 = enqueue with
+ * rejections returned on a FQ.
+ */
+void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
+/**
+ * qbman_eq_desc_set_orp() - Set order-restoration in the enqueue descriptor
+ * @d: the enqueue descriptor.
+ * @respond_success: 1 = enqueue with response always; 0 = enqueue with
+ * rejections returned on a FQ.
+ * @opr_id: the order point record id.
+ * @seqnum: the order restoration sequence number.
+ * @incomplete: indicates whether more fragments using the same sequence
+ * number are yet to be enqueued.
+ */
+void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
+ uint16_t opr_id, uint16_t seqnum, int incomplete);
+
+/**
+ * qbman_eq_desc_set_orp_hole() - fill a hole in the order-restoration sequence
+ * without any enqueue
+ * @d: the enqueue descriptor.
+ * @opr_id: the order point record id.
+ * @seqnum: the order restoration sequence number.
+ */
+void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint16_t opr_id,
+ uint16_t seqnum);
+
+/**
+ * qbman_eq_desc_set_orp_nesn() - advance NESN (Next Expected Sequence Number)
+ * without any enqueue
+ * @d: the enqueue descriptor.
+ * @opr_id: the order point record id.
+ * @seqnum: the order restoration sequence number.
+ */
+void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint16_t opr_id,
+ uint16_t seqnum);
+/**
+ * qbman_eq_desc_set_response() - Set the enqueue response info.
+ * @d: the enqueue descriptor
+ * @storage_phys: the physical address of the enqueue response in memory.
+ * @stash: indicates whether write allocation is enabled.
+ *
+ * In the case where an enqueue response is DMA'd, this determines where that
+ * response should go. (The physical/DMA address is given for hardware's
+ * benefit, but software should interpret it as a "struct qbman_eq_response"
+ * data structure.) 'stash' controls whether or not the write to main-memory
+ * expresses a cache-warming attribute.
+ */
+void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
+ uint64_t storage_phys,
+ int stash);
+
+/**
+ * qbman_eq_desc_set_token() - Set token for the enqueue command
+ * @d: the enqueue descriptor
+ * @token: the token to be set.
+ *
+ * token is the value that shows up in an enqueue response that can be used to
+ * detect when the results have been published. The easiest technique is to zero
+ * result "storage" before issuing an enqueue, and use any non-zero 'token'
+ * value.
+ */
+void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token);
+
+/**
+ * Exactly one of the following descriptor "targets" should be set. (Calling any
+ * one of these will replace the effect of any prior call to one of these.)
+ * - enqueue to a frame queue
+ * - enqueue to a queuing destination
+ * Note that none of these will have any effect if the "action" type has been
+ * set to "orp_hole" or "orp_nesn".
+ */
+/**
+ * qbman_eq_desc_set_fq() - Set Frame Queue id for the enqueue command
+ * @d: the enqueue descriptor
+ * @fqid: the id of the frame queue to be enqueued.
+ */
+void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid);
+
+/**
+ * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command.
+ * @d: the enqueue descriptor
+ * @qdid: the id of the queuing destination to be enqueued.
+ * @qd_bin: the queuing destination bin
+ * @qd_prio: the queuing destination priority.
+ */
+void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
+ uint16_t qd_bin, uint8_t qd_prio);
+
+/**
+ * qbman_eq_desc_set_eqdi() - enable/disable EQDI interrupt
+ * @d: the enqueue descriptor
+ * @enable: boolean to enable/disable EQDI
+ *
+ * Determines whether or not the portal's EQDI interrupt source should be
+ * asserted after the enqueue command is completed.
+ */
+void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable);
+
+/**
+ * qbman_eq_desc_set_dca() - Set DCA mode in the enqueue command.
+ * @d: the enqueue descriptor.
+ * @enable: enabled/disable DCA mode.
+ * @dqrr_idx: DCAP_CI, the DCAP consumer index.
+ * @park: determine the whether park the FQ or not
+ *
+ * Determines whether or not a portal DQRR entry should be consumed once the
+ * enqueue command is completed. (And if so, and the DQRR entry corresponds to a
+ * held-active (order-preserving) FQ, whether the FQ should be parked instead of
+ * being rescheduled.)
+ */
+void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
+ uint8_t dqrr_idx, int park);
+
+/**
+ * qbman_swp_enqueue() - Issue an enqueue command.
+ * @s: the software portal used for enqueue.
+ * @d: the enqueue descriptor.
+ * @fd: the frame descriptor to be enqueued.
+ *
+ * Please note that 'fd' should only be NULL if the "action" of the
+ * descriptor is "orp_hole" or "orp_nesn".
+ *
+ * Return 0 for a successful enqueue, -EBUSY if the EQCR is not ready.
+ */
+int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd);
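+
+/* Illustrative sketch: a fire-and-forget enqueue to an FQ, with responses
+ * DMA'd only on rejection. 'swp', 'fqid' and 'fd' are assumed to be prepared
+ * by the caller.
+ *
+ *     struct qbman_eq_desc ed;
+ *
+ *     qbman_eq_desc_clear(&ed);
+ *     qbman_eq_desc_set_no_orp(&ed, 0);   // respond only on failure
+ *     qbman_eq_desc_set_fq(&ed, fqid);
+ *     while (qbman_swp_enqueue(swp, &ed, fd) == -EBUSY)
+ *         ;   // EQCR full, retry
+ */
+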
+/**
+ * qbman_swp_enqueue_multiple() - Enqueue multiple frames with the same
+ * eq descriptor.
+ * @s: the software portal used for enqueue.
+ * @d: the enqueue descriptor.
+ * @fd: the frame descriptors to be enqueued.
+ * @flags: bit-mask of QBMAN_ENQUEUE_FLAG_*** options, one per frame, or NULL.
+ * @num_frames: the number of the frames to be enqueued.
+ *
+ * Return the number of enqueued frames, -EBUSY if the EQCR is not ready.
+ */
+int qbman_swp_enqueue_multiple(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ uint32_t *flags,
+ int num_frames);
+/**
+ * qbman_swp_enqueue_multiple_desc() - Enqueue multiple frames with
+ * individual eq descriptor.
+ * @s: the software portal used for enqueue.
+ * @d: the enqueue descriptors, one per frame.
+ * @fd: the frame descriptors to be enqueued.
+ * @num_frames: the number of the frames to be enqueued.
+ *
+ * Return the number of enqueued frames, -EBUSY if the EQCR is not ready.
+ */
+int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ int num_frames);
+
+/* TODO:
+ * qbman_swp_enqueue_thresh() - Set threshold for EQRI interrupt.
+ * @s: the software portal.
+ * @thresh: the threshold to trigger the EQRI interrupt.
+ *
+ * An EQRI interrupt can be generated when the fill-level of EQCR falls below
+ * the 'thresh' value set here. Setting thresh==0 (the default) disables.
+ */
+int qbman_swp_enqueue_thresh(struct qbman_swp *s, unsigned int thresh);
+
+ /*******************/
+ /* Buffer releases */
+ /*******************/
+/**
+ * struct qbman_release_desc - The structure for buffer release descriptor
+ * @donot_manipulate_directly: the 16 32bit words representing the whole
+ * possible settings of the qbman release descriptor.
+ */
+struct qbman_release_desc {
+ union {
+ uint32_t donot_manipulate_directly[16];
+ struct br {
+ uint8_t verb;
+ uint8_t reserved;
+ uint16_t bpid;
+ uint32_t reserved2;
+ uint64_t buf[7];
+ } br;
+ };
+};
+
+/**
+ * qbman_release_desc_clear() - Clear the contents of a descriptor to
+ * default/starting state.
+ * @d: the qbman release descriptor.
+ */
+void qbman_release_desc_clear(struct qbman_release_desc *d);
+
+/**
+ * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
+ * @d: the qbman release descriptor.
+ */
+void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint16_t bpid);
+
+/**
+ * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
+ * interrupt source should be asserted after the release command is completed.
+ * @d: the qbman release descriptor.
+ */
+void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);
+
+/**
+ * qbman_swp_release() - Issue a buffer release command.
+ * @s: the software portal object.
+ * @d: the release descriptor.
+ * @buffers: a pointer pointing to the buffer address to be released.
+ * @num_buffers: number of buffers to be released, must be less than 8.
+ *
+ * Return 0 for success, -EBUSY if the release command ring is not ready.
+ */
+int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
+ const uint64_t *buffers, unsigned int num_buffers);
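+
+/* Illustrative sketch: returning two buffers to pool 'bpid'. The addresses
+ * are IOVAs as seen by QBMan; 'swp' is an assumed initialised portal.
+ *
+ *     struct qbman_release_desc rd;
+ *     uint64_t bufs[2] = { buf0_iova, buf1_iova };
+ *
+ *     qbman_release_desc_clear(&rd);
+ *     qbman_release_desc_set_bpid(&rd, bpid);
+ *     while (qbman_swp_release(swp, &rd, bufs, 2) == -EBUSY)
+ *         ;   // release command ring full, retry
+ */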
+
+/* TODO:
+ * qbman_swp_release_thresh() - Set threshold for RCRI interrupt
+ * @s: the software portal.
+ * @thresh: the threshold.
+ * An RCRI interrupt can be generated when the fill-level of RCR falls below
+ * the 'thresh' value set here. Setting thresh==0 (the default) disables.
+ */
+int qbman_swp_release_thresh(struct qbman_swp *s, unsigned int thresh);
+
+ /*******************/
+ /* Buffer acquires */
+ /*******************/
+/**
+ * qbman_swp_acquire() - Issue a buffer acquire command.
+ * @s: the software portal object.
+ * @bpid: the buffer pool index.
+ * @buffers: a pointer pointing to the acquired buffer address(es).
+ * @num_buffers: number of buffers to be acquired, must be less than 8.
+ *
+ * Return 0 for success, or negative error code if the acquire command
+ * fails.
+ */
+int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
+ unsigned int num_buffers);
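+
+/* Illustrative sketch: pulling up to 7 buffers back out of pool 'bpid'; on
+ * success the acquired addresses are written into 'bufs'.
+ *
+ *     uint64_t bufs[7];
+ *
+ *     if (qbman_swp_acquire(swp, bpid, bufs, 7) < 0)
+ *         ...pool depleted or command failed...
+ */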
+
+ /*****************/
+ /* FQ management */
+ /*****************/
+/**
+ * qbman_swp_fq_schedule() - Move the fq to the scheduled state.
+ * @s: the software portal object.
+ * @fqid: the index of frame queue to be scheduled.
+ *
+ * There are a couple of different ways that a FQ can end up in the parked
+ * state; this schedules it.
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid);
+
+/**
+ * qbman_swp_fq_force() - Force the FQ to fully scheduled state.
+ * @s: the software portal object.
+ * @fqid: the index of frame queue to be forced.
+ *
+ * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
+ * and thus be available for selection by any channel-dequeuing behaviour (push
+ * or pull). If the FQ is subsequently "dequeued" from the channel and is still
+ * empty at the time this happens, the resulting dq_entry will have no FD.
+ * (qbman_result_DQ_fd() will return NULL.)
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid);
+
+/**
+ * These functions change the FQ flow-control stuff between XON/XOFF. (The
+ * default is XON.) This setting doesn't affect enqueues to the FQ, just
+ * dequeues. XOFF FQs will remain in the tentatively-scheduled state, even when
+ * non-empty, meaning they won't be selected for scheduled dequeuing. If a FQ is
+ * changed to XOFF after it had already become truly-scheduled to a channel, and
+ * a pull dequeue of that channel occurs that selects that FQ for dequeuing,
+ * then the resulting dq_entry will have no FD. (qbman_result_DQ_fd() will
+ * return NULL.)
+ */
+/**
+ * qbman_swp_fq_xon() - XON the frame queue.
+ * @s: the software portal object.
+ * @fqid: the index of frame queue.
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid);
+/**
+ * qbman_swp_fq_xoff() - XOFF the frame queue.
+ * @s: the software portal object.
+ * @fqid: the index of frame queue.
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid);
+
+ /**********************/
+ /* Channel management */
+ /**********************/
+
+/**
+ * If the user has been allocated a channel object that is going to generate
+ * CDANs to another channel, then these functions will be necessary.
+ * CDAN-enabled channels only generate a single CDAN notification, after which
+ * they need to be reenabled before they'll generate another. (The idea is
+ * that pull dequeuing will occur in reaction to the CDAN, followed by a
+ * reenable step.) Each function generates a distinct command to hardware, so a
+ * combination function is provided if the user wishes to modify the "context"
+ * (which shows up in each CDAN message) each time they reenable, as a single
+ * command to hardware.
+ */
+
+/**
+ * qbman_swp_CDAN_set_context() - Set CDAN context
+ * @s: the software portal object.
+ * @channelid: the channel index.
+ * @ctx: the context to be set in CDAN.
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
+ uint64_t ctx);
+
+/**
+ * qbman_swp_CDAN_enable() - Enable CDAN for the channel.
+ * @s: the software portal object.
+ * @channelid: the index of the channel to generate CDAN.
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid);
+
+/**
+ * qbman_swp_CDAN_disable() - disable CDAN for the channel.
+ * @s: the software portal object.
+ * @channelid: the index of the channel to generate CDAN.
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid);
+
+/**
+ * qbman_swp_CDAN_set_context_enable() - Set CDAN context and enable CDAN
+ * @s: the software portal object.
+ * @channelid: the index of the channel to generate CDAN.
+ * @ctx: the context set in CDAN.
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
+ uint64_t ctx);
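+
+/* Illustrative sketch: arming a channel for its next CDAN. The context set
+ * here reappears in every CDAN, so a driver typically stores a pointer to
+ * its own channel state there. 'swp', 'channelid' and 'my_ch' are assumed.
+ *
+ *     qbman_swp_CDAN_set_context_enable(swp, channelid,
+ *                                       (uint64_t)(uintptr_t)my_ch);
+ *     ...on CDAN: my_ch = (void *)(uintptr_t)qbman_result_SCN_ctx(scn);...
+ *     qbman_swp_CDAN_enable(swp, channelid);   // re-arm after servicing
+ */
+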
+#endif /* !_FSL_QBMAN_PORTAL_H */
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_debug.c b/src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_debug.c
new file mode 100644
index 00000000..591673ab
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_debug.c
@@ -0,0 +1,66 @@
+/* Copyright (C) 2015 Freescale Semiconductor, Inc.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "compat.h"
+#include <fsl_qbman_debug.h>
+#include "qbman_portal.h"
+
+/* QBMan portal management command code */
+#define QBMAN_BP_QUERY 0x32
+#define QBMAN_FQ_QUERY 0x44
+#define QBMAN_FQ_QUERY_NP 0x45
+#define QBMAN_WQ_QUERY 0x47
+#define QBMAN_CGR_QUERY 0x51
+#define QBMAN_WRED_QUERY 0x54
+#define QBMAN_CGR_STAT_QUERY 0x55
+#define QBMAN_CGR_STAT_QUERY_CLR 0x56
+
+struct qbman_fq_query_desc {
+ uint8_t verb;
+ uint8_t reserved[3];
+ uint32_t fqid;
+ uint8_t reserved2[57];
+};
+
+int qbman_fq_query_state(struct qbman_swp *s, uint32_t fqid,
+ struct qbman_fq_query_np_rslt *r)
+{
+ struct qbman_fq_query_desc *p;
+ struct qbman_fq_query_np_rslt *rslt;
+
+ p = (struct qbman_fq_query_desc *)qbman_swp_mc_start(s);
+ if (!p)
+ return -EBUSY;
+
+ p->fqid = fqid;
+ rslt = (struct qbman_fq_query_np_rslt *)qbman_swp_mc_complete(s, p,
+ QBMAN_FQ_QUERY_NP);
+ if (!rslt) {
+ pr_err("qbman: Query FQID %u NP fields failed, no response\n",
+ fqid);
+ return -EIO;
+ }
+ *r = *rslt;
+
+ /* Decode the outcome */
+ QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_FQ_QUERY_NP);
+
+ /* Determine success or failure */
+ if (r->rslt != QBMAN_MC_RSLT_OK) {
+ pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n",
+ fqid, r->rslt);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+uint32_t qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r)
+{
+ return (r->frm_cnt & 0x00FFFFFF);
+}
+
+uint32_t qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r)
+{
+ return r->byte_cnt;
+}
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_portal.c b/src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_portal.c
new file mode 100644
index 00000000..07145005
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_portal.c
@@ -0,0 +1,1425 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ *
+ */
+
+#include "qbman_portal.h"
+
+/* QBMan portal management command codes */
+#define QBMAN_MC_ACQUIRE 0x30
+#define QBMAN_WQCHAN_CONFIGURE 0x46
+
+/* CINH register offsets */
+#define QBMAN_CINH_SWP_EQCR_PI 0x800
+#define QBMAN_CINH_SWP_EQCR_CI 0x840
+#define QBMAN_CINH_SWP_EQAR 0x8c0
+#define QBMAN_CINH_SWP_DQPI 0xa00
+#define QBMAN_CINH_SWP_DCAP 0xac0
+#define QBMAN_CINH_SWP_SDQCR 0xb00
+#define QBMAN_CINH_SWP_RAR 0xcc0
+#define QBMAN_CINH_SWP_ISR 0xe00
+#define QBMAN_CINH_SWP_IER 0xe40
+#define QBMAN_CINH_SWP_ISDR 0xe80
+#define QBMAN_CINH_SWP_IIR 0xec0
+#define QBMAN_CINH_SWP_DQRR_ITR 0xa80
+#define QBMAN_CINH_SWP_ITPR 0xf40
+
+/* CENA register offsets */
+#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((uint32_t)(n) << 6))
+#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((uint32_t)(n) << 6))
+#define QBMAN_CENA_SWP_RCR(n) (0x400 + ((uint32_t)(n) << 6))
+#define QBMAN_CENA_SWP_CR 0x600
+#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((uint32_t)(vb) >> 1))
+#define QBMAN_CENA_SWP_VDQCR 0x780
+#define QBMAN_CENA_SWP_EQCR_CI 0x840
+
+/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
+#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)
+
+/* QBMan FQ management command codes */
+#define QBMAN_FQ_SCHEDULE 0x48
+#define QBMAN_FQ_FORCE 0x49
+#define QBMAN_FQ_XON 0x4d
+#define QBMAN_FQ_XOFF 0x4e
+
+/*******************************/
+/* Pre-defined attribute codes */
+/*******************************/
+
+#define QBMAN_RESPONSE_VERB_MASK 0x7f
+
+/*************************/
+/* SDQCR attribute codes */
+/*************************/
+#define QB_SDQCR_FC_SHIFT 29
+#define QB_SDQCR_FC_MASK 0x1
+#define QB_SDQCR_DCT_SHIFT 24
+#define QB_SDQCR_DCT_MASK 0x3
+#define QB_SDQCR_TOK_SHIFT 16
+#define QB_SDQCR_TOK_MASK 0xff
+#define QB_SDQCR_SRC_SHIFT 0
+#define QB_SDQCR_SRC_MASK 0xffff
+
+/* opaque token for static dequeues */
+#define QMAN_SDQCR_TOKEN 0xbb
+
+enum qbman_sdqcr_dct {
+ qbman_sdqcr_dct_null = 0,
+ qbman_sdqcr_dct_prio_ics,
+ qbman_sdqcr_dct_active_ics,
+ qbman_sdqcr_dct_active
+};
+
+enum qbman_sdqcr_fc {
+ qbman_sdqcr_fc_one = 0,
+ qbman_sdqcr_fc_up_to_3 = 1
+};
+
+/* We need to keep track of which SWP triggered a pull command, so we keep an
+ * array of portal IDs and use the token field to find the proper portal.
+ */
+#define MAX_QBMAN_PORTALS 64
+static struct qbman_swp *portal_idx_map[MAX_QBMAN_PORTALS];
+
+/*********************************/
+/* Portal constructor/destructor */
+/*********************************/
+
+/* Software portals should always be in the power-on state when we initialise,
+ * due to the CCSR-based portal reset functionality that MC has.
+ *
+ * Erk! Turns out that QMan versions prior to 4.1 do not correctly reset DQRR
+ * valid-bits, so we need to support a workaround where we don't trust
+ * valid-bits when detecting new entries until any stale ring entries have been
+ * overwritten at least once. The idea is that we read PI for the first few
+ * entries, then switch to valid-bit after that. The trick is to clear the
+ * bug-work-around boolean once the PI wraps around the ring for the first time.
+ *
+ * Note: this still carries a slight additional cost once the decrementer hits
+ * zero.
+ */
+struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
+{
+ int ret;
+ uint32_t eqcr_pi;
+ struct qbman_swp *p = malloc(sizeof(*p));
+
+ if (!p)
+ return NULL;
+ p->desc = *d;
+#ifdef QBMAN_CHECKING
+ p->mc.check = swp_mc_can_start;
+#endif
+ p->mc.valid_bit = QB_VALID_BIT;
+ p->sdq = 0;
+ p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
+ p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
+ p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
+
+ atomic_set(&p->vdq.busy, 1);
+ p->vdq.valid_bit = QB_VALID_BIT;
+ p->dqrr.next_idx = 0;
+ p->dqrr.valid_bit = QB_VALID_BIT;
+ if ((p->desc.qman_version & 0xFFFF0000) < QMAN_REV_4100) {
+ p->dqrr.dqrr_size = 4;
+ p->dqrr.reset_bug = 1;
+ } else {
+ p->dqrr.dqrr_size = 8;
+ p->dqrr.reset_bug = 0;
+ }
+
+ ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size);
+ if (ret) {
+ free(p);
+ pr_err("qbman_swp_sys_init() failed %d\n", ret);
+ return NULL;
+ }
+ /* SDQCR needs to be initialized to 0 when no channels are
+ * being dequeued from or else the QMan HW will indicate an
+ * error. The values that were calculated above will be
+ * applied when dequeues from a specific channel are enabled.
+ */
+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0);
+ eqcr_pi = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI);
+ p->eqcr.pi = eqcr_pi & 0xF;
+ p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
+ p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_CI) & 0xF;
+ p->eqcr.available = QBMAN_EQCR_SIZE - qm_cyc_diff(QBMAN_EQCR_SIZE,
+ p->eqcr.ci, p->eqcr.pi);
+
+ portal_idx_map[p->desc.idx] = p;
+ return p;
+}
+
+void qbman_swp_finish(struct qbman_swp *p)
+{
+#ifdef QBMAN_CHECKING
+ QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
+#endif
+ qbman_swp_sys_finish(&p->sys);
+ portal_idx_map[p->desc.idx] = NULL;
+ free(p);
+}
+
+const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p)
+{
+ return &p->desc;
+}
+
+/**************/
+/* Interrupts */
+/**************/
+
+uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p)
+{
+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR);
+}
+
+void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask)
+{
+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask);
+}
+
+uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p)
+{
+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR);
+}
+
+void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask)
+{
+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask);
+}
+
+uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p)
+{
+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQRR_ITR);
+}
+
+void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask)
+{
+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_DQRR_ITR, mask);
+}
+
+uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p)
+{
+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ITPR);
+}
+
+void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask)
+{
+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ITPR, mask);
+}
+
+uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
+{
+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER);
+}
+
+void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask)
+{
+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask);
+}
+
+int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
+{
+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR);
+}
+
+void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
+{
+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
+}
+
+/***********************/
+/* Management commands */
+/***********************/
+
+/*
+ * Internal code common to all types of management commands.
+ */
+
+void *qbman_swp_mc_start(struct qbman_swp *p)
+{
+ void *ret;
+#ifdef QBMAN_CHECKING
+ QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
+#endif
+ ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
+#ifdef QBMAN_CHECKING
+ if (!ret)
+ p->mc.check = swp_mc_can_submit;
+#endif
+ return ret;
+}
+
+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint8_t cmd_verb)
+{
+ uint8_t *v = cmd;
+#ifdef QBMAN_CHECKING
+	QBMAN_BUG_ON(p->mc.check != swp_mc_can_submit);
+#endif
+ /* TBD: "|=" is going to hurt performance. Need to move as many fields
+ * out of word zero, and for those that remain, the "OR" needs to occur
+ * at the caller side. This debug check helps to catch cases where the
+ * caller wants to OR but has forgotten to do so.
+ */
+ QBMAN_BUG_ON((*v & cmd_verb) != *v);
+ *v = cmd_verb | p->mc.valid_bit;
+ qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
+#ifdef QBMAN_CHECKING
+ p->mc.check = swp_mc_can_poll;
+#endif
+}
+
+void *qbman_swp_mc_result(struct qbman_swp *p)
+{
+ uint32_t *ret, verb;
+#ifdef QBMAN_CHECKING
+ QBMAN_BUG_ON(p->mc.check != swp_mc_can_poll);
+#endif
+ qbman_cena_invalidate_prefetch(&p->sys,
+ QBMAN_CENA_SWP_RR(p->mc.valid_bit));
+ ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
+ /* Remove the valid-bit - command completed if the rest is non-zero */
+ verb = ret[0] & ~QB_VALID_BIT;
+ if (!verb)
+ return NULL;
+#ifdef QBMAN_CHECKING
+ p->mc.check = swp_mc_can_start;
+#endif
+ p->mc.valid_bit ^= QB_VALID_BIT;
+ return ret;
+}
+
+/***********/
+/* Enqueue */
+/***********/
+
+#define QB_ENQUEUE_CMD_OPTIONS_SHIFT 0
+enum qb_enqueue_commands {
+ enqueue_empty = 0,
+ enqueue_response_always = 1,
+ enqueue_rejects_to_fq = 2
+};
+
+#define QB_ENQUEUE_CMD_EC_OPTION_MASK 0x3
+#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT 2
+#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
+#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT 4
+#define QB_ENQUEUE_CMD_DCA_PK_SHIFT 6
+#define QB_ENQUEUE_CMD_DCA_EN_SHIFT 7
+#define QB_ENQUEUE_CMD_NLIS_SHIFT 14
+#define QB_ENQUEUE_CMD_IS_NESN_SHIFT 15
+
+void qbman_eq_desc_clear(struct qbman_eq_desc *d)
+{
+ memset(d, 0, sizeof(*d));
+}
+
+void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
+{
+ d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
+ if (respond_success)
+ d->eq.verb |= enqueue_response_always;
+ else
+ d->eq.verb |= enqueue_rejects_to_fq;
+}
+
+void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
+ uint16_t opr_id, uint16_t seqnum, int incomplete)
+{
+ d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
+ if (respond_success)
+ d->eq.verb |= enqueue_response_always;
+ else
+ d->eq.verb |= enqueue_rejects_to_fq;
+
+ d->eq.orpid = opr_id;
+ d->eq.seqnum = seqnum;
+ if (incomplete)
+ d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_NLIS_SHIFT;
+ else
+ d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
+}
+
+void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint16_t opr_id,
+ uint16_t seqnum)
+{
+ d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
+ d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
+ d->eq.orpid = opr_id;
+ d->eq.seqnum = seqnum;
+ d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
+ d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT);
+}
+
+void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint16_t opr_id,
+ uint16_t seqnum)
+{
+ d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
+ d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
+ d->eq.orpid = opr_id;
+ d->eq.seqnum = seqnum;
+ d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
+ d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT;
+}
+
+void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
+ dma_addr_t storage_phys,
+ int stash)
+{
+ d->eq.rsp_addr = storage_phys;
+ d->eq.wae = stash;
+}
+
+void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token)
+{
+ d->eq.rspid = token;
+}
+
+void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid)
+{
+ d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
+ d->eq.tgtid = fqid;
+}
+
+void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
+ uint16_t qd_bin, uint8_t qd_prio)
+{
+ d->eq.verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
+ d->eq.tgtid = qdid;
+ d->eq.qdbin = qd_bin;
+ d->eq.qpri = qd_prio;
+}
+
+void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable)
+{
+ if (enable)
+ d->eq.verb |= 1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT;
+ else
+ d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT);
+}
+
+void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
+ uint8_t dqrr_idx, int park)
+{
+ if (enable) {
+ d->eq.dca = dqrr_idx;
+ if (park)
+ d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT;
+ else
+ d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT);
+ d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT;
+ } else {
+ d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT);
+ }
+}
+
+#define EQAR_IDX(eqar) ((eqar) & 0x7)
+#define EQAR_VB(eqar) ((eqar) & 0x80)
+#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
+
+static int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd)
+{
+ uint32_t *p;
+ const uint32_t *cl = qb_cl(d);
+ uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);
+
+ pr_debug("EQAR=%08x\n", eqar);
+ if (!EQAR_SUCCESS(eqar))
+ return -EBUSY;
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
+ memcpy(&p[1], &cl[1], 28);
+ memcpy(&p[8], fd, sizeof(*fd));
+ /* Set the verb byte, have to substitute in the valid-bit */
+ lwsync();
+ p[0] = cl[0] | EQAR_VB(eqar);
+ qbman_cena_write_complete_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
+ return 0;
+}
+
+static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd)
+{
+ uint32_t *p;
+ const uint32_t *cl = qb_cl(d);
+ uint32_t eqcr_ci;
+ uint8_t diff;
+
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ s->eqcr.ci = qbman_cena_read_reg(&s->sys,
+ QBMAN_CENA_SWP_EQCR_CI) & 0xF;
+ diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
+ eqcr_ci, s->eqcr.ci);
+ s->eqcr.available += diff;
+ if (!diff)
+ return -EBUSY;
+ }
+
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
+ memcpy(&p[1], &cl[1], 28);
+ memcpy(&p[8], fd, sizeof(*fd));
+ lwsync();
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ qbman_cena_write_complete_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
+ s->eqcr.pi++;
+ s->eqcr.pi &= 0xF;
+ s->eqcr.available--;
+ if (!(s->eqcr.pi & 7))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+
+ return 0;
+}
+
+int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd)
+{
+ if (s->sys.eqcr_mode == qman_eqcr_vb_array)
+ return qbman_swp_enqueue_array_mode(s, d, fd);
+ else /* Use ring mode by default */
+ return qbman_swp_enqueue_ring_mode(s, d, fd);
+}
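+
+/* Illustrative sketch (not part of the driver): a typical single-frame
+ * enqueue to a frame queue, retrying while the ring is full. The fqid and
+ * frame descriptor are hypothetical caller state.
+ */
+static inline int example_enqueue_to_fq(struct qbman_swp *s, uint32_t fqid,
+					const struct qbman_fd *fd)
+{
+	struct qbman_eq_desc d;
+	int ret;
+
+	qbman_eq_desc_clear(&d);
+	qbman_eq_desc_set_no_orp(&d, 0);	/* rejections go back to the FQ */
+	qbman_eq_desc_set_fq(&d, fqid);
+	do {
+		ret = qbman_swp_enqueue(s, &d, fd);
+	} while (ret == -EBUSY);
+	return ret;
+}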
+
+int qbman_swp_enqueue_multiple(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ uint32_t *flags,
+ int num_frames)
+{
+ uint32_t *p;
+ const uint32_t *cl = qb_cl(d);
+ uint32_t eqcr_ci, eqcr_pi;
+ uint8_t diff;
+ int i, num_enqueued = 0;
+ uint64_t addr_cena;
+
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ s->eqcr.ci = qbman_cena_read_reg(&s->sys,
+ QBMAN_CENA_SWP_EQCR_CI) & 0xF;
+ diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
+ eqcr_ci, s->eqcr.ci);
+ s->eqcr.available += diff;
+ if (!diff)
+ return 0;
+ }
+
+ eqcr_pi = s->eqcr.pi;
+ num_enqueued = (s->eqcr.available < num_frames) ?
+ s->eqcr.available : num_frames;
+ s->eqcr.available -= num_enqueued;
+ /* Fill in the EQCR ring */
+ for (i = 0; i < num_enqueued; i++) {
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
+ memcpy(&p[1], &cl[1], 28);
+ memcpy(&p[8], &fd[i], sizeof(*fd));
+ eqcr_pi++;
+ eqcr_pi &= 0xF;
+ }
+
+ lwsync();
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++) {
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
+ struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
+
+ d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
+ ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
+ }
+ eqcr_pi++;
+ eqcr_pi &= 0xF;
+ if (!(eqcr_pi & 7))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ }
+
+	/* Flush all the cachelines without any load/store in between */
+ eqcr_pi = s->eqcr.pi;
+ addr_cena = (size_t)s->sys.addr_cena;
+ for (i = 0; i < num_enqueued; i++) {
+ dcbf((addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & 7)));
+ eqcr_pi++;
+ eqcr_pi &= 0xF;
+ }
+ s->eqcr.pi = eqcr_pi;
+
+ return num_enqueued;
+}
+
+int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ int num_frames)
+{
+ uint32_t *p;
+ const uint32_t *cl;
+ uint32_t eqcr_ci, eqcr_pi;
+ uint8_t diff;
+ int i, num_enqueued = 0;
+ uint64_t addr_cena;
+
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ s->eqcr.ci = qbman_cena_read_reg(&s->sys,
+ QBMAN_CENA_SWP_EQCR_CI) & 0xF;
+ diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
+ eqcr_ci, s->eqcr.ci);
+ s->eqcr.available += diff;
+ if (!diff)
+ return 0;
+ }
+
+ eqcr_pi = s->eqcr.pi;
+ num_enqueued = (s->eqcr.available < num_frames) ?
+ s->eqcr.available : num_frames;
+ s->eqcr.available -= num_enqueued;
+ /* Fill in the EQCR ring */
+ for (i = 0; i < num_enqueued; i++) {
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
+ cl = qb_cl(&d[i]);
+ memcpy(&p[1], &cl[1], 28);
+ memcpy(&p[8], &fd[i], sizeof(*fd));
+ eqcr_pi++;
+ eqcr_pi &= 0xF;
+ }
+
+ lwsync();
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++) {
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
+ cl = qb_cl(&d[i]);
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ eqcr_pi++;
+ eqcr_pi &= 0xF;
+ if (!(eqcr_pi & 7))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ }
+
+	/* Flush all the cachelines without any load/store in between */
+ eqcr_pi = s->eqcr.pi;
+ addr_cena = (size_t)s->sys.addr_cena;
+ for (i = 0; i < num_enqueued; i++) {
+ dcbf((addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & 7)));
+ eqcr_pi++;
+ eqcr_pi &= 0xF;
+ }
+ s->eqcr.pi = eqcr_pi;
+
+ return num_enqueued;
+}
+
+/*************************/
+/* Static (push) dequeue */
+/*************************/
+
+void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled)
+{
+ uint16_t src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
+
+ QBMAN_BUG_ON(channel_idx > 15);
+	/* Report whether this channel's bit is set in the src map */
+	*enabled = (int)(src & (1 << channel_idx));
+}
+
+void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable)
+{
+ uint16_t dqsrc;
+
+ QBMAN_BUG_ON(channel_idx > 15);
+ if (enable)
+ s->sdq |= 1 << channel_idx;
+ else
+ s->sdq &= ~(1 << channel_idx);
+
+	/* Recompute the complete src map. If no channels are enabled
+	 * the SDQCR must be written as 0 or else the QMan HW will
+	 * assert errors.
+	 */
+ dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
+ if (dqsrc != 0)
+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq);
+ else
+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0);
+}
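+
+/* Illustrative sketch (not part of the driver): toggling push dequeuing for
+ * channel index 0. The portal pointer is hypothetical caller state.
+ */
+static inline void example_push_toggle(struct qbman_swp *s)
+{
+	qbman_swp_push_set(s, 0, 1);	/* SDQCR written with channel 0 set */
+	/* ... service entries via qbman_swp_dqrr_next() ... */
+	qbman_swp_push_set(s, 0, 0);	/* last channel cleared, SDQCR = 0 */
+}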
+
+/***************************/
+/* Volatile (pull) dequeue */
+/***************************/
+
+/* These should be const, eventually */
+#define QB_VDQCR_VERB_DCT_SHIFT 0
+#define QB_VDQCR_VERB_DT_SHIFT 2
+#define QB_VDQCR_VERB_RLS_SHIFT 4
+#define QB_VDQCR_VERB_WAE_SHIFT 5
+
+enum qb_pull_dt_e {
+ qb_pull_dt_channel,
+ qb_pull_dt_workqueue,
+ qb_pull_dt_framequeue
+};
+
+void qbman_pull_desc_clear(struct qbman_pull_desc *d)
+{
+ memset(d, 0, sizeof(*d));
+}
+
+void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
+ struct qbman_result *storage,
+ dma_addr_t storage_phys,
+ int stash)
+{
+ d->pull.rsp_addr_virt = (size_t)storage;
+
+ if (!storage) {
+ d->pull.verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
+ return;
+ }
+ d->pull.verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
+ if (stash)
+ d->pull.verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
+ else
+ d->pull.verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);
+
+ d->pull.rsp_addr = storage_phys;
+}
+
+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, uint8_t numframes)
+{
+ d->pull.numf = numframes - 1;
+}
+
+void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token)
+{
+ d->pull.tok = token;
+}
+
+void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)
+{
+ d->pull.verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
+ d->pull.verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
+ d->pull.dq_src = fqid;
+}
+
+void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
+ enum qbman_pull_type_e dct)
+{
+ d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
+ d->pull.verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
+ d->pull.dq_src = wqid;
+}
+
+void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
+ enum qbman_pull_type_e dct)
+{
+ d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
+ d->pull.verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
+ d->pull.dq_src = chid;
+}
+
+int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
+{
+ uint32_t *p;
+ uint32_t *cl = qb_cl(d);
+
+ if (!atomic_dec_and_test(&s->vdq.busy)) {
+ atomic_inc(&s->vdq.busy);
+ return -EBUSY;
+ }
+
+ d->pull.tok = s->sys.idx + 1;
+ s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
+ p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
+ memcpy(&p[1], &cl[1], 12);
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ lwsync();
+ p[0] = cl[0] | s->vdq.valid_bit;
+ s->vdq.valid_bit ^= QB_VALID_BIT;
+ qbman_cena_write_complete_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
+
+ return 0;
+}
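+
+/* Illustrative sketch (not part of the driver): issue a volatile dequeue of
+ * up to 4 frames from a frame queue into caller-provided storage. The
+ * storage, its physical address and the fqid are hypothetical caller state;
+ * completion is later detected via qbman_check_command_complete().
+ */
+static inline int example_pull_from_fq(struct qbman_swp *s, uint32_t fqid,
+				       struct qbman_result *storage,
+				       dma_addr_t storage_phys)
+{
+	struct qbman_pull_desc pd;
+
+	qbman_pull_desc_clear(&pd);
+	qbman_pull_desc_set_numframes(&pd, 4);
+	qbman_pull_desc_set_fq(&pd, fqid);
+	qbman_pull_desc_set_storage(&pd, storage, storage_phys, 0);
+	return qbman_swp_pull(s, &pd);	/* -EBUSY while VDQCR is occupied */
+}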
+
+/****************/
+/* Polling DQRR */
+/****************/
+
+#define QMAN_DQRR_PI_MASK 0xf
+
+#define QBMAN_RESULT_DQ 0x60
+#define QBMAN_RESULT_FQRN 0x21
+#define QBMAN_RESULT_FQRNI 0x22
+#define QBMAN_RESULT_FQPN 0x24
+#define QBMAN_RESULT_FQDAN 0x25
+#define QBMAN_RESULT_CDAN 0x26
+#define QBMAN_RESULT_CSCN_MEM 0x27
+#define QBMAN_RESULT_CGCU 0x28
+#define QBMAN_RESULT_BPSCN 0x29
+#define QBMAN_RESULT_CSCN_WQ 0x2a
+
+#include <rte_prefetch.h>
+
+void qbman_swp_prefetch_dqrr_next(struct qbman_swp *s)
+{
+ const struct qbman_result *p;
+
+ p = qbman_cena_read_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
+ rte_prefetch0(p);
+}
+
+/* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry
+ * only once, so repeated calls can return a sequence of DQRR entries, without
+ * requiring that they be consumed immediately or in any particular order.
+ */
+const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
+{
+ uint32_t verb;
+ uint32_t response_verb;
+ uint32_t flags;
+ const struct qbman_result *p;
+
+ /* Before using valid-bit to detect if something is there, we have to
+ * handle the case of the DQRR reset bug...
+ */
+ if (unlikely(s->dqrr.reset_bug)) {
+ /* We pick up new entries by cache-inhibited producer index,
+ * which means that a non-coherent mapping would require us to
+ * invalidate and read *only* once that PI has indicated that
+ * there's an entry here. The first trip around the DQRR ring
+ * will be much less efficient than all subsequent trips around
+ * it...
+ */
+ uint8_t pi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI) &
+ QMAN_DQRR_PI_MASK;
+
+ /* there are new entries if pi != next_idx */
+ if (pi == s->dqrr.next_idx)
+ return NULL;
+
+ /* if next_idx is/was the last ring index, and 'pi' is
+ * different, we can disable the workaround as all the ring
+ * entries have now been DMA'd to so valid-bit checking is
+ * repaired. Note: this logic needs to be based on next_idx
+ * (which increments one at a time), rather than on pi (which
+ * can burst and wrap-around between our snapshots of it).
+ */
+ QBMAN_BUG_ON((s->dqrr.dqrr_size - 1) < 0);
+ if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1u)) {
+ pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n",
+ s->dqrr.next_idx, pi);
+ s->dqrr.reset_bug = 0;
+ }
+ qbman_cena_invalidate_prefetch(&s->sys,
+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
+ }
+ p = qbman_cena_read_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
+ verb = p->dq.verb;
+
+	/* If the valid-bit isn't of the expected polarity, nothing is there.
+	 * Note, in the DQRR reset bug workaround, we shouldn't need to skip
+	 * this check, because we've already determined that a new entry is
+	 * available and we've invalidated the cacheline before reading it, so
+	 * the valid-bit behaviour is repaired and should tell us what we
+	 * already knew from reading PI.
+	 */
+ if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
+ return NULL;
+
+ /* There's something there. Move "next_idx" attention to the next ring
+ * entry (and prefetch it) before returning what we found.
+ */
+ s->dqrr.next_idx++;
+ if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
+ s->dqrr.next_idx = 0;
+ s->dqrr.valid_bit ^= QB_VALID_BIT;
+ }
+ /* If this is the final response to a volatile dequeue command
+ * indicate that the vdq is no longer busy
+ */
+ flags = p->dq.stat;
+ response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
+ if ((response_verb == QBMAN_RESULT_DQ) &&
+ (flags & QBMAN_DQ_STAT_VOLATILE) &&
+ (flags & QBMAN_DQ_STAT_EXPIRED))
+ atomic_inc(&s->vdq.busy);
+
+ return p;
+}
+
+/* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
+void qbman_swp_dqrr_consume(struct qbman_swp *s,
+ const struct qbman_result *dq)
+{
+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
+}
+
+/* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
+void qbman_swp_dqrr_idx_consume(struct qbman_swp *s,
+ uint8_t dqrr_index)
+{
+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, dqrr_index);
+}
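+
+/* Illustrative sketch (not part of the driver): drain whatever DQRR entries
+ * are currently visible, consuming each one. handle_fd() is a hypothetical
+ * caller-supplied handler.
+ */
+static inline void example_drain_dqrr(struct qbman_swp *s,
+				      void (*handle_fd)(const struct qbman_fd *))
+{
+	const struct qbman_result *dq;
+
+	while ((dq = qbman_swp_dqrr_next(s)) != NULL) {
+		if (qbman_result_is_DQ(dq))
+			handle_fd(qbman_result_DQ_fd(dq));
+		qbman_swp_dqrr_consume(s, dq);
+	}
+}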
+
+/*********************************/
+/* Polling user-provided storage */
+/*********************************/
+int qbman_result_has_new_result(struct qbman_swp *s,
+ struct qbman_result *dq)
+{
+ if (dq->dq.tok == 0)
+ return 0;
+
+ /*
+	 * Set the token to 0 so we will detect the change back to 1 the
+	 * next time this entry is polled. Const is cast away here
+ * as we want users to treat the dequeue responses as read only.
+ */
+ ((struct qbman_result *)dq)->dq.tok = 0;
+
+ /*
+ * VDQCR "no longer busy" hook - not quite the same as DQRR, because the
+ * fact "VDQCR" shows busy doesn't mean that we hold the result that
+ * makes it available. Eg. we may be looking at our 10th dequeue result,
+ * having released VDQCR after the 1st result and it is now busy due to
+ * some other command!
+ */
+ if (s->vdq.storage == dq) {
+ s->vdq.storage = NULL;
+ atomic_inc(&s->vdq.busy);
+ }
+
+ return 1;
+}
+
+int qbman_check_new_result(struct qbman_result *dq)
+{
+ if (dq->dq.tok == 0)
+ return 0;
+
+ /*
+	 * Set the token to 0 so we will detect the change back to 1 the
+	 * next time this entry is polled. Const is cast away here
+ * as we want users to treat the dequeue responses as read only.
+ */
+ ((struct qbman_result *)dq)->dq.tok = 0;
+
+ return 1;
+}
+
+int qbman_check_command_complete(struct qbman_result *dq)
+{
+ struct qbman_swp *s;
+
+ if (dq->dq.tok == 0)
+ return 0;
+
+ s = portal_idx_map[dq->dq.tok - 1];
+ /*
+ * VDQCR "no longer busy" hook - not quite the same as DQRR, because the
+ * fact "VDQCR" shows busy doesn't mean that we hold the result that
+ * makes it available. Eg. we may be looking at our 10th dequeue result,
+ * having released VDQCR after the 1st result and it is now busy due to
+ * some other command!
+ */
+ if (s->vdq.storage == dq) {
+ s->vdq.storage = NULL;
+ atomic_inc(&s->vdq.busy);
+ }
+
+ return 1;
+}
+
+/********************************/
+/* Categorising qbman results */
+/********************************/
+
+static inline int __qbman_result_is_x(const struct qbman_result *dq,
+ uint8_t x)
+{
+ uint8_t response_verb = dq->dq.verb & QBMAN_RESPONSE_VERB_MASK;
+
+ return (response_verb == x);
+}
+
+int qbman_result_is_DQ(const struct qbman_result *dq)
+{
+ return __qbman_result_is_x(dq, QBMAN_RESULT_DQ);
+}
+
+int qbman_result_is_FQDAN(const struct qbman_result *dq)
+{
+ return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN);
+}
+
+int qbman_result_is_CDAN(const struct qbman_result *dq)
+{
+ return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN);
+}
+
+int qbman_result_is_CSCN(const struct qbman_result *dq)
+{
+ return __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_MEM) ||
+ __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ);
+}
+
+int qbman_result_is_BPSCN(const struct qbman_result *dq)
+{
+ return __qbman_result_is_x(dq, QBMAN_RESULT_BPSCN);
+}
+
+int qbman_result_is_CGCU(const struct qbman_result *dq)
+{
+ return __qbman_result_is_x(dq, QBMAN_RESULT_CGCU);
+}
+
+int qbman_result_is_FQRN(const struct qbman_result *dq)
+{
+ return __qbman_result_is_x(dq, QBMAN_RESULT_FQRN);
+}
+
+int qbman_result_is_FQRNI(const struct qbman_result *dq)
+{
+ return __qbman_result_is_x(dq, QBMAN_RESULT_FQRNI);
+}
+
+int qbman_result_is_FQPN(const struct qbman_result *dq)
+{
+ return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN);
+}
+
+/*********************************/
+/* Parsing frame dequeue results */
+/*********************************/
+
+/* These APIs assume qbman_result_is_DQ() is TRUE */
+
+uint8_t qbman_result_DQ_flags(const struct qbman_result *dq)
+{
+ return dq->dq.stat;
+}
+
+uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq)
+{
+ return dq->dq.seqnum;
+}
+
+uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq)
+{
+ return dq->dq.oprid;
+}
+
+uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq)
+{
+ return dq->dq.fqid;
+}
+
+uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq)
+{
+ return dq->dq.fq_byte_cnt;
+}
+
+uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq)
+{
+ return dq->dq.fq_frm_cnt;
+}
+
+uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq)
+{
+ return dq->dq.fqd_ctx;
+}
+
+const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq)
+{
+ return (const struct qbman_fd *)&dq->dq.fd[0];
+}
+
+/**************************************/
+/* Parsing state-change notifications */
+/**************************************/
+uint8_t qbman_result_SCN_state(const struct qbman_result *scn)
+{
+ return scn->scn.state;
+}
+
+uint32_t qbman_result_SCN_rid(const struct qbman_result *scn)
+{
+ return scn->scn.rid_tok;
+}
+
+uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn)
+{
+ return scn->scn.ctx;
+}
+
+/*****************/
+/* Parsing BPSCN */
+/*****************/
+uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn)
+{
+ return (uint16_t)qbman_result_SCN_rid(scn) & 0x3FFF;
+}
+
+int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn)
+{
+ return !(int)(qbman_result_SCN_state(scn) & 0x1);
+}
+
+int qbman_result_bpscn_is_depleted(const struct qbman_result *scn)
+{
+ return (int)(qbman_result_SCN_state(scn) & 0x2);
+}
+
+int qbman_result_bpscn_is_surplus(const struct qbman_result *scn)
+{
+ return (int)(qbman_result_SCN_state(scn) & 0x4);
+}
+
+uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn)
+{
+ return qbman_result_SCN_ctx(scn);
+}
+
+/*****************/
+/* Parsing CGCU */
+/*****************/
+uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn)
+{
+ return (uint16_t)qbman_result_SCN_rid(scn) & 0xFFFF;
+}
+
+uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn)
+{
+ return qbman_result_SCN_ctx(scn);
+}
+
+/******************/
+/* Buffer release */
+/******************/
+#define QB_BR_RC_VALID_SHIFT 5
+#define QB_BR_RCDI_SHIFT 6
+
+void qbman_release_desc_clear(struct qbman_release_desc *d)
+{
+ memset(d, 0, sizeof(*d));
+ d->br.verb = 1 << QB_BR_RC_VALID_SHIFT;
+}
+
+void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint16_t bpid)
+{
+ d->br.bpid = bpid;
+}
+
+void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
+{
+ if (enable)
+ d->br.verb |= 1 << QB_BR_RCDI_SHIFT;
+ else
+ d->br.verb &= ~(1 << QB_BR_RCDI_SHIFT);
+}
+
+#define RAR_IDX(rar) ((rar) & 0x7)
+#define RAR_VB(rar) ((rar) & 0x80)
+#define RAR_SUCCESS(rar) ((rar) & 0x100)
+
+int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
+ const uint64_t *buffers, unsigned int num_buffers)
+{
+ uint32_t *p;
+ const uint32_t *cl = qb_cl(d);
+ uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
+
+ pr_debug("RAR=%08x\n", rar);
+ if (!RAR_SUCCESS(rar))
+ return -EBUSY;
+
+ QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
+
+ /* Start the release command */
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
+
+ /* Copy the caller's buffer pointers to the command */
+ u64_to_le32_copy(&p[2], buffers, num_buffers);
+
+ /* Set the verb byte, have to substitute in the valid-bit and the number
+ * of buffers.
+ */
+ lwsync();
+ p[0] = cl[0] | RAR_VB(rar) | num_buffers;
+ qbman_cena_write_complete_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
+
+ return 0;
+}
+
+/*******************/
+/* Buffer acquires */
+/*******************/
+struct qbman_acquire_desc {
+ uint8_t verb;
+ uint8_t reserved;
+ uint16_t bpid;
+ uint8_t num;
+ uint8_t reserved2[59];
+};
+
+struct qbman_acquire_rslt {
+ uint8_t verb;
+ uint8_t rslt;
+ uint16_t reserved;
+ uint8_t num;
+ uint8_t reserved2[3];
+ uint64_t buf[7];
+};
+
+int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
+ unsigned int num_buffers)
+{
+ struct qbman_acquire_desc *p;
+ struct qbman_acquire_rslt *r;
+
+ if (!num_buffers || (num_buffers > 7))
+ return -EINVAL;
+
+ /* Start the management command */
+ p = qbman_swp_mc_start(s);
+
+ if (!p)
+ return -EBUSY;
+
+ /* Encode the caller-provided attributes */
+ p->bpid = bpid;
+ p->num = num_buffers;
+
+ /* Complete the management command */
+ r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
+ if (unlikely(!r)) {
+ pr_err("qbman: acquire from BPID %d failed, no response\n",
+ bpid);
+ return -EIO;
+ }
+
+ /* Decode the outcome */
+ QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_MC_ACQUIRE);
+
+ /* Determine success or failure */
+ if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
+ pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
+ bpid, r->rslt);
+ return -EIO;
+ }
+
+ QBMAN_BUG_ON(r->num > num_buffers);
+
+ /* Copy the acquired buffers to the caller's array */
+ u64_from_le32_copy(buffers, &r->buf[0], r->num);
+
+ return (int)r->num;
+}
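+
+/* Illustrative sketch (not part of the driver): release two buffers to a
+ * buffer pool and acquire two back. The bpid and buffer addresses are
+ * hypothetical caller state.
+ */
+static inline int example_release_acquire(struct qbman_swp *s, uint16_t bpid,
+					  uint64_t buf0, uint64_t buf1)
+{
+	uint64_t bufs[2];
+	struct qbman_release_desc rd;
+	int ret;
+
+	bufs[0] = buf0;
+	bufs[1] = buf1;
+	qbman_release_desc_clear(&rd);
+	qbman_release_desc_set_bpid(&rd, bpid);
+	ret = qbman_swp_release(s, &rd, bufs, 2);
+	if (ret)
+		return ret;
+	/* Returns the number of buffers actually acquired (0..2) or -errno */
+	return qbman_swp_acquire(s, bpid, bufs, 2);
+}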
+
+/*****************/
+/* FQ management */
+/*****************/
+struct qbman_alt_fq_state_desc {
+ uint8_t verb;
+ uint8_t reserved[3];
+ uint32_t fqid;
+ uint8_t reserved2[56];
+};
+
+struct qbman_alt_fq_state_rslt {
+ uint8_t verb;
+ uint8_t rslt;
+ uint8_t reserved[62];
+};
+
+#define ALT_FQ_FQID_MASK 0x00FFFFFF
+
+static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,
+ uint8_t alt_fq_verb)
+{
+ struct qbman_alt_fq_state_desc *p;
+ struct qbman_alt_fq_state_rslt *r;
+
+ /* Start the management command */
+ p = qbman_swp_mc_start(s);
+ if (!p)
+ return -EBUSY;
+
+ p->fqid = fqid & ALT_FQ_FQID_MASK;
+
+ /* Complete the management command */
+ r = qbman_swp_mc_complete(s, p, alt_fq_verb);
+ if (unlikely(!r)) {
+ pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
+ alt_fq_verb);
+ return -EIO;
+ }
+
+ /* Decode the outcome */
+ QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != alt_fq_verb);
+
+ /* Determine success or failure */
+ if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
+ pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n",
+ fqid, alt_fq_verb, r->rslt);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid)
+{
+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
+}
+
+int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid)
+{
+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
+}
+
+int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid)
+{
+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
+}
+
+int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid)
+{
+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
+}
+
+/**********************/
+/* Channel management */
+/**********************/
+
+struct qbman_cdan_ctrl_desc {
+ uint8_t verb;
+ uint8_t reserved;
+ uint16_t ch;
+ uint8_t we;
+ uint8_t ctrl;
+ uint16_t reserved2;
+ uint64_t cdan_ctx;
+ uint8_t reserved3[48];
+};
+
+struct qbman_cdan_ctrl_rslt {
+ uint8_t verb;
+ uint8_t rslt;
+ uint16_t ch;
+ uint8_t reserved[60];
+};
+
+/* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so it
+ * would be irresponsible to expose it.
+ */
+#define CODE_CDAN_WE_EN 0x1
+#define CODE_CDAN_WE_CTX 0x4
+
+static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
+ uint8_t we_mask, uint8_t cdan_en,
+ uint64_t ctx)
+{
+ struct qbman_cdan_ctrl_desc *p;
+ struct qbman_cdan_ctrl_rslt *r;
+
+ /* Start the management command */
+ p = qbman_swp_mc_start(s);
+ if (!p)
+ return -EBUSY;
+
+ /* Encode the caller-provided attributes */
+ p->ch = channelid;
+ p->we = we_mask;
+ if (cdan_en)
+ p->ctrl = 1;
+ else
+ p->ctrl = 0;
+ p->cdan_ctx = ctx;
+
+ /* Complete the management command */
+ r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
+ if (unlikely(!r)) {
+ pr_err("qbman: wqchan config failed, no response\n");
+ return -EIO;
+ }
+
+ /* Decode the outcome */
+ QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK)
+ != QBMAN_WQCHAN_CONFIGURE);
+
+ /* Determine success or failure */
+ if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
+ pr_err("CDAN cQID %d failed: code = 0x%02x\n",
+ channelid, r->rslt);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
+ uint64_t ctx)
+{
+ return qbman_swp_CDAN_set(s, channelid,
+ CODE_CDAN_WE_CTX,
+ 0, ctx);
+}
+
+int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid)
+{
+ return qbman_swp_CDAN_set(s, channelid,
+ CODE_CDAN_WE_EN,
+ 1, 0);
+}
+
+int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid)
+{
+ return qbman_swp_CDAN_set(s, channelid,
+ CODE_CDAN_WE_EN,
+ 0, 0);
+}
+
+int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
+ uint64_t ctx)
+{
+ return qbman_swp_CDAN_set(s, channelid,
+ CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
+ 1, ctx);
+}
+
+uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr)
+{
+ return QBMAN_IDX_FROM_DQRR(dqrr);
+}
+
+struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx)
+{
+ struct qbman_result *dq;
+
+ dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(idx));
+ return dq;
+}
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_portal.h b/src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_portal.h
new file mode 100644
index 00000000..dbea22a1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_portal.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ *
+ */
+
+#include "qbman_sys.h"
+#include <fsl_qbman_portal.h>
+
+#define QMAN_REV_4000 0x04000000
+#define QMAN_REV_4100 0x04010000
+#define QMAN_REV_4101 0x04010001
+
+/* All QBMan command and result structures use this "valid bit" encoding */
+#define QB_VALID_BIT ((uint32_t)0x80)
+
+/* Management command result codes */
+#define QBMAN_MC_RSLT_OK 0xf0
+
+/* QBMan DQRR size is set at runtime in qbman_portal.c */
+
+#define QBMAN_EQCR_SIZE 8
+
+static inline uint8_t qm_cyc_diff(uint8_t ringsize, uint8_t first,
+ uint8_t last)
+{
+ /* 'first' is included, 'last' is excluded */
+ if (first <= last)
+ return last - first;
+ return (2 * ringsize) + last - first;
+}
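+
+/* e.g. with ringsize 8 the indices live in a 2*ringsize cyclic space (the
+ * extra bit tracks wrap-around): first=14, last=2 gives 16 + 2 - 14 = 4
+ * outstanding entries.
+ */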
+
+/* --------------------- */
+/* portal data structure */
+/* --------------------- */
+
+struct qbman_swp {
+ struct qbman_swp_desc desc;
+ /* The qbman_sys (ie. arch/OS-specific) support code can put anything it
+ * needs in here.
+ */
+ struct qbman_swp_sys sys;
+ /* Management commands */
+ struct {
+#ifdef QBMAN_CHECKING
+ enum swp_mc_check {
+ swp_mc_can_start, /* call __qbman_swp_mc_start() */
+ swp_mc_can_submit, /* call __qbman_swp_mc_submit() */
+ swp_mc_can_poll, /* call __qbman_swp_mc_result() */
+ } check;
+#endif
+ uint32_t valid_bit; /* 0x00 or 0x80 */
+ } mc;
+ /* Push dequeues */
+ uint32_t sdq;
+ /* Volatile dequeues */
+ struct {
+ /* VDQCR supports a "1 deep pipeline", meaning that if you know
+ * the last-submitted command is already executing in the
+ * hardware (as evidenced by at least 1 valid dequeue result),
+ * you can write another dequeue command to the register, the
+ * hardware will start executing it as soon as the
+ * already-executing command terminates. (This minimises latency
+ * and stalls.) With that in mind, this "busy" variable refers
+ * to whether or not a command can be submitted, not whether or
+ * not a previously-submitted command is still executing. In
+ * other words, once proof is seen that the previously-submitted
+ * command is executing, "vdq" is no longer "busy".
+ */
+ atomic_t busy;
+ uint32_t valid_bit; /* 0x00 or 0x80 */
+ /* We need to determine when vdq is no longer busy. This depends
+ * on whether the "busy" (last-submitted) dequeue command is
+		 * targeting DQRR or main-memory, and detection is based on the
+ * presence of the dequeue command's "token" showing up in
+ * dequeue entries in DQRR or main-memory (respectively).
+ */
+ struct qbman_result *storage; /* NULL if DQRR */
+ } vdq;
+ /* DQRR */
+ struct {
+ uint32_t next_idx;
+ uint32_t valid_bit;
+ uint8_t dqrr_size;
+ int reset_bug;
+ } dqrr;
+ struct {
+ uint32_t pi;
+ uint32_t pi_vb;
+ uint32_t ci;
+ int available;
+ } eqcr;
+};
+
+/* -------------------------- */
+/* portal management commands */
+/* -------------------------- */
+
+/* Different management commands all use this common base layer of code to
+ * issue commands and poll for results. The first function returns a pointer
+ * to where the caller should fill in their MC command (though they should
+ * ignore the verb byte), the second function merges in the caller-supplied
+ * command verb (which should not include the valid-bit) and submits the
+ * command to hardware, and the third function checks for a completed response
+ * (returns non-NULL if and only if the response is complete).
+ */
+void *qbman_swp_mc_start(struct qbman_swp *p);
+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint8_t cmd_verb);
+void *qbman_swp_mc_result(struct qbman_swp *p);
+
+/* Wraps up submit + poll-for-result */
+static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
+ uint8_t cmd_verb)
+{
+ int loopvar = 1000;
+
+ qbman_swp_mc_submit(swp, cmd, cmd_verb);
+	do {
+		cmd = qbman_swp_mc_result(swp);
+	} while (!cmd && --loopvar);
+	/* With pre-decrement, loopvar is 0 here if and only if we timed out */
+	QBMAN_BUG_ON(!loopvar);
+
+ return cmd;
+}
+
+/* ---------------------- */
+/* Descriptors/cachelines */
+/* ---------------------- */
+
+/* To avoid needless dynamic allocation, the driver API often gives the caller
+ * a "descriptor" type that the caller can instantiate however they like.
+ * Ultimately though, it is just a cacheline of binary storage (or something
+ * smaller when it is known that the descriptor doesn't need all 64 bytes) for
+ * holding pre-formatted pieces of hardware commands. The performance-critical
+ * code can then copy these descriptors directly into hardware command
+ * registers more efficiently than trying to construct/format commands
+ * on-the-fly. The API user sees the descriptor as an array of 32-bit words in
+ * order for the compiler to know its size, but the internal details are not
+ * exposed. The following macro is used within the driver for converting *any*
+ * descriptor pointer to a usable array pointer. The use of a macro (instead of
+ * an inline) is necessary to work with different descriptor types and to work
+ * correctly with const and non-const inputs (and similarly-qualified outputs).
+ */
+#define qb_cl(d) (&(d)->donot_manipulate_directly[0])
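+
+/* e.g. given "struct qbman_eq_desc d;", qb_cl(&d) yields a uint32_t * view of
+ * the same storage, suitable for copying word-by-word into a hardware command
+ * (as the enqueue code in qbman_portal.c does).
+ */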
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_sys.h b/src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_sys.h
new file mode 100644
index 00000000..2bd33ea5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_sys.h
@@ -0,0 +1,380 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ *
+ */
+/* qbman_sys_decl.h and qbman_sys.h are the two platform-specific files in the
+ * driver. They are only included via qbman_private.h, which is itself a
+ * platform-independent file and is included by all the other driver source.
+ *
+ * qbman_sys_decl.h is included prior to all other declarations and logic, and
+ * it exists to provide compatibility with any linux interfaces our
+ * single-source driver code is dependent on (eg. kmalloc). Ie. this file
+ * provides linux compatibility.
+ *
+ * This qbman_sys.h header, on the other hand, is included *after* any common
+ * and platform-neutral declarations and logic in qbman_private.h, and exists to
+ * implement any platform-specific logic of the qbman driver itself. Ie. it is
+ * *not* to provide linux compatibility.
+ */
+
+#include "qbman_sys_decl.h"
+
+#define CENA_WRITE_ENABLE 0
+#define CINH_WRITE_ENABLE 1
+
+/* Debugging assists */
+static inline void __hexdump(unsigned long start, unsigned long end,
+ unsigned long p, size_t sz, const unsigned char *c)
+{
+ while (start < end) {
+ unsigned int pos = 0;
+ char buf[64];
+ int nl = 0;
+
+ pos += sprintf(buf + pos, "%08lx: ", start);
+ do {
+ if ((start < p) || (start >= (p + sz)))
+ pos += sprintf(buf + pos, "..");
+ else
+ pos += sprintf(buf + pos, "%02x", *(c++));
+ if (!(++start & 15)) {
+ buf[pos++] = '\n';
+ nl = 1;
+ } else {
+ nl = 0;
+ if (!(start & 1))
+ buf[pos++] = ' ';
+ if (!(start & 3))
+ buf[pos++] = ' ';
+ }
+ } while (start & 15);
+ if (!nl)
+ buf[pos++] = '\n';
+ buf[pos] = '\0';
+ pr_info("%s", buf);
+ }
+}
+
+static inline void hexdump(const void *ptr, size_t sz)
+{
+ unsigned long p = (unsigned long)ptr;
+ unsigned long start = p & ~15;
+ unsigned long end = (p + sz + 15) & ~15;
+ const unsigned char *c = ptr;
+
+ __hexdump(start, end, p, sz, c);
+}
+
+/* Currently, the CENA support code expects each 32-bit word to be written in
+ * host order, and these are converted to hardware (little-endian) order on
+ * command submission. However, 64-bit quantities must be written (and read)
+ * as two 32-bit words with the least-significant word first, irrespective of
+ * host endianness.
+ */
+static inline void u64_to_le32_copy(void *d, const uint64_t *s,
+ unsigned int cnt)
+{
+ uint32_t *dd = d;
+ const uint32_t *ss = (const uint32_t *)s;
+
+ while (cnt--) {
+ /* TBD: the toolchain was choking on the use of 64-bit types up
+ * until recently so this works entirely with 32-bit variables.
+ * When 64-bit types become usable again, investigate better
+ * ways of doing this.
+ */
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ *(dd++) = ss[1];
+ *(dd++) = ss[0];
+ ss += 2;
+#else
+ *(dd++) = *(ss++);
+ *(dd++) = *(ss++);
+#endif
+ }
+}
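+
+/* e.g. copying the single value 0x1122334455667788ULL emits the 32-bit word
+ * values 0x55667788 then 0x11223344, on big- and little-endian hosts alike.
+ */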
+
+static inline void u64_from_le32_copy(uint64_t *d, const void *s,
+ unsigned int cnt)
+{
+ const uint32_t *ss = s;
+ uint32_t *dd = (uint32_t *)d;
+
+ while (cnt--) {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ dd[1] = *(ss++);
+ dd[0] = *(ss++);
+ dd += 2;
+#else
+ *(dd++) = *(ss++);
+ *(dd++) = *(ss++);
+#endif
+ }
+}
+
+ /******************/
+ /* Portal access */
+ /******************/
+struct qbman_swp_sys {
+	/* On GPP, the sys support for qbman_swp is here. The CENA region is
+ * not an mmap() of the real portal registers, but an allocated
+ * place-holder, because the actual writes/reads to/from the portal are
+ * marshalled from these allocated areas using QBMan's "MC access
+ * registers". CINH accesses are atomic so there's no need for a
+ * place-holder.
+ */
+ uint8_t *cena;
+ uint8_t __iomem *addr_cena;
+ uint8_t __iomem *addr_cinh;
+ uint32_t idx;
+ enum qbman_eqcr_mode eqcr_mode;
+};
+
+/* P_OFFSET is (ACCESS_CMD,0,12) - offset within the portal
+ * C is (ACCESS_CMD,12,1) - is inhibited? (0==CENA, 1==CINH)
+ * SWP_IDX is (ACCESS_CMD,16,10) - Software portal index
+ * P is (ACCESS_CMD,28,1) - (0==special portal, 1==any portal)
+ * T is (ACCESS_CMD,29,1) - Command type (0==READ, 1==WRITE)
+ * E is (ACCESS_CMD,31,1) - Command execute (1 to issue, poll for 0==complete)
+ */
+
+static inline void qbman_cinh_write(struct qbman_swp_sys *s, uint32_t offset,
+ uint32_t val)
+{
+ __raw_writel(val, s->addr_cinh + offset);
+#ifdef QBMAN_CINH_TRACE
+ pr_info("qbman_cinh_write(%p:%d:0x%03x) 0x%08x\n",
+ s->addr_cinh, s->idx, offset, val);
+#endif
+}
+
+static inline uint32_t qbman_cinh_read(struct qbman_swp_sys *s, uint32_t offset)
+{
+ uint32_t reg = __raw_readl(s->addr_cinh + offset);
+#ifdef QBMAN_CINH_TRACE
+ pr_info("qbman_cinh_read(%p:%d:0x%03x) 0x%08x\n",
+ s->addr_cinh, s->idx, offset, reg);
+#endif
+ return reg;
+}
+
+static inline void *qbman_cena_write_start(struct qbman_swp_sys *s,
+ uint32_t offset)
+{
+ void *shadow = s->cena + offset;
+
+#ifdef QBMAN_CENA_TRACE
+ pr_info("qbman_cena_write_start(%p:%d:0x%03x) %p\n",
+ s->addr_cena, s->idx, offset, shadow);
+#endif
+ QBMAN_BUG_ON(offset & 63);
+ dcbz(shadow);
+ return shadow;
+}
+
+static inline void *qbman_cena_write_start_wo_shadow(struct qbman_swp_sys *s,
+ uint32_t offset)
+{
+#ifdef QBMAN_CENA_TRACE
+ pr_info("qbman_cena_write_start(%p:%d:0x%03x)\n",
+ s->addr_cena, s->idx, offset);
+#endif
+ QBMAN_BUG_ON(offset & 63);
+#ifdef RTE_ARCH_64
+ return (s->addr_cena + offset);
+#else
+ return (s->addr_cinh + offset);
+#endif
+}
+
+static inline void qbman_cena_write_complete(struct qbman_swp_sys *s,
+ uint32_t offset, void *cmd)
+{
+ const uint32_t *shadow = cmd;
+ int loop;
+#ifdef QBMAN_CENA_TRACE
+ pr_info("qbman_cena_write_complete(%p:%d:0x%03x) %p\n",
+ s->addr_cena, s->idx, offset, shadow);
+ hexdump(cmd, 64);
+#endif
+#ifdef RTE_ARCH_64
+ for (loop = 15; loop >= 1; loop--)
+ __raw_writel(shadow[loop], s->addr_cena +
+ offset + loop * 4);
+ lwsync();
+ __raw_writel(shadow[0], s->addr_cena + offset);
+#else
+ for (loop = 15; loop >= 1; loop--)
+ __raw_writel(shadow[loop], s->addr_cinh +
+ offset + loop * 4);
+ lwsync();
+ __raw_writel(shadow[0], s->addr_cinh + offset);
+#endif
+ dcbf(s->addr_cena + offset);
+}
+
+static inline void qbman_cena_write_complete_wo_shadow(struct qbman_swp_sys *s,
+ uint32_t offset)
+{
+#ifdef QBMAN_CENA_TRACE
+ pr_info("qbman_cena_write_complete(%p:%d:0x%03x)\n",
+ s->addr_cena, s->idx, offset);
+#endif
+ dcbf(s->addr_cena + offset);
+}
+
+static inline uint32_t qbman_cena_read_reg(struct qbman_swp_sys *s,
+ uint32_t offset)
+{
+ return __raw_readl(s->addr_cena + offset);
+}
+
+static inline void *qbman_cena_read(struct qbman_swp_sys *s, uint32_t offset)
+{
+ uint32_t *shadow = (uint32_t *)(s->cena + offset);
+ unsigned int loop;
+#ifdef QBMAN_CENA_TRACE
+ pr_info("qbman_cena_read(%p:%d:0x%03x) %p\n",
+ s->addr_cena, s->idx, offset, shadow);
+#endif
+
+#ifdef RTE_ARCH_64
+ for (loop = 0; loop < 16; loop++)
+ shadow[loop] = __raw_readl(s->addr_cena + offset
+ + loop * 4);
+#else
+ for (loop = 0; loop < 16; loop++)
+ shadow[loop] = __raw_readl(s->addr_cinh + offset
+ + loop * 4);
+#endif
+#ifdef QBMAN_CENA_TRACE
+ hexdump(shadow, 64);
+#endif
+ return shadow;
+}
+
+static inline void *qbman_cena_read_wo_shadow(struct qbman_swp_sys *s,
+ uint32_t offset)
+{
+#ifdef QBMAN_CENA_TRACE
+ pr_info("qbman_cena_read(%p:%d:0x%03x)\n",
+ s->addr_cena, s->idx, offset);
+#endif
+ return s->addr_cena + offset;
+}
+
+static inline void qbman_cena_invalidate(struct qbman_swp_sys *s,
+ uint32_t offset)
+{
+ dccivac(s->addr_cena + offset);
+}
+
+static inline void qbman_cena_invalidate_prefetch(struct qbman_swp_sys *s,
+ uint32_t offset)
+{
+ dccivac(s->addr_cena + offset);
+ prefetch_for_load(s->addr_cena + offset);
+}
+
+static inline void qbman_cena_prefetch(struct qbman_swp_sys *s,
+ uint32_t offset)
+{
+ prefetch_for_load(s->addr_cena + offset);
+}
+
+ /******************/
+ /* Portal support */
+ /******************/
+
+/* The SWP_CFG portal register is special, in that it is used by the
+ * platform-specific code rather than the platform-independent code in
+ * qbman_portal.c. So use of it is declared locally here.
+ */
+#define QBMAN_CINH_SWP_CFG 0xd00
+#define SWP_CFG_DQRR_MF_SHIFT 20
+#define SWP_CFG_EST_SHIFT 16
+#define SWP_CFG_WN_SHIFT 14
+#define SWP_CFG_RPM_SHIFT 12
+#define SWP_CFG_DCM_SHIFT 10
+#define SWP_CFG_EPM_SHIFT 8
+#define SWP_CFG_SD_SHIFT 5
+#define SWP_CFG_SP_SHIFT 4
+#define SWP_CFG_SE_SHIFT 3
+#define SWP_CFG_DP_SHIFT 2
+#define SWP_CFG_DE_SHIFT 1
+#define SWP_CFG_EP_SHIFT 0
+
+static inline uint32_t qbman_set_swp_cfg(uint8_t max_fill, uint8_t wn,
+ uint8_t est, uint8_t rpm, uint8_t dcm,
+ uint8_t epm, int sd, int sp, int se,
+ int dp, int de, int ep)
+{
+ uint32_t reg;
+
+ reg = (max_fill << SWP_CFG_DQRR_MF_SHIFT |
+ est << SWP_CFG_EST_SHIFT |
+ wn << SWP_CFG_WN_SHIFT |
+ rpm << SWP_CFG_RPM_SHIFT |
+ dcm << SWP_CFG_DCM_SHIFT |
+ epm << SWP_CFG_EPM_SHIFT |
+ sd << SWP_CFG_SD_SHIFT |
+ sp << SWP_CFG_SP_SHIFT |
+ se << SWP_CFG_SE_SHIFT |
+ dp << SWP_CFG_DP_SHIFT |
+ de << SWP_CFG_DE_SHIFT |
+ ep << SWP_CFG_EP_SHIFT);
+
+ return reg;
+}
+
+static inline int qbman_swp_sys_init(struct qbman_swp_sys *s,
+ const struct qbman_swp_desc *d,
+ uint8_t dqrr_size)
+{
+ uint32_t reg;
+#ifdef RTE_ARCH_64
+ uint8_t wn = CENA_WRITE_ENABLE;
+#else
+ uint8_t wn = CINH_WRITE_ENABLE;
+#endif
+
+ s->addr_cena = d->cena_bar;
+ s->addr_cinh = d->cinh_bar;
+ s->idx = (uint32_t)d->idx;
+ s->cena = malloc(4096);
+ if (!s->cena) {
+ pr_err("Could not allocate page for cena shadow\n");
+ return -1;
+ }
+ s->eqcr_mode = d->eqcr_mode;
+ QBMAN_BUG_ON(d->idx < 0);
+#ifdef QBMAN_CHECKING
+ /* We should never be asked to initialise for a portal that isn't in
+ * the power-on state. (Ie. don't forget to reset portals when they are
+ * decommissioned!)
+ */
+ reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);
+ QBMAN_BUG_ON(reg);
+#endif
+ if (s->eqcr_mode == qman_eqcr_vb_array)
+ reg = qbman_set_swp_cfg(dqrr_size, wn, 0, 3, 2, 3, 1, 1, 1, 1,
+ 1, 1);
+ else
+ reg = qbman_set_swp_cfg(dqrr_size, wn, 1, 3, 2, 2, 1, 1, 1, 1,
+ 1, 1);
+ qbman_cinh_write(s, QBMAN_CINH_SWP_CFG, reg);
+ reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);
+ if (!reg) {
+ pr_err("The portal %d is not enabled!\n", s->idx);
+ free(s->cena);
+ return -1;
+ }
+ return 0;
+}
+
+static inline void qbman_swp_sys_finish(struct qbman_swp_sys *s)
+{
+ free(s->cena);
+}
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_sys_decl.h b/src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_sys_decl.h
new file mode 100644
index 00000000..fa6977fe
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/qbman/qbman_sys_decl.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ *
+ */
+#include <compat.h>
+#include <fsl_qbman_base.h>
+
+/* Sanity check */
+#if (__BYTE_ORDER__ != __ORDER_BIG_ENDIAN__) && \
+ (__BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__)
+#error "Unknown endianness!"
+#endif
+
+ /****************/
+ /* arch assists */
+ /****************/
+#if defined(RTE_ARCH_ARM64)
+#define dcbz(p) { asm volatile("dc zva, %0" : : "r" (p) : "memory"); }
+#define lwsync() { asm volatile("dmb st" : : : "memory"); }
+#define dcbf(p) { asm volatile("dc cvac, %0" : : "r"(p) : "memory"); }
+#define dccivac(p) { asm volatile("dc civac, %0" : : "r"(p) : "memory"); }
+static inline void prefetch_for_load(void *p)
+{
+ asm volatile("prfm pldl1keep, [%0, #0]" : : "r" (p));
+}
+
+static inline void prefetch_for_store(void *p)
+{
+ asm volatile("prfm pstl1keep, [%0, #0]" : : "r" (p));
+}
+#elif defined(RTE_ARCH_ARM)
+#define dcbz(p) memset(p, 0, 64)
+#define lwsync() { asm volatile("dmb st" : : : "memory"); }
+#define dcbf(p) RTE_SET_USED(p)
+#define dccivac(p) RTE_SET_USED(p)
+#define prefetch_for_load(p) { asm volatile ("pld [%0]" : : "r" (p)); }
+#define prefetch_for_store(p) { asm volatile ("pld [%0]" : : "r" (p)); }
+
+#else
+#define dcbz(p) RTE_SET_USED(p)
+#define lwsync()
+#define dcbf(p) RTE_SET_USED(p)
+#define dccivac(p) RTE_SET_USED(p)
+static inline void prefetch_for_load(void *p)
+{
+ RTE_SET_USED(p);
+}
+static inline void prefetch_for_store(void *p)
+{
+ RTE_SET_USED(p);
+}
+#endif
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/rte_bus_fslmc_version.map b/src/spdk/dpdk/drivers/bus/fslmc/rte_bus_fslmc_version.map
new file mode 100644
index 00000000..fe45a113
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/rte_bus_fslmc_version.map
@@ -0,0 +1,118 @@
+DPDK_17.05 {
+ global:
+
+ dpaa2_affine_qbman_swp;
+ dpaa2_alloc_dpbp_dev;
+ dpaa2_alloc_dq_storage;
+ dpaa2_free_dpbp_dev;
+ dpaa2_free_dq_storage;
+ dpbp_disable;
+ dpbp_enable;
+ dpbp_get_attributes;
+ dpbp_get_num_free_bufs;
+ dpbp_open;
+ dpbp_reset;
+ dpio_close;
+ dpio_disable;
+ dpio_enable;
+ dpio_get_attributes;
+ dpio_open;
+ dpio_reset;
+ dpio_set_stashing_destination;
+ mc_send_command;
+ per_lcore__dpaa2_io;
+ qbman_check_command_complete;
+ qbman_eq_desc_clear;
+ qbman_eq_desc_set_fq;
+ qbman_eq_desc_set_no_orp;
+ qbman_eq_desc_set_qd;
+ qbman_eq_desc_set_response;
+ qbman_pull_desc_clear;
+ qbman_pull_desc_set_fq;
+ qbman_pull_desc_set_numframes;
+ qbman_pull_desc_set_storage;
+ qbman_release_desc_clear;
+ qbman_release_desc_set_bpid;
+ qbman_result_DQ_fd;
+ qbman_result_DQ_flags;
+ qbman_result_has_new_result;
+ qbman_swp_acquire;
+ qbman_swp_pull;
+ qbman_swp_release;
+ rte_fslmc_driver_register;
+ rte_fslmc_driver_unregister;
+ rte_fslmc_vfio_dmamap;
+ rte_mcp_ptr_list;
+
+ local: *;
+};
+
+DPDK_17.08 {
+ global:
+
+ dpaa2_io_portal;
+ dpaa2_get_qbman_swp;
+ dpci_set_rx_queue;
+ dpcon_open;
+ dpcon_get_attributes;
+ dpio_add_static_dequeue_channel;
+ dpio_remove_static_dequeue_channel;
+ mc_get_soc_version;
+ mc_get_version;
+ qbman_check_new_result;
+ qbman_eq_desc_set_dca;
+ qbman_get_dqrr_from_idx;
+ qbman_get_dqrr_idx;
+ qbman_result_DQ_fqd_ctx;
+ qbman_result_SCN_state;
+ qbman_swp_dqrr_consume;
+ qbman_swp_dqrr_next;
+ qbman_swp_enqueue_multiple;
+ qbman_swp_enqueue_multiple_desc;
+ qbman_swp_interrupt_clear_status;
+ qbman_swp_push_set;
+ rte_dpaa2_alloc_dpci_dev;
+ rte_fslmc_object_register;
+ rte_global_active_dqs_list;
+
+} DPDK_17.05;
+
+DPDK_17.11 {
+ global:
+
+ dpaa2_dpbp_supported;
+ rte_dpaa2_dev_type;
+ rte_dpaa2_intr_disable;
+ rte_dpaa2_intr_enable;
+
+} DPDK_17.08;
+
+DPDK_18.02 {
+ global:
+
+ dpaa2_svr_family;
+ dpaa2_virt_mode;
+ per_lcore_dpaa2_held_bufs;
+ qbman_fq_query_state;
+ qbman_fq_state_frame_count;
+ qbman_swp_dqrr_idx_consume;
+ qbman_swp_prefetch_dqrr_next;
+ rte_fslmc_get_device_count;
+
+} DPDK_17.11;
+
+DPDK_18.05 {
+ global:
+
+ dpaa2_affine_qbman_ethrx_swp;
+ dpdmai_close;
+ dpdmai_disable;
+ dpdmai_enable;
+ dpdmai_get_attributes;
+ dpdmai_get_rx_queue;
+ dpdmai_get_tx_queue;
+ dpdmai_open;
+ dpdmai_set_rx_queue;
+ rte_dpaa2_free_dpci_dev;
+
+} DPDK_18.02;
diff --git a/src/spdk/dpdk/drivers/bus/fslmc/rte_fslmc.h b/src/spdk/dpdk/drivers/bus/fslmc/rte_fslmc.h
new file mode 100644
index 00000000..cea5b78f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/fslmc/rte_fslmc.h
@@ -0,0 +1,216 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef _RTE_FSLMC_H_
+#define _RTE_FSLMC_H_
+
+/**
+ * @file
+ *
+ * RTE FSLMC Bus Interface
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <limits.h>
+#include <errno.h>
+#include <sys/queue.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <linux/vfio.h>
+
+#include <rte_debug.h>
+#include <rte_interrupts.h>
+#include <rte_dev.h>
+#include <rte_bus.h>
+#include <rte_tailq.h>
+#include <rte_devargs.h>
+
+#include <fslmc_vfio.h>
+
+#define FSLMC_OBJECT_MAX_LEN 32 /**< Length of each device on bus */
+
+
+/** Device driver supports link state interrupt */
+#define RTE_DPAA2_DRV_INTR_LSC 0x0008
+
+/** Device driver supports IOVA as VA */
+#define RTE_DPAA2_DRV_IOVA_AS_VA 0x0040
+
+struct rte_dpaa2_driver;
+
+/* DPAA2 Device and Driver lists for FSLMC bus */
+TAILQ_HEAD(rte_fslmc_device_list, rte_dpaa2_device);
+TAILQ_HEAD(rte_fslmc_driver_list, rte_dpaa2_driver);
+
+#define RTE_DEV_TO_FSLMC_CONST(ptr) \
+ container_of(ptr, const struct rte_dpaa2_device, device)
+
+extern struct rte_fslmc_bus rte_fslmc_bus;
+
+enum rte_dpaa2_dev_type {
+ /* Devices backed by DPDK driver */
+	DPAA2_ETH,	/**< DPNI type device */
+ DPAA2_CRYPTO, /**< DPSECI type device */
+ DPAA2_CON, /**< DPCONC type device */
+ /* Devices not backed by a DPDK driver: DPIO, DPBP, DPCI, DPMCP */
+ DPAA2_BPOOL, /**< DPBP type device */
+ DPAA2_IO, /**< DPIO type device */
+ DPAA2_CI, /**< DPCI type device */
+ DPAA2_MPORTAL, /**< DPMCP type device */
+ DPAA2_QDMA, /**< DPDMAI type device */
+ /* Unknown device placeholder */
+ DPAA2_UNKNOWN,
+ DPAA2_DEVTYPE_MAX,
+};
+
+TAILQ_HEAD(rte_dpaa2_object_list, rte_dpaa2_object);
+
+typedef int (*rte_dpaa2_obj_create_t)(int vdev_fd,
+ struct vfio_device_info *obj_info,
+ int object_id);
+
+/**
+ * A structure describing a DPAA2 object.
+ */
+struct rte_dpaa2_object {
+ TAILQ_ENTRY(rte_dpaa2_object) next; /**< Next in list. */
+ const char *name; /**< Name of Object. */
+ enum rte_dpaa2_dev_type dev_type; /**< Type of device */
+ rte_dpaa2_obj_create_t create;
+};
+
+/**
+ * A structure describing a DPAA2 device.
+ */
+struct rte_dpaa2_device {
+ TAILQ_ENTRY(rte_dpaa2_device) next; /**< Next probed DPAA2 device. */
+ struct rte_device device; /**< Inherit core device */
+ union {
+ struct rte_eth_dev *eth_dev; /**< ethernet device */
+ struct rte_cryptodev *cryptodev; /**< Crypto Device */
+ struct rte_rawdev *rawdev; /**< Raw Device */
+ };
+ enum rte_dpaa2_dev_type dev_type; /**< Device Type */
+ uint16_t object_id; /**< DPAA2 Object ID */
+ struct rte_intr_handle intr_handle; /**< Interrupt handle */
+ struct rte_dpaa2_driver *driver; /**< Associated driver */
+	char name[FSLMC_OBJECT_MAX_LEN];  /**< DPAA2 Object name */
+};
+
+typedef int (*rte_dpaa2_probe_t)(struct rte_dpaa2_driver *dpaa2_drv,
+ struct rte_dpaa2_device *dpaa2_dev);
+typedef int (*rte_dpaa2_remove_t)(struct rte_dpaa2_device *dpaa2_dev);
+
+/**
+ * A structure describing a DPAA2 driver.
+ */
+struct rte_dpaa2_driver {
+ TAILQ_ENTRY(rte_dpaa2_driver) next; /**< Next in list. */
+ struct rte_driver driver; /**< Inherit core driver. */
+ struct rte_fslmc_bus *fslmc_bus; /**< FSLMC bus reference */
+ uint32_t drv_flags; /**< Flags for controlling device.*/
+ enum rte_dpaa2_dev_type drv_type; /**< Driver Type */
+ rte_dpaa2_probe_t probe;
+ rte_dpaa2_remove_t remove;
+};
+
+/*
+ * FSLMC bus
+ */
+struct rte_fslmc_bus {
+ struct rte_bus bus; /**< Generic Bus object */
+ struct rte_fslmc_device_list device_list;
+ /**< FSLMC DPAA2 Device list */
+ struct rte_fslmc_driver_list driver_list;
+ /**< FSLMC DPAA2 Driver list */
+ int device_count[DPAA2_DEVTYPE_MAX];
+ /**< Count of all devices scanned */
+};
+
+#define DPAA2_PORTAL_DEQUEUE_DEPTH 32
+
+/* Create storage for dqrr entries per lcore */
+struct dpaa2_portal_dqrr {
+ struct rte_mbuf *mbuf[DPAA2_PORTAL_DEQUEUE_DEPTH];
+ uint64_t dqrr_held;
+ uint8_t dqrr_size;
+};
+
+RTE_DECLARE_PER_LCORE(struct dpaa2_portal_dqrr, dpaa2_held_bufs);
+
+#define DPAA2_PER_LCORE_DQRR_SIZE \
+ RTE_PER_LCORE(dpaa2_held_bufs).dqrr_size
+#define DPAA2_PER_LCORE_DQRR_HELD \
+ RTE_PER_LCORE(dpaa2_held_bufs).dqrr_held
+#define DPAA2_PER_LCORE_DQRR_MBUF(i) \
+ RTE_PER_LCORE(dpaa2_held_bufs).mbuf[i]
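+
+/*
+ * Usage sketch (illustrative; 'drain_mbuf' is a hypothetical callback):
+ * a driver holding mbufs against DQRR entries on this lcore can drain
+ * them as follows:
+ *
+ *	unsigned int i;
+ *	for (i = 0; i < DPAA2_PORTAL_DEQUEUE_DEPTH; i++) {
+ *		if (DPAA2_PER_LCORE_DQRR_HELD & (1UL << i)) {
+ *			drain_mbuf(DPAA2_PER_LCORE_DQRR_MBUF(i));
+ *			DPAA2_PER_LCORE_DQRR_HELD &= ~(1UL << i);
+ *		}
+ *	}
+ */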
+
+/**
+ * Register a DPAA2 driver.
+ *
+ * @param driver
+ * A pointer to a rte_dpaa2_driver structure describing the driver
+ * to be registered.
+ */
+void rte_fslmc_driver_register(struct rte_dpaa2_driver *driver);
+
+/**
+ * Unregister a DPAA2 driver.
+ *
+ * @param driver
+ * A pointer to a rte_dpaa2_driver structure describing the driver
+ * to be unregistered.
+ */
+void rte_fslmc_driver_unregister(struct rte_dpaa2_driver *driver);
+
+/** Helper for DPAA2 device registration from driver (eth, crypto) instance */
+#define RTE_PMD_REGISTER_DPAA2(nm, dpaa2_drv) \
+RTE_INIT(dpaa2initfn_ ##nm) \
+{\
+ (dpaa2_drv).driver.name = RTE_STR(nm);\
+ rte_fslmc_driver_register(&dpaa2_drv); \
+} \
+RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
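+
+/*
+ * Example (sketch): registering a hypothetical DPAA2 PMD; 'my_dpaa2_probe',
+ * 'my_dpaa2_remove' and 'my_dpaa2_drv' are placeholder names:
+ *
+ *	static struct rte_dpaa2_driver my_dpaa2_drv = {
+ *		.drv_type = DPAA2_ETH,
+ *		.probe = my_dpaa2_probe,
+ *		.remove = my_dpaa2_remove,
+ *	};
+ *	RTE_PMD_REGISTER_DPAA2(net_my_dpaa2, my_dpaa2_drv);
+ */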
+
+/**
+ * Register a DPAA2 MC Object driver.
+ *
+ * @param object
+ *   A pointer to a rte_dpaa2_object structure describing the MC object
+ * to be registered.
+ */
+void rte_fslmc_object_register(struct rte_dpaa2_object *object);
+
+/**
+ * Count of a particular type of DPAA2 device scanned on the bus.
+ *
+ * @param device_type
+ *   Type of device as rte_dpaa2_dev_type enumerator
+ * @return
+ *   >=0 for count; 0 indicates either that no device of the given type was
+ *   scanned or that the device type is invalid.
+ */
+uint32_t rte_fslmc_get_device_count(enum rte_dpaa2_dev_type device_type);
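+
+/*
+ * Example (sketch): counting the DPNI (ethernet) devices found on the bus:
+ *
+ *	uint32_t nb_eth = rte_fslmc_get_device_count(DPAA2_ETH);
+ */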
+
+/** Helper for DPAA2 object registration */
+#define RTE_PMD_REGISTER_DPAA2_OBJECT(nm, dpaa2_obj) \
+RTE_INIT(dpaa2objinitfn_ ##nm) \
+{\
+ (dpaa2_obj).name = RTE_STR(nm);\
+ rte_fslmc_object_register(&dpaa2_obj); \
+} \
+RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
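+
+/*
+ * Example (sketch): registering a hypothetical MC object handler;
+ * 'my_dpio_create' and 'my_dpio_obj' are placeholder names:
+ *
+ *	static struct rte_dpaa2_object my_dpio_obj = {
+ *		.dev_type = DPAA2_IO,
+ *		.create = my_dpio_create,
+ *	};
+ *	RTE_PMD_REGISTER_DPAA2_OBJECT(dpio, my_dpio_obj);
+ */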
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_FSLMC_H_ */
diff --git a/src/spdk/dpdk/drivers/bus/ifpga/Makefile b/src/spdk/dpdk/drivers/bus/ifpga/Makefile
new file mode 100644
index 00000000..3ff3bdb8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/ifpga/Makefile
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_bus_ifpga.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal
+LDLIBS += -lrte_rawdev
+LDLIBS += -lrte_kvargs
+
+# versioning export map
+EXPORT_MAP := rte_bus_ifpga_version.map
+
+# library version
+LIBABIVER := 1
+
+SRCS-$(CONFIG_RTE_LIBRTE_IFPGA_BUS) += ifpga_bus.c
+SRCS-$(CONFIG_RTE_LIBRTE_IFPGA_BUS) += ifpga_common.c
+
+#
+# Export include files
+#
+SYMLINK-$(CONFIG_RTE_LIBRTE_IFPGA_BUS)-include += rte_bus_ifpga.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/bus/ifpga/ifpga_bus.c b/src/spdk/dpdk/drivers/bus/ifpga/ifpga_bus.c
new file mode 100644
index 00000000..b324872e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/ifpga/ifpga_bus.c
@@ -0,0 +1,467 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <string.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <sys/queue.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include <rte_errno.h>
+#include <rte_bus.h>
+#include <rte_per_lcore.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_common.h>
+#include <rte_devargs.h>
+#include <rte_kvargs.h>
+#include <rte_alarm.h>
+#include <rte_string_fns.h>
+
+#include "rte_rawdev.h"
+#include "rte_rawdev_pmd.h"
+#include "rte_bus_ifpga.h"
+#include "ifpga_logs.h"
+#include "ifpga_common.h"
+
+int ifpga_bus_logtype;
+
+/* Forward declaration to access Intel FPGA bus
+ * on which iFPGA devices are connected
+ */
+static struct rte_bus rte_ifpga_bus;
+
+static struct ifpga_afu_dev_list ifpga_afu_dev_list =
+ TAILQ_HEAD_INITIALIZER(ifpga_afu_dev_list);
+static struct ifpga_afu_drv_list ifpga_afu_drv_list =
+ TAILQ_HEAD_INITIALIZER(ifpga_afu_drv_list);
+
+
+/* Register an ifpga-bus-based driver */
+void rte_ifpga_driver_register(struct rte_afu_driver *driver)
+{
+ RTE_VERIFY(driver);
+
+ TAILQ_INSERT_TAIL(&ifpga_afu_drv_list, driver, next);
+}
+
+/* Unregister an ifpga-bus-based driver */
+void rte_ifpga_driver_unregister(struct rte_afu_driver *driver)
+{
+ TAILQ_REMOVE(&ifpga_afu_drv_list, driver, next);
+}
+
+static struct rte_afu_device *
+ifpga_find_afu_dev(const struct rte_rawdev *rdev,
+ const struct rte_afu_id *afu_id)
+{
+ struct rte_afu_device *afu_dev = NULL;
+
+ TAILQ_FOREACH(afu_dev, &ifpga_afu_dev_list, next) {
+ if (afu_dev &&
+ afu_dev->rawdev == rdev &&
+ !ifpga_afu_id_cmp(&afu_dev->id, afu_id))
+ return afu_dev;
+ }
+ return NULL;
+}
+
+static const char * const valid_args[] = {
+#define IFPGA_ARG_NAME "ifpga"
+ IFPGA_ARG_NAME,
+#define IFPGA_ARG_PORT "port"
+ IFPGA_ARG_PORT,
+#define IFPGA_AFU_BTS "afu_bts"
+ IFPGA_AFU_BTS,
+ NULL
+};
+
+/*
+ * Parse one set of devargs describing an AFU device and, if it is valid
+ * and not already known, allocate and return a new AFU device
+ */
+static struct rte_afu_device *
+ifpga_scan_one(struct rte_rawdev *rawdev,
+ struct rte_devargs *devargs)
+{
+ struct rte_kvargs *kvlist = NULL;
+ struct rte_afu_device *afu_dev = NULL;
+ struct rte_afu_pr_conf afu_pr_conf;
+ int ret = 0;
+ char *path = NULL;
+
+ memset(&afu_pr_conf, 0, sizeof(struct rte_afu_pr_conf));
+
+ kvlist = rte_kvargs_parse(devargs->args, valid_args);
+ if (!kvlist) {
+ IFPGA_BUS_ERR("error when parsing param");
+ goto end;
+ }
+
+ if (rte_kvargs_count(kvlist, IFPGA_ARG_PORT) == 1) {
+ if (rte_kvargs_process(kvlist, IFPGA_ARG_PORT,
+ &rte_ifpga_get_integer32_arg, &afu_pr_conf.afu_id.port) < 0) {
+			IFPGA_BUS_ERR("failed to parse %s",
+ IFPGA_ARG_PORT);
+ goto end;
+ }
+ } else {
+ IFPGA_BUS_ERR("arg %s is mandatory for ifpga bus",
+ IFPGA_ARG_PORT);
+ goto end;
+ }
+
+ if (rte_kvargs_count(kvlist, IFPGA_AFU_BTS) == 1) {
+ if (rte_kvargs_process(kvlist, IFPGA_AFU_BTS,
+ &rte_ifpga_get_string_arg, &path) < 0) {
+ IFPGA_BUS_ERR("Failed to parse %s",
+ IFPGA_AFU_BTS);
+ goto end;
+ }
+ } else {
+ IFPGA_BUS_ERR("arg %s is mandatory for ifpga bus",
+ IFPGA_AFU_BTS);
+ goto end;
+ }
+
+ afu_pr_conf.afu_id.uuid.uuid_low = 0;
+ afu_pr_conf.afu_id.uuid.uuid_high = 0;
+	afu_pr_conf.pr_enable = path ? 1 : 0;
+
+ if (ifpga_find_afu_dev(rawdev, &afu_pr_conf.afu_id))
+ goto end;
+
+ afu_dev = calloc(1, sizeof(*afu_dev));
+ if (!afu_dev)
+ goto end;
+
+ afu_dev->device.devargs = devargs;
+ afu_dev->device.numa_node = SOCKET_ID_ANY;
+ afu_dev->device.name = devargs->name;
+ afu_dev->rawdev = rawdev;
+ afu_dev->id.uuid.uuid_low = 0;
+ afu_dev->id.uuid.uuid_high = 0;
+ afu_dev->id.port = afu_pr_conf.afu_id.port;
+
+ if (rawdev->dev_ops && rawdev->dev_ops->dev_info_get)
+ rawdev->dev_ops->dev_info_get(rawdev, afu_dev);
+
+ if (rawdev->dev_ops &&
+ rawdev->dev_ops->dev_start &&
+ rawdev->dev_ops->dev_start(rawdev))
+ goto end;
+
+ strlcpy(afu_pr_conf.bs_path, path, sizeof(afu_pr_conf.bs_path));
+	if (rawdev->dev_ops && rawdev->dev_ops->firmware_load) {
+		ret = rawdev->dev_ops->firmware_load(rawdev, &afu_pr_conf);
+		if (ret) {
+			IFPGA_BUS_ERR("firmware load error %d\n", ret);
+			goto end;
+		}
+	}
+ afu_dev->id.uuid.uuid_low = afu_pr_conf.afu_id.uuid.uuid_low;
+ afu_dev->id.uuid.uuid_high = afu_pr_conf.afu_id.uuid.uuid_high;
+
+ rte_kvargs_free(kvlist);
+ free(path);
+ return afu_dev;
+
+end:
+ if (kvlist)
+ rte_kvargs_free(kvlist);
+ if (path)
+ free(path);
+ if (afu_dev)
+ free(afu_dev);
+
+ return NULL;
+}
+
+/*
+ * Scan the content of the FPGA bus, and add the discovered devices to
+ * the devices list
+ */
+static int
+ifpga_scan(void)
+{
+ struct rte_devargs *devargs;
+ struct rte_kvargs *kvlist = NULL;
+ struct rte_rawdev *rawdev = NULL;
+ char *name = NULL;
+ char name1[RTE_RAWDEV_NAME_MAX_LEN];
+ struct rte_afu_device *afu_dev = NULL;
+
+ /* for FPGA devices we scan the devargs_list populated via cmdline */
+ RTE_EAL_DEVARGS_FOREACH(IFPGA_ARG_NAME, devargs) {
+ if (devargs->bus != &rte_ifpga_bus)
+ continue;
+
+ kvlist = rte_kvargs_parse(devargs->args, valid_args);
+ if (!kvlist) {
+ IFPGA_BUS_ERR("error when parsing param");
+ goto end;
+ }
+
+ if (rte_kvargs_count(kvlist, IFPGA_ARG_NAME) == 1) {
+ if (rte_kvargs_process(kvlist, IFPGA_ARG_NAME,
+ &rte_ifpga_get_string_arg, &name) < 0) {
+				IFPGA_BUS_ERR("failed to parse %s",
+ IFPGA_ARG_NAME);
+ goto end;
+ }
+ } else {
+ IFPGA_BUS_ERR("arg %s is mandatory for ifpga bus",
+ IFPGA_ARG_NAME);
+ goto end;
+ }
+
+ memset(name1, 0, sizeof(name1));
+ snprintf(name1, RTE_RAWDEV_NAME_MAX_LEN, "IFPGA:%s", name);
+
+ rawdev = rte_rawdev_pmd_get_named_dev(name1);
+ if (!rawdev)
+ goto end;
+
+		afu_dev = ifpga_scan_one(rawdev, devargs);
+		if (afu_dev != NULL)
+			TAILQ_INSERT_TAIL(&ifpga_afu_dev_list, afu_dev, next);
+
+		/* release per-iteration allocations before the next devargs */
+		rte_kvargs_free(kvlist);
+		kvlist = NULL;
+		free(name);
+		name = NULL;
+	}
+
+end:
+ if (kvlist)
+ rte_kvargs_free(kvlist);
+ if (name)
+ free(name);
+
+ return 0;
+}
+
+/*
+ * Match the AFU Driver and AFU Device using the ID Table
+ */
+static int
+rte_afu_match(const struct rte_afu_driver *afu_drv,
+ const struct rte_afu_device *afu_dev)
+{
+ const struct rte_afu_uuid *id_table;
+
+ for (id_table = afu_drv->id_table;
+ ((id_table->uuid_low != 0) && (id_table->uuid_high != 0));
+ id_table++) {
+ /* check if device's identifiers match the driver's ones */
+		if (id_table->uuid_low != afu_dev->id.uuid.uuid_low ||
+			id_table->uuid_high != afu_dev->id.uuid.uuid_high)
+ continue;
+
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
+ifpga_probe_one_driver(struct rte_afu_driver *drv,
+ struct rte_afu_device *afu_dev)
+{
+ int ret;
+
+ if (!rte_afu_match(drv, afu_dev))
+ /* Match of device and driver failed */
+ return 1;
+
+ /* reference driver structure */
+ afu_dev->driver = drv;
+ afu_dev->device.driver = &drv->driver;
+
+ /* call the driver probe() function */
+ ret = drv->probe(afu_dev);
+ if (ret) {
+ afu_dev->driver = NULL;
+ afu_dev->device.driver = NULL;
+ }
+
+ return ret;
+}
+
+static int
+ifpga_probe_all_drivers(struct rte_afu_device *afu_dev)
+{
+ struct rte_afu_driver *drv = NULL;
+ int ret = 0;
+
+ if (afu_dev == NULL)
+ return -1;
+
+ /* Check if a driver is already loaded */
+ if (afu_dev->driver != NULL)
+ return 0;
+
+	TAILQ_FOREACH(drv, &ifpga_afu_drv_list, next) {
+		ret = ifpga_probe_one_driver(drv, afu_dev);
+		if (ret < 0)
+			/* negative value is an error */
+			return ret;
+		if (ret > 0)
+			/* positive value means the driver does not match */
+			continue;
+		/* driver matched and probed successfully */
+		return 0;
+	}
+	return 0;
+}
+
+/*
+ * Scan the content of the Intel FPGA bus, and call the probe() function for
+ * all registered drivers that have a matching entry in its id_table
+ * for discovered devices.
+ */
+static int
+ifpga_probe(void)
+{
+ struct rte_afu_device *afu_dev = NULL;
+ int ret = 0;
+
+ TAILQ_FOREACH(afu_dev, &ifpga_afu_dev_list, next) {
+ if (afu_dev->device.driver)
+ continue;
+
+ ret = ifpga_probe_all_drivers(afu_dev);
+ if (ret < 0)
+ IFPGA_BUS_ERR("failed to initialize %s device\n",
+ rte_ifpga_device_name(afu_dev));
+ }
+
+ return ret;
+}
+
+static int
+ifpga_plug(struct rte_device *dev)
+{
+ return ifpga_probe_all_drivers(RTE_DEV_TO_AFU(dev));
+}
+
+static int
+ifpga_remove_driver(struct rte_afu_device *afu_dev)
+{
+ const char *name;
+ const struct rte_afu_driver *driver;
+
+ name = rte_ifpga_device_name(afu_dev);
+ if (!afu_dev->device.driver) {
+		IFPGA_BUS_DEBUG("no driver attached to device %s\n", name);
+ return 1;
+ }
+
+ driver = RTE_DRV_TO_AFU_CONST(afu_dev->device.driver);
+ return driver->remove(afu_dev);
+}
+
+static int
+ifpga_unplug(struct rte_device *dev)
+{
+ struct rte_afu_device *afu_dev = NULL;
+ struct rte_devargs *devargs = NULL;
+ int ret;
+
+ if (dev == NULL)
+ return -EINVAL;
+
+ afu_dev = RTE_DEV_TO_AFU(dev);
+ if (!afu_dev)
+ return -ENOENT;
+
+ devargs = dev->devargs;
+
+ ret = ifpga_remove_driver(afu_dev);
+ if (ret)
+ return ret;
+
+ TAILQ_REMOVE(&ifpga_afu_dev_list, afu_dev, next);
+
+ rte_devargs_remove(devargs->bus->name, devargs->name);
+ free(afu_dev);
+ return 0;
+
+}
+
+static struct rte_device *
+ifpga_find_device(const struct rte_device *start,
+ rte_dev_cmp_t cmp, const void *data)
+{
+ struct rte_afu_device *afu_dev;
+
+ TAILQ_FOREACH(afu_dev, &ifpga_afu_dev_list, next) {
+ if (start && &afu_dev->device == start) {
+ start = NULL;
+ continue;
+ }
+ if (cmp(&afu_dev->device, data) == 0)
+ return &afu_dev->device;
+ }
+
+ return NULL;
+}
+
+static int
+ifpga_parse(const char *name, void *addr)
+{
+ int *out = addr;
+ struct rte_rawdev *rawdev = NULL;
+ char rawdev_name[RTE_RAWDEV_NAME_MAX_LEN];
+ char *c1 = NULL;
+ char *c2 = NULL;
+ int port = IFPGA_BUS_DEV_PORT_MAX;
+ char str_port[8];
+ int str_port_len = 0;
+ int ret;
+
+ memset(str_port, 0, 8);
+	c1 = strchr(name, '|');
+	if (c1 == NULL)
+		/* not a "<port>|<name>" string, not an ifpga device */
+		return 1;
+	str_port_len = c1 - name;
+	c2 = c1 + 1;
+
+	if (str_port_len < 8 &&
+		str_port_len > 0) {
+		memcpy(str_port, name, str_port_len);
+		ret = sscanf(str_port, "%d", &port);
+		if (ret != 1)
+			return 1;
+	}
+
+ memset(rawdev_name, 0, sizeof(rawdev_name));
+ snprintf(rawdev_name, RTE_RAWDEV_NAME_MAX_LEN, "IFPGA:%s", c2);
+ rawdev = rte_rawdev_pmd_get_named_dev(rawdev_name);
+
+ if ((port < IFPGA_BUS_DEV_PORT_MAX) &&
+ rawdev &&
+ (addr != NULL))
+ *out = port;
+
+ if ((port < IFPGA_BUS_DEV_PORT_MAX) &&
+ rawdev)
+ return 0;
+ else
+ return 1;
+}
+
+static struct rte_bus rte_ifpga_bus = {
+ .scan = ifpga_scan,
+ .probe = ifpga_probe,
+ .find_device = ifpga_find_device,
+ .plug = ifpga_plug,
+ .unplug = ifpga_unplug,
+ .parse = ifpga_parse,
+};
+
+RTE_REGISTER_BUS(IFPGA_BUS_NAME, rte_ifpga_bus);
+
+RTE_INIT(ifpga_init_log)
+{
+ ifpga_bus_logtype = rte_log_register("bus.ifpga");
+ if (ifpga_bus_logtype >= 0)
+ rte_log_set_level(ifpga_bus_logtype, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/bus/ifpga/ifpga_common.c b/src/spdk/dpdk/drivers/bus/ifpga/ifpga_common.c
new file mode 100644
index 00000000..78e2eaee
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/ifpga/ifpga_common.c
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <string.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <sys/queue.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include <rte_errno.h>
+#include <rte_bus.h>
+#include <rte_per_lcore.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_common.h>
+
+#include <rte_devargs.h>
+#include <rte_kvargs.h>
+#include <rte_alarm.h>
+
+#include "rte_bus_ifpga.h"
+#include "ifpga_logs.h"
+#include "ifpga_common.h"
+
+int rte_ifpga_get_string_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ if (!value || !extra_args)
+ return -EINVAL;
+
+ *(char **)extra_args = strdup(value);
+
+ if (!*(char **)extra_args)
+ return -ENOMEM;
+
+ return 0;
+}
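+
+/*
+ * These helpers are intended as rte_kvargs_process() callbacks. Example
+ * (sketch, using a hypothetical "name" key on an existing kvlist):
+ *
+ *	char *name = NULL;
+ *	if (rte_kvargs_process(kvlist, "name",
+ *			&rte_ifpga_get_string_arg, &name) < 0)
+ *		return -EINVAL;
+ *	free(name);	// strdup'd by the callback; caller must free
+ */
+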
+int rte_ifpga_get_integer32_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ if (!value || !extra_args)
+ return -EINVAL;
+
+ *(int *)extra_args = strtoull(value, NULL, 0);
+
+ return 0;
+}
+
+int ifpga_get_integer64_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ if (!value || !extra_args)
+ return -EINVAL;
+
+ *(uint64_t *)extra_args = strtoull(value, NULL, 0);
+
+ return 0;
+}
+
+int ifpga_get_unsigned_long(const char *str, int base)
+{
+ unsigned long num;
+ char *end = NULL;
+
+ errno = 0;
+
+ num = strtoul(str, &end, base);
+ if ((str[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
+ return -1;
+
+ return num;
+}
+
+int ifpga_afu_id_cmp(const struct rte_afu_id *afu_id0,
+ const struct rte_afu_id *afu_id1)
+{
+ if ((afu_id0->uuid.uuid_low == afu_id1->uuid.uuid_low) &&
+ (afu_id0->uuid.uuid_high == afu_id1->uuid.uuid_high) &&
+ (afu_id0->port == afu_id1->port)) {
+ return 0;
+		return 0;
+	}
+	return 1;
diff --git a/src/spdk/dpdk/drivers/bus/ifpga/ifpga_common.h b/src/spdk/dpdk/drivers/bus/ifpga/ifpga_common.h
new file mode 100644
index 00000000..f9254b9d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/ifpga/ifpga_common.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_COMMON_H_
+#define _IFPGA_COMMON_H_
+
+int rte_ifpga_get_string_arg(const char *key __rte_unused,
+ const char *value, void *extra_args);
+int rte_ifpga_get_integer32_arg(const char *key __rte_unused,
+ const char *value, void *extra_args);
+int ifpga_get_integer64_arg(const char *key __rte_unused,
+ const char *value, void *extra_args);
+int ifpga_get_unsigned_long(const char *str, int base);
+int ifpga_afu_id_cmp(const struct rte_afu_id *afu_id0,
+ const struct rte_afu_id *afu_id1);
+
+#endif /* _IFPGA_COMMON_H_ */
diff --git a/src/spdk/dpdk/drivers/bus/ifpga/ifpga_logs.h b/src/spdk/dpdk/drivers/bus/ifpga/ifpga_logs.h
new file mode 100644
index 00000000..873e0a4f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/ifpga/ifpga_logs.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_LOGS_H_
+#define _IFPGA_LOGS_H_
+
+#include <rte_log.h>
+
+extern int ifpga_bus_logtype;
+
+#define IFPGA_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, ifpga_bus_logtype, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+#define IFPGA_BUS_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, ifpga_bus_logtype, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+#define IFPGA_BUS_FUNC_TRACE() IFPGA_BUS_LOG(DEBUG, ">>")
+
+#define IFPGA_BUS_DEBUG(fmt, args...) \
+ IFPGA_BUS_LOG(DEBUG, fmt, ## args)
+#define IFPGA_BUS_INFO(fmt, args...) \
+ IFPGA_BUS_LOG(INFO, fmt, ## args)
+#define IFPGA_BUS_ERR(fmt, args...) \
+ IFPGA_BUS_LOG(ERR, fmt, ## args)
+#define IFPGA_BUS_WARN(fmt, args...) \
+ IFPGA_BUS_LOG(WARNING, fmt, ## args)
+
+#endif /* _IFPGA_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/bus/ifpga/meson.build b/src/spdk/dpdk/drivers/bus/ifpga/meson.build
new file mode 100644
index 00000000..c9b08c86
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/ifpga/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2018 Intel Corporation
+
+deps += ['pci', 'kvargs', 'rawdev']
+install_headers('rte_bus_ifpga.h')
+sources = files('ifpga_common.c', 'ifpga_bus.c')
+
+allow_experimental_apis = true
diff --git a/src/spdk/dpdk/drivers/bus/ifpga/rte_bus_ifpga.h b/src/spdk/dpdk/drivers/bus/ifpga/rte_bus_ifpga.h
new file mode 100644
index 00000000..51d5ae0d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/ifpga/rte_bus_ifpga.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _RTE_BUS_IFPGA_H_
+#define _RTE_BUS_IFPGA_H_
+
+/**
+ * @file
+ *
+ * RTE Intel FPGA Bus Interface
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_bus.h>
+#include <rte_pci.h>
+
+/** Name of Intel FPGA Bus */
+#define IFPGA_BUS_NAME ifpga
+
+/* Forward declarations */
+struct rte_afu_device;
+struct rte_afu_driver;
+
+/** Double linked list of Intel FPGA AFU device. */
+TAILQ_HEAD(ifpga_afu_dev_list, rte_afu_device);
+/** Double linked list of Intel FPGA AFU device drivers. */
+TAILQ_HEAD(ifpga_afu_drv_list, rte_afu_driver);
+
+#define IFPGA_BUS_BITSTREAM_PATH_MAX_LEN 256
+
+struct rte_afu_uuid {
+ uint64_t uuid_low;
+ uint64_t uuid_high;
+} __attribute__ ((packed));
+
+#define IFPGA_BUS_DEV_PORT_MAX 4
+
+/**
+ * A structure describing an ID for an AFU driver. Each driver provides a
+ * table of these IDs for each device that it supports.
+ */
+struct rte_afu_id {
+ struct rte_afu_uuid uuid;
+ int port; /**< port number */
+} __attribute__ ((packed));
+
+/**
+ * A structure describing the PR (Partial Reconfiguration) configuration
+ * for an AFU device.
+ */
+
+struct rte_afu_pr_conf {
+ struct rte_afu_id afu_id;
+ int pr_enable;
+ char bs_path[IFPGA_BUS_BITSTREAM_PATH_MAX_LEN];
+};
+
+#define AFU_PRI_STR_SIZE (PCI_PRI_STR_SIZE + 8)
+
+/**
+ * A structure describing an AFU device.
+ */
+struct rte_afu_device {
+ TAILQ_ENTRY(rte_afu_device) next; /**< Next in device list. */
+ struct rte_device device; /**< Inherit core device */
+	struct rte_rawdev *rawdev;        /**< Pointer to the backing rawdev */
+ struct rte_afu_id id; /**< AFU id within FPGA. */
+ uint32_t num_region; /**< number of regions found */
+ struct rte_mem_resource mem_resource[PCI_MAX_RESOURCE];
+ /**< AFU Memory Resource */
+ struct rte_intr_handle intr_handle; /**< Interrupt handle */
+ struct rte_afu_driver *driver; /**< Associated driver */
+ char path[IFPGA_BUS_BITSTREAM_PATH_MAX_LEN];
+} __attribute__ ((packed));
+
+/**
+ * @internal
+ * Helper macro for drivers that need to convert to struct rte_afu_device.
+ */
+#define RTE_DEV_TO_AFU(ptr) \
+ container_of(ptr, struct rte_afu_device, device)
+
+#define RTE_DRV_TO_AFU_CONST(ptr) \
+ container_of(ptr, const struct rte_afu_driver, driver)
+
+/**
+ * Initialization function for the driver called during FPGA BUS probing.
+ */
+typedef int (afu_probe_t)(struct rte_afu_device *);
+
+/**
+ * Uninitialization function for the driver called during hotplugging.
+ */
+typedef int (afu_remove_t)(struct rte_afu_device *);
+
+/**
+ * A structure describing an AFU driver.
+ */
+struct rte_afu_driver {
+ TAILQ_ENTRY(rte_afu_driver) next; /**< Next afu driver. */
+ struct rte_driver driver; /**< Inherit core driver. */
+ afu_probe_t *probe; /**< Device Probe function. */
+ afu_remove_t *remove; /**< Device Remove function. */
+ const struct rte_afu_uuid *id_table; /**< AFU uuid within FPGA. */
+};
+
+static inline const char *
+rte_ifpga_device_name(const struct rte_afu_device *afu)
+{
+ if (afu && afu->device.name)
+ return afu->device.name;
+ return NULL;
+}
+
+/**
+ * Register an ifpga AFU device driver.
+ *
+ * @param driver
+ * A pointer to a rte_afu_driver structure describing the driver
+ * to be registered.
+ */
+void rte_ifpga_driver_register(struct rte_afu_driver *driver);
+
+/**
+ * Unregister an ifpga AFU device driver.
+ *
+ * @param driver
+ * A pointer to a rte_afu_driver structure describing the driver
+ * to be unregistered.
+ */
+void rte_ifpga_driver_unregister(struct rte_afu_driver *driver);
+
+#define RTE_PMD_REGISTER_AFU(nm, afudrv)\
+static const char *afudrvinit_ ## nm ## _alias;\
+RTE_INIT(afudrvinitfn_ ##afudrv)\
+{\
+ (afudrv).driver.name = RTE_STR(nm);\
+ (afudrv).driver.alias = afudrvinit_ ## nm ## _alias;\
+ rte_ifpga_driver_register(&afudrv);\
+} \
+RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
+
+#define RTE_PMD_REGISTER_AFU_ALIAS(nm, alias)\
+static const char *afudrvinit_ ## nm ## _alias = RTE_STR(alias)
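+
+/*
+ * Example (sketch): registering a hypothetical AFU driver. The id_table
+ * is scanned until an entry with a zero uuid field, so it must end with
+ * a zeroed sentinel. 'my_afu_probe' and 'my_afu_remove' are placeholders:
+ *
+ *	static const struct rte_afu_uuid my_afu_ids[] = {
+ *		{ .uuid_low = 0x1111111111111111ULL,
+ *		  .uuid_high = 0x2222222222222222ULL },
+ *		{ 0, 0 }
+ *	};
+ *	static struct rte_afu_driver my_afu_drv = {
+ *		.id_table = my_afu_ids,
+ *		.probe = my_afu_probe,
+ *		.remove = my_afu_remove,
+ *	};
+ *	RTE_PMD_REGISTER_AFU(afu_my_drv, my_afu_drv);
+ */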
+
+#endif /* _RTE_BUS_IFPGA_H_ */
diff --git a/src/spdk/dpdk/drivers/bus/ifpga/rte_bus_ifpga_version.map b/src/spdk/dpdk/drivers/bus/ifpga/rte_bus_ifpga_version.map
new file mode 100644
index 00000000..a0279797
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/ifpga/rte_bus_ifpga_version.map
@@ -0,0 +1,10 @@
+DPDK_18.05 {
+ global:
+
+ rte_ifpga_get_integer32_arg;
+ rte_ifpga_get_string_arg;
+ rte_ifpga_driver_register;
+ rte_ifpga_driver_unregister;
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/bus/meson.build b/src/spdk/dpdk/drivers/bus/meson.build
new file mode 100644
index 00000000..80de2d91
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+drivers = ['dpaa', 'fslmc', 'ifpga', 'pci', 'vdev', 'vmbus']
+std_deps = ['eal']
+config_flag_fmt = 'RTE_LIBRTE_@0@_BUS'
+driver_name_fmt = 'rte_bus_@0@'
diff --git a/src/spdk/dpdk/drivers/bus/pci/Makefile b/src/spdk/dpdk/drivers/bus/pci/Makefile
new file mode 100644
index 00000000..cf373068
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/pci/Makefile
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 6WIND S.A.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+LIB = librte_bus_pci.a
+LIBABIVER := 1
+EXPORT_MAP := rte_bus_pci_version.map
+
+CFLAGS := -I$(SRCDIR) $(CFLAGS)
+CFLAGS += -O3 $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+ifneq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),)
+SYSTEM := linux
+endif
+ifneq ($(CONFIG_RTE_EXEC_ENV_BSDAPP),)
+SYSTEM := bsd
+endif
+
+CFLAGS += -I$(RTE_SDK)/drivers/bus/pci/$(SYSTEM)
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/$(SYSTEM)app/eal
+
+# memseg walk is not part of stable API yet
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_pci
+
+include $(RTE_SDK)/drivers/bus/pci/$(SYSTEM)/Makefile
+SRCS-$(CONFIG_RTE_LIBRTE_PCI_BUS) := $(addprefix $(SYSTEM)/,$(SRCS))
+SRCS-$(CONFIG_RTE_LIBRTE_PCI_BUS) += pci_common.c
+SRCS-$(CONFIG_RTE_LIBRTE_PCI_BUS) += pci_common_uio.c
+
+SYMLINK-$(CONFIG_RTE_LIBRTE_PCI_BUS)-include += rte_bus_pci.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/bus/pci/bsd/Makefile b/src/spdk/dpdk/drivers/bus/pci/bsd/Makefile
new file mode 100644
index 00000000..c1b54c05
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/pci/bsd/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 6WIND S.A.
+
+SRCS += pci.c
diff --git a/src/spdk/dpdk/drivers/bus/pci/bsd/pci.c b/src/spdk/dpdk/drivers/bus/pci/bsd/pci.c
new file mode 100644
index 00000000..655b34b7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/pci/bsd/pci.c
@@ -0,0 +1,651 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <dirent.h>
+#include <limits.h>
+#include <sys/queue.h>
+#include <sys/mman.h>
+#include <sys/ioctl.h>
+#include <sys/pciio.h>
+#include <dev/pci/pcireg.h>
+
+#if defined(RTE_ARCH_X86)
+#include <machine/cpufunc.h>
+#endif
+
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_common.h>
+#include <rte_launch.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_eal_memconfig.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_malloc.h>
+#include <rte_string_fns.h>
+#include <rte_debug.h>
+#include <rte_devargs.h>
+
+#include "eal_filesystem.h"
+#include "private.h"
+
+/**
+ * @file
+ * PCI probing under BSD
+ *
+ * This code is used to simulate a PCI probe by querying the kernel
+ * through ioctl() calls on /dev/pci. Devices are expected to be bound
+ * to the nic_uio module, a very minimal userland driver that only
+ * provides access to the PCI BARs to applications and enables bus
+ * mastering.
+ */
+
+extern struct rte_pci_bus rte_pci_bus;
+
+/* Map pci device */
+int
+rte_pci_map_device(struct rte_pci_device *dev)
+{
+ int ret = -1;
+
+ /* try mapping the NIC resources */
+ switch (dev->kdrv) {
+ case RTE_KDRV_NIC_UIO:
+ /* map resources for devices that use uio */
+ ret = pci_uio_map_resource(dev);
+ break;
+ default:
+ RTE_LOG(DEBUG, EAL,
+ " Not managed by a supported kernel driver, skipped\n");
+ ret = 1;
+ break;
+ }
+
+ return ret;
+}
+
+/* Unmap pci device */
+void
+rte_pci_unmap_device(struct rte_pci_device *dev)
+{
+ /* try unmapping the NIC resources */
+ switch (dev->kdrv) {
+ case RTE_KDRV_NIC_UIO:
+ /* unmap resources for devices that use uio */
+ pci_uio_unmap_resource(dev);
+ break;
+ default:
+ RTE_LOG(DEBUG, EAL,
+ " Not managed by a supported kernel driver, skipped\n");
+ break;
+ }
+}
+
+void
+pci_uio_free_resource(struct rte_pci_device *dev,
+ struct mapped_pci_resource *uio_res)
+{
+ rte_free(uio_res);
+
+ if (dev->intr_handle.fd) {
+ close(dev->intr_handle.fd);
+ dev->intr_handle.fd = -1;
+ dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+ }
+}
+
+int
+pci_uio_alloc_resource(struct rte_pci_device *dev,
+ struct mapped_pci_resource **uio_res)
+{
+ char devname[PATH_MAX]; /* contains the /dev/uioX */
+ struct rte_pci_addr *loc;
+
+ loc = &dev->addr;
+
+ snprintf(devname, sizeof(devname), "/dev/uio@pci:%u:%u:%u",
+ dev->addr.bus, dev->addr.devid, dev->addr.function);
+
+	if (access(devname, R_OK | W_OK) < 0) {
+ RTE_LOG(WARNING, EAL, " "PCI_PRI_FMT" not managed by UIO driver, "
+ "skipping\n", loc->domain, loc->bus, loc->devid, loc->function);
+ return 1;
+ }
+
+ /* save fd if in primary process */
+ dev->intr_handle.fd = open(devname, O_RDWR);
+ if (dev->intr_handle.fd < 0) {
+ RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
+ devname, strerror(errno));
+ goto error;
+ }
+ dev->intr_handle.type = RTE_INTR_HANDLE_UIO;
+
+ /* allocate the mapping details for secondary processes*/
+ *uio_res = rte_zmalloc("UIO_RES", sizeof(**uio_res), 0);
+ if (*uio_res == NULL) {
+ RTE_LOG(ERR, EAL,
+ "%s(): cannot store uio mmap details\n", __func__);
+ goto error;
+ }
+
+ snprintf((*uio_res)->path, sizeof((*uio_res)->path), "%s", devname);
+ memcpy(&(*uio_res)->pci_addr, &dev->addr, sizeof((*uio_res)->pci_addr));
+
+ return 0;
+
+error:
+ pci_uio_free_resource(dev, *uio_res);
+ return -1;
+}
+
+int
+pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx,
+ struct mapped_pci_resource *uio_res, int map_idx)
+{
+ int fd;
+ char *devname;
+ void *mapaddr;
+ uint64_t offset;
+ uint64_t pagesz;
+ struct pci_map *maps;
+
+ maps = uio_res->maps;
+ devname = uio_res->path;
+ pagesz = sysconf(_SC_PAGESIZE);
+
+ /* allocate memory to keep path */
+ maps[map_idx].path = rte_malloc(NULL, strlen(devname) + 1, 0);
+ if (maps[map_idx].path == NULL) {
+ RTE_LOG(ERR, EAL, "Cannot allocate memory for path: %s\n",
+ strerror(errno));
+ return -1;
+ }
+
+ /*
+ * open resource file, to mmap it
+ */
+ fd = open(devname, O_RDWR);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
+ devname, strerror(errno));
+ goto error;
+ }
+
+ /* if matching map is found, then use it */
+ offset = res_idx * pagesz;
+ mapaddr = pci_map_resource(NULL, fd, (off_t)offset,
+ (size_t)dev->mem_resource[res_idx].len, 0);
+ close(fd);
+ if (mapaddr == MAP_FAILED)
+ goto error;
+
+ maps[map_idx].phaddr = dev->mem_resource[res_idx].phys_addr;
+ maps[map_idx].size = dev->mem_resource[res_idx].len;
+ maps[map_idx].addr = mapaddr;
+ maps[map_idx].offset = offset;
+ strcpy(maps[map_idx].path, devname);
+ dev->mem_resource[res_idx].addr = mapaddr;
+
+ return 0;
+
+error:
+ rte_free(maps[map_idx].path);
+ return -1;
+}
+
+static int
+pci_scan_one(int dev_pci_fd, struct pci_conf *conf)
+{
+ struct rte_pci_device *dev;
+ struct pci_bar_io bar;
+ unsigned i, max;
+
+ dev = malloc(sizeof(*dev));
+ if (dev == NULL) {
+ return -1;
+ }
+
+ memset(dev, 0, sizeof(*dev));
+ dev->addr.domain = conf->pc_sel.pc_domain;
+ dev->addr.bus = conf->pc_sel.pc_bus;
+ dev->addr.devid = conf->pc_sel.pc_dev;
+ dev->addr.function = conf->pc_sel.pc_func;
+
+ /* get vendor id */
+ dev->id.vendor_id = conf->pc_vendor;
+
+ /* get device id */
+ dev->id.device_id = conf->pc_device;
+
+ /* get subsystem_vendor id */
+ dev->id.subsystem_vendor_id = conf->pc_subvendor;
+
+ /* get subsystem_device id */
+ dev->id.subsystem_device_id = conf->pc_subdevice;
+
+ /* get class id */
+ dev->id.class_id = (conf->pc_class << 16) |
+ (conf->pc_subclass << 8) |
+ (conf->pc_progif);
+
+ /* TODO: get max_vfs */
+ dev->max_vfs = 0;
+
+ /* FreeBSD has no NUMA support (yet) */
+ dev->device.numa_node = 0;
+
+ pci_name_set(dev);
+
+ /* FreeBSD has only one pass through driver */
+ dev->kdrv = RTE_KDRV_NIC_UIO;
+
+ /* parse resources */
+ switch (conf->pc_hdr & PCIM_HDRTYPE) {
+ case PCIM_HDRTYPE_NORMAL:
+ max = PCIR_MAX_BAR_0;
+ break;
+ case PCIM_HDRTYPE_BRIDGE:
+ max = PCIR_MAX_BAR_1;
+ break;
+ case PCIM_HDRTYPE_CARDBUS:
+ max = PCIR_MAX_BAR_2;
+ break;
+ default:
+ goto skipdev;
+ }
+
+ for (i = 0; i <= max; i++) {
+ bar.pbi_sel = conf->pc_sel;
+ bar.pbi_reg = PCIR_BAR(i);
+ if (ioctl(dev_pci_fd, PCIOCGETBAR, &bar) < 0)
+ continue;
+
+ dev->mem_resource[i].len = bar.pbi_length;
+ if (PCI_BAR_IO(bar.pbi_base)) {
+ dev->mem_resource[i].addr = (void *)(bar.pbi_base & ~((uint64_t)0xf));
+ continue;
+ }
+ dev->mem_resource[i].phys_addr = bar.pbi_base & ~((uint64_t)0xf);
+ }
+
+ /* device is valid, add in list (sorted) */
+ if (TAILQ_EMPTY(&rte_pci_bus.device_list)) {
+ rte_pci_add_device(dev);
+	} else {
+ struct rte_pci_device *dev2 = NULL;
+ int ret;
+
+ TAILQ_FOREACH(dev2, &rte_pci_bus.device_list, next) {
+ ret = rte_pci_addr_cmp(&dev->addr, &dev2->addr);
+ if (ret > 0)
+ continue;
+ else if (ret < 0) {
+ rte_pci_insert_device(dev2, dev);
+ } else { /* already registered */
+ dev2->kdrv = dev->kdrv;
+ dev2->max_vfs = dev->max_vfs;
+ pci_name_set(dev2);
+ memmove(dev2->mem_resource,
+ dev->mem_resource,
+ sizeof(dev->mem_resource));
+ free(dev);
+ }
+ return 0;
+ }
+ rte_pci_add_device(dev);
+ }
+
+ return 0;
+
+skipdev:
+ free(dev);
+ return 0;
+}
+
+/*
+ * Scan the content of the PCI bus, and add the devices in the devices
+ * list. Call pci_scan_one() for each pci entry found.
+ */
+int
+rte_pci_scan(void)
+{
+ int fd;
+ unsigned dev_count = 0;
+ struct pci_conf matches[16];
+ struct pci_conf_io conf_io = {
+ .pat_buf_len = 0,
+ .num_patterns = 0,
+ .patterns = NULL,
+ .match_buf_len = sizeof(matches),
+ .matches = &matches[0],
+ };
+
+ /* for debug purposes, PCI can be disabled */
+ if (!rte_eal_has_pci())
+ return 0;
+
+ fd = open("/dev/pci", O_RDONLY);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
+ goto error;
+ }
+
+ do {
+ unsigned i;
+ if (ioctl(fd, PCIOCGETCONF, &conf_io) < 0) {
+ RTE_LOG(ERR, EAL, "%s(): error with ioctl on /dev/pci: %s\n",
+ __func__, strerror(errno));
+ goto error;
+ }
+
+ for (i = 0; i < conf_io.num_matches; i++)
+ if (pci_scan_one(fd, &matches[i]) < 0)
+ goto error;
+
+ dev_count += conf_io.num_matches;
+	} while (conf_io.status == PCI_GETCONF_MORE_DEVS);
+
+ close(fd);
+
+ RTE_LOG(DEBUG, EAL, "PCI scan found %u devices\n", dev_count);
+ return 0;
+
+error:
+ if (fd >= 0)
+ close(fd);
+ return -1;
+}
+
+/*
+ * Get iommu class of PCI devices on the bus.
+ */
+enum rte_iova_mode
+rte_pci_get_iommu_class(void)
+{
+ /* Supports only RTE_KDRV_NIC_UIO */
+ return RTE_IOVA_PA;
+}
+
+int
+pci_update_device(const struct rte_pci_addr *addr)
+{
+ int fd;
+ struct pci_conf matches[2];
+ struct pci_match_conf match = {
+ .pc_sel = {
+ .pc_domain = addr->domain,
+ .pc_bus = addr->bus,
+ .pc_dev = addr->devid,
+ .pc_func = addr->function,
+ },
+ };
+ struct pci_conf_io conf_io = {
+ .pat_buf_len = 0,
+ .num_patterns = 1,
+ .patterns = &match,
+ .match_buf_len = sizeof(matches),
+ .matches = &matches[0],
+ };
+
+ fd = open("/dev/pci", O_RDONLY);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
+ goto error;
+ }
+
+ if (ioctl(fd, PCIOCGETCONF, &conf_io) < 0) {
+ RTE_LOG(ERR, EAL, "%s(): error with ioctl on /dev/pci: %s\n",
+ __func__, strerror(errno));
+ goto error;
+ }
+
+ if (conf_io.num_matches != 1)
+ goto error;
+
+ if (pci_scan_one(fd, &matches[0]) < 0)
+ goto error;
+
+ close(fd);
+
+ return 0;
+
+error:
+ if (fd >= 0)
+ close(fd);
+ return -1;
+}
+
+/* Read PCI config space. */
+int rte_pci_read_config(const struct rte_pci_device *dev,
+ void *buf, size_t len, off_t offset)
+{
+ int fd = -1;
+ int size;
+ struct pci_io pi = {
+ .pi_sel = {
+ .pc_domain = dev->addr.domain,
+ .pc_bus = dev->addr.bus,
+ .pc_dev = dev->addr.devid,
+ .pc_func = dev->addr.function,
+ },
+ .pi_reg = offset,
+ };
+
+ fd = open("/dev/pci", O_RDWR);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
+ goto error;
+ }
+
+ while (len > 0) {
+ size = (len >= 4) ? 4 : ((len >= 2) ? 2 : 1);
+ pi.pi_width = size;
+
+ if (ioctl(fd, PCIOCREAD, &pi) < 0)
+ goto error;
+ memcpy(buf, &pi.pi_data, size);
+
+ buf = (char *)buf + size;
+ pi.pi_reg += size;
+ len -= size;
+ }
+ close(fd);
+
+ return 0;
+
+ error:
+ if (fd >= 0)
+ close(fd);
+ return -1;
+}
+
+/* Write PCI config space. */
+int rte_pci_write_config(const struct rte_pci_device *dev,
+ const void *buf, size_t len, off_t offset)
+{
+ int fd = -1;
+
+ struct pci_io pi = {
+ .pi_sel = {
+ .pc_domain = dev->addr.domain,
+ .pc_bus = dev->addr.bus,
+ .pc_dev = dev->addr.devid,
+ .pc_func = dev->addr.function,
+ },
+ .pi_reg = offset,
+ .pi_width = len,
+ };
+
+ if (len == 3 || len > sizeof(pi.pi_data)) {
+		RTE_LOG(ERR, EAL, "%s(): invalid pci write length\n", __func__);
+ goto error;
+ }
+
+ memcpy(&pi.pi_data, buf, len);
+
+ fd = open("/dev/pci", O_RDWR);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
+ goto error;
+ }
+
+ if (ioctl(fd, PCIOCWRITE, &pi) < 0)
+ goto error;
+
+ close(fd);
+ return 0;
+
+ error:
+ if (fd >= 0)
+ close(fd);
+ return -1;
+}
+
+int
+rte_pci_ioport_map(struct rte_pci_device *dev, int bar,
+ struct rte_pci_ioport *p)
+{
+ int ret;
+
+ switch (dev->kdrv) {
+#if defined(RTE_ARCH_X86)
+ case RTE_KDRV_NIC_UIO:
+ if ((uintptr_t) dev->mem_resource[bar].addr <= UINT16_MAX) {
+ p->base = (uintptr_t)dev->mem_resource[bar].addr;
+ ret = 0;
+ } else
+ ret = -1;
+ break;
+#endif
+ default:
+ ret = -1;
+ break;
+ }
+
+ if (!ret)
+ p->dev = dev;
+
+ return ret;
+}
+
+static void
+pci_uio_ioport_read(struct rte_pci_ioport *p,
+ void *data, size_t len, off_t offset)
+{
+#if defined(RTE_ARCH_X86)
+ uint8_t *d;
+ int size;
+ unsigned short reg = p->base + offset;
+
+ for (d = data; len > 0; d += size, reg += size, len -= size) {
+ if (len >= 4) {
+ size = 4;
+ *(uint32_t *)d = inl(reg);
+ } else if (len >= 2) {
+ size = 2;
+ *(uint16_t *)d = inw(reg);
+ } else {
+ size = 1;
+ *d = inb(reg);
+ }
+ }
+#else
+ RTE_SET_USED(p);
+ RTE_SET_USED(data);
+ RTE_SET_USED(len);
+ RTE_SET_USED(offset);
+#endif
+}
+
+void
+rte_pci_ioport_read(struct rte_pci_ioport *p,
+ void *data, size_t len, off_t offset)
+{
+ switch (p->dev->kdrv) {
+ case RTE_KDRV_NIC_UIO:
+ pci_uio_ioport_read(p, data, len, offset);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+pci_uio_ioport_write(struct rte_pci_ioport *p,
+ const void *data, size_t len, off_t offset)
+{
+#if defined(RTE_ARCH_X86)
+ const uint8_t *s;
+ int size;
+ unsigned short reg = p->base + offset;
+
+ for (s = data; len > 0; s += size, reg += size, len -= size) {
+ if (len >= 4) {
+ size = 4;
+ outl(reg, *(const uint32_t *)s);
+ } else if (len >= 2) {
+ size = 2;
+ outw(reg, *(const uint16_t *)s);
+ } else {
+ size = 1;
+ outb(reg, *s);
+ }
+ }
+#else
+ RTE_SET_USED(p);
+ RTE_SET_USED(data);
+ RTE_SET_USED(len);
+ RTE_SET_USED(offset);
+#endif
+}
+
+void
+rte_pci_ioport_write(struct rte_pci_ioport *p,
+ const void *data, size_t len, off_t offset)
+{
+ switch (p->dev->kdrv) {
+ case RTE_KDRV_NIC_UIO:
+ pci_uio_ioport_write(p, data, len, offset);
+ break;
+ default:
+ break;
+ }
+}
+
+int
+rte_pci_ioport_unmap(struct rte_pci_ioport *p)
+{
+ int ret;
+
+ switch (p->dev->kdrv) {
+#if defined(RTE_ARCH_X86)
+ case RTE_KDRV_NIC_UIO:
+ ret = 0;
+ break;
+#endif
+ default:
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
diff --git a/src/spdk/dpdk/drivers/bus/pci/linux/Makefile b/src/spdk/dpdk/drivers/bus/pci/linux/Makefile
new file mode 100644
index 00000000..96ea1d54
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/pci/linux/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 6WIND S.A.
+
+SRCS += pci.c
+SRCS += pci_uio.c
+SRCS += pci_vfio.c
+
+CFLAGS += -D_GNU_SOURCE
diff --git a/src/spdk/dpdk/drivers/bus/pci/linux/pci.c b/src/spdk/dpdk/drivers/bus/pci/linux/pci.c
new file mode 100644
index 00000000..daf087d3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/pci/linux/pci.c
@@ -0,0 +1,926 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#include <string.h>
+#include <dirent.h>
+
+#include <rte_log.h>
+#include <rte_bus.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_eal_memconfig.h>
+#include <rte_malloc.h>
+#include <rte_devargs.h>
+#include <rte_memcpy.h>
+#include <rte_vfio.h>
+
+#include "eal_filesystem.h"
+
+#include "private.h"
+#include "pci_init.h"
+
+/**
+ * @file
+ * PCI probing under linux
+ *
+ * This code is used to simulate a PCI probe by parsing information in sysfs.
+ * When a registered device matches a driver, it is initialized through the
+ * igb_uio driver (or is left uninitialized if the device was not bound
+ * to it).
+ */
+
+extern struct rte_pci_bus rte_pci_bus;
+
+static int
+pci_get_kernel_driver_by_path(const char *filename, char *dri_name,
+ size_t len)
+{
+ int count;
+ char path[PATH_MAX];
+ char *name;
+
+ if (!filename || !dri_name)
+ return -1;
+
+ count = readlink(filename, path, PATH_MAX);
+ if (count >= PATH_MAX)
+ return -1;
+
+	/* The device does not have a driver */
+ if (count < 0)
+ return 1;
+
+ path[count] = '\0';
+
+ name = strrchr(path, '/');
+ if (name) {
+ strlcpy(dri_name, name + 1, len);
+ return 0;
+ }
+
+ return -1;
+}
+
+/* Map pci device */
+int
+rte_pci_map_device(struct rte_pci_device *dev)
+{
+ int ret = -1;
+
+ /* try mapping the NIC resources using VFIO if it exists */
+ switch (dev->kdrv) {
+ case RTE_KDRV_VFIO:
+#ifdef VFIO_PRESENT
+ if (pci_vfio_is_enabled())
+ ret = pci_vfio_map_resource(dev);
+#endif
+ break;
+ case RTE_KDRV_IGB_UIO:
+ case RTE_KDRV_UIO_GENERIC:
+ if (rte_eal_using_phys_addrs()) {
+ /* map resources for devices that use uio */
+ ret = pci_uio_map_resource(dev);
+ }
+ break;
+ default:
+ RTE_LOG(DEBUG, EAL,
+ " Not managed by a supported kernel driver, skipped\n");
+ ret = 1;
+ break;
+ }
+
+ return ret;
+}
+
+/* Unmap pci device */
+void
+rte_pci_unmap_device(struct rte_pci_device *dev)
+{
+ /* try unmapping the NIC resources using VFIO if it exists */
+ switch (dev->kdrv) {
+ case RTE_KDRV_VFIO:
+#ifdef VFIO_PRESENT
+ if (pci_vfio_is_enabled())
+ pci_vfio_unmap_resource(dev);
+#endif
+ break;
+ case RTE_KDRV_IGB_UIO:
+ case RTE_KDRV_UIO_GENERIC:
+ /* unmap resources for devices that use uio */
+ pci_uio_unmap_resource(dev);
+ break;
+ default:
+ RTE_LOG(DEBUG, EAL,
+ " Not managed by a supported kernel driver, skipped\n");
+ break;
+ }
+}
+
+static int
+find_max_end_va(const struct rte_memseg_list *msl, void *arg)
+{
+ size_t sz = msl->memseg_arr.len * msl->page_sz;
+ void *end_va = RTE_PTR_ADD(msl->base_va, sz);
+ void **max_va = arg;
+
+ if (*max_va < end_va)
+ *max_va = end_va;
+ return 0;
+}
+
+void *
+pci_find_max_end_va(void)
+{
+ void *va = NULL;
+
+ rte_memseg_list_walk(find_max_end_va, &va);
+ return va;
+}
+
+
+/* parse one line of the "resource" sysfs file (note that the 'line'
+ * string is modified)
+ */
+int
+pci_parse_one_sysfs_resource(char *line, size_t len, uint64_t *phys_addr,
+ uint64_t *end_addr, uint64_t *flags)
+{
+ union pci_resource_info {
+ struct {
+ char *phys_addr;
+ char *end_addr;
+ char *flags;
+ };
+ char *ptrs[PCI_RESOURCE_FMT_NVAL];
+ } res_info;
+
+ if (rte_strsplit(line, len, res_info.ptrs, 3, ' ') != 3) {
+ RTE_LOG(ERR, EAL,
+ "%s(): bad resource format\n", __func__);
+ return -1;
+ }
+ errno = 0;
+ *phys_addr = strtoull(res_info.phys_addr, NULL, 16);
+ *end_addr = strtoull(res_info.end_addr, NULL, 16);
+ *flags = strtoull(res_info.flags, NULL, 16);
+ if (errno != 0) {
+ RTE_LOG(ERR, EAL,
+ "%s(): bad resource format\n", __func__);
+ return -1;
+ }
+
+ return 0;
+}
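+
+/*
+ * For reference, each line of the sysfs "resource" file carries three
+ * 64-bit hex fields (start address, end address, flags), e.g.:
+ *
+ *	0x00000000febc0000 0x00000000febc3fff 0x0000000000040200
+ */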
+
+/* parse the "resource" sysfs file */
+static int
+pci_parse_sysfs_resource(const char *filename, struct rte_pci_device *dev)
+{
+ FILE *f;
+ char buf[BUFSIZ];
+ int i;
+ uint64_t phys_addr, end_addr, flags;
+
+ f = fopen(filename, "r");
+ if (f == NULL) {
+ RTE_LOG(ERR, EAL, "Cannot open sysfs resource\n");
+ return -1;
+ }
+
+	for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+
+ if (fgets(buf, sizeof(buf), f) == NULL) {
+ RTE_LOG(ERR, EAL,
+ "%s(): cannot read resource\n", __func__);
+ goto error;
+ }
+ if (pci_parse_one_sysfs_resource(buf, sizeof(buf), &phys_addr,
+ &end_addr, &flags) < 0)
+ goto error;
+
+ if (flags & IORESOURCE_MEM) {
+ dev->mem_resource[i].phys_addr = phys_addr;
+ dev->mem_resource[i].len = end_addr - phys_addr + 1;
+ /* not mapped for now */
+ dev->mem_resource[i].addr = NULL;
+ }
+ }
+ fclose(f);
+ return 0;
+
+error:
+ fclose(f);
+ return -1;
+}
+
+/* Scan one pci sysfs entry, and fill the devices list from it. */
+static int
+pci_scan_one(const char *dirname, const struct rte_pci_addr *addr)
+{
+ char filename[PATH_MAX];
+ unsigned long tmp;
+ struct rte_pci_device *dev;
+ char driver[PATH_MAX];
+ int ret;
+
+ dev = malloc(sizeof(*dev));
+ if (dev == NULL)
+ return -1;
+
+ memset(dev, 0, sizeof(*dev));
+ dev->addr = *addr;
+
+ /* get vendor id */
+ snprintf(filename, sizeof(filename), "%s/vendor", dirname);
+ if (eal_parse_sysfs_value(filename, &tmp) < 0) {
+ free(dev);
+ return -1;
+ }
+ dev->id.vendor_id = (uint16_t)tmp;
+
+ /* get device id */
+ snprintf(filename, sizeof(filename), "%s/device", dirname);
+ if (eal_parse_sysfs_value(filename, &tmp) < 0) {
+ free(dev);
+ return -1;
+ }
+ dev->id.device_id = (uint16_t)tmp;
+
+ /* get subsystem_vendor id */
+ snprintf(filename, sizeof(filename), "%s/subsystem_vendor",
+ dirname);
+ if (eal_parse_sysfs_value(filename, &tmp) < 0) {
+ free(dev);
+ return -1;
+ }
+ dev->id.subsystem_vendor_id = (uint16_t)tmp;
+
+ /* get subsystem_device id */
+ snprintf(filename, sizeof(filename), "%s/subsystem_device",
+ dirname);
+ if (eal_parse_sysfs_value(filename, &tmp) < 0) {
+ free(dev);
+ return -1;
+ }
+ dev->id.subsystem_device_id = (uint16_t)tmp;
+
+ /* get class_id */
+ snprintf(filename, sizeof(filename), "%s/class",
+ dirname);
+ if (eal_parse_sysfs_value(filename, &tmp) < 0) {
+ free(dev);
+ return -1;
+ }
+	/* the least significant 24 bits are valid: class, subclass, program interface */
+ dev->id.class_id = (uint32_t)tmp & RTE_CLASS_ANY_ID;
+
+ /* get max_vfs */
+ dev->max_vfs = 0;
+ snprintf(filename, sizeof(filename), "%s/max_vfs", dirname);
+ if (!access(filename, F_OK) &&
+ eal_parse_sysfs_value(filename, &tmp) == 0)
+ dev->max_vfs = (uint16_t)tmp;
+ else {
+		/* for non-igb_uio drivers, kernel version >= 3.8 is needed */
+ snprintf(filename, sizeof(filename),
+ "%s/sriov_numvfs", dirname);
+ if (!access(filename, F_OK) &&
+ eal_parse_sysfs_value(filename, &tmp) == 0)
+ dev->max_vfs = (uint16_t)tmp;
+ }
+
+ /* get numa node, default to 0 if not present */
+ snprintf(filename, sizeof(filename), "%s/numa_node",
+ dirname);
+
+ if (access(filename, F_OK) != -1) {
+ if (eal_parse_sysfs_value(filename, &tmp) == 0)
+ dev->device.numa_node = tmp;
+ else
+ dev->device.numa_node = -1;
+ } else {
+ dev->device.numa_node = 0;
+ }
+
+ pci_name_set(dev);
+
+ /* parse resources */
+ snprintf(filename, sizeof(filename), "%s/resource", dirname);
+ if (pci_parse_sysfs_resource(filename, dev) < 0) {
+ RTE_LOG(ERR, EAL, "%s(): cannot parse resource\n", __func__);
+ free(dev);
+ return -1;
+ }
+
+ /* parse driver */
+ snprintf(filename, sizeof(filename), "%s/driver", dirname);
+ ret = pci_get_kernel_driver_by_path(filename, driver, sizeof(driver));
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "Fail to get kernel driver\n");
+ free(dev);
+ return -1;
+ }
+
+ if (!ret) {
+ if (!strcmp(driver, "vfio-pci"))
+ dev->kdrv = RTE_KDRV_VFIO;
+ else if (!strcmp(driver, "igb_uio"))
+ dev->kdrv = RTE_KDRV_IGB_UIO;
+ else if (!strcmp(driver, "uio_pci_generic"))
+ dev->kdrv = RTE_KDRV_UIO_GENERIC;
+ else
+ dev->kdrv = RTE_KDRV_UNKNOWN;
+ } else
+ dev->kdrv = RTE_KDRV_NONE;
+
+ /* device is valid, add in list (sorted) */
+ if (TAILQ_EMPTY(&rte_pci_bus.device_list)) {
+ rte_pci_add_device(dev);
+ } else {
+ struct rte_pci_device *dev2;
+ int ret;
+
+ TAILQ_FOREACH(dev2, &rte_pci_bus.device_list, next) {
+ ret = rte_pci_addr_cmp(&dev->addr, &dev2->addr);
+ if (ret > 0)
+ continue;
+
+ if (ret < 0) {
+ rte_pci_insert_device(dev2, dev);
+ } else { /* already registered */
+ if (dev2->driver == NULL) {
+ dev2->kdrv = dev->kdrv;
+ dev2->max_vfs = dev->max_vfs;
+ pci_name_set(dev2);
+ memmove(dev2->mem_resource,
+ dev->mem_resource,
+ sizeof(dev->mem_resource));
+ } else {
+				/*
+				 * If the device is plugged and its driver is
+				 * already probed, there is nothing to do here.
+				 * (This happens when rte_eal_hotplug_add is
+				 * called.)
+				 */
+				if (dev2->kdrv != dev->kdrv ||
+					dev2->max_vfs != dev->max_vfs)
+					/*
+					 * This should not happen, but it
+					 * is still possible if a device
+					 * is unbound from vfio or uio
+					 * before hotplug remove and then
+					 * rebound with a different
+					 * configuration. Print the error
+					 * as an alarm.
+					 */
+ RTE_LOG(ERR, EAL, "Unexpected device scan at %s!\n",
+ filename);
+ }
+ free(dev);
+ }
+ return 0;
+ }
+
+ rte_pci_add_device(dev);
+ }
+
+ return 0;
+}
+
+int
+pci_update_device(const struct rte_pci_addr *addr)
+{
+ char filename[PATH_MAX];
+
+ snprintf(filename, sizeof(filename), "%s/" PCI_PRI_FMT,
+ rte_pci_get_sysfs_path(), addr->domain, addr->bus, addr->devid,
+ addr->function);
+
+ return pci_scan_one(filename, addr);
+}
+
+/*
+ * split up a pci address into its constituent parts.
+ */
+static int
+parse_pci_addr_format(const char *buf, int bufsize, struct rte_pci_addr *addr)
+{
+ /* first split on ':' */
+ union splitaddr {
+ struct {
+ char *domain;
+ char *bus;
+ char *devid;
+ char *function;
+ };
+ char *str[PCI_FMT_NVAL]; /* last element-separator is "." not ":" */
+ } splitaddr;
+
+ char *buf_copy = strndup(buf, bufsize);
+ if (buf_copy == NULL)
+ return -1;
+
+ if (rte_strsplit(buf_copy, bufsize, splitaddr.str, PCI_FMT_NVAL, ':')
+ != PCI_FMT_NVAL - 1)
+ goto error;
+ /* final split is on '.' between devid and function */
+	splitaddr.function = strchr(splitaddr.devid, '.');
+ if (splitaddr.function == NULL)
+ goto error;
+ *splitaddr.function++ = '\0';
+
+ /* now convert to int values */
+ errno = 0;
+ addr->domain = strtoul(splitaddr.domain, NULL, 16);
+ addr->bus = strtoul(splitaddr.bus, NULL, 16);
+ addr->devid = strtoul(splitaddr.devid, NULL, 16);
+ addr->function = strtoul(splitaddr.function, NULL, 10);
+ if (errno != 0)
+ goto error;
+
+	free(buf_copy); /* free the copy made with strndup */
+ return 0;
+error:
+ free(buf_copy);
+ return -1;
+}
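+
+/*
+ * For example, "0000:81:00.0" is split into domain 0x0, bus 0x81,
+ * devid 0x0 and function 0 (the function number is parsed as decimal,
+ * the other fields as hex).
+ */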
+
+/*
+ * Scan the content of the PCI bus, and add the devices to the devices
+ * list
+ */
+int
+rte_pci_scan(void)
+{
+ struct dirent *e;
+ DIR *dir;
+ char dirname[PATH_MAX];
+ struct rte_pci_addr addr;
+
+ /* for debug purposes, PCI can be disabled */
+ if (!rte_eal_has_pci())
+ return 0;
+
+#ifdef VFIO_PRESENT
+ if (!pci_vfio_is_enabled())
+ RTE_LOG(DEBUG, EAL, "VFIO PCI modules not loaded\n");
+#endif
+
+ dir = opendir(rte_pci_get_sysfs_path());
+ if (dir == NULL) {
+ RTE_LOG(ERR, EAL, "%s(): opendir failed: %s\n",
+ __func__, strerror(errno));
+ return -1;
+ }
+
+ while ((e = readdir(dir)) != NULL) {
+ if (e->d_name[0] == '.')
+ continue;
+
+ if (parse_pci_addr_format(e->d_name, sizeof(e->d_name), &addr) != 0)
+ continue;
+
+ snprintf(dirname, sizeof(dirname), "%s/%s",
+ rte_pci_get_sysfs_path(), e->d_name);
+
+ if (pci_scan_one(dirname, &addr) < 0)
+ goto error;
+ }
+ closedir(dir);
+ return 0;
+
+error:
+ closedir(dir);
+ return -1;
+}
+
+/*
+ * Check whether any PCI device is bound to a kernel driver
+ */
+static inline int
+pci_one_device_is_bound(void)
+{
+ struct rte_pci_device *dev = NULL;
+ int ret = 0;
+
+ FOREACH_DEVICE_ON_PCIBUS(dev) {
+ if (dev->kdrv == RTE_KDRV_UNKNOWN ||
+ dev->kdrv == RTE_KDRV_NONE) {
+ continue;
+ } else {
+ ret = 1;
+ break;
+ }
+ }
+ return ret;
+}
+
+/*
+ * Check whether any device is bound to a UIO driver
+ */
+static inline int
+pci_one_device_bound_uio(void)
+{
+ struct rte_pci_device *dev = NULL;
+ struct rte_devargs *devargs;
+ int need_check;
+
+ FOREACH_DEVICE_ON_PCIBUS(dev) {
+ devargs = dev->device.devargs;
+
+ need_check = 0;
+ switch (rte_pci_bus.bus.conf.scan_mode) {
+ case RTE_BUS_SCAN_WHITELIST:
+ if (devargs && devargs->policy == RTE_DEV_WHITELISTED)
+ need_check = 1;
+ break;
+ case RTE_BUS_SCAN_UNDEFINED:
+ case RTE_BUS_SCAN_BLACKLIST:
+ if (devargs == NULL ||
+ devargs->policy != RTE_DEV_BLACKLISTED)
+ need_check = 1;
+ break;
+ }
+
+ if (!need_check)
+ continue;
+
+ if (dev->kdrv == RTE_KDRV_IGB_UIO ||
+ dev->kdrv == RTE_KDRV_UIO_GENERIC) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Does any device request IOVA as VA?
+ */
+static inline int
+pci_one_device_has_iova_va(void)
+{
+ struct rte_pci_device *dev = NULL;
+ struct rte_pci_driver *drv = NULL;
+
+ FOREACH_DRIVER_ON_PCIBUS(drv) {
+ if (drv && drv->drv_flags & RTE_PCI_DRV_IOVA_AS_VA) {
+ FOREACH_DEVICE_ON_PCIBUS(dev) {
+ if (dev->kdrv == RTE_KDRV_VFIO &&
+ rte_pci_match(drv, dev))
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+#if defined(RTE_ARCH_X86)
+static bool
+pci_one_device_iommu_support_va(struct rte_pci_device *dev)
+{
+#define VTD_CAP_MGAW_SHIFT 16
+#define VTD_CAP_MGAW_MASK (0x3fULL << VTD_CAP_MGAW_SHIFT)
+#define X86_VA_WIDTH 47 /* From Documentation/x86/x86_64/mm.txt */
+ struct rte_pci_addr *addr = &dev->addr;
+ char filename[PATH_MAX];
+ FILE *fp;
+ uint64_t mgaw, vtd_cap_reg = 0;
+
+ snprintf(filename, sizeof(filename),
+ "%s/" PCI_PRI_FMT "/iommu/intel-iommu/cap",
+ rte_pci_get_sysfs_path(), addr->domain, addr->bus, addr->devid,
+ addr->function);
+ if (access(filename, F_OK) == -1) {
+		/* We don't have an Intel IOMMU, assume VA is supported */
+ return true;
+ }
+
+	/* We have an Intel IOMMU */
+ fp = fopen(filename, "r");
+ if (fp == NULL) {
+ RTE_LOG(ERR, EAL, "%s(): can't open %s\n", __func__, filename);
+ return false;
+ }
+
+ if (fscanf(fp, "%" PRIx64, &vtd_cap_reg) != 1) {
+ RTE_LOG(ERR, EAL, "%s(): can't read %s\n", __func__, filename);
+ fclose(fp);
+ return false;
+ }
+
+ fclose(fp);
+
+ mgaw = ((vtd_cap_reg & VTD_CAP_MGAW_MASK) >> VTD_CAP_MGAW_SHIFT) + 1;
+ if (mgaw < X86_VA_WIDTH)
+ return false;
+
+ return true;
+}
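+
+/*
+ * Worked example of the MGAW computation above (register values are
+ * illustrative, not read from real hardware):
+ *   vtd_cap_reg = 0x00260000 -> mgaw = ((reg & MASK) >> 16) + 1
+ *                                    = 0x26 + 1 = 39 < 47 -> false
+ *   vtd_cap_reg = 0x003e0000 -> mgaw = 0x3e + 1 = 63 >= 47 -> true
+ */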
+#elif defined(RTE_ARCH_PPC_64)
+static bool
+pci_one_device_iommu_support_va(__rte_unused struct rte_pci_device *dev)
+{
+ return false;
+}
+#else
+static bool
+pci_one_device_iommu_support_va(__rte_unused struct rte_pci_device *dev)
+{
+ return true;
+}
+#endif
+
+/*
+ * Do the IOMMUs of all devices support VA as IOVA?
+ */
+static bool
+pci_devices_iommu_support_va(void)
+{
+ struct rte_pci_device *dev = NULL;
+ struct rte_pci_driver *drv = NULL;
+
+ FOREACH_DRIVER_ON_PCIBUS(drv) {
+ FOREACH_DEVICE_ON_PCIBUS(dev) {
+ if (!rte_pci_match(drv, dev))
+ continue;
+ if (!pci_one_device_iommu_support_va(dev))
+ return false;
+ }
+ }
+ return true;
+}
+
+/*
+ * Get iommu class of PCI devices on the bus.
+ */
+enum rte_iova_mode
+rte_pci_get_iommu_class(void)
+{
+ bool is_bound;
+ bool is_vfio_noiommu_enabled = true;
+ bool has_iova_va;
+ bool is_bound_uio;
+ bool iommu_no_va;
+
+ is_bound = pci_one_device_is_bound();
+ if (!is_bound)
+ return RTE_IOVA_DC;
+
+ has_iova_va = pci_one_device_has_iova_va();
+ is_bound_uio = pci_one_device_bound_uio();
+ iommu_no_va = !pci_devices_iommu_support_va();
+#ifdef VFIO_PRESENT
+	is_vfio_noiommu_enabled = (rte_vfio_noiommu_is_enabled() == true);
+#endif
+
+ if (has_iova_va && !is_bound_uio && !is_vfio_noiommu_enabled &&
+ !iommu_no_va)
+ return RTE_IOVA_VA;
+
+ if (has_iova_va) {
+		RTE_LOG(WARNING, EAL, "Some devices want IOVA as VA but PA will be used because: ");
+ if (is_vfio_noiommu_enabled)
+ RTE_LOG(WARNING, EAL, "vfio-noiommu mode configured\n");
+ if (is_bound_uio)
+			RTE_LOG(WARNING, EAL, "some devices are bound to UIO\n");
+ if (iommu_no_va)
+ RTE_LOG(WARNING, EAL, "IOMMU does not support IOVA as VA\n");
+ }
+
+ return RTE_IOVA_PA;
+}
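+
+/*
+ * Compact restatement of the decision above (a sketch of the same
+ * logic, no additional behavior):
+ *   no device bound at all                       -> RTE_IOVA_DC
+ *   has_iova_va && !is_bound_uio &&
+ *   !is_vfio_noiommu_enabled && !iommu_no_va     -> RTE_IOVA_VA
+ *   anything else                                -> RTE_IOVA_PA
+ */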
+
+/* Read PCI config space. */
+int rte_pci_read_config(const struct rte_pci_device *device,
+ void *buf, size_t len, off_t offset)
+{
+ const struct rte_intr_handle *intr_handle = &device->intr_handle;
+
+ switch (intr_handle->type) {
+ case RTE_INTR_HANDLE_UIO:
+ case RTE_INTR_HANDLE_UIO_INTX:
+ return pci_uio_read_config(intr_handle, buf, len, offset);
+
+#ifdef VFIO_PRESENT
+ case RTE_INTR_HANDLE_VFIO_MSIX:
+ case RTE_INTR_HANDLE_VFIO_MSI:
+ case RTE_INTR_HANDLE_VFIO_LEGACY:
+ return pci_vfio_read_config(intr_handle, buf, len, offset);
+#endif
+ default:
+ RTE_LOG(ERR, EAL,
+ "Unknown handle type of fd %d\n",
+ intr_handle->fd);
+ return -1;
+ }
+}
+
+/* Write PCI config space. */
+int rte_pci_write_config(const struct rte_pci_device *device,
+ const void *buf, size_t len, off_t offset)
+{
+ const struct rte_intr_handle *intr_handle = &device->intr_handle;
+
+ switch (intr_handle->type) {
+ case RTE_INTR_HANDLE_UIO:
+ case RTE_INTR_HANDLE_UIO_INTX:
+ return pci_uio_write_config(intr_handle, buf, len, offset);
+
+#ifdef VFIO_PRESENT
+ case RTE_INTR_HANDLE_VFIO_MSIX:
+ case RTE_INTR_HANDLE_VFIO_MSI:
+ case RTE_INTR_HANDLE_VFIO_LEGACY:
+ return pci_vfio_write_config(intr_handle, buf, len, offset);
+#endif
+ default:
+ RTE_LOG(ERR, EAL,
+ "Unknown handle type of fd %d\n",
+ intr_handle->fd);
+ return -1;
+ }
+}
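+
+/*
+ * A hedged usage sketch of the config-space accessors above (the helper
+ * itself is illustrative; "dev" is assumed to be a probed device): read
+ * the 16-bit vendor ID at offset 0 of the config space.
+ */
+static __rte_unused void
+example_read_vendor_id(const struct rte_pci_device *dev)
+{
+	uint16_t vendor_id;
+
+	if (rte_pci_read_config(dev, &vendor_id, sizeof(vendor_id), 0) ==
+			(int)sizeof(vendor_id))
+		RTE_LOG(DEBUG, EAL, "vendor: 0x%04x\n", vendor_id);
+}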
+
+#if defined(RTE_ARCH_X86)
+static int
+pci_ioport_map(struct rte_pci_device *dev, int bar __rte_unused,
+ struct rte_pci_ioport *p)
+{
+ uint16_t start, end;
+ FILE *fp;
+ char *line = NULL;
+ char pci_id[16];
+ int found = 0;
+ size_t linesz;
+
+ snprintf(pci_id, sizeof(pci_id), PCI_PRI_FMT,
+ dev->addr.domain, dev->addr.bus,
+ dev->addr.devid, dev->addr.function);
+
+ fp = fopen("/proc/ioports", "r");
+ if (fp == NULL) {
+ RTE_LOG(ERR, EAL, "%s(): can't open ioports\n", __func__);
+ return -1;
+ }
+
+ while (getdelim(&line, &linesz, '\n', fp) > 0) {
+ char *ptr = line;
+ char *left;
+ int n;
+
+ n = strcspn(ptr, ":");
+ ptr[n] = 0;
+ left = &ptr[n + 1];
+
+ while (*left && isspace(*left))
+ left++;
+
+ if (!strncmp(left, pci_id, strlen(pci_id))) {
+ found = 1;
+
+ while (*ptr && isspace(*ptr))
+ ptr++;
+
+ sscanf(ptr, "%04hx-%04hx", &start, &end);
+
+ break;
+ }
+ }
+
+ free(line);
+ fclose(fp);
+
+ if (!found)
+ return -1;
+
+ p->base = start;
+ RTE_LOG(DEBUG, EAL, "PCI Port IO found start=0x%x\n", start);
+
+ return 0;
+}
+#endif
+
+int
+rte_pci_ioport_map(struct rte_pci_device *dev, int bar,
+ struct rte_pci_ioport *p)
+{
+ int ret = -1;
+
+ switch (dev->kdrv) {
+#ifdef VFIO_PRESENT
+ case RTE_KDRV_VFIO:
+ if (pci_vfio_is_enabled())
+ ret = pci_vfio_ioport_map(dev, bar, p);
+ break;
+#endif
+ case RTE_KDRV_IGB_UIO:
+ ret = pci_uio_ioport_map(dev, bar, p);
+ break;
+ case RTE_KDRV_UIO_GENERIC:
+#if defined(RTE_ARCH_X86)
+ ret = pci_ioport_map(dev, bar, p);
+#else
+ ret = pci_uio_ioport_map(dev, bar, p);
+#endif
+ break;
+ case RTE_KDRV_NONE:
+#if defined(RTE_ARCH_X86)
+ ret = pci_ioport_map(dev, bar, p);
+#endif
+ break;
+ default:
+ break;
+ }
+
+ if (!ret)
+ p->dev = dev;
+
+ return ret;
+}
+
+void
+rte_pci_ioport_read(struct rte_pci_ioport *p,
+ void *data, size_t len, off_t offset)
+{
+ switch (p->dev->kdrv) {
+#ifdef VFIO_PRESENT
+ case RTE_KDRV_VFIO:
+ pci_vfio_ioport_read(p, data, len, offset);
+ break;
+#endif
+ case RTE_KDRV_IGB_UIO:
+ pci_uio_ioport_read(p, data, len, offset);
+ break;
+ case RTE_KDRV_UIO_GENERIC:
+ pci_uio_ioport_read(p, data, len, offset);
+ break;
+ case RTE_KDRV_NONE:
+#if defined(RTE_ARCH_X86)
+ pci_uio_ioport_read(p, data, len, offset);
+#endif
+ break;
+ default:
+ break;
+ }
+}
+
+void
+rte_pci_ioport_write(struct rte_pci_ioport *p,
+ const void *data, size_t len, off_t offset)
+{
+ switch (p->dev->kdrv) {
+#ifdef VFIO_PRESENT
+ case RTE_KDRV_VFIO:
+ pci_vfio_ioport_write(p, data, len, offset);
+ break;
+#endif
+ case RTE_KDRV_IGB_UIO:
+ pci_uio_ioport_write(p, data, len, offset);
+ break;
+ case RTE_KDRV_UIO_GENERIC:
+ pci_uio_ioport_write(p, data, len, offset);
+ break;
+ case RTE_KDRV_NONE:
+#if defined(RTE_ARCH_X86)
+ pci_uio_ioport_write(p, data, len, offset);
+#endif
+ break;
+ default:
+ break;
+ }
+}
+
+int
+rte_pci_ioport_unmap(struct rte_pci_ioport *p)
+{
+ int ret = -1;
+
+ switch (p->dev->kdrv) {
+#ifdef VFIO_PRESENT
+ case RTE_KDRV_VFIO:
+ if (pci_vfio_is_enabled())
+ ret = pci_vfio_ioport_unmap(p);
+ break;
+#endif
+ case RTE_KDRV_IGB_UIO:
+ ret = pci_uio_ioport_unmap(p);
+ break;
+ case RTE_KDRV_UIO_GENERIC:
+#if defined(RTE_ARCH_X86)
+ ret = 0;
+#else
+ ret = pci_uio_ioport_unmap(p);
+#endif
+ break;
+ case RTE_KDRV_NONE:
+#if defined(RTE_ARCH_X86)
+ ret = 0;
+#endif
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
diff --git a/src/spdk/dpdk/drivers/bus/pci/linux/pci_init.h b/src/spdk/dpdk/drivers/bus/pci/linux/pci_init.h
new file mode 100644
index 00000000..c2e603a3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/pci/linux/pci_init.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#ifndef EAL_PCI_INIT_H_
+#define EAL_PCI_INIT_H_
+
+#include <rte_vfio.h>
+
+/** IO resource type: */
+#define IORESOURCE_IO 0x00000100
+#define IORESOURCE_MEM 0x00000200
+
+/*
+ * Helper function to map PCI resources right after hugepages in virtual memory
+ */
+extern void *pci_map_addr;
+void *pci_find_max_end_va(void);
+
+/* parse one line of the "resource" sysfs file (note that the 'line'
+ * string is modified)
+ */
+int pci_parse_one_sysfs_resource(char *line, size_t len, uint64_t *phys_addr,
+ uint64_t *end_addr, uint64_t *flags);
+
+int pci_uio_alloc_resource(struct rte_pci_device *dev,
+ struct mapped_pci_resource **uio_res);
+void pci_uio_free_resource(struct rte_pci_device *dev,
+ struct mapped_pci_resource *uio_res);
+int pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx,
+ struct mapped_pci_resource *uio_res, int map_idx);
+
+int pci_uio_read_config(const struct rte_intr_handle *intr_handle,
+ void *buf, size_t len, off_t offs);
+int pci_uio_write_config(const struct rte_intr_handle *intr_handle,
+ const void *buf, size_t len, off_t offs);
+
+int pci_uio_ioport_map(struct rte_pci_device *dev, int bar,
+ struct rte_pci_ioport *p);
+void pci_uio_ioport_read(struct rte_pci_ioport *p,
+ void *data, size_t len, off_t offset);
+void pci_uio_ioport_write(struct rte_pci_ioport *p,
+ const void *data, size_t len, off_t offset);
+int pci_uio_ioport_unmap(struct rte_pci_ioport *p);
+
+#ifdef VFIO_PRESENT
+
+#ifdef PCI_MSIX_TABLE_BIR
+#define RTE_PCI_MSIX_TABLE_BIR PCI_MSIX_TABLE_BIR
+#else
+#define RTE_PCI_MSIX_TABLE_BIR 0x7
+#endif
+
+#ifdef PCI_MSIX_TABLE_OFFSET
+#define RTE_PCI_MSIX_TABLE_OFFSET PCI_MSIX_TABLE_OFFSET
+#else
+#define RTE_PCI_MSIX_TABLE_OFFSET 0xfffffff8
+#endif
+
+#ifdef PCI_MSIX_FLAGS_QSIZE
+#define RTE_PCI_MSIX_FLAGS_QSIZE PCI_MSIX_FLAGS_QSIZE
+#else
+#define RTE_PCI_MSIX_FLAGS_QSIZE 0x07ff
+#endif
+
+/* access config space */
+int pci_vfio_read_config(const struct rte_intr_handle *intr_handle,
+ void *buf, size_t len, off_t offs);
+int pci_vfio_write_config(const struct rte_intr_handle *intr_handle,
+ const void *buf, size_t len, off_t offs);
+
+int pci_vfio_ioport_map(struct rte_pci_device *dev, int bar,
+ struct rte_pci_ioport *p);
+void pci_vfio_ioport_read(struct rte_pci_ioport *p,
+ void *data, size_t len, off_t offset);
+void pci_vfio_ioport_write(struct rte_pci_ioport *p,
+ const void *data, size_t len, off_t offset);
+int pci_vfio_ioport_unmap(struct rte_pci_ioport *p);
+
+/* map/unmap VFIO resource prototype */
+int pci_vfio_map_resource(struct rte_pci_device *dev);
+int pci_vfio_unmap_resource(struct rte_pci_device *dev);
+
+int pci_vfio_is_enabled(void);
+
+#endif
+
+#endif /* EAL_PCI_INIT_H_ */
diff --git a/src/spdk/dpdk/drivers/bus/pci/linux/pci_uio.c b/src/spdk/dpdk/drivers/bus/pci/linux/pci_uio.c
new file mode 100644
index 00000000..a7c14421
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/pci/linux/pci_uio.c
@@ -0,0 +1,562 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <dirent.h>
+#include <inttypes.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <sys/sysmacros.h>
+#include <linux/pci_regs.h>
+
+#if defined(RTE_ARCH_X86)
+#include <sys/io.h>
+#endif
+
+#include <rte_log.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_eal_memconfig.h>
+#include <rte_common.h>
+#include <rte_malloc.h>
+
+#include "eal_filesystem.h"
+#include "pci_init.h"
+
+void *pci_map_addr = NULL;
+
+#define OFF_MAX ((uint64_t)(off_t)-1)
+
+int
+pci_uio_read_config(const struct rte_intr_handle *intr_handle,
+ void *buf, size_t len, off_t offset)
+{
+ return pread(intr_handle->uio_cfg_fd, buf, len, offset);
+}
+
+int
+pci_uio_write_config(const struct rte_intr_handle *intr_handle,
+ const void *buf, size_t len, off_t offset)
+{
+ return pwrite(intr_handle->uio_cfg_fd, buf, len, offset);
+}
+
+static int
+pci_uio_set_bus_master(int dev_fd)
+{
+ uint16_t reg;
+ int ret;
+
+ ret = pread(dev_fd, &reg, sizeof(reg), PCI_COMMAND);
+ if (ret != sizeof(reg)) {
+ RTE_LOG(ERR, EAL,
+ "Cannot read command from PCI config space!\n");
+ return -1;
+ }
+
+ /* return if bus mastering is already on */
+ if (reg & PCI_COMMAND_MASTER)
+ return 0;
+
+ reg |= PCI_COMMAND_MASTER;
+
+ ret = pwrite(dev_fd, &reg, sizeof(reg), PCI_COMMAND);
+ if (ret != sizeof(reg)) {
+ RTE_LOG(ERR, EAL,
+ "Cannot write command to PCI config space!\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+pci_mknod_uio_dev(const char *sysfs_uio_path, unsigned uio_num)
+{
+ FILE *f;
+ char filename[PATH_MAX];
+ int ret;
+ unsigned major, minor;
+ dev_t dev;
+
+ /* get the name of the sysfs file that contains the major and minor
+ * of the uio device and read its content */
+ snprintf(filename, sizeof(filename), "%s/dev", sysfs_uio_path);
+
+ f = fopen(filename, "r");
+ if (f == NULL) {
+ RTE_LOG(ERR, EAL, "%s(): cannot open sysfs to get major:minor\n",
+ __func__);
+ return -1;
+ }
+
+ ret = fscanf(f, "%u:%u", &major, &minor);
+ if (ret != 2) {
+ RTE_LOG(ERR, EAL, "%s(): cannot parse sysfs to get major:minor\n",
+ __func__);
+ fclose(f);
+ return -1;
+ }
+ fclose(f);
+
+ /* create the char device "mknod /dev/uioX c major minor" */
+ snprintf(filename, sizeof(filename), "/dev/uio%u", uio_num);
+ dev = makedev(major, minor);
+ ret = mknod(filename, S_IFCHR | S_IRUSR | S_IWUSR, dev);
+ if (ret != 0) {
+ RTE_LOG(ERR, EAL, "%s(): mknod() failed %s\n",
+ __func__, strerror(errno));
+ return -1;
+ }
+
+ return ret;
+}
+
+/*
+ * Return the uioX char device used for a pci device. On success, return
+ * the UIO number and fill dstbuf string with the path of the device in
+ * sysfs. On error, return a negative value. In this case dstbuf is
+ * invalid.
+ */
+static int
+pci_get_uio_dev(struct rte_pci_device *dev, char *dstbuf,
+ unsigned int buflen, int create)
+{
+ struct rte_pci_addr *loc = &dev->addr;
+ int uio_num = -1;
+ struct dirent *e;
+ DIR *dir;
+ char dirname[PATH_MAX];
+
+ /* depending on kernel version, uio can be located in uio/uioX
+ * or uio:uioX */
+
+ snprintf(dirname, sizeof(dirname),
+ "%s/" PCI_PRI_FMT "/uio", rte_pci_get_sysfs_path(),
+ loc->domain, loc->bus, loc->devid, loc->function);
+
+ dir = opendir(dirname);
+ if (dir == NULL) {
+ /* retry with the parent directory */
+ snprintf(dirname, sizeof(dirname),
+ "%s/" PCI_PRI_FMT, rte_pci_get_sysfs_path(),
+ loc->domain, loc->bus, loc->devid, loc->function);
+ dir = opendir(dirname);
+
+ if (dir == NULL) {
+ RTE_LOG(ERR, EAL, "Cannot opendir %s\n", dirname);
+ return -1;
+ }
+ }
+
+ /* take the first file starting with "uio" */
+ while ((e = readdir(dir)) != NULL) {
+		/* format could be uio%d ... */
+ int shortprefix_len = sizeof("uio") - 1;
+ /* ... or uio:uio%d */
+ int longprefix_len = sizeof("uio:uio") - 1;
+ char *endptr;
+
+ if (strncmp(e->d_name, "uio", 3) != 0)
+ continue;
+
+ /* first try uio%d */
+ errno = 0;
+ uio_num = strtoull(e->d_name + shortprefix_len, &endptr, 10);
+ if (errno == 0 && endptr != (e->d_name + shortprefix_len)) {
+ snprintf(dstbuf, buflen, "%s/uio%u", dirname, uio_num);
+ break;
+ }
+
+ /* then try uio:uio%d */
+ errno = 0;
+ uio_num = strtoull(e->d_name + longprefix_len, &endptr, 10);
+ if (errno == 0 && endptr != (e->d_name + longprefix_len)) {
+ snprintf(dstbuf, buflen, "%s/uio:uio%u", dirname, uio_num);
+ break;
+ }
+ }
+ closedir(dir);
+
+ /* No uio resource found */
+ if (e == NULL)
+ return -1;
+
+ /* create uio device if we've been asked to */
+ if (rte_eal_create_uio_dev() && create &&
+ pci_mknod_uio_dev(dstbuf, uio_num) < 0)
+ RTE_LOG(WARNING, EAL, "Cannot create /dev/uio%u\n", uio_num);
+
+ return uio_num;
+}
+
+void
+pci_uio_free_resource(struct rte_pci_device *dev,
+ struct mapped_pci_resource *uio_res)
+{
+ rte_free(uio_res);
+
+ if (dev->intr_handle.uio_cfg_fd >= 0) {
+ close(dev->intr_handle.uio_cfg_fd);
+ dev->intr_handle.uio_cfg_fd = -1;
+ }
+ if (dev->intr_handle.fd >= 0) {
+ close(dev->intr_handle.fd);
+ dev->intr_handle.fd = -1;
+ dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+ }
+}
+
+int
+pci_uio_alloc_resource(struct rte_pci_device *dev,
+ struct mapped_pci_resource **uio_res)
+{
+ char dirname[PATH_MAX];
+ char cfgname[PATH_MAX];
+ char devname[PATH_MAX]; /* contains the /dev/uioX */
+ int uio_num;
+ struct rte_pci_addr *loc;
+
+ loc = &dev->addr;
+
+ /* find uio resource */
+ uio_num = pci_get_uio_dev(dev, dirname, sizeof(dirname), 1);
+ if (uio_num < 0) {
+ RTE_LOG(WARNING, EAL, " "PCI_PRI_FMT" not managed by UIO driver, "
+ "skipping\n", loc->domain, loc->bus, loc->devid, loc->function);
+ return 1;
+ }
+ snprintf(devname, sizeof(devname), "/dev/uio%u", uio_num);
+
+ /* save fd if in primary process */
+ dev->intr_handle.fd = open(devname, O_RDWR);
+ if (dev->intr_handle.fd < 0) {
+ RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
+ devname, strerror(errno));
+ goto error;
+ }
+
+ snprintf(cfgname, sizeof(cfgname),
+ "/sys/class/uio/uio%u/device/config", uio_num);
+ dev->intr_handle.uio_cfg_fd = open(cfgname, O_RDWR);
+ if (dev->intr_handle.uio_cfg_fd < 0) {
+ RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
+ cfgname, strerror(errno));
+ goto error;
+ }
+
+ if (dev->kdrv == RTE_KDRV_IGB_UIO)
+ dev->intr_handle.type = RTE_INTR_HANDLE_UIO;
+ else {
+ dev->intr_handle.type = RTE_INTR_HANDLE_UIO_INTX;
+
+ /* set bus master that is not done by uio_pci_generic */
+ if (pci_uio_set_bus_master(dev->intr_handle.uio_cfg_fd)) {
+ RTE_LOG(ERR, EAL, "Cannot set up bus mastering!\n");
+ goto error;
+ }
+ }
+
+	/* allocate the mapping details for secondary processes */
+ *uio_res = rte_zmalloc("UIO_RES", sizeof(**uio_res), 0);
+ if (*uio_res == NULL) {
+ RTE_LOG(ERR, EAL,
+ "%s(): cannot store uio mmap details\n", __func__);
+ goto error;
+ }
+
+ snprintf((*uio_res)->path, sizeof((*uio_res)->path), "%s", devname);
+ memcpy(&(*uio_res)->pci_addr, &dev->addr, sizeof((*uio_res)->pci_addr));
+
+ return 0;
+
+error:
+ pci_uio_free_resource(dev, *uio_res);
+ return -1;
+}
+
+int
+pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx,
+ struct mapped_pci_resource *uio_res, int map_idx)
+{
+ int fd = -1;
+ char devname[PATH_MAX];
+ void *mapaddr;
+ struct rte_pci_addr *loc;
+ struct pci_map *maps;
+ int wc_activate = 0;
+
+ if (dev->driver != NULL)
+ wc_activate = dev->driver->drv_flags & RTE_PCI_DRV_WC_ACTIVATE;
+
+ loc = &dev->addr;
+ maps = uio_res->maps;
+
+	/* allocate memory to keep the path; devname is only filled in
+	 * below, so size the buffer to its maximum possible length
+	 */
+	maps[map_idx].path = rte_malloc(NULL, sizeof(devname), 0);
+ if (maps[map_idx].path == NULL) {
+ RTE_LOG(ERR, EAL, "Cannot allocate memory for path: %s\n",
+ strerror(errno));
+ return -1;
+ }
+
+	/*
+	 * open the resource file so we can mmap it
+	 */
+ if (wc_activate) {
+ /* update devname for mmap */
+ snprintf(devname, sizeof(devname),
+ "%s/" PCI_PRI_FMT "/resource%d_wc",
+ rte_pci_get_sysfs_path(),
+ loc->domain, loc->bus, loc->devid,
+ loc->function, res_idx);
+
+ if (access(devname, R_OK|W_OK) != -1) {
+ fd = open(devname, O_RDWR);
+ if (fd < 0)
+				RTE_LOG(INFO, EAL, "%s cannot be mapped. "
+					"Falling back to non-prefetchable mode.\n",
+					devname);
+ }
+ }
+
+ if (!wc_activate || fd < 0) {
+ snprintf(devname, sizeof(devname),
+ "%s/" PCI_PRI_FMT "/resource%d",
+ rte_pci_get_sysfs_path(),
+ loc->domain, loc->bus, loc->devid,
+ loc->function, res_idx);
+
+ /* then try to map resource file */
+ fd = open(devname, O_RDWR);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
+ devname, strerror(errno));
+ goto error;
+ }
+ }
+
+ /* try mapping somewhere close to the end of hugepages */
+ if (pci_map_addr == NULL)
+ pci_map_addr = pci_find_max_end_va();
+
+ mapaddr = pci_map_resource(pci_map_addr, fd, 0,
+ (size_t)dev->mem_resource[res_idx].len, 0);
+ close(fd);
+ if (mapaddr == MAP_FAILED)
+ goto error;
+
+ pci_map_addr = RTE_PTR_ADD(mapaddr,
+ (size_t)dev->mem_resource[res_idx].len);
+
+ maps[map_idx].phaddr = dev->mem_resource[res_idx].phys_addr;
+ maps[map_idx].size = dev->mem_resource[res_idx].len;
+ maps[map_idx].addr = mapaddr;
+ maps[map_idx].offset = 0;
+ strcpy(maps[map_idx].path, devname);
+ dev->mem_resource[res_idx].addr = mapaddr;
+
+ return 0;
+
+error:
+ rte_free(maps[map_idx].path);
+ return -1;
+}
+
+#if defined(RTE_ARCH_X86)
+int
+pci_uio_ioport_map(struct rte_pci_device *dev, int bar,
+ struct rte_pci_ioport *p)
+{
+ char dirname[PATH_MAX];
+ char filename[PATH_MAX];
+ int uio_num;
+ unsigned long start;
+
+ uio_num = pci_get_uio_dev(dev, dirname, sizeof(dirname), 0);
+ if (uio_num < 0)
+ return -1;
+
+ /* get portio start */
+ snprintf(filename, sizeof(filename),
+ "%s/portio/port%d/start", dirname, bar);
+ if (eal_parse_sysfs_value(filename, &start) < 0) {
+ RTE_LOG(ERR, EAL, "%s(): cannot parse portio start\n",
+ __func__);
+ return -1;
+ }
+	/* ensure we don't get anything funny here, read/write will cast to
+	 * uint16_t */
+ if (start > UINT16_MAX)
+ return -1;
+
+ /* FIXME only for primary process ? */
+ if (dev->intr_handle.type == RTE_INTR_HANDLE_UNKNOWN) {
+
+ snprintf(filename, sizeof(filename), "/dev/uio%u", uio_num);
+ dev->intr_handle.fd = open(filename, O_RDWR);
+ if (dev->intr_handle.fd < 0) {
+ RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
+ filename, strerror(errno));
+ return -1;
+ }
+ dev->intr_handle.type = RTE_INTR_HANDLE_UIO;
+ }
+
+ RTE_LOG(DEBUG, EAL, "PCI Port IO found start=0x%lx\n", start);
+
+ p->base = start;
+ p->len = 0;
+ return 0;
+}
+#else
+int
+pci_uio_ioport_map(struct rte_pci_device *dev, int bar,
+ struct rte_pci_ioport *p)
+{
+ FILE *f;
+ char buf[BUFSIZ];
+ char filename[PATH_MAX];
+ uint64_t phys_addr, end_addr, flags;
+ int fd, i;
+ void *addr;
+
+ /* open and read addresses of the corresponding resource in sysfs */
+ snprintf(filename, sizeof(filename), "%s/" PCI_PRI_FMT "/resource",
+ rte_pci_get_sysfs_path(), dev->addr.domain, dev->addr.bus,
+ dev->addr.devid, dev->addr.function);
+ f = fopen(filename, "r");
+ if (f == NULL) {
+ RTE_LOG(ERR, EAL, "Cannot open sysfs resource: %s\n",
+ strerror(errno));
+ return -1;
+ }
+ for (i = 0; i < bar + 1; i++) {
+ if (fgets(buf, sizeof(buf), f) == NULL) {
+ RTE_LOG(ERR, EAL, "Cannot read sysfs resource\n");
+ goto error;
+ }
+ }
+ if (pci_parse_one_sysfs_resource(buf, sizeof(buf), &phys_addr,
+ &end_addr, &flags) < 0)
+ goto error;
+ if ((flags & IORESOURCE_IO) == 0) {
+ RTE_LOG(ERR, EAL, "BAR %d is not an IO resource\n", bar);
+ goto error;
+ }
+ snprintf(filename, sizeof(filename), "%s/" PCI_PRI_FMT "/resource%d",
+ rte_pci_get_sysfs_path(), dev->addr.domain, dev->addr.bus,
+ dev->addr.devid, dev->addr.function, bar);
+
+ /* mmap the pci resource */
+ fd = open(filename, O_RDWR);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "Cannot open %s: %s\n", filename,
+ strerror(errno));
+ goto error;
+ }
+ addr = mmap(NULL, end_addr + 1, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, 0);
+ close(fd);
+ if (addr == MAP_FAILED) {
+ RTE_LOG(ERR, EAL, "Cannot mmap IO port resource: %s\n",
+ strerror(errno));
+ goto error;
+ }
+
+ /* strangely, the base address is mmap addr + phys_addr */
+ p->base = (uintptr_t)addr + phys_addr;
+ p->len = end_addr + 1;
+ RTE_LOG(DEBUG, EAL, "PCI Port IO found start=0x%"PRIx64"\n", p->base);
+ fclose(f);
+
+ return 0;
+
+error:
+ fclose(f);
+ return -1;
+}
+#endif
+
+void
+pci_uio_ioport_read(struct rte_pci_ioport *p,
+ void *data, size_t len, off_t offset)
+{
+ uint8_t *d;
+ int size;
+ uintptr_t reg = p->base + offset;
+
+ for (d = data; len > 0; d += size, reg += size, len -= size) {
+ if (len >= 4) {
+ size = 4;
+#if defined(RTE_ARCH_X86)
+ *(uint32_t *)d = inl(reg);
+#else
+ *(uint32_t *)d = *(volatile uint32_t *)reg;
+#endif
+ } else if (len >= 2) {
+ size = 2;
+#if defined(RTE_ARCH_X86)
+ *(uint16_t *)d = inw(reg);
+#else
+ *(uint16_t *)d = *(volatile uint16_t *)reg;
+#endif
+ } else {
+ size = 1;
+#if defined(RTE_ARCH_X86)
+ *d = inb(reg);
+#else
+ *d = *(volatile uint8_t *)reg;
+#endif
+ }
+ }
+}
+
+void
+pci_uio_ioport_write(struct rte_pci_ioport *p,
+ const void *data, size_t len, off_t offset)
+{
+ const uint8_t *s;
+ int size;
+ uintptr_t reg = p->base + offset;
+
+ for (s = data; len > 0; s += size, reg += size, len -= size) {
+ if (len >= 4) {
+ size = 4;
+#if defined(RTE_ARCH_X86)
+ outl_p(*(const uint32_t *)s, reg);
+#else
+ *(volatile uint32_t *)reg = *(const uint32_t *)s;
+#endif
+ } else if (len >= 2) {
+ size = 2;
+#if defined(RTE_ARCH_X86)
+ outw_p(*(const uint16_t *)s, reg);
+#else
+ *(volatile uint16_t *)reg = *(const uint16_t *)s;
+#endif
+ } else {
+ size = 1;
+#if defined(RTE_ARCH_X86)
+ outb_p(*s, reg);
+#else
+ *(volatile uint8_t *)reg = *s;
+#endif
+ }
+ }
+}
+
+int
+pci_uio_ioport_unmap(struct rte_pci_ioport *p)
+{
+#if defined(RTE_ARCH_X86)
+ RTE_SET_USED(p);
+ /* FIXME close intr fd ? */
+ return 0;
+#else
+ return munmap((void *)(uintptr_t)p->base, p->len);
+#endif
+}
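+
+/*
+ * A hedged sketch of the ioport API these helpers back (the helper and
+ * the BAR number are illustrative): map a BAR, bounce a 32-bit value at
+ * offset 0, then unmap.
+ */
+static __rte_unused void
+example_ioport_roundtrip(struct rte_pci_device *dev)
+{
+	struct rte_pci_ioport io;
+	uint32_t val;
+
+	if (rte_pci_ioport_map(dev, 0 /* BAR, illustrative */, &io) == 0) {
+		rte_pci_ioport_read(&io, &val, sizeof(val), 0);
+		rte_pci_ioport_write(&io, &val, sizeof(val), 0);
+		rte_pci_ioport_unmap(&io);
+	}
+}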
diff --git a/src/spdk/dpdk/drivers/bus/pci/linux/pci_vfio.c b/src/spdk/dpdk/drivers/bus/pci/linux/pci_vfio.c
new file mode 100644
index 00000000..686386d6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/pci/linux/pci_vfio.c
@@ -0,0 +1,794 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#include <string.h>
+#include <fcntl.h>
+#include <linux/pci_regs.h>
+#include <sys/eventfd.h>
+#include <sys/socket.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <stdbool.h>
+
+#include <rte_log.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_eal_memconfig.h>
+#include <rte_malloc.h>
+#include <rte_vfio.h>
+
+#include "eal_filesystem.h"
+
+#include "pci_init.h"
+#include "private.h"
+
+/**
+ * @file
+ * PCI probing under Linux (VFIO version)
+ *
+ * This code tries to determine if the PCI device is bound to VFIO driver,
+ * and initialize it (map BARs, set up interrupts) if that's the case.
+ *
+ * This file is only compiled if CONFIG_RTE_EAL_VFIO is set to "y".
+ */
+
+#ifdef VFIO_PRESENT
+
+#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
+#define PAGE_MASK (~(PAGE_SIZE - 1))
+
+static struct rte_tailq_elem rte_vfio_tailq = {
+ .name = "VFIO_RESOURCE_LIST",
+};
+EAL_REGISTER_TAILQ(rte_vfio_tailq)
+
+int
+pci_vfio_read_config(const struct rte_intr_handle *intr_handle,
+ void *buf, size_t len, off_t offs)
+{
+ return pread64(intr_handle->vfio_dev_fd, buf, len,
+ VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + offs);
+}
+
+int
+pci_vfio_write_config(const struct rte_intr_handle *intr_handle,
+ const void *buf, size_t len, off_t offs)
+{
+ return pwrite64(intr_handle->vfio_dev_fd, buf, len,
+ VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + offs);
+}
+
+/* get PCI BAR number where MSI-X interrupts are */
+static int
+pci_vfio_get_msix_bar(int fd, struct pci_msix_table *msix_table)
+{
+ int ret;
+ uint32_t reg;
+ uint16_t flags;
+ uint8_t cap_id, cap_offset;
+
+ /* read PCI capability pointer from config space */
+ ret = pread64(fd, &reg, sizeof(reg),
+ VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
+ PCI_CAPABILITY_LIST);
+ if (ret != sizeof(reg)) {
+ RTE_LOG(ERR, EAL, "Cannot read capability pointer from PCI "
+ "config space!\n");
+ return -1;
+ }
+
+	/* we need the first byte */
+ cap_offset = reg & 0xFF;
+
+ while (cap_offset) {
+
+ /* read PCI capability ID */
+ ret = pread64(fd, &reg, sizeof(reg),
+ VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
+ cap_offset);
+ if (ret != sizeof(reg)) {
+ RTE_LOG(ERR, EAL, "Cannot read capability ID from PCI "
+ "config space!\n");
+ return -1;
+ }
+
+		/* we need the first byte */
+ cap_id = reg & 0xFF;
+
+ /* if we haven't reached MSI-X, check next capability */
+ if (cap_id != PCI_CAP_ID_MSIX) {
+ ret = pread64(fd, &reg, sizeof(reg),
+ VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
+ cap_offset);
+ if (ret != sizeof(reg)) {
+ RTE_LOG(ERR, EAL, "Cannot read capability pointer from PCI "
+ "config space!\n");
+ return -1;
+ }
+
+			/* we need the second byte */
+ cap_offset = (reg & 0xFF00) >> 8;
+
+ continue;
+ }
+		/* otherwise, we have reached MSI-X: read the table offset */
+		else {
+ /* table offset resides in the next 4 bytes */
+ ret = pread64(fd, &reg, sizeof(reg),
+ VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
+ cap_offset + 4);
+ if (ret != sizeof(reg)) {
+ RTE_LOG(ERR, EAL, "Cannot read table offset from PCI config "
+ "space!\n");
+ return -1;
+ }
+
+ ret = pread64(fd, &flags, sizeof(flags),
+ VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
+ cap_offset + 2);
+ if (ret != sizeof(flags)) {
+ RTE_LOG(ERR, EAL, "Cannot read table flags from PCI config "
+ "space!\n");
+ return -1;
+ }
+
+ msix_table->bar_index = reg & RTE_PCI_MSIX_TABLE_BIR;
+ msix_table->offset = reg & RTE_PCI_MSIX_TABLE_OFFSET;
+ msix_table->size =
+ 16 * (1 + (flags & RTE_PCI_MSIX_FLAGS_QSIZE));
+
+ return 0;
+ }
+ }
+ return 0;
+}
+
+/* set PCI bus mastering */
+static int
+pci_vfio_set_bus_master(int dev_fd, bool op)
+{
+ uint16_t reg;
+ int ret;
+
+ ret = pread64(dev_fd, &reg, sizeof(reg),
+ VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
+ PCI_COMMAND);
+ if (ret != sizeof(reg)) {
+ RTE_LOG(ERR, EAL, "Cannot read command from PCI config space!\n");
+ return -1;
+ }
+
+ if (op)
+ /* set the master bit */
+ reg |= PCI_COMMAND_MASTER;
+ else
+ reg &= ~(PCI_COMMAND_MASTER);
+
+ ret = pwrite64(dev_fd, &reg, sizeof(reg),
+ VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
+ PCI_COMMAND);
+
+ if (ret != sizeof(reg)) {
+ RTE_LOG(ERR, EAL, "Cannot write command to PCI config space!\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/* set up interrupt support (but do not enable interrupts) */
+static int
+pci_vfio_setup_interrupts(struct rte_pci_device *dev, int vfio_dev_fd)
+{
+ int i, ret, intr_idx;
+ enum rte_intr_mode intr_mode;
+
+ /* default to invalid index */
+ intr_idx = VFIO_PCI_NUM_IRQS;
+
+ /* Get default / configured intr_mode */
+ intr_mode = rte_eal_vfio_intr_mode();
+
+	/* get interrupt type from internal config (MSI-X by default, can be
+	 * overridden from the command line)
+	 */
+ switch (intr_mode) {
+ case RTE_INTR_MODE_MSIX:
+ intr_idx = VFIO_PCI_MSIX_IRQ_INDEX;
+ break;
+ case RTE_INTR_MODE_MSI:
+ intr_idx = VFIO_PCI_MSI_IRQ_INDEX;
+ break;
+ case RTE_INTR_MODE_LEGACY:
+ intr_idx = VFIO_PCI_INTX_IRQ_INDEX;
+ break;
+ /* don't do anything if we want to automatically determine interrupt type */
+ case RTE_INTR_MODE_NONE:
+ break;
+ default:
+ RTE_LOG(ERR, EAL, " unknown default interrupt type!\n");
+ return -1;
+ }
+
+ /* start from MSI-X interrupt type */
+ for (i = VFIO_PCI_MSIX_IRQ_INDEX; i >= 0; i--) {
+ struct vfio_irq_info irq = { .argsz = sizeof(irq) };
+ int fd = -1;
+
+ /* skip interrupt modes we don't want */
+ if (intr_mode != RTE_INTR_MODE_NONE &&
+ i != intr_idx)
+ continue;
+
+ irq.index = i;
+
+ ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_IRQ_INFO, &irq);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, " cannot get IRQ info, "
+ "error %i (%s)\n", errno, strerror(errno));
+ return -1;
+ }
+
+ /* if this vector cannot be used with eventfd, fail if we explicitly
+ * specified interrupt type, otherwise continue */
+ if ((irq.flags & VFIO_IRQ_INFO_EVENTFD) == 0) {
+ if (intr_mode != RTE_INTR_MODE_NONE) {
+ RTE_LOG(ERR, EAL,
+ " interrupt vector does not support eventfd!\n");
+ return -1;
+ } else
+ continue;
+ }
+
+ /* set up an eventfd for interrupts */
+ fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, " cannot set up eventfd, "
+ "error %i (%s)\n", errno, strerror(errno));
+ return -1;
+ }
+
+ dev->intr_handle.fd = fd;
+ dev->intr_handle.vfio_dev_fd = vfio_dev_fd;
+
+ switch (i) {
+ case VFIO_PCI_MSIX_IRQ_INDEX:
+ intr_mode = RTE_INTR_MODE_MSIX;
+ dev->intr_handle.type = RTE_INTR_HANDLE_VFIO_MSIX;
+ break;
+ case VFIO_PCI_MSI_IRQ_INDEX:
+ intr_mode = RTE_INTR_MODE_MSI;
+ dev->intr_handle.type = RTE_INTR_HANDLE_VFIO_MSI;
+ break;
+ case VFIO_PCI_INTX_IRQ_INDEX:
+ intr_mode = RTE_INTR_MODE_LEGACY;
+ dev->intr_handle.type = RTE_INTR_HANDLE_VFIO_LEGACY;
+ break;
+ default:
+ RTE_LOG(ERR, EAL, " unknown interrupt type!\n");
+ return -1;
+ }
+
+ return 0;
+ }
+
+ /* if we're here, we haven't found a suitable interrupt vector */
+ return -1;
+}
+
+static int
+pci_vfio_is_ioport_bar(int vfio_dev_fd, int bar_index)
+{
+ uint32_t ioport_bar;
+ int ret;
+
+ ret = pread64(vfio_dev_fd, &ioport_bar, sizeof(ioport_bar),
+ VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX)
+ + PCI_BASE_ADDRESS_0 + bar_index*4);
+ if (ret != sizeof(ioport_bar)) {
+ RTE_LOG(ERR, EAL, "Cannot read command (%x) from config space!\n",
+ PCI_BASE_ADDRESS_0 + bar_index*4);
+ return -1;
+ }
+
+ return (ioport_bar & PCI_BASE_ADDRESS_SPACE_IO) != 0;
+}
+
+static int
+pci_rte_vfio_setup_device(struct rte_pci_device *dev, int vfio_dev_fd)
+{
+ if (pci_vfio_setup_interrupts(dev, vfio_dev_fd) != 0) {
+ RTE_LOG(ERR, EAL, "Error setting up interrupts!\n");
+ return -1;
+ }
+
+ /* set bus mastering for the device */
+ if (pci_vfio_set_bus_master(vfio_dev_fd, true)) {
+ RTE_LOG(ERR, EAL, "Cannot set up bus mastering!\n");
+ return -1;
+ }
+
+	/*
+	 * Reset the device. If the device is not capable of resetting,
+	 * the ioctl sets errno to EINVAL.
+	 */
+ if (ioctl(vfio_dev_fd, VFIO_DEVICE_RESET) && errno != EINVAL) {
+ RTE_LOG(ERR, EAL, "Unable to reset device! Error: %d (%s)\n",
+ errno, strerror(errno));
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+pci_vfio_mmap_bar(int vfio_dev_fd, struct mapped_pci_resource *vfio_res,
+ int bar_index, int additional_flags)
+{
+ struct memreg {
+ unsigned long offset, size;
+ } memreg[2] = {};
+ void *bar_addr;
+ struct pci_msix_table *msix_table = &vfio_res->msix_table;
+ struct pci_map *bar = &vfio_res->maps[bar_index];
+
+ if (bar->size == 0)
+ /* Skip this BAR */
+ return 0;
+
+ if (msix_table->bar_index == bar_index) {
+ /*
+ * VFIO will not let us map the MSI-X table,
+ * but we can map around it.
+ */
+ uint32_t table_start = msix_table->offset;
+ uint32_t table_end = table_start + msix_table->size;
+ table_end = (table_end + ~PAGE_MASK) & PAGE_MASK;
+ table_start &= PAGE_MASK;
+
+ if (table_start == 0 && table_end >= bar->size) {
+ /* Cannot map this BAR */
+ RTE_LOG(DEBUG, EAL, "Skipping BAR%d\n", bar_index);
+ bar->size = 0;
+ bar->addr = 0;
+ return 0;
+ }
+
+ memreg[0].offset = bar->offset;
+ memreg[0].size = table_start;
+ memreg[1].offset = bar->offset + table_end;
+ memreg[1].size = bar->size - table_end;
+
+ RTE_LOG(DEBUG, EAL,
+ "Trying to map BAR%d that contains the MSI-X "
+ "table. Trying offsets: "
+ "0x%04lx:0x%04lx, 0x%04lx:0x%04lx\n", bar_index,
+ memreg[0].offset, memreg[0].size,
+ memreg[1].offset, memreg[1].size);
+ } else {
+ memreg[0].offset = bar->offset;
+ memreg[0].size = bar->size;
+ }
+
+ /* reserve the address using an inaccessible mapping */
+ bar_addr = mmap(bar->addr, bar->size, 0, MAP_PRIVATE |
+ MAP_ANONYMOUS | additional_flags, -1, 0);
+ if (bar_addr != MAP_FAILED) {
+ void *map_addr = NULL;
+ if (memreg[0].size) {
+ /* actual map of first part */
+ map_addr = pci_map_resource(bar_addr, vfio_dev_fd,
+ memreg[0].offset,
+ memreg[0].size,
+ MAP_FIXED);
+ }
+
+ /* if there's a second part, try to map it */
+ if (map_addr != MAP_FAILED
+ && memreg[1].offset && memreg[1].size) {
+ void *second_addr = RTE_PTR_ADD(bar_addr,
+ memreg[1].offset -
+ (uintptr_t)bar->offset);
+ map_addr = pci_map_resource(second_addr,
+ vfio_dev_fd,
+ memreg[1].offset,
+ memreg[1].size,
+ MAP_FIXED);
+ }
+
+ if (map_addr == MAP_FAILED || !map_addr) {
+ munmap(bar_addr, bar->size);
+ bar_addr = MAP_FAILED;
+ RTE_LOG(ERR, EAL, "Failed to map pci BAR%d\n",
+ bar_index);
+ return -1;
+ }
+ } else {
+ RTE_LOG(ERR, EAL,
+ "Failed to create inaccessible mapping for BAR%d\n",
+ bar_index);
+ return -1;
+ }
+
+ bar->addr = bar_addr;
+ return 0;
+}
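+
+/*
+ * Worked instance of the map-around arithmetic above (numbers are
+ * illustrative, 4 KiB pages): an MSI-X table at offset 0x3010 of size
+ * 0x100 in a 64 KiB BAR gives
+ *   table_start = 0x3010 & PAGE_MASK           = 0x3000
+ *   table_end   = (0x3110 + 0xfff) & PAGE_MASK = 0x4000
+ * so the BAR is mapped as [0x0000, 0x3000) and [0x4000, 0x10000).
+ */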
+
+static int
+pci_vfio_map_resource_primary(struct rte_pci_device *dev)
+{
+ struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
+ char pci_addr[PATH_MAX] = {0};
+ int vfio_dev_fd;
+ struct rte_pci_addr *loc = &dev->addr;
+ int i, ret;
+ struct mapped_pci_resource *vfio_res = NULL;
+ struct mapped_pci_res_list *vfio_res_list =
+ RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list);
+
+ struct pci_map *maps;
+
+ dev->intr_handle.fd = -1;
+
+ /* store PCI address string */
+ snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
+ loc->domain, loc->bus, loc->devid, loc->function);
+
+ ret = rte_vfio_setup_device(rte_pci_get_sysfs_path(), pci_addr,
+ &vfio_dev_fd, &device_info);
+ if (ret)
+ return ret;
+
+ /* allocate vfio_res and get region info */
+ vfio_res = rte_zmalloc("VFIO_RES", sizeof(*vfio_res), 0);
+ if (vfio_res == NULL) {
+ RTE_LOG(ERR, EAL,
+ "%s(): cannot store uio mmap details\n", __func__);
+ goto err_vfio_dev_fd;
+ }
+ memcpy(&vfio_res->pci_addr, &dev->addr, sizeof(vfio_res->pci_addr));
+
+ /* get number of registers (up to BAR5) */
+ vfio_res->nb_maps = RTE_MIN((int) device_info.num_regions,
+ VFIO_PCI_BAR5_REGION_INDEX + 1);
+
+ /* map BARs */
+ maps = vfio_res->maps;
+
+ vfio_res->msix_table.bar_index = -1;
+ /* get MSI-X BAR, if any (we have to know where it is because we can't
+ * easily mmap it when using VFIO)
+ */
+ ret = pci_vfio_get_msix_bar(vfio_dev_fd, &vfio_res->msix_table);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, " %s cannot get MSI-X BAR number!\n",
+ pci_addr);
+ goto err_vfio_dev_fd;
+ }
+
+ for (i = 0; i < (int) vfio_res->nb_maps; i++) {
+ struct vfio_region_info reg = { .argsz = sizeof(reg) };
+ void *bar_addr;
+
+ reg.index = i;
+
+ ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg);
+ if (ret) {
+ RTE_LOG(ERR, EAL, " %s cannot get device region info "
+ "error %i (%s)\n", pci_addr, errno, strerror(errno));
+ goto err_vfio_res;
+ }
+
+		/* check for an I/O port region */
+ ret = pci_vfio_is_ioport_bar(vfio_dev_fd, i);
+ if (ret < 0)
+ goto err_vfio_res;
+ else if (ret) {
+			RTE_LOG(INFO, EAL, "Not mapping IO port BAR (%d)\n",
+				i);
+ continue;
+ }
+
+ /* skip non-mmapable BARs */
+ if ((reg.flags & VFIO_REGION_INFO_FLAG_MMAP) == 0)
+ continue;
+
+ /* try mapping somewhere close to the end of hugepages */
+ if (pci_map_addr == NULL)
+ pci_map_addr = pci_find_max_end_va();
+
+ bar_addr = pci_map_addr;
+ pci_map_addr = RTE_PTR_ADD(bar_addr, (size_t) reg.size);
+
+ maps[i].addr = bar_addr;
+ maps[i].offset = reg.offset;
+ maps[i].size = reg.size;
+ maps[i].path = NULL; /* vfio doesn't have per-resource paths */
+
+ ret = pci_vfio_mmap_bar(vfio_dev_fd, vfio_res, i, 0);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, " %s mapping BAR%i failed: %s\n",
+ pci_addr, i, strerror(errno));
+ goto err_vfio_res;
+ }
+
+ dev->mem_resource[i].addr = maps[i].addr;
+ }
+
+ if (pci_rte_vfio_setup_device(dev, vfio_dev_fd) < 0) {
+ RTE_LOG(ERR, EAL, " %s setup device failed\n", pci_addr);
+ goto err_vfio_res;
+ }
+
+ TAILQ_INSERT_TAIL(vfio_res_list, vfio_res, next);
+
+ return 0;
+err_vfio_res:
+ rte_free(vfio_res);
+err_vfio_dev_fd:
+ close(vfio_dev_fd);
+ return -1;
+}
+
+static int
+pci_vfio_map_resource_secondary(struct rte_pci_device *dev)
+{
+ struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
+ char pci_addr[PATH_MAX] = {0};
+ int vfio_dev_fd;
+ struct rte_pci_addr *loc = &dev->addr;
+ int i, ret;
+ struct mapped_pci_resource *vfio_res = NULL;
+ struct mapped_pci_res_list *vfio_res_list =
+ RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list);
+
+ struct pci_map *maps;
+
+ dev->intr_handle.fd = -1;
+
+ /* store PCI address string */
+ snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
+ loc->domain, loc->bus, loc->devid, loc->function);
+
+ ret = rte_vfio_setup_device(rte_pci_get_sysfs_path(), pci_addr,
+ &vfio_dev_fd, &device_info);
+ if (ret)
+ return ret;
+
+ /* if we're in a secondary process, just find our tailq entry */
+ TAILQ_FOREACH(vfio_res, vfio_res_list, next) {
+ if (rte_pci_addr_cmp(&vfio_res->pci_addr,
+ &dev->addr))
+ continue;
+ break;
+ }
+ /* if we haven't found our tailq entry, something's wrong */
+ if (vfio_res == NULL) {
+ RTE_LOG(ERR, EAL, " %s cannot find TAILQ entry for PCI device!\n",
+ pci_addr);
+ goto err_vfio_dev_fd;
+ }
+
+ /* map BARs */
+ maps = vfio_res->maps;
+
+ for (i = 0; i < (int) vfio_res->nb_maps; i++) {
+ ret = pci_vfio_mmap_bar(vfio_dev_fd, vfio_res, i, MAP_FIXED);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, " %s mapping BAR%i failed: %s\n",
+ pci_addr, i, strerror(errno));
+ goto err_vfio_dev_fd;
+ }
+
+ dev->mem_resource[i].addr = maps[i].addr;
+ }
+
+	/* we need to save vfio_dev_fd so it can be used during release */
+ dev->intr_handle.vfio_dev_fd = vfio_dev_fd;
+
+ return 0;
+err_vfio_dev_fd:
+ close(vfio_dev_fd);
+ return -1;
+}
+
+/*
+ * Map the PCI resources of a PCI device in virtual memory (VFIO version).
+ * Primary and secondary processes follow almost exactly the same path.
+ */
+int
+pci_vfio_map_resource(struct rte_pci_device *dev)
+{
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ return pci_vfio_map_resource_primary(dev);
+ else
+ return pci_vfio_map_resource_secondary(dev);
+}
+
+static struct mapped_pci_resource *
+find_and_unmap_vfio_resource(struct mapped_pci_res_list *vfio_res_list,
+ struct rte_pci_device *dev,
+ const char *pci_addr)
+{
+ struct mapped_pci_resource *vfio_res = NULL;
+ struct pci_map *maps;
+ int i;
+
+ /* Get vfio_res */
+ TAILQ_FOREACH(vfio_res, vfio_res_list, next) {
+ if (rte_pci_addr_cmp(&vfio_res->pci_addr, &dev->addr))
+ continue;
+ break;
+ }
+
+ if (vfio_res == NULL)
+ return vfio_res;
+
+ RTE_LOG(INFO, EAL, "Releasing pci mapped resource for %s\n",
+ pci_addr);
+
+ maps = vfio_res->maps;
+ for (i = 0; i < (int) vfio_res->nb_maps; i++) {
+
+		/*
+		 * We do not need the MSI-X table BAR awareness that
+		 * mapping required; just walking the current maps
+		 * array is enough.
+		 */
+ if (maps[i].addr) {
+ RTE_LOG(INFO, EAL, "Calling pci_unmap_resource for %s at %p\n",
+ pci_addr, maps[i].addr);
+ pci_unmap_resource(maps[i].addr, maps[i].size);
+ }
+ }
+
+ return vfio_res;
+}
+
+static int
+pci_vfio_unmap_resource_primary(struct rte_pci_device *dev)
+{
+ char pci_addr[PATH_MAX] = {0};
+ struct rte_pci_addr *loc = &dev->addr;
+ struct mapped_pci_resource *vfio_res = NULL;
+ struct mapped_pci_res_list *vfio_res_list;
+ int ret;
+
+ /* store PCI address string */
+ snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
+ loc->domain, loc->bus, loc->devid, loc->function);
+
+ if (close(dev->intr_handle.fd) < 0) {
+ RTE_LOG(INFO, EAL, "Error when closing eventfd file descriptor for %s\n",
+ pci_addr);
+ return -1;
+ }
+
+ if (pci_vfio_set_bus_master(dev->intr_handle.vfio_dev_fd, false)) {
+ RTE_LOG(ERR, EAL, " %s cannot unset bus mastering for PCI device!\n",
+ pci_addr);
+ return -1;
+ }
+
+ ret = rte_vfio_release_device(rte_pci_get_sysfs_path(), pci_addr,
+ dev->intr_handle.vfio_dev_fd);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL,
+ "%s(): cannot release device\n", __func__);
+ return ret;
+ }
+
+ vfio_res_list =
+ RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list);
+ vfio_res = find_and_unmap_vfio_resource(vfio_res_list, dev, pci_addr);
+
+ /* if we haven't found our tailq entry, something's wrong */
+ if (vfio_res == NULL) {
+ RTE_LOG(ERR, EAL, " %s cannot find TAILQ entry for PCI device!\n",
+ pci_addr);
+ return -1;
+ }
+
+ TAILQ_REMOVE(vfio_res_list, vfio_res, next);
+
+ return 0;
+}
+
+static int
+pci_vfio_unmap_resource_secondary(struct rte_pci_device *dev)
+{
+ char pci_addr[PATH_MAX] = {0};
+ struct rte_pci_addr *loc = &dev->addr;
+ struct mapped_pci_resource *vfio_res = NULL;
+ struct mapped_pci_res_list *vfio_res_list;
+ int ret;
+
+ /* store PCI address string */
+ snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
+ loc->domain, loc->bus, loc->devid, loc->function);
+
+ ret = rte_vfio_release_device(rte_pci_get_sysfs_path(), pci_addr,
+ dev->intr_handle.vfio_dev_fd);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL,
+ "%s(): cannot release device\n", __func__);
+ return ret;
+ }
+
+ vfio_res_list =
+ RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list);
+ vfio_res = find_and_unmap_vfio_resource(vfio_res_list, dev, pci_addr);
+
+ /* if we haven't found our tailq entry, something's wrong */
+ if (vfio_res == NULL) {
+ RTE_LOG(ERR, EAL, " %s cannot find TAILQ entry for PCI device!\n",
+ pci_addr);
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+pci_vfio_unmap_resource(struct rte_pci_device *dev)
+{
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ return pci_vfio_unmap_resource_primary(dev);
+ else
+ return pci_vfio_unmap_resource_secondary(dev);
+}
+
+int
+pci_vfio_ioport_map(struct rte_pci_device *dev, int bar,
+ struct rte_pci_ioport *p)
+{
+ if (bar < VFIO_PCI_BAR0_REGION_INDEX ||
+ bar > VFIO_PCI_BAR5_REGION_INDEX) {
+ RTE_LOG(ERR, EAL, "invalid bar (%d)!\n", bar);
+ return -1;
+ }
+
+ p->dev = dev;
+ p->base = VFIO_GET_REGION_ADDR(bar);
+ return 0;
+}
+
+void
+pci_vfio_ioport_read(struct rte_pci_ioport *p,
+ void *data, size_t len, off_t offset)
+{
+ const struct rte_intr_handle *intr_handle = &p->dev->intr_handle;
+
+ if (pread64(intr_handle->vfio_dev_fd, data,
+ len, p->base + offset) <= 0)
+ RTE_LOG(ERR, EAL,
+ "Can't read from PCI bar (%" PRIu64 ") : offset (%x)\n",
+ VFIO_GET_REGION_IDX(p->base), (int)offset);
+}
+
+void
+pci_vfio_ioport_write(struct rte_pci_ioport *p,
+ const void *data, size_t len, off_t offset)
+{
+ const struct rte_intr_handle *intr_handle = &p->dev->intr_handle;
+
+ if (pwrite64(intr_handle->vfio_dev_fd, data,
+ len, p->base + offset) <= 0)
+ RTE_LOG(ERR, EAL,
+ "Can't write to PCI bar (%" PRIu64 ") : offset (%x)\n",
+ VFIO_GET_REGION_IDX(p->base), (int)offset);
+}
+
+int
+pci_vfio_ioport_unmap(struct rte_pci_ioport *p)
+{
+ RTE_SET_USED(p);
+ return -1;
+}
+
+int
+pci_vfio_is_enabled(void)
+{
+ return rte_vfio_is_enabled("vfio_pci");
+}
+#endif
diff --git a/src/spdk/dpdk/drivers/bus/pci/meson.build b/src/spdk/dpdk/drivers/bus/pci/meson.build
new file mode 100644
index 00000000..72939e59
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/pci/meson.build
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+deps += ['pci']
+install_headers('rte_bus_pci.h')
+sources = files('pci_common.c', 'pci_common_uio.c')
+if host_machine.system() == 'linux'
+ sources += files('linux/pci.c',
+ 'linux/pci_uio.c',
+ 'linux/pci_vfio.c')
+ includes += include_directories('linux')
+ cflags += ['-D_GNU_SOURCE']
+else
+ sources += files('bsd/pci.c')
+ includes += include_directories('bsd')
+endif
+
+# memseg walk is not part of stable API yet
+allow_experimental_apis = true
diff --git a/src/spdk/dpdk/drivers/bus/pci/pci_common.c b/src/spdk/dpdk/drivers/bus/pci/pci_common.c
new file mode 100644
index 00000000..7736b3f9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/pci/pci_common.c
@@ -0,0 +1,443 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation.
+ * Copyright 2013-2014 6WIND S.A.
+ */
+
+#include <string.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <sys/queue.h>
+#include <sys/mman.h>
+
+#include <rte_errno.h>
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_bus.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_per_lcore.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_string_fns.h>
+#include <rte_common.h>
+#include <rte_devargs.h>
+
+#include "private.h"
+
+
+extern struct rte_pci_bus rte_pci_bus;
+
+#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
+
+const char *rte_pci_get_sysfs_path(void)
+{
+ const char *path = NULL;
+
+ path = getenv("SYSFS_PCI_DEVICES");
+ if (path == NULL)
+ return SYSFS_PCI_DEVICES;
+
+ return path;
+}
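+
+/*
+ * Usage note (the path below is illustrative): pointing the
+ * SYSFS_PCI_DEVICES environment variable at a fake sysfs tree, e.g. in
+ * tests, redirects every scan that goes through this hook:
+ *
+ *	setenv("SYSFS_PCI_DEVICES", "/tmp/fake-sysfs/bus/pci/devices", 1);
+ */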
+
+static struct rte_devargs *pci_devargs_lookup(struct rte_pci_device *dev)
+{
+ struct rte_devargs *devargs;
+ struct rte_pci_addr addr;
+
+ RTE_EAL_DEVARGS_FOREACH("pci", devargs) {
+ devargs->bus->parse(devargs->name, &addr);
+ if (!rte_pci_addr_cmp(&dev->addr, &addr))
+ return devargs;
+ }
+ return NULL;
+}
+
+void
+pci_name_set(struct rte_pci_device *dev)
+{
+ struct rte_devargs *devargs;
+
+ /* Each device has its internal, canonical name set. */
+ rte_pci_device_name(&dev->addr,
+ dev->name, sizeof(dev->name));
+ devargs = pci_devargs_lookup(dev);
+ dev->device.devargs = devargs;
+ /* In blacklist mode, if the device is not blacklisted, no
+ * rte_devargs exists for it.
+ */
+ if (devargs != NULL)
+ /* If an rte_devargs exists, the generic rte_device uses the
+ * given name as its name.
+ */
+ dev->device.name = dev->device.devargs->name;
+ else
+ /* Otherwise, it uses the internal, canonical form. */
+ dev->device.name = dev->name;
+}
+
+/*
+ * Match the PCI Driver and Device using the ID Table
+ */
+int
+rte_pci_match(const struct rte_pci_driver *pci_drv,
+ const struct rte_pci_device *pci_dev)
+{
+ const struct rte_pci_id *id_table;
+
+ for (id_table = pci_drv->id_table; id_table->vendor_id != 0;
+ id_table++) {
+ /* check if device's identifiers match the driver's ones */
+ if (id_table->vendor_id != pci_dev->id.vendor_id &&
+ id_table->vendor_id != PCI_ANY_ID)
+ continue;
+ if (id_table->device_id != pci_dev->id.device_id &&
+ id_table->device_id != PCI_ANY_ID)
+ continue;
+ if (id_table->subsystem_vendor_id !=
+ pci_dev->id.subsystem_vendor_id &&
+ id_table->subsystem_vendor_id != PCI_ANY_ID)
+ continue;
+ if (id_table->subsystem_device_id !=
+ pci_dev->id.subsystem_device_id &&
+ id_table->subsystem_device_id != PCI_ANY_ID)
+ continue;
+ if (id_table->class_id != pci_dev->id.class_id &&
+ id_table->class_id != RTE_CLASS_ANY_ID)
+ continue;
+
+ return 1;
+ }
+
+ return 0;
+}
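+
+/*
+ * A hedged sketch of an id_table the matcher above walks (the vendor and
+ * device IDs are placeholders, not a real device): wildcard fields use
+ * PCI_ANY_ID and the list ends with a zero vendor_id sentinel.
+ */
+static const struct rte_pci_id example_pci_ids[] __rte_unused = {
+	{ RTE_PCI_DEVICE(0x1234, 0x5678) },	/* exact vendor/device */
+	{ .class_id = RTE_CLASS_ANY_ID,
+	  .vendor_id = 0x1234,			/* any device of one vendor */
+	  .device_id = PCI_ANY_ID,
+	  .subsystem_vendor_id = PCI_ANY_ID,
+	  .subsystem_device_id = PCI_ANY_ID },
+	{ .vendor_id = 0 },			/* sentinel */
+};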
+
+/*
+ * If vendor/device ID match, call the probe() function of the
+ * driver.
+ */
+static int
+rte_pci_probe_one_driver(struct rte_pci_driver *dr,
+ struct rte_pci_device *dev)
+{
+ int ret;
+ struct rte_pci_addr *loc;
+
+ if ((dr == NULL) || (dev == NULL))
+ return -EINVAL;
+
+ loc = &dev->addr;
+
+	/* The device is not blacklisted; check if the driver supports it */
+ if (!rte_pci_match(dr, dev))
+ /* Match of device and driver failed */
+ return 1;
+
+ RTE_LOG(INFO, EAL, "PCI device "PCI_PRI_FMT" on NUMA socket %i\n",
+ loc->domain, loc->bus, loc->devid, loc->function,
+ dev->device.numa_node);
+
+ /* no initialization when blacklisted, return without error */
+ if (dev->device.devargs != NULL &&
+ dev->device.devargs->policy ==
+ RTE_DEV_BLACKLISTED) {
+ RTE_LOG(INFO, EAL, " Device is blacklisted, not"
+ " initializing\n");
+ return 1;
+ }
+
+ if (dev->device.numa_node < 0) {
+ RTE_LOG(WARNING, EAL, " Invalid NUMA socket, default to 0\n");
+ dev->device.numa_node = 0;
+ }
+
+ RTE_LOG(INFO, EAL, " probe driver: %x:%x %s\n", dev->id.vendor_id,
+ dev->id.device_id, dr->driver.name);
+
+	/*
+	 * Reference the driver structure.
+	 * This needs to happen before rte_pci_map_device(), as it allows
+	 * the driver flags to be used for adjusting configuration.
+	 */
+ dev->driver = dr;
+ dev->device.driver = &dr->driver;
+
+ if (dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING) {
+ /* map resources for devices that use igb_uio */
+ ret = rte_pci_map_device(dev);
+ if (ret != 0) {
+ dev->driver = NULL;
+ dev->device.driver = NULL;
+ return ret;
+ }
+ }
+
+ /* call the driver probe() function */
+ ret = dr->probe(dr, dev);
+ if (ret) {
+ dev->driver = NULL;
+ dev->device.driver = NULL;
+ if ((dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING) &&
+ /* Don't unmap if device is unsupported and
+ * driver needs mapped resources.
+ */
+ !(ret > 0 &&
+ (dr->drv_flags & RTE_PCI_DRV_KEEP_MAPPED_RES)))
+ rte_pci_unmap_device(dev);
+ }
+
+ return ret;
+}
+
+/*
+ * If vendor/device ID match, call the remove() function of the
+ * driver.
+ */
+static int
+rte_pci_detach_dev(struct rte_pci_device *dev)
+{
+ struct rte_pci_addr *loc;
+ struct rte_pci_driver *dr;
+ int ret = 0;
+
+ if (dev == NULL)
+ return -EINVAL;
+
+ dr = dev->driver;
+ loc = &dev->addr;
+
+ RTE_LOG(DEBUG, EAL, "PCI device "PCI_PRI_FMT" on NUMA socket %i\n",
+ loc->domain, loc->bus, loc->devid,
+ loc->function, dev->device.numa_node);
+
+ RTE_LOG(DEBUG, EAL, " remove driver: %x:%x %s\n", dev->id.vendor_id,
+ dev->id.device_id, dr->driver.name);
+
+ if (dr->remove) {
+ ret = dr->remove(dev);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* clear driver structure */
+ dev->driver = NULL;
+
+ if (dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING)
+ /* unmap resources for devices that use igb_uio */
+ rte_pci_unmap_device(dev);
+
+ return 0;
+}
+
+/*
+ * If vendor/device ID match, call the probe() function of all
+ * registered drivers for the given device. Return -1 if initialization
+ * failed, and 1 if no driver is found for this device.
+ */
+static int
+pci_probe_all_drivers(struct rte_pci_device *dev)
+{
+ struct rte_pci_driver *dr = NULL;
+ int rc = 0;
+
+ if (dev == NULL)
+ return -1;
+
+ /* Check if a driver is already loaded */
+ if (dev->driver != NULL)
+ return 0;
+
+ FOREACH_DRIVER_ON_PCIBUS(dr) {
+ rc = rte_pci_probe_one_driver(dr, dev);
+ if (rc < 0)
+ /* negative value is an error */
+ return -1;
+ if (rc > 0)
+ /* positive value means driver doesn't support it */
+ continue;
+ return 0;
+ }
+ return 1;
+}
+
+/*
+ * Scan the content of the PCI bus, and call the probe() function for
+ * all registered drivers that have a matching entry in their id_table
+ * for discovered devices.
+ */
+int
+rte_pci_probe(void)
+{
+ struct rte_pci_device *dev = NULL;
+ size_t probed = 0, failed = 0;
+ struct rte_devargs *devargs;
+ int probe_all = 0;
+ int ret = 0;
+
+ if (rte_pci_bus.bus.conf.scan_mode != RTE_BUS_SCAN_WHITELIST)
+ probe_all = 1;
+
+ FOREACH_DEVICE_ON_PCIBUS(dev) {
+ probed++;
+
+ devargs = dev->device.devargs;
+ /* probe all or only whitelisted devices */
+ if (probe_all)
+ ret = pci_probe_all_drivers(dev);
+ else if (devargs != NULL &&
+ devargs->policy == RTE_DEV_WHITELISTED)
+ ret = pci_probe_all_drivers(dev);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "Requested device " PCI_PRI_FMT
+ " cannot be used\n", dev->addr.domain, dev->addr.bus,
+ dev->addr.devid, dev->addr.function);
+ rte_errno = errno;
+ failed++;
+ ret = 0;
+ }
+ }
+
+ return (probed && probed == failed) ? -1 : 0;
+}
+
+/* dump one device */
+static int
+pci_dump_one_device(FILE *f, struct rte_pci_device *dev)
+{
+ int i;
+
+ fprintf(f, PCI_PRI_FMT, dev->addr.domain, dev->addr.bus,
+ dev->addr.devid, dev->addr.function);
+ fprintf(f, " - vendor:%x device:%x\n", dev->id.vendor_id,
+ dev->id.device_id);
+
+ for (i = 0; i != sizeof(dev->mem_resource) /
+ sizeof(dev->mem_resource[0]); i++) {
+ fprintf(f, " %16.16"PRIx64" %16.16"PRIx64"\n",
+ dev->mem_resource[i].phys_addr,
+ dev->mem_resource[i].len);
+ }
+ return 0;
+}
+
+/* dump devices on the bus */
+void
+rte_pci_dump(FILE *f)
+{
+ struct rte_pci_device *dev = NULL;
+
+ FOREACH_DEVICE_ON_PCIBUS(dev) {
+ pci_dump_one_device(f, dev);
+ }
+}
+
+static int
+pci_parse(const char *name, void *addr)
+{
+ struct rte_pci_addr *out = addr;
+ struct rte_pci_addr pci_addr;
+ bool parse;
+
+ parse = (rte_pci_addr_parse(name, &pci_addr) == 0);
+ if (parse && addr != NULL)
+ *out = pci_addr;
+ return parse == false;
+}
+
+/* register a driver */
+void
+rte_pci_register(struct rte_pci_driver *driver)
+{
+ TAILQ_INSERT_TAIL(&rte_pci_bus.driver_list, driver, next);
+ driver->bus = &rte_pci_bus;
+}
+
+/* unregister a driver */
+void
+rte_pci_unregister(struct rte_pci_driver *driver)
+{
+ TAILQ_REMOVE(&rte_pci_bus.driver_list, driver, next);
+ driver->bus = NULL;
+}
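+
+/*
+ * A hedged sketch of how a PMD typically hooks into the pair above (all
+ * names are placeholders; drivers normally use the RTE_PMD_REGISTER_PCI
+ * constructor macro rather than calling rte_pci_register() by hand):
+ *
+ *	static struct rte_pci_driver example_pmd = {
+ *		.id_table = example_pci_ids,
+ *		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ *		.probe = example_probe,
+ *		.remove = example_remove,
+ *	};
+ *	RTE_PMD_REGISTER_PCI(net_example, example_pmd);
+ */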
+
+/* Add a device to PCI bus */
+void
+rte_pci_add_device(struct rte_pci_device *pci_dev)
+{
+ TAILQ_INSERT_TAIL(&rte_pci_bus.device_list, pci_dev, next);
+}
+
+/* Insert a device into a predefined position in PCI bus */
+void
+rte_pci_insert_device(struct rte_pci_device *exist_pci_dev,
+ struct rte_pci_device *new_pci_dev)
+{
+ TAILQ_INSERT_BEFORE(exist_pci_dev, new_pci_dev, next);
+}
+
+/* Remove a device from PCI bus */
+static void
+rte_pci_remove_device(struct rte_pci_device *pci_dev)
+{
+ TAILQ_REMOVE(&rte_pci_bus.device_list, pci_dev, next);
+}
+
+static struct rte_device *
+pci_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
+ const void *data)
+{
+ const struct rte_pci_device *pstart;
+ struct rte_pci_device *pdev;
+
+ if (start != NULL) {
+ pstart = RTE_DEV_TO_PCI_CONST(start);
+ pdev = TAILQ_NEXT(pstart, next);
+ } else {
+ pdev = TAILQ_FIRST(&rte_pci_bus.device_list);
+ }
+ while (pdev != NULL) {
+ if (cmp(&pdev->device, data) == 0)
+ return &pdev->device;
+ pdev = TAILQ_NEXT(pdev, next);
+ }
+ return NULL;
+}
+
+static int
+pci_plug(struct rte_device *dev)
+{
+ return pci_probe_all_drivers(RTE_DEV_TO_PCI(dev));
+}
+
+static int
+pci_unplug(struct rte_device *dev)
+{
+ struct rte_pci_device *pdev;
+ int ret;
+
+ pdev = RTE_DEV_TO_PCI(dev);
+ ret = rte_pci_detach_dev(pdev);
+ if (ret == 0) {
+ rte_pci_remove_device(pdev);
+ free(pdev);
+ }
+ return ret;
+}
+
+struct rte_pci_bus rte_pci_bus = {
+ .bus = {
+ .scan = rte_pci_scan,
+ .probe = rte_pci_probe,
+ .find_device = pci_find_device,
+ .plug = pci_plug,
+ .unplug = pci_unplug,
+ .parse = pci_parse,
+ .get_iommu_class = rte_pci_get_iommu_class,
+ },
+ .device_list = TAILQ_HEAD_INITIALIZER(rte_pci_bus.device_list),
+ .driver_list = TAILQ_HEAD_INITIALIZER(rte_pci_bus.driver_list),
+};
+
+RTE_REGISTER_BUS(pci, rte_pci_bus.bus);
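
For context, the bus object registered above is driven by the generic bus layer in EAL. A rough sketch of the call flow, using the public rte_bus_scan()/rte_bus_probe() entry points (the exact call sites live in EAL initialization, not in this file):

	#include <rte_bus.h>

	/* Illustrative only: rte_eal_init() performs roughly this
	 * sequence, which reaches rte_pci_scan() and rte_pci_probe()
	 * through the rte_pci_bus.bus callbacks registered above. */
	static int
	bus_walkthrough(void)
	{
		if (rte_bus_scan() < 0)		/* -> .scan  = rte_pci_scan  */
			return -1;
		return rte_bus_probe();		/* -> .probe = rte_pci_probe */
	}
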
diff --git a/src/spdk/dpdk/drivers/bus/pci/pci_common_uio.c b/src/spdk/dpdk/drivers/bus/pci/pci_common_uio.c
new file mode 100644
index 00000000..c37fcacf
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/pci/pci_common_uio.c
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
+ */
+
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+
+#include <rte_eal.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_tailq.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+
+#include "private.h"
+
+static struct rte_tailq_elem rte_uio_tailq = {
+ .name = "UIO_RESOURCE_LIST",
+};
+EAL_REGISTER_TAILQ(rte_uio_tailq)
+
+static int
+pci_uio_map_secondary(struct rte_pci_device *dev)
+{
+ int fd, i, j;
+ struct mapped_pci_resource *uio_res;
+ struct mapped_pci_res_list *uio_res_list =
+ RTE_TAILQ_CAST(rte_uio_tailq.head, mapped_pci_res_list);
+
+ TAILQ_FOREACH(uio_res, uio_res_list, next) {
+
+ /* skip this element if it doesn't match our PCI address */
+ if (rte_pci_addr_cmp(&uio_res->pci_addr, &dev->addr))
+ continue;
+
+ for (i = 0; i != uio_res->nb_maps; i++) {
+ /*
+ * open devname, to mmap it
+ */
+ fd = open(uio_res->maps[i].path, O_RDWR);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
+ uio_res->maps[i].path, strerror(errno));
+ return -1;
+ }
+
+ void *mapaddr = pci_map_resource(uio_res->maps[i].addr,
+ fd, (off_t)uio_res->maps[i].offset,
+ (size_t)uio_res->maps[i].size, 0);
+ /* fd is not needed in the secondary process, close it */
+ close(fd);
+ if (mapaddr != uio_res->maps[i].addr) {
+ RTE_LOG(ERR, EAL,
+ "Cannot mmap device resource file %s to address: %p\n",
+ uio_res->maps[i].path,
+ uio_res->maps[i].addr);
+ if (mapaddr != MAP_FAILED) {
+ /* unmap addrs correctly mapped */
+ for (j = 0; j < i; j++)
+ pci_unmap_resource(
+ uio_res->maps[j].addr,
+ (size_t)uio_res->maps[j].size);
+ /* unmap addr wrongly mapped */
+ pci_unmap_resource(mapaddr,
+ (size_t)uio_res->maps[i].size);
+ }
+ return -1;
+ }
+ dev->mem_resource[i].addr = mapaddr;
+ }
+ return 0;
+ }
+
+ RTE_LOG(ERR, EAL, "Cannot find resource for device\n");
+ return 1;
+}
+
+/* map the PCI resource of a PCI device in virtual memory */
+int
+pci_uio_map_resource(struct rte_pci_device *dev)
+{
+ int i, map_idx = 0, ret;
+ uint64_t phaddr;
+ struct mapped_pci_resource *uio_res = NULL;
+ struct mapped_pci_res_list *uio_res_list =
+ RTE_TAILQ_CAST(rte_uio_tailq.head, mapped_pci_res_list);
+
+ dev->intr_handle.fd = -1;
+ dev->intr_handle.uio_cfg_fd = -1;
+
+ /* secondary processes - use already recorded details */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return pci_uio_map_secondary(dev);
+
+ /* allocate uio resource */
+ ret = pci_uio_alloc_resource(dev, &uio_res);
+ if (ret)
+ return ret;
+
+ /* Map all BARs */
+ for (i = 0; i != PCI_MAX_RESOURCE; i++) {
+ /* skip empty BAR */
+ phaddr = dev->mem_resource[i].phys_addr;
+ if (phaddr == 0)
+ continue;
+
+ ret = pci_uio_map_resource_by_index(dev, i,
+ uio_res, map_idx);
+ if (ret)
+ goto error;
+
+ map_idx++;
+ }
+
+ uio_res->nb_maps = map_idx;
+
+ TAILQ_INSERT_TAIL(uio_res_list, uio_res, next);
+
+ return 0;
+error:
+ for (i = 0; i < map_idx; i++) {
+ pci_unmap_resource(uio_res->maps[i].addr,
+ (size_t)uio_res->maps[i].size);
+ rte_free(uio_res->maps[i].path);
+ }
+ pci_uio_free_resource(dev, uio_res);
+ return -1;
+}
+
+static void
+pci_uio_unmap(struct mapped_pci_resource *uio_res)
+{
+ int i;
+
+ if (uio_res == NULL)
+ return;
+
+ for (i = 0; i != uio_res->nb_maps; i++) {
+ pci_unmap_resource(uio_res->maps[i].addr,
+ (size_t)uio_res->maps[i].size);
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(uio_res->maps[i].path);
+ }
+}
+
+static struct mapped_pci_resource *
+pci_uio_find_resource(struct rte_pci_device *dev)
+{
+ struct mapped_pci_resource *uio_res;
+ struct mapped_pci_res_list *uio_res_list =
+ RTE_TAILQ_CAST(rte_uio_tailq.head, mapped_pci_res_list);
+
+ if (dev == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(uio_res, uio_res_list, next) {
+
+ /* skip this element if it doesn't match our PCI address */
+ if (!rte_pci_addr_cmp(&uio_res->pci_addr, &dev->addr))
+ return uio_res;
+ }
+ return NULL;
+}
+
+/* unmap the PCI resource of a PCI device in virtual memory */
+void
+pci_uio_unmap_resource(struct rte_pci_device *dev)
+{
+ struct mapped_pci_resource *uio_res;
+ struct mapped_pci_res_list *uio_res_list =
+ RTE_TAILQ_CAST(rte_uio_tailq.head, mapped_pci_res_list);
+
+ if (dev == NULL)
+ return;
+
+ /* find an entry for the device */
+ uio_res = pci_uio_find_resource(dev);
+ if (uio_res == NULL)
+ return;
+
+ /* secondary processes - just free maps */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return pci_uio_unmap(uio_res);
+
+ TAILQ_REMOVE(uio_res_list, uio_res, next);
+
+ /* unmap all resources */
+ pci_uio_unmap(uio_res);
+
+ /* free uio resource */
+ rte_free(uio_res);
+
+ /* close fd if in primary process */
+ close(dev->intr_handle.fd);
+ if (dev->intr_handle.uio_cfg_fd >= 0) {
+ close(dev->intr_handle.uio_cfg_fd);
+ dev->intr_handle.uio_cfg_fd = -1;
+ }
+
+ dev->intr_handle.fd = -1;
+ dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+}
diff --git a/src/spdk/dpdk/drivers/bus/pci/private.h b/src/spdk/dpdk/drivers/bus/pci/private.h
new file mode 100644
index 00000000..8ddd03e1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/pci/private.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 6WIND S.A.
+ */
+
+#ifndef _PCI_PRIVATE_H_
+#define _PCI_PRIVATE_H_
+
+#include <stdbool.h>
+#include <stdio.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+
+struct rte_pci_driver;
+struct rte_pci_device;
+
+/**
+ * Probe the PCI bus
+ *
+ * @return
+ * - 0 on success.
+ * - !0 on error.
+ */
+int
+rte_pci_probe(void);
+
+/**
+ * Scan the content of the PCI bus, and add the discovered devices
+ * to the devices list.
+ *
+ * @return
+ * 0 on success, negative on error
+ */
+int rte_pci_scan(void);
+
+/**
+ * Set the name of a PCI device.
+ */
+void
+pci_name_set(struct rte_pci_device *dev);
+
+/**
+ * Add a PCI device to the PCI Bus (append to PCI Device list). This function
+ * also updates the bus references of the PCI Device (and the generic device
+ * object embedded within it).
+ *
+ * @param pci_dev
+ * PCI device to add
+ * @return void
+ */
+void rte_pci_add_device(struct rte_pci_device *pci_dev);
+
+/**
+ * Insert a PCI device in the PCI Bus at a particular location in the device
+ * list. It also updates the PCI Bus reference of the new devices to be
+ * inserted.
+ *
+ * @param exist_pci_dev
+ * Existing PCI device in PCI Bus
+ * @param new_pci_dev
+ * PCI device to be added before exist_pci_dev
+ * @return void
+ */
+void rte_pci_insert_device(struct rte_pci_device *exist_pci_dev,
+ struct rte_pci_device *new_pci_dev);
+
+/**
+ * Update a pci device object by asking the kernel for the latest information.
+ *
+ * This function is private to EAL.
+ *
+ * @param addr
+ * The PCI Bus-Device-Function address to look for
+ * @return
+ * - 0 on success.
+ * - negative on error.
+ */
+int pci_update_device(const struct rte_pci_addr *addr);
+
+/**
+ * Map the PCI resource of a PCI device in virtual memory
+ *
+ * This function is private to EAL.
+ *
+ * @return
+ * 0 on success, negative on error
+ */
+int pci_uio_map_resource(struct rte_pci_device *dev);
+
+/**
+ * Unmap the PCI resource of a PCI device
+ *
+ * This function is private to EAL.
+ */
+void pci_uio_unmap_resource(struct rte_pci_device *dev);
+
+/**
+ * Allocate uio resource for PCI device
+ *
+ * This function is private to EAL.
+ *
+ * @param dev
+ * PCI device to allocate uio resource
+ * @param uio_res
+ * Pointer to uio resource.
+ * If the function returns 0, the pointer will be filled.
+ * @return
+ * 0 on success, negative on error
+ */
+int pci_uio_alloc_resource(struct rte_pci_device *dev,
+ struct mapped_pci_resource **uio_res);
+
+/**
+ * Free uio resource for PCI device
+ *
+ * This function is private to EAL.
+ *
+ * @param dev
+ * PCI device to free uio resource
+ * @param uio_res
+ * Pointer to uio resource.
+ */
+void pci_uio_free_resource(struct rte_pci_device *dev,
+ struct mapped_pci_resource *uio_res);
+
+/**
+ * Map device memory to uio resource
+ *
+ * This function is private to EAL.
+ *
+ * @param dev
+ * PCI device that has memory information.
+ * @param res_idx
+ * Memory resource index of the PCI device.
+ * @param uio_res
+ * uio resource that will keep mapping information.
+ * @param map_idx
+ * Mapping information index of the uio resource.
+ * @return
+ * 0 on success, negative on error
+ */
+int pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx,
+ struct mapped_pci_resource *uio_res, int map_idx);
+
+/**
+ * Match the PCI Driver and Device using the ID Table
+ *
+ * @param pci_drv
+ * PCI driver from which ID table would be extracted
+ * @param pci_dev
+ * PCI device to match against the driver
+ * @return
+ * 1 for successful match
+ * 0 for unsuccessful match
+ */
+int
+rte_pci_match(const struct rte_pci_driver *pci_drv,
+ const struct rte_pci_device *pci_dev);
+
+/**
+ * Get iommu class of PCI devices on the bus.
+ * And return their preferred iova mapping mode.
+ *
+ * @return
+ * - enum rte_iova_mode.
+ */
+enum rte_iova_mode
+rte_pci_get_iommu_class(void);
+
+#endif /* _PCI_PRIVATE_H_ */
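
The contract documented for rte_pci_match() above (1 on match, 0 otherwise) boils down to a wildcard-aware walk of the NULL-terminated id_table. A simplified sketch of that rule, covering only the vendor/device fields for brevity (the real matcher in pci_common.c also checks class and subsystem IDs):

	#include <rte_bus_pci.h>

	/* Simplified sketch of the id_table matching rule; not the
	 * actual implementation. */
	static int
	match_sketch(const struct rte_pci_driver *drv,
		     const struct rte_pci_device *dev)
	{
		const struct rte_pci_id *id;

		for (id = drv->id_table; id->vendor_id != 0; id++) {
			if (id->vendor_id != PCI_ANY_ID &&
			    id->vendor_id != dev->id.vendor_id)
				continue;
			if (id->device_id != PCI_ANY_ID &&
			    id->device_id != dev->id.device_id)
				continue;
			return 1;	/* match */
		}
		return 0;		/* no entry matched */
	}
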
diff --git a/src/spdk/dpdk/drivers/bus/pci/rte_bus_pci.h b/src/spdk/dpdk/drivers/bus/pci/rte_bus_pci.h
new file mode 100644
index 00000000..0d1955ff
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/pci/rte_bus_pci.h
@@ -0,0 +1,315 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation.
+ * Copyright 2013-2014 6WIND S.A.
+ */
+
+#ifndef _RTE_BUS_PCI_H_
+#define _RTE_BUS_PCI_H_
+
+/**
+ * @file
+ *
+ * RTE PCI Bus Interface
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <limits.h>
+#include <errno.h>
+#include <sys/queue.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include <rte_debug.h>
+#include <rte_interrupts.h>
+#include <rte_dev.h>
+#include <rte_bus.h>
+#include <rte_pci.h>
+
+/** Pathname of PCI devices directory. */
+const char *rte_pci_get_sysfs_path(void);
+
+/* Forward declarations */
+struct rte_pci_device;
+struct rte_pci_driver;
+
+/** List of PCI devices */
+TAILQ_HEAD(rte_pci_device_list, rte_pci_device);
+/** List of PCI drivers */
+TAILQ_HEAD(rte_pci_driver_list, rte_pci_driver);
+
+/* PCI Bus iterators */
+#define FOREACH_DEVICE_ON_PCIBUS(p) \
+ TAILQ_FOREACH(p, &(rte_pci_bus.device_list), next)
+
+#define FOREACH_DRIVER_ON_PCIBUS(p) \
+ TAILQ_FOREACH(p, &(rte_pci_bus.driver_list), next)
+
+struct rte_devargs;
+
+/**
+ * A structure describing a PCI device.
+ */
+struct rte_pci_device {
+ TAILQ_ENTRY(rte_pci_device) next; /**< Next probed PCI device. */
+ struct rte_device device; /**< Inherit core device */
+ struct rte_pci_addr addr; /**< PCI location. */
+ struct rte_pci_id id; /**< PCI ID. */
+ struct rte_mem_resource mem_resource[PCI_MAX_RESOURCE];
+ /**< PCI Memory Resource */
+ struct rte_intr_handle intr_handle; /**< Interrupt handle */
+ struct rte_pci_driver *driver; /**< Associated driver */
+ uint16_t max_vfs; /**< sriov enable if not zero */
+ enum rte_kernel_driver kdrv; /**< Kernel driver passthrough */
+ char name[PCI_PRI_STR_SIZE+1]; /**< PCI location (ASCII) */
+};
+
+/**
+ * @internal
+ * Helper macro for drivers that need to convert to struct rte_pci_device.
+ */
+#define RTE_DEV_TO_PCI(ptr) container_of(ptr, struct rte_pci_device, device)
+
+#define RTE_DEV_TO_PCI_CONST(ptr) \
+ container_of(ptr, const struct rte_pci_device, device)
+
+#define RTE_ETH_DEV_TO_PCI(eth_dev) RTE_DEV_TO_PCI((eth_dev)->device)
+
+/** Any PCI device identifier (vendor, device, ...) */
+#define PCI_ANY_ID (0xffff)
+#define RTE_CLASS_ANY_ID (0xffffff)
+
+#ifdef __cplusplus
+/** C++ macro used to help building up tables of device IDs */
+#define RTE_PCI_DEVICE(vend, dev) \
+ RTE_CLASS_ANY_ID, \
+ (vend), \
+ (dev), \
+ PCI_ANY_ID, \
+ PCI_ANY_ID
+#else
+/** Macro used to help building up tables of device IDs */
+#define RTE_PCI_DEVICE(vend, dev) \
+ .class_id = RTE_CLASS_ANY_ID, \
+ .vendor_id = (vend), \
+ .device_id = (dev), \
+ .subsystem_vendor_id = PCI_ANY_ID, \
+ .subsystem_device_id = PCI_ANY_ID
+#endif
+
+/**
+ * Initialisation function for the driver called during PCI probing.
+ */
+typedef int (pci_probe_t)(struct rte_pci_driver *, struct rte_pci_device *);
+
+/**
+ * Uninitialisation function for the driver called during hotplugging.
+ */
+typedef int (pci_remove_t)(struct rte_pci_device *);
+
+/**
+ * A structure describing a PCI driver.
+ */
+struct rte_pci_driver {
+ TAILQ_ENTRY(rte_pci_driver) next; /**< Next in list. */
+ struct rte_driver driver; /**< Inherit core driver. */
+ struct rte_pci_bus *bus; /**< PCI bus reference. */
+ pci_probe_t *probe; /**< Device Probe function. */
+ pci_remove_t *remove; /**< Device Remove function. */
+ const struct rte_pci_id *id_table; /**< ID table, NULL terminated. */
+ uint32_t drv_flags; /**< Flags controlling handling of device. */
+};
+
+/**
+ * Structure describing the PCI bus
+ */
+struct rte_pci_bus {
+ struct rte_bus bus; /**< Inherit the generic class */
+ struct rte_pci_device_list device_list; /**< List of PCI devices */
+ struct rte_pci_driver_list driver_list; /**< List of PCI drivers */
+};
+
+/** Device needs PCI BAR mapping (done with either IGB_UIO or VFIO) */
+#define RTE_PCI_DRV_NEED_MAPPING 0x0001
+/** Device needs PCI BAR mapping with enabled write combining (wc) */
+#define RTE_PCI_DRV_WC_ACTIVATE 0x0002
+/** Device driver supports link state interrupt */
+#define RTE_PCI_DRV_INTR_LSC 0x0008
+/** Device driver supports device removal interrupt */
+#define RTE_PCI_DRV_INTR_RMV 0x0010
+/** Device driver needs to keep mapped resources if unsupported dev detected */
+#define RTE_PCI_DRV_KEEP_MAPPED_RES 0x0020
+/** Device driver supports IOVA as VA */
+#define RTE_PCI_DRV_IOVA_AS_VA 0x0040
+
+/**
+ * Map the PCI device resources in user space virtual memory address
+ *
+ * Note that a driver should not call this function when the
+ * RTE_PCI_DRV_NEED_MAPPING flag is set, as EAL performs the
+ * mapping itself in that case.
+ *
+ * @param dev
+ * A pointer to a rte_pci_device structure describing the device
+ * to use
+ *
+ * @return
+ * 0 on success, negative on error and positive if no driver
+ * is found for the device.
+ */
+int rte_pci_map_device(struct rte_pci_device *dev);
+
+/**
+ * Unmap this device
+ *
+ * @param dev
+ * A pointer to a rte_pci_device structure describing the device
+ * to use
+ */
+void rte_pci_unmap_device(struct rte_pci_device *dev);
+
+/**
+ * Dump the content of the PCI bus.
+ *
+ * @param f
+ * A pointer to a file for output
+ */
+void rte_pci_dump(FILE *f);
+
+/**
+ * Register a PCI driver.
+ *
+ * @param driver
+ * A pointer to a rte_pci_driver structure describing the driver
+ * to be registered.
+ */
+void rte_pci_register(struct rte_pci_driver *driver);
+
+/** Helper for PCI device registration from driver (eth, crypto) instance */
+#define RTE_PMD_REGISTER_PCI(nm, pci_drv) \
+RTE_INIT(pciinitfn_ ##nm) \
+{\
+ (pci_drv).driver.name = RTE_STR(nm);\
+ rte_pci_register(&pci_drv); \
+} \
+RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
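
A minimal sketch of a PMD using this registration macro, with a hypothetical vendor/device pair; the sentinel entry with vendor_id 0 terminates the id table, as the id_table field requires:

	#include <rte_bus_pci.h>
	#include <rte_common.h>

	static const struct rte_pci_id demo_pci_ids[] = {
		{ RTE_PCI_DEVICE(0x1234, 0xabcd) },	/* illustrative IDs */
		{ .vendor_id = 0 },			/* sentinel */
	};

	static int
	demo_probe(struct rte_pci_driver *drv __rte_unused,
		   struct rte_pci_device *dev __rte_unused)
	{
		return 0;	/* returning 0 claims the device */
	}

	static int
	demo_remove(struct rte_pci_device *dev __rte_unused)
	{
		return 0;
	}

	static struct rte_pci_driver demo_pci_drv = {
		.id_table = demo_pci_ids,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
		.probe = demo_probe,
		.remove = demo_remove,
	};
	RTE_PMD_REGISTER_PCI(net_demo, demo_pci_drv);
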
+
+/**
+ * Unregister a PCI driver.
+ *
+ * @param driver
+ * A pointer to a rte_pci_driver structure describing the driver
+ * to be unregistered.
+ */
+void rte_pci_unregister(struct rte_pci_driver *driver);
+
+/**
+ * Read PCI config space.
+ *
+ * @param device
+ * A pointer to a rte_pci_device structure describing the device
+ * to use
+ * @param buf
+ * A data buffer where the bytes should be read into
+ * @param len
+ * The length of the data buffer.
+ * @param offset
+ * The offset into PCI config space
+ */
+int rte_pci_read_config(const struct rte_pci_device *device,
+ void *buf, size_t len, off_t offset);
+
+/**
+ * Write PCI config space.
+ *
+ * @param device
+ * A pointer to a rte_pci_device structure describing the device
+ * to use
+ * @param buf
+ * A data buffer containing the bytes to be written
+ * @param len
+ * The length of the data buffer.
+ * @param offset
+ * The offset into PCI config space
+ */
+int rte_pci_write_config(const struct rte_pci_device *device,
+ const void *buf, size_t len, off_t offset);
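
A short usage sketch: reading the 16-bit vendor ID, which sits at offset 0 of PCI config space. The return convention is assumed here to be pread()-like (byte count on success, negative on error), which is how callers in the tree treat it:

	#include <stdint.h>
	#include <rte_bus_pci.h>

	static int
	read_vendor_id(const struct rte_pci_device *dev, uint16_t *vendor)
	{
		/* vendor ID is the first 16-bit word of config space */
		if (rte_pci_read_config(dev, vendor, sizeof(*vendor), 0) < 0)
			return -1;
		return 0;
	}
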
+
+/**
+ * A structure used to access io resources for a pci device.
+ * rte_pci_ioport is arch-, OS- and driver-specific, and should not be
+ * used outside of the PCI ioport API.
+ */
+struct rte_pci_ioport {
+ struct rte_pci_device *dev;
+ uint64_t base;
+ uint64_t len; /* only filled for memory mapped ports */
+};
+
+/**
+ * Initialize a rte_pci_ioport object for a pci device io resource.
+ *
+ * This object is then used to gain access to those io resources (see below).
+ *
+ * @param dev
+ * A pointer to a rte_pci_device structure describing the device
+ * to use.
+ * @param bar
+ * Index of the io pci resource we want to access.
+ * @param p
+ * The rte_pci_ioport object to be initialized.
+ * @return
+ * 0 on success, negative on error.
+ */
+int rte_pci_ioport_map(struct rte_pci_device *dev, int bar,
+ struct rte_pci_ioport *p);
+
+/**
+ * Release any resources used in a rte_pci_ioport object.
+ *
+ * @param p
+ * The rte_pci_ioport object to be uninitialized.
+ * @return
+ * 0 on success, negative on error.
+ */
+int rte_pci_ioport_unmap(struct rte_pci_ioport *p);
+
+/**
+ * Read from a io pci resource.
+ *
+ * @param p
+ * The rte_pci_ioport object from which we want to read.
+ * @param data
+ * A data buffer where the bytes should be read into
+ * @param len
+ * The length of the data buffer.
+ * @param offset
+ * The offset into the pci io resource.
+ */
+void rte_pci_ioport_read(struct rte_pci_ioport *p,
+ void *data, size_t len, off_t offset);
+
+/**
+ * Write to a io pci resource.
+ *
+ * @param p
+ * The rte_pci_ioport object to which we want to write.
+ * @param data
+ * A data buffer containing the bytes to be written
+ * @param len
+ * The length of the data buffer.
+ * @param offset
+ * The offset into the pci io resource.
+ */
+void rte_pci_ioport_write(struct rte_pci_ioport *p,
+ const void *data, size_t len, off_t offset);
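
A sketch of the full ioport life cycle declared above; the BAR index and offset are illustrative:

	#include <stdint.h>
	#include <rte_bus_pci.h>

	static int
	ioport_roundtrip(struct rte_pci_device *dev)
	{
		struct rte_pci_ioport p;
		uint8_t val;

		if (rte_pci_ioport_map(dev, 0, &p) < 0)	/* BAR 0 */
			return -1;
		rte_pci_ioport_read(&p, &val, sizeof(val), 0);
		rte_pci_ioport_write(&p, &val, sizeof(val), 0);
		return rte_pci_ioport_unmap(&p);
	}
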
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_BUS_PCI_H_ */
diff --git a/src/spdk/dpdk/drivers/bus/pci/rte_bus_pci_version.map b/src/spdk/dpdk/drivers/bus/pci/rte_bus_pci_version.map
new file mode 100644
index 00000000..27e9c4f1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/pci/rte_bus_pci_version.map
@@ -0,0 +1,18 @@
+DPDK_17.11 {
+ global:
+
+ rte_pci_dump;
+ rte_pci_get_sysfs_path;
+ rte_pci_ioport_map;
+ rte_pci_ioport_read;
+ rte_pci_ioport_unmap;
+ rte_pci_ioport_write;
+ rte_pci_map_device;
+ rte_pci_read_config;
+ rte_pci_register;
+ rte_pci_unmap_device;
+ rte_pci_unregister;
+ rte_pci_write_config;
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/bus/vdev/Makefile b/src/spdk/dpdk/drivers/bus/vdev/Makefile
new file mode 100644
index 00000000..bd0bb895
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vdev/Makefile
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_bus_vdev.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+# versioning export map
+EXPORT_MAP := rte_bus_vdev_version.map
+
+# library version
+LIBABIVER := 1
+
+SRCS-y += vdev.c
+
+LDLIBS += -lrte_eal
+
+#
+# Export include files
+#
+SYMLINK-y-include += rte_bus_vdev.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/bus/vdev/meson.build b/src/spdk/dpdk/drivers/bus/vdev/meson.build
new file mode 100644
index 00000000..2ee648b4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vdev/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+sources = files('vdev.c')
+install_headers('rte_bus_vdev.h')
+
+allow_experimental_apis = true
diff --git a/src/spdk/dpdk/drivers/bus/vdev/rte_bus_vdev.h b/src/spdk/dpdk/drivers/bus/vdev/rte_bus_vdev.h
new file mode 100644
index 00000000..9ae3eaae
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vdev/rte_bus_vdev.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 RehiveTech. All rights reserved.
+ */
+
+#ifndef RTE_VDEV_H
+#define RTE_VDEV_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/queue.h>
+#include <rte_dev.h>
+#include <rte_devargs.h>
+
+struct rte_vdev_device {
+ TAILQ_ENTRY(rte_vdev_device) next; /**< Next attached vdev */
+ struct rte_device device; /**< Inherit core device */
+};
+
+/**
+ * @internal
+ * Helper macro for drivers that need to convert to struct rte_vdev_device.
+ */
+#define RTE_DEV_TO_VDEV(ptr) \
+ container_of(ptr, struct rte_vdev_device, device)
+
+#define RTE_DEV_TO_VDEV_CONST(ptr) \
+ container_of(ptr, const struct rte_vdev_device, device)
+
+static inline const char *
+rte_vdev_device_name(const struct rte_vdev_device *dev)
+{
+ if (dev && dev->device.name)
+ return dev->device.name;
+ return NULL;
+}
+
+static inline const char *
+rte_vdev_device_args(const struct rte_vdev_device *dev)
+{
+ if (dev && dev->device.devargs)
+ return dev->device.devargs->args;
+ return "";
+}
+
+/** Double linked list of virtual device drivers. */
+TAILQ_HEAD(vdev_driver_list, rte_vdev_driver);
+
+/**
+ * Probe function called for each virtual device driver once.
+ */
+typedef int (rte_vdev_probe_t)(struct rte_vdev_device *dev);
+
+/**
+ * Remove function called for each virtual device driver once.
+ */
+typedef int (rte_vdev_remove_t)(struct rte_vdev_device *dev);
+
+/**
+ * A virtual device driver abstraction.
+ */
+struct rte_vdev_driver {
+ TAILQ_ENTRY(rte_vdev_driver) next; /**< Next in list. */
+ struct rte_driver driver; /**< Inherited general driver. */
+ rte_vdev_probe_t *probe; /**< Virtual device probe function. */
+ rte_vdev_remove_t *remove; /**< Virtual device remove function. */
+};
+
+/**
+ * Register a virtual device driver.
+ *
+ * @param driver
+ * A pointer to a rte_vdev_driver structure describing the driver
+ * to be registered.
+ */
+void rte_vdev_register(struct rte_vdev_driver *driver);
+
+/**
+ * Unregister a virtual device driver.
+ *
+ * @param driver
+ * A pointer to a rte_vdev_driver structure describing the driver
+ * to be unregistered.
+ */
+void rte_vdev_unregister(struct rte_vdev_driver *driver);
+
+#define RTE_PMD_REGISTER_VDEV(nm, vdrv)\
+static const char *vdrvinit_ ## nm ## _alias;\
+RTE_INIT(vdrvinitfn_ ##vdrv)\
+{\
+ (vdrv).driver.name = RTE_STR(nm);\
+ (vdrv).driver.alias = vdrvinit_ ## nm ## _alias;\
+ rte_vdev_register(&vdrv);\
+} \
+RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
+
+#define RTE_PMD_REGISTER_ALIAS(nm, alias)\
+static const char *vdrvinit_ ## nm ## _alias = RTE_STR(alias)
+
+typedef void (*rte_vdev_scan_callback)(void *user_arg);
+
+/**
+ * Add a callback to be called on vdev scan
+ * before reading the devargs list.
+ *
+ * This function cannot be called in a scan callback
+ * because of deadlock.
+ *
+ * @param callback
+ * The function to be called which can update the devargs list.
+ * @param user_arg
+ * An opaque pointer passed to callback.
+ * @return
+ * 0 on success, negative on error
+ */
+int
+rte_vdev_add_custom_scan(rte_vdev_scan_callback callback, void *user_arg);
+
+/**
+ * Remove a registered scan callback.
+ *
+ * This function cannot be called in a scan callback
+ * because of deadlock.
+ *
+ * @param callback
+ * The registered function to be removed.
+ * @param user_arg
+ * The associated opaque pointer or (void*)-1 for any.
+ * @return
+ * 0 on success
+ */
+int
+rte_vdev_remove_custom_scan(rte_vdev_scan_callback callback, void *user_arg);
+
+/**
+ * Initialize a driver specified by name.
+ *
+ * @param name
+ * The pointer to a driver name to be initialized.
+ * @param args
+ * The pointer to arguments used by driver initialization.
+ * @return
+ * 0 on success, negative on error
+ */
+int rte_vdev_init(const char *name, const char *args);
+
+/**
+ * Uninitialize a driver specified by name.
+ *
+ * @param name
+ * The pointer to a driver name to be uninitialized.
+ * @return
+ * 0 on success, negative on error
+ */
+int rte_vdev_uninit(const char *name);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
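
A minimal sketch of a vdev PMD built on this header; the driver name and alias are illustrative:

	#include <rte_common.h>
	#include <rte_bus_vdev.h>

	static int
	demo_vdev_probe(struct rte_vdev_device *dev)
	{
		/* device arguments, e.g. "foo=1", from the devargs list */
		const char *args = rte_vdev_device_args(dev);

		RTE_SET_USED(args);
		return 0;
	}

	static int
	demo_vdev_remove(struct rte_vdev_device *dev __rte_unused)
	{
		return 0;
	}

	static struct rte_vdev_driver demo_vdev_drv = {
		.probe = demo_vdev_probe,
		.remove = demo_vdev_remove,
	};
	RTE_PMD_REGISTER_VDEV(net_demo_vdev, demo_vdev_drv);
	RTE_PMD_REGISTER_ALIAS(net_demo_vdev, eth_demo_vdev);
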
diff --git a/src/spdk/dpdk/drivers/bus/vdev/rte_bus_vdev_version.map b/src/spdk/dpdk/drivers/bus/vdev/rte_bus_vdev_version.map
new file mode 100644
index 00000000..590cf9b4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vdev/rte_bus_vdev_version.map
@@ -0,0 +1,18 @@
+DPDK_17.11 {
+ global:
+
+ rte_vdev_init;
+ rte_vdev_register;
+ rte_vdev_uninit;
+ rte_vdev_unregister;
+
+ local: *;
+};
+
+DPDK_18.02 {
+ global:
+
+ rte_vdev_add_custom_scan;
+ rte_vdev_remove_custom_scan;
+
+} DPDK_17.11;
diff --git a/src/spdk/dpdk/drivers/bus/vdev/vdev.c b/src/spdk/dpdk/drivers/bus/vdev/vdev.c
new file mode 100644
index 00000000..1cdffd43
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vdev/vdev.c
@@ -0,0 +1,549 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 RehiveTech. All rights reserved.
+ */
+
+#include <string.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <sys/queue.h>
+
+#include <rte_eal.h>
+#include <rte_dev.h>
+#include <rte_bus.h>
+#include <rte_common.h>
+#include <rte_devargs.h>
+#include <rte_memory.h>
+#include <rte_tailq.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+#include <rte_errno.h>
+
+#include "rte_bus_vdev.h"
+#include "vdev_logs.h"
+
+#define VDEV_MP_KEY "bus_vdev_mp"
+
+int vdev_logtype_bus;
+
+/* Forward declare to access virtual bus name */
+static struct rte_bus rte_vdev_bus;
+
+/** Double linked list of virtual device drivers. */
+TAILQ_HEAD(vdev_device_list, rte_vdev_device);
+
+static struct vdev_device_list vdev_device_list =
+ TAILQ_HEAD_INITIALIZER(vdev_device_list);
+/* The lock needs to be recursive because a vdev can manage another vdev. */
+static rte_spinlock_recursive_t vdev_device_list_lock =
+ RTE_SPINLOCK_RECURSIVE_INITIALIZER;
+
+struct vdev_driver_list vdev_driver_list =
+ TAILQ_HEAD_INITIALIZER(vdev_driver_list);
+
+struct vdev_custom_scan {
+ TAILQ_ENTRY(vdev_custom_scan) next;
+ rte_vdev_scan_callback callback;
+ void *user_arg;
+};
+TAILQ_HEAD(vdev_custom_scans, vdev_custom_scan);
+static struct vdev_custom_scans vdev_custom_scans =
+ TAILQ_HEAD_INITIALIZER(vdev_custom_scans);
+static rte_spinlock_t vdev_custom_scan_lock = RTE_SPINLOCK_INITIALIZER;
+
+/* register a driver */
+void
+rte_vdev_register(struct rte_vdev_driver *driver)
+{
+ TAILQ_INSERT_TAIL(&vdev_driver_list, driver, next);
+}
+
+/* unregister a driver */
+void
+rte_vdev_unregister(struct rte_vdev_driver *driver)
+{
+ TAILQ_REMOVE(&vdev_driver_list, driver, next);
+}
+
+int
+rte_vdev_add_custom_scan(rte_vdev_scan_callback callback, void *user_arg)
+{
+ struct vdev_custom_scan *custom_scan;
+
+ rte_spinlock_lock(&vdev_custom_scan_lock);
+
+ /* check if already registered */
+ TAILQ_FOREACH(custom_scan, &vdev_custom_scans, next) {
+ if (custom_scan->callback == callback &&
+ custom_scan->user_arg == user_arg)
+ break;
+ }
+
+ if (custom_scan == NULL) {
+ custom_scan = malloc(sizeof(struct vdev_custom_scan));
+ if (custom_scan != NULL) {
+ custom_scan->callback = callback;
+ custom_scan->user_arg = user_arg;
+ TAILQ_INSERT_TAIL(&vdev_custom_scans, custom_scan, next);
+ }
+ }
+
+ rte_spinlock_unlock(&vdev_custom_scan_lock);
+
+ return (custom_scan == NULL) ? -1 : 0;
+}
+
+int
+rte_vdev_remove_custom_scan(rte_vdev_scan_callback callback, void *user_arg)
+{
+ struct vdev_custom_scan *custom_scan, *tmp_scan;
+
+ rte_spinlock_lock(&vdev_custom_scan_lock);
+ TAILQ_FOREACH_SAFE(custom_scan, &vdev_custom_scans, next, tmp_scan) {
+ if (custom_scan->callback != callback ||
+ (custom_scan->user_arg != (void *)-1 &&
+ custom_scan->user_arg != user_arg))
+ continue;
+ TAILQ_REMOVE(&vdev_custom_scans, custom_scan, next);
+ free(custom_scan);
+ }
+ rte_spinlock_unlock(&vdev_custom_scan_lock);
+
+ return 0;
+}
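
A usage sketch for the custom-scan hooks above. The callback body is deliberately a stub; per the note in vdev_scan() below, a real callback would build a struct rte_devargs and insert it into the devargs list. The (void *)-1 wildcard for removal follows the API comment in the header:

	#include <rte_common.h>
	#include <rte_bus_vdev.h>

	static void
	demo_scan_cb(void *user_arg)
	{
		/* would update the devargs list here, before the scan runs */
		RTE_SET_USED(user_arg);
	}

	static int
	demo_scan_hooks(void)
	{
		if (rte_vdev_add_custom_scan(demo_scan_cb, NULL) < 0)
			return -1;
		/* (void *)-1 removes the callback for any user_arg */
		return rte_vdev_remove_custom_scan(demo_scan_cb, (void *)-1);
	}
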
+
+static int
+vdev_parse(const char *name, void *addr)
+{
+ struct rte_vdev_driver **out = addr;
+ struct rte_vdev_driver *driver = NULL;
+
+ TAILQ_FOREACH(driver, &vdev_driver_list, next) {
+ if (strncmp(driver->driver.name, name,
+ strlen(driver->driver.name)) == 0)
+ break;
+ if (driver->driver.alias &&
+ strncmp(driver->driver.alias, name,
+ strlen(driver->driver.alias)) == 0)
+ break;
+ }
+ if (driver != NULL &&
+ addr != NULL)
+ *out = driver;
+ return driver == NULL;
+}
+
+static int
+vdev_probe_all_drivers(struct rte_vdev_device *dev)
+{
+ const char *name;
+ struct rte_vdev_driver *driver;
+ int ret;
+
+ name = rte_vdev_device_name(dev);
+
+ VDEV_LOG(DEBUG, "Search driver %s to probe device %s", name,
+ rte_vdev_device_name(dev));
+
+ if (vdev_parse(name, &driver))
+ return -1;
+ dev->device.driver = &driver->driver;
+ ret = driver->probe(dev);
+ if (ret)
+ dev->device.driver = NULL;
+ return ret;
+}
+
+/* The caller is responsible for thread safety. */
+static struct rte_vdev_device *
+find_vdev(const char *name)
+{
+ struct rte_vdev_device *dev;
+
+ if (!name)
+ return NULL;
+
+ TAILQ_FOREACH(dev, &vdev_device_list, next) {
+ const char *devname = rte_vdev_device_name(dev);
+
+ if (!strcmp(devname, name))
+ return dev;
+ }
+
+ return NULL;
+}
+
+static struct rte_devargs *
+alloc_devargs(const char *name, const char *args)
+{
+ struct rte_devargs *devargs;
+ int ret;
+
+ devargs = calloc(1, sizeof(*devargs));
+ if (!devargs)
+ return NULL;
+
+ devargs->bus = &rte_vdev_bus;
+ if (args)
+ devargs->args = strdup(args);
+ else
+ devargs->args = strdup("");
+
+ ret = snprintf(devargs->name, sizeof(devargs->name), "%s", name);
+ if (ret < 0 || ret >= (int)sizeof(devargs->name)) {
+ free(devargs->args);
+ free(devargs);
+ return NULL;
+ }
+
+ return devargs;
+}
+
+static int
+insert_vdev(const char *name, const char *args, struct rte_vdev_device **p_dev)
+{
+ struct rte_vdev_device *dev;
+ struct rte_devargs *devargs;
+ int ret;
+
+ if (name == NULL)
+ return -EINVAL;
+
+ devargs = alloc_devargs(name, args);
+ if (!devargs)
+ return -ENOMEM;
+
+ dev = calloc(1, sizeof(*dev));
+ if (!dev) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ dev->device.devargs = devargs;
+ dev->device.numa_node = SOCKET_ID_ANY;
+ dev->device.name = devargs->name;
+
+ if (find_vdev(name)) {
+ ret = -EEXIST;
+ goto fail;
+ }
+
+ TAILQ_INSERT_TAIL(&vdev_device_list, dev, next);
+ rte_devargs_insert(devargs);
+
+ if (p_dev)
+ *p_dev = dev;
+
+ return 0;
+fail:
+ free(devargs->args);
+ free(devargs);
+ free(dev);
+ return ret;
+}
+
+int
+rte_vdev_init(const char *name, const char *args)
+{
+ struct rte_vdev_device *dev;
+ struct rte_devargs *devargs;
+ int ret;
+
+ rte_spinlock_recursive_lock(&vdev_device_list_lock);
+ ret = insert_vdev(name, args, &dev);
+ if (ret == 0) {
+ ret = vdev_probe_all_drivers(dev);
+ if (ret) {
+ if (ret > 0)
+ VDEV_LOG(ERR, "no driver found for %s", name);
+ /* If fails, remove it from vdev list */
+ devargs = dev->device.devargs;
+ TAILQ_REMOVE(&vdev_device_list, dev, next);
+ rte_devargs_remove(devargs->bus->name, devargs->name);
+ free(dev);
+ }
+ }
+ rte_spinlock_recursive_unlock(&vdev_device_list_lock);
+ return ret;
+}
+
+static int
+vdev_remove_driver(struct rte_vdev_device *dev)
+{
+ const char *name = rte_vdev_device_name(dev);
+ const struct rte_vdev_driver *driver;
+
+ if (!dev->device.driver) {
+ VDEV_LOG(DEBUG, "no driver attach to device %s", name);
+ return 1;
+ }
+
+ driver = container_of(dev->device.driver, const struct rte_vdev_driver,
+ driver);
+ return driver->remove(dev);
+}
+
+int
+rte_vdev_uninit(const char *name)
+{
+ struct rte_vdev_device *dev;
+ struct rte_devargs *devargs;
+ int ret;
+
+ if (name == NULL)
+ return -EINVAL;
+
+ rte_spinlock_recursive_lock(&vdev_device_list_lock);
+
+ dev = find_vdev(name);
+ if (!dev) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ ret = vdev_remove_driver(dev);
+ if (ret)
+ goto unlock;
+
+ TAILQ_REMOVE(&vdev_device_list, dev, next);
+ devargs = dev->device.devargs;
+ rte_devargs_remove(devargs->bus->name, devargs->name);
+ free(dev);
+
+unlock:
+ rte_spinlock_recursive_unlock(&vdev_device_list_lock);
+ return ret;
+}
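
A round-trip sketch of the two entry points above, using the null PMD as an example device (any registered vdev driver name works; "size=64" is an argument that particular PMD happens to accept):

	#include <rte_bus_vdev.h>

	static int
	demo_vdev_roundtrip(void)
	{
		if (rte_vdev_init("net_null0", "size=64") < 0)
			return -1;
		return rte_vdev_uninit("net_null0");
	}
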
+
+struct vdev_param {
+#define VDEV_SCAN_REQ 1
+#define VDEV_SCAN_ONE 2
+#define VDEV_SCAN_REP 3
+ int type;
+ int num;
+ char name[RTE_DEV_NAME_MAX_LEN];
+};
+
+static int vdev_plug(struct rte_device *dev);
+
+/**
+ * This function works as the action for both the primary and secondary
+ * processes during static vdev discovery, when a secondary process boots.
+ *
+ * step 1, the secondary process sends a sync request asking the primary
+ * for its vdevs;
+ * step 2, the primary process receives the request and sends its vdevs
+ * one by one;
+ * step 3, the primary process sends back a reply indicating how many
+ * vdevs were sent.
+ */
+static int
+vdev_action(const struct rte_mp_msg *mp_msg, const void *peer)
+{
+ struct rte_vdev_device *dev;
+ struct rte_mp_msg mp_resp;
+ struct vdev_param *ou = (struct vdev_param *)&mp_resp.param;
+ const struct vdev_param *in = (const struct vdev_param *)mp_msg->param;
+ const char *devname;
+ int num;
+
+ strlcpy(mp_resp.name, VDEV_MP_KEY, sizeof(mp_resp.name));
+ mp_resp.len_param = sizeof(*ou);
+ mp_resp.num_fds = 0;
+
+ switch (in->type) {
+ case VDEV_SCAN_REQ:
+ ou->type = VDEV_SCAN_ONE;
+ ou->num = 1;
+ num = 0;
+
+ rte_spinlock_recursive_lock(&vdev_device_list_lock);
+ TAILQ_FOREACH(dev, &vdev_device_list, next) {
+ devname = rte_vdev_device_name(dev);
+ if (strlen(devname) == 0) {
+ VDEV_LOG(INFO, "vdev with no name is not sent");
+ continue;
+ }
+ VDEV_LOG(INFO, "send vdev, %s", devname);
+ strlcpy(ou->name, devname, RTE_DEV_NAME_MAX_LEN);
+ if (rte_mp_sendmsg(&mp_resp) < 0)
+ VDEV_LOG(ERR, "send vdev, %s, failed, %s",
+ devname, strerror(rte_errno));
+ num++;
+ }
+ rte_spinlock_recursive_unlock(&vdev_device_list_lock);
+
+ ou->type = VDEV_SCAN_REP;
+ ou->num = num;
+ if (rte_mp_reply(&mp_resp, peer) < 0)
+ VDEV_LOG(ERR, "Failed to reply a scan request");
+ break;
+ case VDEV_SCAN_ONE:
+ VDEV_LOG(INFO, "receive vdev, %s", in->name);
+ if (insert_vdev(in->name, NULL, NULL) < 0)
+ VDEV_LOG(ERR, "failed to add vdev, %s", in->name);
+ break;
+ default:
+ VDEV_LOG(ERR, "vdev cannot recognize this message");
+ }
+
+ return 0;
+}
+
+static int
+vdev_scan(void)
+{
+ struct rte_vdev_device *dev;
+ struct rte_devargs *devargs;
+ struct vdev_custom_scan *custom_scan;
+
+ if (rte_mp_action_register(VDEV_MP_KEY, vdev_action) < 0 &&
+ rte_errno != EEXIST) {
+ VDEV_LOG(ERR, "Failed to add vdev mp action");
+ return -1;
+ }
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+ struct rte_mp_msg mp_req, *mp_rep;
+ struct rte_mp_reply mp_reply;
+ struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
+ struct vdev_param *req = (struct vdev_param *)mp_req.param;
+ struct vdev_param *resp;
+
+ strlcpy(mp_req.name, VDEV_MP_KEY, sizeof(mp_req.name));
+ mp_req.len_param = sizeof(*req);
+ mp_req.num_fds = 0;
+ req->type = VDEV_SCAN_REQ;
+ if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 &&
+ mp_reply.nb_received == 1) {
+ mp_rep = &mp_reply.msgs[0];
+ resp = (struct vdev_param *)mp_rep->param;
+ VDEV_LOG(INFO, "Received %d vdevs", resp->num);
+ free(mp_reply.msgs);
+ } else
+ VDEV_LOG(ERR, "Failed to request vdev from primary");
+
+ /* Fall through to allow private vdevs in secondary process */
+ }
+
+ /* call custom scan callbacks if any */
+ rte_spinlock_lock(&vdev_custom_scan_lock);
+ TAILQ_FOREACH(custom_scan, &vdev_custom_scans, next) {
+ if (custom_scan->callback != NULL)
+ /*
+ * the callback should update devargs list
+ * by calling rte_devargs_insert() with
+ * devargs.bus = rte_bus_find_by_name("vdev");
+ * devargs.type = RTE_DEVTYPE_VIRTUAL;
+ * devargs.policy = RTE_DEV_WHITELISTED;
+ */
+ custom_scan->callback(custom_scan->user_arg);
+ }
+ rte_spinlock_unlock(&vdev_custom_scan_lock);
+
+ /* for virtual devices we scan the devargs_list populated via cmdline */
+ RTE_EAL_DEVARGS_FOREACH("vdev", devargs) {
+
+ dev = calloc(1, sizeof(*dev));
+ if (!dev)
+ return -1;
+
+ rte_spinlock_recursive_lock(&vdev_device_list_lock);
+
+ if (find_vdev(devargs->name)) {
+ rte_spinlock_recursive_unlock(&vdev_device_list_lock);
+ free(dev);
+ continue;
+ }
+
+ dev->device.devargs = devargs;
+ dev->device.numa_node = SOCKET_ID_ANY;
+ dev->device.name = devargs->name;
+
+ TAILQ_INSERT_TAIL(&vdev_device_list, dev, next);
+
+ rte_spinlock_recursive_unlock(&vdev_device_list_lock);
+ }
+
+ return 0;
+}
+
+static int
+vdev_probe(void)
+{
+ struct rte_vdev_device *dev;
+ int ret = 0;
+
+ /* call the init function for each virtual device */
+ TAILQ_FOREACH(dev, &vdev_device_list, next) {
+ /* we don't use the vdev lock here, as it's only used in DPDK
+ * initialization; and we don't want to hold such a lock when
+ * we call each driver probe.
+ */
+
+ if (dev->device.driver)
+ continue;
+
+ if (vdev_probe_all_drivers(dev)) {
+ VDEV_LOG(ERR, "failed to initialize %s device",
+ rte_vdev_device_name(dev));
+ ret = -1;
+ }
+ }
+
+ return ret;
+}
+
+static struct rte_device *
+vdev_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
+ const void *data)
+{
+ const struct rte_vdev_device *vstart;
+ struct rte_vdev_device *dev;
+
+ rte_spinlock_recursive_lock(&vdev_device_list_lock);
+ if (start != NULL) {
+ vstart = RTE_DEV_TO_VDEV_CONST(start);
+ dev = TAILQ_NEXT(vstart, next);
+ } else {
+ dev = TAILQ_FIRST(&vdev_device_list);
+ }
+ while (dev != NULL) {
+ if (cmp(&dev->device, data) == 0)
+ break;
+ dev = TAILQ_NEXT(dev, next);
+ }
+ rte_spinlock_recursive_unlock(&vdev_device_list_lock);
+
+ return dev ? &dev->device : NULL;
+}
+
+static int
+vdev_plug(struct rte_device *dev)
+{
+ return vdev_probe_all_drivers(RTE_DEV_TO_VDEV(dev));
+}
+
+static int
+vdev_unplug(struct rte_device *dev)
+{
+ return rte_vdev_uninit(dev->name);
+}
+
+static struct rte_bus rte_vdev_bus = {
+ .scan = vdev_scan,
+ .probe = vdev_probe,
+ .find_device = vdev_find_device,
+ .plug = vdev_plug,
+ .unplug = vdev_unplug,
+ .parse = vdev_parse,
+};
+
+RTE_REGISTER_BUS(vdev, rte_vdev_bus);
+
+RTE_INIT(vdev_init_log)
+{
+ vdev_logtype_bus = rte_log_register("bus.vdev");
+ if (vdev_logtype_bus >= 0)
+ rte_log_set_level(vdev_logtype_bus, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/bus/vdev/vdev_logs.h b/src/spdk/dpdk/drivers/bus/vdev/vdev_logs.h
new file mode 100644
index 00000000..87593741
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vdev/vdev_logs.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _VDEV_LOGS_H_
+#define _VDEV_LOGS_H_
+
+#include <rte_log.h>
+
+extern int vdev_logtype_bus;
+
+#define VDEV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, vdev_logtype_bus, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+#endif /* _VDEV_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/bus/vmbus/Makefile b/src/spdk/dpdk/drivers/bus/vmbus/Makefile
new file mode 100644
index 00000000..deee9dd1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vmbus/Makefile
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: BSD-3-Clause
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+LIB = librte_bus_vmbus.a
+LIBABIVER := 1
+EXPORT_MAP := rte_bus_vmbus_version.map
+
+CFLAGS += -I$(SRCDIR)
+CFLAGS += -O3 $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+ifneq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),)
+SYSTEM := linux
+endif
+ifneq ($(CONFIG_RTE_EXEC_ENV_BSDAPP),)
+$(error "VMBUS not implemented for BSD yet")
+endif
+
+CFLAGS += -I$(RTE_SDK)/drivers/bus/vmbus/$(SYSTEM)
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/$(SYSTEM)app/eal
+
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev
+
+include $(RTE_SDK)/drivers/bus/vmbus/$(SYSTEM)/Makefile
+SRCS-$(CONFIG_RTE_LIBRTE_VMBUS) := $(addprefix $(SYSTEM)/,$(SRCS))
+SRCS-$(CONFIG_RTE_LIBRTE_VMBUS) += vmbus_common.c
+SRCS-$(CONFIG_RTE_LIBRTE_VMBUS) += vmbus_channel.c vmbus_bufring.c
+SRCS-$(CONFIG_RTE_LIBRTE_VMBUS) += vmbus_common_uio.c
+
+SYMLINK-$(CONFIG_RTE_LIBRTE_VMBUS)-include += rte_bus_vmbus.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_VMBUS)-include += rte_vmbus_reg.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/bus/vmbus/linux/Makefile b/src/spdk/dpdk/drivers/bus/vmbus/linux/Makefile
new file mode 100644
index 00000000..ef0d30b2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vmbus/linux/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: BSD-3-Clause
+
+SRCS += vmbus_bus.c vmbus_uio.c
diff --git a/src/spdk/dpdk/drivers/bus/vmbus/linux/vmbus_bus.c b/src/spdk/dpdk/drivers/bus/vmbus/linux/vmbus_bus.c
new file mode 100644
index 00000000..52d6a3c0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vmbus/linux/vmbus_bus.c
@@ -0,0 +1,355 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018, Microsoft Corporation.
+ * All Rights Reserved.
+ */
+
+#include <string.h>
+#include <unistd.h>
+#include <dirent.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+
+#include <rte_eal.h>
+#include <rte_uuid.h>
+#include <rte_tailq.h>
+#include <rte_log.h>
+#include <rte_devargs.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+#include <rte_bus_vmbus.h>
+
+#include "eal_filesystem.h"
+#include "private.h"
+
+/** Pathname of VMBUS devices directory. */
+#define SYSFS_VMBUS_DEVICES "/sys/bus/vmbus/devices"
+
+extern struct rte_vmbus_bus rte_vmbus_bus;
+
+/* Read sysfs file to get UUID */
+static int
+parse_sysfs_uuid(const char *filename, rte_uuid_t uu)
+{
+ char buf[BUFSIZ];
+ char *cp, *in = buf;
+ FILE *f;
+
+ f = fopen(filename, "r");
+ if (f == NULL) {
+ VMBUS_LOG(ERR, "cannot open sysfs value %s: %s",
+ filename, strerror(errno));
+ return -1;
+ }
+
+ if (fgets(buf, sizeof(buf), f) == NULL) {
+ VMBUS_LOG(ERR, "cannot read sysfs value %s",
+ filename);
+ fclose(f);
+ return -1;
+ }
+ fclose(f);
+
+ cp = strchr(buf, '\n');
+ if (cp)
+ *cp = '\0';
+
+ /* strip { } notation */
+ if (buf[0] == '{') {
+ in = buf + 1;
+ cp = strchr(in, '}');
+ if (cp)
+ *cp = '\0';
+ }
+
+ if (rte_uuid_parse(in, uu) < 0) {
+ VMBUS_LOG(ERR, "%s %s not a valid UUID",
+ filename, buf);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+get_sysfs_string(const char *filename, char *buf, size_t buflen)
+{
+ char *cp;
+ FILE *f;
+
+ f = fopen(filename, "r");
+ if (f == NULL) {
+ VMBUS_LOG(ERR, "cannot open sysfs value %s:%s",
+ filename, strerror(errno));
+ return -1;
+ }
+
+ if (fgets(buf, buflen, f) == NULL) {
+ VMBUS_LOG(ERR, "cannot read sysfs value %s",
+ filename);
+ fclose(f);
+ return -1;
+ }
+ fclose(f);
+
+ /* remove trailing newline */
+ cp = memchr(buf, '\n', buflen);
+ if (cp)
+ *cp = '\0';
+
+ return 0;
+}
+
+static int
+vmbus_get_uio_dev(const struct rte_vmbus_device *dev,
+ char *dstbuf, size_t buflen)
+{
+ char dirname[PATH_MAX];
+ unsigned int uio_num;
+ struct dirent *e;
+ DIR *dir;
+
+ /* Assume recent kernel where uio is in uio/uioX */
+ snprintf(dirname, sizeof(dirname),
+ SYSFS_VMBUS_DEVICES "/%s/uio", dev->device.name);
+
+ dir = opendir(dirname);
+ if (dir == NULL)
+ return -1; /* Not a UIO device */
+
+ /* take the first file starting with "uio" */
+ while ((e = readdir(dir)) != NULL) {
+ const int prefix_len = 3;
+ char *endptr;
+
+ if (strncmp(e->d_name, "uio", prefix_len) != 0)
+ continue;
+
+ /* try uio%d */
+ errno = 0;
+ uio_num = strtoull(e->d_name + prefix_len, &endptr, 10);
+ if (errno == 0 && endptr != (e->d_name + prefix_len)) {
+ snprintf(dstbuf, buflen, "%s/uio%u", dirname, uio_num);
+ break;
+ }
+ }
+ closedir(dir);
+
+ if (e == NULL)
+ return -1;
+
+ return uio_num;
+}
+
+/* Map names must match the map names used by the kernel uio driver */
+static const char *map_names[VMBUS_MAX_RESOURCE] = {
+ [HV_TXRX_RING_MAP] = "txrx_rings",
+ [HV_INT_PAGE_MAP] = "int_page",
+ [HV_MON_PAGE_MAP] = "monitor_page",
+ [HV_RECV_BUF_MAP] = "recv:",
+ [HV_SEND_BUF_MAP] = "send:",
+};
+
+
+/* map the resources of a vmbus device in virtual memory */
+int
+rte_vmbus_map_device(struct rte_vmbus_device *dev)
+{
+ char uioname[PATH_MAX], filename[PATH_MAX];
+ char dirname[PATH_MAX], mapname[64];
+ int i;
+
+ dev->uio_num = vmbus_get_uio_dev(dev, uioname, sizeof(uioname));
+ if (dev->uio_num < 0) {
+ VMBUS_LOG(DEBUG, "Not managed by UIO driver, skipped");
+ return 1;
+ }
+
+ /* Extract resource value */
+ for (i = 0; i < VMBUS_MAX_RESOURCE; i++) {
+ struct rte_mem_resource *res = &dev->resource[i];
+ unsigned long len, gpad = 0;
+ char *cp;
+
+ snprintf(dirname, sizeof(dirname),
+ "%s/maps/map%d", uioname, i);
+
+ snprintf(filename, sizeof(filename),
+ "%s/name", dirname);
+
+ if (get_sysfs_string(filename, mapname, sizeof(mapname)) < 0) {
+ VMBUS_LOG(ERR, "could not read %s", filename);
+ return -1;
+ }
+
+ if (strncmp(map_names[i], mapname, strlen(map_names[i])) != 0) {
+ VMBUS_LOG(ERR,
+ "unexpected resource %s (expected %s)",
+ mapname, map_names[i]);
+ return -1;
+ }
+
+ snprintf(filename, sizeof(filename),
+ "%s/size", dirname);
+ if (eal_parse_sysfs_value(filename, &len) < 0) {
+ VMBUS_LOG(ERR,
+ "could not read %s", filename);
+ return -1;
+ }
+ res->len = len;
+
+ /* both send and receive buffers have gpad in name */
+ cp = memchr(mapname, ':', sizeof(mapname));
+ if (cp)
+ gpad = strtoul(cp+1, NULL, 0);
+
+ /* put the GPAD value in physical address */
+ res->phys_addr = gpad;
+ }
+
+ return vmbus_uio_map_resource(dev);
+}
+
+void
+rte_vmbus_unmap_device(struct rte_vmbus_device *dev)
+{
+ vmbus_uio_unmap_resource(dev);
+}
+
+/* Scan one vmbus sysfs entry, and fill the devices list from it. */
+static int
+vmbus_scan_one(const char *name)
+{
+ struct rte_vmbus_device *dev, *dev2;
+ char filename[PATH_MAX];
+ char dirname[PATH_MAX];
+ unsigned long tmp;
+
+ dev = calloc(1, sizeof(*dev));
+ if (dev == NULL)
+ return -1;
+
+ dev->device.name = strdup(name);
+ if (!dev->device.name)
+ goto error;
+
+ /* sysfs base directory
+ * /sys/bus/vmbus/devices/7a08391f-f5a0-4ac0-9802-d13fd964f8df
+ * or on older kernel
+ * /sys/bus/vmbus/devices/vmbus_1
+ */
+ snprintf(dirname, sizeof(dirname), "%s/%s",
+ SYSFS_VMBUS_DEVICES, name);
+
+ /* get device id */
+ snprintf(filename, sizeof(filename), "%s/device_id", dirname);
+ if (parse_sysfs_uuid(filename, dev->device_id) < 0)
+ goto error;
+
+ /* get device class */
+ snprintf(filename, sizeof(filename), "%s/class_id", dirname);
+ if (parse_sysfs_uuid(filename, dev->class_id) < 0)
+ goto error;
+
+ /* get relid */
+ snprintf(filename, sizeof(filename), "%s/id", dirname);
+ if (eal_parse_sysfs_value(filename, &tmp) < 0)
+ goto error;
+ dev->relid = tmp;
+
+ /* get monitor id */
+ snprintf(filename, sizeof(filename), "%s/monitor_id", dirname);
+ if (eal_parse_sysfs_value(filename, &tmp) < 0)
+ goto error;
+ dev->monitor_id = tmp;
+
+ /* get numa node (if present) */
+ snprintf(filename, sizeof(filename), "%s/numa_node",
+ dirname);
+
+ if (access(filename, R_OK) == 0) {
+ if (eal_parse_sysfs_value(filename, &tmp) < 0)
+ goto error;
+ dev->device.numa_node = tmp;
+ } else {
+ /* if no NUMA support, default to SOCKET_ID_ANY */
+ dev->device.numa_node = SOCKET_ID_ANY;
+ }
+
+ /* device is valid, add in list (sorted) */
+ VMBUS_LOG(DEBUG, "Adding vmbus device %s", name);
+
+ TAILQ_FOREACH(dev2, &rte_vmbus_bus.device_list, next) {
+ int ret;
+
+ ret = rte_uuid_compare(dev->device_id, dev2->device_id);
+ if (ret > 0)
+ continue;
+
+ if (ret < 0) {
+ vmbus_insert_device(dev2, dev);
+ } else { /* already registered */
+ VMBUS_LOG(NOTICE,
+ "%s already registered", name);
+ free(dev);
+ }
+ return 0;
+ }
+
+ vmbus_add_device(dev);
+ return 0;
+error:
+ VMBUS_LOG(DEBUG, "failed");
+
+ free(dev);
+ return -1;
+}
+
+/*
+ * Scan the content of the vmbus, and the devices in the devices list
+ */
+int
+rte_vmbus_scan(void)
+{
+ struct dirent *e;
+ DIR *dir;
+
+ dir = opendir(SYSFS_VMBUS_DEVICES);
+ if (dir == NULL) {
+ if (errno == ENOENT)
+ return 0;
+
+ VMBUS_LOG(ERR, "opendir %s failed: %s",
+ SYSFS_VMBUS_DEVICES, strerror(errno));
+ return -1;
+ }
+
+ while ((e = readdir(dir)) != NULL) {
+ if (e->d_name[0] == '.')
+ continue;
+
+ if (vmbus_scan_one(e->d_name) < 0)
+ goto error;
+ }
+ closedir(dir);
+ return 0;
+
+error:
+ closedir(dir);
+ return -1;
+}
+
+void rte_vmbus_irq_mask(struct rte_vmbus_device *device)
+{
+ vmbus_uio_irq_control(device, 1);
+}
+
+void rte_vmbus_irq_unmask(struct rte_vmbus_device *device)
+{
+ vmbus_uio_irq_control(device, 0);
+}
+
+int rte_vmbus_irq_read(struct rte_vmbus_device *device)
+{
+ return vmbus_uio_irq_read(device);
+}
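
A sketch of how a driver might pair these interrupt helpers in a receive path; the exact mask/unmask discipline is device-specific, so treat the ordering as illustrative only:

	#include <rte_bus_vmbus.h>

	static void
	demo_irq_cycle(struct rte_vmbus_device *dev)
	{
		int count;

		count = rte_vmbus_irq_read(dev);	/* consume pending count */
		if (count > 0) {
			/* ... drain the channel ring here ... */
		}
		rte_vmbus_irq_unmask(dev);	/* rearm before waiting again */
	}
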
diff --git a/src/spdk/dpdk/drivers/bus/vmbus/linux/vmbus_uio.c b/src/spdk/dpdk/drivers/bus/vmbus/linux/vmbus_uio.c
new file mode 100644
index 00000000..856c6d66
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vmbus/linux/vmbus_uio.c
@@ -0,0 +1,398 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018, Microsoft Corporation.
+ * All Rights Reserved.
+ */
+
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <dirent.h>
+#include <inttypes.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+
+#include <rte_log.h>
+#include <rte_bus.h>
+#include <rte_memory.h>
+#include <rte_eal_memconfig.h>
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_bus_vmbus.h>
+#include <rte_string_fns.h>
+
+#include "private.h"
+
+/** Pathname of VMBUS devices directory. */
+#define SYSFS_VMBUS_DEVICES "/sys/bus/vmbus/devices"
+
+static void *vmbus_map_addr;
+
+/* Control interrupts */
+void vmbus_uio_irq_control(struct rte_vmbus_device *dev, int32_t onoff)
+{
+ if (write(dev->intr_handle.fd, &onoff, sizeof(onoff)) < 0) {
+ VMBUS_LOG(ERR, "cannot write to %d:%s",
+ dev->intr_handle.fd, strerror(errno));
+ }
+}
+
+int vmbus_uio_irq_read(struct rte_vmbus_device *dev)
+{
+ int32_t count;
+ int cc;
+
+ cc = read(dev->intr_handle.fd, &count, sizeof(count));
+ if (cc < (int)sizeof(count)) {
+ if (cc < 0) {
+ VMBUS_LOG(ERR, "IRQ read failed %s",
+ strerror(errno));
+ return -errno;
+ }
+ VMBUS_LOG(ERR, "can't read IRQ count");
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+void
+vmbus_uio_free_resource(struct rte_vmbus_device *dev,
+ struct mapped_vmbus_resource *uio_res)
+{
+ rte_free(uio_res);
+
+ if (dev->intr_handle.uio_cfg_fd >= 0) {
+ close(dev->intr_handle.uio_cfg_fd);
+ dev->intr_handle.uio_cfg_fd = -1;
+ }
+
+ if (dev->intr_handle.fd >= 0) {
+ close(dev->intr_handle.fd);
+ dev->intr_handle.fd = -1;
+ dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+ }
+}
+
+int
+vmbus_uio_alloc_resource(struct rte_vmbus_device *dev,
+ struct mapped_vmbus_resource **uio_res)
+{
+ char devname[PATH_MAX]; /* contains the /dev/uioX */
+
+ /* save fd if in primary process */
+ snprintf(devname, sizeof(devname), "/dev/uio%u", dev->uio_num);
+ dev->intr_handle.fd = open(devname, O_RDWR);
+ if (dev->intr_handle.fd < 0) {
+ VMBUS_LOG(ERR, "Cannot open %s: %s",
+ devname, strerror(errno));
+ goto error;
+ }
+ dev->intr_handle.type = RTE_INTR_HANDLE_UIO_INTX;
+
+ /* allocate the mapping details for secondary processes */
+ *uio_res = rte_zmalloc("UIO_RES", sizeof(**uio_res), 0);
+ if (*uio_res == NULL) {
+ VMBUS_LOG(ERR, "cannot store uio mmap details");
+ goto error;
+ }
+
+ strlcpy((*uio_res)->path, devname, PATH_MAX);
+ rte_uuid_copy((*uio_res)->id, dev->device_id);
+
+ return 0;
+
+error:
+ vmbus_uio_free_resource(dev, *uio_res);
+ return -1;
+}
+
+static int
+find_max_end_va(const struct rte_memseg_list *msl, void *arg)
+{
+ size_t sz = msl->memseg_arr.len * msl->page_sz;
+ void *end_va = RTE_PTR_ADD(msl->base_va, sz);
+ void **max_va = arg;
+
+ if (*max_va < end_va)
+ *max_va = end_va;
+ return 0;
+}
+
+/*
+ * TODO: this should be part of memseg api.
+ * code is duplicated from PCI.
+ */
+static void *
+vmbus_find_max_end_va(void)
+{
+ void *va = NULL;
+
+ rte_memseg_list_walk(find_max_end_va, &va);
+ return va;
+}
+
+int
+vmbus_uio_map_resource_by_index(struct rte_vmbus_device *dev, int idx,
+ struct mapped_vmbus_resource *uio_res,
+ int flags)
+{
+ size_t size = dev->resource[idx].len;
+ struct vmbus_map *maps = uio_res->maps;
+ void *mapaddr;
+ off_t offset;
+ int fd;
+
+ /* devname for mmap */
+ fd = open(uio_res->path, O_RDWR);
+ if (fd < 0) {
+ VMBUS_LOG(ERR, "Cannot open %s: %s",
+ uio_res->path, strerror(errno));
+ return -1;
+ }
+
+ /* try mapping somewhere close to the end of hugepages */
+ if (vmbus_map_addr == NULL)
+ vmbus_map_addr = vmbus_find_max_end_va();
+
+ /* offset is special in uio: it indicates which resource */
+ offset = idx * PAGE_SIZE;
+
+ mapaddr = vmbus_map_resource(vmbus_map_addr, fd, offset, size, flags);
+ close(fd);
+
+ if (mapaddr == MAP_FAILED)
+ return -1;
+
+ dev->resource[idx].addr = mapaddr;
+ vmbus_map_addr = RTE_PTR_ADD(mapaddr, size);
+
+ /* Record result of successful mapping for use by secondary */
+ maps[idx].addr = mapaddr;
+ maps[idx].size = size;
+
+ return 0;
+}
+
+static int vmbus_uio_map_primary(struct vmbus_channel *chan,
+ void **ring_buf, uint32_t *ring_size)
+{
+ struct mapped_vmbus_resource *uio_res;
+
+ uio_res = vmbus_uio_find_resource(chan->device);
+ if (!uio_res) {
+ VMBUS_LOG(ERR, "can not find resources!");
+ return -ENOMEM;
+ }
+
+ if (uio_res->nb_maps < VMBUS_MAX_RESOURCE) {
+ VMBUS_LOG(ERR, "VMBUS: only %u resources found!",
+ uio_res->nb_maps);
+ return -EINVAL;
+ }
+
+ *ring_size = uio_res->maps[HV_TXRX_RING_MAP].size / 2;
+ *ring_buf = uio_res->maps[HV_TXRX_RING_MAP].addr;
+ return 0;
+}
+
+static int vmbus_uio_map_subchan(const struct rte_vmbus_device *dev,
+ const struct vmbus_channel *chan,
+ void **ring_buf, uint32_t *ring_size)
+{
+ char ring_path[PATH_MAX];
+ size_t file_size;
+ struct stat sb;
+ int fd;
+
+ snprintf(ring_path, sizeof(ring_path),
+ "%s/%s/channels/%u/ring",
+ SYSFS_VMBUS_DEVICES, dev->device.name,
+ chan->relid);
+
+ fd = open(ring_path, O_RDWR);
+ if (fd < 0) {
+ VMBUS_LOG(ERR, "Cannot open %s: %s",
+ ring_path, strerror(errno));
+ return -errno;
+ }
+
+ if (fstat(fd, &sb) < 0) {
+ VMBUS_LOG(ERR, "Cannot state %s: %s",
+ ring_path, strerror(errno));
+ close(fd);
+ return -errno;
+ }
+ file_size = sb.st_size;
+
+ if (file_size == 0 || (file_size & (PAGE_SIZE - 1))) {
+ VMBUS_LOG(ERR, "incorrect size %s: %zu",
+ ring_path, file_size);
+
+ close(fd);
+ return -EINVAL;
+ }
+
+ *ring_size = file_size / 2;
+ *ring_buf = vmbus_map_resource(vmbus_map_addr, fd,
+ 0, sb.st_size, 0);
+ close(fd);
+
+ if (*ring_buf == MAP_FAILED)
+ return -EIO;
+
+ vmbus_map_addr = RTE_PTR_ADD(*ring_buf, file_size);
+ return 0;
+}
+
+int vmbus_uio_map_rings(struct vmbus_channel *chan)
+{
+ const struct rte_vmbus_device *dev = chan->device;
+ uint32_t ring_size;
+ void *ring_buf;
+ int ret;
+
+ /* Primary channel */
+ if (chan->subchannel_id == 0)
+ ret = vmbus_uio_map_primary(chan, &ring_buf, &ring_size);
+ else
+ ret = vmbus_uio_map_subchan(dev, chan, &ring_buf, &ring_size);
+
+ if (ret)
+ return ret;
+
+ vmbus_br_setup(&chan->txbr, ring_buf, ring_size);
+ vmbus_br_setup(&chan->rxbr, (char *)ring_buf + ring_size, ring_size);
+ return 0;
+}
+
+static int vmbus_uio_sysfs_read(const char *dir, const char *name,
+ unsigned long *val, unsigned long max_range)
+{
+ char path[PATH_MAX];
+ FILE *f;
+ int ret;
+
+ snprintf(path, sizeof(path), "%s/%s", dir, name);
+ f = fopen(path, "r");
+ if (!f) {
+ VMBUS_LOG(ERR, "can't open %s:%s",
+ path, strerror(errno));
+ return -errno;
+ }
+
+ if (fscanf(f, "%lu", val) != 1)
+ ret = -EIO;
+ else if (*val > max_range)
+ ret = -ERANGE;
+ else
+ ret = 0;
+ fclose(f);
+
+ return ret;
+}
+
+static bool vmbus_uio_ring_present(const struct rte_vmbus_device *dev,
+ uint32_t relid)
+{
+ char ring_path[PATH_MAX];
+
+ /* Check if kernel has subchannel sysfs files */
+ snprintf(ring_path, sizeof(ring_path),
+ "%s/%s/channels/%u/ring",
+ SYSFS_VMBUS_DEVICES, dev->device.name, relid);
+
+ return access(ring_path, R_OK|W_OK) == 0;
+}
+
+bool vmbus_uio_subchannels_supported(const struct rte_vmbus_device *dev,
+ const struct vmbus_channel *chan)
+{
+ return vmbus_uio_ring_present(dev, chan->relid);
+}
+
+static bool vmbus_isnew_subchannel(struct vmbus_channel *primary,
+ unsigned long id)
+{
+ const struct vmbus_channel *c;
+
+ STAILQ_FOREACH(c, &primary->subchannel_list, next) {
+ if (c->relid == id)
+ return false;
+ }
+ return true;
+}
+
+int vmbus_uio_get_subchan(struct vmbus_channel *primary,
+ struct vmbus_channel **subchan)
+{
+ const struct rte_vmbus_device *dev = primary->device;
+ char chan_path[PATH_MAX], subchan_path[PATH_MAX];
+ struct dirent *ent;
+ DIR *chan_dir;
+
+ snprintf(chan_path, sizeof(chan_path),
+ "%s/%s/channels",
+ SYSFS_VMBUS_DEVICES, dev->device.name);
+
+ chan_dir = opendir(chan_path);
+ if (!chan_dir) {
+ VMBUS_LOG(ERR, "cannot open %s: %s",
+ chan_path, strerror(errno));
+ return -errno;
+ }
+
+ while ((ent = readdir(chan_dir))) {
+ unsigned long relid, subid, monid;
+ char *endp;
+ int err;
+
+ if (ent->d_name[0] == '.')
+ continue;
+
+ errno = 0;
+ relid = strtoul(ent->d_name, &endp, 0);
+ if (*endp || errno != 0 || relid > UINT16_MAX) {
+ VMBUS_LOG(NOTICE, "not a valid channel relid: %s",
+ ent->d_name);
+ continue;
+ }
+
+ snprintf(subchan_path, sizeof(subchan_path), "%s/%lu",
+ chan_path, relid);
+ err = vmbus_uio_sysfs_read(subchan_path, "subchannel_id",
+ &subid, UINT16_MAX);
+		if (err) {
+			VMBUS_LOG(NOTICE,
+				  "cannot read subchannel id from %s",
+				  subchan_path);
+			closedir(chan_dir);
+			return err;
+		}
+
+ if (subid == 0)
+ continue; /* skip primary channel */
+
+ if (!vmbus_isnew_subchannel(primary, relid))
+ continue;
+
+ if (!vmbus_uio_ring_present(dev, relid))
+ continue; /* Ring may not be ready yet */
+
+ err = vmbus_uio_sysfs_read(subchan_path, "monitor_id",
+ &monid, UINT8_MAX);
+		if (err) {
+			VMBUS_LOG(NOTICE,
+				  "cannot read monitor id from %s",
+				  subchan_path);
+			closedir(chan_dir);
+			return err;
+		}
+
+ err = vmbus_chan_create(dev, relid, subid, monid, subchan);
+		if (err) {
+			VMBUS_LOG(NOTICE, "subchannel setup failed");
+			closedir(chan_dir);
+			return err;
+		}
+ break;
+ }
+ closedir(chan_dir);
+
+ return (ent == NULL) ? -ENOENT : 0;
+}
diff --git a/src/spdk/dpdk/drivers/bus/vmbus/meson.build b/src/spdk/dpdk/drivers/bus/vmbus/meson.build
new file mode 100644
index 00000000..18daabec
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vmbus/meson.build
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: BSD-3-Clause
+
+allow_experimental_apis = true
+
+install_headers('rte_bus_vmbus.h', 'rte_vmbus_reg.h')
+
+sources = files('vmbus_common.c',
+ 'vmbus_channel.c',
+ 'vmbus_bufring.c',
+ 'vmbus_common_uio.c')
+
+if host_machine.system() == 'linux'
+ sources += files('linux/vmbus_bus.c',
+ 'linux/vmbus_uio.c')
+ includes += include_directories('linux')
+else
+ build = false
+endif
diff --git a/src/spdk/dpdk/drivers/bus/vmbus/private.h b/src/spdk/dpdk/drivers/bus/vmbus/private.h
new file mode 100644
index 00000000..9964fc42
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vmbus/private.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018, Microsoft Corporation.
+ * All Rights Reserved.
+ */
+
+#ifndef _VMBUS_PRIVATE_H_
+#define _VMBUS_PRIVATE_H_
+
+#include <stdbool.h>
+#include <sys/uio.h>
+#include <rte_log.h>
+#include <rte_vmbus_reg.h>
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+extern int vmbus_logtype_bus;
+#define VMBUS_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, vmbus_logtype_bus, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+struct vmbus_br {
+ struct vmbus_bufring *vbr;
+ uint32_t dsize;
+ uint32_t windex; /* next available location */
+};
+
+#define UIO_NAME_MAX 64
+
+struct vmbus_map {
+ void *addr; /* user mmap of resource */
+ uint64_t size; /* length */
+};
+
+/*
+ * For multi-process we need to reproduce all vmbus mappings in secondary
+ * processes, so save them in a tailq.
+ */
+struct mapped_vmbus_resource {
+ TAILQ_ENTRY(mapped_vmbus_resource) next;
+
+ rte_uuid_t id;
+ int nb_maps;
+ struct vmbus_map maps[VMBUS_MAX_RESOURCE];
+ char path[PATH_MAX];
+};
+
+TAILQ_HEAD(mapped_vmbus_res_list, mapped_vmbus_resource);
+
+#define HV_MON_TRIG_LEN 32
+#define HV_MON_TRIG_MAX 4
+
+struct vmbus_channel {
+ STAILQ_HEAD(, vmbus_channel) subchannel_list;
+ STAILQ_ENTRY(vmbus_channel) next;
+ const struct rte_vmbus_device *device;
+
+ struct vmbus_br rxbr;
+ struct vmbus_br txbr;
+
+ uint16_t relid;
+ uint16_t subchannel_id;
+ uint8_t monitor_id;
+};
+
+#define VMBUS_MAX_CHANNELS 64
+
+int vmbus_chan_create(const struct rte_vmbus_device *device,
+ uint16_t relid, uint16_t subid, uint8_t monitor_id,
+ struct vmbus_channel **new_chan);
+
+void vmbus_add_device(struct rte_vmbus_device *vmbus_dev);
+void vmbus_insert_device(struct rte_vmbus_device *exist_vmbus_dev,
+ struct rte_vmbus_device *new_vmbus_dev);
+void vmbus_remove_device(struct rte_vmbus_device *vmbus_device);
+
+void vmbus_uio_irq_control(struct rte_vmbus_device *dev, int32_t onoff);
+int vmbus_uio_irq_read(struct rte_vmbus_device *dev);
+
+int vmbus_uio_map_resource(struct rte_vmbus_device *dev);
+void vmbus_uio_unmap_resource(struct rte_vmbus_device *dev);
+
+int vmbus_uio_alloc_resource(struct rte_vmbus_device *dev,
+ struct mapped_vmbus_resource **uio_res);
+void vmbus_uio_free_resource(struct rte_vmbus_device *dev,
+ struct mapped_vmbus_resource *uio_res);
+
+struct mapped_vmbus_resource *
+vmbus_uio_find_resource(const struct rte_vmbus_device *dev);
+int vmbus_uio_map_resource_by_index(struct rte_vmbus_device *dev, int res_idx,
+ struct mapped_vmbus_resource *uio_res,
+ int flags);
+
+void *vmbus_map_resource(void *requested_addr, int fd, off_t offset,
+ size_t size, int additional_flags);
+void vmbus_unmap_resource(void *requested_addr, size_t size);
+
+bool vmbus_uio_subchannels_supported(const struct rte_vmbus_device *dev,
+ const struct vmbus_channel *chan);
+int vmbus_uio_get_subchan(struct vmbus_channel *primary,
+ struct vmbus_channel **subchan);
+int vmbus_uio_map_rings(struct vmbus_channel *chan);
+
+void vmbus_br_setup(struct vmbus_br *br, void *buf, unsigned int blen);
+
+/* Amount of space available for write */
+static inline uint32_t
+vmbus_br_availwrite(const struct vmbus_br *br, uint32_t windex)
+{
+ uint32_t rindex = br->vbr->rindex;
+
+ if (windex >= rindex)
+ return br->dsize - (windex - rindex);
+ else
+ return rindex - windex;
+}
+
+static inline uint32_t
+vmbus_br_availread(const struct vmbus_br *br)
+{
+ return br->dsize - vmbus_br_availwrite(br, br->vbr->windex);
+}
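+
+/*
+ * Worked example (hypothetical values): with dsize = 4096,
+ * rindex = 512 and windex = 1024, vmbus_br_availwrite() returns
+ * 4096 - (1024 - 512) = 3584, and vmbus_br_availread() returns
+ * 4096 - 3584 = 512, i.e. exactly the bytes written but not yet read.
+ */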
+
+int vmbus_txbr_write(struct vmbus_br *tbr, const struct iovec iov[], int iovlen,
+ bool *need_sig);
+
+int vmbus_rxbr_peek(const struct vmbus_br *rbr, void *data, size_t dlen);
+
+int vmbus_rxbr_read(struct vmbus_br *rbr, void *data, size_t dlen, size_t skip);
+
+#endif /* _VMBUS_PRIVATE_H_ */
diff --git a/src/spdk/dpdk/drivers/bus/vmbus/rte_bus_vmbus.h b/src/spdk/dpdk/drivers/bus/vmbus/rte_bus_vmbus.h
new file mode 100644
index 00000000..4a2c1f6f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vmbus/rte_bus_vmbus.h
@@ -0,0 +1,407 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018, Microsoft Corporation.
+ * All Rights Reserved.
+ */
+
+#ifndef _VMBUS_H_
+#define _VMBUS_H_
+
+/**
+ * @file
+ *
+ * VMBUS Interface
+ */
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <limits.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <sys/queue.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include <rte_compat.h>
+#include <rte_uuid.h>
+#include <rte_debug.h>
+#include <rte_interrupts.h>
+#include <rte_dev.h>
+#include <rte_vmbus_reg.h>
+
+/* Forward declarations */
+struct rte_vmbus_device;
+struct rte_vmbus_driver;
+struct rte_vmbus_bus;
+struct vmbus_channel;
+struct vmbus_mon_page;
+
+TAILQ_HEAD(rte_vmbus_device_list, rte_vmbus_device);
+TAILQ_HEAD(rte_vmbus_driver_list, rte_vmbus_driver);
+
+/* VMBus iterators */
+#define FOREACH_DEVICE_ON_VMBUS(p) \
+ TAILQ_FOREACH(p, &(rte_vmbus_bus.device_list), next)
+
+#define FOREACH_DRIVER_ON_VMBUS(p) \
+ TAILQ_FOREACH(p, &(rte_vmbus_bus.driver_list), next)
+
+/** Maximum number of VMBUS resources. */
+enum hv_uio_map {
+ HV_TXRX_RING_MAP = 0,
+ HV_INT_PAGE_MAP,
+ HV_MON_PAGE_MAP,
+ HV_RECV_BUF_MAP,
+ HV_SEND_BUF_MAP
+};
+#define VMBUS_MAX_RESOURCE 5
+
+/**
+ * A structure describing a VMBUS device.
+ */
+struct rte_vmbus_device {
+ TAILQ_ENTRY(rte_vmbus_device) next; /**< Next probed VMBUS device */
+ const struct rte_vmbus_driver *driver; /**< Associated driver */
+ struct rte_device device; /**< Inherit core device */
+ rte_uuid_t device_id; /**< VMBUS device id */
+ rte_uuid_t class_id; /**< VMBUS device type */
+ uint32_t relid; /**< id for primary */
+ uint8_t monitor_id; /**< monitor page */
+ int uio_num; /**< UIO device number */
+ uint32_t *int_page; /**< VMBUS interrupt page */
+ struct vmbus_channel *primary; /**< VMBUS primary channel */
+ struct vmbus_mon_page *monitor_page; /**< VMBUS monitor page */
+
+ struct rte_intr_handle intr_handle; /**< Interrupt handle */
+ struct rte_mem_resource resource[VMBUS_MAX_RESOURCE];
+};
+
+/**
+ * Initialization function for the driver called during VMBUS probing.
+ */
+typedef int (vmbus_probe_t)(struct rte_vmbus_driver *,
+ struct rte_vmbus_device *);
+
+/**
+ * Removal function for the driver, called during hot-unplugging.
+ */
+typedef int (vmbus_remove_t)(struct rte_vmbus_device *);
+
+/**
+ * A structure describing a VMBUS driver.
+ */
+struct rte_vmbus_driver {
+ TAILQ_ENTRY(rte_vmbus_driver) next; /**< Next in list. */
+ struct rte_driver driver;
+ struct rte_vmbus_bus *bus; /**< VM bus reference. */
+ vmbus_probe_t *probe; /**< Device Probe function. */
+ vmbus_remove_t *remove; /**< Device Remove function. */
+
+ const rte_uuid_t *id_table; /**< ID table. */
+};
+
+
+/**
+ * Structure describing the VM bus
+ */
+struct rte_vmbus_bus {
+ struct rte_bus bus; /**< Inherit the generic class */
+ struct rte_vmbus_device_list device_list; /**< List of devices */
+ struct rte_vmbus_driver_list driver_list; /**< List of drivers */
+};
+
+/**
+ * Scan the content of the VMBUS bus and add discovered devices
+ * to the devices list
+ *
+ * @return
+ * 0 on success, negative on error
+ */
+int rte_vmbus_scan(void);
+
+/**
+ * Probe the VMBUS bus
+ *
+ * @return
+ * - 0 on success.
+ * - !0 on error.
+ */
+int rte_vmbus_probe(void);
+
+/**
+ * Map the VMBUS device resources in user space virtual memory address
+ *
+ * @param dev
+ * A pointer to a rte_vmbus_device structure describing the device
+ * to use
+ *
+ * @return
+ * 0 on success, negative on error and positive if no driver
+ * is found for the device.
+ */
+int rte_vmbus_map_device(struct rte_vmbus_device *dev);
+
+/**
+ * Unmap this device
+ *
+ * @param dev
+ * A pointer to a rte_vmbus_device structure describing the device
+ * to use
+ */
+void rte_vmbus_unmap_device(struct rte_vmbus_device *dev);
+
+/**
+ * Get connection to primary VMBUS channel
+ *
+ * @param device
+ * A pointer to a rte_vmbus_device structure describing the device
+ * @param chan
+ * A pointer to a VMBUS channel pointer that will be filled.
+ * @return
+ * - 0 Success; channel opened.
+ * - -ENOMEM: Not enough memory available.
+ * - -EINVAL: Regions could not be mapped.
+ */
+int rte_vmbus_chan_open(struct rte_vmbus_device *device,
+ struct vmbus_channel **chan);
+
+/**
+ * Free connection to VMBUS channel
+ *
+ * @param chan
+ * VMBUS channel
+ */
+void rte_vmbus_chan_close(struct vmbus_channel *chan);
+
+/**
+ * Gets the maximum number of channels supported on device
+ *
+ * @param device
+ * A pointer to a rte_vmbus_device structure describing the device
+ * @return
+ * Number of channels available.
+ */
+int rte_vmbus_max_channels(const struct rte_vmbus_device *device);
+
+/**
+ * Get a connection to new secondary vmbus channel
+ *
+ * @param primary
+ * A pointer to primary VMBUS channel
+ * @param chan
+ * A pointer to a secondary VMBUS channel pointer that will be filled.
+ * @return
+ * - 0 Success; channel opened.
+ * - -ENOMEM: Not enough memory available.
+ * - -EINVAL: Regions could not be mapped.
+ */
+int rte_vmbus_subchan_open(struct vmbus_channel *primary,
+ struct vmbus_channel **new_chan);
+
+/**
+ * Disable IRQ for device
+ *
+ * @param device
+ * VMBUS device
+ */
+void rte_vmbus_irq_mask(struct rte_vmbus_device *device);
+
+/**
+ * Enable IRQ for device
+ *
+ * @param device
+ * VMBUS device
+ */
+void rte_vmbus_irq_unmask(struct rte_vmbus_device *device);
+
+/**
+ * Read (and wait) for IRQ
+ *
+ * @param device
+ * VMBUS device
+ */
+int rte_vmbus_irq_read(struct rte_vmbus_device *device);
+
+/**
+ * Test if channel is empty
+ *
+ * @param channel
+ * Pointer to vmbus_channel structure.
+ * @return
+ *	True if no data is present in the incoming ring.
+ */
+bool rte_vmbus_chan_rx_empty(const struct vmbus_channel *channel);
+
+/**
+ * Send the specified buffer on the given channel
+ *
+ * @param channel
+ * Pointer to vmbus_channel structure.
+ * @param type
+ *	Type of packet being sent, e.g. negotiate, time
+ *	packet, etc.
+ * @param data
+ * Pointer to the buffer to send
+ * @param dlen
+ * Number of bytes of data to send
+ * @param xact
+ * Identifier of the request
+ * @param flags
+ * Message type inband, rxbuf, gpa
+ * @param need_sig
+ *	If non-NULL, set when the host must be signaled (optional)
+ *
+ * Sends data in buffer directly to Hyper-V via the vmbus
+ */
+int rte_vmbus_chan_send(struct vmbus_channel *channel, uint16_t type,
+ void *data, uint32_t dlen,
+ uint64_t xact, uint32_t flags, bool *need_sig);
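+
+/*
+ * Usage sketch (hypothetical request; names and sizes are
+ * illustrative only):
+ *
+ *	bool need_sig = false;
+ *	struct my_request req = { 0 };
+ *
+ *	err = rte_vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_INBAND,
+ *				  &req, sizeof(req), ++xactid,
+ *				  VMBUS_CHANPKT_FLAG_RC, &need_sig);
+ *	if (err == 0 && need_sig)
+ *		rte_vmbus_chan_signal_tx(chan);
+ */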
+
+/**
+ * Explicitly signal host that data is available
+ *
+ * @param channel
+ * Pointer to vmbus_channel structure.
+ *
+ * Used when batching multiple sends and only signaling host
+ * after the last send.
+ */
+void rte_vmbus_chan_signal_tx(const struct vmbus_channel *channel);
+
+/* Structure for scatter/gather I/O */
+struct iova_list {
+ rte_iova_t addr;
+ uint32_t len;
+};
+#define MAX_PAGE_BUFFER_COUNT 32
+
+/**
+ * Send a scattered buffer on the given channel
+ *
+ * @param channel
+ * Pointer to vmbus_channel structure.
+ * @param type
+ *	Type of packet being sent, e.g. negotiate, time
+ *	packet, etc.
+ * @param gpa
+ * Array of buffers to send
+ * @param gpacnt
+ *	Number of elements in the gpa array
+ * @param data
+ *	Pointer to the buffer of additional data to send
+ * @param dlen
+ *	Maximum size of what the buffer will hold
+ * @param xact
+ * Identifier of the request
+ * @param flags
+ * Message type inband, rxbuf, gpa
+ * @param need_sig
+ *	If non-NULL, set when the host must be signaled (optional)
+ *
+ * Sends data in buffer directly to Hyper-V via the vmbus
+ */
+int rte_vmbus_chan_send_sglist(struct vmbus_channel *channel,
+ struct vmbus_gpa gpa[], uint32_t gpacnt,
+ void *data, uint32_t dlen,
+ uint64_t xact, bool *need_sig);
+/**
+ * Receive response to request on the given channel
+ * skips the channel header.
+ *
+ * @param channel
+ * Pointer to vmbus_channel structure.
+ * @param data
+ * Pointer to the buffer you want to receive the data into.
+ * @param len
+ * Pointer to size of receive buffer (in/out)
+ * @param request_id
+ * Pointer to received transaction_id
+ * @return
+ * On success, returns 0
+ * On failure, returns negative errno.
+ */
+int rte_vmbus_chan_recv(struct vmbus_channel *chan,
+ void *data, uint32_t *len,
+ uint64_t *request_id);
+
+/**
+ * Receive response to request on the given channel
+ * includes the channel header.
+ *
+ * @param channel
+ * Pointer to vmbus_channel structure.
+ * @param data
+ * Pointer to the buffer you want to receive the data into.
+ * @param len
+ * Pointer to size of receive buffer (in/out)
+ * @return
+ * On success, returns number of bytes read.
+ * On failure, returns negative errno.
+ */
+int rte_vmbus_chan_recv_raw(struct vmbus_channel *chan,
+ void *data, uint32_t *len);
+
+/**
+ * Notify host of bytes read (after recv_raw)
+ * Signals host if required.
+ *
+ * @param channel
+ * Pointer to vmbus_channel structure.
+ * @param bytes_read
+ * Number of bytes read since last signal
+ */
+void rte_vmbus_chan_signal_read(struct vmbus_channel *chan, uint32_t bytes_read);
+
+/**
+ * Determine sub channel index of the given channel
+ *
+ * @param channel
+ * Pointer to vmbus_channel structure.
+ * @return
+ * Sub channel index (0 for primary)
+ */
+uint16_t rte_vmbus_sub_channel_index(const struct vmbus_channel *chan);
+
+/**
+ * Register a VMBUS driver.
+ *
+ * @param driver
+ * A pointer to a rte_vmbus_driver structure describing the driver
+ * to be registered.
+ */
+void rte_vmbus_register(struct rte_vmbus_driver *driver);
+
+/**
+ * For debug dump contents of ring buffer.
+ *
+ * @param channel
+ * Pointer to vmbus_channel structure.
+ */
+void rte_vmbus_chan_dump(FILE *f, const struct vmbus_channel *chan);
+
+/**
+ * Unregister a VMBUS driver.
+ *
+ * @param driver
+ * A pointer to a rte_vmbus_driver structure describing the driver
+ * to be unregistered.
+ */
+void rte_vmbus_unregister(struct rte_vmbus_driver *driver);
+
+/** Helper for VMBUS device registration from driver instance */
+#define RTE_PMD_REGISTER_VMBUS(nm, vmbus_drv) \
+ RTE_INIT(vmbusinitfn_ ##nm); \
+ static void vmbusinitfn_ ##nm(void) \
+ { \
+ (vmbus_drv).driver.name = RTE_STR(nm); \
+ rte_vmbus_register(&vmbus_drv); \
+ } \
+ RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
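+
+/*
+ * Example use (hypothetical driver; the GUID bytes are placeholders).
+ * The id_table must end with a null UUID, which the bus uses as a
+ * terminator when matching devices:
+ *
+ *	static const rte_uuid_t my_id_table[] = {
+ *		{ 0xf8, 0x61, 0x51, 0x6a, ... },
+ *		{ 0 },
+ *	};
+ *	static struct rte_vmbus_driver my_driver = {
+ *		.id_table = my_id_table,
+ *		.probe = my_probe,
+ *		.remove = my_remove,
+ *	};
+ *	RTE_PMD_REGISTER_VMBUS(net_mydrv, my_driver);
+ */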
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _VMBUS_H_ */
diff --git a/src/spdk/dpdk/drivers/bus/vmbus/rte_bus_vmbus_version.map b/src/spdk/dpdk/drivers/bus/vmbus/rte_bus_vmbus_version.map
new file mode 100644
index 00000000..dabb9203
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vmbus/rte_bus_vmbus_version.map
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+
+DPDK_18.08 {
+ global:
+
+ rte_vmbus_chan_close;
+ rte_vmbus_chan_open;
+ rte_vmbus_chan_recv;
+ rte_vmbus_chan_recv_raw;
+ rte_vmbus_chan_rx_empty;
+ rte_vmbus_chan_send;
+ rte_vmbus_chan_send_sglist;
+ rte_vmbus_chan_signal_read;
+ rte_vmbus_chan_signal_tx;
+ rte_vmbus_irq_mask;
+ rte_vmbus_irq_read;
+ rte_vmbus_irq_unmask;
+ rte_vmbus_map_device;
+ rte_vmbus_max_channels;
+ rte_vmbus_probe;
+ rte_vmbus_register;
+ rte_vmbus_scan;
+ rte_vmbus_sub_channel_index;
+ rte_vmbus_subchan_open;
+ rte_vmbus_unmap_device;
+ rte_vmbus_unregister;
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/bus/vmbus/rte_vmbus_reg.h b/src/spdk/dpdk/drivers/bus/vmbus/rte_vmbus_reg.h
new file mode 100644
index 00000000..f5a0693d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vmbus/rte_vmbus_reg.h
@@ -0,0 +1,344 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018, Microsoft Corporation.
+ * All Rights Reserved.
+ */
+
+#ifndef _VMBUS_REG_H_
+#define _VMBUS_REG_H_
+
+/*
+ * Hyper-V SynIC message format.
+ */
+#define VMBUS_MSG_DSIZE_MAX 240
+#define VMBUS_MSG_SIZE 256
+
+struct vmbus_message {
+ uint32_t type; /* HYPERV_MSGTYPE_ */
+ uint8_t dsize; /* data size */
+ uint8_t flags; /* VMBUS_MSGFLAG_ */
+ uint16_t rsvd;
+ uint64_t id;
+ uint8_t data[VMBUS_MSG_DSIZE_MAX];
+} __rte_packed;
+
+#define VMBUS_MSGFLAG_PENDING 0x01
+
+/*
+ * Hyper-V Monitor Notification Facility
+ */
+
+struct vmbus_mon_trig {
+ uint32_t pending;
+ uint32_t armed;
+} __rte_packed;
+
+#define VMBUS_MONTRIGS_MAX 4
+#define VMBUS_MONTRIG_LEN 32
+
+/*
+ * Hyper-V Monitor Notification Facility
+ */
+struct hyperv_mon_param {
+ uint32_t connid;
+ uint16_t evtflag_ofs;
+ uint16_t rsvd;
+} __rte_packed;
+
+struct vmbus_mon_page {
+ uint32_t state;
+ uint32_t rsvd1;
+
+ struct vmbus_mon_trig trigs[VMBUS_MONTRIGS_MAX];
+ uint8_t rsvd2[536];
+
+ uint16_t lat[VMBUS_MONTRIGS_MAX][VMBUS_MONTRIG_LEN];
+ uint8_t rsvd3[256];
+
+ struct hyperv_mon_param
+ param[VMBUS_MONTRIGS_MAX][VMBUS_MONTRIG_LEN];
+ uint8_t rsvd4[1984];
+} __rte_packed;
+
+/*
+ * Buffer ring
+ */
+
+struct vmbus_bufring {
+ volatile uint32_t windex;
+ volatile uint32_t rindex;
+
+ /*
+ * Interrupt mask {0,1}
+ *
+ * For TX bufring, host set this to 1, when it is processing
+ * the TX bufring, so that we can safely skip the TX event
+ * notification to host.
+ *
+ * For RX bufring, once this is set to 1 by us, host will not
+ * further dispatch interrupts to us, even if there are data
+ * pending on the RX bufring. This effectively disables the
+ * interrupt of the channel to which this RX bufring is attached.
+ */
+ volatile uint32_t imask;
+
+ /*
+ * Win8 uses some of the reserved bits to implement
+ * interrupt driven flow management. On the send side
+ * we can request that the receiver interrupt the sender
+ * when the ring transitions from being full to being able
+ * to handle a message of size "pending_send_sz".
+ *
+ * Add necessary state for this enhancement.
+ */
+ volatile uint32_t pending_send;
+ uint32_t reserved1[12];
+
+ union {
+ struct {
+ uint32_t feat_pending_send_sz:1;
+ };
+ uint32_t value;
+ } feature_bits;
+
+ /* Pad it to PAGE_SIZE so that data starts on page boundary */
+ uint8_t reserved2[4028];
+
+ /*
+ * Ring data starts here + RingDataStartOffset
+ * !!! DO NOT place any fields below this !!!
+ */
+ uint8_t data[0];
+} __rte_packed;
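+
+/*
+ * Note: the fields above total exactly PAGE_SIZE (4096) bytes
+ * (4 * 4 + 48 + 4 + 4028), so for a half-ring of N bytes the usable
+ * data area is N - 4096, e.g. a 16K half-ring carries 12K of data.
+ */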
+
+/*
+ * Channel packets
+ */
+
+/* Channel packet flags */
+#define VMBUS_CHANPKT_TYPE_INBAND 0x0006
+#define VMBUS_CHANPKT_TYPE_RXBUF 0x0007
+#define VMBUS_CHANPKT_TYPE_GPA 0x0009
+#define VMBUS_CHANPKT_TYPE_COMP 0x000b
+
+#define VMBUS_CHANPKT_FLAG_NONE 0
+#define VMBUS_CHANPKT_FLAG_RC 0x0001 /* report completion */
+
+#define VMBUS_CHANPKT_SIZE_SHIFT 3
+#define VMBUS_CHANPKT_SIZE_ALIGN (1 << VMBUS_CHANPKT_SIZE_SHIFT)
+#define VMBUS_CHANPKT_HLEN_MIN \
+ (sizeof(struct vmbus_chanpkt_hdr) >> VMBUS_CHANPKT_SIZE_SHIFT)
+
+static inline uint32_t
+vmbus_chanpkt_getlen(uint16_t pktlen)
+{
+ return (uint32_t)pktlen << VMBUS_CHANPKT_SIZE_SHIFT;
+}
+
+/*
+ * GPA stuffs.
+ */
+struct vmbus_gpa_range {
+ uint32_t len;
+ uint32_t ofs;
+ uint64_t page[0];
+} __rte_packed;
+
+/* This is actually vmbus_gpa_range.gpa_page[1] */
+struct vmbus_gpa {
+ uint32_t len;
+ uint32_t ofs;
+ uint64_t page;
+} __rte_packed;
+
+struct vmbus_chanpkt_hdr {
+ uint16_t type; /* VMBUS_CHANPKT_TYPE_ */
+ uint16_t hlen; /* header len, in 8 bytes */
+ uint16_t tlen; /* total len, in 8 bytes */
+ uint16_t flags; /* VMBUS_CHANPKT_FLAG_ */
+ uint64_t xactid;
+} __rte_packed;
+
+static inline uint32_t
+vmbus_chanpkt_datalen(const struct vmbus_chanpkt_hdr *pkt)
+{
+ return vmbus_chanpkt_getlen(pkt->tlen)
+ - vmbus_chanpkt_getlen(pkt->hlen);
+}
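+
+/*
+ * Worked example: a packet with hlen = 2 and tlen = 8 occupies
+ * 8 << 3 = 64 bytes on the ring, of which 2 << 3 = 16 bytes are
+ * header, so vmbus_chanpkt_datalen() yields 48 bytes of payload.
+ */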
+
+struct vmbus_chanpkt {
+ struct vmbus_chanpkt_hdr hdr;
+} __rte_packed;
+
+struct vmbus_rxbuf_desc {
+ uint32_t len;
+ uint32_t ofs;
+} __rte_packed;
+
+struct vmbus_chanpkt_rxbuf {
+ struct vmbus_chanpkt_hdr hdr;
+ uint16_t rxbuf_id;
+ uint16_t rsvd;
+ uint32_t rxbuf_cnt;
+ struct vmbus_rxbuf_desc rxbuf[];
+} __rte_packed;
+
+struct vmbus_chanpkt_sglist {
+ struct vmbus_chanpkt_hdr hdr;
+ uint32_t rsvd;
+ uint32_t gpa_cnt;
+ struct vmbus_gpa gpa[];
+} __rte_packed;
+
+/*
+ * Channel messages
+ * - Embedded in vmbus_message.msg_data, e.g. response and notification.
+ * - Embedded in hypercall_postmsg_in.hc_data, e.g. request.
+ */
+
+#define VMBUS_CHANMSG_TYPE_CHOFFER 1 /* NOTE */
+#define VMBUS_CHANMSG_TYPE_CHRESCIND 2 /* NOTE */
+#define VMBUS_CHANMSG_TYPE_CHREQUEST 3 /* REQ */
+#define VMBUS_CHANMSG_TYPE_CHOFFER_DONE 4 /* NOTE */
+#define VMBUS_CHANMSG_TYPE_CHOPEN 5 /* REQ */
+#define VMBUS_CHANMSG_TYPE_CHOPEN_RESP 6 /* RESP */
+#define VMBUS_CHANMSG_TYPE_CHCLOSE 7 /* REQ */
+#define VMBUS_CHANMSG_TYPE_GPADL_CONN 8 /* REQ */
+#define VMBUS_CHANMSG_TYPE_GPADL_SUBCONN 9 /* REQ */
+#define VMBUS_CHANMSG_TYPE_GPADL_CONNRESP 10 /* RESP */
+#define VMBUS_CHANMSG_TYPE_GPADL_DISCONN 11 /* REQ */
+#define VMBUS_CHANMSG_TYPE_GPADL_DISCONNRESP 12 /* RESP */
+#define VMBUS_CHANMSG_TYPE_CHFREE 13 /* REQ */
+#define VMBUS_CHANMSG_TYPE_CONNECT 14 /* REQ */
+#define VMBUS_CHANMSG_TYPE_CONNECT_RESP 15 /* RESP */
+#define VMBUS_CHANMSG_TYPE_DISCONNECT 16 /* REQ */
+#define VMBUS_CHANMSG_TYPE_MAX 22
+
+struct vmbus_chanmsg_hdr {
+ uint32_t type; /* VMBUS_CHANMSG_TYPE_ */
+ uint32_t rsvd;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_CONNECT */
+struct vmbus_chanmsg_connect {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t ver;
+ uint32_t rsvd;
+ uint64_t evtflags;
+ uint64_t mnf1;
+ uint64_t mnf2;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_CONNECT_RESP */
+struct vmbus_chanmsg_connect_resp {
+ struct vmbus_chanmsg_hdr hdr;
+ uint8_t done;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_CHREQUEST */
+struct vmbus_chanmsg_chrequest {
+ struct vmbus_chanmsg_hdr hdr;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_DISCONNECT */
+struct vmbus_chanmsg_disconnect {
+ struct vmbus_chanmsg_hdr hdr;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_CHOPEN */
+struct vmbus_chanmsg_chopen {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t chanid;
+ uint32_t openid;
+ uint32_t gpadl;
+ uint32_t vcpuid;
+ uint32_t txbr_pgcnt;
+#define VMBUS_CHANMSG_CHOPEN_UDATA_SIZE 120
+ uint8_t udata[VMBUS_CHANMSG_CHOPEN_UDATA_SIZE];
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_CHOPEN_RESP */
+struct vmbus_chanmsg_chopen_resp {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t chanid;
+ uint32_t openid;
+ uint32_t status;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_GPADL_CONN */
+struct vmbus_chanmsg_gpadl_conn {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t chanid;
+ uint32_t gpadl;
+ uint16_t range_len;
+ uint16_t range_cnt;
+ struct vmbus_gpa_range range;
+} __rte_packed;
+
+#define VMBUS_CHANMSG_GPADL_CONN_PGMAX 26
+
+/* VMBUS_CHANMSG_TYPE_GPADL_SUBCONN */
+struct vmbus_chanmsg_gpadl_subconn {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t msgno;
+ uint32_t gpadl;
+ uint64_t gpa_page[];
+} __rte_packed;
+
+#define VMBUS_CHANMSG_GPADL_SUBCONN_PGMAX 28
+
+/* VMBUS_CHANMSG_TYPE_GPADL_CONNRESP */
+struct vmbus_chanmsg_gpadl_connresp {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t chanid;
+ uint32_t gpadl;
+ uint32_t status;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_CHCLOSE */
+struct vmbus_chanmsg_chclose {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t chanid;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_GPADL_DISCONN */
+struct vmbus_chanmsg_gpadl_disconn {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t chanid;
+ uint32_t gpadl;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_CHFREE */
+struct vmbus_chanmsg_chfree {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t chanid;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_CHRESCIND */
+struct vmbus_chanmsg_chrescind {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t chanid;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_CHOFFER */
+struct vmbus_chanmsg_choffer {
+ struct vmbus_chanmsg_hdr hdr;
+ rte_uuid_t chtype;
+ rte_uuid_t chinst;
+ uint64_t chlat; /* unit: 100ns */
+ uint32_t chrev;
+ uint32_t svrctx_sz;
+ uint16_t chflags;
+ uint16_t mmio_sz; /* unit: MB */
+ uint8_t udata[120];
+ uint16_t subidx;
+ uint16_t rsvd;
+ uint32_t chanid;
+ uint8_t montrig;
+ uint8_t flags1; /* VMBUS_CHOFFER_FLAG1_ */
+ uint16_t flags2;
+ uint32_t connid;
+} __rte_packed;
+
+#define VMBUS_CHOFFER_FLAG1_HASMNF 0x01
+
+#endif /* !_VMBUS_REG_H_ */
diff --git a/src/spdk/dpdk/drivers/bus/vmbus/vmbus_bufring.c b/src/spdk/dpdk/drivers/bus/vmbus/vmbus_bufring.c
new file mode 100644
index 00000000..c8800160
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vmbus/vmbus_bufring.c
@@ -0,0 +1,244 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2009-2012,2016 Microsoft Corp.
+ * Copyright (c) 2012 NetApp Inc.
+ * Copyright (c) 2012 Citrix Inc.
+ * All rights reserved.
+ */
+
+#include <unistd.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <string.h>
+#include <sys/uio.h>
+
+#include <rte_eal.h>
+#include <rte_tailq.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_bus.h>
+#include <rte_atomic.h>
+#include <rte_memory.h>
+#include <rte_pause.h>
+#include <rte_bus_vmbus.h>
+
+#include "private.h"
+
+/* Increase bufring index by inc with wraparound */
+static inline uint32_t vmbus_br_idxinc(uint32_t idx, uint32_t inc, uint32_t sz)
+{
+ idx += inc;
+ if (idx >= sz)
+ idx -= sz;
+
+ return idx;
+}
+
+void vmbus_br_setup(struct vmbus_br *br, void *buf, unsigned int blen)
+{
+ br->vbr = buf;
+ br->windex = br->vbr->windex;
+ br->dsize = blen - sizeof(struct vmbus_bufring);
+}
+
+/*
+ * When we write to the ring buffer, check if the host needs to be
+ * signaled.
+ *
+ * The contract:
+ * - The host guarantees that while it is draining the TX bufring,
+ * it will set the br_imask to indicate it does not need to be
+ * interrupted when new data are added.
+ * - The host guarantees that it will completely drain the TX bufring
+ * before exiting the read loop. Further, once the TX bufring is
+ * empty, it will clear the br_imask and re-check to see if new
+ * data have arrived.
+ */
+static inline bool
+vmbus_txbr_need_signal(const struct vmbus_br *tbr, uint32_t old_windex)
+{
+ rte_smp_mb();
+ if (tbr->vbr->imask)
+ return false;
+
+ rte_smp_rmb();
+
+ /*
+	 * This is the only case we need to signal: when the
+ * ring transitions from being empty to non-empty.
+ */
+ return old_windex == tbr->vbr->rindex;
+}
+
+static inline uint32_t
+vmbus_txbr_copyto(const struct vmbus_br *tbr, uint32_t windex,
+ const void *src0, uint32_t cplen)
+{
+ uint8_t *br_data = tbr->vbr->data;
+ uint32_t br_dsize = tbr->dsize;
+ const uint8_t *src = src0;
+
+ /* XXX use double mapping like Linux kernel? */
+ if (cplen > br_dsize - windex) {
+ uint32_t fraglen = br_dsize - windex;
+
+ /* Wrap-around detected */
+ memcpy(br_data + windex, src, fraglen);
+ memcpy(br_data, src + fraglen, cplen - fraglen);
+ } else {
+ memcpy(br_data + windex, src, cplen);
+ }
+
+ return vmbus_br_idxinc(windex, cplen, br_dsize);
+}
+
+/*
+ * Write scattered channel packet to TX bufring.
+ *
+ * The offset of this channel packet is written as a 64bits value
+ * immediately after this channel packet.
+ *
+ * The write goes through three stages:
+ * 1. Reserve space in ring buffer for the new data.
+ * Writer atomically moves priv_write_index.
+ * 2. Copy the new data into the ring.
+ *  3. Update the write index of the ring (visible to host) so the
+ *     host knows how far valid data extends. Writer updates windex.
+ */
+int
+vmbus_txbr_write(struct vmbus_br *tbr, const struct iovec iov[], int iovlen,
+ bool *need_sig)
+{
+ struct vmbus_bufring *vbr = tbr->vbr;
+ uint32_t ring_size = tbr->dsize;
+ uint32_t old_windex, next_windex, windex, total;
+ uint64_t save_windex;
+ int i;
+
+ total = 0;
+ for (i = 0; i < iovlen; i++)
+ total += iov[i].iov_len;
+ total += sizeof(save_windex);
+
+ /* Reserve space in ring */
+ do {
+ uint32_t avail;
+
+ /* Get current free location */
+ old_windex = tbr->windex;
+
+ /* Prevent compiler reordering this with calculation */
+ rte_compiler_barrier();
+
+ avail = vmbus_br_availwrite(tbr, old_windex);
+
+ /* If not enough space in ring, then tell caller. */
+ if (avail <= total)
+ return -EAGAIN;
+
+ next_windex = vmbus_br_idxinc(old_windex, total, ring_size);
+
+ /* Atomic update of next write_index for other threads */
+ } while (!rte_atomic32_cmpset(&tbr->windex, old_windex, next_windex));
+
+ /* Space from old..new is now reserved */
+ windex = old_windex;
+ for (i = 0; i < iovlen; i++) {
+ windex = vmbus_txbr_copyto(tbr, windex,
+ iov[i].iov_base, iov[i].iov_len);
+ }
+
+ /* Set the offset of the current channel packet. */
+ save_windex = ((uint64_t)old_windex) << 32;
+ windex = vmbus_txbr_copyto(tbr, windex, &save_windex,
+ sizeof(save_windex));
+
+ /* The region reserved should match region used */
+ RTE_ASSERT(windex == next_windex);
+
+ /* Ensure that data is available before updating host index */
+ rte_smp_wmb();
+
+	/* Check in for our reservation; wait for our turn to update host */
+ while (!rte_atomic32_cmpset(&vbr->windex, old_windex, next_windex))
+ rte_pause();
+
+ /* If host had read all data before this, then need to signal */
+ *need_sig |= vmbus_txbr_need_signal(tbr, old_windex);
+ return 0;
+}
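+
+/*
+ * Usage sketch (hypothetical caller; the real callers live in
+ * vmbus_channel.c):
+ *
+ *	bool need_sig = false;
+ *	struct iovec iov = { .iov_base = &pkt, .iov_len = sizeof(pkt) };
+ *
+ *	if (vmbus_txbr_write(&chan->txbr, &iov, 1, &need_sig) == 0 &&
+ *	    need_sig)
+ *		rte_vmbus_chan_signal_tx(chan);
+ */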
+
+static inline uint32_t
+vmbus_rxbr_copyfrom(const struct vmbus_br *rbr, uint32_t rindex,
+ void *dst0, size_t cplen)
+{
+ const uint8_t *br_data = rbr->vbr->data;
+ uint32_t br_dsize = rbr->dsize;
+ uint8_t *dst = dst0;
+
+ if (cplen > br_dsize - rindex) {
+ uint32_t fraglen = br_dsize - rindex;
+
+ /* Wrap-around detected. */
+ memcpy(dst, br_data + rindex, fraglen);
+ memcpy(dst + fraglen, br_data, cplen - fraglen);
+ } else {
+ memcpy(dst, br_data + rindex, cplen);
+ }
+
+ return vmbus_br_idxinc(rindex, cplen, br_dsize);
+}
+
+/* Copy data from receive ring but don't change index */
+int
+vmbus_rxbr_peek(const struct vmbus_br *rbr, void *data, size_t dlen)
+{
+ uint32_t avail;
+
+ /*
+ * The requested data and the 64bits channel packet
+ * offset should be there at least.
+ */
+ avail = vmbus_br_availread(rbr);
+ if (avail < dlen + sizeof(uint64_t))
+ return -EAGAIN;
+
+ vmbus_rxbr_copyfrom(rbr, rbr->vbr->rindex, data, dlen);
+ return 0;
+}
+
+/*
+ * Copy data from receive ring and change index
+ * NOTE:
+ * We assume (dlen + skip) == sizeof(channel packet).
+ */
+int
+vmbus_rxbr_read(struct vmbus_br *rbr, void *data, size_t dlen, size_t skip)
+{
+ struct vmbus_bufring *vbr = rbr->vbr;
+ uint32_t br_dsize = rbr->dsize;
+ uint32_t rindex;
+
+ if (vmbus_br_availread(rbr) < dlen + skip + sizeof(uint64_t))
+ return -EAGAIN;
+
+ /* Record where host was when we started read (for debug) */
+ rbr->windex = rbr->vbr->windex;
+
+ /*
+ * Copy channel packet from RX bufring.
+ */
+ rindex = vmbus_br_idxinc(rbr->vbr->rindex, skip, br_dsize);
+ rindex = vmbus_rxbr_copyfrom(rbr, rindex, data, dlen);
+
+ /*
+ * Discard this channel packet's 64bits offset, which is useless to us.
+ */
+ rindex = vmbus_br_idxinc(rindex, sizeof(uint64_t), br_dsize);
+
+ /* Update the read index _after_ the channel packet is fetched. */
+ rte_compiler_barrier();
+
+ vbr->rindex = rindex;
+
+ return 0;
+}
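+
+/*
+ * On-ring layout consumed by one vmbus_rxbr_read() call (sketch):
+ *
+ *	|<-- skip -->|<------ dlen ------>|<-- 8 bytes -->|
+ *	   header           payload         saved offset
+ *
+ * rindex advances past all three pieces in a single call.
+ */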
diff --git a/src/spdk/dpdk/drivers/bus/vmbus/vmbus_channel.c b/src/spdk/dpdk/drivers/bus/vmbus/vmbus_channel.c
new file mode 100644
index 00000000..cc5f3e83
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vmbus/vmbus_channel.c
@@ -0,0 +1,405 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018, Microsoft Corporation.
+ * All Rights Reserved.
+ */
+
+#include <unistd.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/uio.h>
+
+#include <rte_eal.h>
+#include <rte_tailq.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_bus.h>
+#include <rte_atomic.h>
+#include <rte_memory.h>
+#include <rte_bus_vmbus.h>
+
+#include "private.h"
+
+static inline void
+vmbus_sync_set_bit(volatile uint32_t *addr, uint32_t mask)
+{
+	/* Use GCC builtin which does an atomic OR operation */
+ __sync_or_and_fetch(addr, mask);
+}
+
+static inline void
+vmbus_send_interrupt(const struct rte_vmbus_device *dev, uint32_t relid)
+{
+ uint32_t *int_addr;
+ uint32_t int_mask;
+
+ int_addr = dev->int_page + relid / 32;
+ int_mask = 1u << (relid % 32);
+
+ vmbus_sync_set_bit(int_addr, int_mask);
+}
+
+static inline void
+vmbus_set_monitor(const struct rte_vmbus_device *dev, uint32_t monitor_id)
+{
+ uint32_t *monitor_addr, monitor_mask;
+ unsigned int trigger_index;
+
+ trigger_index = monitor_id / HV_MON_TRIG_LEN;
+ monitor_mask = 1u << (monitor_id % HV_MON_TRIG_LEN);
+
+ monitor_addr = &dev->monitor_page->trigs[trigger_index].pending;
+ vmbus_sync_set_bit(monitor_addr, monitor_mask);
+}
+
+static void
+vmbus_set_event(const struct rte_vmbus_device *dev,
+ const struct vmbus_channel *chan)
+{
+ vmbus_send_interrupt(dev, chan->relid);
+ vmbus_set_monitor(dev, chan->monitor_id);
+}
+
+/*
+ * Notify host that there are data pending on our TX bufring.
+ *
+ * Since this is in userspace, rely on the monitor page.
+ * Can't do a hypercall from userspace.
+ */
+void
+rte_vmbus_chan_signal_tx(const struct vmbus_channel *chan)
+{
+ const struct rte_vmbus_device *dev = chan->device;
+ const struct vmbus_br *tbr = &chan->txbr;
+
+ /* Make sure all updates are done before signaling host */
+ rte_smp_wmb();
+
+	/* If host is masking interrupts, don't signal */
+ if (tbr->vbr->imask)
+ return;
+
+ vmbus_set_event(dev, chan);
+}
+
+
+/* Do a simple send directly using transmit ring. */
+int rte_vmbus_chan_send(struct vmbus_channel *chan, uint16_t type,
+ void *data, uint32_t dlen,
+ uint64_t xactid, uint32_t flags, bool *need_sig)
+{
+ struct vmbus_chanpkt pkt;
+ unsigned int pktlen, pad_pktlen;
+ const uint32_t hlen = sizeof(pkt);
+ bool send_evt = false;
+ uint64_t pad = 0;
+ struct iovec iov[3];
+ int error;
+
+ pktlen = hlen + dlen;
+ pad_pktlen = RTE_ALIGN(pktlen, sizeof(uint64_t));
+
+ pkt.hdr.type = type;
+ pkt.hdr.flags = flags;
+ pkt.hdr.hlen = hlen >> VMBUS_CHANPKT_SIZE_SHIFT;
+ pkt.hdr.tlen = pad_pktlen >> VMBUS_CHANPKT_SIZE_SHIFT;
+ pkt.hdr.xactid = xactid;
+
+ iov[0].iov_base = &pkt;
+ iov[0].iov_len = hlen;
+ iov[1].iov_base = data;
+ iov[1].iov_len = dlen;
+ iov[2].iov_base = &pad;
+ iov[2].iov_len = pad_pktlen - pktlen;
+
+ error = vmbus_txbr_write(&chan->txbr, iov, 3, &send_evt);
+
+ /*
+	 * Caller sets need_sig to non-NULL if it will handle
+	 * signaling later, if required.
+	 * If need_sig is NULL, signal now if needed.
+ */
+ if (need_sig)
+ *need_sig |= send_evt;
+ else if (error == 0 && send_evt)
+ rte_vmbus_chan_signal_tx(chan);
+ return error;
+}
+
+/* Do a scatter/gather send where the descriptor points to data. */
+int rte_vmbus_chan_send_sglist(struct vmbus_channel *chan,
+ struct vmbus_gpa sg[], uint32_t sglen,
+ void *data, uint32_t dlen,
+ uint64_t xactid, bool *need_sig)
+{
+ struct vmbus_chanpkt_sglist pkt;
+ unsigned int pktlen, pad_pktlen, hlen;
+ bool send_evt = false;
+ struct iovec iov[4];
+ uint64_t pad = 0;
+ int error;
+
+ hlen = offsetof(struct vmbus_chanpkt_sglist, gpa[sglen]);
+ pktlen = hlen + dlen;
+ pad_pktlen = RTE_ALIGN(pktlen, sizeof(uint64_t));
+
+ pkt.hdr.type = VMBUS_CHANPKT_TYPE_GPA;
+ pkt.hdr.flags = VMBUS_CHANPKT_FLAG_RC;
+ pkt.hdr.hlen = hlen >> VMBUS_CHANPKT_SIZE_SHIFT;
+ pkt.hdr.tlen = pad_pktlen >> VMBUS_CHANPKT_SIZE_SHIFT;
+ pkt.hdr.xactid = xactid;
+ pkt.rsvd = 0;
+ pkt.gpa_cnt = sglen;
+
+ iov[0].iov_base = &pkt;
+ iov[0].iov_len = sizeof(pkt);
+ iov[1].iov_base = sg;
+ iov[1].iov_len = sizeof(struct vmbus_gpa) * sglen;
+ iov[2].iov_base = data;
+ iov[2].iov_len = dlen;
+ iov[3].iov_base = &pad;
+ iov[3].iov_len = pad_pktlen - pktlen;
+
+ error = vmbus_txbr_write(&chan->txbr, iov, 4, &send_evt);
+
+ /* if caller is batching, just propagate the status */
+ if (need_sig)
+ *need_sig |= send_evt;
+ else if (error == 0 && send_evt)
+ rte_vmbus_chan_signal_tx(chan);
+ return error;
+}
+
+bool rte_vmbus_chan_rx_empty(const struct vmbus_channel *channel)
+{
+ const struct vmbus_br *br = &channel->rxbr;
+
+ return br->vbr->rindex == br->vbr->windex;
+}
+
+/* Signal host after reading N bytes */
+void rte_vmbus_chan_signal_read(struct vmbus_channel *chan, uint32_t bytes_read)
+{
+ struct vmbus_br *rbr = &chan->rxbr;
+ uint32_t write_sz, pending_sz;
+
+ /* No need for signaling on older versions */
+ if (!rbr->vbr->feature_bits.feat_pending_send_sz)
+ return;
+
+ /* Make sure reading of pending happens after new read index */
+ rte_mb();
+
+ pending_sz = rbr->vbr->pending_send;
+ if (!pending_sz)
+ return;
+
+ rte_smp_rmb();
+ write_sz = vmbus_br_availwrite(rbr, rbr->vbr->windex);
+
+ /* If there was space before then host was not blocked */
+ if (write_sz - bytes_read > pending_sz)
+ return;
+
+ /* If pending write will not fit */
+ if (write_sz <= pending_sz)
+ return;
+
+ vmbus_set_event(chan->device, chan);
+}
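+
+/*
+ * Worked example (illustrative values): with pending_send = 1024 and
+ * write_sz = 1200 after reading bytes_read = 400 bytes, the space
+ * before the read was 1200 - 400 = 800 <= 1024, so the host may have
+ * been blocked; since 1200 > 1024 its pending write now fits, and the
+ * host is signaled.
+ */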
+
+int rte_vmbus_chan_recv(struct vmbus_channel *chan, void *data, uint32_t *len,
+ uint64_t *request_id)
+{
+ struct vmbus_chanpkt_hdr pkt;
+ uint32_t dlen, hlen, bufferlen = *len;
+ int error;
+
+ *len = 0;
+
+ error = vmbus_rxbr_peek(&chan->rxbr, &pkt, sizeof(pkt));
+ if (error)
+ return error;
+
+ if (unlikely(pkt.hlen < VMBUS_CHANPKT_HLEN_MIN)) {
+ VMBUS_LOG(ERR, "VMBUS recv, invalid hlen %u", pkt.hlen);
+ /* XXX this channel is dead actually. */
+ return -EIO;
+ }
+
+ if (unlikely(pkt.hlen > pkt.tlen)) {
+		VMBUS_LOG(ERR, "VMBUS recv, invalid hlen %u and tlen %u",
+ pkt.hlen, pkt.tlen);
+ return -EIO;
+ }
+
+	/* Lengths are in quadwords */
+ hlen = pkt.hlen << VMBUS_CHANPKT_SIZE_SHIFT;
+ dlen = (pkt.tlen << VMBUS_CHANPKT_SIZE_SHIFT) - hlen;
+ *len = dlen;
+
+ /* If caller buffer is not large enough */
+ if (unlikely(dlen > bufferlen))
+ return -ENOBUFS;
+
+ if (request_id)
+ *request_id = pkt.xactid;
+
+ /* Read data and skip packet header */
+ error = vmbus_rxbr_read(&chan->rxbr, data, dlen, hlen);
+ if (error)
+ return error;
+
+ rte_vmbus_chan_signal_read(chan, dlen + hlen + sizeof(uint64_t));
+ return 0;
+}
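+
+/*
+ * Typical polling loop (hypothetical caller, for illustration):
+ *
+ *	uint32_t len = sizeof(buf);
+ *	uint64_t xactid;
+ *
+ *	while (rte_vmbus_chan_recv(chan, buf, &len, &xactid) == 0) {
+ *		handle(buf, len, xactid);
+ *		len = sizeof(buf);
+ *	}
+ */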
+
+/* TODO: replace this with in-place ring buffer (no copy) */
+int rte_vmbus_chan_recv_raw(struct vmbus_channel *chan,
+ void *data, uint32_t *len)
+{
+ struct vmbus_chanpkt_hdr pkt;
+ uint32_t dlen, bufferlen = *len;
+ int error;
+
+ error = vmbus_rxbr_peek(&chan->rxbr, &pkt, sizeof(pkt));
+ if (error)
+ return error;
+
+ if (unlikely(pkt.hlen < VMBUS_CHANPKT_HLEN_MIN)) {
+ VMBUS_LOG(ERR, "VMBUS recv, invalid hlen %u", pkt.hlen);
+ /* XXX this channel is dead actually. */
+ return -EIO;
+ }
+
+ if (unlikely(pkt.hlen > pkt.tlen)) {
+		VMBUS_LOG(ERR, "VMBUS recv, invalid hlen %u and tlen %u",
+ pkt.hlen, pkt.tlen);
+ return -EIO;
+ }
+
+	/* Lengths are in quadwords */
+ dlen = pkt.tlen << VMBUS_CHANPKT_SIZE_SHIFT;
+ *len = dlen;
+
+ /* If caller buffer is not large enough */
+ if (unlikely(dlen > bufferlen))
+ return -ENOBUFS;
+
+ /* Read data and skip packet header */
+ error = vmbus_rxbr_read(&chan->rxbr, data, dlen, 0);
+ if (error)
+ return error;
+
+ /* Return the number of bytes read */
+ return dlen + sizeof(uint64_t);
+}
+
+int vmbus_chan_create(const struct rte_vmbus_device *device,
+ uint16_t relid, uint16_t subid, uint8_t monitor_id,
+ struct vmbus_channel **new_chan)
+{
+ struct vmbus_channel *chan;
+ int err;
+
+ chan = rte_zmalloc_socket("VMBUS", sizeof(*chan), RTE_CACHE_LINE_SIZE,
+ device->device.numa_node);
+ if (!chan)
+ return -ENOMEM;
+
+ STAILQ_INIT(&chan->subchannel_list);
+ chan->device = device;
+ chan->subchannel_id = subid;
+ chan->relid = relid;
+ chan->monitor_id = monitor_id;
+ *new_chan = chan;
+
+ err = vmbus_uio_map_rings(chan);
+ if (err) {
+ rte_free(chan);
+ return err;
+ }
+
+ return 0;
+}
+
+/* Setup the primary channel */
+int rte_vmbus_chan_open(struct rte_vmbus_device *device,
+ struct vmbus_channel **new_chan)
+{
+ int err;
+
+ err = vmbus_chan_create(device, device->relid, 0,
+ device->monitor_id, new_chan);
+ if (!err)
+ device->primary = *new_chan;
+
+ return err;
+}
+
+int rte_vmbus_max_channels(const struct rte_vmbus_device *device)
+{
+ if (vmbus_uio_subchannels_supported(device, device->primary))
+ return VMBUS_MAX_CHANNELS;
+ else
+ return 1;
+}
+
+/* Setup secondary channel */
+int rte_vmbus_subchan_open(struct vmbus_channel *primary,
+ struct vmbus_channel **new_chan)
+{
+ struct vmbus_channel *chan;
+ int err;
+
+ err = vmbus_uio_get_subchan(primary, &chan);
+ if (err)
+ return err;
+
+ STAILQ_INSERT_TAIL(&primary->subchannel_list, chan, next);
+ *new_chan = chan;
+ return 0;
+}
+
+uint16_t rte_vmbus_sub_channel_index(const struct vmbus_channel *chan)
+{
+ return chan->subchannel_id;
+}
+
+void rte_vmbus_chan_close(struct vmbus_channel *chan)
+{
+ const struct rte_vmbus_device *device = chan->device;
+ struct vmbus_channel *primary = device->primary;
+
+ if (chan != primary)
+ STAILQ_REMOVE(&primary->subchannel_list, chan,
+ vmbus_channel, next);
+
+ rte_free(chan);
+}
+
+static void vmbus_dump_ring(FILE *f, const char *id, const struct vmbus_br *br)
+{
+ const struct vmbus_bufring *vbr = br->vbr;
+ struct vmbus_chanpkt_hdr pkt;
+
+ fprintf(f, "%s windex=%u rindex=%u mask=%u pending=%u feature=%#x\n",
+ id, vbr->windex, vbr->rindex, vbr->imask,
+ vbr->pending_send, vbr->feature_bits.value);
+ fprintf(f, " size=%u avail write=%u read=%u\n",
+ br->dsize, vmbus_br_availwrite(br, vbr->windex),
+ vmbus_br_availread(br));
+
+ if (vmbus_rxbr_peek(br, &pkt, sizeof(pkt)) == 0)
+ fprintf(f, " pkt type %#x len %u flags %#x xactid %#"PRIx64"\n",
+ pkt.type,
+ pkt.tlen << VMBUS_CHANPKT_SIZE_SHIFT,
+ pkt.flags, pkt.xactid);
+}
+
+void rte_vmbus_chan_dump(FILE *f, const struct vmbus_channel *chan)
+{
+ fprintf(f, "channel[%u] relid=%u monitor=%u\n",
+ chan->subchannel_id, chan->relid, chan->monitor_id);
+ vmbus_dump_ring(f, "rxbr", &chan->rxbr);
+ vmbus_dump_ring(f, "txbr", &chan->txbr);
+}
diff --git a/src/spdk/dpdk/drivers/bus/vmbus/vmbus_common.c b/src/spdk/dpdk/drivers/bus/vmbus/vmbus_common.c
new file mode 100644
index 00000000..c7165ad5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vmbus/vmbus_common.c
@@ -0,0 +1,286 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018, Microsoft Corporation.
+ * All Rights Reserved.
+ */
+
+#include <string.h>
+#include <unistd.h>
+#include <dirent.h>
+#include <fcntl.h>
+#include <sys/queue.h>
+#include <sys/mman.h>
+
+#include <rte_log.h>
+#include <rte_bus.h>
+#include <rte_eal.h>
+#include <rte_tailq.h>
+#include <rte_devargs.h>
+#include <rte_malloc.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+#include <rte_bus_vmbus.h>
+
+#include "private.h"
+
+int vmbus_logtype_bus;
+extern struct rte_vmbus_bus rte_vmbus_bus;
+
+/* map a particular resource from a file */
+void *
+vmbus_map_resource(void *requested_addr, int fd, off_t offset, size_t size,
+ int flags)
+{
+ void *mapaddr;
+
+ /* Map the memory resource of device */
+ mapaddr = mmap(requested_addr, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | flags, fd, offset);
+ if (mapaddr == MAP_FAILED) {
+ VMBUS_LOG(ERR,
+ "mmap(%d, %p, %zu, %ld) failed: %s",
+ fd, requested_addr, size, (long)offset,
+ strerror(errno));
+ }
+ return mapaddr;
+}
+
+/* unmap a particular resource */
+void
+vmbus_unmap_resource(void *requested_addr, size_t size)
+{
+ if (requested_addr == NULL)
+ return;
+
+ /* Unmap the VMBUS memory resource of device */
+ if (munmap(requested_addr, size)) {
+ VMBUS_LOG(ERR, "munmap(%p, 0x%lx) failed: %s",
+ requested_addr, (unsigned long)size,
+ strerror(errno));
+ } else
+ VMBUS_LOG(DEBUG, " VMBUS memory unmapped at %p",
+ requested_addr);
+}
+
+/**
+ * Match the VMBUS driver and device using UUID table
+ *
+ * @param drv
+ * VMBUS driver from which ID table would be extracted
+ * @param dev
+ * VMBUS device to match against the driver
+ * @return
+ * true for successful match
+ * false for unsuccessful match
+ */
+static bool
+vmbus_match(const struct rte_vmbus_driver *dr,
+ const struct rte_vmbus_device *dev)
+{
+ const rte_uuid_t *id_table;
+
+ for (id_table = dr->id_table; !rte_uuid_is_null(*id_table); ++id_table) {
+ if (rte_uuid_compare(*id_table, dev->class_id) == 0)
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * If the device ID matches, call the probe() function of the driver.
+ */
+static int
+vmbus_probe_one_driver(struct rte_vmbus_driver *dr,
+ struct rte_vmbus_device *dev)
+{
+ char guid[RTE_UUID_STRLEN];
+ int ret;
+
+ if (!vmbus_match(dr, dev))
+ return 1; /* not supported */
+
+ rte_uuid_unparse(dev->device_id, guid, sizeof(guid));
+ VMBUS_LOG(INFO, "VMBUS device %s on NUMA socket %i",
+ guid, dev->device.numa_node);
+
+ /* TODO add blacklisted */
+
+ /* map resources for device */
+ ret = rte_vmbus_map_device(dev);
+ if (ret != 0)
+ return ret;
+
+ /* reference driver structure */
+ dev->driver = dr;
+ dev->device.driver = &dr->driver;
+
+ if (dev->device.numa_node < 0) {
+ VMBUS_LOG(WARNING, " Invalid NUMA socket, default to 0");
+ dev->device.numa_node = 0;
+ }
+
+ /* call the driver probe() function */
+ VMBUS_LOG(INFO, " probe driver: %s", dr->driver.name);
+ ret = dr->probe(dr, dev);
+ if (ret) {
+ dev->driver = NULL;
+ rte_vmbus_unmap_device(dev);
+ }
+
+ return ret;
+}
+
+/*
+ * If the device class GUID matches, call the probe function of
+ * registered drivers for the vmbus device.
+ * Return -1 if initialization failed,
+ * and 1 if no driver found for this device.
+ */
+static int
+vmbus_probe_all_drivers(struct rte_vmbus_device *dev)
+{
+ struct rte_vmbus_driver *dr;
+ int rc;
+
+ /* Check if a driver is already loaded */
+ if (dev->driver != NULL) {
+ VMBUS_LOG(DEBUG, "VMBUS driver already loaded");
+ return 0;
+ }
+
+ FOREACH_DRIVER_ON_VMBUS(dr) {
+ rc = vmbus_probe_one_driver(dr, dev);
+ if (rc < 0) /* negative is an error */
+ return -1;
+
+ if (rc > 0) /* positive driver doesn't support it */
+ continue;
+
+ return 0;
+ }
+ return 1;
+}
+
+/*
+ * Scan the vmbus, and call the devinit() function for
+ * all registered drivers that have a matching entry in its id_table
+ * for discovered devices.
+ */
+int
+rte_vmbus_probe(void)
+{
+ struct rte_vmbus_device *dev;
+ size_t probed = 0, failed = 0;
+ char ubuf[RTE_UUID_STRLEN];
+
+ FOREACH_DEVICE_ON_VMBUS(dev) {
+ probed++;
+
+ rte_uuid_unparse(dev->device_id, ubuf, sizeof(ubuf));
+
+ /* TODO: add whitelist/blacklist */
+
+ if (vmbus_probe_all_drivers(dev) < 0) {
+ VMBUS_LOG(NOTICE,
+ "Requested device %s cannot be used", ubuf);
+ rte_errno = errno;
+ failed++;
+ }
+ }
+
+ return (probed && probed == failed) ? -1 : 0;
+}
+
+static int
+vmbus_parse(const char *name, void *addr)
+{
+ rte_uuid_t guid;
+ int ret;
+
+ ret = rte_uuid_parse(name, guid);
+ if (ret == 0 && addr)
+ memcpy(addr, &guid, sizeof(guid));
+
+ return ret;
+}
+
+/* register vmbus driver */
+void
+rte_vmbus_register(struct rte_vmbus_driver *driver)
+{
+ VMBUS_LOG(DEBUG,
+ "Registered driver %s", driver->driver.name);
+
+ TAILQ_INSERT_TAIL(&rte_vmbus_bus.driver_list, driver, next);
+ driver->bus = &rte_vmbus_bus;
+}
+
+/* unregister vmbus driver */
+void
+rte_vmbus_unregister(struct rte_vmbus_driver *driver)
+{
+ TAILQ_REMOVE(&rte_vmbus_bus.driver_list, driver, next);
+ driver->bus = NULL;
+}
+
+/* Add a device to VMBUS bus */
+void
+vmbus_add_device(struct rte_vmbus_device *vmbus_dev)
+{
+ TAILQ_INSERT_TAIL(&rte_vmbus_bus.device_list, vmbus_dev, next);
+}
+
+/* Insert a device into a predefined position in VMBUS bus */
+void
+vmbus_insert_device(struct rte_vmbus_device *exist_vmbus_dev,
+ struct rte_vmbus_device *new_vmbus_dev)
+{
+ TAILQ_INSERT_BEFORE(exist_vmbus_dev, new_vmbus_dev, next);
+}
+
+/* Remove a device from VMBUS bus */
+void
+vmbus_remove_device(struct rte_vmbus_device *vmbus_dev)
+{
+ TAILQ_REMOVE(&rte_vmbus_bus.device_list, vmbus_dev, next);
+}
+
+/* VMBUS doesn't support hotplug */
+static struct rte_device *
+vmbus_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
+ const void *data)
+{
+ struct rte_vmbus_device *dev;
+
+ FOREACH_DEVICE_ON_VMBUS(dev) {
+ if (start && &dev->device == start) {
+ start = NULL;
+ continue;
+ }
+ if (cmp(&dev->device, data) == 0)
+ return &dev->device;
+ }
+
+ return NULL;
+}
+
+
+struct rte_vmbus_bus rte_vmbus_bus = {
+ .bus = {
+ .scan = rte_vmbus_scan,
+ .probe = rte_vmbus_probe,
+ .find_device = vmbus_find_device,
+ .parse = vmbus_parse,
+ },
+ .device_list = TAILQ_HEAD_INITIALIZER(rte_vmbus_bus.device_list),
+ .driver_list = TAILQ_HEAD_INITIALIZER(rte_vmbus_bus.driver_list),
+};
+
+RTE_REGISTER_BUS(vmbus, rte_vmbus_bus.bus);
+
+RTE_INIT(vmbus_init_log)
+{
+ vmbus_logtype_bus = rte_log_register("bus.vmbus");
+ if (vmbus_logtype_bus >= 0)
+ rte_log_set_level(vmbus_logtype_bus, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/bus/vmbus/vmbus_common_uio.c b/src/spdk/dpdk/drivers/bus/vmbus/vmbus_common_uio.c
new file mode 100644
index 00000000..5ddd36ab
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/vmbus/vmbus_common_uio.c
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018, Microsoft Corporation.
+ * All Rights Reserved.
+ */
+
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/mman.h>
+
+#include <rte_eal.h>
+#include <rte_tailq.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_bus.h>
+#include <rte_bus_vmbus.h>
+
+#include "private.h"
+
+static struct rte_tailq_elem vmbus_tailq = {
+ .name = "VMBUS_RESOURCE_LIST",
+};
+EAL_REGISTER_TAILQ(vmbus_tailq)
+
+static int
+vmbus_uio_map_secondary(struct rte_vmbus_device *dev)
+{
+ int fd, i;
+ struct mapped_vmbus_resource *uio_res;
+ struct mapped_vmbus_res_list *uio_res_list
+ = RTE_TAILQ_CAST(vmbus_tailq.head, mapped_vmbus_res_list);
+
+ TAILQ_FOREACH(uio_res, uio_res_list, next) {
+
+ /* skip this element if it doesn't match our UUID */
+ if (rte_uuid_compare(uio_res->id, dev->device_id) != 0)
+ continue;
+
+ /* open /dev/uioX */
+ fd = open(uio_res->path, O_RDWR);
+ if (fd < 0) {
+ VMBUS_LOG(ERR, "Cannot open %s: %s",
+ uio_res->path, strerror(errno));
+ return -1;
+ }
+
+ for (i = 0; i != uio_res->nb_maps; i++) {
+ void *mapaddr;
+
+ mapaddr = vmbus_map_resource(uio_res->maps[i].addr,
+ fd, 0,
+ uio_res->maps[i].size, 0);
+
+ if (mapaddr == uio_res->maps[i].addr)
+ continue;
+
+ VMBUS_LOG(ERR,
+ "Cannot mmap device resource file %s to address: %p",
+ uio_res->path, uio_res->maps[i].addr);
+
+ if (mapaddr != MAP_FAILED)
+ /* unmap addr wrongly mapped */
+ vmbus_unmap_resource(mapaddr,
+ (size_t)uio_res->maps[i].size);
+
+ /* unmap addrs correctly mapped */
+ while (--i >= 0)
+ vmbus_unmap_resource(uio_res->maps[i].addr,
+ (size_t)uio_res->maps[i].size);
+
+ close(fd);
+ return -1;
+ }
+
+		/* fd is not needed in secondary process, close it */
+ close(fd);
+ return 0;
+ }
+
+ VMBUS_LOG(ERR, "Cannot find resource for device");
+ return 1;
+}
+
+static int
+vmbus_uio_map_primary(struct rte_vmbus_device *dev)
+{
+ int i, ret;
+ struct mapped_vmbus_resource *uio_res = NULL;
+ struct mapped_vmbus_res_list *uio_res_list =
+ RTE_TAILQ_CAST(vmbus_tailq.head, mapped_vmbus_res_list);
+
+ /* allocate uio resource */
+ ret = vmbus_uio_alloc_resource(dev, &uio_res);
+ if (ret)
+ return ret;
+
+ /* Map the resources */
+ for (i = 0; i < VMBUS_MAX_RESOURCE; i++) {
+ /* skip empty BAR */
+ if (dev->resource[i].len == 0)
+ continue;
+
+ ret = vmbus_uio_map_resource_by_index(dev, i, uio_res, 0);
+ if (ret)
+ goto error;
+ }
+
+ uio_res->nb_maps = i;
+
+ TAILQ_INSERT_TAIL(uio_res_list, uio_res, next);
+
+ return 0;
+error:
+ while (--i >= 0) {
+ vmbus_unmap_resource(uio_res->maps[i].addr,
+ (size_t)uio_res->maps[i].size);
+ }
+ vmbus_uio_free_resource(dev, uio_res);
+ return -1;
+}
+
+
+struct mapped_vmbus_resource *
+vmbus_uio_find_resource(const struct rte_vmbus_device *dev)
+{
+ struct mapped_vmbus_resource *uio_res;
+ struct mapped_vmbus_res_list *uio_res_list =
+ RTE_TAILQ_CAST(vmbus_tailq.head, mapped_vmbus_res_list);
+
+ if (dev == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(uio_res, uio_res_list, next) {
+ /* skip this element if it doesn't match our VMBUS address */
+ if (rte_uuid_compare(uio_res->id, dev->device_id) == 0)
+ return uio_res;
+ }
+ return NULL;
+}
+
+/* map the VMBUS resource of a VMBUS device in virtual memory */
+int
+vmbus_uio_map_resource(struct rte_vmbus_device *dev)
+{
+ struct mapped_vmbus_resource *uio_res;
+ int ret;
+
+ /* TODO: handle rescind */
+ dev->intr_handle.fd = -1;
+ dev->intr_handle.uio_cfg_fd = -1;
+ dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+
+ /* secondary processes - use already recorded details */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ ret = vmbus_uio_map_secondary(dev);
+ else
+ ret = vmbus_uio_map_primary(dev);
+
+ if (ret != 0)
+ return ret;
+
+ uio_res = vmbus_uio_find_resource(dev);
+ if (!uio_res) {
+		VMBUS_LOG(ERR, "cannot find resources!");
+ return -EIO;
+ }
+
+ if (uio_res->nb_maps <= HV_MON_PAGE_MAP) {
+ VMBUS_LOG(ERR, "VMBUS: only %u resources found!",
+ uio_res->nb_maps);
+ return -EINVAL;
+ }
+
+ dev->int_page = (uint32_t *)((char *)uio_res->maps[HV_INT_PAGE_MAP].addr
+ + (PAGE_SIZE >> 1));
+ dev->monitor_page = uio_res->maps[HV_MON_PAGE_MAP].addr;
+ return 0;
+}
+
+static void
+vmbus_uio_unmap(struct mapped_vmbus_resource *uio_res)
+{
+ int i;
+
+ if (uio_res == NULL)
+ return;
+
+ for (i = 0; i != uio_res->nb_maps; i++) {
+ vmbus_unmap_resource(uio_res->maps[i].addr,
+ (size_t)uio_res->maps[i].size);
+ }
+}
+
+/* unmap the VMBUS resource of a VMBUS device in virtual memory */
+void
+vmbus_uio_unmap_resource(struct rte_vmbus_device *dev)
+{
+ struct mapped_vmbus_resource *uio_res;
+ struct mapped_vmbus_res_list *uio_res_list =
+ RTE_TAILQ_CAST(vmbus_tailq.head, mapped_vmbus_res_list);
+
+ if (dev == NULL)
+ return;
+
+ /* find an entry for the device */
+ uio_res = vmbus_uio_find_resource(dev);
+ if (uio_res == NULL)
+ return;
+
+ /* secondary processes - just free maps */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return vmbus_uio_unmap(uio_res);
+
+ TAILQ_REMOVE(uio_res_list, uio_res, next);
+
+ /* unmap all resources */
+ vmbus_uio_unmap(uio_res);
+
+ /* free uio resource */
+ rte_free(uio_res);
+
+ /* close fd if in primary process */
+ close(dev->intr_handle.fd);
+ if (dev->intr_handle.uio_cfg_fd >= 0) {
+ close(dev->intr_handle.uio_cfg_fd);
+ dev->intr_handle.uio_cfg_fd = -1;
+ }
+
+ dev->intr_handle.fd = -1;
+ dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+}