author     Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-21 11:54:28 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-21 11:54:28 +0000
commit     e6918187568dbd01842d8d1d2c808ce16a894239 (patch)
tree       64f88b554b444a49f656b6c656111a145cbbaa28 /src/spdk/dpdk/drivers/bus/dpaa
parent     Initial commit. (diff)
download   ceph-e6918187568dbd01842d8d1d2c808ce16a894239.tar.xz
           ceph-e6918187568dbd01842d8d1d2c808ce16a894239.zip

Adding upstream version 18.2.2. (tag: upstream/18.2.2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/dpdk/drivers/bus/dpaa')
 -rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/Makefile                     |   48
 -rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/base/fman/fman.c             |  583
 -rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/base/fman/fman_hw.c          |  611
 -rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/base/fman/netcfg_layer.c     |  160
 -rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman.c            |  361
 -rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman.h            |  541
 -rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman_driver.c     |  295
 -rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman_priv.h       |   92
 -rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/base/qbman/dpaa_alloc.c      |   71
 -rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/base/qbman/dpaa_sys.c        |  103
 -rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/base/qbman/dpaa_sys.h        |   29
 -rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/base/qbman/process.c         |  298
 -rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman.c            | 2852
 -rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman.h            |  913
 -rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman_driver.c     |  353
 -rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman_priv.h       |  287
 -rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/dpaa_bus.c                   |  758
 -rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/include/dpaa_bits.h          |   39
 -rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/include/dpaa_rbtree.h        |  117
 -rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/include/fman.h               |  426
 -rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/include/fsl_bman.h           |  348
 -rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/include/fsl_fman.h           |  181
 -rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/include/fsl_fman_crc64.h     |  230
 -rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/include/fsl_qman.h           | 2139
 -rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/include/fsl_usd.h            |   94
 -rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/include/netcfg.h             |   65
 -rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/include/process.h            |   77
 -rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/meson.build                  |   29
 -rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/rte_bus_dpaa_version.map     |   93
 -rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/rte_dpaa_bus.h               |  224
 -rw-r--r--  src/spdk/dpdk/drivers/bus/dpaa/rte_dpaa_logs.h              |   40
31 files changed, 12457 insertions(+), 0 deletions(-)
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/Makefile b/src/spdk/dpdk/drivers/bus/dpaa/Makefile
new file mode 100644
index 000000000..ba40b2116
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/Makefile
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2016 NXP
+
+include $(RTE_SDK)/mk/rte.vars.mk
+RTE_BUS_DPAA=$(RTE_SDK)/drivers/bus/dpaa
+
+#
+# library name
+#
+LIB = librte_bus_dpaa.a
+
+CFLAGS := -I$(SRCDIR) $(CFLAGS)
+CFLAGS += -O3 $(WERROR_FLAGS)
+CFLAGS += -Wno-pointer-arith
+CFLAGS += -Wno-cast-qual
+CFLAGS += -I$(RTE_BUS_DPAA)/
+CFLAGS += -I$(RTE_BUS_DPAA)/include
+CFLAGS += -I$(RTE_BUS_DPAA)/base/qbman
+CFLAGS += -I$(RTE_SDK)/drivers/common/dpaax
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/include
+
+# versioning export map
+EXPORT_MAP := rte_bus_dpaa_version.map
+
+# all sources are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA_BUS) += \
+ dpaa_bus.c
+
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA_BUS) += \
+ base/fman/fman.c \
+ base/fman/fman_hw.c \
+ base/fman/netcfg_layer.c \
+ base/qbman/process.c \
+ base/qbman/bman.c \
+ base/qbman/bman_driver.c \
+ base/qbman/qman.c \
+ base/qbman/qman_driver.c \
+ base/qbman/dpaa_alloc.c \
+ base/qbman/dpaa_sys.c
+
+# Link Pthread
+LDLIBS += -lpthread
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev
+LDLIBS += -lrte_common_dpaax
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/fman/fman.c b/src/spdk/dpdk/drivers/bus/dpaa/base/fman/fman.c
new file mode 100644
index 000000000..6d77a7e39
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/fman/fman.c
@@ -0,0 +1,583 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2010-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2019 NXP
+ *
+ */
+
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <ifaddrs.h>
+
+/* This header declares the driver interface we implement */
+#include <fman.h>
+#include <dpaa_of.h>
+#include <rte_malloc.h>
+#include <rte_dpaa_logs.h>
+#include <rte_string_fns.h>
+
+#define QMI_PORT_REGS_OFFSET 0x400
+
+/* CCSR map address used to access CCSR-based registers */
+void *fman_ccsr_map;
+/* fman version info */
+u16 fman_ip_rev;
+static int get_once;
+u32 fman_dealloc_bufs_mask_hi;
+u32 fman_dealloc_bufs_mask_lo;
+
+int fman_ccsr_map_fd = -1;
+static COMPAT_LIST_HEAD(__ifs);
+
+/* This is the (const) global variable that callers have read-only access to.
+ * Internally, we have read-write access directly to __ifs.
+ */
+const struct list_head *fman_if_list = &__ifs;
+
+static void
+if_destructor(struct __fman_if *__if)
+{
+ struct fman_if_bpool *bp, *tmpbp;
+
+ if (!__if)
+ return;
+
+ if (__if->__if.mac_type == fman_offline)
+ goto cleanup;
+
+ list_for_each_entry_safe(bp, tmpbp, &__if->__if.bpool_list, node) {
+ list_del(&bp->node);
+ free(bp);
+ }
+cleanup:
+ free(__if);
+}
+
+static int
+fman_get_ip_rev(const struct device_node *fman_node)
+{
+ const uint32_t *fman_addr;
+ uint64_t phys_addr;
+ uint64_t regs_size;
+ uint32_t ip_rev_1;
+ int _errno;
+
+ fman_addr = of_get_address(fman_node, 0, &regs_size, NULL);
+ if (!fman_addr) {
+ pr_err("of_get_address cannot return fman address\n");
+ return -EINVAL;
+ }
+ phys_addr = of_translate_address(fman_node, fman_addr);
+ if (!phys_addr) {
+ pr_err("of_translate_address failed\n");
+ return -EINVAL;
+ }
+ fman_ccsr_map = mmap(NULL, regs_size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fman_ccsr_map_fd, phys_addr);
+ if (fman_ccsr_map == MAP_FAILED) {
+ pr_err("Can not map FMan ccsr base");
+ return -EINVAL;
+ }
+
+ ip_rev_1 = in_be32(fman_ccsr_map + FMAN_IP_REV_1);
+ fman_ip_rev = (ip_rev_1 & FMAN_IP_REV_1_MAJOR_MASK) >>
+ FMAN_IP_REV_1_MAJOR_SHIFT;
+
+ _errno = munmap(fman_ccsr_map, regs_size);
+ if (_errno)
+ pr_err("munmap() of FMan ccsr failed");
+
+ return 0;
+}
+
+static int
+fman_get_mac_index(uint64_t regs_addr_host, uint8_t *mac_idx)
+{
+ int ret = 0;
+
+ /*
+ * MAC1 : E_0000h
+ * MAC2 : E_2000h
+ * MAC3 : E_4000h
+ * MAC4 : E_6000h
+ * MAC5 : E_8000h
+ * MAC6 : E_A000h
+ * MAC7 : E_C000h
+ * MAC8 : E_E000h
+ * MAC9 : F_0000h
+ * MAC10: F_2000h
+ */
+ switch (regs_addr_host) {
+ case 0xE0000:
+ *mac_idx = 1;
+ break;
+ case 0xE2000:
+ *mac_idx = 2;
+ break;
+ case 0xE4000:
+ *mac_idx = 3;
+ break;
+ case 0xE6000:
+ *mac_idx = 4;
+ break;
+ case 0xE8000:
+ *mac_idx = 5;
+ break;
+ case 0xEA000:
+ *mac_idx = 6;
+ break;
+ case 0xEC000:
+ *mac_idx = 7;
+ break;
+ case 0xEE000:
+ *mac_idx = 8;
+ break;
+ case 0xF0000:
+ *mac_idx = 9;
+ break;
+ case 0xF2000:
+ *mac_idx = 10;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
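+
+/* Illustrative sketch, not part of the upstream driver: the switch above
+ * follows a fixed 0x2000 stride from base 0xE0000, so the same mapping can
+ * be computed arithmetically. The helper name is hypothetical.
+ */
+static inline int fman_mac_index_from_stride(uint64_t regs_addr_host,
+ uint8_t *mac_idx)
+{
+ if (regs_addr_host < 0xE0000 || regs_addr_host > 0xF2000 ||
+ (regs_addr_host - 0xE0000) % 0x2000)
+ return -EINVAL;
+ *mac_idx = ((regs_addr_host - 0xE0000) / 0x2000) + 1;
+ return 0;
+}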
+
+static int
+fman_if_init(const struct device_node *dpa_node)
+{
+ const char *rprop, *mprop;
+ uint64_t phys_addr;
+ struct __fman_if *__if;
+ struct fman_if_bpool *bpool;
+
+ const phandle *mac_phandle, *ports_phandle, *pools_phandle;
+ const phandle *tx_channel_id = NULL, *mac_addr, *cell_idx;
+ const phandle *rx_phandle, *tx_phandle;
+ uint64_t tx_phandle_host[4] = {0};
+ uint64_t rx_phandle_host[4] = {0};
+ uint64_t regs_addr_host = 0;
+ uint64_t cell_idx_host = 0;
+
+ const struct device_node *mac_node = NULL, *tx_node;
+ const struct device_node *pool_node, *fman_node, *rx_node;
+ const uint32_t *regs_addr = NULL;
+ const char *mname, *fname;
+ const char *dname = dpa_node->full_name;
+ size_t lenp;
+ int _errno;
+ const char *char_prop;
+ uint32_t na;
+
+ if (of_device_is_available(dpa_node) == false)
+ return 0;
+
+ rprop = "fsl,qman-frame-queues-rx";
+ mprop = "fsl,fman-mac";
+
+ /* Allocate an object for this network interface */
+ __if = rte_malloc(NULL, sizeof(*__if), RTE_CACHE_LINE_SIZE);
+ if (!__if) {
+ FMAN_ERR(-ENOMEM, "malloc(%zu)\n", sizeof(*__if));
+ goto err;
+ }
+ memset(__if, 0, sizeof(*__if));
+ INIT_LIST_HEAD(&__if->__if.bpool_list);
+ strlcpy(__if->node_path, dpa_node->full_name, PATH_MAX - 1);
+ __if->node_path[PATH_MAX - 1] = '\0';
+
+ /* Obtain the MAC node used by this interface except macless */
+ mac_phandle = of_get_property(dpa_node, mprop, &lenp);
+ if (!mac_phandle) {
+ FMAN_ERR(-EINVAL, "%s: no %s\n", dname, mprop);
+ goto err;
+ }
+ assert(lenp == sizeof(phandle));
+ mac_node = of_find_node_by_phandle(*mac_phandle);
+ if (!mac_node) {
+ FMAN_ERR(-ENXIO, "%s: bad 'fsl,fman-mac\n", dname);
+ goto err;
+ }
+ mname = mac_node->full_name;
+
+ /* Map the CCSR regs for the MAC node */
+ regs_addr = of_get_address(mac_node, 0, &__if->regs_size, NULL);
+ if (!regs_addr) {
+ FMAN_ERR(-EINVAL, "of_get_address(%s)\n", mname);
+ goto err;
+ }
+ phys_addr = of_translate_address(mac_node, regs_addr);
+ if (!phys_addr) {
+ FMAN_ERR(-EINVAL, "of_translate_address(%s, %p)\n",
+ mname, regs_addr);
+ goto err;
+ }
+ __if->ccsr_map = mmap(NULL, __if->regs_size,
+ PROT_READ | PROT_WRITE, MAP_SHARED,
+ fman_ccsr_map_fd, phys_addr);
+ if (__if->ccsr_map == MAP_FAILED) {
+ FMAN_ERR(-errno, "mmap(0x%"PRIx64")\n", phys_addr);
+ goto err;
+ }
+ na = of_n_addr_cells(mac_node);
+ /* Get rid of endianness (issues). Convert to host byte order */
+ regs_addr_host = of_read_number(regs_addr, na);
+
+ /* Get the index of the Fman this i/f belongs to */
+ fman_node = of_get_parent(mac_node);
+ na = of_n_addr_cells(mac_node);
+ if (!fman_node) {
+ FMAN_ERR(-ENXIO, "of_get_parent(%s)\n", mname);
+ goto err;
+ }
+ fname = fman_node->full_name;
+ cell_idx = of_get_property(fman_node, "cell-index", &lenp);
+ if (!cell_idx) {
+ FMAN_ERR(-ENXIO, "%s: no cell-index)\n", fname);
+ goto err;
+ }
+ assert(lenp == sizeof(*cell_idx));
+ cell_idx_host = of_read_number(cell_idx, lenp / sizeof(phandle));
+ __if->__if.fman_idx = cell_idx_host;
+ if (!get_once) {
+ _errno = fman_get_ip_rev(fman_node);
+ if (_errno) {
+ FMAN_ERR(-ENXIO, "%s: ip_rev is not available\n",
+ fname);
+ goto err;
+ }
+ }
+
+ if (fman_ip_rev >= FMAN_V3) {
+ /*
+ * Set A2V, OVOM, EBD bits in contextA to allow external
+ * buffer deallocation by fman.
+ */
+ fman_dealloc_bufs_mask_hi = FMAN_V3_CONTEXTA_EN_A2V |
+ FMAN_V3_CONTEXTA_EN_OVOM;
+ fman_dealloc_bufs_mask_lo = FMAN_V3_CONTEXTA_EN_EBD;
+ } else {
+ fman_dealloc_bufs_mask_hi = 0;
+ fman_dealloc_bufs_mask_lo = 0;
+ }
+ /* Is the MAC node 1G or 10G? */
+ __if->__if.is_memac = 0;
+
+ if (of_device_is_compatible(mac_node, "fsl,fman-1g-mac"))
+ __if->__if.mac_type = fman_mac_1g;
+ else if (of_device_is_compatible(mac_node, "fsl,fman-10g-mac"))
+ __if->__if.mac_type = fman_mac_10g;
+ else if (of_device_is_compatible(mac_node, "fsl,fman-memac")) {
+ __if->__if.is_memac = 1;
+ char_prop = of_get_property(mac_node, "phy-connection-type",
+ NULL);
+ if (!char_prop) {
+ printf("memac: unknown MII type assuming 1G\n");
+ /* Right now forcing memac to 1g in case of error*/
+ __if->__if.mac_type = fman_mac_1g;
+ } else {
+ if (strstr(char_prop, "sgmii"))
+ __if->__if.mac_type = fman_mac_1g;
+ else if (strstr(char_prop, "rgmii")) {
+ __if->__if.mac_type = fman_mac_1g;
+ __if->__if.is_rgmii = 1;
+ } else if (strstr(char_prop, "xgmii"))
+ __if->__if.mac_type = fman_mac_10g;
+ }
+ } else {
+ FMAN_ERR(-EINVAL, "%s: unknown MAC type\n", mname);
+ goto err;
+ }
+
+ /*
+ * For MAC ports, we cannot rely on cell-index. On T2080, two of the
+ * 10G ports on a single FMan carry the same cell-index values as the
+ * other two 10G ports on that FMan. Hence, we rely on the register
+ * addresses of the ports from the device tree to deduce the index.
+ */
+
+ _errno = fman_get_mac_index(regs_addr_host, &__if->__if.mac_idx);
+ if (_errno) {
+ FMAN_ERR(-EINVAL, "Invalid register address: %" PRIx64,
+ regs_addr_host);
+ goto err;
+ }
+
+ /* Extract the MAC address for private and shared interfaces */
+ mac_addr = of_get_property(mac_node, "local-mac-address",
+ &lenp);
+ if (!mac_addr) {
+ FMAN_ERR(-EINVAL, "%s: no local-mac-address\n",
+ mname);
+ goto err;
+ }
+ memcpy(&__if->__if.mac_addr, mac_addr, ETHER_ADDR_LEN);
+
+ /* Extract the Tx port (it's the second of the two port handles)
+ * and get its channel ID
+ */
+ ports_phandle = of_get_property(mac_node, "fsl,port-handles",
+ &lenp);
+ if (!ports_phandle)
+ ports_phandle = of_get_property(mac_node, "fsl,fman-ports",
+ &lenp);
+ if (!ports_phandle) {
+ FMAN_ERR(-EINVAL, "%s: no fsl,port-handles\n",
+ mname);
+ goto err;
+ }
+ assert(lenp == (2 * sizeof(phandle)));
+ tx_node = of_find_node_by_phandle(ports_phandle[1]);
+ if (!tx_node) {
+ FMAN_ERR(-ENXIO, "%s: bad fsl,port-handle[1]\n", mname);
+ goto err;
+ }
+ /* Extract the channel ID (from tx-port-handle) */
+ tx_channel_id = of_get_property(tx_node, "fsl,qman-channel-id",
+ &lenp);
+ if (!tx_channel_id) {
+ FMAN_ERR(-EINVAL, "%s: no fsl-qman-channel-id\n",
+ tx_node->full_name);
+ goto err;
+ }
+
+ rx_node = of_find_node_by_phandle(ports_phandle[0]);
+ if (!rx_node) {
+ FMAN_ERR(-ENXIO, "%s: bad fsl,port-handle[0]\n", mname);
+ goto err;
+ }
+ regs_addr = of_get_address(rx_node, 0, &__if->regs_size, NULL);
+ if (!regs_addr) {
+ FMAN_ERR(-EINVAL, "of_get_address(%s)\n", mname);
+ goto err;
+ }
+ phys_addr = of_translate_address(rx_node, regs_addr);
+ if (!phys_addr) {
+ FMAN_ERR(-EINVAL, "of_translate_address(%s, %p)\n",
+ mname, regs_addr);
+ goto err;
+ }
+ __if->bmi_map = mmap(NULL, __if->regs_size,
+ PROT_READ | PROT_WRITE, MAP_SHARED,
+ fman_ccsr_map_fd, phys_addr);
+ if (__if->bmi_map == MAP_FAILED) {
+ FMAN_ERR(-errno, "mmap(0x%"PRIx64")\n", phys_addr);
+ goto err;
+ }
+
+ /* No channel ID for MAC-less */
+ assert(lenp == sizeof(*tx_channel_id));
+ na = of_n_addr_cells(mac_node);
+ __if->__if.tx_channel_id = of_read_number(tx_channel_id, na);
+
+ /* Extract the Rx FQIDs. (Note, the device representation is silly,
+ * there are "counts" that must always be 1.)
+ */
+ rx_phandle = of_get_property(dpa_node, rprop, &lenp);
+ if (!rx_phandle) {
+ FMAN_ERR(-EINVAL, "%s: no fsl,qman-frame-queues-rx\n", dname);
+ goto err;
+ }
+
+ assert(lenp == (4 * sizeof(phandle)));
+
+ na = of_n_addr_cells(mac_node);
+ /* Get rid of endianness (issues). Convert to host byte order */
+ rx_phandle_host[0] = of_read_number(&rx_phandle[0], na);
+ rx_phandle_host[1] = of_read_number(&rx_phandle[1], na);
+ rx_phandle_host[2] = of_read_number(&rx_phandle[2], na);
+ rx_phandle_host[3] = of_read_number(&rx_phandle[3], na);
+
+ assert((rx_phandle_host[1] == 1) && (rx_phandle_host[3] == 1));
+ __if->__if.fqid_rx_err = rx_phandle_host[0];
+ __if->__if.fqid_rx_def = rx_phandle_host[2];
+
+ /* Extract the Tx FQIDs */
+ tx_phandle = of_get_property(dpa_node,
+ "fsl,qman-frame-queues-tx", &lenp);
+ if (!tx_phandle) {
+ FMAN_ERR(-EINVAL, "%s: no fsl,qman-frame-queues-tx\n", dname);
+ goto err;
+ }
+
+ assert(lenp == (4 * sizeof(phandle)));
+ /*TODO: Fix for other cases also */
+ na = of_n_addr_cells(mac_node);
+ /* Get rid of endianness (issues). Convert to host byte order */
+ tx_phandle_host[0] = of_read_number(&tx_phandle[0], na);
+ tx_phandle_host[1] = of_read_number(&tx_phandle[1], na);
+ tx_phandle_host[2] = of_read_number(&tx_phandle[2], na);
+ tx_phandle_host[3] = of_read_number(&tx_phandle[3], na);
+ assert((tx_phandle_host[1] == 1) && (tx_phandle_host[3] == 1));
+ __if->__if.fqid_tx_err = tx_phandle_host[0];
+ __if->__if.fqid_tx_confirm = tx_phandle_host[2];
+
+ /* Obtain the buffer pool nodes used by this interface */
+ pools_phandle = of_get_property(dpa_node, "fsl,bman-buffer-pools",
+ &lenp);
+ if (!pools_phandle) {
+ FMAN_ERR(-EINVAL, "%s: no fsl,bman-buffer-pools\n", dname);
+ goto err;
+ }
+ /* For each pool, parse the corresponding node and add a pool object
+ * to the interface's "bpool_list"
+ */
+ assert(lenp && !(lenp % sizeof(phandle)));
+ while (lenp) {
+ size_t proplen;
+ const phandle *prop;
+ uint64_t bpid_host = 0;
+ uint64_t bpool_host[6] = {0};
+ const char *pname;
+ /* Allocate an object for the pool */
+ bpool = rte_malloc(NULL, sizeof(*bpool), RTE_CACHE_LINE_SIZE);
+ if (!bpool) {
+ FMAN_ERR(-ENOMEM, "malloc(%zu)\n", sizeof(*bpool));
+ goto err;
+ }
+ /* Find the pool node */
+ pool_node = of_find_node_by_phandle(*pools_phandle);
+ if (!pool_node) {
+ FMAN_ERR(-ENXIO, "%s: bad fsl,bman-buffer-pools\n",
+ dname);
+ rte_free(bpool);
+ goto err;
+ }
+ pname = pool_node->full_name;
+ /* Extract the BPID property */
+ prop = of_get_property(pool_node, "fsl,bpid", &proplen);
+ if (!prop) {
+ FMAN_ERR(-EINVAL, "%s: no fsl,bpid\n", pname);
+ rte_free(bpool);
+ goto err;
+ }
+ assert(proplen == sizeof(*prop));
+ na = of_n_addr_cells(mac_node);
+ /* Get rid of endianness (issues).
+ * Convert to host byte-order
+ */
+ bpid_host = of_read_number(prop, na);
+ bpool->bpid = bpid_host;
+ /* Extract the cfg property (count/size/addr). "fsl,bpool-cfg"
+ * indicates that the Bman driver should seed the pool.
+ * "fsl,bpool-ethernet-cfg" is used by the network driver. The
+ * two are mutually exclusive, so check for either of them.
+ */
+ prop = of_get_property(pool_node, "fsl,bpool-cfg",
+ &proplen);
+ if (!prop)
+ prop = of_get_property(pool_node,
+ "fsl,bpool-ethernet-cfg",
+ &proplen);
+ if (!prop) {
+ /* It's OK for there to be no bpool-cfg */
+ bpool->count = bpool->size = bpool->addr = 0;
+ } else {
+ assert(proplen == (6 * sizeof(*prop)));
+ na = of_n_addr_cells(mac_node);
+ /* Get rid of endianness (issues).
+ * Convert to host byte order
+ */
+ bpool_host[0] = of_read_number(&prop[0], na);
+ bpool_host[1] = of_read_number(&prop[1], na);
+ bpool_host[2] = of_read_number(&prop[2], na);
+ bpool_host[3] = of_read_number(&prop[3], na);
+ bpool_host[4] = of_read_number(&prop[4], na);
+ bpool_host[5] = of_read_number(&prop[5], na);
+
+ bpool->count = ((uint64_t)bpool_host[0] << 32) |
+ bpool_host[1];
+ bpool->size = ((uint64_t)bpool_host[2] << 32) |
+ bpool_host[3];
+ bpool->addr = ((uint64_t)bpool_host[4] << 32) |
+ bpool_host[5];
+ }
+ /* Parsing of the pool is complete, add it to the interface
+ * list.
+ */
+ list_add_tail(&bpool->node, &__if->__if.bpool_list);
+ lenp -= sizeof(phandle);
+ pools_phandle++;
+ }
+
+ /* Parsing of the network interface is complete, add it to the list */
+ DPAA_BUS_LOG(DEBUG, "Found %s, Tx Channel = %x, FMAN = %x, "
+ "Port ID = %x",
+ dname, __if->__if.tx_channel_id, __if->__if.fman_idx,
+ __if->__if.mac_idx);
+
+ list_add_tail(&__if->__if.node, &__ifs);
+ return 0;
+err:
+ if_destructor(__if);
+ return _errno;
+}
+
+int
+fman_init(void)
+{
+ const struct device_node *dpa_node;
+ int _errno;
+
+ /* If multiple dependencies try to initialise the Fman driver, don't
+ * panic.
+ */
+ if (fman_ccsr_map_fd != -1)
+ return 0;
+
+ fman_ccsr_map_fd = open(FMAN_DEVICE_PATH, O_RDWR);
+ if (unlikely(fman_ccsr_map_fd < 0)) {
+ DPAA_BUS_LOG(ERR, "Unable to open (/dev/mem)");
+ return fman_ccsr_map_fd;
+ }
+
+ for_each_compatible_node(dpa_node, NULL, "fsl,dpa-ethernet-init") {
+ _errno = fman_if_init(dpa_node);
+ if (_errno) {
+ FMAN_ERR(_errno, "if_init(%s)\n", dpa_node->full_name);
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ fman_finish();
+ return _errno;
+}
+
+void
+fman_finish(void)
+{
+ struct __fman_if *__if, *tmpif;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ list_for_each_entry_safe(__if, tmpif, &__ifs, __if.node) {
+ int _errno;
+
+ /* disable Rx and Tx */
+ if ((__if->__if.mac_type == fman_mac_1g) &&
+ (!__if->__if.is_memac))
+ out_be32(__if->ccsr_map + 0x100,
+ in_be32(__if->ccsr_map + 0x100) & ~(u32)0x5);
+ else
+ out_be32(__if->ccsr_map + 8,
+ in_be32(__if->ccsr_map + 8) & ~(u32)3);
+ /* release the mapping */
+ _errno = munmap(__if->ccsr_map, __if->regs_size);
+ if (unlikely(_errno < 0))
+ fprintf(stderr, "%s:%d:%s(): munmap() = %d (%s)\n",
+ __FILE__, __LINE__, __func__,
+ -errno, strerror(errno));
+ printf("Tearing down %s\n", __if->node_path);
+ list_del(&__if->__if.node);
+ rte_free(__if);
+ }
+
+ close(fman_ccsr_map_fd);
+ fman_ccsr_map_fd = -1;
+}
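+
+/* Usage sketch, assuming only the fman API above; not part of the driver.
+ * A minimal consumer initialises the driver, walks the discovered
+ * interfaces via fman_if_list and tears everything down again.
+ */
+static int fman_usage_sketch(void)
+{
+ struct fman_if *iface;
+ int ret = fman_init();
+
+ if (ret)
+ return ret;
+ list_for_each_entry(iface, fman_if_list, node)
+ printf("FMan %d, MAC %d\n", iface->fman_idx, iface->mac_idx);
+ fman_finish();
+ return 0;
+}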
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/fman/fman_hw.c b/src/spdk/dpdk/drivers/bus/dpaa/base/fman/fman_hw.c
new file mode 100644
index 000000000..9ab8e835d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/fman/fman_hw.c
@@ -0,0 +1,611 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2017 NXP
+ *
+ */
+
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <ifaddrs.h>
+#include <fman.h>
+/* This header declares things about Fman hardware itself (the format of status
+ * words and an inline implementation of CRC64). We include it only in order to
+ * instantiate the one global variable it depends on.
+ */
+#include <fsl_fman.h>
+#include <fsl_fman_crc64.h>
+#include <fsl_bman.h>
+
+#define FMAN_SP_SG_DISABLE 0x80000000
+#define FMAN_SP_EXT_BUF_MARG_START_SHIFT 16
+
+/* Instantiate the global variable that the inline CRC64 implementation (in
+ * <fsl_fman_crc64.h>) depends on.
+ */
+DECLARE_FMAN_CRC64_TABLE();
+
+#define ETH_ADDR_TO_UINT64(eth_addr) \
+ (uint64_t)(((uint64_t)(eth_addr)[0] << 40) | \
+ ((uint64_t)(eth_addr)[1] << 32) | \
+ ((uint64_t)(eth_addr)[2] << 24) | \
+ ((uint64_t)(eth_addr)[3] << 16) | \
+ ((uint64_t)(eth_addr)[4] << 8) | \
+ ((uint64_t)(eth_addr)[5]))
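+
+/* Worked example: the address 01:00:5e:00:00:01 packs to 0x01005e000001.
+ * The I/G (multicast) bit of eth_addr[0] lands at bit 40 of the result,
+ * which is what the GROUP_ADDRESS test below relies on.
+ */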
+
+void
+fman_if_set_mcast_filter_table(struct fman_if *p)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+ void *hashtable_ctrl;
+ uint32_t i;
+
+ hashtable_ctrl = &((struct memac_regs *)__if->ccsr_map)->hashtable_ctrl;
+ for (i = 0; i < 64; i++)
+ out_be32(hashtable_ctrl, i|HASH_CTRL_MCAST_EN);
+}
+
+void
+fman_if_reset_mcast_filter_table(struct fman_if *p)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+ void *hashtable_ctrl;
+ uint32_t i;
+
+ hashtable_ctrl = &((struct memac_regs *)__if->ccsr_map)->hashtable_ctrl;
+ for (i = 0; i < 64; i++)
+ out_be32(hashtable_ctrl, i & ~HASH_CTRL_MCAST_EN);
+}
+
+static
+uint32_t get_mac_hash_code(uint64_t eth_addr)
+{
+ uint64_t mask1, mask2;
+ uint32_t xorVal = 0;
+ uint8_t i, j;
+
+ for (i = 0; i < 6; i++) {
+ mask1 = eth_addr & (uint64_t)0x01;
+ eth_addr >>= 1;
+
+ for (j = 0; j < 7; j++) {
+ mask2 = eth_addr & (uint64_t)0x01;
+ mask1 ^= mask2;
+ eth_addr >>= 1;
+ }
+
+ xorVal |= (mask1 << (5 - i));
+ }
+
+ return xorVal;
+}
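+
+/* The loop above XOR-folds the 48-bit address in 8-bit groups: hash bit
+ * (5 - i) is the parity of bits [8i .. 8i+7] of the address, yielding a
+ * 6-bit code that indexes the 64-entry multicast hash table.
+ */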
+
+int
+fman_if_add_hash_mac_addr(struct fman_if *p, uint8_t *eth)
+{
+ uint64_t eth_addr;
+ void *hashtable_ctrl;
+ uint32_t hash;
+
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+
+ eth_addr = ETH_ADDR_TO_UINT64(eth);
+
+ if (!(eth_addr & GROUP_ADDRESS))
+ return -1;
+
+ hash = get_mac_hash_code(eth_addr) & HASH_CTRL_ADDR_MASK;
+ hash = hash | HASH_CTRL_MCAST_EN;
+
+ hashtable_ctrl = &((struct memac_regs *)__if->ccsr_map)->hashtable_ctrl;
+ out_be32(hashtable_ctrl, hash);
+
+ return 0;
+}
+
+int
+fman_if_get_primary_mac_addr(struct fman_if *p, uint8_t *eth)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+ void *mac_reg =
+ &((struct memac_regs *)__if->ccsr_map)->mac_addr0.mac_addr_l;
+ u32 val = in_be32(mac_reg);
+
+ eth[0] = (val & 0x000000ff) >> 0;
+ eth[1] = (val & 0x0000ff00) >> 8;
+ eth[2] = (val & 0x00ff0000) >> 16;
+ eth[3] = (val & 0xff000000) >> 24;
+
+ mac_reg = &((struct memac_regs *)__if->ccsr_map)->mac_addr0.mac_addr_u;
+ val = in_be32(mac_reg);
+
+ eth[4] = (val & 0x000000ff) >> 0;
+ eth[5] = (val & 0x0000ff00) >> 8;
+
+ return 0;
+}
+
+void
+fman_if_clear_mac_addr(struct fman_if *p, uint8_t addr_num)
+{
+ struct __fman_if *m = container_of(p, struct __fman_if, __if);
+ void *reg;
+
+ if (addr_num) {
+ reg = &((struct memac_regs *)m->ccsr_map)->
+ mac_addr[addr_num-1].mac_addr_l;
+ out_be32(reg, 0x0);
+ reg = &((struct memac_regs *)m->ccsr_map)->
+ mac_addr[addr_num-1].mac_addr_u;
+ out_be32(reg, 0x0);
+ } else {
+ reg = &((struct memac_regs *)m->ccsr_map)->mac_addr0.mac_addr_l;
+ out_be32(reg, 0x0);
+ reg = &((struct memac_regs *)m->ccsr_map)->mac_addr0.mac_addr_u;
+ out_be32(reg, 0x0);
+ }
+}
+
+int
+fman_if_add_mac_addr(struct fman_if *p, uint8_t *eth, uint8_t addr_num)
+{
+ struct __fman_if *m = container_of(p, struct __fman_if, __if);
+
+ void *reg;
+ u32 val;
+
+ memcpy(&m->__if.mac_addr, eth, ETHER_ADDR_LEN);
+
+ if (addr_num)
+ reg = &((struct memac_regs *)m->ccsr_map)->
+ mac_addr[addr_num-1].mac_addr_l;
+ else
+ reg = &((struct memac_regs *)m->ccsr_map)->mac_addr0.mac_addr_l;
+
+ val = (m->__if.mac_addr.addr_bytes[0] |
+ (m->__if.mac_addr.addr_bytes[1] << 8) |
+ (m->__if.mac_addr.addr_bytes[2] << 16) |
+ (m->__if.mac_addr.addr_bytes[3] << 24));
+ out_be32(reg, val);
+
+ if (addr_num)
+ reg = &((struct memac_regs *)m->ccsr_map)->
+ mac_addr[addr_num-1].mac_addr_u;
+ else
+ reg = &((struct memac_regs *)m->ccsr_map)->mac_addr0.mac_addr_u;
+
+ val = ((m->__if.mac_addr.addr_bytes[4] << 0) |
+ (m->__if.mac_addr.addr_bytes[5] << 8));
+ out_be32(reg, val);
+
+ return 0;
+}
+
+void
+fman_if_set_rx_ignore_pause_frames(struct fman_if *p, bool enable)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+ u32 value = 0;
+ void *cmdcfg;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ /* Set Rx Ignore Pause Frames */
+ cmdcfg = &((struct memac_regs *)__if->ccsr_map)->command_config;
+ if (enable)
+ value = in_be32(cmdcfg) | CMD_CFG_PAUSE_IGNORE;
+ else
+ value = in_be32(cmdcfg) & ~CMD_CFG_PAUSE_IGNORE;
+
+ out_be32(cmdcfg, value);
+}
+
+void
+fman_if_conf_max_frame_len(struct fman_if *p, unsigned int max_frame_len)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+ unsigned int *maxfrm;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ /* Set Max frame length */
+ maxfrm = &((struct memac_regs *)__if->ccsr_map)->maxfrm;
+ out_be32(maxfrm, (MAXFRM_RX_MASK & max_frame_len));
+}
+
+void
+fman_if_stats_get(struct fman_if *p, struct rte_eth_stats *stats)
+{
+ struct __fman_if *m = container_of(p, struct __fman_if, __if);
+ struct memac_regs *regs = m->ccsr_map;
+
+ /* read received packet count */
+ stats->ipackets = ((u64)in_be32(&regs->rfrm_u)) << 32 |
+ in_be32(&regs->rfrm_l);
+ stats->ibytes = ((u64)in_be32(&regs->roct_u)) << 32 |
+ in_be32(&regs->roct_l);
+ stats->ierrors = ((u64)in_be32(&regs->rerr_u)) << 32 |
+ in_be32(&regs->rerr_l);
+
+ /* read transmitted packet count */
+ stats->opackets = ((u64)in_be32(&regs->tfrm_u)) << 32 |
+ in_be32(&regs->tfrm_l);
+ stats->obytes = ((u64)in_be32(&regs->toct_u)) << 32 |
+ in_be32(&regs->toct_l);
+ stats->oerrors = ((u64)in_be32(&regs->terr_u)) << 32 |
+ in_be32(&regs->terr_l);
+}
+
+void
+fman_if_stats_get_all(struct fman_if *p, uint64_t *value, int n)
+{
+ struct __fman_if *m = container_of(p, struct __fman_if, __if);
+ struct memac_regs *regs = m->ccsr_map;
+ int i;
+ uint64_t base_offset = offsetof(struct memac_regs, reoct_l);
+
+ for (i = 0; i < n; i++)
+ value[i] = ((u64)in_be32((char *)regs
+ + base_offset + 8 * i + 4)) << 32 |
+ ((u64)in_be32((char *)regs
+ + base_offset + 8 * i));
+}
+
+void
+fman_if_stats_reset(struct fman_if *p)
+{
+ struct __fman_if *m = container_of(p, struct __fman_if, __if);
+ struct memac_regs *regs = m->ccsr_map;
+ uint32_t tmp;
+
+ tmp = in_be32(&regs->statn_config);
+
+ tmp |= STATS_CFG_CLR;
+
+ out_be32(&regs->statn_config, tmp);
+
+ while (in_be32(&regs->statn_config) & STATS_CFG_CLR)
+ ;
+}
+
+void
+fman_if_promiscuous_enable(struct fman_if *p)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+ void *cmdcfg;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ /* Enable Rx promiscuous mode */
+ cmdcfg = &((struct memac_regs *)__if->ccsr_map)->command_config;
+ out_be32(cmdcfg, in_be32(cmdcfg) | CMD_CFG_PROMIS_EN);
+}
+
+void
+fman_if_promiscuous_disable(struct fman_if *p)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+ void *cmdcfg;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ /* Disable Rx promiscuous mode */
+ cmdcfg = &((struct memac_regs *)__if->ccsr_map)->command_config;
+ out_be32(cmdcfg, in_be32(cmdcfg) & (~CMD_CFG_PROMIS_EN));
+}
+
+void
+fman_if_enable_rx(struct fman_if *p)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ /* enable Rx and Tx */
+ out_be32(__if->ccsr_map + 8, in_be32(__if->ccsr_map + 8) | 3);
+}
+
+void
+fman_if_disable_rx(struct fman_if *p)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ /* only disable Rx, not Tx */
+ out_be32(__if->ccsr_map + 8, in_be32(__if->ccsr_map + 8) & ~(u32)2);
+}
+
+void
+fman_if_loopback_enable(struct fman_if *p)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ /* Enable loopback mode */
+ if ((__if->__if.is_memac) && (__if->__if.is_rgmii)) {
+ unsigned int *ifmode =
+ &((struct memac_regs *)__if->ccsr_map)->if_mode;
+ out_be32(ifmode, in_be32(ifmode) | IF_MODE_RLP);
+ } else {
+ unsigned int *cmdcfg =
+ &((struct memac_regs *)__if->ccsr_map)->command_config;
+ out_be32(cmdcfg, in_be32(cmdcfg) | CMD_CFG_LOOPBACK_EN);
+ }
+}
+
+void
+fman_if_loopback_disable(struct fman_if *p)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+ /* Disable loopback mode */
+ if ((__if->__if.is_memac) && (__if->__if.is_rgmii)) {
+ unsigned int *ifmode =
+ &((struct memac_regs *)__if->ccsr_map)->if_mode;
+ out_be32(ifmode, in_be32(ifmode) & ~IF_MODE_RLP);
+ } else {
+ unsigned int *cmdcfg =
+ &((struct memac_regs *)__if->ccsr_map)->command_config;
+ out_be32(cmdcfg, in_be32(cmdcfg) & ~CMD_CFG_LOOPBACK_EN);
+ }
+}
+
+void
+fman_if_set_bp(struct fman_if *fm_if, unsigned num __always_unused,
+ int bpid, size_t bufsize)
+{
+ u32 fmbm_ebmpi;
+ u32 ebmpi_val_ace = 0xc0000000;
+ u32 ebmpi_mask = 0xffc00000;
+
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ fmbm_ebmpi =
+ in_be32(&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_ebmpi[0]);
+ fmbm_ebmpi = ebmpi_val_ace | (fmbm_ebmpi & ebmpi_mask) | (bpid << 16) |
+ (bufsize);
+
+ out_be32(&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_ebmpi[0],
+ fmbm_ebmpi);
+}
+
+int
+fman_if_get_fc_threshold(struct fman_if *fm_if)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ unsigned int *fmbm_mpd;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ fmbm_mpd = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_mpd;
+ return in_be32(fmbm_mpd);
+}
+
+int
+fman_if_set_fc_threshold(struct fman_if *fm_if, u32 high_water,
+ u32 low_water, u32 bpid)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ unsigned int *fmbm_mpd;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ fmbm_mpd = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_mpd;
+ out_be32(fmbm_mpd, FMAN_ENABLE_BPOOL_DEPLETION);
+ return bm_pool_set_hw_threshold(bpid, low_water, high_water);
+
+}
+
+int
+fman_if_get_fc_quanta(struct fman_if *fm_if)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ return in_be32(&((struct memac_regs *)__if->ccsr_map)->pause_quanta[0]);
+}
+
+int
+fman_if_set_fc_quanta(struct fman_if *fm_if, u16 pause_quanta)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ out_be32(&((struct memac_regs *)__if->ccsr_map)->pause_quanta[0],
+ pause_quanta);
+ return 0;
+}
+
+int
+fman_if_get_fdoff(struct fman_if *fm_if)
+{
+ u32 fmbm_rebm;
+ int fdoff;
+
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ fmbm_rebm = in_be32(&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rebm);
+
+ fdoff = (fmbm_rebm >> FMAN_SP_EXT_BUF_MARG_START_SHIFT) & 0x1ff;
+
+ return fdoff;
+}
+
+void
+fman_if_set_err_fqid(struct fman_if *fm_if, uint32_t err_fqid)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ unsigned int *fmbm_refqid =
+ &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_refqid;
+ out_be32(fmbm_refqid, err_fqid);
+}
+
+int
+fman_if_get_ic_params(struct fman_if *fm_if, struct fman_if_ic_params *icp)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ int val = 0;
+ int iceof_mask = 0x001f0000;
+ int icsz_mask = 0x0000001f;
+ int iciof_mask = 0x00000f00;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ unsigned int *fmbm_ricp =
+ &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_ricp;
+ val = in_be32(fmbm_ricp);
+
+ icp->iceof = (val & iceof_mask) >> 12;
+ icp->iciof = (val & iciof_mask) >> 4;
+ icp->icsz = (val & icsz_mask) << 4;
+
+ return 0;
+}
+
+int
+fman_if_set_ic_params(struct fman_if *fm_if,
+ const struct fman_if_ic_params *icp)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ int val = 0;
+ int iceof_mask = 0x001f0000;
+ int icsz_mask = 0x0000001f;
+ int iciof_mask = 0x00000f00;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ val |= (icp->iceof << 12) & iceof_mask;
+ val |= (icp->iciof << 4) & iciof_mask;
+ val |= (icp->icsz >> 4) & icsz_mask;
+
+ unsigned int *fmbm_ricp =
+ &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_ricp;
+ out_be32(fmbm_ricp, val);
+
+ return 0;
+}
+
+void
+fman_if_set_fdoff(struct fman_if *fm_if, uint32_t fd_offset)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ unsigned int *fmbm_rebm;
+ int val = 0;
+ int fmbm_mask = 0x01ff0000;
+
+ val = fd_offset << FMAN_SP_EXT_BUF_MARG_START_SHIFT;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ fmbm_rebm = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rebm;
+
+ out_be32(fmbm_rebm, (in_be32(fmbm_rebm) & ~fmbm_mask) | val);
+}
+
+void
+fman_if_set_maxfrm(struct fman_if *fm_if, uint16_t max_frm)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ unsigned int *reg_maxfrm;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ reg_maxfrm = &((struct memac_regs *)__if->ccsr_map)->maxfrm;
+
+ out_be32(reg_maxfrm, (in_be32(reg_maxfrm) & 0xFFFF0000) | max_frm);
+}
+
+uint16_t
+fman_if_get_maxfrm(struct fman_if *fm_if)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ unsigned int *reg_maxfrm;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ reg_maxfrm = &((struct memac_regs *)__if->ccsr_map)->maxfrm;
+
+ return (in_be32(reg_maxfrm) & 0x0000FFFF);
+}
+
+/* MSB in fmbm_rebm register
+ * 0 - If BMI cannot store the frame in a single buffer it may select a buffer
+ * of smaller size and store the frame in scatter gather (S/G) buffers
+ * 1 - Scatter gather format is not enabled for frame storage. If BMI cannot
+ * store the frame in a single buffer, the frame is discarded.
+ */
+
+int
+fman_if_get_sg_enable(struct fman_if *fm_if)
+{
+ u32 fmbm_rebm;
+
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ fmbm_rebm = in_be32(&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rebm);
+
+ return (fmbm_rebm & FMAN_SP_SG_DISABLE) ? 0 : 1;
+}
+
+void
+fman_if_set_sg(struct fman_if *fm_if, int enable)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ unsigned int *fmbm_rebm;
+ int val;
+ int fmbm_mask = FMAN_SP_SG_DISABLE;
+
+ if (enable)
+ val = 0;
+ else
+ val = FMAN_SP_SG_DISABLE;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ fmbm_rebm = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rebm;
+
+ out_be32(fmbm_rebm, (in_be32(fmbm_rebm) & ~fmbm_mask) | val);
+}
+
+void
+fman_if_set_dnia(struct fman_if *fm_if, uint32_t nia)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ unsigned int *fmqm_pndn;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ fmqm_pndn = &((struct fman_port_qmi_regs *)__if->qmi_map)->fmqm_pndn;
+
+ out_be32(fmqm_pndn, nia);
+}
+
+void
+fman_if_discard_rx_errors(struct fman_if *fm_if)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ unsigned int *fmbm_rfsdm, *fmbm_rfsem;
+
+ fmbm_rfsem = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rfsem;
+ out_be32(fmbm_rfsem, 0);
+
+ /* Configure the discard mask to discard error packets that have
+ * DMA errors, frame size errors, header errors, etc. The mask
+ * 0x010EE3F0 discards all errors reported in FD[STATUS].
+ */
+ fmbm_rfsdm = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rfsdm;
+ out_be32(fmbm_rfsdm, 0x010EE3F0);
+}
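+
+/* Usage sketch, not part of the driver: given a struct fman_if obtained
+ * from fman_if_list, program a (hypothetical) locally administered MAC
+ * address into slot 1, enable promiscuous mode and cap the frame length.
+ */
+static void fman_hw_usage_sketch(struct fman_if *p)
+{
+ uint8_t mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
+
+ fman_if_add_mac_addr(p, mac, 1);
+ fman_if_promiscuous_enable(p);
+ fman_if_set_maxfrm(p, 1518);
+}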
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/fman/netcfg_layer.c b/src/spdk/dpdk/drivers/bus/dpaa/base/fman/netcfg_layer.c
new file mode 100644
index 000000000..36eca88cd
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/fman/netcfg_layer.c
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2010-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2019 NXP
+ *
+ */
+#include <inttypes.h>
+#include <dpaa_of.h>
+#include <net/if.h>
+#include <sys/ioctl.h>
+#include <error.h>
+#include <net/if_arp.h>
+#include <assert.h>
+#include <unistd.h>
+
+#include <rte_malloc.h>
+
+#include <rte_dpaa_logs.h>
+#include <netcfg.h>
+
+/* This data structure contains all configuration information
+ * related to usage of DPAA devices.
+ */
+static struct netcfg_info *netcfg;
+/* fd of a socket used for making ioctl requests to disable/enable shared
+ * interfaces.
+ */
+static int skfd = -1;
+
+#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
+void
+dump_netcfg(struct netcfg_info *cfg_ptr)
+{
+ int i;
+
+ printf(".......... DPAA Configuration ..........\n\n");
+
+ /* Network interfaces */
+ printf("Network interfaces: %d\n", cfg_ptr->num_ethports);
+ for (i = 0; i < cfg_ptr->num_ethports; i++) {
+ struct fman_if_bpool *bpool;
+ struct fm_eth_port_cfg *p_cfg = &cfg_ptr->port_cfg[i];
+ struct fman_if *__if = p_cfg->fman_if;
+
+ printf("\n+ Fman %d, MAC %d (%s);\n",
+ __if->fman_idx, __if->mac_idx,
+ (__if->mac_type == fman_mac_1g) ? "1G" : "10G");
+
+ printf("\tmac_addr: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ (&__if->mac_addr)->addr_bytes[0],
+ (&__if->mac_addr)->addr_bytes[1],
+ (&__if->mac_addr)->addr_bytes[2],
+ (&__if->mac_addr)->addr_bytes[3],
+ (&__if->mac_addr)->addr_bytes[4],
+ (&__if->mac_addr)->addr_bytes[5]);
+
+ printf("\ttx_channel_id: 0x%02x\n",
+ __if->tx_channel_id);
+
+ printf("\tfqid_rx_def: 0x%x\n", p_cfg->rx_def);
+ printf("\tfqid_rx_err: 0x%x\n", __if->fqid_rx_err);
+
+ printf("\tfqid_tx_err: 0x%x\n", __if->fqid_tx_err);
+ printf("\tfqid_tx_confirm: 0x%x\n", __if->fqid_tx_confirm);
+ fman_if_for_each_bpool(bpool, __if)
+ printf("\tbuffer pool: (bpid=%d, count=%"PRId64
+ " size=%"PRId64", addr=0x%"PRIx64")\n",
+ bpool->bpid, bpool->count, bpool->size,
+ bpool->addr);
+ }
+}
+#endif /* RTE_LIBRTE_DPAA_DEBUG_DRIVER */
+
+struct netcfg_info *
+netcfg_acquire(void)
+{
+ struct fman_if *__if;
+ int _errno, idx = 0;
+ uint8_t num_ports = 0;
+ uint8_t num_cfg_ports = 0;
+ size_t size;
+
+ /* Extract dpa configuration from fman driver and FMC configuration
+ * for command-line interfaces.
+ */
+
+ /* Open a basic socket to enable/disable shared
+ * interfaces.
+ */
+ skfd = socket(AF_PACKET, SOCK_RAW, 0);
+ if (unlikely(skfd < 0)) {
+ error(0, errno, "%s(): open(SOCK_RAW)", __func__);
+ return NULL;
+ }
+
+ /* Initialise the Fman driver */
+ _errno = fman_init();
+ if (_errno) {
+ DPAA_BUS_LOG(ERR, "FMAN driver init failed (%d)", errno);
+ close(skfd);
+ skfd = -1;
+ return NULL;
+ }
+
+ /* Number of MAC ports */
+ list_for_each_entry(__if, fman_if_list, node)
+ num_ports++;
+
+ if (!num_ports) {
+ DPAA_BUS_LOG(ERR, "FMAN ports not available");
+ close(skfd);
+ skfd = -1;
+ return NULL;
+ }
+ /* Allocate space for all enabled mac ports */
+ size = sizeof(*netcfg) +
+ (num_ports * sizeof(struct fm_eth_port_cfg));
+
+ netcfg = rte_calloc(NULL, 1, size, 0);
+ if (unlikely(netcfg == NULL)) {
+ DPAA_BUS_LOG(ERR, "Unable to allocat mem for netcfg");
+ goto error;
+ }
+
+ netcfg->num_ethports = num_ports;
+
+ list_for_each_entry(__if, fman_if_list, node) {
+ struct fm_eth_port_cfg *cfg = &netcfg->port_cfg[idx];
+ /* Hook in the fman driver interface */
+ cfg->fman_if = __if;
+ cfg->rx_def = __if->fqid_rx_def;
+ num_cfg_ports++;
+ idx++;
+ }
+
+ if (!num_cfg_ports) {
+ DPAA_BUS_LOG(ERR, "No FMAN ports found");
+ goto error;
+ } else if (num_ports != num_cfg_ports)
+ netcfg->num_ethports = num_cfg_ports;
+
+ return netcfg;
+
+error:
+ if (netcfg) {
+ rte_free(netcfg);
+ netcfg = NULL;
+ }
+
+ return NULL;
+}
+
+void
+netcfg_release(struct netcfg_info *cfg_ptr)
+{
+ rte_free(cfg_ptr);
+ /* Close socket for shared interfaces */
+ if (skfd >= 0) {
+ close(skfd);
+ skfd = -1;
+ }
+}
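+
+/* Usage sketch, not part of the driver: acquire the configuration once,
+ * walk the discovered ports, then release it.
+ */
+static void netcfg_usage_sketch(void)
+{
+ struct netcfg_info *cfg = netcfg_acquire();
+ int i;
+
+ if (!cfg)
+ return;
+ for (i = 0; i < cfg->num_ethports; i++)
+ printf("port %d: default Rx FQID 0x%x\n", i,
+ cfg->port_cfg[i].rx_def);
+ netcfg_release(cfg);
+}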
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman.c b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman.c
new file mode 100644
index 000000000..8a6290734
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman.c
@@ -0,0 +1,361 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#include "bman.h"
+#include <rte_branch_prediction.h>
+
+/* Compilation constants */
+#define RCR_THRESH 2 /* reread h/w CI when running out of space */
+#define IRQNAME "BMan portal %d"
+#define MAX_IRQNAME 16 /* big enough for "BMan portal %d" */
+
+struct bman_portal {
+ struct bm_portal p;
+ /* 2-element array. pools[0] is mask, pools[1] is snapshot. */
+ struct bman_depletion *pools;
+ int thresh_set;
+ unsigned long irq_sources;
+ u32 slowpoll; /* only used when interrupts are off */
+ /* When the cpu-affine portal is activated, this is non-NULL */
+ const struct bm_portal_config *config;
+ char irqname[MAX_IRQNAME];
+};
+
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+static RTE_DEFINE_PER_LCORE(struct bman_portal, bman_affine_portal);
+
+static inline struct bman_portal *get_affine_portal(void)
+{
+ return &RTE_PER_LCORE(bman_affine_portal);
+}
+
+/*
+ * This object type refers to a pool, it isn't *the* pool. There may be
+ * more than one such object per BMan buffer pool, eg. if different users of
+ * the pool are operating via different portals.
+ */
+struct bman_pool {
+ struct bman_pool_params params;
+ /* Used for hash-table admin when using depletion notifications. */
+ struct bman_portal *portal;
+ struct bman_pool *next;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ atomic_t in_use;
+#endif
+};
+
+static inline
+struct bman_portal *bman_create_portal(struct bman_portal *portal,
+ const struct bm_portal_config *c)
+{
+ struct bm_portal *p;
+ const struct bman_depletion *pools = &c->mask;
+ int ret;
+ u8 bpid = 0;
+
+ p = &portal->p;
+ /*
+ * prep the low-level portal struct with the mapped addresses from the
+ * config, everything that follows depends on it and "config" is more
+ * for (de)reference...
+ */
+ p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
+ p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
+ if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) {
+ pr_err("Bman RCR initialisation failed\n");
+ return NULL;
+ }
+ if (bm_mc_init(p)) {
+ pr_err("Bman MC initialisation failed\n");
+ goto fail_mc;
+ }
+ portal->pools = kmalloc(2 * sizeof(*pools), GFP_KERNEL);
+ if (!portal->pools)
+ goto fail_pools;
+ portal->pools[0] = *pools;
+ bman_depletion_init(portal->pools + 1);
+ while (bpid < bman_pool_max) {
+ /*
+ * Default to all BPIDs disabled, we enable as required at
+ * run-time.
+ */
+ bm_isr_bscn_mask(p, bpid, 0);
+ bpid++;
+ }
+ portal->slowpoll = 0;
+ /* Write-to-clear any stale interrupt status bits */
+ bm_isr_disable_write(p, 0xffffffff);
+ portal->irq_sources = 0;
+ bm_isr_enable_write(p, portal->irq_sources);
+ bm_isr_status_clear(p, 0xffffffff);
+ snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
+ if (request_irq(c->irq, NULL, 0, portal->irqname,
+ portal)) {
+ pr_err("request_irq() failed\n");
+ goto fail_irq;
+ }
+
+ /* Need RCR to be empty before continuing */
+ ret = bm_rcr_get_fill(p);
+ if (ret) {
+ pr_err("Bman RCR unclean\n");
+ goto fail_rcr_empty;
+ }
+ /* Success */
+ portal->config = c;
+
+ bm_isr_disable_write(p, 0);
+ bm_isr_uninhibit(p);
+ return portal;
+fail_rcr_empty:
+ free_irq(c->irq, portal);
+fail_irq:
+ kfree(portal->pools);
+fail_pools:
+ bm_mc_finish(p);
+fail_mc:
+ bm_rcr_finish(p);
+ return NULL;
+}
+
+struct bman_portal *
+bman_create_affine_portal(const struct bm_portal_config *c)
+{
+ struct bman_portal *portal = get_affine_portal();
+
+ /* This function is called from a context that is already affine to a
+ * CPU or, in other words, is non-migratable to other CPUs.
+ */
+ portal = bman_create_portal(portal, c);
+ if (portal) {
+ spin_lock(&affine_mask_lock);
+ CPU_SET(c->cpu, &affine_mask);
+ spin_unlock(&affine_mask_lock);
+ }
+ return portal;
+}
+
+static inline
+void bman_destroy_portal(struct bman_portal *bm)
+{
+ const struct bm_portal_config *pcfg;
+
+ pcfg = bm->config;
+ bm_rcr_cce_update(&bm->p);
+ bm_rcr_cce_update(&bm->p);
+
+ free_irq(pcfg->irq, bm);
+
+ kfree(bm->pools);
+ bm_mc_finish(&bm->p);
+ bm_rcr_finish(&bm->p);
+ bm->config = NULL;
+}
+
+const struct bm_portal_config *
+bman_destroy_affine_portal(void)
+{
+ struct bman_portal *bm = get_affine_portal();
+ const struct bm_portal_config *pcfg;
+
+ pcfg = bm->config;
+ bman_destroy_portal(bm);
+ spin_lock(&affine_mask_lock);
+ CPU_CLR(pcfg->cpu, &affine_mask);
+ spin_unlock(&affine_mask_lock);
+ return pcfg;
+}
+
+int
+bman_get_portal_index(void)
+{
+ struct bman_portal *p = get_affine_portal();
+ return p->config->index;
+}
+
+static const u32 zero_thresholds[4] = {0, 0, 0, 0};
+
+struct bman_pool *bman_new_pool(const struct bman_pool_params *params)
+{
+ struct bman_pool *pool = NULL;
+ u32 bpid;
+
+ if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID) {
+ int ret = bman_alloc_bpid(&bpid);
+
+ if (ret)
+ return NULL;
+ } else {
+ if (params->bpid >= bman_pool_max)
+ return NULL;
+ bpid = params->bpid;
+ }
+ if (params->flags & BMAN_POOL_FLAG_THRESH) {
+ int ret = bm_pool_set(bpid, params->thresholds);
+
+ if (ret)
+ goto err;
+ }
+
+ pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ goto err;
+ pool->params = *params;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ atomic_set(&pool->in_use, 1);
+#endif
+ if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
+ pool->params.bpid = bpid;
+
+ return pool;
+err:
+ if (params->flags & BMAN_POOL_FLAG_THRESH)
+ bm_pool_set(bpid, zero_thresholds);
+
+ if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
+ bman_release_bpid(bpid);
+ kfree(pool);
+
+ return NULL;
+}
+
+void bman_free_pool(struct bman_pool *pool)
+{
+ if (pool->params.flags & BMAN_POOL_FLAG_THRESH)
+ bm_pool_set(pool->params.bpid, zero_thresholds);
+ if (pool->params.flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
+ bman_release_bpid(pool->params.bpid);
+ kfree(pool);
+}
+
+const struct bman_pool_params *bman_get_params(const struct bman_pool *pool)
+{
+ return &pool->params;
+}
+
+static void update_rcr_ci(struct bman_portal *p, int avail)
+{
+ if (avail)
+ bm_rcr_cce_prefetch(&p->p);
+ else
+ bm_rcr_cce_update(&p->p);
+}
+
+#define BMAN_BUF_MASK 0x0000fffffffffffful
+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
+ u32 flags __maybe_unused)
+{
+ struct bman_portal *p;
+ struct bm_rcr_entry *r;
+ u32 i = num - 1;
+ u8 avail;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ if (!num || (num > 8))
+ return -EINVAL;
+ if (pool->params.flags & BMAN_POOL_FLAG_NO_RELEASE)
+ return -EINVAL;
+#endif
+
+ p = get_affine_portal();
+ avail = bm_rcr_get_avail(&p->p);
+ if (avail < 2)
+ update_rcr_ci(p, avail);
+ r = bm_rcr_start(&p->p);
+ if (unlikely(!r))
+ return -EBUSY;
+
+ /*
+ * we can copy all but the first entry, as this can trigger badness
+ * with the valid-bit
+ */
+ r->bufs[0].opaque =
+ cpu_to_be64(((u64)pool->params.bpid << 48) |
+ (bufs[0].opaque & BMAN_BUF_MASK));
+ if (i) {
+ for (i = 1; i < num; i++)
+ r->bufs[i].opaque =
+ cpu_to_be64(bufs[i].opaque & BMAN_BUF_MASK);
+ }
+
+ bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
+ (num & BM_RCR_VERB_BUFCOUNT_MASK));
+
+ return 0;
+}
+
+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
+ u32 flags __maybe_unused)
+{
+ struct bman_portal *p = get_affine_portal();
+ struct bm_mc_command *mcc;
+ struct bm_mc_result *mcr;
+ int ret, i;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ if (!num || (num > 8))
+ return -EINVAL;
+ if (pool->params.flags & BMAN_POOL_FLAG_ONLY_RELEASE)
+ return -EINVAL;
+#endif
+
+ mcc = bm_mc_start(&p->p);
+ mcc->acquire.bpid = pool->params.bpid;
+ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
+ (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
+ while (!(mcr = bm_mc_result(&p->p)))
+ cpu_relax();
+ ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
+ if (bufs) {
+ for (i = 0; i < num; i++)
+ bufs[i].opaque =
+ be64_to_cpu(mcr->acquire.bufs[i].opaque);
+ }
+ if (ret != num)
+ ret = -ENOMEM;
+ return ret;
+}
+
+int bman_query_pools(struct bm_pool_state *state)
+{
+ struct bman_portal *p = get_affine_portal();
+ struct bm_mc_result *mcr;
+
+ bm_mc_start(&p->p);
+ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY);
+ while (!(mcr = bm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & BM_MCR_VERB_CMD_MASK) ==
+ BM_MCR_VERB_CMD_QUERY);
+ *state = mcr->query;
+ state->as.state.state[0] = be32_to_cpu(state->as.state.state[0]);
+ state->as.state.state[1] = be32_to_cpu(state->as.state.state[1]);
+ state->ds.state.state[0] = be32_to_cpu(state->ds.state.state[0]);
+ state->ds.state.state[1] = be32_to_cpu(state->ds.state.state[1]);
+ return 0;
+}
+
+u32 bman_query_free_buffers(struct bman_pool *pool)
+{
+ return bm_pool_free_buffers(pool->params.bpid);
+}
+
+int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds)
+{
+ u32 bpid;
+
+ bpid = bman_get_params(pool)->bpid;
+
+ return bm_pool_set(bpid, thresholds);
+}
+
+int bman_shutdown_pool(u32 bpid)
+{
+ struct bman_portal *p = get_affine_portal();
+ return bm_shutdown_pool(&p->p, bpid);
+}
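+
+/* Usage sketch, not part of the driver: a release/acquire round trip on a
+ * dynamically allocated pool, called from a thread whose affine portal has
+ * already been created. The buffer address is hypothetical.
+ */
+static int bman_pool_sketch(void)
+{
+ struct bman_pool_params params = {
+ .flags = BMAN_POOL_FLAG_DYNAMIC_BPID,
+ };
+ struct bman_pool *pool = bman_new_pool(&params);
+ struct bm_buffer buf = { .opaque = 0x12345000 };
+ int ret;
+
+ if (!pool)
+ return -ENOMEM;
+ ret = bman_release(pool, &buf, 1, 0);
+ if (!ret)
+ ret = bman_acquire(pool, &buf, 1, 0);
+ bman_free_pool(pool);
+ return ret;
+}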
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman.h b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman.h
new file mode 100644
index 000000000..21a6bee77
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman.h
@@ -0,0 +1,541 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2010-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#ifndef __BMAN_H
+#define __BMAN_H
+
+#include "bman_priv.h"
+
+/* Cache-inhibited register offsets */
+#define BM_REG_RCR_PI_CINH 0x3000
+#define BM_REG_RCR_CI_CINH 0x3100
+#define BM_REG_RCR_ITR 0x3200
+#define BM_REG_CFG 0x3300
+#define BM_REG_SCN(n) (0x3400 + ((n) << 6))
+#define BM_REG_ISR 0x3e00
+#define BM_REG_IIR 0x3ec0
+
+/* Cache-enabled register offsets */
+#define BM_CL_CR 0x0000
+#define BM_CL_RR0 0x0100
+#define BM_CL_RR1 0x0140
+#define BM_CL_RCR 0x1000
+#define BM_CL_RCR_PI_CENA 0x3000
+#define BM_CL_RCR_CI_CENA 0x3100
+
+/* BTW, the drivers (and h/w programming model) already obtain the required
+ * synchronisation for portal accesses via lwsync(), hwsync(), and
+ * data-dependencies. Use of barrier()s or other order-preserving primitives
+ * simply degrade performance. Hence the use of the __raw_*() interfaces, which
+ * simply ensure that the compiler treats the portal registers as volatile (ie.
+ * non-coherent).
+ */
+
+/* Cache-inhibited register access. */
+#define __bm_in(bm, o) be32_to_cpu(__raw_readl((bm)->ci + (o)))
+#define __bm_out(bm, o, val) __raw_writel(cpu_to_be32(val), \
+ (bm)->ci + (o))
+#define bm_in(reg) __bm_in(&portal->addr, BM_REG_##reg)
+#define bm_out(reg, val) __bm_out(&portal->addr, BM_REG_##reg, val)
+
+/* Cache-enabled (index) register access */
+#define __bm_cl_touch_ro(bm, o) dcbt_ro((bm)->ce + (o))
+#define __bm_cl_touch_rw(bm, o) dcbt_rw((bm)->ce + (o))
+#define __bm_cl_in(bm, o) be32_to_cpu(__raw_readl((bm)->ce + (o)))
+#define __bm_cl_out(bm, o, val) \
+ do { \
+ u32 *__tmpclout = (bm)->ce + (o); \
+ __raw_writel(cpu_to_be32(val), __tmpclout); \
+ dcbf(__tmpclout); \
+ } while (0)
+#define __bm_cl_invalidate(bm, o) dccivac((bm)->ce + (o))
+#define bm_cl_touch_ro(reg) __bm_cl_touch_ro(&portal->addr, BM_CL_##reg##_CENA)
+#define bm_cl_touch_rw(reg) __bm_cl_touch_rw(&portal->addr, BM_CL_##reg##_CENA)
+#define bm_cl_in(reg) __bm_cl_in(&portal->addr, BM_CL_##reg##_CENA)
+#define bm_cl_out(reg, val) __bm_cl_out(&portal->addr, BM_CL_##reg##_CENA, val)
+#define bm_cl_invalidate(reg)\
+ __bm_cl_invalidate(&portal->addr, BM_CL_##reg##_CENA)
+
+/* Cyclic helper for rings. FIXME: once we are able to do fine-grain perf
+ * analysis, look at using the "extra" bit in the ring index registers to avoid
+ * cyclic issues.
+ */
+static inline u8 bm_cyc_diff(u8 ringsize, u8 first, u8 last)
+{
+ /* 'first' is included, 'last' is excluded */
+ if (first <= last)
+ return last - first;
+ return ringsize + last - first;
+}
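+
+/* Worked example: with ringsize 8, first == 6 and last == 2, the ring has
+ * wrapped, so the distance is 8 + 2 - 6 = 4 entries.
+ */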
+
+/* Portal modes.
+ * Enum types:
+ * pmode == production mode
+ * cmode == consumption mode
+ * Enum values use 3-letter codes. The first letter matches the portal mode;
+ * the remaining two letters indicate:
+ * ci == cache-inhibited portal register
+ * ce == cache-enabled portal register
+ * vb == in-band valid-bit (cache-enabled)
+ */
+enum bm_rcr_pmode { /* matches BCSP_CFG::RPM */
+ bm_rcr_pci = 0, /* PI index, cache-inhibited */
+ bm_rcr_pce = 1, /* PI index, cache-enabled */
+ bm_rcr_pvb = 2 /* valid-bit */
+};
+
+enum bm_rcr_cmode { /* s/w-only */
+ bm_rcr_cci, /* CI index, cache-inhibited */
+ bm_rcr_cce /* CI index, cache-enabled */
+};
+
+/* --- Portal structures --- */
+
+#define BM_RCR_SIZE 8
+
+struct bm_rcr {
+ struct bm_rcr_entry *ring, *cursor;
+ u8 ci, available, ithresh, vbit;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ u32 busy;
+ enum bm_rcr_pmode pmode;
+ enum bm_rcr_cmode cmode;
+#endif
+};
+
+struct bm_mc {
+ struct bm_mc_command *cr;
+ struct bm_mc_result *rr;
+ u8 rridx, vbit;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ enum {
+ /* Can only be _mc_start()ed */
+ mc_idle,
+ /* Can only be _mc_commit()ed or _mc_abort()ed */
+ mc_user,
+ /* Can only be _mc_retry()ed */
+ mc_hw
+ } state;
+#endif
+};
+
+struct bm_addr {
+ void __iomem *ce; /* cache-enabled */
+ void __iomem *ci; /* cache-inhibited */
+};
+
+struct bm_portal {
+ struct bm_addr addr;
+ struct bm_rcr rcr;
+ struct bm_mc mc;
+ struct bm_portal_config config;
+} ____cacheline_aligned;
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+#define RCR_CARRYCLEAR(p) \
+ (void *)((unsigned long)(p) & (~(unsigned long)(BM_RCR_SIZE << 6)))
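+
+/* Each RCR entry is 64 bytes, so an 8-entry ring spans 512 bytes; clearing
+ * the (BM_RCR_SIZE << 6) bit wraps an off-the-end pointer back to the ring
+ * base.
+ */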
+
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static inline u8 RCR_PTR2IDX(struct bm_rcr_entry *e)
+{
+ return ((uintptr_t)e >> 6) & (BM_RCR_SIZE - 1);
+}
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void RCR_INC(struct bm_rcr *rcr)
+{
+ /* NB: this is odd-looking, but experiments show that it generates
+ * fast code with essentially no branching overheads. We increment to
+ * the next RCR pointer and handle overflow and 'vbit'.
+ */
+ struct bm_rcr_entry *partial = rcr->cursor + 1;
+
+ rcr->cursor = RCR_CARRYCLEAR(partial);
+ if (partial != rcr->cursor)
+ rcr->vbit ^= BM_RCR_VERB_VBIT;
+}
+
+static inline int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
+ __maybe_unused enum bm_rcr_cmode cmode)
+{
+ /* This use of 'register', as well as all other occurrences, is because
+ * it has been observed to generate much faster code with gcc than is
+ * otherwise the case.
+ */
+ register struct bm_rcr *rcr = &portal->rcr;
+ u32 cfg;
+ u8 pi;
+
+ rcr->ring = portal->addr.ce + BM_CL_RCR;
+ rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+
+ pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+ rcr->cursor = rcr->ring + pi;
+ rcr->vbit = (bm_in(RCR_PI_CINH) & BM_RCR_SIZE) ? BM_RCR_VERB_VBIT : 0;
+ rcr->available = BM_RCR_SIZE - 1
+ - bm_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
+ rcr->ithresh = bm_in(RCR_ITR);
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ rcr->busy = 0;
+ rcr->pmode = pmode;
+ rcr->cmode = cmode;
+#endif
+ cfg = (bm_in(CFG) & 0xffffffe0) | (pmode & 0x3); /* BCSP_CFG::RPM */
+ bm_out(CFG, cfg);
+ return 0;
+}
+
+static inline void bm_rcr_finish(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+ u8 pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+ u8 ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(!rcr->busy);
+#endif
+ if (pi != RCR_PTR2IDX(rcr->cursor))
+ pr_crit("losing uncommitted RCR entries\n");
+ if (ci != rcr->ci)
+ pr_crit("missing existing RCR completions\n");
+ if (rcr->ci != RCR_PTR2IDX(rcr->cursor))
+ pr_crit("RCR destroyed unquiesced\n");
+}
+
+static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(!rcr->busy);
+#endif
+ if (!rcr->available)
+ return NULL;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ rcr->busy = 1;
+#endif
+ dcbz_64(rcr->cursor);
+ return rcr->cursor;
+}
+
+static inline void bm_rcr_abort(struct bm_portal *portal)
+{
+ __maybe_unused register struct bm_rcr *rcr = &portal->rcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(rcr->busy);
+ rcr->busy = 0;
+#endif
+}
+
+static inline struct bm_rcr_entry *bm_rcr_pend_and_next(
+ struct bm_portal *portal, u8 myverb)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(rcr->busy);
+ DPAA_ASSERT(rcr->pmode != bm_rcr_pvb);
+#endif
+ if (rcr->available == 1)
+ return NULL;
+ rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
+ dcbf_64(rcr->cursor);
+ RCR_INC(rcr);
+ rcr->available--;
+ dcbz_64(rcr->cursor);
+ return rcr->cursor;
+}
+
+static inline void bm_rcr_pci_commit(struct bm_portal *portal, u8 myverb)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(rcr->busy);
+ DPAA_ASSERT(rcr->pmode == bm_rcr_pci);
+#endif
+ rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
+ RCR_INC(rcr);
+ rcr->available--;
+ hwsync();
+ bm_out(RCR_PI_CINH, RCR_PTR2IDX(rcr->cursor));
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ rcr->busy = 0;
+#endif
+}
+
+static inline void bm_rcr_pce_prefetch(struct bm_portal *portal)
+{
+ __maybe_unused register struct bm_rcr *rcr = &portal->rcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(rcr->pmode == bm_rcr_pce);
+#endif
+ bm_cl_invalidate(RCR_PI);
+ bm_cl_touch_rw(RCR_PI);
+}
+
+static inline void bm_rcr_pce_commit(struct bm_portal *portal, u8 myverb)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(rcr->busy);
+ DPAA_ASSERT(rcr->pmode == bm_rcr_pce);
+#endif
+ rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
+ RCR_INC(rcr);
+ rcr->available--;
+ lwsync();
+ bm_cl_out(RCR_PI, RCR_PTR2IDX(rcr->cursor));
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ rcr->busy = 0;
+#endif
+}
+
+static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+ struct bm_rcr_entry *rcursor;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(rcr->busy);
+ DPAA_ASSERT(rcr->pmode == bm_rcr_pvb);
+#endif
+ lwsync();
+ rcursor = rcr->cursor;
+ rcursor->__dont_write_directly__verb = myverb | rcr->vbit;
+ dcbf_64(rcursor);
+ RCR_INC(rcr);
+ rcr->available--;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ rcr->busy = 0;
+#endif
+}
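+
+/* Note (added, descriptive): the three producer modes above differ only in
+ * how the new producer index reaches hardware - pci writes it to the
+ * cache-inhibited register, pce writes it through the cache-enabled mapping,
+ * and pvb lets hardware discover new entries from the valid-bit in the verb
+ * itself, which is why bm_rcr_pvb_commit() flushes the entry (dcbf_64())
+ * instead of writing RCR_PI.
+ */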
+
+static inline u8 bm_rcr_cci_update(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+ u8 diff, old_ci = rcr->ci;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(rcr->cmode == bm_rcr_cci);
+#endif
+ rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+ diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
+ rcr->available += diff;
+ return diff;
+}
+
+static inline void bm_rcr_cce_prefetch(struct bm_portal *portal)
+{
+ __maybe_unused register struct bm_rcr *rcr = &portal->rcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
+#endif
+ bm_cl_touch_ro(RCR_CI);
+}
+
+static inline u8 bm_rcr_cce_update(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+ u8 diff, old_ci = rcr->ci;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
+#endif
+ rcr->ci = bm_cl_in(RCR_CI) & (BM_RCR_SIZE - 1);
+ bm_cl_invalidate(RCR_CI);
+ diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
+ rcr->available += diff;
+ return diff;
+}
+
+static inline u8 bm_rcr_get_ithresh(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+ return rcr->ithresh;
+}
+
+static inline void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+ rcr->ithresh = ithresh;
+ bm_out(RCR_ITR, ithresh);
+}
+
+static inline u8 bm_rcr_get_avail(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+ return rcr->available;
+}
+
+static inline u8 bm_rcr_get_fill(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+ return BM_RCR_SIZE - 1 - rcr->available;
+}
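+
+/* Note (added, descriptive): the ring never holds more than BM_RCR_SIZE - 1
+ * in-flight entries, so bm_rcr_get_avail() and bm_rcr_get_fill() always sum
+ * to BM_RCR_SIZE - 1.
+ */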
+
+/* --- Management command API --- */
+
+static inline int bm_mc_init(struct bm_portal *portal)
+{
+ register struct bm_mc *mc = &portal->mc;
+
+ mc->cr = portal->addr.ce + BM_CL_CR;
+ mc->rr = portal->addr.ce + BM_CL_RR0;
+ mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) &
+ BM_MCC_VERB_VBIT) ? 0 : 1;
+ mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ mc->state = mc_idle;
+#endif
+ return 0;
+}
+
+static inline void bm_mc_finish(struct bm_portal *portal)
+{
+ __maybe_unused register struct bm_mc *mc = &portal->mc;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mc->state == mc_idle);
+ if (mc->state != mc_idle)
+ pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
+{
+ register struct bm_mc *mc = &portal->mc;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mc->state == mc_idle);
+ mc->state = mc_user;
+#endif
+ dcbz_64(mc->cr);
+ return mc->cr;
+}
+
+static inline void bm_mc_abort(struct bm_portal *portal)
+{
+ __maybe_unused register struct bm_mc *mc = &portal->mc;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mc->state == mc_user);
+ mc->state = mc_idle;
+#endif
+}
+
+static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
+{
+ register struct bm_mc *mc = &portal->mc;
+ struct bm_mc_result *rr = mc->rr + mc->rridx;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mc->state == mc_user);
+#endif
+ lwsync();
+ mc->cr->__dont_write_directly__verb = myverb | mc->vbit;
+ dcbf(mc->cr);
+ dcbit_ro(rr);
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ mc->state = mc_hw;
+#endif
+}
+
+static inline struct bm_mc_result *bm_mc_result(struct bm_portal *portal)
+{
+ register struct bm_mc *mc = &portal->mc;
+ struct bm_mc_result *rr = mc->rr + mc->rridx;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mc->state == mc_hw);
+#endif
+ /* The inactive response register's verb byte always returns zero until
+ * its command is submitted and completed. This includes the valid-bit,
+ * in case you were wondering.
+ */
+ if (!__raw_readb(&rr->verb)) {
+ dcbit_ro(rr);
+ return NULL;
+ }
+ mc->rridx ^= 1;
+ mc->vbit ^= BM_MCC_VERB_VBIT;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ mc->state = mc_idle;
+#endif
+ return rr;
+}
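+
+/* Illustrative sketch (added, not part of the original sources): every
+ * management command follows the same start/commit/poll sequence against the
+ * two valid-bit-tracked response registers; bm_shutdown_pool() below uses
+ * this exact pattern for the ACQUIRE command.
+ */
+static inline struct bm_mc_result *bm_mc_exec_example(struct bm_portal *portal,
+ u8 myverb)
+{
+ struct bm_mc_result *rr;
+
+ (void)bm_mc_start(portal); /* mc_idle -> mc_user; returns the command */
+ /* ... a real caller fills in the command fields here ... */
+ bm_mc_commit(portal, myverb); /* mc_user -> mc_hw; hardware owns it now */
+ while (!(rr = bm_mc_result(portal))) /* poll until the verb goes non-zero */
+ cpu_relax();
+ return rr; /* state is back to mc_idle */
+}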
+
+#define SCN_REG(bpid) BM_REG_SCN((bpid) / 32)
+#define SCN_BIT(bpid) (0x80000000 >> ((bpid) & 31))
+static inline void bm_isr_bscn_mask(struct bm_portal *portal, u8 bpid,
+ int enable)
+{
+ u32 val;
+
+ DPAA_ASSERT(bpid < bman_pool_max);
+ /* REG_SCN for bpid=0..31, REG_SCN+4 for bpid=32..63 */
+ val = __bm_in(&portal->addr, SCN_REG(bpid));
+ if (enable)
+ val |= SCN_BIT(bpid);
+ else
+ val &= ~SCN_BIT(bpid);
+ __bm_out(&portal->addr, SCN_REG(bpid), val);
+}
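+
+/* Worked example (added): for bpid 40, SCN_REG(40) selects BM_REG_SCN(1) (the
+ * second 32-pool register) and SCN_BIT(40) is 0x80000000 >> 8, i.e. bit
+ * 0x00800000 - pools are numbered MSB-first within each register.
+ */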
+
+static inline u32 __bm_isr_read(struct bm_portal *portal, enum bm_isr_reg n)
+{
+#if defined(RTE_ARCH_ARM64)
+ return __bm_in(&portal->addr, BM_REG_ISR + (n << 6));
+#else
+ return __bm_in(&portal->addr, BM_REG_ISR + (n << 2));
+#endif
+}
+
+static inline void __bm_isr_write(struct bm_portal *portal, enum bm_isr_reg n,
+ u32 val)
+{
+#if defined(RTE_ARCH_ARM64)
+ __bm_out(&portal->addr, BM_REG_ISR + (n << 6), val);
+#else
+ __bm_out(&portal->addr, BM_REG_ISR + (n << 2), val);
+#endif
+}
+
+/* Buffer Pool Cleanup */
+static inline int bm_shutdown_pool(struct bm_portal *p, u32 bpid)
+{
+ struct bm_mc_command *bm_cmd;
+ struct bm_mc_result *bm_res;
+
+ int aq_count = 0;
+ bool stop = false;
+
+ while (!stop) {
+ /* Acquire buffers until empty */
+ bm_cmd = bm_mc_start(p);
+ bm_cmd->acquire.bpid = bpid;
+ bm_mc_commit(p, BM_MCC_VERB_CMD_ACQUIRE | 1);
+ while (!(bm_res = bm_mc_result(p)))
+ cpu_relax();
+ if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
+ /* Pool is empty */
+ stop = true;
+ } else {
+ ++aq_count;
+ }
+ }
+ return 0;
+}
+
+#endif /* __BMAN_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman_driver.c b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman_driver.c
new file mode 100644
index 000000000..750b756b9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman_driver.c
@@ -0,0 +1,295 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#include <rte_branch_prediction.h>
+
+#include <fsl_usd.h>
+#include <process.h>
+#include "bman_priv.h"
+#include <sys/ioctl.h>
+
+/*
+ * Global variables: the BMan IP revision and the maximum number of buffer
+ * pools supported by this BMan version
+ */
+static u16 bman_ip_rev;
+u16 bman_pool_max;
+static void *bman_ccsr_map;
+
+/*****************/
+/* Portal driver */
+/*****************/
+
+static __thread int bmfd = -1;
+static __thread struct bm_portal_config pcfg;
+static __thread struct dpaa_ioctl_portal_map map = {
+ .type = dpaa_portal_bman
+};
+
+static int fsl_bman_portal_init(uint32_t idx, int is_shared)
+{
+ cpu_set_t cpuset;
+ struct bman_portal *portal;
+ int loop, ret;
+ struct dpaa_ioctl_irq_map irq_map;
+
+ /* Verify the thread's cpu-affinity */
+ ret = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
+ &cpuset);
+ if (ret) {
+ error(0, ret, "pthread_getaffinity_np()");
+ return ret;
+ }
+ pcfg.cpu = -1;
+ for (loop = 0; loop < CPU_SETSIZE; loop++)
+ if (CPU_ISSET(loop, &cpuset)) {
+ if (pcfg.cpu != -1) {
+ pr_err("Thread is not affine to 1 cpu");
+ return -EINVAL;
+ }
+ pcfg.cpu = loop;
+ }
+ if (pcfg.cpu == -1) {
+ pr_err("Bug in getaffinity handling!");
+ return -EINVAL;
+ }
+ /* Allocate and map a bman portal */
+ map.index = idx;
+ ret = process_portal_map(&map);
+ if (ret) {
+ error(0, ret, "process_portal_map()");
+ return ret;
+ }
+ /* Make the portal's cache-[enabled|inhibited] regions */
+ pcfg.addr_virt[DPAA_PORTAL_CE] = map.addr.cena;
+ pcfg.addr_virt[DPAA_PORTAL_CI] = map.addr.cinh;
+ pcfg.is_shared = is_shared;
+ pcfg.index = map.index;
+ bman_depletion_fill(&pcfg.mask);
+
+ bmfd = open(BMAN_PORTAL_IRQ_PATH, O_RDONLY);
+ if (bmfd == -1) {
+ pr_err("BMan irq init failed");
+ process_portal_unmap(&map.addr);
+ return -EBUSY;
+ }
+ /* Use the IRQ FD as a unique IRQ number */
+ pcfg.irq = bmfd;
+
+ portal = bman_create_affine_portal(&pcfg);
+ if (!portal) {
+ pr_err("Bman portal initialisation failed (%d)",
+ pcfg.cpu);
+ process_portal_unmap(&map.addr);
+ return -EBUSY;
+ }
+
+ /* Set the IRQ number */
+ irq_map.type = dpaa_portal_bman;
+ irq_map.portal_cinh = map.addr.cinh;
+ process_portal_irq_map(bmfd, &irq_map);
+ return 0;
+}
+
+static int fsl_bman_portal_finish(void)
+{
+ __maybe_unused const struct bm_portal_config *cfg;
+ int ret;
+
+ process_portal_irq_unmap(bmfd);
+
+ cfg = bman_destroy_affine_portal();
+ DPAA_BUG_ON(cfg != &pcfg);
+ ret = process_portal_unmap(&map.addr);
+ if (ret)
+ error(0, ret, "process_portal_unmap()");
+ return ret;
+}
+
+int bman_thread_fd(void)
+{
+ return bmfd;
+}
+
+int bman_thread_init(void)
+{
+ /* Convert from contiguous/virtual cpu numbering to real cpu when
+ * calling into the code that is dependent on the device naming.
+ */
+ return fsl_bman_portal_init(QBMAN_ANY_PORTAL_IDX, 0);
+}
+
+int bman_thread_finish(void)
+{
+ return fsl_bman_portal_finish();
+}
+
+void bman_thread_irq(void)
+{
+ qbman_invoke_irq(pcfg.irq);
+ /* Now we need to uninhibit interrupts. This is the only code outside
+ * the regular portal driver that manipulates any portal register, so
+ * rather than breaking that encapsulation I am simply hard-coding the
+ * offset to the inhibit register here.
+ */
+ out_be32(pcfg.addr_virt[DPAA_PORTAL_CI] + 0xe0c, 0);
+}
+
+int bman_init_ccsr(const struct device_node *node)
+{
+ static int ccsr_map_fd;
+ uint64_t phys_addr;
+ const uint32_t *bman_addr;
+ uint64_t regs_size;
+
+ bman_addr = of_get_address(node, 0, &regs_size, NULL);
+ if (!bman_addr) {
+ pr_err("of_get_address cannot return BMan address");
+ return -EINVAL;
+ }
+ phys_addr = of_translate_address(node, bman_addr);
+ if (!phys_addr) {
+ pr_err("of_translate_address failed");
+ return -EINVAL;
+ }
+
+ ccsr_map_fd = open(BMAN_CCSR_MAP, O_RDWR);
+ if (unlikely(ccsr_map_fd < 0)) {
+ pr_err("Can not open /dev/mem for BMan CCSR map");
+ return ccsr_map_fd;
+ }
+
+ bman_ccsr_map = mmap(NULL, regs_size, PROT_READ |
+ PROT_WRITE, MAP_SHARED, ccsr_map_fd, phys_addr);
+ if (bman_ccsr_map == MAP_FAILED) {
+ pr_err("Can not map BMan CCSR base Bman: "
+ "0x%x Phys: 0x%" PRIx64 " size 0x%" PRIu64,
+ *bman_addr, phys_addr, regs_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int bman_global_init(void)
+{
+ const struct device_node *dt_node;
+ static int done;
+
+ if (done)
+ return -EBUSY;
+ /* Use the device-tree to determine IP revision until something better
+ * is devised.
+ */
+ dt_node = of_find_compatible_node(NULL, NULL, "fsl,bman-portal");
+ if (!dt_node) {
+ pr_err("No bman portals available for any CPU\n");
+ return -ENODEV;
+ }
+ if (of_device_is_compatible(dt_node, "fsl,bman-portal-1.0") ||
+ of_device_is_compatible(dt_node, "fsl,bman-portal-1.0.0")) {
+ bman_ip_rev = BMAN_REV10;
+ bman_pool_max = 64;
+ } else if (of_device_is_compatible(dt_node, "fsl,bman-portal-2.0") ||
+ of_device_is_compatible(dt_node, "fsl,bman-portal-2.0.8")) {
+ bman_ip_rev = BMAN_REV20;
+ bman_pool_max = 8;
+ } else if (of_device_is_compatible(dt_node, "fsl,bman-portal-2.1.0") ||
+ of_device_is_compatible(dt_node, "fsl,bman-portal-2.1.1") ||
+ of_device_is_compatible(dt_node, "fsl,bman-portal-2.1.2") ||
+ of_device_is_compatible(dt_node, "fsl,bman-portal-2.1.3")) {
+ bman_ip_rev = BMAN_REV21;
+ bman_pool_max = 64;
+ } else {
+ pr_warn("unknown BMan version in portal node,default "
+ "to rev1.0");
+ bman_ip_rev = BMAN_REV10;
+ bman_pool_max = 64;
+ }
+
+ if (!bman_ip_rev) {
+ pr_err("Unknown bman portal version\n");
+ return -ENODEV;
+ }
+ {
+ const struct device_node *dn = of_find_compatible_node(NULL,
+ NULL, "fsl,bman");
+ if (!dn)
+ pr_err("No bman device node available");
+
+ if (bman_init_ccsr(dn))
+ pr_err("BMan CCSR map failed.");
+ }
+
+ done = 1;
+ return 0;
+}
+
+#define BMAN_POOL_CONTENT(n) (0x0600 + ((n) * 0x04))
+u32 bm_pool_free_buffers(u32 bpid)
+{
+ return in_be32(bman_ccsr_map + BMAN_POOL_CONTENT(bpid));
+}
+
+static u32 __generate_thresh(u32 val, int roundup)
+{
+ u32 e = 0; /* exponent; 'val' becomes the 8-bit coefficient */
+ int oddbit = 0;
+
+ while (val > 0xff) {
+ oddbit = val & 1;
+ val >>= 1;
+ e++;
+ if (roundup && oddbit)
+ val++;
+ }
+ DPAA_ASSERT(e < 0x10);
+ return (val | (e << 8));
+}
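+
+/* Worked example (added): __generate_thresh(1000, 0) halves 1000 -> 500 ->
+ * 250 with e = 2 and returns 0x2fa (250 | 2 << 8), encoding the threshold as
+ * an 8-bit coefficient scaled by 2^e (250 * 4 = 1000 exactly). With roundup
+ * set, any odd bit shifted out bumps the coefficient so the encoded value
+ * never undershoots the request.
+ */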
+
+#define POOL_SWDET(n) (0x0000 + ((n) * 0x04))
+#define POOL_HWDET(n) (0x0100 + ((n) * 0x04))
+#define POOL_SWDXT(n) (0x0200 + ((n) * 0x04))
+#define POOL_HWDXT(n) (0x0300 + ((n) * 0x04))
+int bm_pool_set(u32 bpid, const u32 *thresholds)
+{
+ if (!bman_ccsr_map)
+ return -ENODEV;
+ if (bpid >= bman_pool_max)
+ return -EINVAL;
+ out_be32(bman_ccsr_map + POOL_SWDET(bpid),
+ __generate_thresh(thresholds[0], 0));
+ out_be32(bman_ccsr_map + POOL_SWDXT(bpid),
+ __generate_thresh(thresholds[1], 1));
+ out_be32(bman_ccsr_map + POOL_HWDET(bpid),
+ __generate_thresh(thresholds[2], 0));
+ out_be32(bman_ccsr_map + POOL_HWDXT(bpid),
+ __generate_thresh(thresholds[3], 1));
+ return 0;
+}
+
+#define BMAN_LOW_DEFAULT_THRESH 0x40
+#define BMAN_HIGH_DEFAULT_THRESH 0x80
+int bm_pool_set_hw_threshold(u32 bpid, const u32 low_thresh,
+ const u32 high_thresh)
+{
+ if (!bman_ccsr_map)
+ return -ENODEV;
+ if (bpid >= bman_pool_max)
+ return -EINVAL;
+ if (low_thresh && high_thresh) {
+ out_be32(bman_ccsr_map + POOL_HWDET(bpid),
+ __generate_thresh(low_thresh, 0));
+ out_be32(bman_ccsr_map + POOL_HWDXT(bpid),
+ __generate_thresh(high_thresh, 1));
+ } else {
+ out_be32(bman_ccsr_map + POOL_HWDET(bpid),
+ __generate_thresh(BMAN_LOW_DEFAULT_THRESH, 0));
+ out_be32(bman_ccsr_map + POOL_HWDXT(bpid),
+ __generate_thresh(BMAN_HIGH_DEFAULT_THRESH, 1));
+ }
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman_priv.h b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman_priv.h
new file mode 100644
index 000000000..5a3e330d9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/bman_priv.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#ifndef __BMAN_PRIV_H
+#define __BMAN_PRIV_H
+
+#include "dpaa_sys.h"
+#include <fsl_bman.h>
+
+/* Revision info (for errata and feature handling) */
+#define BMAN_REV10 0x0100
+#define BMAN_REV20 0x0200
+#define BMAN_REV21 0x0201
+
+#define BMAN_PORTAL_IRQ_PATH "/dev/fsl-usdpaa-irq"
+#define BMAN_CCSR_MAP "/dev/mem"
+
+/* This mask contains all the "irqsource" bits visible to API users */
+#define BM_PIRQ_VISIBLE (BM_PIRQ_RCRI | BM_PIRQ_BSCN)
+
+/* These are bm_<reg>_<verb>(). So for example, bm_disable_write() means "write
+ * the disable register" rather than "disable the ability to write".
+ */
+#define bm_isr_status_read(bm) __bm_isr_read(bm, bm_isr_status)
+#define bm_isr_status_clear(bm, m) __bm_isr_write(bm, bm_isr_status, m)
+#define bm_isr_enable_read(bm) __bm_isr_read(bm, bm_isr_enable)
+#define bm_isr_enable_write(bm, v) __bm_isr_write(bm, bm_isr_enable, v)
+#define bm_isr_disable_read(bm) __bm_isr_read(bm, bm_isr_disable)
+#define bm_isr_disable_write(bm, v) __bm_isr_write(bm, bm_isr_disable, v)
+#define bm_isr_inhibit(bm) __bm_isr_write(bm, bm_isr_inhibit, 1)
+#define bm_isr_uninhibit(bm) __bm_isr_write(bm, bm_isr_inhibit, 0)
+
+/*
+ * Maximum number of buffer pools supported by this BMan version
+ */
+extern u16 bman_pool_max;
+
+/* used by CCSR and portal interrupt code */
+enum bm_isr_reg {
+ bm_isr_status = 0,
+ bm_isr_enable = 1,
+ bm_isr_disable = 2,
+ bm_isr_inhibit = 3
+};
+
+struct bm_portal_config {
+ /*
+ * Corenet portal addresses;
+ * [0]==cache-enabled, [1]==cache-inhibited.
+ */
+ void __iomem *addr_virt[2];
+ /* Allow these to be joined in lists */
+ struct list_head list;
+ /* User-visible portal configuration settings */
+ /* This is used for any "core-affine" portals, ie. default portals
+ * associated to the corresponding cpu. -1 implies that there is no
+ * core affinity configured.
+ */
+ int cpu;
+ /* portal interrupt line */
+ int irq;
+ /* the unique index of this portal */
+ u32 index;
+ /* Is this portal shared? (If so, it has coarser locking and demuxes
+ * processing on behalf of other CPUs.)
+ */
+ int is_shared;
+ /* These are the buffer pool IDs that may be used via this portal. */
+ struct bman_depletion mask;
+};
+
+int bman_init_ccsr(const struct device_node *node);
+
+struct bman_portal *bman_create_affine_portal(
+ const struct bm_portal_config *config);
+const struct bm_portal_config *bman_destroy_affine_portal(void);
+
+/* Set depletion thresholds associated with a buffer pool. Requires that the
+ * operating system have access to Bman CCSR (ie. compiled in support and
+ * run-time access courtesy of the device-tree).
+ */
+int bm_pool_set(u32 bpid, const u32 *thresholds);
+
+/* Read the free buffer count for a given buffer pool */
+u32 bm_pool_free_buffers(u32 bpid);
+
+#endif /* __BMAN_PRIV_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/dpaa_alloc.c b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/dpaa_alloc.c
new file mode 100644
index 000000000..a05803c23
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/dpaa_alloc.c
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2009-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#include "dpaa_sys.h"
+#include <process.h>
+#include <fsl_qman.h>
+#include <fsl_bman.h>
+
+int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial)
+{
+ return process_alloc(dpaa_id_bpid, result, count, align, partial);
+}
+
+void bman_release_bpid_range(u32 bpid, u32 count)
+{
+ process_release(dpaa_id_bpid, bpid, count);
+}
+
+int bman_reserve_bpid_range(u32 bpid, u32 count)
+{
+ return process_reserve(dpaa_id_bpid, bpid, count);
+}
+
+int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial)
+{
+ return process_alloc(dpaa_id_fqid, result, count, align, partial);
+}
+
+void qman_release_fqid_range(u32 fqid, u32 count)
+{
+ process_release(dpaa_id_fqid, fqid, count);
+}
+
+int qman_reserve_fqid_range(u32 fqid, unsigned int count)
+{
+ return process_reserve(dpaa_id_fqid, fqid, count);
+}
+
+int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial)
+{
+ return process_alloc(dpaa_id_qpool, result, count, align, partial);
+}
+
+void qman_release_pool_range(u32 pool, u32 count)
+{
+ process_release(dpaa_id_qpool, pool, count);
+}
+
+int qman_reserve_pool_range(u32 pool, u32 count)
+{
+ return process_reserve(dpaa_id_qpool, pool, count);
+}
+
+int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial)
+{
+ return process_alloc(dpaa_id_cgrid, result, count, align, partial);
+}
+
+void qman_release_cgrid_range(u32 cgrid, u32 count)
+{
+ process_release(dpaa_id_cgrid, cgrid, count);
+}
+
+int qman_reserve_cgrid_range(u32 cgrid, u32 count)
+{
+ return process_reserve(dpaa_id_cgrid, cgrid, count);
+}
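+
+/* Usage sketch (added, illustrative): each allocator fills the caller's array
+ * with 'count' consecutive IDs and returns how many were granted, so the
+ * result buffer must hold at least 'count' entries. A hypothetical caller
+ * wanting four 4-aligned FQIDs might do:
+ *
+ *	u32 fqids[4];
+ *	int num = qman_alloc_fqid_range(fqids, 4, 4, 0);
+ *
+ *	if (num == 4) {
+ *		... use fqids[0] .. fqids[3] ...
+ *		qman_release_fqid_range(fqids[0], num);
+ *	}
+ */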
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/dpaa_sys.c b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/dpaa_sys.c
new file mode 100644
index 000000000..9d6bfd40a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/dpaa_sys.c
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#include <process.h>
+#include "dpaa_sys.h"
+
+struct process_interrupt {
+ int irq;
+ irqreturn_t (*isr)(int irq, void *arg);
+ unsigned long flags;
+ const char *name;
+ void *arg;
+ struct list_head node;
+};
+
+static COMPAT_LIST_HEAD(process_irq_list);
+static pthread_mutex_t process_irq_lock = PTHREAD_MUTEX_INITIALIZER;
+
+static void process_interrupt_install(struct process_interrupt *irq)
+{
+ int ret;
+ /* Add the irq to the end of the list */
+ ret = pthread_mutex_lock(&process_irq_lock);
+ assert(!ret);
+ list_add_tail(&irq->node, &process_irq_list);
+ ret = pthread_mutex_unlock(&process_irq_lock);
+ assert(!ret);
+}
+
+static void process_interrupt_remove(struct process_interrupt *irq)
+{
+ int ret;
+
+ ret = pthread_mutex_lock(&process_irq_lock);
+ assert(!ret);
+ list_del(&irq->node);
+ ret = pthread_mutex_unlock(&process_irq_lock);
+ assert(!ret);
+}
+
+static struct process_interrupt *process_interrupt_find(int irq_num)
+{
+ int ret;
+ struct process_interrupt *i = NULL;
+
+ ret = pthread_mutex_lock(&process_irq_lock);
+ assert(!ret);
+ list_for_each_entry(i, &process_irq_list, node) {
+ if (i->irq == irq_num)
+ goto done;
+ }
+done:
+ ret = pthread_mutex_unlock(&process_irq_lock);
+ assert(!ret);
+ return i;
+}
+
+/* This is the interface from the platform-agnostic driver code to (de)register
+ * interrupt handlers. We simply create/destroy corresponding structs.
+ */
+int qbman_request_irq(int irq, irqreturn_t (*isr)(int irq, void *arg),
+ unsigned long flags, const char *name,
+ void *arg __maybe_unused)
+{
+ struct process_interrupt *irq_node =
+ kmalloc(sizeof(*irq_node), GFP_KERNEL);
+
+ if (!irq_node)
+ return -ENOMEM;
+ irq_node->irq = irq;
+ irq_node->isr = isr;
+ irq_node->flags = flags;
+ irq_node->name = name;
+ irq_node->arg = arg;
+ process_interrupt_install(irq_node);
+ return 0;
+}
+
+int qbman_free_irq(int irq, __maybe_unused void *arg)
+{
+ struct process_interrupt *irq_node = process_interrupt_find(irq);
+
+ if (!irq_node)
+ return -EINVAL;
+ process_interrupt_remove(irq_node);
+ kfree(irq_node);
+ return 0;
+}
+
+/* This is the interface from the platform-specific driver code to obtain
+ * interrupt handlers that have been registered.
+ */
+void qbman_invoke_irq(int irq)
+{
+ struct process_interrupt *irq_node = process_interrupt_find(irq);
+
+ if (irq_node)
+ irq_node->isr(irq, irq_node->arg);
+}
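+
+/* Usage sketch (added, illustrative; 'my_isr' and "my-portal" are
+ * hypothetical): the portal drivers register a handler against the portal's
+ * fd-based "irq number" and later have it dispatched when the kernel signals
+ * that fd:
+ *
+ *	static irqreturn_t my_isr(int irq, void *arg) { ... }
+ *
+ *	qbman_request_irq(pcfg.irq, my_isr, 0, "my-portal", portal);
+ *	qbman_invoke_irq(pcfg.irq);  (runs my_isr(pcfg.irq, portal))
+ *	qbman_free_irq(pcfg.irq, portal);
+ */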
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/dpaa_sys.h b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/dpaa_sys.h
new file mode 100644
index 000000000..9377738df
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/dpaa_sys.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#ifndef __DPAA_SYS_H
+#define __DPAA_SYS_H
+
+#include <compat.h>
+#include <dpaa_of.h>
+
+/* For 2-element tables related to cache-inhibited and cache-enabled mappings */
+#define DPAA_PORTAL_CE 0
+#define DPAA_PORTAL_CI 1
+
+#define DPAA_ASSERT(x) RTE_ASSERT(x)
+
+/* This is the interface from the platform-agnostic driver code to (de)register
+ * interrupt handlers. We simply create/destroy corresponding structs.
+ */
+int qbman_request_irq(int irq, irqreturn_t (*isr)(int irq, void *arg),
+ unsigned long flags, const char *name, void *arg);
+int qbman_free_irq(int irq, void *arg);
+
+void qbman_invoke_irq(int irq);
+
+#endif /* __DPAA_SYS_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/process.c b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/process.c
new file mode 100644
index 000000000..2c23c98df
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/process.c
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2011-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+#include <assert.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+
+#include "process.h"
+
+#include <fsl_usd.h>
+
+/* As higher-level drivers will be built on top of this (dma_mem, qbman, ...),
+ * it's preferable that the process driver itself not provide any exported API.
+ * As such, combined with the fact that none of these operations are
+ * performance critical, it is justified to use lazy initialisation, so that's
+ * what the lock is for.
+ */
+static int fd = -1;
+static pthread_mutex_t fd_init_lock = PTHREAD_MUTEX_INITIALIZER;
+
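+/* Note (added, descriptive): the fd is tested once without the lock as a fast
+ * path, then re-tested under the lock so that two racing threads cannot both
+ * open PROCESS_PATH; the loser of the race simply finds fd already >= 0.
+ */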
+static int check_fd(void)
+{
+ int ret;
+
+ if (fd >= 0)
+ return 0;
+ ret = pthread_mutex_lock(&fd_init_lock);
+ assert(!ret);
+ /* check again with the lock held */
+ if (fd < 0)
+ fd = open(PROCESS_PATH, O_RDWR);
+ ret = pthread_mutex_unlock(&fd_init_lock);
+ assert(!ret);
+ return (fd >= 0) ? 0 : -ENODEV;
+}
+
+#define DPAA_IOCTL_MAGIC 'u'
+struct dpaa_ioctl_id_alloc {
+ uint32_t base; /* Return value, the start of the allocated range */
+ enum dpaa_id_type id_type; /* what kind of resource(s) to allocate */
+ uint32_t num; /* how many IDs to allocate (and return value) */
+ uint32_t align; /* must be a power of 2, 0 is treated like 1 */
+ int partial; /* whether to allow less than 'num' */
+};
+
+struct dpaa_ioctl_id_release {
+ /* Input */
+ enum dpaa_id_type id_type;
+ uint32_t base;
+ uint32_t num;
+};
+
+struct dpaa_ioctl_id_reserve {
+ enum dpaa_id_type id_type;
+ uint32_t base;
+ uint32_t num;
+};
+
+#define DPAA_IOCTL_ID_ALLOC \
+ _IOWR(DPAA_IOCTL_MAGIC, 0x01, struct dpaa_ioctl_id_alloc)
+#define DPAA_IOCTL_ID_RELEASE \
+ _IOW(DPAA_IOCTL_MAGIC, 0x02, struct dpaa_ioctl_id_release)
+#define DPAA_IOCTL_ID_RESERVE \
+ _IOW(DPAA_IOCTL_MAGIC, 0x0A, struct dpaa_ioctl_id_reserve)
+
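+/* Note (added, descriptive): on success the kernel returns one contiguous
+ * range starting at id.base; process_alloc() then writes each ID out
+ * individually, so 'base' must point to at least 'num' u32 slots.
+ */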
+int process_alloc(enum dpaa_id_type id_type, uint32_t *base, uint32_t num,
+ uint32_t align, int partial)
+{
+ struct dpaa_ioctl_id_alloc id = {
+ .id_type = id_type,
+ .num = num,
+ .align = align,
+ .partial = partial
+ };
+ int ret = check_fd();
+
+ if (ret)
+ return ret;
+ ret = ioctl(fd, DPAA_IOCTL_ID_ALLOC, &id);
+ if (ret)
+ return ret;
+ for (ret = 0; ret < (int)id.num; ret++)
+ base[ret] = id.base + ret;
+ return id.num;
+}
+
+void process_release(enum dpaa_id_type id_type, uint32_t base, uint32_t num)
+{
+ struct dpaa_ioctl_id_release id = {
+ .id_type = id_type,
+ .base = base,
+ .num = num
+ };
+ int ret = check_fd();
+
+ if (ret) {
+ fprintf(stderr, "Process FD failure\n");
+ return;
+ }
+ ret = ioctl(fd, DPAA_IOCTL_ID_RELEASE, &id);
+ if (ret)
+ fprintf(stderr, "Process FD ioctl failure type %d base 0x%x num %d\n",
+ id_type, base, num);
+}
+
+int process_reserve(enum dpaa_id_type id_type, uint32_t base, uint32_t num)
+{
+ struct dpaa_ioctl_id_reserve id = {
+ .id_type = id_type,
+ .base = base,
+ .num = num
+ };
+ int ret = check_fd();
+
+ if (ret)
+ return ret;
+ return ioctl(fd, DPAA_IOCTL_ID_RESERVE, &id);
+}
+
+/***************************************/
+/* Mapping and using QMan/BMan portals */
+/***************************************/
+
+#define DPAA_IOCTL_PORTAL_MAP \
+ _IOWR(DPAA_IOCTL_MAGIC, 0x07, struct dpaa_ioctl_portal_map)
+#define DPAA_IOCTL_PORTAL_UNMAP \
+ _IOW(DPAA_IOCTL_MAGIC, 0x08, struct dpaa_portal_map)
+
+int process_portal_map(struct dpaa_ioctl_portal_map *params)
+{
+ int ret = check_fd();
+
+ if (ret)
+ return ret;
+
+ ret = ioctl(fd, DPAA_IOCTL_PORTAL_MAP, params);
+ if (ret) {
+ perror("ioctl(DPAA_IOCTL_PORTAL_MAP)");
+ return ret;
+ }
+ return 0;
+}
+
+int process_portal_unmap(struct dpaa_portal_map *map)
+{
+ int ret = check_fd();
+
+ if (ret)
+ return ret;
+
+ ret = ioctl(fd, DPAA_IOCTL_PORTAL_UNMAP, map);
+ if (ret) {
+ perror("ioctl(DPAA_IOCTL_PORTAL_UNMAP)");
+ return ret;
+ }
+ return 0;
+}
+
+#define DPAA_IOCTL_PORTAL_IRQ_MAP \
+ _IOW(DPAA_IOCTL_MAGIC, 0x09, struct dpaa_ioctl_irq_map)
+
+int process_portal_irq_map(int ifd, struct dpaa_ioctl_irq_map *map)
+{
+ map->fd = fd;
+ return ioctl(ifd, DPAA_IOCTL_PORTAL_IRQ_MAP, map);
+}
+
+int process_portal_irq_unmap(int ifd)
+{
+ return close(ifd);
+}
+
+struct dpaa_ioctl_raw_portal {
+ /* inputs */
+ enum dpaa_portal_type type; /* Type of portal to allocate */
+
+ uint8_t enable_stash; /* set to non-zero to turn on stashing */
+ /* Stashing attributes for the portal */
+ uint32_t cpu;
+ uint32_t cache;
+ uint32_t window;
+ /* Specifies the stash request queue this portal should use */
+ uint8_t sdest;
+
+ /* Specifies a specific portal index to map, or QBMAN_ANY_PORTAL_IDX
+ * for don't care. The portal index will be populated by the
+ * driver when the ioctl() successfully completes.
+ */
+ uint32_t index;
+
+ /* outputs */
+ uint64_t cinh;
+ uint64_t cena;
+};
+
+#define DPAA_IOCTL_ALLOC_RAW_PORTAL \
+ _IOWR(DPAA_IOCTL_MAGIC, 0x0C, struct dpaa_ioctl_raw_portal)
+
+#define DPAA_IOCTL_FREE_RAW_PORTAL \
+ _IOR(DPAA_IOCTL_MAGIC, 0x0D, struct dpaa_ioctl_raw_portal)
+
+static int process_portal_allocate(struct dpaa_ioctl_raw_portal *portal)
+{
+ int ret = check_fd();
+
+ if (ret)
+ return ret;
+
+ ret = ioctl(fd, DPAA_IOCTL_ALLOC_RAW_PORTAL, portal);
+ if (ret) {
+ perror("ioctl(DPAA_IOCTL_ALLOC_RAW_PORTAL)");
+ return ret;
+ }
+ return 0;
+}
+
+static int process_portal_free(struct dpaa_ioctl_raw_portal *portal)
+{
+ int ret = check_fd();
+
+ if (ret)
+ return ret;
+
+ ret = ioctl(fd, DPAA_IOCTL_FREE_RAW_PORTAL, portal);
+ if (ret) {
+ perror("ioctl(DPAA_IOCTL_FREE_RAW_PORTAL)");
+ return ret;
+ }
+ return 0;
+}
+
+int qman_allocate_raw_portal(struct dpaa_raw_portal *portal)
+{
+ struct dpaa_ioctl_raw_portal input;
+ int ret;
+
+ input.type = dpaa_portal_qman;
+ input.index = portal->index;
+ input.enable_stash = portal->enable_stash;
+ input.cpu = portal->cpu;
+ input.cache = portal->cache;
+ input.window = portal->window;
+ input.sdest = portal->sdest;
+
+ ret = process_portal_allocate(&input);
+ if (ret)
+ return ret;
+ portal->index = input.index;
+ portal->cinh = input.cinh;
+ portal->cena = input.cena;
+ return 0;
+}
+
+int qman_free_raw_portal(struct dpaa_raw_portal *portal)
+{
+ struct dpaa_ioctl_raw_portal input;
+
+ input.type = dpaa_portal_qman;
+ input.index = portal->index;
+ input.cinh = portal->cinh;
+ input.cena = portal->cena;
+
+ return process_portal_free(&input);
+}
+
+int bman_allocate_raw_portal(struct dpaa_raw_portal *portal)
+{
+ struct dpaa_ioctl_raw_portal input;
+ int ret;
+
+ input.type = dpaa_portal_bman;
+ input.index = portal->index;
+ input.enable_stash = 0;
+
+ ret = process_portal_allocate(&input);
+ if (ret)
+ return ret;
+ portal->index = input.index;
+ portal->cinh = input.cinh;
+ portal->cena = input.cena;
+ return 0;
+}
+
+int bman_free_raw_portal(struct dpaa_raw_portal *portal)
+{
+ struct dpaa_ioctl_raw_portal input;
+
+ input.type = dpaa_portal_bman;
+ input.index = portal->index;
+ input.cinh = portal->cinh;
+ input.cena = portal->cena;
+
+ return process_portal_free(&input);
+}
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman.c b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman.c
new file mode 100644
index 000000000..b596e79c2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman.c
@@ -0,0 +1,2852 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2017,2019 NXP
+ *
+ */
+
+#include "qman.h"
+#include <rte_branch_prediction.h>
+#include <rte_dpaa_bus.h>
+#include <rte_eventdev.h>
+#include <rte_byteorder.h>
+
+#include <dpaa_bits.h>
+
+/* Compilation constants */
+#define DQRR_MAXFILL 15
+#define EQCR_ITHRESH 4 /* if EQCR congests, interrupt threshold */
+#define IRQNAME "QMan portal %d"
+#define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */
+/* maximum number of DQRR entries to process in qman_poll() */
+#define FSL_QMAN_POLL_LIMIT 8
+
+/* Lock/unlock frame queues, subject to the "LOCKED" flag. This is about
+ * inter-processor locking only. Note, FQLOCK() is always called either under a
+ * local_irq_save() or from interrupt context - hence there's no need for irq
+ * protection (and indeed, attempting to nest irq-protection doesn't work, as
+ * the "irq en/disable" machinery isn't recursive...).
+ */
+#define FQLOCK(fq) \
+ do { \
+ struct qman_fq *__fq478 = (fq); \
+ if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
+ spin_lock(&__fq478->fqlock); \
+ } while (0)
+#define FQUNLOCK(fq) \
+ do { \
+ struct qman_fq *__fq478 = (fq); \
+ if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
+ spin_unlock(&__fq478->fqlock); \
+ } while (0)
+
+static inline void fq_set(struct qman_fq *fq, u32 mask)
+{
+ dpaa_set_bits(mask, &fq->flags);
+}
+
+static inline void fq_clear(struct qman_fq *fq, u32 mask)
+{
+ dpaa_clear_bits(mask, &fq->flags);
+}
+
+static inline int fq_isset(struct qman_fq *fq, u32 mask)
+{
+ return fq->flags & mask;
+}
+
+static inline int fq_isclear(struct qman_fq *fq, u32 mask)
+{
+ return !(fq->flags & mask);
+}
+
+struct qman_portal {
+ struct qm_portal p;
+ /* PORTAL_BITS_*** - dynamic, strictly internal */
+ unsigned long bits;
+ /* interrupt sources processed by portal_isr(), configurable */
+ unsigned long irq_sources;
+ u32 use_eqcr_ci_stashing;
+ u32 slowpoll; /* only used when interrupts are off */
+ /* only 1 volatile dequeue at a time */
+ struct qman_fq *vdqcr_owned;
+ u32 sdqcr;
+ int dqrr_disable_ref;
+ /* A portal-specific handler for DCP ERNs. If this is NULL, the global
+ * handler is called instead.
+ */
+ qman_cb_dc_ern cb_dc_ern;
+ /* When the cpu-affine portal is activated, this is non-NULL */
+ const struct qm_portal_config *config;
+ struct dpa_rbtree retire_table;
+ char irqname[MAX_IRQNAME];
+ /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
+ struct qman_cgrs *cgrs;
+ /* linked-list of CSCN handlers. */
+ struct list_head cgr_cbs;
+ /* list lock */
+ spinlock_t cgr_lock;
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ /* Keep a shadow copy of the DQRR on LE systems, as the SW needs to
+ * byte-swap the read-only DQRR memory. The first entry must be aligned
+ * to 2^10 so that DQRR index calculations can be based on the shadow
+ * copy address (6 bits for the address shift + 4 bits for the DQRR
+ * size).
+ */
+ struct qm_dqrr_entry shadow_dqrr[QM_DQRR_SIZE]
+ __rte_aligned(1024);
+#endif
+};
+
+/* Global handler for DCP ERNs. Used when the portal receiving the message does
+ * not have a portal-specific handler.
+ */
+static qman_cb_dc_ern cb_dc_ern;
+
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+static u16 affine_channels[NR_CPUS];
+static RTE_DEFINE_PER_LCORE(struct qman_portal, qman_affine_portal);
+
+static inline struct qman_portal *get_affine_portal(void)
+{
+ return &RTE_PER_LCORE(qman_affine_portal);
+}
+
+/* This gives a FQID->FQ lookup to cover the fact that we can't directly demux
+ * retirement notifications (the fact they are sometimes h/w-consumed means that
+ * contextB isn't always a s/w demux - and as we can't know which case it is
+ * when looking at the notification, we have to use the slow lookup for all of
+ * them). NB, it's possible to have multiple FQ objects refer to the same FQID
+ * (though at most one of them should be the consumer), so this table isn't for
+ * all FQs - FQs are added when retirement commands are issued, and removed when
+ * they complete, which also massively reduces the size of this table.
+ */
+IMPLEMENT_DPAA_RBTREE(fqtree, struct qman_fq, node, fqid);
+/*
+ * This is what everything can wait on, even if it migrates to a different cpu
+ * to the one whose affine portal it is waiting on.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
+
+static inline int table_push_fq(struct qman_portal *p, struct qman_fq *fq)
+{
+ int ret = fqtree_push(&p->retire_table, fq);
+
+ if (ret)
+ pr_err("ERROR: double FQ-retirement %d\n", fq->fqid);
+ return ret;
+}
+
+static inline void table_del_fq(struct qman_portal *p, struct qman_fq *fq)
+{
+ fqtree_del(&p->retire_table, fq);
+}
+
+static inline struct qman_fq *table_find_fq(struct qman_portal *p, u32 fqid)
+{
+ return fqtree_find(&p->retire_table, fqid);
+}
+
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+static void **qman_fq_lookup_table;
+static size_t qman_fq_lookup_table_size;
+
+int qman_setup_fq_lookup_table(size_t num_entries)
+{
+ /* Allocate 1 more entry since the first entry is not used */
+ num_entries++;
+ qman_fq_lookup_table = vmalloc((num_entries * sizeof(void *)));
+ if (!qman_fq_lookup_table) {
+ pr_err("QMan: Could not allocate fq lookup table\n");
+ return -ENOMEM;
+ }
+ memset(qman_fq_lookup_table, 0, num_entries * sizeof(void *));
+ qman_fq_lookup_table_size = num_entries;
+ pr_debug("QMan: Allocated lookup table at %p, entry count %lu\n",
+ qman_fq_lookup_table,
+ (unsigned long)qman_fq_lookup_table_size);
+ return 0;
+}
+
+void qman_set_fq_lookup_table(void **fq_table)
+{
+ qman_fq_lookup_table = fq_table;
+}
+
+/* Lock protecting the global fq object lookup table */
+static DEFINE_SPINLOCK(fq_hash_table_lock);
+
+static int find_empty_fq_table_entry(u32 *entry, struct qman_fq *fq)
+{
+ u32 i;
+
+ spin_lock(&fq_hash_table_lock);
+ /* Can't use index zero because this has special meaning
+ * in the context_b field.
+ */
+ for (i = 1; i < qman_fq_lookup_table_size; i++) {
+ if (qman_fq_lookup_table[i] == NULL) {
+ *entry = i;
+ qman_fq_lookup_table[i] = fq;
+ spin_unlock(&fq_hash_table_lock);
+ return 0;
+ }
+ }
+ spin_unlock(&fq_hash_table_lock);
+ return -ENOMEM;
+}
+
+static void clear_fq_table_entry(u32 entry)
+{
+ spin_lock(&fq_hash_table_lock);
+ DPAA_BUG_ON(entry >= qman_fq_lookup_table_size);
+ qman_fq_lookup_table[entry] = NULL;
+ spin_unlock(&fq_hash_table_lock);
+}
+
+static inline struct qman_fq *get_fq_table_entry(u32 entry)
+{
+ DPAA_BUG_ON(entry >= qman_fq_lookup_table_size);
+ return qman_fq_lookup_table[entry];
+}
+#endif
+
+static inline void cpu_to_hw_fqd(struct qm_fqd *fqd)
+{
+ /* Byteswap the FQD to HW format */
+ fqd->fq_ctrl = cpu_to_be16(fqd->fq_ctrl);
+ fqd->dest_wq = cpu_to_be16(fqd->dest_wq);
+ fqd->ics_cred = cpu_to_be16(fqd->ics_cred);
+ fqd->context_b = cpu_to_be32(fqd->context_b);
+ fqd->context_a.opaque = cpu_to_be64(fqd->context_a.opaque);
+ fqd->opaque_td = cpu_to_be16(fqd->opaque_td);
+}
+
+static inline void hw_fqd_to_cpu(struct qm_fqd *fqd)
+{
+ /* Byteswap the FQD to CPU format */
+ fqd->fq_ctrl = be16_to_cpu(fqd->fq_ctrl);
+ fqd->dest_wq = be16_to_cpu(fqd->dest_wq);
+ fqd->ics_cred = be16_to_cpu(fqd->ics_cred);
+ fqd->context_b = be32_to_cpu(fqd->context_b);
+ fqd->context_a.opaque = be64_to_cpu(fqd->context_a.opaque);
+}
+
+static inline void cpu_to_hw_fd(struct qm_fd *fd)
+{
+ fd->addr = cpu_to_be40(fd->addr);
+ fd->status = cpu_to_be32(fd->status);
+ fd->opaque = cpu_to_be32(fd->opaque);
+}
+
+static inline void hw_fd_to_cpu(struct qm_fd *fd)
+{
+ fd->addr = be40_to_cpu(fd->addr);
+ fd->status = be32_to_cpu(fd->status);
+ fd->opaque = be32_to_cpu(fd->opaque);
+}
+
+/* In the case that slow- and fast-path handling are both done by qman_poll()
+ * (ie. because there is no interrupt handling), we ought to balance how often
+ * we do the fast-path poll versus the slow-path poll. We'll use two decrementer
+ * sources, so we call the fast poll 'n' times before calling the slow poll
+ * once. The idle decrementer constant is used when the last slow-poll detected
+ * no work to do, and the busy decrementer constant when the last slow-poll had
+ * work to do.
+ */
+#define SLOW_POLL_IDLE 1000
+#define SLOW_POLL_BUSY 10
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+ unsigned int poll_limit);
+
+/* Portal interrupt handler */
+static irqreturn_t portal_isr(__always_unused int irq, void *ptr)
+{
+ struct qman_portal *p = ptr;
+ /*
+ * The CSCI/CCSCI source is cleared inside __poll_portal_slow(), because
+ * it could race against a Query Congestion State command also given
+ * as part of the handling of this interrupt source. We mustn't
+ * clear it a second time in this top-level function.
+ */
+ u32 clear = QM_DQAVAIL_MASK | (p->irq_sources &
+ ~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI));
+ u32 is = qm_isr_status_read(&p->p) & p->irq_sources;
+ /* DQRR-handling if it's interrupt-driven */
+ if (is & QM_PIRQ_DQRI)
+ __poll_portal_fast(p, FSL_QMAN_POLL_LIMIT);
+ /* Handling of anything else that's interrupt-driven */
+ clear |= __poll_portal_slow(p, is);
+ qm_isr_status_clear(&p->p, clear);
+ return IRQ_HANDLED;
+}
+
+/* This inner version is used privately by qman_create_affine_portal(), as well
+ * as by the exported qman_stop_dequeues().
+ */
+static inline void qman_stop_dequeues_ex(struct qman_portal *p)
+{
+ if (!(p->dqrr_disable_ref++))
+ qm_dqrr_set_maxfill(&p->p, 0);
+}
+
+static int drain_mr_fqrni(struct qm_portal *p)
+{
+ const struct qm_mr_entry *msg;
+loop:
+ msg = qm_mr_current(p);
+ if (!msg) {
+ /*
+ * if MR was full and h/w had other FQRNI entries to produce, we
+ * need to allow it time to produce those entries once the
+ * existing entries are consumed. A worst-case situation
+ * (fully-loaded system) means h/w sequencers may have to do 3-4
+ * other things before servicing the portal's MR pump, each of
+ * which (if slow) may take ~50 qman cycles (which is ~200
+ * processor cycles). So rounding up and then multiplying this
+ * worst-case estimate by a factor of 10, just to be
+ * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
+ * one entry at a time, so h/w has an opportunity to produce new
+ * entries well before the ring has been fully consumed, so
+ * we're being *really* paranoid here.
+ */
+ u64 now, then = mfatb();
+
+ do {
+ now = mfatb();
+ } while ((then + 10000) > now);
+ msg = qm_mr_current(p);
+ if (!msg)
+ return 0;
+ }
+ if ((msg->ern.verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
+ /* We aren't draining anything but FQRNIs */
+ pr_err("Found verb 0x%x in MR\n", msg->ern.verb);
+ return -1;
+ }
+ qm_mr_next(p);
+ qm_mr_cci_consume(p, 1);
+ goto loop;
+}
+
+static inline int qm_eqcr_init(struct qm_portal *portal,
+ enum qm_eqcr_pmode pmode,
+ unsigned int eq_stash_thresh,
+ int eq_stash_prio)
+{
+ /* This use of 'register', as well as all other occurrences, is because
+ * it has been observed to generate much faster code with gcc than is
+ * otherwise the case.
+ */
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ u32 cfg;
+ u8 pi;
+
+ eqcr->ring = portal->addr.ce + QM_CL_EQCR;
+ eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+ qm_cl_invalidate(EQCR_CI);
+ pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+ eqcr->cursor = eqcr->ring + pi;
+ eqcr->vbit = (qm_in(EQCR_PI_CINH) & QM_EQCR_SIZE) ?
+ QM_EQCR_VERB_VBIT : 0;
+ eqcr->available = QM_EQCR_SIZE - 1 -
+ qm_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
+ eqcr->ithresh = qm_in(EQCR_ITR);
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ eqcr->busy = 0;
+ eqcr->pmode = pmode;
+#endif
+ cfg = (qm_in(CFG) & 0x00ffffff) |
+ (eq_stash_thresh << 28) | /* QCSP_CFG: EST */
+ (eq_stash_prio << 26) | /* QCSP_CFG: EP */
+ ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */
+ qm_out(CFG, cfg);
+ return 0;
+}
+
+static inline void qm_eqcr_finish(struct qm_portal *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 pi, ci;
+ u32 cfg;
+
+ /*
+ * Disable EQCI stashing because the QMan only
+ * presents the value it previously stashed to
+ * maintain coherency. Setting the stash threshold
+ * to 1 then 0 ensures that QMan has resynchronized
+ * its internal copy so that the portal is clean
+ * when it is reinitialized in the future
+ */
+ cfg = (qm_in(CFG) & 0x0fffffff) |
+ (1 << 28); /* QCSP_CFG: EST */
+ qm_out(CFG, cfg);
+ cfg &= 0x0fffffff; /* stash threshold = 0 */
+ qm_out(CFG, cfg);
+
+ pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+ ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+
+ /* Refresh EQCR CI cache value */
+ qm_cl_invalidate(EQCR_CI);
+ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(!eqcr->busy);
+#endif
+ if (pi != EQCR_PTR2IDX(eqcr->cursor))
+ pr_crit("losing uncommitted EQCR entries\n");
+ if (ci != eqcr->ci)
+ pr_crit("missing existing EQCR completions\n");
+ if (eqcr->ci != EQCR_PTR2IDX(eqcr->cursor))
+ pr_crit("EQCR destroyed unquiesced\n");
+}
+
+static inline int qm_dqrr_init(struct qm_portal *portal,
+ __maybe_unused const struct qm_portal_config *config,
+ enum qm_dqrr_dmode dmode,
+ __maybe_unused enum qm_dqrr_pmode pmode,
+ enum qm_dqrr_cmode cmode, u8 max_fill)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ u32 cfg;
+
+ /* Make sure the DQRR will be idle when we enable */
+ qm_out(DQRR_SDQCR, 0);
+ qm_out(DQRR_VDQCR, 0);
+ qm_out(DQRR_PDQCR, 0);
+ dqrr->ring = portal->addr.ce + QM_CL_DQRR;
+ dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
+ dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
+ dqrr->cursor = dqrr->ring + dqrr->ci;
+ dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
+ dqrr->vbit = (qm_in(DQRR_PI_CINH) & QM_DQRR_SIZE) ?
+ QM_DQRR_VERB_VBIT : 0;
+ dqrr->ithresh = qm_in(DQRR_ITR);
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ dqrr->dmode = dmode;
+ dqrr->pmode = pmode;
+ dqrr->cmode = cmode;
+#endif
+ /* Invalidate every ring entry before beginning */
+ for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
+ dccivac(qm_cl(dqrr->ring, cfg));
+ cfg = (qm_in(CFG) & 0xff000f00) |
+ ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
+ ((dmode & 1) << 18) | /* DP */
+ ((cmode & 3) << 16) | /* DCM */
+ 0xa0 | /* RE+SE */
+ (0 ? 0x40 : 0) | /* Ignore RP */
+ (0 ? 0x10 : 0); /* Ignore SP */
+ qm_out(CFG, cfg);
+ qm_dqrr_set_maxfill(portal, max_fill);
+ return 0;
+}
+
+static inline void qm_dqrr_finish(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ if ((dqrr->cmode != qm_dqrr_cdc) &&
+ (dqrr->ci != DQRR_PTR2IDX(dqrr->cursor)))
+ pr_crit("Ignoring completed DQRR entries\n");
+#endif
+}
+
+static inline int qm_mr_init(struct qm_portal *portal,
+ __maybe_unused enum qm_mr_pmode pmode,
+ enum qm_mr_cmode cmode)
+{
+ register struct qm_mr *mr = &portal->mr;
+ u32 cfg;
+
+ mr->ring = portal->addr.ce + QM_CL_MR;
+ mr->pi = qm_in(MR_PI_CINH) & (QM_MR_SIZE - 1);
+ mr->ci = qm_in(MR_CI_CINH) & (QM_MR_SIZE - 1);
+ mr->cursor = mr->ring + mr->ci;
+ mr->fill = qm_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
+ mr->vbit = (qm_in(MR_PI_CINH) & QM_MR_SIZE) ? QM_MR_VERB_VBIT : 0;
+ mr->ithresh = qm_in(MR_ITR);
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ mr->pmode = pmode;
+ mr->cmode = cmode;
+#endif
+ cfg = (qm_in(CFG) & 0xfffff0ff) |
+ ((cmode & 1) << 8); /* QCSP_CFG:MM */
+ qm_out(CFG, cfg);
+ return 0;
+}
+
+static inline void qm_mr_pvb_update(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+ const struct qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mr->pmode == qm_mr_pvb);
+#endif
+ /* when accessing 'verb', use __raw_readb() to ensure that compiler
+ * inlining doesn't try to optimise out "excess reads".
+ */
+ if ((__raw_readb(&res->ern.verb) & QM_MR_VERB_VBIT) == mr->vbit) {
+ mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
+ if (!mr->pi)
+ mr->vbit ^= QM_MR_VERB_VBIT;
+ mr->fill++;
+ res = MR_INC(res);
+ }
+ dcbit_ro(res);
+}
+
+struct qman_portal *
+qman_init_portal(struct qman_portal *portal,
+ const struct qm_portal_config *c,
+ const struct qman_cgrs *cgrs)
+{
+ struct qm_portal *p;
+ char buf[16];
+ int ret;
+ u32 isdr;
+
+ p = &portal->p;
+
+ if (!c)
+ c = portal->config;
+
+ if (dpaa_svr_family == SVR_LS1043A_FAMILY)
+ portal->use_eqcr_ci_stashing = 3;
+ else
+ portal->use_eqcr_ci_stashing =
+ ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
+
+ /*
+ * Prep the low-level portal struct with the mapped addresses from the
+ * config; everything that follows depends on it, and "config" is kept
+ * mainly for (de)reference.
+ */
+ p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
+ p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
+ /*
+ * If CI-stashing is used, the current defaults use a threshold of 3,
+ * and stash with higher-than-DQRR priority.
+ */
+ if (qm_eqcr_init(p, qm_eqcr_pvb,
+ portal->use_eqcr_ci_stashing, 1)) {
+ pr_err("Qman EQCR initialisation failed\n");
+ goto fail_eqcr;
+ }
+ if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
+ qm_dqrr_cdc, DQRR_MAXFILL)) {
+ pr_err("Qman DQRR initialisation failed\n");
+ goto fail_dqrr;
+ }
+ if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
+ pr_err("Qman MR initialisation failed\n");
+ goto fail_mr;
+ }
+ if (qm_mc_init(p)) {
+ pr_err("Qman MC initialisation failed\n");
+ goto fail_mc;
+ }
+
+ /* static interrupt-gating controls */
+ qm_dqrr_set_ithresh(p, 0);
+ qm_mr_set_ithresh(p, 0);
+ qm_isr_set_iperiod(p, 0);
+ portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
+ if (!portal->cgrs)
+ goto fail_cgrs;
+ /* initial snapshot is no-depletion */
+ qman_cgrs_init(&portal->cgrs[1]);
+ if (cgrs)
+ portal->cgrs[0] = *cgrs;
+ else
+ /* if the given mask is NULL, assume all CGRs can be seen */
+ qman_cgrs_fill(&portal->cgrs[0]);
+ INIT_LIST_HEAD(&portal->cgr_cbs);
+ spin_lock_init(&portal->cgr_lock);
+ portal->bits = 0;
+ portal->slowpoll = 0;
+ portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
+ QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
+ QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
+ portal->dqrr_disable_ref = 0;
+ portal->cb_dc_ern = NULL;
+ sprintf(buf, "qportal-%d", c->channel);
+ dpa_rbtree_init(&portal->retire_table);
+ isdr = 0xffffffff;
+ qm_isr_disable_write(p, isdr);
+ portal->irq_sources = 0;
+ qm_isr_enable_write(p, portal->irq_sources);
+ qm_isr_status_clear(p, 0xffffffff);
+ snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
+ if (request_irq(c->irq, portal_isr, 0, portal->irqname,
+ portal)) {
+ pr_err("request_irq() failed\n");
+ goto fail_irq;
+ }
+
+ /* Need EQCR to be empty before continuing */
+ isdr &= ~QM_PIRQ_EQCI;
+ qm_isr_disable_write(p, isdr);
+ ret = qm_eqcr_get_fill(p);
+ if (ret) {
+ pr_err("Qman EQCR unclean\n");
+ goto fail_eqcr_empty;
+ }
+ isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
+ qm_isr_disable_write(p, isdr);
+ if (qm_dqrr_current(p)) {
+ pr_err("Qman DQRR unclean\n");
+ qm_dqrr_cdc_consume_n(p, 0xffff);
+ }
+ if (qm_mr_current(p) && drain_mr_fqrni(p)) {
+ /* special handling, drain just in case it's a few FQRNIs */
+ if (drain_mr_fqrni(p))
+ goto fail_dqrr_mr_empty;
+ }
+ /* Success */
+ portal->config = c;
+ qm_isr_disable_write(p, 0);
+ qm_isr_uninhibit(p);
+ /* Write a sane SDQCR */
+ qm_dqrr_sdqcr_set(p, portal->sdqcr);
+ return portal;
+fail_dqrr_mr_empty:
+fail_eqcr_empty:
+ free_irq(c->irq, portal);
+fail_irq:
+ kfree(portal->cgrs);
+ spin_lock_destroy(&portal->cgr_lock);
+fail_cgrs:
+ qm_mc_finish(p);
+fail_mc:
+ qm_mr_finish(p);
+fail_mr:
+ qm_dqrr_finish(p);
+fail_dqrr:
+ qm_eqcr_finish(p);
+fail_eqcr:
+ return NULL;
+}
+
+#define MAX_GLOBAL_PORTALS 8
+static struct qman_portal global_portals[MAX_GLOBAL_PORTALS];
+static rte_atomic16_t global_portals_used[MAX_GLOBAL_PORTALS];
+
+struct qman_portal *
+qman_alloc_global_portal(struct qm_portal_config *q_pcfg)
+{
+ unsigned int i;
+
+ for (i = 0; i < MAX_GLOBAL_PORTALS; i++) {
+ if (rte_atomic16_test_and_set(&global_portals_used[i])) {
+ global_portals[i].config = q_pcfg;
+ return &global_portals[i];
+ }
+ }
+ pr_err("No portal available (%x)\n", MAX_GLOBAL_PORTALS);
+
+ return NULL;
+}
+
+int
+qman_free_global_portal(struct qman_portal *portal)
+{
+ unsigned int i;
+
+ for (i = 0; i < MAX_GLOBAL_PORTALS; i++) {
+ if (&global_portals[i] == portal) {
+ rte_atomic16_clear(&global_portals_used[i]);
+ return 0;
+ }
+ }
+ return -1;
+}
+
+void
+qman_portal_uninhibit_isr(struct qman_portal *portal)
+{
+ qm_isr_uninhibit(&portal->p);
+}
+
+struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
+ const struct qman_cgrs *cgrs)
+{
+ struct qman_portal *res;
+ struct qman_portal *portal = get_affine_portal();
+
+ /* A criteria for calling this function (from qman_driver.c) is that
+ * we're already affine to the cpu and won't schedule onto another cpu.
+ */
+ res = qman_init_portal(portal, c, cgrs);
+ if (res) {
+ spin_lock(&affine_mask_lock);
+ CPU_SET(c->cpu, &affine_mask);
+ affine_channels[c->cpu] =
+ c->channel;
+ spin_unlock(&affine_mask_lock);
+ }
+ return res;
+}
+
+static inline
+void qman_destroy_portal(struct qman_portal *qm)
+{
+ const struct qm_portal_config *pcfg;
+
+ /* Stop dequeues on the portal */
+ qm_dqrr_sdqcr_set(&qm->p, 0);
+
+ /*
+ * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
+ * something related to QM_PIRQ_EQCI, this may need fixing.
+ * Also, due to the prefetching model used for CI updates in the enqueue
+ * path, this update will only invalidate the CI cacheline *after*
+ * working on it, so we need to call this twice to ensure a full update
+ * irrespective of where the enqueue processing was at when the teardown
+ * began.
+ */
+ qm_eqcr_cce_update(&qm->p);
+ qm_eqcr_cce_update(&qm->p);
+ pcfg = qm->config;
+
+ free_irq(pcfg->irq, qm);
+
+ kfree(qm->cgrs);
+ qm_mc_finish(&qm->p);
+ qm_mr_finish(&qm->p);
+ qm_dqrr_finish(&qm->p);
+ qm_eqcr_finish(&qm->p);
+
+ qm->config = NULL;
+
+ spin_lock_destroy(&qm->cgr_lock);
+}
+
+const struct qm_portal_config *
+qman_destroy_affine_portal(struct qman_portal *qp)
+{
+ /* We don't want to redirect if we're a slave, use "raw" */
+ struct qman_portal *qm;
+ const struct qm_portal_config *pcfg;
+ int cpu;
+
+ if (qp == NULL)
+ qm = get_affine_portal();
+ else
+ qm = qp;
+ pcfg = qm->config;
+ cpu = pcfg->cpu;
+
+ qman_destroy_portal(qm);
+
+ spin_lock(&affine_mask_lock);
+ CPU_CLR(cpu, &affine_mask);
+ spin_unlock(&affine_mask_lock);
+
+ qman_free_global_portal(qm);
+
+ return pcfg;
+}
+
+int qman_get_portal_index(void)
+{
+ struct qman_portal *p = get_affine_portal();
+ return p->config->index;
+}
+
+/* Inline helper to reduce nesting in __poll_portal_slow() */
+static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
+ const struct qm_mr_entry *msg, u8 verb)
+{
+ FQLOCK(fq);
+ switch (verb) {
+ case QM_MR_VERB_FQRL:
+ DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
+ fq_clear(fq, QMAN_FQ_STATE_ORL);
+ table_del_fq(p, fq);
+ break;
+ case QM_MR_VERB_FQRN:
+ DPAA_ASSERT((fq->state == qman_fq_state_parked) ||
+ (fq->state == qman_fq_state_sched));
+ DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
+ fq_clear(fq, QMAN_FQ_STATE_CHANGING);
+ if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
+ fq_set(fq, QMAN_FQ_STATE_NE);
+ if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
+ fq_set(fq, QMAN_FQ_STATE_ORL);
+ else
+ table_del_fq(p, fq);
+ fq->state = qman_fq_state_retired;
+ break;
+ case QM_MR_VERB_FQPN:
+ DPAA_ASSERT(fq->state == qman_fq_state_sched);
+ DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
+ fq->state = qman_fq_state_parked;
+ }
+ FQUNLOCK(fq);
+}
+
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
+{
+ const struct qm_mr_entry *msg;
+ struct qm_mr_entry swapped_msg;
+
+ if (is & QM_PIRQ_CSCI) {
+ struct qman_cgrs rr, c;
+ struct qm_mc_result *mcr;
+ struct qman_cgr *cgr;
+
+ spin_lock(&p->cgr_lock);
+ /*
+ * The CSCI bit must be cleared _before_ issuing the
+ * Query Congestion State command, to ensure that a long
+ * CGR State Change callback cannot miss an intervening
+ * state change.
+ */
+ qm_isr_status_clear(&p->p, QM_PIRQ_CSCI);
+ qm_mc_start(&p->p);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ /* mask out the ones I'm not interested in */
+ qman_cgrs_and(&rr, (const struct qman_cgrs *)
+ &mcr->querycongestion.state, &p->cgrs[0]);
+ /* check previous snapshot for delta, enter/exit congestion */
+ qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
+ /* update snapshot */
+ qman_cgrs_cp(&p->cgrs[1], &rr);
+ /* Invoke callback */
+ list_for_each_entry(cgr, &p->cgr_cbs, node)
+ if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
+ cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
+ spin_unlock(&p->cgr_lock);
+ }
+
+ if (is & QM_PIRQ_EQRI) {
+ qm_eqcr_cce_update(&p->p);
+ qm_eqcr_set_ithresh(&p->p, 0);
+ wake_up(&affine_queue);
+ }
+
+ if (is & QM_PIRQ_MRI) {
+ struct qman_fq *fq;
+ u8 verb, num = 0;
+mr_loop:
+ qm_mr_pvb_update(&p->p);
+ msg = qm_mr_current(&p->p);
+ if (!msg)
+ goto mr_done;
+ swapped_msg = *msg;
+ hw_fd_to_cpu(&swapped_msg.ern.fd);
+ verb = msg->ern.verb & QM_MR_VERB_TYPE_MASK;
+		/* The message is a software ERN iff the 0x20 bit is clear */
+ if (verb & 0x20) {
+ switch (verb) {
+ case QM_MR_VERB_FQRNI:
+ /* nada, we drop FQRNIs on the floor */
+ break;
+ case QM_MR_VERB_FQRN:
+ case QM_MR_VERB_FQRL:
+ /* Lookup in the retirement table */
+ fq = table_find_fq(p,
+ be32_to_cpu(msg->fq.fqid));
+ DPAA_BUG_ON(!fq);
+ fq_state_change(p, fq, &swapped_msg, verb);
+ if (fq->cb.fqs)
+ fq->cb.fqs(p, fq, &swapped_msg);
+ break;
+ case QM_MR_VERB_FQPN:
+ /* Parked */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ fq = get_fq_table_entry(msg->fq.contextB);
+#else
+ fq = (void *)(uintptr_t)msg->fq.contextB;
+#endif
+ fq_state_change(p, fq, msg, verb);
+ if (fq->cb.fqs)
+ fq->cb.fqs(p, fq, &swapped_msg);
+ break;
+ case QM_MR_VERB_DC_ERN:
+ /* DCP ERN */
+ if (p->cb_dc_ern)
+ p->cb_dc_ern(p, msg);
+ else if (cb_dc_ern)
+ cb_dc_ern(p, msg);
+ else {
+ static int warn_once;
+
+ if (!warn_once) {
+ pr_crit("Leaking DCP ERNs!\n");
+ warn_once = 1;
+ }
+ }
+ break;
+ default:
+ pr_crit("Invalid MR verb 0x%02x\n", verb);
+ }
+ } else {
+			/* It's a software ERN */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ fq = get_fq_table_entry(be32_to_cpu(msg->ern.tag));
+#else
+ fq = (void *)(uintptr_t)be32_to_cpu(msg->ern.tag);
+#endif
+ fq->cb.ern(p, fq, &swapped_msg);
+ }
+ num++;
+ qm_mr_next(&p->p);
+ goto mr_loop;
+mr_done:
+ qm_mr_cci_consume(&p->p, num);
+ }
+ /*
+ * QM_PIRQ_CSCI/CCSCI has already been cleared, as part of its specific
+ * processing. If that interrupt source has meanwhile been re-asserted,
+ * we mustn't clear it here (or in the top-level interrupt handler).
+ */
+ return is & (QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI);
+}
+
+/*
+ * remove some slowish-path stuff from the "fast path" and make sure it isn't
+ * inlined.
+ */
+static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
+{
+ p->vdqcr_owned = NULL;
+ FQLOCK(fq);
+ fq_clear(fq, QMAN_FQ_STATE_VDQCR);
+ FQUNLOCK(fq);
+ wake_up(&affine_queue);
+}
+
+/*
+ * The only states that would conflict with other things if they ran at the
+ * same time on the same cpu are:
+ *
+ * (i) setting/clearing vdqcr_owned, and
+ * (ii) clearing the NE (Not Empty) flag.
+ *
+ * Both are safe, because:
+ *
+ * (i) this clearing can only occur after qman_set_vdq() has set the
+ * vdqcr_owned field (which it does before setting VDQCR), and
+ * qman_volatile_dequeue() blocks interrupts and preemption while this is
+ * done so that we can't interfere.
+ * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
+ * with (i) that API prevents us from interfering until it's safe.
+ *
+ * The good thing is that qman_set_vdq() and qman_retire_fq() run far
+ * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the net
+ * advantage comes from this function not having to "lock" anything at all.
+ *
+ * Note also that the callbacks are invoked at points which are safe against the
+ * above potential conflicts, but that this function itself is not re-entrant
+ * (this is because the function tracks one end of each FIFO in the portal and
+ * we do *not* want to lock that). So the consequence is that it is safe for
+ * user callbacks to call into any QMan API.
+ */
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+ unsigned int poll_limit)
+{
+ const struct qm_dqrr_entry *dq;
+ struct qman_fq *fq;
+ enum qman_cb_dqrr_result res;
+ unsigned int limit = 0;
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ struct qm_dqrr_entry *shadow;
+#endif
+ do {
+ qm_dqrr_pvb_update(&p->p);
+ dq = qm_dqrr_current(&p->p);
+ if (unlikely(!dq))
+ break;
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+		/* If running on an LE system, the fields of the
+		 * dequeue entry must be byte-swapped. Because the
+		 * QMan HW ignores writes, the entry is copied into
+		 * a shadow array (at the same ring index) and the
+		 * swap is performed on the copy.
+		 */
+ shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
+ *shadow = *dq;
+ dq = shadow;
+ shadow->fqid = be32_to_cpu(shadow->fqid);
+ shadow->seqnum = be16_to_cpu(shadow->seqnum);
+ hw_fd_to_cpu(&shadow->fd);
+#endif
+
+ if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
+ /*
+ * VDQCR: don't trust context_b as the FQ may have
+ * been configured for h/w consumption and we're
+ * draining it post-retirement.
+ */
+ fq = p->vdqcr_owned;
+ /*
+ * We only set QMAN_FQ_STATE_NE when retiring, so we
+ * only need to check for clearing it when doing
+ * volatile dequeues. It's one less thing to check
+ * in the critical path (SDQCR).
+ */
+ if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
+ fq_clear(fq, QMAN_FQ_STATE_NE);
+ /*
+ * This is duplicated from the SDQCR code, but we
+ * have stuff to do before *and* after this callback,
+ * and we don't want multiple if()s in the critical
+ * path (SDQCR).
+ */
+ res = fq->cb.dqrr(p, fq, dq);
+ if (res == qman_cb_dqrr_stop)
+ break;
+ /* Check for VDQCR completion */
+ if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
+ clear_vdqcr(p, fq);
+ } else {
+ /* SDQCR: context_b points to the FQ */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ fq = get_fq_table_entry(dq->contextB);
+#else
+ fq = (void *)(uintptr_t)dq->contextB;
+#endif
+ /* Now let the callback do its stuff */
+ res = fq->cb.dqrr(p, fq, dq);
+ /*
+ * The callback can request that we exit without
+			 * consuming this entry or advancing.
+ */
+ if (res == qman_cb_dqrr_stop)
+ break;
+ }
+ /* Interpret 'dq' from a driver perspective. */
+ /*
+ * Parking isn't possible unless HELDACTIVE was set. NB,
+ * FORCEELIGIBLE implies HELDACTIVE, so we only need to
+ * check for HELDACTIVE to cover both.
+ */
+ DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
+ (res != qman_cb_dqrr_park));
+ /* just means "skip it, I'll consume it myself later on" */
+ if (res != qman_cb_dqrr_defer)
+ qm_dqrr_cdc_consume_1ptr(&p->p, dq,
+ res == qman_cb_dqrr_park);
+ /* Move forward */
+ qm_dqrr_next(&p->p);
+ /*
+ * Entry processed and consumed, increment our counter. The
+ * callback can request that we exit after consuming the
+ * entry, and we also exit if we reach our processing limit,
+ * so loop back only if neither of these conditions is met.
+ */
+ } while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);
+
+ return limit;
+}
+
+int qman_irqsource_add(u32 bits)
+{
+ struct qman_portal *p = get_affine_portal();
+
+ bits = bits & QM_PIRQ_VISIBLE;
+
+ /* Clear any previously remaining interrupt conditions in
+ * QCSP_ISR. This prevents raising a false interrupt when
+ * interrupt conditions are enabled in QCSP_IER.
+ */
+ qm_isr_status_clear(&p->p, bits);
+ dpaa_set_bits(bits, &p->irq_sources);
+ qm_isr_enable_write(&p->p, p->irq_sources);
+
+ return 0;
+}
+
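+/*
+ * Usage sketch (illustrative, not part of this file): a thread that owns an
+ * affine portal can ask for DQRR interrupts before blocking on work,
+ *
+ *	qman_irqsource_add(QM_PIRQ_DQRI);
+ *
+ * and symmetrically call qman_irqsource_remove(QM_PIRQ_DQRI) when it
+ * returns to pure polling.
+ */
+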
+int qman_fq_portal_irqsource_add(struct qman_portal *p, u32 bits)
+{
+ bits = bits & QM_PIRQ_VISIBLE;
+
+ /* Clear any previously remaining interrupt conditions in
+ * QCSP_ISR. This prevents raising a false interrupt when
+ * interrupt conditions are enabled in QCSP_IER.
+ */
+ qm_isr_status_clear(&p->p, bits);
+ dpaa_set_bits(bits, &p->irq_sources);
+ qm_isr_enable_write(&p->p, p->irq_sources);
+
+ return 0;
+}
+
+int qman_irqsource_remove(u32 bits)
+{
+ struct qman_portal *p = get_affine_portal();
+ u32 ier;
+
+ /* Our interrupt handler only processes+clears status register bits that
+ * are in p->irq_sources. As we're trimming that mask, if one of them
+ * were to assert in the status register just before we remove it from
+ * the enable register, there would be an interrupt-storm when we
+ * release the IRQ lock. So we wait for the enable register update to
+ * take effect in h/w (by reading it back) and then clear all other bits
+ * in the status register. Ie. we clear them from ISR once it's certain
+ * IER won't allow them to reassert.
+ */
+
+ bits &= QM_PIRQ_VISIBLE;
+ dpaa_clear_bits(bits, &p->irq_sources);
+ qm_isr_enable_write(&p->p, p->irq_sources);
+ ier = qm_isr_enable_read(&p->p);
+ /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
+ * data-dependency, ie. to protect against re-ordering.
+ */
+ qm_isr_status_clear(&p->p, ~ier);
+ return 0;
+}
+
+int qman_fq_portal_irqsource_remove(struct qman_portal *p, u32 bits)
+{
+ u32 ier;
+
+ /* Our interrupt handler only processes+clears status register bits that
+ * are in p->irq_sources. As we're trimming that mask, if one of them
+ * were to assert in the status register just before we remove it from
+ * the enable register, there would be an interrupt-storm when we
+ * release the IRQ lock. So we wait for the enable register update to
+ * take effect in h/w (by reading it back) and then clear all other bits
+ * in the status register. Ie. we clear them from ISR once it's certain
+ * IER won't allow them to reassert.
+ */
+
+ bits &= QM_PIRQ_VISIBLE;
+ dpaa_clear_bits(bits, &p->irq_sources);
+ qm_isr_enable_write(&p->p, p->irq_sources);
+ ier = qm_isr_enable_read(&p->p);
+ /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
+ * data-dependency, ie. to protect against re-ordering.
+ */
+ qm_isr_status_clear(&p->p, ~ier);
+ return 0;
+}
+
+u16 qman_affine_channel(int cpu)
+{
+ if (cpu < 0) {
+ struct qman_portal *portal = get_affine_portal();
+
+ cpu = portal->config->cpu;
+ }
+ DPAA_BUG_ON(!CPU_ISSET(cpu, &affine_mask));
+ return affine_channels[cpu];
+}
+
+unsigned int qman_portal_poll_rx(unsigned int poll_limit,
+ void **bufs,
+ struct qman_portal *p)
+{
+ struct qm_portal *portal = &p->p;
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ struct qm_dqrr_entry *dq[QM_DQRR_SIZE], *shadow[QM_DQRR_SIZE];
+ struct qman_fq *fq;
+ unsigned int limit = 0, rx_number = 0;
+ uint32_t consume = 0;
+
+ do {
+ qm_dqrr_pvb_update(&p->p);
+ if (!dqrr->fill)
+ break;
+
+ dq[rx_number] = dqrr->cursor;
+ dqrr->cursor = DQRR_CARRYCLEAR(dqrr->cursor + 1);
+ /* Prefetch the next DQRR entry */
+ rte_prefetch0(dqrr->cursor);
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+		/* If running on an LE system, the fields of the
+		 * dequeue entry must be byte-swapped. Because the
+		 * QMan HW ignores writes, the entry is copied into
+		 * a shadow array (at the same ring index) and the
+		 * swap is performed on the copy.
+		 */
+ shadow[rx_number] =
+ &p->shadow_dqrr[DQRR_PTR2IDX(dq[rx_number])];
+ shadow[rx_number]->fd.opaque_addr =
+ dq[rx_number]->fd.opaque_addr;
+ shadow[rx_number]->fd.addr =
+ be40_to_cpu(dq[rx_number]->fd.addr);
+ shadow[rx_number]->fd.opaque =
+ be32_to_cpu(dq[rx_number]->fd.opaque);
+#else
+ shadow[rx_number] = dq[rx_number];
+#endif
+
+ /* SDQCR: context_b points to the FQ */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ fq = qman_fq_lookup_table[dq[rx_number]->contextB];
+#else
+ fq = (void *)dq[rx_number]->contextB;
+#endif
+ if (fq->cb.dqrr_prepare)
+ fq->cb.dqrr_prepare(shadow[rx_number],
+ &bufs[rx_number]);
+
+ consume |= (1 << (31 - DQRR_PTR2IDX(shadow[rx_number])));
+ rx_number++;
+ --dqrr->fill;
+ } while (++limit < poll_limit);
+
+ if (rx_number)
+ fq->cb.dqrr_dpdk_pull_cb(&fq, shadow, bufs, rx_number);
+
+	/* Consume all the DQRR entries together */
+ qm_out(DQRR_DCAP, (1 << 8) | consume);
+
+ return rx_number;
+}
+
+void qman_clear_irq(void)
+{
+ struct qman_portal *p = get_affine_portal();
+ u32 clear = QM_DQAVAIL_MASK | (p->irq_sources &
+ ~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI));
+ qm_isr_status_clear(&p->p, clear);
+}
+
+u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
+ void **bufs)
+{
+ const struct qm_dqrr_entry *dq;
+ struct qman_fq *fq;
+ enum qman_cb_dqrr_result res;
+ unsigned int limit = 0;
+ struct qman_portal *p = get_affine_portal();
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ struct qm_dqrr_entry *shadow;
+#endif
+ unsigned int rx_number = 0;
+
+ do {
+ qm_dqrr_pvb_update(&p->p);
+ dq = qm_dqrr_current(&p->p);
+ if (!dq)
+ break;
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+		/*
+		 * If running on an LE system, the fields of the
+		 * dequeue entry must be byte-swapped. Because the
+		 * QMan HW ignores writes, the entry is copied into
+		 * a shadow array (at the same ring index) and the
+		 * swap is performed on the copy.
+		 */
+ shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
+ *shadow = *dq;
+ dq = shadow;
+ shadow->fqid = be32_to_cpu(shadow->fqid);
+ shadow->seqnum = be16_to_cpu(shadow->seqnum);
+ hw_fd_to_cpu(&shadow->fd);
+#endif
+
+ /* SDQCR: context_b points to the FQ */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ fq = get_fq_table_entry(dq->contextB);
+#else
+ fq = (void *)(uintptr_t)dq->contextB;
+#endif
+ /* Now let the callback do its stuff */
+ res = fq->cb.dqrr_dpdk_cb(&ev[rx_number], p, fq,
+ dq, &bufs[rx_number]);
+ rx_number++;
+ /* Interpret 'dq' from a driver perspective. */
+ /*
+ * Parking isn't possible unless HELDACTIVE was set. NB,
+ * FORCEELIGIBLE implies HELDACTIVE, so we only need to
+ * check for HELDACTIVE to cover both.
+ */
+ DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
+ (res != qman_cb_dqrr_park));
+ if (res != qman_cb_dqrr_defer)
+ qm_dqrr_cdc_consume_1ptr(&p->p, dq,
+ res == qman_cb_dqrr_park);
+ /* Move forward */
+ qm_dqrr_next(&p->p);
+ /*
+ * Entry processed and consumed, increment our counter. The
+ * callback can request that we exit after consuming the
+ * entry, and we also exit if we reach our processing limit,
+ * so loop back only if neither of these conditions is met.
+ */
+ } while (++limit < poll_limit);
+
+ return limit;
+}
+
+struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq)
+{
+ struct qman_portal *p = get_affine_portal();
+ const struct qm_dqrr_entry *dq;
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ struct qm_dqrr_entry *shadow;
+#endif
+
+ qm_dqrr_pvb_update(&p->p);
+ dq = qm_dqrr_current(&p->p);
+ if (!dq)
+ return NULL;
+
+ if (!(dq->stat & QM_DQRR_STAT_FD_VALID)) {
+		/* Invalid DQRR entry - consume it and return NULL to the
+		 * user as no packet is seen.
+ */
+ qman_dqrr_consume(fq, (struct qm_dqrr_entry *)dq);
+ return NULL;
+ }
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
+ *shadow = *dq;
+ dq = shadow;
+ shadow->fqid = be32_to_cpu(shadow->fqid);
+ shadow->seqnum = be16_to_cpu(shadow->seqnum);
+ hw_fd_to_cpu(&shadow->fd);
+#endif
+
+ if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
+ fq_clear(fq, QMAN_FQ_STATE_NE);
+
+ return (struct qm_dqrr_entry *)dq;
+}
+
+void qman_dqrr_consume(struct qman_fq *fq,
+ struct qm_dqrr_entry *dq)
+{
+ struct qman_portal *p = get_affine_portal();
+
+ if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
+ clear_vdqcr(p, fq);
+
+ qm_dqrr_cdc_consume_1ptr(&p->p, dq, 0);
+ qm_dqrr_next(&p->p);
+}
+
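+/*
+ * Usage sketch (illustrative; process_fd() is a hypothetical consumer):
+ * qman_dequeue()/qman_dqrr_consume() give a simple pull-mode receive step,
+ *
+ *	struct qm_dqrr_entry *dq = qman_dequeue(&my_fq);
+ *
+ *	if (dq) {
+ *		process_fd(&dq->fd);
+ *		qman_dqrr_consume(&my_fq, dq);
+ *	}
+ */
+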
+int qman_poll_dqrr(unsigned int limit)
+{
+ struct qman_portal *p = get_affine_portal();
+ int ret;
+
+ ret = __poll_portal_fast(p, limit);
+ return ret;
+}
+
+void qman_poll(void)
+{
+ struct qman_portal *p = get_affine_portal();
+
+ if ((~p->irq_sources) & QM_PIRQ_SLOW) {
+ if (!(p->slowpoll--)) {
+ u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources;
+ u32 active = __poll_portal_slow(p, is);
+
+ if (active) {
+ qm_isr_status_clear(&p->p, active);
+ p->slowpoll = SLOW_POLL_BUSY;
+ } else
+ p->slowpoll = SLOW_POLL_IDLE;
+ }
+ }
+ if ((~p->irq_sources) & QM_PIRQ_DQRI)
+ __poll_portal_fast(p, FSL_QMAN_POLL_LIMIT);
+}
+
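+/*
+ * Usage sketch (illustrative; do_other_work() is hypothetical): a
+ * run-to-completion thread typically drives both the slow and fast paths
+ * from its main loop,
+ *
+ *	for (;;) {
+ *		qman_poll();
+ *		do_other_work();
+ *	}
+ *
+ * with the slow-path poll rate-limited internally via p->slowpoll.
+ */
+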
+void qman_stop_dequeues(void)
+{
+ struct qman_portal *p = get_affine_portal();
+
+ qman_stop_dequeues_ex(p);
+}
+
+void qman_start_dequeues(void)
+{
+ struct qman_portal *p = get_affine_portal();
+
+ DPAA_ASSERT(p->dqrr_disable_ref > 0);
+ if (!(--p->dqrr_disable_ref))
+ qm_dqrr_set_maxfill(&p->p, DQRR_MAXFILL);
+}
+
+void qman_static_dequeue_add(u32 pools, struct qman_portal *qp)
+{
+ struct qman_portal *p = qp ? qp : get_affine_portal();
+
+ pools &= p->config->pools;
+ p->sdqcr |= pools;
+ qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
+}
+
+void qman_static_dequeue_del(u32 pools, struct qman_portal *qp)
+{
+ struct qman_portal *p = qp ? qp : get_affine_portal();
+
+ pools &= p->config->pools;
+ p->sdqcr &= ~pools;
+ qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
+}
+
+u32 qman_static_dequeue_get(struct qman_portal *qp)
+{
+ struct qman_portal *p = qp ? qp : get_affine_portal();
+ return p->sdqcr;
+}
+
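+/*
+ * Usage sketch (illustrative): to have the calling portal also service pool
+ * channel n, assuming the QM_SDQCR_CHANNELS_POOL() conversion macro from
+ * fsl_qman.h,
+ *
+ *	qman_static_dequeue_add(QM_SDQCR_CHANNELS_POOL(n), NULL);
+ *
+ * where a NULL portal argument selects the affine portal.
+ */
+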
+void qman_dca(const struct qm_dqrr_entry *dq, int park_request)
+{
+ struct qman_portal *p = get_affine_portal();
+
+ qm_dqrr_cdc_consume_1ptr(&p->p, dq, park_request);
+}
+
+void qman_dca_index(u8 index, int park_request)
+{
+ struct qman_portal *p = get_affine_portal();
+
+ qm_dqrr_cdc_consume_1(&p->p, index, park_request);
+}
+
+/* Frame queue API */
+static const char *mcr_result_str(u8 result)
+{
+ switch (result) {
+ case QM_MCR_RESULT_NULL:
+ return "QM_MCR_RESULT_NULL";
+ case QM_MCR_RESULT_OK:
+ return "QM_MCR_RESULT_OK";
+ case QM_MCR_RESULT_ERR_FQID:
+ return "QM_MCR_RESULT_ERR_FQID";
+ case QM_MCR_RESULT_ERR_FQSTATE:
+ return "QM_MCR_RESULT_ERR_FQSTATE";
+ case QM_MCR_RESULT_ERR_NOTEMPTY:
+ return "QM_MCR_RESULT_ERR_NOTEMPTY";
+ case QM_MCR_RESULT_PENDING:
+ return "QM_MCR_RESULT_PENDING";
+ case QM_MCR_RESULT_ERR_BADCOMMAND:
+ return "QM_MCR_RESULT_ERR_BADCOMMAND";
+ }
+ return "<unknown MCR result>";
+}
+
+int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
+{
+ struct qm_fqd fqd;
+ struct qm_mcr_queryfq_np np;
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+
+ if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
+ int ret = qman_alloc_fqid(&fqid);
+
+ if (ret)
+ return ret;
+ }
+ spin_lock_init(&fq->fqlock);
+ fq->fqid = fqid;
+ fq->fqid_le = cpu_to_be32(fqid);
+ fq->flags = flags;
+ fq->state = qman_fq_state_oos;
+ fq->cgr_groupid = 0;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ if (unlikely(find_empty_fq_table_entry(&fq->key, fq))) {
+ pr_info("Find empty table entry failed\n");
+ return -ENOMEM;
+ }
+ fq->qman_fq_lookup_table = qman_fq_lookup_table;
+#endif
+ if (!(flags & QMAN_FQ_FLAG_AS_IS) || (flags & QMAN_FQ_FLAG_NO_MODIFY))
+ return 0;
+ /* Everything else is AS_IS support */
+ p = get_affine_portal();
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq.fqid = cpu_to_be32(fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ pr_err("QUERYFQ failed: %s\n", mcr_result_str(mcr->result));
+ goto err;
+ }
+ fqd = mcr->queryfq.fqd;
+ hw_fqd_to_cpu(&fqd);
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq_np.fqid = cpu_to_be32(fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ_NP);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ pr_err("QUERYFQ_NP failed: %s\n", mcr_result_str(mcr->result));
+ goto err;
+ }
+ np = mcr->queryfq_np;
+ /* Phew, have queryfq and queryfq_np results, stitch together
+ * the FQ object from those.
+ */
+ fq->cgr_groupid = fqd.cgid;
+ switch (np.state & QM_MCR_NP_STATE_MASK) {
+ case QM_MCR_NP_STATE_OOS:
+ break;
+ case QM_MCR_NP_STATE_RETIRED:
+ fq->state = qman_fq_state_retired;
+ if (np.frm_cnt)
+ fq_set(fq, QMAN_FQ_STATE_NE);
+ break;
+ case QM_MCR_NP_STATE_TEN_SCHED:
+ case QM_MCR_NP_STATE_TRU_SCHED:
+ case QM_MCR_NP_STATE_ACTIVE:
+ fq->state = qman_fq_state_sched;
+ if (np.state & QM_MCR_NP_STATE_R)
+ fq_set(fq, QMAN_FQ_STATE_CHANGING);
+ break;
+ case QM_MCR_NP_STATE_PARKED:
+ fq->state = qman_fq_state_parked;
+ break;
+ default:
+ DPAA_ASSERT(NULL == "invalid FQ state");
+ }
+ if (fqd.fq_ctrl & QM_FQCTRL_CGE)
+ fq->state |= QMAN_FQ_STATE_CGR_EN;
+ return 0;
+err:
+ if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID)
+ qman_release_fqid(fqid);
+ return -EIO;
+}
+
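+/*
+ * Usage sketch (illustrative, callback name hypothetical): creating an FQ
+ * with a dynamically allocated FQID,
+ *
+ *	static struct qman_fq my_fq = { .cb.dqrr = my_dqrr_cb };
+ *	int err;
+ *
+ *	err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &my_fq);
+ *
+ * On success, my_fq.fqid holds the allocated FQID and the FQ is OOS, ready
+ * for qman_init_fq().
+ */
+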
+void qman_destroy_fq(struct qman_fq *fq, u32 flags __maybe_unused)
+{
+ /*
+ * We don't need to lock the FQ as it is a pre-condition that the FQ be
+ * quiesced. Instead, run some checks.
+ */
+ switch (fq->state) {
+ case qman_fq_state_parked:
+ DPAA_ASSERT(flags & QMAN_FQ_DESTROY_PARKED);
+ /* Fallthrough */
+ case qman_fq_state_oos:
+ if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
+ qman_release_fqid(fq->fqid);
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ clear_fq_table_entry(fq->key);
+#endif
+ return;
+ default:
+ break;
+ }
+ DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
+}
+
+u32 qman_fq_fqid(struct qman_fq *fq)
+{
+ return fq->fqid;
+}
+
+void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags)
+{
+ if (state)
+ *state = fq->state;
+ if (flags)
+ *flags = fq->flags;
+}
+
+int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+
+ u8 res, myverb = (flags & QMAN_INITFQ_FLAG_SCHED) ?
+ QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;
+
+ if ((fq->state != qman_fq_state_oos) &&
+ (fq->state != qman_fq_state_parked))
+ return -EINVAL;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+ return -EINVAL;
+#endif
+ if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
+ /* And can't be set at the same time as TDTHRESH */
+ if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
+ return -EINVAL;
+ }
+ /* Issue an INITFQ_[PARKED|SCHED] management command */
+ p = get_affine_portal();
+ FQLOCK(fq);
+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
+ ((fq->state != qman_fq_state_oos) &&
+ (fq->state != qman_fq_state_parked)))) {
+ FQUNLOCK(fq);
+ return -EBUSY;
+ }
+ mcc = qm_mc_start(&p->p);
+ if (opts)
+ mcc->initfq = *opts;
+ mcc->initfq.fqid = cpu_to_be32(fq->fqid);
+ mcc->initfq.count = 0;
+ /*
+ * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a
+ * demux pointer. Otherwise, the caller-provided value is allowed to
+ * stand, don't overwrite it.
+ */
+ if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
+ dma_addr_t phys_fq;
+
+ mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ mcc->initfq.fqd.context_b = cpu_to_be32(fq->key);
+#else
+ mcc->initfq.fqd.context_b = (u32)(uintptr_t)fq;
+#endif
+ /*
+ * and the physical address - NB, if the user wasn't trying to
+ * set CONTEXTA, clear the stashing settings.
+ */
+ if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
+ mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+ memset(&mcc->initfq.fqd.context_a, 0,
+ sizeof(mcc->initfq.fqd.context_a));
+ } else {
+ phys_fq = rte_mem_virt2iova(fq);
+ qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
+ }
+ }
+ if (flags & QMAN_INITFQ_FLAG_LOCAL) {
+ mcc->initfq.fqd.dest.channel = p->config->channel;
+ if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
+ mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
+ mcc->initfq.fqd.dest.wq = 4;
+ }
+ }
+ mcc->initfq.we_mask = cpu_to_be16(mcc->initfq.we_mask);
+ cpu_to_hw_fqd(&mcc->initfq.fqd);
+ qm_mc_commit(&p->p, myverb);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
+ res = mcr->result;
+ if (res != QM_MCR_RESULT_OK) {
+ FQUNLOCK(fq);
+ return -EIO;
+ }
+ if (opts) {
+ if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
+ if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
+ fq_set(fq, QMAN_FQ_STATE_CGR_EN);
+ else
+ fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
+ }
+ if (opts->we_mask & QM_INITFQ_WE_CGID)
+ fq->cgr_groupid = opts->fqd.cgid;
+ }
+ fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
+ qman_fq_state_sched : qman_fq_state_parked;
+ FQUNLOCK(fq);
+ return 0;
+}
+
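+/*
+ * Usage sketch (illustrative): scheduling the FQ created above onto the
+ * local portal's channel, letting the API pick work queue 4,
+ *
+ *	struct qm_mcc_initfq opts;
+ *	int err;
+ *
+ *	memset(&opts, 0, sizeof(opts));
+ *	err = qman_init_fq(&my_fq,
+ *			   QMAN_INITFQ_FLAG_SCHED | QMAN_INITFQ_FLAG_LOCAL,
+ *			   &opts);
+ */
+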
+int qman_schedule_fq(struct qman_fq *fq)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+
+ int ret = 0;
+ u8 res;
+
+ if (fq->state != qman_fq_state_parked)
+ return -EINVAL;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+ return -EINVAL;
+#endif
+ /* Issue a ALTERFQ_SCHED management command */
+ p = get_affine_portal();
+
+ FQLOCK(fq);
+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
+ (fq->state != qman_fq_state_parked))) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
+ res = mcr->result;
+ if (res != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+ fq->state = qman_fq_state_sched;
+out:
+ FQUNLOCK(fq);
+
+ return ret;
+}
+
+int qman_retire_fq(struct qman_fq *fq, u32 *flags)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+
+ int rval;
+ u8 res;
+
+ if ((fq->state != qman_fq_state_parked) &&
+ (fq->state != qman_fq_state_sched))
+ return -EINVAL;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+ return -EINVAL;
+#endif
+ p = get_affine_portal();
+
+ FQLOCK(fq);
+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
+ (fq->state == qman_fq_state_retired) ||
+ (fq->state == qman_fq_state_oos))) {
+ rval = -EBUSY;
+ goto out;
+ }
+ rval = table_push_fq(p, fq);
+ if (rval)
+ goto out;
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
+ res = mcr->result;
+ /*
+ * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
+ * and defer the flags until FQRNI or FQRN (respectively) show up. But
+ * "Friendly" is to process OK immediately, and not set CHANGING. We do
+ * friendly, otherwise the caller doesn't necessarily have a fully
+ * "retired" FQ on return even if the retirement was immediate. However
+ * this does mean some code duplication between here and
+ * fq_state_change().
+ */
+ if (likely(res == QM_MCR_RESULT_OK)) {
+ rval = 0;
+ /* Process 'fq' right away, we'll ignore FQRNI */
+ if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
+ fq_set(fq, QMAN_FQ_STATE_NE);
+ if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
+ fq_set(fq, QMAN_FQ_STATE_ORL);
+ else
+ table_del_fq(p, fq);
+ if (flags)
+ *flags = fq->flags;
+ fq->state = qman_fq_state_retired;
+ if (fq->cb.fqs) {
+ /*
+ * Another issue with supporting "immediate" retirement
+ * is that we're forced to drop FQRNIs, because by the
+ * time they're seen it may already be "too late" (the
+ * fq may have been OOS'd and free()'d already). But if
+ * the upper layer wants a callback whether it's
+ * immediate or not, we have to fake a "MR" entry to
+ * look like an FQRNI...
+ */
+ struct qm_mr_entry msg;
+
+ msg.ern.verb = QM_MR_VERB_FQRNI;
+ msg.fq.fqs = mcr->alterfq.fqs;
+ msg.fq.fqid = fq->fqid;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ msg.fq.contextB = fq->key;
+#else
+ msg.fq.contextB = (u32)(uintptr_t)fq;
+#endif
+ fq->cb.fqs(p, fq, &msg);
+ }
+ } else if (res == QM_MCR_RESULT_PENDING) {
+ rval = 1;
+ fq_set(fq, QMAN_FQ_STATE_CHANGING);
+ } else {
+ rval = -EIO;
+ table_del_fq(p, fq);
+ }
+out:
+ FQUNLOCK(fq);
+ return rval;
+}
+
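+/*
+ * Usage sketch (illustrative): a return of 1 means retirement is pending
+ * and the caller should wait for the FQRN message via fq->cb.fqs before
+ * taking the FQ out of service,
+ *
+ *	int rv;
+ *
+ *	rv = qman_retire_fq(&my_fq, NULL);
+ *	if (rv == 0 && qman_oos_fq(&my_fq) == 0)
+ *		qman_destroy_fq(&my_fq, 0);
+ */
+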
+int qman_oos_fq(struct qman_fq *fq)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+
+ int ret = 0;
+ u8 res;
+
+ if (fq->state != qman_fq_state_retired)
+ return -EINVAL;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+ return -EINVAL;
+#endif
+ p = get_affine_portal();
+ FQLOCK(fq);
+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS)) ||
+ (fq->state != qman_fq_state_retired))) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
+ res = mcr->result;
+ if (res != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+ fq->state = qman_fq_state_oos;
+out:
+ FQUNLOCK(fq);
+ return ret;
+}
+
+int qman_fq_flow_control(struct qman_fq *fq, int xon)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+
+ int ret = 0;
+ u8 res;
+ u8 myverb;
+
+ if ((fq->state == qman_fq_state_oos) ||
+ (fq->state == qman_fq_state_retired) ||
+ (fq->state == qman_fq_state_parked))
+ return -EINVAL;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+ return -EINVAL;
+#endif
+ /* Issue a ALTER_FQXON or ALTER_FQXOFF management command */
+ p = get_affine_portal();
+ FQLOCK(fq);
+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
+ (fq->state == qman_fq_state_parked) ||
+ (fq->state == qman_fq_state_oos) ||
+ (fq->state == qman_fq_state_retired))) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = fq->fqid;
+ mcc->alterfq.count = 0;
+ myverb = xon ? QM_MCC_VERB_ALTER_FQXON : QM_MCC_VERB_ALTER_FQXOFF;
+
+ qm_mc_commit(&p->p, myverb);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
+
+ res = mcr->result;
+ if (res != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+out:
+ FQUNLOCK(fq);
+ return ret;
+}
+
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+
+ u8 res;
+
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK)
+ *fqd = mcr->queryfq.fqd;
+ hw_fqd_to_cpu(fqd);
+ if (res != QM_MCR_RESULT_OK)
+ return -EIO;
+ return 0;
+}
+
+int qman_query_fq_has_pkts(struct qman_fq *fq)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+
+ int ret = 0;
+ u8 res;
+
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK)
+ ret = !!mcr->queryfq_np.frm_cnt;
+ return ret;
+}
+
+int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+
+ u8 res;
+
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK) {
+ *np = mcr->queryfq_np;
+ np->fqd_link = be24_to_cpu(np->fqd_link);
+ np->odp_seq = be16_to_cpu(np->odp_seq);
+ np->orp_nesn = be16_to_cpu(np->orp_nesn);
+ np->orp_ea_hseq = be16_to_cpu(np->orp_ea_hseq);
+ np->orp_ea_tseq = be16_to_cpu(np->orp_ea_tseq);
+ np->orp_ea_hptr = be24_to_cpu(np->orp_ea_hptr);
+ np->orp_ea_tptr = be24_to_cpu(np->orp_ea_tptr);
+ np->pfdr_hptr = be24_to_cpu(np->pfdr_hptr);
+ np->pfdr_tptr = be24_to_cpu(np->pfdr_tptr);
+ np->ics_surp = be16_to_cpu(np->ics_surp);
+ np->byte_cnt = be32_to_cpu(np->byte_cnt);
+ np->frm_cnt = be24_to_cpu(np->frm_cnt);
+ np->ra1_sfdr = be16_to_cpu(np->ra1_sfdr);
+ np->ra2_sfdr = be16_to_cpu(np->ra2_sfdr);
+ np->od1_sfdr = be16_to_cpu(np->od1_sfdr);
+ np->od2_sfdr = be16_to_cpu(np->od2_sfdr);
+ np->od3_sfdr = be16_to_cpu(np->od3_sfdr);
+ }
+ if (res == QM_MCR_RESULT_ERR_FQID)
+ return -ERANGE;
+ else if (res != QM_MCR_RESULT_OK)
+ return -EIO;
+ return 0;
+}
+
+int qman_query_fq_frm_cnt(struct qman_fq *fq, u32 *frm_cnt)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+
+ if (mcr->result == QM_MCR_RESULT_OK)
+ *frm_cnt = be24_to_cpu(mcr->queryfq_np.frm_cnt);
+ else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
+ return -ERANGE;
+ else if (mcr->result != QM_MCR_RESULT_OK)
+ return -EIO;
+ return 0;
+}
+
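+/*
+ * Usage sketch (illustrative): checking the backlog on an FQ,
+ *
+ *	u32 cnt;
+ *
+ *	if (qman_query_fq_frm_cnt(&my_fq, &cnt) == 0)
+ *		pr_info("FQ 0x%x holds %u frames\n", my_fq.fqid, cnt);
+ */
+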
+int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+
+ u8 res, myverb;
+
+ myverb = (query_dedicated) ? QM_MCR_VERB_QUERYWQ_DEDICATED :
+ QM_MCR_VERB_QUERYWQ;
+ mcc = qm_mc_start(&p->p);
+ mcc->querywq.channel.id = cpu_to_be16(wq->channel.id);
+ qm_mc_commit(&p->p, myverb);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK) {
+ int i, array_len;
+
+ wq->channel.id = be16_to_cpu(mcr->querywq.channel.id);
+ array_len = ARRAY_SIZE(mcr->querywq.wq_len);
+ for (i = 0; i < array_len; i++)
+ wq->wq_len[i] = be32_to_cpu(mcr->querywq.wq_len[i]);
+ }
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("QUERYWQ failed: %s\n", mcr_result_str(res));
+ return -EIO;
+ }
+ return 0;
+}
+
+int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
+ struct qm_mcr_cgrtestwrite *result)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+
+ u8 res;
+
+ mcc = qm_mc_start(&p->p);
+ mcc->cgrtestwrite.cgid = cgr->cgrid;
+ mcc->cgrtestwrite.i_bcnt_hi = (u8)(i_bcnt >> 32);
+ mcc->cgrtestwrite.i_bcnt_lo = (u32)i_bcnt;
+ qm_mc_commit(&p->p, QM_MCC_VERB_CGRTESTWRITE);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_CGRTESTWRITE);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK)
+ *result = mcr->cgrtestwrite;
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("CGR TEST WRITE failed: %s\n", mcr_result_str(res));
+ return -EIO;
+ }
+ return 0;
+}
+
+int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *cgrd)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ u8 res;
+ unsigned int i;
+
+ mcc = qm_mc_start(&p->p);
+ mcc->querycgr.cgid = cgr->cgrid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK)
+ *cgrd = mcr->querycgr;
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("QUERY_CGR failed: %s\n", mcr_result_str(res));
+ return -EIO;
+ }
+ cgrd->cgr.wr_parm_g.word =
+ be32_to_cpu(cgrd->cgr.wr_parm_g.word);
+ cgrd->cgr.wr_parm_y.word =
+ be32_to_cpu(cgrd->cgr.wr_parm_y.word);
+ cgrd->cgr.wr_parm_r.word =
+ be32_to_cpu(cgrd->cgr.wr_parm_r.word);
+ cgrd->cgr.cscn_targ = be32_to_cpu(cgrd->cgr.cscn_targ);
+ cgrd->cgr.__cs_thres = be16_to_cpu(cgrd->cgr.__cs_thres);
+ for (i = 0; i < ARRAY_SIZE(cgrd->cscn_targ_swp); i++)
+ cgrd->cscn_targ_swp[i] =
+ be32_to_cpu(cgrd->cscn_targ_swp[i]);
+ return 0;
+}
+
+int qman_query_congestion(struct qm_mcr_querycongestion *congestion)
+{
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ u8 res;
+ unsigned int i;
+
+ qm_mc_start(&p->p);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCC_VERB_QUERYCONGESTION);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK)
+ *congestion = mcr->querycongestion;
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("QUERY_CONGESTION failed: %s\n", mcr_result_str(res));
+ return -EIO;
+ }
+ for (i = 0; i < ARRAY_SIZE(congestion->state.state); i++)
+ congestion->state.state[i] =
+ be32_to_cpu(congestion->state.state[i]);
+ return 0;
+}
+
+int qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags)
+{
+ struct qman_portal *p = get_affine_portal();
+ uint32_t vdqcr;
+ int ret = -EBUSY;
+
+ vdqcr = vdqcr_flags;
+ vdqcr |= QM_VDQCR_NUMFRAMES_SET(num);
+
+ if ((fq->state != qman_fq_state_parked) &&
+ (fq->state != qman_fq_state_retired)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
+
+ if (!p->vdqcr_owned) {
+ FQLOCK(fq);
+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+ goto escape;
+ fq_set(fq, QMAN_FQ_STATE_VDQCR);
+ FQUNLOCK(fq);
+ p->vdqcr_owned = fq;
+ ret = 0;
+ }
+escape:
+ if (!ret)
+ qm_dqrr_vdqcr_set(&p->p, vdqcr);
+
+out:
+ return ret;
+}
+
+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags __maybe_unused,
+ u32 vdqcr)
+{
+ struct qman_portal *p;
+ int ret = -EBUSY;
+
+ if ((fq->state != qman_fq_state_parked) &&
+ (fq->state != qman_fq_state_retired))
+ return -EINVAL;
+ if (vdqcr & QM_VDQCR_FQID_MASK)
+ return -EINVAL;
+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+ return -EBUSY;
+ vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
+
+ p = get_affine_portal();
+
+ if (!p->vdqcr_owned) {
+ FQLOCK(fq);
+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+ goto escape;
+ fq_set(fq, QMAN_FQ_STATE_VDQCR);
+ FQUNLOCK(fq);
+ p->vdqcr_owned = fq;
+ ret = 0;
+ }
+escape:
+ if (ret)
+ return ret;
+
+ /* VDQCR is set */
+ qm_dqrr_vdqcr_set(&p->p, vdqcr);
+ return 0;
+}
+
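+/*
+ * Usage sketch (illustrative): draining a retired FQ with a volatile
+ * dequeue command, assuming the QM_VDQCR_NUMFRAMES_TILLEMPTY encoding from
+ * fsl_qman.h,
+ *
+ *	while (qman_volatile_dequeue(&my_fq, 0,
+ *				     QM_VDQCR_NUMFRAMES_TILLEMPTY) == -EBUSY)
+ *		cpu_relax();
+ *
+ * Completion is signalled by a DQRR entry carrying
+ * QM_DQRR_STAT_DQCR_EXPIRED, at which point clear_vdqcr() releases the
+ * portal's VDQCR ownership.
+ */
+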
+static noinline void update_eqcr_ci(struct qman_portal *p, u8 avail)
+{
+ if (avail)
+ qm_eqcr_cce_prefetch(&p->p);
+ else
+ qm_eqcr_cce_update(&p->p);
+}
+
+int qman_eqcr_is_empty(void)
+{
+ struct qman_portal *p = get_affine_portal();
+ u8 avail;
+
+ update_eqcr_ci(p, 0);
+ avail = qm_eqcr_get_fill(&p->p);
+ return (avail == 0);
+}
+
+void qman_set_dc_ern(qman_cb_dc_ern handler, int affine)
+{
+ if (affine) {
+ struct qman_portal *p = get_affine_portal();
+
+ p->cb_dc_ern = handler;
+ } else
+ cb_dc_ern = handler;
+}
+
+static inline struct qm_eqcr_entry *try_p_eq_start(struct qman_portal *p,
+ struct qman_fq *fq,
+ const struct qm_fd *fd,
+ u32 flags)
+{
+ struct qm_eqcr_entry *eq;
+ u8 avail;
+
+ if (p->use_eqcr_ci_stashing) {
+ /*
+ * The stashing case is easy, only update if we need to in
+ * order to try and liberate ring entries.
+ */
+ eq = qm_eqcr_start_stash(&p->p);
+ } else {
+ /*
+ * The non-stashing case is harder, need to prefetch ahead of
+ * time.
+ */
+ avail = qm_eqcr_get_avail(&p->p);
+ if (avail < 2)
+ update_eqcr_ci(p, avail);
+ eq = qm_eqcr_start_no_stash(&p->p);
+ }
+
+ if (unlikely(!eq))
+ return NULL;
+
+ if (flags & QMAN_ENQUEUE_FLAG_DCA)
+ eq->dca = QM_EQCR_DCA_ENABLE |
+ ((flags & QMAN_ENQUEUE_FLAG_DCA_PARK) ?
+ QM_EQCR_DCA_PARK : 0) |
+ ((flags >> 8) & QM_EQCR_DCA_IDXMASK);
+ eq->fqid = cpu_to_be32(fq->fqid);
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ eq->tag = cpu_to_be32(fq->key);
+#else
+ eq->tag = cpu_to_be32((u32)(uintptr_t)fq);
+#endif
+ eq->fd = *fd;
+ cpu_to_hw_fd(&eq->fd);
+ return eq;
+}
+
+int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags)
+{
+ struct qman_portal *p = get_affine_portal();
+ struct qm_eqcr_entry *eq;
+
+ eq = try_p_eq_start(p, fq, fd, flags);
+ if (!eq)
+ return -EBUSY;
+ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
+ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
+ /* Factor the below out, it's used from qman_enqueue_orp() too */
+ return 0;
+}
+
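+/*
+ * Usage sketch (illustrative): qman_enqueue() returns -EBUSY while the EQCR
+ * is full, so callers typically retry,
+ *
+ *	while (qman_enqueue(&my_fq, &fd, 0) == -EBUSY)
+ *		cpu_relax();
+ */
+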
+int qman_enqueue_multi(struct qman_fq *fq,
+ const struct qm_fd *fd, u32 *flags,
+ int frames_to_send)
+{
+ struct qman_portal *p = get_affine_portal();
+ struct qm_portal *portal = &p->p;
+
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ struct qm_eqcr_entry *eq = eqcr->cursor, *prev_eq;
+
+ u8 i = 0, diff, old_ci, sent = 0;
+
+ /* Update the available entries if no entry is free */
+ if (!eqcr->available) {
+ old_ci = eqcr->ci;
+ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
+ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ if (!diff)
+ return 0;
+ }
+
+ /* try to send as many frames as possible */
+ while (eqcr->available && frames_to_send--) {
+ eq->fqid = fq->fqid_le;
+ eq->fd.opaque_addr = fd->opaque_addr;
+ eq->fd.addr = cpu_to_be40(fd->addr);
+ eq->fd.status = cpu_to_be32(fd->status);
+ eq->fd.opaque = cpu_to_be32(fd->opaque);
+ if (flags && (flags[i] & QMAN_ENQUEUE_FLAG_DCA)) {
+ eq->dca = QM_EQCR_DCA_ENABLE |
+ ((flags[i] >> 8) & QM_EQCR_DCA_IDXMASK);
+ }
+ i++;
+ eq = (void *)((unsigned long)(eq + 1) &
+ (~(unsigned long)(QM_EQCR_SIZE << 6)));
+ eqcr->available--;
+ sent++;
+ fd++;
+ }
+ lwsync();
+
+	/* In order for the flushes to complete faster, the verb is first
+	 * recorded in all lines, and the lines are then flushed together
+	 * below.
+ */
+ eq = eqcr->cursor;
+ for (i = 0; i < sent; i++) {
+ eq->__dont_write_directly__verb =
+ QM_EQCR_VERB_CMD_ENQUEUE | eqcr->vbit;
+ prev_eq = eq;
+ eq = (void *)((unsigned long)(eq + 1) &
+ (~(unsigned long)(QM_EQCR_SIZE << 6)));
+ if (unlikely((prev_eq + 1) != eq))
+ eqcr->vbit ^= QM_EQCR_VERB_VBIT;
+ }
+
+ /* We need to flush all the lines but without load/store operations
+ * between them
+ */
+ eq = eqcr->cursor;
+ for (i = 0; i < sent; i++) {
+ dcbf(eq);
+ eq = (void *)((unsigned long)(eq + 1) &
+ (~(unsigned long)(QM_EQCR_SIZE << 6)));
+ }
+ /* Update cursor for the next call */
+ eqcr->cursor = eq;
+ return sent;
+}
+
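+/*
+ * Usage sketch (illustrative): the burst API returns how many frames were
+ * actually placed in the EQCR, so the caller loops over the remainder,
+ *
+ *	int sent = 0;
+ *
+ *	while (sent < n)
+ *		sent += qman_enqueue_multi(&my_fq, &fds[sent], NULL,
+ *					   n - sent);
+ */
+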
+int
+qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
+ u32 *flags, int frames_to_send)
+{
+ struct qman_portal *p = get_affine_portal();
+ struct qm_portal *portal = &p->p;
+
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ struct qm_eqcr_entry *eq = eqcr->cursor, *prev_eq;
+
+ u8 i = 0, diff, old_ci, sent = 0;
+
+ /* Update the available entries if no entry is free */
+ if (!eqcr->available) {
+ old_ci = eqcr->ci;
+ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
+ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ if (!diff)
+ return 0;
+ }
+
+ /* try to send as many frames as possible */
+ while (eqcr->available && frames_to_send--) {
+ eq->fqid = fq[sent]->fqid_le;
+ eq->fd.opaque_addr = fd->opaque_addr;
+ eq->fd.addr = cpu_to_be40(fd->addr);
+ eq->fd.status = cpu_to_be32(fd->status);
+ eq->fd.opaque = cpu_to_be32(fd->opaque);
+ if (flags && (flags[i] & QMAN_ENQUEUE_FLAG_DCA)) {
+ eq->dca = QM_EQCR_DCA_ENABLE |
+ ((flags[i] >> 8) & QM_EQCR_DCA_IDXMASK);
+ }
+ i++;
+
+ eq = (void *)((unsigned long)(eq + 1) &
+ (~(unsigned long)(QM_EQCR_SIZE << 6)));
+ eqcr->available--;
+ sent++;
+ fd++;
+ }
+ lwsync();
+
+	/* In order for the flushes to complete faster, the verb is first
+	 * recorded in all lines, and the lines are then flushed together
+	 * below.
+ */
+ eq = eqcr->cursor;
+ for (i = 0; i < sent; i++) {
+ eq->__dont_write_directly__verb =
+ QM_EQCR_VERB_CMD_ENQUEUE | eqcr->vbit;
+ prev_eq = eq;
+ eq = (void *)((unsigned long)(eq + 1) &
+ (~(unsigned long)(QM_EQCR_SIZE << 6)));
+ if (unlikely((prev_eq + 1) != eq))
+ eqcr->vbit ^= QM_EQCR_VERB_VBIT;
+ }
+
+ /* We need to flush all the lines but without load/store operations
+ * between them
+ */
+ eq = eqcr->cursor;
+ for (i = 0; i < sent; i++) {
+ dcbf(eq);
+ eq = (void *)((unsigned long)(eq + 1) &
+ (~(unsigned long)(QM_EQCR_SIZE << 6)));
+ }
+ /* Update cursor for the next call */
+ eqcr->cursor = eq;
+ return sent;
+}
+
+int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
+ struct qman_fq *orp, u16 orp_seqnum)
+{
+ struct qman_portal *p = get_affine_portal();
+ struct qm_eqcr_entry *eq;
+
+ eq = try_p_eq_start(p, fq, fd, flags);
+ if (!eq)
+ return -EBUSY;
+ /* Process ORP-specifics here */
+ if (flags & QMAN_ENQUEUE_FLAG_NLIS)
+ orp_seqnum |= QM_EQCR_SEQNUM_NLIS;
+ else {
+ orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS;
+ if (flags & QMAN_ENQUEUE_FLAG_NESN)
+ orp_seqnum |= QM_EQCR_SEQNUM_NESN;
+ else
+			/* No need to check for QMAN_ENQUEUE_FLAG_HOLE */
+ orp_seqnum &= ~QM_EQCR_SEQNUM_NESN;
+ }
+ eq->seqnum = cpu_to_be16(orp_seqnum);
+ eq->orp = cpu_to_be32(orp->fqid);
+ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP |
+ ((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ?
+ 0 : QM_EQCR_VERB_CMD_ENQUEUE) |
+ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
+
+ return 0;
+}
+
+int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+
+ u8 res;
+ u8 verb = QM_MCC_VERB_MODIFYCGR;
+
+ mcc = qm_mc_start(&p->p);
+ if (opts)
+ mcc->initcgr = *opts;
+ mcc->initcgr.we_mask = cpu_to_be16(mcc->initcgr.we_mask);
+ mcc->initcgr.cgr.wr_parm_g.word =
+ cpu_to_be32(mcc->initcgr.cgr.wr_parm_g.word);
+ mcc->initcgr.cgr.wr_parm_y.word =
+ cpu_to_be32(mcc->initcgr.cgr.wr_parm_y.word);
+ mcc->initcgr.cgr.wr_parm_r.word =
+ cpu_to_be32(mcc->initcgr.cgr.wr_parm_r.word);
+ mcc->initcgr.cgr.cscn_targ = cpu_to_be32(mcc->initcgr.cgr.cscn_targ);
+ mcc->initcgr.cgr.__cs_thres = cpu_to_be16(mcc->initcgr.cgr.__cs_thres);
+
+ mcc->initcgr.cgid = cgr->cgrid;
+ if (flags & QMAN_CGR_FLAG_USE_INIT)
+ verb = QM_MCC_VERB_INITCGR;
+ qm_mc_commit(&p->p, verb);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
+ res = mcr->result;
+ return (res == QM_MCR_RESULT_OK) ? 0 : -EIO;
+}
+
+#define TARG_MASK(n) (0x80000000 >> (n->config->channel - \
+ QM_CHANNEL_SWPORTAL0))
+#define TARG_DCP_MASK(n) (0x80000000 >> (10 + n))
+#define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0)
+
+int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts)
+{
+ struct qm_mcr_querycgr cgr_state;
+ struct qm_mcc_initcgr local_opts;
+ int ret;
+ struct qman_portal *p;
+
+ /* We have to check that the provided CGRID is within the limits of the
+ * data-structures, for obvious reasons. However we'll let h/w take
+ * care of determining whether it's within the limits of what exists on
+ * the SoC.
+ */
+ if (cgr->cgrid >= __CGR_NUM)
+ return -EINVAL;
+
+ p = get_affine_portal();
+
+ memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+ cgr->chan = p->config->channel;
+ spin_lock(&p->cgr_lock);
+
+ /* if no opts specified, just add it to the list */
+ if (!opts)
+ goto add_list;
+
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret)
+ goto release_lock;
+ if (opts)
+ local_opts = *opts;
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ local_opts.cgr.cscn_targ_upd_ctrl =
+ QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
+ else
+ /* Overwrite TARG */
+ local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
+ TARG_MASK(p);
+ local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
+
+ /* send init if flags indicate so */
+ if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
+ ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &local_opts);
+ else
+ ret = qman_modify_cgr(cgr, 0, &local_opts);
+ if (ret)
+ goto release_lock;
+add_list:
+ list_add(&cgr->node, &p->cgr_cbs);
+
+ /* Determine if newly added object requires its callback to be called */
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret) {
+		/* we can't go back, so proceed and return success, but scream
+		 * and wail to the log file.
+ */
+ pr_crit("CGR HW state partially modified\n");
+ ret = 0;
+ goto release_lock;
+ }
+ if (cgr->cb && cgr_state.cgr.cscn_en && qman_cgrs_get(&p->cgrs[1],
+ cgr->cgrid))
+ cgr->cb(p, cgr, 1);
+release_lock:
+ spin_unlock(&p->cgr_lock);
+ return ret;
+}
+
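+/*
+ * Usage sketch (illustrative, callback name hypothetical): enabling CSCN
+ * notifications on a CGR through this portal,
+ *
+ *	struct qman_cgr cgr = { .cgrid = my_cgrid, .cb = my_cscn_cb };
+ *	struct qm_mcc_initcgr opts;
+ *	int err;
+ *
+ *	memset(&opts, 0, sizeof(opts));
+ *	opts.we_mask = QM_CGR_WE_CSCN_EN;
+ *	opts.cgr.cscn_en = QM_CGR_EN;
+ *	err = qman_create_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
+ */
+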
+int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
+ struct qm_mcc_initcgr *opts)
+{
+ struct qm_mcc_initcgr local_opts;
+ struct qm_mcr_querycgr cgr_state;
+ int ret;
+
+ if ((qman_ip_rev & 0xFF00) < QMAN_REV30) {
+ pr_warn("QMan version doesn't support CSCN => DCP portal\n");
+ return -EINVAL;
+ }
+ /* We have to check that the provided CGRID is within the limits of the
+ * data-structures, for obvious reasons. However we'll let h/w take
+ * care of determining whether it's within the limits of what exists on
+ * the SoC.
+ */
+ if (cgr->cgrid >= __CGR_NUM)
+ return -EINVAL;
+
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret)
+ return ret;
+
+ memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+ if (opts)
+ local_opts = *opts;
+
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ local_opts.cgr.cscn_targ_upd_ctrl =
+ QM_CGR_TARG_UDP_CTRL_WRITE_BIT |
+ QM_CGR_TARG_UDP_CTRL_DCP | dcp_portal;
+ else
+ local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
+ TARG_DCP_MASK(dcp_portal);
+ local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
+
+ /* send init if flags indicate so */
+ if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
+ ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
+ &local_opts);
+ else
+ ret = qman_modify_cgr(cgr, 0, &local_opts);
+
+ return ret;
+}
+
+int qman_delete_cgr(struct qman_cgr *cgr)
+{
+ struct qm_mcr_querycgr cgr_state;
+ struct qm_mcc_initcgr local_opts;
+ int ret = 0;
+ struct qman_cgr *i;
+ struct qman_portal *p = get_affine_portal();
+
+ if (cgr->chan != p->config->channel) {
+		pr_crit("Attempting to delete cgr from a different portal than"
+			" it was created on: create 0x%x, delete 0x%x\n",
+ cgr->chan, p->config->channel);
+ ret = -EINVAL;
+ goto put_portal;
+ }
+ memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+ spin_lock(&p->cgr_lock);
+ list_del(&cgr->node);
+ /*
+ * If there are no other CGR objects for this CGRID in the list,
+ * update CSCN_TARG accordingly
+ */
+ list_for_each_entry(i, &p->cgr_cbs, node)
+ if ((i->cgrid == cgr->cgrid) && i->cb)
+ goto release_lock;
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret) {
+ /* add back to the list */
+ list_add(&cgr->node, &p->cgr_cbs);
+ goto release_lock;
+ }
+ /* Overwrite TARG */
+ local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
+ else
+ local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
+ ~(TARG_MASK(p));
+ ret = qman_modify_cgr(cgr, 0, &local_opts);
+ if (ret)
+ /* add back to the list */
+ list_add(&cgr->node, &p->cgr_cbs);
+release_lock:
+ spin_unlock(&p->cgr_lock);
+put_portal:
+ return ret;
+}
+
+int qman_shutdown_fq(u32 fqid)
+{
+ struct qman_portal *p;
+ struct qm_portal *low_p;
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ u8 state;
+ int orl_empty, fq_empty, drain = 0;
+ u32 result;
+ u32 channel, wq;
+ u16 dest_wq;
+
+ p = get_affine_portal();
+ low_p = &p->p;
+
+ /* Determine the state of the FQID */
+ mcc = qm_mc_start(low_p);
+ mcc->queryfq_np.fqid = cpu_to_be32(fqid);
+ qm_mc_commit(low_p, QM_MCC_VERB_QUERYFQ_NP);
+ while (!(mcr = qm_mc_result(low_p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+ state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
+ if (state == QM_MCR_NP_STATE_OOS)
+ return 0; /* Already OOS, no need to do anymore checks */
+
+ /* Query which channel the FQ is using */
+ mcc = qm_mc_start(low_p);
+ mcc->queryfq.fqid = cpu_to_be32(fqid);
+ qm_mc_commit(low_p, QM_MCC_VERB_QUERYFQ);
+ while (!(mcr = qm_mc_result(low_p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+
+ /* Need to store these since the MCR gets reused */
+ dest_wq = be16_to_cpu(mcr->queryfq.fqd.dest_wq);
+ channel = dest_wq & 0x7;
+ wq = dest_wq >> 3;
+
+ switch (state) {
+ case QM_MCR_NP_STATE_TEN_SCHED:
+ case QM_MCR_NP_STATE_TRU_SCHED:
+ case QM_MCR_NP_STATE_ACTIVE:
+ case QM_MCR_NP_STATE_PARKED:
+ orl_empty = 0;
+ mcc = qm_mc_start(low_p);
+ mcc->alterfq.fqid = cpu_to_be32(fqid);
+ qm_mc_commit(low_p, QM_MCC_VERB_ALTER_RETIRE);
+ while (!(mcr = qm_mc_result(low_p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_RETIRE);
+ result = mcr->result; /* Make a copy as we reuse MCR below */
+
+ if (result == QM_MCR_RESULT_PENDING) {
+ /* Need to wait for the FQRN in the message ring, which
+ * will only occur once the FQ has been drained. In
+			 * order for the FQ to drain, the portal needs to be
+			 * set to dequeue from the channel the FQ is scheduled
+			 * on.
+ */
+ const struct qm_mr_entry *msg;
+ const struct qm_dqrr_entry *dqrr = NULL;
+ int found_fqrn = 0;
+ __maybe_unused u16 dequeue_wq = 0;
+
+ /* Flag that we need to drain FQ */
+ drain = 1;
+
+ if (channel >= qm_channel_pool1 &&
+ channel < (u16)(qm_channel_pool1 + 15)) {
+ /* Pool channel, enable the bit in the portal */
+ dequeue_wq = (channel -
+ qm_channel_pool1 + 1) << 4 | wq;
+ } else if (channel < qm_channel_pool1) {
+ /* Dedicated channel */
+ dequeue_wq = wq;
+ } else {
+ pr_info("Cannot recover FQ 0x%x,"
+ " it is scheduled on channel 0x%x",
+ fqid, channel);
+ return -EBUSY;
+ }
+ /* Set the sdqcr to drain this channel */
+ if (channel < qm_channel_pool1)
+ qm_dqrr_sdqcr_set(low_p,
+ QM_SDQCR_TYPE_ACTIVE |
+ QM_SDQCR_CHANNELS_DEDICATED);
+ else
+ qm_dqrr_sdqcr_set(low_p,
+ QM_SDQCR_TYPE_ACTIVE |
+ QM_SDQCR_CHANNELS_POOL_CONV
+ (channel));
+ while (!found_fqrn) {
+ /* Keep draining DQRR while checking the MR*/
+ qm_dqrr_pvb_update(low_p);
+ dqrr = qm_dqrr_current(low_p);
+ while (dqrr) {
+ qm_dqrr_cdc_consume_1ptr(
+ low_p, dqrr, 0);
+ qm_dqrr_pvb_update(low_p);
+ qm_dqrr_next(low_p);
+ dqrr = qm_dqrr_current(low_p);
+ }
+ /* Process message ring too */
+ qm_mr_pvb_update(low_p);
+ msg = qm_mr_current(low_p);
+ while (msg) {
+ if ((msg->ern.verb &
+ QM_MR_VERB_TYPE_MASK)
+ == QM_MR_VERB_FQRN)
+ found_fqrn = 1;
+ qm_mr_next(low_p);
+ qm_mr_cci_consume_to_current(low_p);
+ qm_mr_pvb_update(low_p);
+ msg = qm_mr_current(low_p);
+ }
+ cpu_relax();
+ }
+ }
+ if (result != QM_MCR_RESULT_OK &&
+ result != QM_MCR_RESULT_PENDING) {
+ /* error */
+ pr_err("qman_retire_fq failed on FQ 0x%x,"
+ " result=0x%x\n", fqid, result);
+ return -1;
+ }
+ if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
+ /* ORL had no entries, no need to wait until the
+ * ERNs come in.
+ */
+ orl_empty = 1;
+ }
+ /* Retirement succeeded, check to see if FQ needs
+ * to be drained.
+ */
+ if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
+ /* FQ is Not Empty, drain using volatile DQ commands */
+ fq_empty = 0;
+ do {
+ const struct qm_dqrr_entry *dqrr = NULL;
+ u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);
+
+ qm_dqrr_vdqcr_set(low_p, vdqcr);
+
+ /* Wait for a dequeue to occur */
+ while (dqrr == NULL) {
+ qm_dqrr_pvb_update(low_p);
+ dqrr = qm_dqrr_current(low_p);
+ if (!dqrr)
+ cpu_relax();
+ }
+ /* Process the dequeues, making sure to
+ * empty the ring completely.
+ */
+ while (dqrr) {
+ if (dqrr->fqid == fqid &&
+ dqrr->stat & QM_DQRR_STAT_FQ_EMPTY)
+ fq_empty = 1;
+ qm_dqrr_cdc_consume_1ptr(low_p,
+ dqrr, 0);
+ qm_dqrr_pvb_update(low_p);
+ qm_dqrr_next(low_p);
+ dqrr = qm_dqrr_current(low_p);
+ }
+ } while (fq_empty == 0);
+ }
+ qm_dqrr_sdqcr_set(low_p, 0);
+
+ /* Wait for the ORL to have been completely drained */
+ while (orl_empty == 0) {
+ const struct qm_mr_entry *msg;
+
+ qm_mr_pvb_update(low_p);
+ msg = qm_mr_current(low_p);
+ while (msg) {
+ if ((msg->ern.verb & QM_MR_VERB_TYPE_MASK) ==
+ QM_MR_VERB_FQRL)
+ orl_empty = 1;
+ qm_mr_next(low_p);
+ qm_mr_cci_consume_to_current(low_p);
+ qm_mr_pvb_update(low_p);
+ msg = qm_mr_current(low_p);
+ }
+ cpu_relax();
+ }
+ mcc = qm_mc_start(low_p);
+ mcc->alterfq.fqid = cpu_to_be32(fqid);
+ qm_mc_commit(low_p, QM_MCC_VERB_ALTER_OOS);
+ while (!(mcr = qm_mc_result(low_p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_OOS);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ pr_err(
+ "OOS after drain Failed on FQID 0x%x, result 0x%x\n",
+ fqid, mcr->result);
+ return -1;
+ }
+ return 0;
+
+ case QM_MCR_NP_STATE_RETIRED:
+ /* Send OOS Command */
+ mcc = qm_mc_start(low_p);
+ mcc->alterfq.fqid = cpu_to_be32(fqid);
+ qm_mc_commit(low_p, QM_MCC_VERB_ALTER_OOS);
+ while (!(mcr = qm_mc_result(low_p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_OOS);
+ if (mcr->result) {
+ pr_err("OOS Failed on FQID 0x%x\n", fqid);
+ return -1;
+ }
+ return 0;
+
+ }
+ return -1;
+}
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman.h b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman.h
new file mode 100644
index 000000000..4346d8653
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman.h
@@ -0,0 +1,913 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ */
+
+#include "qman_priv.h"
+
+/***************************/
+/* Portal register assists */
+/***************************/
+#define QM_REG_EQCR_PI_CINH 0x3000
+#define QM_REG_EQCR_CI_CINH 0x3040
+#define QM_REG_EQCR_ITR 0x3080
+#define QM_REG_DQRR_PI_CINH 0x3100
+#define QM_REG_DQRR_CI_CINH 0x3140
+#define QM_REG_DQRR_ITR 0x3180
+#define QM_REG_DQRR_DCAP 0x31C0
+#define QM_REG_DQRR_SDQCR 0x3200
+#define QM_REG_DQRR_VDQCR 0x3240
+#define QM_REG_DQRR_PDQCR 0x3280
+#define QM_REG_MR_PI_CINH 0x3300
+#define QM_REG_MR_CI_CINH 0x3340
+#define QM_REG_MR_ITR 0x3380
+#define QM_REG_CFG 0x3500
+#define QM_REG_ISR 0x3600
+#define QM_REG_IIR 0x36C0
+#define QM_REG_ITPR 0x3740
+
+/* Cache-enabled register offsets */
+#define QM_CL_EQCR 0x0000
+#define QM_CL_DQRR 0x1000
+#define QM_CL_MR 0x2000
+#define QM_CL_EQCR_PI_CENA 0x3000
+#define QM_CL_EQCR_CI_CENA 0x3040
+#define QM_CL_DQRR_PI_CENA 0x3100
+#define QM_CL_DQRR_CI_CENA 0x3140
+#define QM_CL_MR_PI_CENA 0x3300
+#define QM_CL_MR_CI_CENA 0x3340
+#define QM_CL_CR 0x3800
+#define QM_CL_RR0 0x3900
+#define QM_CL_RR1 0x3940
+
+/* NB: the drivers (and the h/w programming model) already obtain the required
+ * synchronisation for portal accesses via lwsync(), hwsync(), and
+ * data-dependencies. Use of barrier()s or other order-preserving primitives
+ * would simply degrade performance. Hence the use of the __raw_*() interfaces,
+ * which only ensure that the compiler treats the portal registers as volatile
+ * (ie. non-coherent).
+ */
+
+/* Cache-inhibited register access. */
+#define __qm_in(qm, o) be32_to_cpu(__raw_readl((qm)->ci + (o)))
+#define __qm_out(qm, o, val) __raw_writel((cpu_to_be32(val)), \
+ (qm)->ci + (o))
+#define qm_in(reg) __qm_in(&portal->addr, QM_REG_##reg)
+#define qm_out(reg, val) __qm_out(&portal->addr, QM_REG_##reg, val)
+
+/* Cache-enabled (index) register access */
+#define __qm_cl_touch_ro(qm, o) dcbt_ro((qm)->ce + (o))
+#define __qm_cl_touch_rw(qm, o) dcbt_rw((qm)->ce + (o))
+#define __qm_cl_in(qm, o) be32_to_cpu(__raw_readl((qm)->ce + (o)))
+#define __qm_cl_out(qm, o, val) \
+ do { \
+ u32 *__tmpclout = (qm)->ce + (o); \
+ __raw_writel(cpu_to_be32(val), __tmpclout); \
+ dcbf(__tmpclout); \
+ } while (0)
+#define __qm_cl_invalidate(qm, o) dccivac((qm)->ce + (o))
+#define qm_cl_touch_ro(reg) __qm_cl_touch_ro(&portal->addr, QM_CL_##reg##_CENA)
+#define qm_cl_touch_rw(reg) __qm_cl_touch_rw(&portal->addr, QM_CL_##reg##_CENA)
+#define qm_cl_in(reg) __qm_cl_in(&portal->addr, QM_CL_##reg##_CENA)
+#define qm_cl_out(reg, val) __qm_cl_out(&portal->addr, QM_CL_##reg##_CENA, val)
+#define qm_cl_invalidate(reg)\
+ __qm_cl_invalidate(&portal->addr, QM_CL_##reg##_CENA)
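+
+/* For example, qm_out(EQCR_ITR, val) expands to
+ * __qm_out(&portal->addr, QM_REG_EQCR_ITR, val), i.e. a cpu_to_be32() store
+ * into the cache-inhibited region; the qm_cl_*() accessors paste tokens the
+ * same way against the QM_CL_* cache-enabled offsets.
+ */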
+
+/* Cache-enabled ring access */
+#define qm_cl(base, idx) ((void *)base + ((idx) << 6))
+
+/* Cyclic helper for rings. FIXME: once we are able to do fine-grained perf
+ * analysis, look at using the "extra" bit in the ring index registers to avoid
+ * cyclic issues.
+ */
+static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
+{
+ /* 'first' is included, 'last' is excluded */
+ if (first <= last)
+ return last - first;
+ return ringsize + last - first;
+}
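+
+/* Worked example (illustrative only): with ringsize == 8, first == 6 and
+ * last == 2, the index has wrapped, so qm_cyc_diff() returns
+ * 8 + 2 - 6 == 4, covering entries 6, 7, 0 and 1.
+ */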
+
+/* Portal modes.
+ * Enum types:
+ * pmode == production mode
+ * cmode == consumption mode
+ * dmode == h/w dequeue mode.
+ * Enum values use 3 letter codes. First letter matches the portal mode,
+ * remaining two letters indicate:
+ * ci == cache-inhibited portal register
+ * ce == cache-enabled portal register
+ * vb == in-band valid-bit (cache-enabled)
+ * dc == DCA (Discrete Consumption Acknowledgment), DQRR-only
+ * As for "enum qm_dqrr_dmode", it should be self-explanatory.
+ */
+enum qm_eqcr_pmode { /* matches QCSP_CFG::EPM */
+ qm_eqcr_pci = 0, /* PI index, cache-inhibited */
+ qm_eqcr_pce = 1, /* PI index, cache-enabled */
+ qm_eqcr_pvb = 2 /* valid-bit */
+};
+
+enum qm_dqrr_dmode { /* matches QCSP_CFG::DP */
+ qm_dqrr_dpush = 0, /* SDQCR + VDQCR */
+ qm_dqrr_dpull = 1 /* PDQCR */
+};
+
+enum qm_dqrr_pmode { /* s/w-only */
+ qm_dqrr_pci, /* reads DQRR_PI_CINH */
+ qm_dqrr_pce, /* reads DQRR_PI_CENA */
+ qm_dqrr_pvb /* reads valid-bit */
+};
+
+enum qm_dqrr_cmode { /* matches QCSP_CFG::DCM */
+ qm_dqrr_cci = 0, /* CI index, cache-inhibited */
+ qm_dqrr_cce = 1, /* CI index, cache-enabled */
+ qm_dqrr_cdc = 2 /* Discrete Consumption Acknowledgment */
+};
+
+enum qm_mr_pmode { /* s/w-only */
+ qm_mr_pci, /* reads MR_PI_CINH */
+ qm_mr_pce, /* reads MR_PI_CENA */
+ qm_mr_pvb /* reads valid-bit */
+};
+
+enum qm_mr_cmode { /* matches QCSP_CFG::MM */
+ qm_mr_cci = 0, /* CI index, cache-inhibited */
+ qm_mr_cce = 1 /* CI index, cache-enabled */
+};
+
+/* ------------------------- */
+/* --- Portal structures --- */
+
+#define QM_EQCR_SIZE 8
+#define QM_DQRR_SIZE 16
+#define QM_MR_SIZE 8
+
+struct qm_eqcr {
+ struct qm_eqcr_entry *ring, *cursor;
+ u8 ci, available, ithresh, vbit;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ u32 busy;
+ enum qm_eqcr_pmode pmode;
+#endif
+};
+
+struct qm_dqrr {
+ struct qm_dqrr_entry *ring, *cursor;
+ u8 pi, ci, fill, ithresh, vbit;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ enum qm_dqrr_dmode dmode;
+ enum qm_dqrr_pmode pmode;
+ enum qm_dqrr_cmode cmode;
+#endif
+};
+
+struct qm_mr {
+ const struct qm_mr_entry *ring, *cursor;
+ u8 pi, ci, fill, ithresh, vbit;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ enum qm_mr_pmode pmode;
+ enum qm_mr_cmode cmode;
+#endif
+};
+
+struct qm_mc {
+ struct qm_mc_command *cr;
+ struct qm_mc_result *rr;
+ u8 rridx, vbit;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ enum {
+ /* Can be _mc_start()ed */
+ qman_mc_idle,
+ /* Can be _mc_commit()ed or _mc_abort()ed */
+ qman_mc_user,
+ /* Can only be _mc_retry()ed */
+ qman_mc_hw
+ } state;
+#endif
+};
+
+#define QM_PORTAL_ALIGNMENT ____cacheline_aligned
+
+struct qm_addr {
+ void __iomem *ce; /* cache-enabled */
+ void __iomem *ci; /* cache-inhibited */
+};
+
+struct qm_portal {
+ struct qm_addr addr;
+ struct qm_eqcr eqcr;
+ struct qm_dqrr dqrr;
+ struct qm_mr mr;
+ struct qm_mc mc;
+} QM_PORTAL_ALIGNMENT;
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+#define EQCR_CARRYCLEAR(p) \
+ (void *)((unsigned long)(p) & (~(unsigned long)(QM_EQCR_SIZE << 6)))
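+
+/* Illustrative note: QM_EQCR_SIZE << 6 == 0x200, and the ring is aligned to
+ * its 0x200-byte span, so incrementing the cursor past the last entry sets
+ * the "carry bit" (bit 9); masking it off wraps the pointer to the base.
+ */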
+
+extern dma_addr_t rte_mem_virt2iova(const void *addr);
+
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static inline u8 EQCR_PTR2IDX(struct qm_eqcr_entry *e)
+{
+ return ((uintptr_t)e >> 6) & (QM_EQCR_SIZE - 1);
+}
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void EQCR_INC(struct qm_eqcr *eqcr)
+{
+ /* NB: this is odd-looking, but experiments show that it generates fast
+ * code with essentially no branching overheads. We increment to the
+ * next EQCR pointer and handle overflow and 'vbit'.
+ */
+ struct qm_eqcr_entry *partial = eqcr->cursor + 1;
+
+ eqcr->cursor = EQCR_CARRYCLEAR(partial);
+ if (partial != eqcr->cursor)
+ eqcr->vbit ^= QM_EQCR_VERB_VBIT;
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
+ *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(!eqcr->busy);
+#endif
+ if (!eqcr->available)
+ return NULL;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ eqcr->busy = 1;
+#endif
+
+ return eqcr->cursor;
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
+ *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 diff, old_ci;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(!eqcr->busy);
+#endif
+ if (!eqcr->available) {
+ old_ci = eqcr->ci;
+ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
+ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ if (!diff)
+ return NULL;
+ }
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ eqcr->busy = 1;
+#endif
+ return eqcr->cursor;
+}
+
+static inline void qm_eqcr_abort(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(eqcr->busy);
+ eqcr->busy = 0;
+#endif
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_pend_and_next(
+ struct qm_portal *portal, u8 myverb)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(eqcr->busy);
+ DPAA_ASSERT(eqcr->pmode != qm_eqcr_pvb);
+#endif
+ if (eqcr->available == 1)
+ return NULL;
+ eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
+ dcbf(eqcr->cursor);
+ EQCR_INC(eqcr);
+ eqcr->available--;
+ return eqcr->cursor;
+}
+
+#define EQCR_COMMIT_CHECKS(eqcr) \
+do { \
+ DPAA_ASSERT(eqcr->busy); \
+ DPAA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff)); \
+ DPAA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff)); \
+} while (0)
+
+static inline void qm_eqcr_pci_commit(struct qm_portal *portal, u8 myverb)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ EQCR_COMMIT_CHECKS(eqcr);
+ DPAA_ASSERT(eqcr->pmode == qm_eqcr_pci);
+#endif
+ eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
+ EQCR_INC(eqcr);
+ eqcr->available--;
+ dcbf(eqcr->cursor);
+ hwsync();
+ qm_out(EQCR_PI_CINH, EQCR_PTR2IDX(eqcr->cursor));
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ eqcr->busy = 0;
+#endif
+}
+
+static inline void qm_eqcr_pce_prefetch(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(eqcr->pmode == qm_eqcr_pce);
+#endif
+ qm_cl_invalidate(EQCR_PI);
+ qm_cl_touch_rw(EQCR_PI);
+}
+
+static inline void qm_eqcr_pce_commit(struct qm_portal *portal, u8 myverb)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ EQCR_COMMIT_CHECKS(eqcr);
+ DPAA_ASSERT(eqcr->pmode == qm_eqcr_pce);
+#endif
+ eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
+ EQCR_INC(eqcr);
+ eqcr->available--;
+ dcbf(eqcr->cursor);
+ lwsync();
+ qm_cl_out(EQCR_PI, EQCR_PTR2IDX(eqcr->cursor));
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ eqcr->busy = 0;
+#endif
+}
+
+static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ struct qm_eqcr_entry *eqcursor;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ EQCR_COMMIT_CHECKS(eqcr);
+ DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
+#endif
+ lwsync();
+ eqcursor = eqcr->cursor;
+ eqcursor->__dont_write_directly__verb = myverb | eqcr->vbit;
+ dcbf(eqcursor);
+ EQCR_INC(eqcr);
+ eqcr->available--;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ eqcr->busy = 0;
+#endif
+}
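+
+/* A minimal sketch of the valid-bit enqueue flow these helpers compose into
+ * (this mirrors the callers in qman.c; it is not itself driver code):
+ *
+ *	struct qm_eqcr_entry *eq = qm_eqcr_start_no_stash(portal);
+ *
+ *	if (eq) {
+ *		... fill in eq->fqid and the frame descriptor ...
+ *		qm_eqcr_pvb_commit(portal, QM_EQCR_VERB_CMD_ENQUEUE);
+ *	}
+ */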
+
+static inline u8 qm_eqcr_cci_update(struct qm_portal *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 diff, old_ci = eqcr->ci;
+
+ eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ return diff;
+}
+
+static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
+
+ qm_cl_touch_ro(EQCR_CI);
+}
+
+static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 diff, old_ci = eqcr->ci;
+
+ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
+ qm_cl_invalidate(EQCR_CI);
+ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ return diff;
+}
+
+static inline u8 qm_eqcr_get_ithresh(struct qm_portal *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+ return eqcr->ithresh;
+}
+
+static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+ eqcr->ithresh = ithresh;
+ qm_out(EQCR_ITR, ithresh);
+}
+
+static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+ return eqcr->available;
+}
+
+static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+ return QM_EQCR_SIZE - 1 - eqcr->available;
+}
+
+#define DQRR_CARRYCLEAR(p) \
+ (void *)((unsigned long)(p) & (~(unsigned long)(QM_DQRR_SIZE << 6)))
+
+static inline u8 DQRR_PTR2IDX(const struct qm_dqrr_entry *e)
+{
+ return ((uintptr_t)e >> 6) & (QM_DQRR_SIZE - 1);
+}
+
+static inline struct qm_dqrr_entry *DQRR_INC(
+ const struct qm_dqrr_entry *e)
+{
+ return DQRR_CARRYCLEAR(e + 1);
+}
+
+static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
+{
+ qm_out(CFG, (qm_in(CFG) & 0xff0fffff) |
+ ((mf & (QM_DQRR_SIZE - 1)) << 20));
+}
+
+static inline const struct qm_dqrr_entry *qm_dqrr_current(
+ struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ if (!dqrr->fill)
+ return NULL;
+ return dqrr->cursor;
+}
+
+static inline u8 qm_dqrr_cursor(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ return DQRR_PTR2IDX(dqrr->cursor);
+}
+
+static inline u8 qm_dqrr_next(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPAA_ASSERT(dqrr->fill);
+ dqrr->cursor = DQRR_INC(dqrr->cursor);
+ return --dqrr->fill;
+}
+
+static inline u8 qm_dqrr_pci_update(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ u8 diff, old_pi = dqrr->pi;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->pmode == qm_dqrr_pci);
+#endif
+ dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
+ diff = qm_cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi);
+ dqrr->fill += diff;
+ return diff;
+}
+
+static inline void qm_dqrr_pce_prefetch(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->pmode == qm_dqrr_pce);
+#endif
+ qm_cl_invalidate(DQRR_PI);
+ qm_cl_touch_ro(DQRR_PI);
+}
+
+static inline u8 qm_dqrr_pce_update(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ u8 diff, old_pi = dqrr->pi;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->pmode == qm_dqrr_pce);
+#endif
+ dqrr->pi = qm_cl_in(DQRR_PI) & (QM_DQRR_SIZE - 1);
+ diff = qm_cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi);
+ dqrr->fill += diff;
+ return diff;
+}
+
+static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ const struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
+#endif
+ /* when accessing 'verb', use __raw_readb() to ensure that compiler
+ * inlining doesn't try to optimise out "excess reads".
+ */
+ if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
+ dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
+ if (!dqrr->pi)
+ dqrr->vbit ^= QM_DQRR_VERB_VBIT;
+ dqrr->fill++;
+ }
+}
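+
+/* A sketch of the pvb-production/CDC-consumption polling pattern (the
+ * volatile-dequeue drain loop in qman.c has this same shape):
+ *
+ *	const struct qm_dqrr_entry *dq;
+ *
+ *	qm_dqrr_pvb_update(portal);
+ *	while ((dq = qm_dqrr_current(portal)) != NULL) {
+ *		... process *dq ...
+ *		qm_dqrr_cdc_consume_1ptr(portal, dq, 0);
+ *		qm_dqrr_next(portal);
+ *		qm_dqrr_pvb_update(portal);
+ *	}
+ */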
+
+static inline void qm_dqrr_cci_consume(struct qm_portal *portal, u8 num)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cci);
+#endif
+ dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1);
+ qm_out(DQRR_CI_CINH, dqrr->ci);
+}
+
+static inline void qm_dqrr_cci_consume_to_current(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cci);
+#endif
+ dqrr->ci = DQRR_PTR2IDX(dqrr->cursor);
+ qm_out(DQRR_CI_CINH, dqrr->ci);
+}
+
+static inline void qm_dqrr_cce_prefetch(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cce);
+#endif
+ qm_cl_invalidate(DQRR_CI);
+ qm_cl_touch_rw(DQRR_CI);
+}
+
+static inline void qm_dqrr_cce_consume(struct qm_portal *portal, u8 num)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cce);
+#endif
+ dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1);
+ qm_cl_out(DQRR_CI, dqrr->ci);
+}
+
+static inline void qm_dqrr_cce_consume_to_current(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cce);
+#endif
+ dqrr->ci = DQRR_PTR2IDX(dqrr->cursor);
+ qm_cl_out(DQRR_CI, dqrr->ci);
+}
+
+static inline void qm_dqrr_cdc_consume_1(struct qm_portal *portal, u8 idx,
+ int park)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+#endif
+ DPAA_ASSERT(idx < QM_DQRR_SIZE);
+ qm_out(DQRR_DCAP, (0 << 8) | /* S */
+ ((park ? 1 : 0) << 6) | /* PK */
+ idx); /* DCAP_CI */
+}
+
+static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
+ const struct qm_dqrr_entry *dq,
+ int park)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+ u8 idx = DQRR_PTR2IDX(dq);
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+#endif
+ DPAA_ASSERT(idx < QM_DQRR_SIZE);
+ qm_out(DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */
+ ((park ? 1 : 0) << 6) | /* DQRR_DCAP::PK */
+ idx); /* DQRR_DCAP::DCAP_CI */
+}
+
+static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u16 bitmask)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+#endif
+ qm_out(DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */
+ ((u32)bitmask << 16)); /* DQRR_DCAP::DCAP_CI */
+ dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
+ dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
+}
+
+static inline u8 qm_dqrr_cdc_cci(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+#endif
+ return qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
+}
+
+static inline void qm_dqrr_cdc_cce_prefetch(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+#endif
+ qm_cl_invalidate(DQRR_CI);
+ qm_cl_touch_ro(DQRR_CI);
+}
+
+static inline u8 qm_dqrr_cdc_cce(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+#endif
+ return qm_cl_in(DQRR_CI) & (QM_DQRR_SIZE - 1);
+}
+
+static inline u8 qm_dqrr_get_ci(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
+#endif
+ return dqrr->ci;
+}
+
+static inline void qm_dqrr_park(struct qm_portal *portal, u8 idx)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
+#endif
+ qm_out(DQRR_DCAP, (0 << 8) | /* S */
+ (1 << 6) | /* PK */
+ (idx & (QM_DQRR_SIZE - 1))); /* DCAP_CI */
+}
+
+static inline void qm_dqrr_park_current(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
+#endif
+ qm_out(DQRR_DCAP, (0 << 8) | /* S */
+ (1 << 6) | /* PK */
+ DQRR_PTR2IDX(dqrr->cursor)); /* DCAP_CI */
+}
+
+static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
+{
+ qm_out(DQRR_SDQCR, sdqcr);
+}
+
+static inline u32 qm_dqrr_sdqcr_get(struct qm_portal *portal)
+{
+ return qm_in(DQRR_SDQCR);
+}
+
+static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
+{
+ qm_out(DQRR_VDQCR, vdqcr);
+}
+
+static inline u32 qm_dqrr_vdqcr_get(struct qm_portal *portal)
+{
+ return qm_in(DQRR_VDQCR);
+}
+
+static inline u8 qm_dqrr_get_ithresh(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ return dqrr->ithresh;
+}
+
+static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+ qm_out(DQRR_ITR, ithresh);
+}
+
+static inline u8 qm_dqrr_get_maxfill(struct qm_portal *portal)
+{
+ return (qm_in(CFG) & 0x00f00000) >> 20;
+}
+
+/* -------------- */
+/* --- MR API --- */
+
+#define MR_CARRYCLEAR(p) \
+ (void *)((unsigned long)(p) & (~(unsigned long)(QM_MR_SIZE << 6)))
+
+static inline u8 MR_PTR2IDX(const struct qm_mr_entry *e)
+{
+ return ((uintptr_t)e >> 6) & (QM_MR_SIZE - 1);
+}
+
+static inline const struct qm_mr_entry *MR_INC(const struct qm_mr_entry *e)
+{
+ return MR_CARRYCLEAR(e + 1);
+}
+
+static inline void qm_mr_finish(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+
+ if (mr->ci != MR_PTR2IDX(mr->cursor))
+ pr_crit("Ignoring completed MR entries\n");
+}
+
+static inline const struct qm_mr_entry *qm_mr_current(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+
+ if (!mr->fill)
+ return NULL;
+ return mr->cursor;
+}
+
+static inline u8 qm_mr_next(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+
+ DPAA_ASSERT(mr->fill);
+ mr->cursor = MR_INC(mr->cursor);
+ return --mr->fill;
+}
+
+static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
+{
+ register struct qm_mr *mr = &portal->mr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mr->cmode == qm_mr_cci);
+#endif
+ mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
+ qm_out(MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mr->cmode == qm_mr_cci);
+#endif
+ mr->ci = MR_PTR2IDX(mr->cursor);
+ qm_out(MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+ qm_out(MR_ITR, ithresh);
+}
+
+/* ------------------------------ */
+/* --- Management command API --- */
+static inline int qm_mc_init(struct qm_portal *portal)
+{
+ register struct qm_mc *mc = &portal->mc;
+
+ mc->cr = portal->addr.ce + QM_CL_CR;
+ mc->rr = portal->addr.ce + QM_CL_RR0;
+ mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) &
+ QM_MCC_VERB_VBIT) ? 0 : 1;
+ mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ mc->state = qman_mc_idle;
+#endif
+ return 0;
+}
+
+static inline void qm_mc_finish(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_mc *mc = &portal->mc;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mc->state == qman_mc_idle);
+ if (mc->state != qman_mc_idle)
+ pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline struct qm_mc_command *qm_mc_start(struct qm_portal *portal)
+{
+ register struct qm_mc *mc = &portal->mc;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mc->state == qman_mc_idle);
+ mc->state = qman_mc_user;
+#endif
+ dcbz_64(mc->cr);
+ return mc->cr;
+}
+
+static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
+{
+ register struct qm_mc *mc = &portal->mc;
+ struct qm_mc_result *rr = mc->rr + mc->rridx;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mc->state == qman_mc_user);
+#endif
+ lwsync();
+ mc->cr->__dont_write_directly__verb = myverb | mc->vbit;
+ dcbf(mc->cr);
+ dcbit_ro(rr);
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ mc->state = qman_mc_hw;
+#endif
+}
+
+static inline struct qm_mc_result *qm_mc_result(struct qm_portal *portal)
+{
+ register struct qm_mc *mc = &portal->mc;
+ struct qm_mc_result *rr = mc->rr + mc->rridx;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mc->state == qman_mc_hw);
+#endif
+ /* The inactive response register's verb byte always returns zero until
+ * its command is submitted and completed. This includes the valid-bit,
+ * in case you were wondering.
+ */
+ if (!__raw_readb(&rr->verb)) {
+ dcbit_ro(rr);
+ return NULL;
+ }
+ mc->rridx ^= 1;
+ mc->vbit ^= QM_MCC_VERB_VBIT;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ mc->state = qman_mc_idle;
+#endif
+ return rr;
+}
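+
+/* The canonical management-command sequence, shown as a sketch (the FQ
+ * retirement and OOS paths in qman.c use exactly this shape):
+ *
+ *	struct qm_mc_command *mcc = qm_mc_start(portal);
+ *	struct qm_mc_result *mcr;
+ *
+ *	mcc->alterfq.fqid = cpu_to_be32(fqid);
+ *	qm_mc_commit(portal, QM_MCC_VERB_ALTER_OOS);
+ *	while (!(mcr = qm_mc_result(portal)))
+ *		cpu_relax();
+ */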
+
+/* Portal interrupt register API */
+static inline void qm_isr_set_iperiod(struct qm_portal *portal, u16 iperiod)
+{
+ qm_out(ITPR, iperiod);
+}
+
+static inline u32 __qm_isr_read(struct qm_portal *portal, enum qm_isr_reg n)
+{
+#if defined(RTE_ARCH_ARM64)
+ return __qm_in(&portal->addr, QM_REG_ISR + (n << 6));
+#else
+ return __qm_in(&portal->addr, QM_REG_ISR + (n << 2));
+#endif
+}
+
+static inline void __qm_isr_write(struct qm_portal *portal, enum qm_isr_reg n,
+ u32 val)
+{
+#if defined(RTE_ARCH_ARM64)
+ __qm_out(&portal->addr, QM_REG_ISR + (n << 6), val);
+#else
+ __qm_out(&portal->addr, QM_REG_ISR + (n << 2), val);
+#endif
+}
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman_driver.c b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman_driver.c
new file mode 100644
index 000000000..1166d68e2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman_driver.c
@@ -0,0 +1,353 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2017,2019 NXP
+ *
+ */
+
+#include <fsl_usd.h>
+#include <process.h>
+#include "qman_priv.h"
+#include <sys/ioctl.h>
+#include <rte_branch_prediction.h>
+
+/* Global variable containing revision id (even on non-control plane systems
+ * where CCSR isn't available).
+ */
+u16 qman_ip_rev;
+u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
+u16 qm_channel_caam = QMAN_CHANNEL_CAAM;
+u16 qm_channel_pme = QMAN_CHANNEL_PME;
+
+/* CCSR map address, used to access CCSR-based registers */
+static void *qman_ccsr_map;
+/* The qman clock frequency */
+static u32 qman_clk;
+
+static __thread int qmfd = -1;
+static __thread struct qm_portal_config qpcfg;
+static __thread struct dpaa_ioctl_portal_map map = {
+ .type = dpaa_portal_qman
+};
+
+u16 dpaa_get_qm_channel_caam(void)
+{
+ return qm_channel_caam;
+}
+
+u16 dpaa_get_qm_channel_pool(void)
+{
+ return qm_channel_pool1;
+}
+
+static int fsl_qman_portal_init(uint32_t index, int is_shared)
+{
+ struct qman_portal *portal;
+ struct dpaa_ioctl_irq_map irq_map;
+ int ret;
+
+ /* Allocate and map a qman portal */
+ map.index = index;
+ ret = process_portal_map(&map);
+ if (ret) {
+ error(0, ret, "process_portal_map()");
+ return ret;
+ }
+ qpcfg.channel = map.channel;
+ qpcfg.pools = map.pools;
+ qpcfg.index = map.index;
+
+ /* Make the portal's cache-[enabled|inhibited] regions */
+ qpcfg.addr_virt[DPAA_PORTAL_CE] = map.addr.cena;
+ qpcfg.addr_virt[DPAA_PORTAL_CI] = map.addr.cinh;
+
+ qmfd = open(QMAN_PORTAL_IRQ_PATH, O_RDONLY);
+ if (qmfd == -1) {
+ pr_err("QMan irq init failed\n");
+ process_portal_unmap(&map.addr);
+ return -EBUSY;
+ }
+
+ qpcfg.is_shared = is_shared;
+ qpcfg.node = NULL;
+ qpcfg.irq = qmfd;
+
+ portal = qman_create_affine_portal(&qpcfg, NULL);
+ if (!portal) {
+ pr_err("Qman portal initialisation failed (%d)\n",
+ qpcfg.cpu);
+ process_portal_unmap(&map.addr);
+ return -EBUSY;
+ }
+
+ irq_map.type = dpaa_portal_qman;
+ irq_map.portal_cinh = map.addr.cinh;
+ process_portal_irq_map(qmfd, &irq_map);
+ return 0;
+}
+
+static int fsl_qman_portal_finish(void)
+{
+ __maybe_unused const struct qm_portal_config *cfg;
+ int ret;
+
+ process_portal_irq_unmap(qmfd);
+
+ cfg = qman_destroy_affine_portal(NULL);
+ DPAA_BUG_ON(cfg != &qpcfg);
+ ret = process_portal_unmap(&map.addr);
+ if (ret)
+ error(0, ret, "process_portal_unmap()");
+ return ret;
+}
+
+int qman_thread_fd(void)
+{
+ return qmfd;
+}
+
+int qman_thread_init(void)
+{
+ /* Convert from contiguous/virtual cpu numbering to real cpu when
+ * calling into the code that is dependent on the device naming.
+ */
+ return fsl_qman_portal_init(QBMAN_ANY_PORTAL_IDX, 0);
+}
+
+int qman_thread_finish(void)
+{
+ return fsl_qman_portal_finish();
+}
+
+void qman_thread_irq(void)
+{
+ qbman_invoke_irq(qpcfg.irq);
+
+	/* Now we need to uninhibit interrupts. This is the only code outside
+	 * the regular portal driver that manipulates any portal register, so
+	 * rather than breaking that encapsulation I am simply hard-coding the
+	 * offset to the inhibit register here (0x36C0 is QM_REG_IIR).
+	 */
+ out_be32(qpcfg.addr_virt[DPAA_PORTAL_CI] + 0x36C0, 0);
+}
+
+void qman_fq_portal_thread_irq(struct qman_portal *qp)
+{
+ qman_portal_uninhibit_isr(qp);
+}
+
+struct qman_portal *fsl_qman_fq_portal_create(int *fd)
+{
+ struct qman_portal *portal = NULL;
+ struct qm_portal_config *q_pcfg;
+ struct dpaa_ioctl_irq_map irq_map;
+ struct dpaa_ioctl_portal_map q_map = {0};
+ int q_fd = 0, ret;
+
+ q_pcfg = kzalloc((sizeof(struct qm_portal_config)), 0);
+ if (!q_pcfg) {
+ error(0, -1, "q_pcfg kzalloc failed");
+ return NULL;
+ }
+
+ /* Allocate and map a qman portal */
+ q_map.type = dpaa_portal_qman;
+ q_map.index = QBMAN_ANY_PORTAL_IDX;
+ ret = process_portal_map(&q_map);
+ if (ret) {
+ error(0, ret, "process_portal_map()");
+ kfree(q_pcfg);
+ return NULL;
+ }
+ q_pcfg->channel = q_map.channel;
+ q_pcfg->pools = q_map.pools;
+ q_pcfg->index = q_map.index;
+
+ /* Make the portal's cache-[enabled|inhibited] regions */
+ q_pcfg->addr_virt[DPAA_PORTAL_CE] = q_map.addr.cena;
+ q_pcfg->addr_virt[DPAA_PORTAL_CI] = q_map.addr.cinh;
+
+ q_fd = open(QMAN_PORTAL_IRQ_PATH, O_RDONLY);
+ if (q_fd == -1) {
+ pr_err("QMan irq init failed\n");
+ goto err;
+ }
+
+ q_pcfg->irq = q_fd;
+
+ portal = qman_alloc_global_portal(q_pcfg);
+ if (!portal) {
+ pr_err("Qman portal initialisation failed (%d)\n",
+ q_pcfg->cpu);
+ goto err;
+ }
+
+ irq_map.type = dpaa_portal_qman;
+ irq_map.portal_cinh = q_map.addr.cinh;
+ process_portal_irq_map(q_fd, &irq_map);
+
+ *fd = q_fd;
+ return portal;
+err:
+ if (portal)
+ qman_free_global_portal(portal);
+	if (q_fd >= 0)
+		close(q_fd);
+ process_portal_unmap(&q_map.addr);
+ kfree(q_pcfg);
+ return NULL;
+}
+
+int fsl_qman_fq_portal_init(struct qman_portal *qp)
+{
+ struct qman_portal *res;
+
+ res = qman_init_portal(qp, NULL, NULL);
+ if (!res) {
+ pr_err("Qman portal initialisation failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+int fsl_qman_fq_portal_destroy(struct qman_portal *qp)
+{
+ const struct qm_portal_config *cfg;
+ struct dpaa_portal_map addr;
+ int ret;
+
+ cfg = qman_destroy_affine_portal(qp);
+
+ ret = qman_free_global_portal(qp);
+ if (ret)
+ pr_err("qman_free_global_portal() (%d)\n", ret);
+
+ kfree(qp);
+
+ process_portal_irq_unmap(cfg->irq);
+
+ addr.cena = cfg->addr_virt[DPAA_PORTAL_CE];
+ addr.cinh = cfg->addr_virt[DPAA_PORTAL_CI];
+
+ ret = process_portal_unmap(&addr);
+ if (ret)
+ pr_err("process_portal_unmap() (%d)\n", ret);
+
+ kfree((void *)cfg);
+
+ return ret;
+}
+
+int qman_global_init(void)
+{
+ const struct device_node *dt_node;
+ size_t lenp;
+ const u32 *chanid;
+ static int ccsr_map_fd;
+ const uint32_t *qman_addr;
+ uint64_t phys_addr;
+ uint64_t regs_size;
+ const u32 *clk;
+
+ static int done;
+
+	if (done)
+		return -EBUSY;
+	done = 1;
+
+ /* Use the device-tree to determine IP revision until something better
+ * is devised.
+ */
+ dt_node = of_find_compatible_node(NULL, NULL, "fsl,qman-portal");
+ if (!dt_node) {
+ pr_err("No qman portals available for any CPU\n");
+ return -ENODEV;
+ }
+ if (of_device_is_compatible(dt_node, "fsl,qman-portal-1.0") ||
+ of_device_is_compatible(dt_node, "fsl,qman-portal-1.0.0"))
+ pr_err("QMan rev1.0 on P4080 rev1 is not supported!\n");
+ else if (of_device_is_compatible(dt_node, "fsl,qman-portal-1.1") ||
+ of_device_is_compatible(dt_node, "fsl,qman-portal-1.1.0"))
+ qman_ip_rev = QMAN_REV11;
+ else if (of_device_is_compatible(dt_node, "fsl,qman-portal-1.2") ||
+ of_device_is_compatible(dt_node, "fsl,qman-portal-1.2.0"))
+ qman_ip_rev = QMAN_REV12;
+ else if (of_device_is_compatible(dt_node, "fsl,qman-portal-2.0") ||
+ of_device_is_compatible(dt_node, "fsl,qman-portal-2.0.0"))
+ qman_ip_rev = QMAN_REV20;
+ else if (of_device_is_compatible(dt_node, "fsl,qman-portal-3.0.0") ||
+ of_device_is_compatible(dt_node, "fsl,qman-portal-3.0.1"))
+ qman_ip_rev = QMAN_REV30;
+ else if (of_device_is_compatible(dt_node, "fsl,qman-portal-3.1.0") ||
+ of_device_is_compatible(dt_node, "fsl,qman-portal-3.1.1") ||
+ of_device_is_compatible(dt_node, "fsl,qman-portal-3.1.2") ||
+ of_device_is_compatible(dt_node, "fsl,qman-portal-3.1.3"))
+ qman_ip_rev = QMAN_REV31;
+ else if (of_device_is_compatible(dt_node, "fsl,qman-portal-3.2.0") ||
+ of_device_is_compatible(dt_node, "fsl,qman-portal-3.2.1"))
+ qman_ip_rev = QMAN_REV32;
+ else
+ qman_ip_rev = QMAN_REV11;
+
+ if (!qman_ip_rev) {
+ pr_err("Unknown qman portal version\n");
+ return -ENODEV;
+ }
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
+ qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
+ qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
+ qm_channel_pme = QMAN_CHANNEL_PME_REV3;
+ }
+
+ dt_node = of_find_compatible_node(NULL, NULL, "fsl,pool-channel-range");
+ if (!dt_node) {
+ pr_err("No qman pool channel range available\n");
+ return -ENODEV;
+ }
+ chanid = of_get_property(dt_node, "fsl,pool-channel-range", &lenp);
+ if (!chanid) {
+ pr_err("Can not get pool-channel-range property\n");
+ return -EINVAL;
+ }
+
+ /* get ccsr base */
+ dt_node = of_find_compatible_node(NULL, NULL, "fsl,qman");
+ if (!dt_node) {
+ pr_err("No qman device node available\n");
+ return -ENODEV;
+ }
+ qman_addr = of_get_address(dt_node, 0, &regs_size, NULL);
+ if (!qman_addr) {
+ pr_err("of_get_address cannot return qman address\n");
+ return -EINVAL;
+ }
+ phys_addr = of_translate_address(dt_node, qman_addr);
+ if (!phys_addr) {
+ pr_err("of_translate_address failed\n");
+ return -EINVAL;
+ }
+
+ ccsr_map_fd = open("/dev/mem", O_RDWR);
+ if (unlikely(ccsr_map_fd < 0)) {
+ pr_err("Can not open /dev/mem for qman ccsr map\n");
+ return ccsr_map_fd;
+ }
+
+ qman_ccsr_map = mmap(NULL, regs_size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, ccsr_map_fd, phys_addr);
+	if (qman_ccsr_map == MAP_FAILED) {
+		pr_err("Can not map qman ccsr base\n");
+		close(ccsr_map_fd);
+		return -EINVAL;
+	}
+
+ clk = of_get_property(dt_node, "clock-frequency", NULL);
+ if (!clk)
+ pr_warn("Can't find Qman clock frequency\n");
+ else
+ qman_clk = be32_to_cpu(*clk);
+
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ return qman_setup_fq_lookup_table(CONFIG_FSL_QMAN_FQ_LOOKUP_MAX);
+#endif
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman_priv.h b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman_priv.h
new file mode 100644
index 000000000..8254729e6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/base/qbman/qman_priv.h
@@ -0,0 +1,287 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2017,2019 NXP
+ *
+ */
+
+#ifndef __QMAN_PRIV_H
+#define __QMAN_PRIV_H
+
+#include "dpaa_sys.h"
+#include <fsl_qman.h>
+
+/* Congestion Groups */
+/*
+ * This wrapper represents a bit-array for the state of the 256 QMan congestion
+ * groups. It is also used as a *mask* for congestion groups, eg. so we can
+ * ignore those that don't concern us. We reuse the structure and accessor
+ * details already used in the management command to query congestion groups.
+ */
+struct qman_cgrs {
+ struct __qm_mcr_querycongestion q;
+};
+
+static inline void qman_cgrs_init(struct qman_cgrs *c)
+{
+ memset(c, 0, sizeof(*c));
+}
+
+static inline void qman_cgrs_fill(struct qman_cgrs *c)
+{
+ memset(c, 0xff, sizeof(*c));
+}
+
+static inline int qman_cgrs_get(struct qman_cgrs *c, int num)
+{
+ return QM_MCR_QUERYCONGESTION(&c->q, num);
+}
+
+static inline void qman_cgrs_set(struct qman_cgrs *c, int num)
+{
+ c->q.state[__CGR_WORD(num)] |= (0x80000000 >> __CGR_SHIFT(num));
+}
+
+static inline void qman_cgrs_unset(struct qman_cgrs *c, int num)
+{
+ c->q.state[__CGR_WORD(num)] &= ~(0x80000000 >> __CGR_SHIFT(num));
+}
+
+static inline int qman_cgrs_next(struct qman_cgrs *c, int num)
+{
+ while ((++num < (int)__CGR_NUM) && !qman_cgrs_get(c, num))
+ ;
+ return num;
+}
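+
+/* Illustrative usage (not driver code): walk every congestion group that is
+ * set in a qman_cgrs bit-array; handle_cgr() is a hypothetical callback
+ * named only for this example:
+ *
+ *	int i;
+ *
+ *	for (i = qman_cgrs_next(c, -1); i < (int)__CGR_NUM;
+ *	     i = qman_cgrs_next(c, i))
+ *		handle_cgr(i);
+ */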
+
+static inline void qman_cgrs_cp(struct qman_cgrs *dest,
+ const struct qman_cgrs *src)
+{
+ memcpy(dest, src, sizeof(*dest));
+}
+
+static inline void qman_cgrs_and(struct qman_cgrs *dest,
+ const struct qman_cgrs *a,
+ const struct qman_cgrs *b)
+{
+ int ret;
+ u32 *_d = dest->q.state;
+ const u32 *_a = a->q.state;
+ const u32 *_b = b->q.state;
+
+ for (ret = 0; ret < 8; ret++)
+ *(_d++) = *(_a++) & *(_b++);
+}
+
+static inline void qman_cgrs_xor(struct qman_cgrs *dest,
+ const struct qman_cgrs *a,
+ const struct qman_cgrs *b)
+{
+ int ret;
+ u32 *_d = dest->q.state;
+ const u32 *_a = a->q.state;
+ const u32 *_b = b->q.state;
+
+ for (ret = 0; ret < 8; ret++)
+ *(_d++) = *(_a++) ^ *(_b++);
+}
+
+/* used by CCSR and portal interrupt code */
+enum qm_isr_reg {
+ qm_isr_status = 0,
+ qm_isr_enable = 1,
+ qm_isr_disable = 2,
+ qm_isr_inhibit = 3
+};
+
+struct qm_portal_config {
+ /*
+ * Corenet portal addresses;
+ * [0]==cache-enabled, [1]==cache-inhibited.
+ */
+ void __iomem *addr_virt[2];
+ struct device_node *node;
+ /* Allow these to be joined in lists */
+ struct list_head list;
+ /* User-visible portal configuration settings */
+ /* If the caller enables DQRR stashing (and thus wishes to operate the
+ * portal from only one cpu), this is the logical CPU that the portal
+ * will stash to. Whether stashing is enabled or not, this setting is
+ * also used for any "core-affine" portals, ie. default portals
+ * associated to the corresponding cpu. -1 implies that there is no
+ * core affinity configured.
+ */
+ int cpu;
+ /* portal interrupt line */
+ int irq;
+ /* the unique index of this portal */
+ u32 index;
+ /* Is this portal shared? (If so, it has coarser locking and demuxes
+	 * processing on behalf of other CPUs.)
+ */
+ int is_shared;
+ /* The portal's dedicated channel id, use this value for initialising
+ * frame queues to target this portal when scheduled.
+ */
+ u16 channel;
+ /* A mask of which pool channels this portal has dequeue access to
+ * (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask).
+ */
+	u32 pools;
+};
+
+/* Revision info (for errata and feature handling) */
+#define QMAN_REV11 0x0101
+#define QMAN_REV12 0x0102
+#define QMAN_REV20 0x0200
+#define QMAN_REV30 0x0300
+#define QMAN_REV31 0x0301
+#define QMAN_REV32 0x0302
+extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
+
+int qm_set_wpm(int wpm);
+int qm_get_wpm(int *wpm);
+
+struct qman_portal *qman_create_affine_portal(
+ const struct qm_portal_config *config,
+ const struct qman_cgrs *cgrs);
+const struct qm_portal_config *
+qman_destroy_affine_portal(struct qman_portal *q);
+
+struct qman_portal *
+qman_init_portal(struct qman_portal *portal,
+ const struct qm_portal_config *c,
+ const struct qman_cgrs *cgrs);
+
+struct qman_portal *qman_alloc_global_portal(struct qm_portal_config *q_pcfg);
+int qman_free_global_portal(struct qman_portal *portal);
+
+void qman_portal_uninhibit_isr(struct qman_portal *portal);
+
+struct qm_portal_config *qm_get_unused_portal(void);
+struct qm_portal_config *qm_get_unused_portal_idx(uint32_t idx);
+
+void qm_put_unused_portal(struct qm_portal_config *pcfg);
+void qm_set_liodns(struct qm_portal_config *pcfg);
+
+/* This CGR feature is supported by h/w and required by unit-tests and the
+ * debugfs hooks, so is implemented in the driver. However it allows an explicit
+ * corruption of h/w fields by s/w that are usually incorruptible (because the
+ * counters are usually maintained entirely within h/w). As such, we declare
+ * this API internally.
+ */
+int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
+ struct qm_mcr_cgrtestwrite *result);
+
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+/* If the fq object pointer is wider than the context_b field can hold,
+ * then a lookup table is required.
+ */
+int qman_setup_fq_lookup_table(size_t num_entries);
+#endif
+
+/* QMan s/w corenet portal, low-level i/face */
+
+/*
+ * For composing an SDQCR value (see qm_dqrr_sdqcr_set()): choose one SOURCE,
+ * one COUNT, one dequeue TYPE, and a TOKEN (8-bit).
+ * If SOURCE == CHANNELS,
+ * Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n).
+ * You can choose DEDICATED_PRECEDENCE if the portal channel should have
+ * priority.
+ * If SOURCE == SPECIFICWQ,
+ * Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
+ * channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
+ * work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
+ * same value.
+ */
+#define QM_SDQCR_SOURCE_CHANNELS 0x0
+#define QM_SDQCR_SOURCE_SPECIFICWQ 0x40000000
+#define QM_SDQCR_COUNT_EXACT1 0x0
+#define QM_SDQCR_COUNT_UPTO3 0x20000000
+#define QM_SDQCR_DEDICATED_PRECEDENCE 0x10000000
+#define QM_SDQCR_TYPE_MASK 0x03000000
+#define QM_SDQCR_TYPE_NULL 0x0
+#define QM_SDQCR_TYPE_PRIO_QOS 0x01000000
+#define QM_SDQCR_TYPE_ACTIVE_QOS 0x02000000
+#define QM_SDQCR_TYPE_ACTIVE 0x03000000
+#define QM_SDQCR_TOKEN_MASK 0x00ff0000
+#define QM_SDQCR_TOKEN_SET(v) (((v) & 0xff) << 16)
+#define QM_SDQCR_TOKEN_GET(v) (((v) >> 16) & 0xff)
+#define QM_SDQCR_CHANNELS_DEDICATED 0x00008000
+#define QM_SDQCR_SPECIFICWQ_MASK 0x000000f7
+#define QM_SDQCR_SPECIFICWQ_DEDICATED 0x00000000
+#define QM_SDQCR_SPECIFICWQ_POOL(n) ((n) << 4)
+#define QM_SDQCR_SPECIFICWQ_WQ(n) (n)
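+
+/* For example, a plausible SDQCR built from the fields above (dequeue up to
+ * three frames from the portal's dedicated channel, active-FQ type, with an
+ * arbitrary token value chosen only for illustration):
+ *
+ *	u32 sdqcr = QM_SDQCR_SOURCE_CHANNELS |
+ *		    QM_SDQCR_COUNT_UPTO3 |
+ *		    QM_SDQCR_TYPE_ACTIVE |
+ *		    QM_SDQCR_TOKEN_SET(0xab) |
+ *		    QM_SDQCR_CHANNELS_DEDICATED;
+ */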
+
+#define QM_VDQCR_FQID_MASK 0x00ffffff
+#define QM_VDQCR_FQID(n) ((n) & QM_VDQCR_FQID_MASK)
+
+#define QM_EQCR_VERB_VBIT 0x80
+#define QM_EQCR_VERB_CMD_MASK 0x61 /* but only one value; */
+#define QM_EQCR_VERB_CMD_ENQUEUE 0x01
+#define QM_EQCR_VERB_COLOUR_MASK 0x18 /* 4 possible values; */
+#define QM_EQCR_VERB_COLOUR_GREEN 0x00
+#define QM_EQCR_VERB_COLOUR_YELLOW 0x08
+#define QM_EQCR_VERB_COLOUR_RED 0x10
+#define QM_EQCR_VERB_COLOUR_OVERRIDE 0x18
+#define QM_EQCR_VERB_INTERRUPT 0x04 /* on command consumption */
+#define QM_EQCR_VERB_ORP 0x02 /* enable order restoration */
+#define QM_EQCR_DCA_ENABLE 0x80
+#define QM_EQCR_DCA_PARK 0x40
+#define QM_EQCR_DCA_IDXMASK 0x0f /* "DQRR::idx" goes here */
+#define QM_EQCR_SEQNUM_NESN 0x8000 /* Advance NESN */
+#define QM_EQCR_SEQNUM_NLIS 0x4000 /* More fragments to come */
+#define QM_EQCR_SEQNUM_SEQMASK 0x3fff /* sequence number goes here */
+#define QM_EQCR_FQID_NULL 0 /* eg. for an ORP seqnum hole */
+
+#define QM_MCC_VERB_VBIT 0x80
+#define QM_MCC_VERB_MASK 0x7f /* where the verb contains; */
+#define QM_MCC_VERB_INITFQ_PARKED 0x40
+#define QM_MCC_VERB_INITFQ_SCHED 0x41
+#define QM_MCC_VERB_QUERYFQ 0x44
+#define QM_MCC_VERB_QUERYFQ_NP 0x45 /* "non-programmable" fields */
+#define QM_MCC_VERB_QUERYWQ 0x46
+#define QM_MCC_VERB_QUERYWQ_DEDICATED 0x47
+#define QM_MCC_VERB_ALTER_SCHED 0x48 /* Schedule FQ */
+#define QM_MCC_VERB_ALTER_FE 0x49 /* Force Eligible FQ */
+#define QM_MCC_VERB_ALTER_RETIRE 0x4a /* Retire FQ */
+#define QM_MCC_VERB_ALTER_OOS 0x4b /* Take FQ out of service */
+#define QM_MCC_VERB_ALTER_FQXON 0x4d /* FQ XON */
+#define QM_MCC_VERB_ALTER_FQXOFF 0x4e /* FQ XOFF */
+#define QM_MCC_VERB_INITCGR 0x50
+#define QM_MCC_VERB_MODIFYCGR 0x51
+#define QM_MCC_VERB_CGRTESTWRITE 0x52
+#define QM_MCC_VERB_QUERYCGR 0x58
+#define QM_MCC_VERB_QUERYCONGESTION 0x59
+
+/*
+ * Used by all portal interrupt registers except 'inhibit'
+ * Channels with frame availability
+ */
+#define QM_PIRQ_DQAVAIL 0x0000ffff
+
+/* The DQAVAIL interrupt fields break down into these bits; */
+#define QM_DQAVAIL_PORTAL 0x8000 /* Portal channel */
+#define QM_DQAVAIL_POOL(n) (0x8000 >> (n)) /* Pool channel, n==[1..15] */
+#define QM_DQAVAIL_MASK 0xffff
+/* This mask contains all the "irqsource" bits visible to API users */
+#define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI)
+
+/* These are qm_isr_<reg>_<verb>(). So for example, qm_isr_disable_write()
+ * means "write the disable register" rather than "disable the ability to
+ * write".
+ */
+#define qm_isr_status_read(qm) __qm_isr_read(qm, qm_isr_status)
+#define qm_isr_status_clear(qm, m) __qm_isr_write(qm, qm_isr_status, m)
+#define qm_isr_enable_read(qm) __qm_isr_read(qm, qm_isr_enable)
+#define qm_isr_enable_write(qm, v) __qm_isr_write(qm, qm_isr_enable, v)
+#define qm_isr_disable_read(qm) __qm_isr_read(qm, qm_isr_disable)
+#define qm_isr_disable_write(qm, v) __qm_isr_write(qm, qm_isr_disable, v)
+/* TODO: unfortunate name-clash here, reword? */
+#define qm_isr_inhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 1)
+#define qm_isr_uninhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 0)
+
+#define QMAN_PORTAL_IRQ_PATH "/dev/fsl-usdpaa-irq"
+
+#endif /* __QMAN_PRIV_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/dpaa_bus.c b/src/spdk/dpdk/drivers/bus/dpaa/dpaa_bus.c
new file mode 100644
index 000000000..d53fe6083
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/dpaa_bus.c
@@ -0,0 +1,758 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2017-2019 NXP
+ *
+ */
+/* System headers */
+#include <stdio.h>
+#include <inttypes.h>
+#include <unistd.h>
+#include <limits.h>
+#include <sched.h>
+#include <signal.h>
+#include <pthread.h>
+#include <sys/types.h>
+#include <sys/syscall.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+#include <rte_bus.h>
+#include <rte_mbuf_pool_ops.h>
+
+#include <dpaa_of.h>
+#include <rte_dpaa_bus.h>
+#include <rte_dpaa_logs.h>
+#include <dpaax_iova_table.h>
+
+#include <fsl_usd.h>
+#include <fsl_qman.h>
+#include <fsl_bman.h>
+#include <netcfg.h>
+
+int dpaa_logtype_bus;
+
+static struct rte_dpaa_bus rte_dpaa_bus;
+struct netcfg_info *dpaa_netcfg;
+
+/* Define a variable to hold the portal_key, once created. */
+static pthread_key_t dpaa_portal_key;
+
+unsigned int dpaa_svr_family;
+
+#define FSL_DPAA_BUS_NAME dpaa_bus
+
+RTE_DEFINE_PER_LCORE(bool, dpaa_io);
+RTE_DEFINE_PER_LCORE(struct dpaa_portal_dqrr, held_bufs);
+
+struct fm_eth_port_cfg *
+dpaa_get_eth_port_cfg(int dev_id)
+{
+ return &dpaa_netcfg->port_cfg[dev_id];
+}
+
+static int
+compare_dpaa_devices(struct rte_dpaa_device *dev1,
+ struct rte_dpaa_device *dev2)
+{
+ int comp = 0;
+
+	/* Segregating ETH from SEC devices */
+ if (dev1->device_type > dev2->device_type)
+ comp = 1;
+ else if (dev1->device_type < dev2->device_type)
+ comp = -1;
+ else
+ comp = 0;
+
+ if ((comp != 0) || (dev1->device_type != FSL_DPAA_ETH))
+ return comp;
+
+ if (dev1->id.fman_id > dev2->id.fman_id) {
+ comp = 1;
+ } else if (dev1->id.fman_id < dev2->id.fman_id) {
+ comp = -1;
+ } else {
+ /* FMAN ids match, check for mac_id */
+ if (dev1->id.mac_id > dev2->id.mac_id)
+ comp = 1;
+ else if (dev1->id.mac_id < dev2->id.mac_id)
+ comp = -1;
+ else
+ comp = 0;
+ }
+
+ return comp;
+}
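+
+/* With this comparator the sorted bus list reads, for example:
+ * fm1-mac1, fm1-mac2, ..., fm2-mac1, ..., dpaa_sec-1, ...; ETH devices are
+ * ordered by (fman_id, mac_id) and, assuming FSL_DPAA_ETH sorts before
+ * FSL_DPAA_CRYPTO in enum rte_dpaa_type, are followed by the SEC devices.
+ */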
+
+static inline void
+dpaa_add_to_device_list(struct rte_dpaa_device *newdev)
+{
+ int comp, inserted = 0;
+ struct rte_dpaa_device *dev = NULL;
+ struct rte_dpaa_device *tdev = NULL;
+
+ TAILQ_FOREACH_SAFE(dev, &rte_dpaa_bus.device_list, next, tdev) {
+ comp = compare_dpaa_devices(newdev, dev);
+ if (comp < 0) {
+ TAILQ_INSERT_BEFORE(dev, newdev, next);
+ inserted = 1;
+ break;
+ }
+ }
+
+ if (!inserted)
+ TAILQ_INSERT_TAIL(&rte_dpaa_bus.device_list, newdev, next);
+}
+
+/*
+ * Checks the device tree for a SEC device.
+ * Returns 0 if a SEC device is available, -1 otherwise.
+ */
+static inline int
+dpaa_sec_available(void)
+{
+ const struct device_node *caam_node;
+
+ for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
+ return 0;
+ }
+
+ return -1;
+}
+
+static void dpaa_clean_device_list(void);
+
+static struct rte_devargs *
+dpaa_devargs_lookup(struct rte_dpaa_device *dev)
+{
+ struct rte_devargs *devargs;
+ char dev_name[32];
+
+ RTE_EAL_DEVARGS_FOREACH("dpaa_bus", devargs) {
+ devargs->bus->parse(devargs->name, &dev_name);
+ if (strcmp(dev_name, dev->device.name) == 0) {
+ DPAA_BUS_INFO("**Devargs matched %s", dev_name);
+ return devargs;
+ }
+ }
+ return NULL;
+}
+
+static int
+dpaa_create_device_list(void)
+{
+ int i;
+ int ret;
+ struct rte_dpaa_device *dev;
+ struct fm_eth_port_cfg *cfg;
+ struct fman_if *fman_intf;
+
+ /* Creating Ethernet Devices */
+ for (i = 0; i < dpaa_netcfg->num_ethports; i++) {
+ dev = calloc(1, sizeof(struct rte_dpaa_device));
+ if (!dev) {
+ DPAA_BUS_LOG(ERR, "Failed to allocate ETH devices");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ dev->device.bus = &rte_dpaa_bus.bus;
+
+ cfg = &dpaa_netcfg->port_cfg[i];
+ fman_intf = cfg->fman_if;
+
+ /* Device identifiers */
+ dev->id.fman_id = fman_intf->fman_idx + 1;
+ dev->id.mac_id = fman_intf->mac_idx;
+ dev->device_type = FSL_DPAA_ETH;
+ dev->id.dev_id = i;
+
+ /* Create device name */
+ memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
+ sprintf(dev->name, "fm%d-mac%d", (fman_intf->fman_idx + 1),
+ fman_intf->mac_idx);
+ DPAA_BUS_LOG(INFO, "%s netdev added", dev->name);
+ dev->device.name = dev->name;
+ dev->device.devargs = dpaa_devargs_lookup(dev);
+
+ dpaa_add_to_device_list(dev);
+ }
+
+ rte_dpaa_bus.device_count = i;
+
+	/* Unlike the ETH case, a fixed count of RTE_LIBRTE_DPAA_MAX_CRYPTODEV
+	 * SEC devices is created, and only if the "sec" property is found in
+	 * the device tree. Logically there is no limit on the number of
+	 * devices (QI interfaces) that could be created.
+	 */
+
+ if (dpaa_sec_available()) {
+ DPAA_BUS_LOG(INFO, "DPAA SEC devices are not available");
+ return 0;
+ }
+
+ /* Creating SEC Devices */
+ for (i = 0; i < RTE_LIBRTE_DPAA_MAX_CRYPTODEV; i++) {
+ dev = calloc(1, sizeof(struct rte_dpaa_device));
+ if (!dev) {
+ DPAA_BUS_LOG(ERR, "Failed to allocate SEC devices");
+ ret = -1;
+ goto cleanup;
+ }
+
+ dev->device_type = FSL_DPAA_CRYPTO;
+ dev->id.dev_id = rte_dpaa_bus.device_count + i;
+
+		/* Even though RTE_CRYPTODEV_NAME_MAX_LEN is the valid length
+		 * for a crypto PMD name, RTE_ETH_NAME_MAX_LEN is used here as
+		 * that is the size allocated for dev->name.
+		 */
+ memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
+ sprintf(dev->name, "dpaa_sec-%d", i+1);
+ DPAA_BUS_LOG(INFO, "%s cryptodev added", dev->name);
+ dev->device.name = dev->name;
+ dev->device.devargs = dpaa_devargs_lookup(dev);
+
+ dpaa_add_to_device_list(dev);
+ }
+
+ rte_dpaa_bus.device_count += i;
+
+ return 0;
+
+cleanup:
+ dpaa_clean_device_list();
+ return ret;
+}
+
+static void
+dpaa_clean_device_list(void)
+{
+ struct rte_dpaa_device *dev = NULL;
+ struct rte_dpaa_device *tdev = NULL;
+
+ TAILQ_FOREACH_SAFE(dev, &rte_dpaa_bus.device_list, next, tdev) {
+ TAILQ_REMOVE(&rte_dpaa_bus.device_list, dev, next);
+ free(dev);
+ dev = NULL;
+ }
+}
+
+int rte_dpaa_portal_init(void *arg)
+{
+ unsigned int cpu, lcore = rte_lcore_id();
+ int ret;
+ struct dpaa_portal *dpaa_io_portal;
+
+ BUS_INIT_FUNC_TRACE();
+
+	if ((size_t)arg == 1 || lcore == LCORE_ID_ANY)
+		lcore = rte_get_master_lcore();
+	else if (lcore >= RTE_MAX_LCORE)
+		return -1;
+
+ cpu = rte_lcore_to_cpu_id(lcore);
+
+ /* Initialise bman thread portals */
+ ret = bman_thread_init();
+ if (ret) {
+ DPAA_BUS_LOG(ERR, "bman_thread_init failed on core %u"
+ " (lcore=%u) with ret: %d", cpu, lcore, ret);
+ return ret;
+ }
+
+ DPAA_BUS_LOG(DEBUG, "BMAN thread initialized - CPU=%d lcore=%d",
+ cpu, lcore);
+
+ /* Initialise qman thread portals */
+ ret = qman_thread_init();
+ if (ret) {
+ DPAA_BUS_LOG(ERR, "qman_thread_init failed on core %u"
+ " (lcore=%u) with ret: %d", cpu, lcore, ret);
+ bman_thread_finish();
+ return ret;
+ }
+
+ DPAA_BUS_LOG(DEBUG, "QMAN thread initialized - CPU=%d lcore=%d",
+ cpu, lcore);
+
+ dpaa_io_portal = rte_malloc(NULL, sizeof(struct dpaa_portal),
+ RTE_CACHE_LINE_SIZE);
+ if (!dpaa_io_portal) {
+ DPAA_BUS_LOG(ERR, "Unable to allocate memory");
+ bman_thread_finish();
+ qman_thread_finish();
+ return -ENOMEM;
+ }
+
+ dpaa_io_portal->qman_idx = qman_get_portal_index();
+ dpaa_io_portal->bman_idx = bman_get_portal_index();
+ dpaa_io_portal->tid = syscall(SYS_gettid);
+
+ ret = pthread_setspecific(dpaa_portal_key, (void *)dpaa_io_portal);
+ if (ret) {
+ DPAA_BUS_LOG(ERR, "pthread_setspecific failed on core %u"
+ " (lcore=%u) with ret: %d", cpu, lcore, ret);
+ dpaa_portal_finish(NULL);
+
+ return ret;
+ }
+
+ RTE_PER_LCORE(dpaa_io) = true;
+
+ DPAA_BUS_LOG(DEBUG, "QMAN thread initialized");
+
+ return 0;
+}
+
+int
+rte_dpaa_portal_fq_init(void *arg, struct qman_fq *fq)
+{
+	/* Affine the above-created portal with the channel */
+ u32 sdqcr;
+ int ret;
+
+ if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ ret = rte_dpaa_portal_init(arg);
+ if (ret < 0) {
+ DPAA_BUS_LOG(ERR, "portal initialization failure");
+ return ret;
+ }
+ }
+
+ /* Initialise qman specific portals */
+ ret = fsl_qman_fq_portal_init(fq->qp);
+ if (ret) {
+ DPAA_BUS_LOG(ERR, "Unable to init fq portal");
+ return -1;
+ }
+
+ sdqcr = QM_SDQCR_CHANNELS_POOL_CONV(fq->ch_id);
+ qman_static_dequeue_add(sdqcr, fq->qp);
+
+ return 0;
+}
+
+int rte_dpaa_portal_fq_close(struct qman_fq *fq)
+{
+ return fsl_qman_fq_portal_destroy(fq->qp);
+}
+
+void
+dpaa_portal_finish(void *arg)
+{
+ struct dpaa_portal *dpaa_io_portal = (struct dpaa_portal *)arg;
+
+ if (!dpaa_io_portal) {
+ DPAA_BUS_LOG(DEBUG, "Portal already cleaned");
+ return;
+ }
+
+ bman_thread_finish();
+ qman_thread_finish();
+
+ pthread_setspecific(dpaa_portal_key, NULL);
+
+ rte_free(dpaa_io_portal);
+ dpaa_io_portal = NULL;
+
+ RTE_PER_LCORE(dpaa_io) = false;
+}
+
+static int
+rte_dpaa_bus_parse(const char *name, void *out_name)
+{
+ int i, j;
+ int max_fman = 2, max_macs = 16;
+ char *dup_name;
+ char *sep = NULL;
+
+ /* There are two ways of passing device name, with and without
+ * separator. "dpaa_bus:fm1-mac3" with separator, and "fm1-mac3"
+ * without separator. Both need to be handled.
+ * It is also possible that "name=fm1-mac3" is passed along.
+ */
+ DPAA_BUS_DEBUG("Parse device name (%s)", name);
+
+	/* Check for dpaa_bus:fm1-mac3 style */
+	dup_name = strdup(name);
+	if (!dup_name)
+		return -ENOMEM;
+	sep = strchr(dup_name, ':');
+	if (!sep)
+		/* If not, check for name=fm1-mac3 style */
+		sep = strchr(dup_name, '=');
+
+ if (sep)
+		/* jump over the separator */
+ sep = (char *) (sep + 1);
+ else
+ sep = dup_name;
+
+ for (i = 0; i < max_fman; i++) {
+ for (j = 0; j < max_macs; j++) {
+ char fm_name[16];
+ snprintf(fm_name, 16, "fm%d-mac%d", i, j);
+ if (strcmp(fm_name, sep) == 0) {
+ if (out_name)
+ strcpy(out_name, sep);
+ free(dup_name);
+ return 0;
+ }
+ }
+ }
+
+ for (i = 0; i < RTE_LIBRTE_DPAA_MAX_CRYPTODEV; i++) {
+ char sec_name[16];
+
+ snprintf(sec_name, 16, "dpaa_sec-%d", i+1);
+ if (strcmp(sec_name, sep) == 0) {
+ if (out_name)
+ strcpy(out_name, sep);
+ free(dup_name);
+ return 0;
+ }
+ }
+
+ free(dup_name);
+ return -EINVAL;
+}
+
+#define DPAA_DEV_PATH1 "/sys/devices/platform/soc/soc:fsl,dpaa"
+#define DPAA_DEV_PATH2 "/sys/devices/platform/fsl,dpaa"
+
+static int
+rte_dpaa_bus_scan(void)
+{
+ int ret;
+
+ BUS_INIT_FUNC_TRACE();
+
+ if ((access(DPAA_DEV_PATH1, F_OK) != 0) &&
+ (access(DPAA_DEV_PATH2, F_OK) != 0)) {
+ RTE_LOG(DEBUG, EAL, "DPAA Bus not present. Skipping.\n");
+ return 0;
+ }
+
+ if (rte_dpaa_bus.detected)
+ return 0;
+
+ rte_dpaa_bus.detected = 1;
+
+	/* Create the key, supplying a destructor that is invoked
+	 * when a portal-affined thread is deleted.
+	 */
+ ret = pthread_key_create(&dpaa_portal_key, dpaa_portal_finish);
+ if (ret) {
+ DPAA_BUS_LOG(DEBUG, "Unable to create pthread key. (%d)", ret);
+ dpaa_clean_device_list();
+ return ret;
+ }
+
+ return 0;
+}
+
+/* register a dpaa bus based dpaa driver */
+void
+rte_dpaa_driver_register(struct rte_dpaa_driver *driver)
+{
+ RTE_VERIFY(driver);
+
+ BUS_INIT_FUNC_TRACE();
+
+ TAILQ_INSERT_TAIL(&rte_dpaa_bus.driver_list, driver, next);
+ /* Update Bus references */
+ driver->dpaa_bus = &rte_dpaa_bus;
+}
+
+/* un-register a dpaa bus based dpaa driver */
+void
+rte_dpaa_driver_unregister(struct rte_dpaa_driver *driver)
+{
+ struct rte_dpaa_bus *dpaa_bus;
+
+ BUS_INIT_FUNC_TRACE();
+
+ dpaa_bus = driver->dpaa_bus;
+
+ TAILQ_REMOVE(&dpaa_bus->driver_list, driver, next);
+ /* Update Bus references */
+ driver->dpaa_bus = NULL;
+}
+
+static int
+rte_dpaa_device_match(struct rte_dpaa_driver *drv,
+ struct rte_dpaa_device *dev)
+{
+ if (!drv || !dev) {
+ DPAA_BUS_DEBUG("Invalid drv or dev received.");
+ return -1;
+ }
+
+ if (drv->drv_type == dev->device_type)
+ return 0;
+
+ return -1;
+}
+
+static int
+rte_dpaa_bus_dev_build(void)
+{
+ int ret;
+
+ /* Load the device-tree driver */
+ ret = of_init();
+ if (ret) {
+ DPAA_BUS_LOG(ERR, "of_init failed with ret: %d", ret);
+ return -1;
+ }
+
+ /* Get the interface configurations from device-tree */
+ dpaa_netcfg = netcfg_acquire();
+ if (!dpaa_netcfg) {
+ DPAA_BUS_LOG(ERR, "netcfg_acquire failed");
+ return -EINVAL;
+ }
+
+ RTE_LOG(NOTICE, EAL, "DPAA Bus Detected\n");
+
+ if (!dpaa_netcfg->num_ethports) {
+ DPAA_BUS_LOG(INFO, "no network interfaces available");
+ /* This is not an error */
+ return 0;
+ }
+
+#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
+ dump_netcfg(dpaa_netcfg);
+#endif
+
+ DPAA_BUS_LOG(DEBUG, "Number of ethernet devices = %d",
+ dpaa_netcfg->num_ethports);
+ ret = dpaa_create_device_list();
+ if (ret) {
+ DPAA_BUS_LOG(ERR, "Unable to create device list. (%d)", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int
+rte_dpaa_bus_probe(void)
+{
+ int ret = -1;
+ struct rte_dpaa_device *dev;
+ struct rte_dpaa_driver *drv;
+ FILE *svr_file = NULL;
+ unsigned int svr_ver;
+ int probe_all = rte_dpaa_bus.bus.conf.scan_mode != RTE_BUS_SCAN_WHITELIST;
+ static int process_once;
+
+ /* If DPAA bus is not present nothing needs to be done */
+ if (!rte_dpaa_bus.detected)
+ return 0;
+
+ /* Device list creation is only done once */
+ if (!process_once) {
+ rte_dpaa_bus_dev_build();
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ /* One time load of Qman/Bman drivers */
+ ret = qman_global_init();
+ if (ret) {
+ DPAA_BUS_ERR("QMAN initialization failed: %d",
+ ret);
+ return ret;
+ }
+ ret = bman_global_init();
+ if (ret) {
+ DPAA_BUS_ERR("BMAN initialization failed: %d",
+ ret);
+ return ret;
+ }
+ }
+ }
+ process_once = 1;
+
+ /* If no device present on DPAA bus nothing needs to be done */
+ if (TAILQ_EMPTY(&rte_dpaa_bus.device_list))
+ return 0;
+
+ svr_file = fopen(DPAA_SOC_ID_FILE, "r");
+ if (svr_file) {
+ if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
+ dpaa_svr_family = svr_ver & SVR_MASK;
+ fclose(svr_file);
+ }
+
+ /* And initialize the PA->VA translation table */
+ dpaax_iova_table_populate();
+
+ /* For each registered driver, and device, call the driver->probe */
+ TAILQ_FOREACH(dev, &rte_dpaa_bus.device_list, next) {
+ TAILQ_FOREACH(drv, &rte_dpaa_bus.driver_list, next) {
+ ret = rte_dpaa_device_match(drv, dev);
+ if (ret)
+ continue;
+
+ if (rte_dev_is_probed(&dev->device))
+ continue;
+
+ if (!drv->probe ||
+ (dev->device.devargs &&
+ dev->device.devargs->policy == RTE_DEV_BLACKLISTED))
+ continue;
+
+ if (probe_all ||
+ (dev->device.devargs &&
+ dev->device.devargs->policy ==
+ RTE_DEV_WHITELISTED)) {
+ ret = drv->probe(drv, dev);
+ if (ret) {
+ DPAA_BUS_ERR("unable to probe:%s",
+ dev->name);
+ } else {
+ dev->driver = drv;
+ dev->device.driver = &drv->driver;
+ }
+ }
+ break;
+ }
+ }
+
+ /* Register DPAA mempool ops only if any DPAA device has
+ * been detected.
+ */
+ rte_mbuf_set_platform_mempool_ops(DPAA_MEMPOOL_OPS_NAME);
+
+ return 0;
+}
+
+static struct rte_device *
+rte_dpaa_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
+ const void *data)
+{
+ struct rte_dpaa_device *dev;
+ const struct rte_dpaa_device *dstart;
+
+ /* find_device is called with 'data' as an opaque object - just call
+ * cmp with this and each device object on bus.
+ */
+
+ if (start != NULL) {
+ dstart = RTE_DEV_TO_DPAA_CONST(start);
+ dev = TAILQ_NEXT(dstart, next);
+ } else {
+ dev = TAILQ_FIRST(&rte_dpaa_bus.device_list);
+ }
+
+ while (dev != NULL) {
+ if (cmp(&dev->device, data) == 0) {
+ DPAA_BUS_DEBUG("Found dev=(%s)\n", dev->device.name);
+ return &dev->device;
+ }
+ dev = TAILQ_NEXT(dev, next);
+ }
+
+ DPAA_BUS_DEBUG("Unable to find any device\n");
+ return NULL;
+}
+
+/*
+ * Get the IOMMU class of DPAA devices on the bus.
+ */
+static enum rte_iova_mode
+rte_dpaa_get_iommu_class(void)
+{
+ if ((access(DPAA_DEV_PATH1, F_OK) != 0) &&
+ (access(DPAA_DEV_PATH2, F_OK) != 0)) {
+ return RTE_IOVA_DC;
+ }
+ return RTE_IOVA_PA;
+}
+
+static int
+dpaa_bus_plug(struct rte_device *dev __rte_unused)
+{
+ /* No operation is performed while plugging the device */
+ return 0;
+}
+
+static int
+dpaa_bus_unplug(struct rte_device *dev __rte_unused)
+{
+ /* No operation is performed while unplugging the device */
+ return 0;
+}
+
+static void *
+dpaa_bus_dev_iterate(const void *start, const char *str,
+ const struct rte_dev_iterator *it __rte_unused)
+{
+ const struct rte_dpaa_device *dstart;
+ struct rte_dpaa_device *dev;
+ char *dup, *dev_name = NULL;
+
+ /* The device string is expected to be of the form name=device_name */
+ if (strncmp(str, "name=", 5) != 0) {
+ DPAA_BUS_DEBUG("Invalid device string (%s)\n", str);
+ return NULL;
+ }
+
+ /* Split the device name off the "name=" prefix */
+ dup = strdup(str);
+ if (!dup)
+ return NULL;
+ dev_name = dup + strlen("name=");
+
+ if (start != NULL) {
+ dstart = RTE_DEV_TO_DPAA_CONST(start);
+ dev = TAILQ_NEXT(dstart, next);
+ } else {
+ dev = TAILQ_FIRST(&rte_dpaa_bus.device_list);
+ }
+
+ while (dev != NULL) {
+ if (strcmp(dev->device.name, dev_name) == 0) {
+ free(dup);
+ return &dev->device;
+ }
+ dev = TAILQ_NEXT(dev, next);
+ }
+
+ free(dup);
+ return NULL;
+}
+
+static struct rte_dpaa_bus rte_dpaa_bus = {
+ .bus = {
+ .scan = rte_dpaa_bus_scan,
+ .probe = rte_dpaa_bus_probe,
+ .parse = rte_dpaa_bus_parse,
+ .find_device = rte_dpaa_find_device,
+ .get_iommu_class = rte_dpaa_get_iommu_class,
+ .plug = dpaa_bus_plug,
+ .unplug = dpaa_bus_unplug,
+ .dev_iterate = dpaa_bus_dev_iterate,
+ },
+ .device_list = TAILQ_HEAD_INITIALIZER(rte_dpaa_bus.device_list),
+ .driver_list = TAILQ_HEAD_INITIALIZER(rte_dpaa_bus.driver_list),
+ .device_count = 0,
+};
+
+RTE_REGISTER_BUS(FSL_DPAA_BUS_NAME, rte_dpaa_bus.bus);
+
+RTE_INIT(dpaa_init_log)
+{
+ dpaa_logtype_bus = rte_log_register("bus.dpaa");
+ if (dpaa_logtype_bus >= 0)
+ rte_log_set_level(dpaa_logtype_bus, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/include/dpaa_bits.h b/src/spdk/dpdk/drivers/bus/dpaa/include/dpaa_bits.h
new file mode 100644
index 000000000..9bc14d0c3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/include/dpaa_bits.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2017 NXP
+ *
+ */
+
+#ifndef __DPAA_BITS_H
+#define __DPAA_BITS_H
+
+/* Bitfield stuff. */
+#define BITS_PER_ULONG (sizeof(unsigned long) << 3)
+#define SHIFT_PER_ULONG (((1 << 5) == BITS_PER_ULONG) ? 5 : 6)
+#define BITS_MASK(idx) (1UL << ((idx) & (BITS_PER_ULONG - 1)))
+#define BITS_IDX(idx) ((idx) >> SHIFT_PER_ULONG)
+
+static inline void dpaa_set_bits(unsigned long mask,
+ volatile unsigned long *p)
+{
+ *p |= mask;
+}
+
+static inline void dpaa_set_bit(int idx, volatile unsigned long *bits)
+{
+ dpaa_set_bits(BITS_MASK(idx), bits + BITS_IDX(idx));
+}
+
+static inline void dpaa_clear_bits(unsigned long mask,
+ volatile unsigned long *p)
+{
+ *p &= ~mask;
+}
+
+static inline void dpaa_clear_bit(int idx,
+ volatile unsigned long *bits)
+{
+ dpaa_clear_bits(BITS_MASK(idx), bits + BITS_IDX(idx));
+}
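+
+/* Usage sketch (illustrative): the helpers above operate on an array of
+ * unsigned longs treated as one contiguous bitfield; BITS_IDX() selects the
+ * word and BITS_MASK() the bit within it. E.g. for a 128-bit field:
+ *
+ * unsigned long bits[BITS_IDX(127) + 1] = { 0 };
+ * dpaa_set_bit(42, bits);
+ * dpaa_clear_bit(42, bits);
+ */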
+
+#endif /* __DPAA_BITS_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/include/dpaa_rbtree.h b/src/spdk/dpdk/drivers/bus/dpaa/include/dpaa_rbtree.h
new file mode 100644
index 000000000..6c237e705
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/include/dpaa_rbtree.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2017 NXP
+ *
+ */
+
+#ifndef __DPAA_RBTREE_H
+#define __DPAA_RBTREE_H
+
+#include <rte_common.h>
+/************/
+/* RB-trees */
+/************/
+
+/* Linux has a good RB-tree implementation that we can't use (GPL). It also has
+ * a flat/hooked-in interface that virtually requires license-contamination in
+ * order to write a caller-compatible implementation. Instead, I've created an
+ * RB-tree encapsulation on top of linux's primitives (it does some of the work
+ * the client logic would normally do), and this gives us something we can
+ * reimplement on LWE. Unfortunately there are no good, free RB-tree
+ * implementations out there that are license-compatible and "flat" (ie. no
+ * dynamic allocation). I did find a malloc-based one that I could convert, but
+ * that will be a task for later on. For now, LWE's RB-tree is implemented using
+ * an ordered linked-list.
+ *
+ * Note, the only linux-esque type is "struct rb_node", because it's used
+ * statically in the exported header, so it can't be opaque. Our version doesn't
+ * include a "rb_parent_color" field because we're doing linked-list instead of
+ * a true rb-tree.
+ */
+
+struct rb_node {
+ struct rb_node *prev, *next;
+};
+
+struct dpa_rbtree {
+ struct rb_node *head, *tail;
+};
+
+#define DPAA_RBTREE { NULL, NULL }
+static inline void dpa_rbtree_init(struct dpa_rbtree *tree)
+{
+ tree->head = tree->tail = NULL;
+}
+
+#define QMAN_NODE2OBJ(ptr, type, node_field) \
+ (type *)((char *)ptr - offsetof(type, node_field))
+
+#define IMPLEMENT_DPAA_RBTREE(name, type, node_field, val_field) \
+static inline int name##_push(struct dpa_rbtree *tree, type *obj) \
+{ \
+ struct rb_node *node = tree->head; \
+ if (!node) { \
+ tree->head = tree->tail = &obj->node_field; \
+ obj->node_field.prev = obj->node_field.next = NULL; \
+ return 0; \
+ } \
+ while (node) { \
+ type *item = QMAN_NODE2OBJ(node, type, node_field); \
+ if (obj->val_field == item->val_field) \
+ return -EBUSY; \
+ if (obj->val_field < item->val_field) { \
+ if (tree->head == node) \
+ tree->head = &obj->node_field; \
+ else \
+ node->prev->next = &obj->node_field; \
+ obj->node_field.prev = node->prev; \
+ obj->node_field.next = node; \
+ node->prev = &obj->node_field; \
+ return 0; \
+ } \
+ node = node->next; \
+ } \
+ obj->node_field.prev = tree->tail; \
+ obj->node_field.next = NULL; \
+ tree->tail->next = &obj->node_field; \
+ tree->tail = &obj->node_field; \
+ return 0; \
+} \
+static inline void name##_del(struct dpa_rbtree *tree, type *obj) \
+{ \
+ if (tree->head == &obj->node_field) { \
+ if (tree->tail == &obj->node_field) \
+ /* Only item in the list */ \
+ tree->head = tree->tail = NULL; \
+ else { \
+ /* Is the head, next != NULL */ \
+ tree->head = tree->head->next; \
+ tree->head->prev = NULL; \
+ } \
+ } else { \
+ if (tree->tail == &obj->node_field) { \
+ /* Is the tail, prev != NULL */ \
+ tree->tail = tree->tail->prev; \
+ tree->tail->next = NULL; \
+ } else { \
+ /* Is neither the head nor the tail */ \
+ obj->node_field.prev->next = obj->node_field.next; \
+ obj->node_field.next->prev = obj->node_field.prev; \
+ } \
+ } \
+} \
+static inline type *name##_find(struct dpa_rbtree *tree, u32 val) \
+{ \
+ struct rb_node *node = tree->head; \
+ while (node) { \
+ type *item = QMAN_NODE2OBJ(node, type, node_field); \
+ if (val == item->val_field) \
+ return item; \
+ if (val < item->val_field) \
+ return NULL; \
+ node = node->next; \
+ } \
+ return NULL; \
+}
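+
+/* Usage sketch (illustrative, with a hypothetical object type): instantiating
+ * the macro generates typed push/del/find helpers over a dpa_rbtree.
+ *
+ * struct my_obj {
+ * u32 key;
+ * struct rb_node node;
+ * };
+ * IMPLEMENT_DPAA_RBTREE(my_tree, struct my_obj, node, key);
+ *
+ * This defines my_tree_push(), my_tree_del() and my_tree_find(), operating on
+ * a struct dpa_rbtree initialised with dpa_rbtree_init().
+ */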
+
+#endif /* __DPAA_RBTREE_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/include/fman.h b/src/spdk/dpdk/drivers/bus/dpaa/include/fman.h
new file mode 100644
index 000000000..c02d32d22
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/include/fman.h
@@ -0,0 +1,426 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2010-2012 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ */
+
+#ifndef __FMAN_H
+#define __FMAN_H
+
+#include <stdbool.h>
+#include <net/if.h>
+
+#include <rte_ethdev_driver.h>
+#include <rte_ether.h>
+
+#include <compat.h>
+#include <dpaa_list.h>
+
+#ifndef FMAN_DEVICE_PATH
+#define FMAN_DEVICE_PATH "/dev/mem"
+#endif
+
+#define MEMAC_NUM_OF_PADDRS 7 /* Num of additional exact-match MAC addr regs */
+
+/* Control and Configuration Register (COMMAND_CONFIG) for MEMAC */
+#define CMD_CFG_LOOPBACK_EN 0x00000400
+/**< 21 XGMII/GMII loopback enable */
+#define CMD_CFG_PROMIS_EN 0x00000010
+/**< 27 Promiscuous operation enable */
+#define CMD_CFG_PAUSE_IGNORE 0x00000100
+/**< 23 Ignore Pause frame quanta */
+
+/* Statistics Configuration Register (STATN_CONFIG) */
+#define STATS_CFG_CLR 0x00000004
+/**< 29 Reset all counters */
+#define STATS_CFG_CLR_ON_RD 0x00000002
+/**< 30 Clear on read */
+#define STATS_CFG_SATURATE 0x00000001
+/**< 31 Saturate at the maximum val */
+
+/**< Max receive frame length mask */
+#define MAXFRM_SIZE_MEMAC 0x00007fe0
+#define MAXFRM_RX_MASK 0x0000ffff
+
+/**< Interface Mode Register for MEMAC */
+#define IF_MODE_RLP 0x00000820
+
+/**< Pool Limits */
+#define FMAN_PORT_MAX_EXT_POOLS_NUM 8
+#define FMAN_PORT_OBS_EXT_POOLS_NUM 2
+
+#define FMAN_PORT_CG_MAP_NUM 8
+#define FMAN_PORT_PRS_RESULT_WORDS_NUM 8
+#define FMAN_PORT_BMI_FIFO_UNITS 0x100
+#define FMAN_PORT_IC_OFFSET_UNITS 0x10
+
+#define FMAN_ENABLE_BPOOL_DEPLETION 0xF00000F0
+
+#define HASH_CTRL_MCAST_EN 0x00000100
+#define GROUP_ADDRESS 0x0000010000000000LL
+#define HASH_CTRL_ADDR_MASK 0x0000003F
+
+/* Forward declarations of FMAN interface and Bpool structures */
+struct __fman_if;
+struct fman_if_bpool;
+/* Lists of fman interfaces and bpools */
+TAILQ_HEAD(rte_fman_if_list, __fman_if);
+
+/* Represents the different flavours of network interface */
+enum fman_mac_type {
+ fman_offline = 0,
+ fman_mac_1g,
+ fman_mac_10g,
+};
+
+struct mac_addr {
+ uint32_t mac_addr_l; /**< Lower 32 bits of 48-bit MAC address */
+ uint32_t mac_addr_u; /**< Upper 16 bits of 48-bit MAC address */
+};
+
+struct memac_regs {
+ /* General Control and Status */
+ uint32_t res0000[2];
+ uint32_t command_config; /**< 0x008 Ctrl and cfg */
+ struct mac_addr mac_addr0; /**< 0x00C-0x010 MAC_ADDR_0...1 */
+ uint32_t maxfrm; /**< 0x014 Max frame length */
+ uint32_t res0018[5];
+ uint32_t hashtable_ctrl; /**< 0x02C Hash table control */
+ uint32_t res0030[4];
+ uint32_t ievent; /**< 0x040 Interrupt event */
+ uint32_t tx_ipg_length;
+ /**< 0x044 Transmitter inter-packet-gap */
+ uint32_t res0048;
+ uint32_t imask; /**< 0x04C Interrupt mask */
+ uint32_t res0050;
+ uint32_t pause_quanta[4]; /**< 0x054 Pause quanta */
+ uint32_t pause_thresh[4]; /**< 0x064 Pause quanta threshold */
+ uint32_t rx_pause_status; /**< 0x074 Receive pause status */
+ uint32_t res0078[2];
+ struct mac_addr mac_addr[MEMAC_NUM_OF_PADDRS];
+ /**< 0x80-0x0B4 mac padr */
+ uint32_t lpwake_timer;
+ /**< 0x0B8 Low Power Wakeup Timer */
+ uint32_t sleep_timer;
+ /**< 0x0BC Transmit EEE Low Power Timer */
+ uint32_t res00c0[8];
+ uint32_t statn_config;
+ /**< 0x0E0 Statistics configuration */
+ uint32_t res00e4[7];
+ /* Rx Statistics Counter */
+ uint32_t reoct_l; /**<Rx Eth Octets Counter */
+ uint32_t reoct_u;
+ uint32_t roct_l; /**<Rx Octet Counters */
+ uint32_t roct_u;
+ uint32_t raln_l; /**<Rx Alignment Error Counter */
+ uint32_t raln_u;
+ uint32_t rxpf_l; /**<Rx valid Pause Frame */
+ uint32_t rxpf_u;
+ uint32_t rfrm_l; /**<Rx Frame counter */
+ uint32_t rfrm_u;
+ uint32_t rfcs_l; /**<Rx frame check seq error */
+ uint32_t rfcs_u;
+ uint32_t rvlan_l; /**<Rx Vlan Frame Counter */
+ uint32_t rvlan_u;
+ uint32_t rerr_l; /**<Rx Frame error */
+ uint32_t rerr_u;
+ uint32_t ruca_l; /**<Rx Unicast */
+ uint32_t ruca_u;
+ uint32_t rmca_l; /**<Rx Multicast */
+ uint32_t rmca_u;
+ uint32_t rbca_l; /**<Rx Broadcast */
+ uint32_t rbca_u;
+ uint32_t rdrp_l; /**<Rx Dropped Packet */
+ uint32_t rdrp_u;
+ uint32_t rpkt_l; /**<Rx packet */
+ uint32_t rpkt_u;
+ uint32_t rund_l; /**<Rx undersized packets */
+ uint32_t rund_u;
+ uint32_t r64_l; /**<Rx 64 byte */
+ uint32_t r64_u;
+ uint32_t r127_l;
+ uint32_t r127_u;
+ uint32_t r255_l;
+ uint32_t r255_u;
+ uint32_t r511_l;
+ uint32_t r511_u;
+ uint32_t r1023_l;
+ uint32_t r1023_u;
+ uint32_t r1518_l;
+ uint32_t r1518_u;
+ uint32_t r1519x_l;
+ uint32_t r1519x_u;
+ uint32_t rovr_l; /**<Rx oversized but good */
+ uint32_t rovr_u;
+ uint32_t rjbr_l; /**<Rx oversized with bad csum */
+ uint32_t rjbr_u;
+ uint32_t rfrg_l; /**<Rx fragment Packet */
+ uint32_t rfrg_u;
+ uint32_t rcnp_l; /**<Rx control packets (0x8808) */
+ uint32_t rcnp_u;
+ uint32_t rdrntp_l; /**<Rx dropped due to FIFO overflow */
+ uint32_t rdrntp_u;
+ uint32_t res01d0[12];
+ /* Tx Statistics Counter */
+ uint32_t teoct_l; /**<Tx eth octets */
+ uint32_t teoct_u;
+ uint32_t toct_l; /**<Tx Octets */
+ uint32_t toct_u;
+ uint32_t res0210[2];
+ uint32_t txpf_l; /**<Tx valid pause frame */
+ uint32_t txpf_u;
+ uint32_t tfrm_l; /**<Tx frame counter */
+ uint32_t tfrm_u;
+ uint32_t tfcs_l; /**<Tx FCS error */
+ uint32_t tfcs_u;
+ uint32_t tvlan_l; /**<Tx Vlan Frame */
+ uint32_t tvlan_u;
+ uint32_t terr_l; /**<Tx frame error */
+ uint32_t terr_u;
+ uint32_t tuca_l; /**<Tx Unicast */
+ uint32_t tuca_u;
+ uint32_t tmca_l; /**<Tx Multicast */
+ uint32_t tmca_u;
+ uint32_t tbca_l; /**<Tx Broadcast */
+ uint32_t tbca_u;
+ uint32_t res0258[2];
+ uint32_t tpkt_l; /**<Tx Packet */
+ uint32_t tpkt_u;
+ uint32_t tund_l; /**<Tx Undersized */
+ uint32_t tund_u;
+ uint32_t t64_l;
+ uint32_t t64_u;
+ uint32_t t127_l;
+ uint32_t t127_u;
+ uint32_t t255_l;
+ uint32_t t255_u;
+ uint32_t t511_l;
+ uint32_t t511_u;
+ uint32_t t1023_l;
+ uint32_t t1023_u;
+ uint32_t t1518_l;
+ uint32_t t1518_u;
+ uint32_t t1519x_l;
+ uint32_t t1519x_u;
+ uint32_t res02a8[6];
+ uint32_t tcnp_l; /**<Tx Control Packet type - 0x8808 */
+ uint32_t tcnp_u;
+ uint32_t res02c8[14];
+ /* Line Interface Control */
+ uint32_t if_mode; /**< 0x300 Interface Mode Control */
+ uint32_t if_status; /**< 0x304 Interface Status */
+ uint32_t res0308[14];
+ /* HiGig/2 */
+ uint32_t hg_config; /**< 0x340 Control and cfg */
+ uint32_t res0344[3];
+ uint32_t hg_pause_quanta; /**< 0x350 Pause quanta */
+ uint32_t res0354[3];
+ uint32_t hg_pause_thresh; /**< 0x360 Pause quanta threshold */
+ uint32_t res0364[3];
+ uint32_t hgrx_pause_status; /**< 0x370 Receive pause status */
+ uint32_t hg_fifos_status; /**< 0x374 fifos status */
+ uint32_t rhm; /**< 0x378 rx messages counter */
+ uint32_t thm; /**< 0x37C tx messages counter */
+};
+
+struct rx_bmi_regs {
+ uint32_t fmbm_rcfg; /**< Rx Configuration */
+ uint32_t fmbm_rst; /**< Rx Status */
+ uint32_t fmbm_rda; /**< Rx DMA attributes*/
+ uint32_t fmbm_rfp; /**< Rx FIFO Parameters*/
+ uint32_t fmbm_rfed; /**< Rx Frame End Data*/
+ uint32_t fmbm_ricp; /**< Rx Internal Context Parameters*/
+ uint32_t fmbm_rim; /**< Rx Internal Buffer Margins*/
+ uint32_t fmbm_rebm; /**< Rx External Buffer Margins*/
+ uint32_t fmbm_rfne; /**< Rx Frame Next Engine*/
+ uint32_t fmbm_rfca; /**< Rx Frame Command Attributes.*/
+ uint32_t fmbm_rfpne; /**< Rx Frame Parser Next Engine*/
+ uint32_t fmbm_rpso; /**< Rx Parse Start Offset*/
+ uint32_t fmbm_rpp; /**< Rx Policer Profile */
+ uint32_t fmbm_rccb; /**< Rx Coarse Classification Base */
+ uint32_t fmbm_reth; /**< Rx Excessive Threshold */
+ uint32_t reserved003c[1]; /**< (0x03C 0x03F) */
+ uint32_t fmbm_rprai[FMAN_PORT_PRS_RESULT_WORDS_NUM];
+ /**< Rx Parse Results Array Init*/
+ uint32_t fmbm_rfqid; /**< Rx Frame Queue ID*/
+ uint32_t fmbm_refqid; /**< Rx Error Frame Queue ID*/
+ uint32_t fmbm_rfsdm; /**< Rx Frame Status Discard Mask*/
+ uint32_t fmbm_rfsem; /**< Rx Frame Status Error Mask*/
+ uint32_t fmbm_rfene; /**< Rx Frame Enqueue Next Engine */
+ uint32_t reserved0074[0x2]; /**< (0x074-0x07C) */
+ uint32_t fmbm_rcmne;
+ /**< Rx Frame Continuous Mode Next Engine */
+ uint32_t reserved0080[0x20];/**< (0x080 0x0FF) */
+ uint32_t fmbm_ebmpi[FMAN_PORT_MAX_EXT_POOLS_NUM];
+ /**< Buffer Manager pool Information */
+ uint32_t fmbm_acnt[FMAN_PORT_MAX_EXT_POOLS_NUM];
+ /**< Allocate Counter */
+ uint32_t reserved0130[8];
+ /**< 0x130/0x140 - 0x15F reserved */
+ uint32_t fmbm_rcgm[FMAN_PORT_CG_MAP_NUM];
+ /**< Congestion Group Map*/
+ uint32_t fmbm_mpd; /**< BM Pool Depletion */
+ uint32_t reserved0184[0x1F]; /**< (0x184 0x1FF) */
+ uint32_t fmbm_rstc; /**< Rx Statistics Counters*/
+ uint32_t fmbm_rfrc; /**< Rx Frame Counter*/
+ uint32_t fmbm_rfbc; /**< Rx Bad Frames Counter*/
+ uint32_t fmbm_rlfc; /**< Rx Large Frames Counter*/
+ uint32_t fmbm_rffc; /**< Rx Filter Frames Counter*/
+ uint32_t fmbm_rfdc; /**< Rx Frame Discard Counter*/
+ uint32_t fmbm_rfldec; /**< Rx Frames List DMA Error Counter*/
+ uint32_t fmbm_rodc; /**< Rx Out of Buffers Discard Counter */
+ uint32_t fmbm_rbdc; /**< Rx Buffers Deallocate Counter*/
+ uint32_t reserved0224[0x17]; /**< (0x224 0x27F) */
+ uint32_t fmbm_rpc; /**< Rx Performance Counters*/
+ uint32_t fmbm_rpcp; /**< Rx Performance Count Parameters*/
+ uint32_t fmbm_rccn; /**< Rx Cycle Counter*/
+ uint32_t fmbm_rtuc; /**< Rx Tasks Utilization Counter*/
+ uint32_t fmbm_rrquc;
+ /**< Rx Receive Queue Utilization cntr*/
+ uint32_t fmbm_rduc; /**< Rx DMA Utilization Counter*/
+ uint32_t fmbm_rfuc; /**< Rx FIFO Utilization Counter*/
+ uint32_t fmbm_rpac; /**< Rx Pause Activation Counter*/
+ uint32_t reserved02a0[0x18]; /**< (0x2A0 0x2FF) */
+ uint32_t fmbm_rdbg; /**< Rx Debug */
+};
+
+struct fman_port_qmi_regs {
+ uint32_t fmqm_pnc; /**< PortID n Configuration Register */
+ uint32_t fmqm_pns; /**< PortID n Status Register */
+ uint32_t fmqm_pnts; /**< PortID n Task Status Register */
+ uint32_t reserved00c[4]; /**< 0xn00C - 0xn01B */
+ uint32_t fmqm_pnen; /**< PortID n Enqueue NIA Register */
+ uint32_t fmqm_pnetfc; /**< PortID n Enq Total Frame Counter */
+ uint32_t reserved024[2]; /**< 0xn024 - 0x02B */
+ uint32_t fmqm_pndn; /**< PortID n Dequeue NIA Register */
+ uint32_t fmqm_pndc; /**< PortID n Dequeue Config Register */
+ uint32_t fmqm_pndtfc; /**< PortID n Dequeue tot Frame cntr */
+ uint32_t fmqm_pndfdc; /**< PortID n Dequeue FQID Dflt Cntr */
+ uint32_t fmqm_pndcc; /**< PortID n Dequeue Confirm Counter */
+};
+
+/* This struct exports parameters about an Fman network interface, determined
+ * from the device-tree.
+ */
+struct fman_if {
+ /* Which Fman this interface belongs to */
+ uint8_t fman_idx;
+ /* The type/speed of the interface */
+ enum fman_mac_type mac_type;
+ /* Boolean, set when mac type is memac */
+ uint8_t is_memac;
+ /* Boolean, set when PHY is RGMII */
+ uint8_t is_rgmii;
+ /* The index of this MAC (within the Fman it belongs to) */
+ uint8_t mac_idx;
+ /* The MAC address */
+ struct rte_ether_addr mac_addr;
+ /* The Qman channel to schedule Tx FQs to */
+ u16 tx_channel_id;
+ /* The hard-coded FQIDs for this interface. Note: this doesn't cover
+ * the PCD nor the "Rx default" FQIDs, which are configured via FMC
+ * and its XML-based configuration.
+ */
+ uint32_t fqid_rx_def;
+ uint32_t fqid_rx_err;
+ uint32_t fqid_tx_err;
+ uint32_t fqid_tx_confirm;
+
+ struct list_head bpool_list;
+ /* The node for linking this interface into "fman_if_list" */
+ struct list_head node;
+};
+
+/* This struct exposes parameters for buffer pools, extracted from the network
+ * interface settings in the device tree.
+ */
+struct fman_if_bpool {
+ uint32_t bpid;
+ uint64_t count;
+ uint64_t size;
+ uint64_t addr;
+ /* The node for linking this bpool into fman_if::bpool_list */
+ struct list_head node;
+};
+
+/* Internal Context transfer params - FMBM_RICP */
+struct fman_if_ic_params {
+ /* IC offset in the packet buffer */
+ uint16_t iceof;
+ /* IC internal offset */
+ uint16_t iciof;
+ /* IC size to copy */
+ uint16_t icsz;
+};
+
+/* The exported "struct fman_if" type contains the subset of fields we want
+ * exposed. This struct is embedded in a larger "struct __fman_if" which
+ * contains the extra bits we *don't* want exposed.
+ */
+struct __fman_if {
+ struct fman_if __if;
+ char node_path[PATH_MAX];
+ uint64_t regs_size;
+ void *ccsr_map;
+ void *bmi_map;
+ void *qmi_map;
+ struct list_head node;
+};
+
+/* And this is the base list node that the interfaces are added to. (See
+ * fman_if_enable_all_rx() in fsl_fman.h for an example of its use.)
+ */
+extern const struct list_head *fman_if_list;
+
+extern int fman_ccsr_map_fd;
+
+/* To iterate the "bpool_list" for an interface. Eg;
+ * struct fman_if *p = get_ptr_to_some_interface();
+ * struct fman_if_bpool *bp;
+ * printf("Interface uses following BPIDs;\n");
+ * fman_if_for_each_bpool(bp, p) {
+ * printf(" %d\n", bp->bpid);
+ * [...]
+ * }
+ */
+#define fman_if_for_each_bpool(bp, __if) \
+ list_for_each_entry(bp, &(__if)->bpool_list, node)
+
+#define FMAN_ERR(rc, fmt, args...) \
+ do { \
+ _errno = (rc); \
+ DPAA_BUS_LOG(ERR, fmt "(%d)", ##args, errno); \
+ } while (0)
+
+#define FMAN_IP_REV_1 0xC30C4
+#define FMAN_IP_REV_1_MAJOR_MASK 0x0000FF00
+#define FMAN_IP_REV_1_MAJOR_SHIFT 8
+#define FMAN_V3 0x06
+#define FMAN_V3_CONTEXTA_EN_A2V 0x10000000
+#define FMAN_V3_CONTEXTA_EN_OVOM 0x02000000
+#define FMAN_V3_CONTEXTA_EN_EBD 0x80000000
+#define FMAN_CONTEXTA_DIS_CHECKSUM 0x7ull
+#define FMAN_CONTEXTA_SET_OPCODE11 0x2000000b00000000
+extern u16 fman_ip_rev;
+extern u32 fman_dealloc_bufs_mask_hi;
+extern u32 fman_dealloc_bufs_mask_lo;
+
+/**
+ * Initialize the FMAN driver
+ *
+ * @args void
+ * @return
+ * 0 for success; error code otherwise
+ */
+int fman_init(void);
+
+/**
+ * Teardown the FMAN driver
+ *
+ * @args void
+ * @return void
+ */
+void fman_finish(void);
+
+#endif /* __FMAN_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_bman.h b/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_bman.h
new file mode 100644
index 000000000..82da2fcfe
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_bman.h
@@ -0,0 +1,348 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2012 Freescale Semiconductor, Inc.
+ *
+ */
+
+#ifndef __FSL_BMAN_H
+#define __FSL_BMAN_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* This wrapper represents a bit-array for the depletion state of the 64 Bman
+ * buffer pools.
+ */
+struct bman_depletion {
+ u32 state[2];
+};
+
+static inline void bman_depletion_init(struct bman_depletion *c)
+{
+ c->state[0] = c->state[1] = 0;
+}
+
+static inline void bman_depletion_fill(struct bman_depletion *c)
+{
+ c->state[0] = c->state[1] = ~0;
+}
+
+/* --- Bman data structures (and associated constants) --- */
+
+/* Represents s/w corenet portal mapped data structures */
+struct bm_rcr_entry; /* RCR (Release Command Ring) entries */
+struct bm_mc_command; /* MC (Management Command) command */
+struct bm_mc_result; /* MC result */
+
+/* For code reduction, define a wrapper for 48-bit buffers. In cases where a buffer
+ * pool id specific to this buffer is needed (BM_RCR_VERB_CMD_BPID_MULTI,
+ * BM_MCC_VERB_ACQUIRE), the 'bpid' field is used.
+ */
+struct bm_buffer {
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 __reserved1;
+ u8 bpid;
+ u16 hi; /* High 16-bits of 48-bit address */
+ u32 lo; /* Low 32-bits of 48-bit address */
+#else
+ u32 lo;
+ u16 hi;
+ u8 bpid;
+ u8 __reserved;
+#endif
+ };
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u64 __notaddress:16;
+ u64 addr:48;
+#else
+ u64 addr:48;
+ u64 __notaddress:16;
+#endif
+ };
+ u64 opaque;
+ };
+} __rte_aligned(8);
+static inline u64 bm_buffer_get64(const struct bm_buffer *buf)
+{
+ return buf->addr;
+}
+
+static inline dma_addr_t bm_buf_addr(const struct bm_buffer *buf)
+{
+ return (dma_addr_t)buf->addr;
+}
+
+#define bm_buffer_set64(buf, v) \
+ do { \
+ struct bm_buffer *__buf931 = (buf); \
+ __buf931->hi = upper_32_bits(v); \
+ __buf931->lo = lower_32_bits(v); \
+ } while (0)
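+
+/* Usage sketch (illustrative): populate a buffer descriptor with a 48-bit DMA
+ * address ('dma_addr' is a caller-supplied dma_addr_t) before releasing it to
+ * a pool.
+ *
+ * struct bm_buffer buf;
+ * buf.opaque = 0;
+ * bm_buffer_set64(&buf, dma_addr);
+ */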
+
+/* See 1.5.3.5.4: "Release Command" */
+struct bm_rcr_entry {
+ union {
+ struct {
+ u8 __dont_write_directly__verb;
+ u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
+ u8 __reserved1[62];
+ };
+ struct bm_buffer bufs[8];
+ };
+} __packed;
+#define BM_RCR_VERB_VBIT 0x80
+#define BM_RCR_VERB_CMD_MASK 0x70 /* one of two values; */
+#define BM_RCR_VERB_CMD_BPID_SINGLE 0x20
+#define BM_RCR_VERB_CMD_BPID_MULTI 0x30
+#define BM_RCR_VERB_BUFCOUNT_MASK 0x0f /* values 1..8 */
+
+/* See 1.5.3.1: "Acquire Command" */
+/* See 1.5.3.2: "Query Command" */
+struct bm_mcc_acquire {
+ u8 bpid;
+ u8 __reserved1[62];
+} __packed;
+struct bm_mcc_query {
+ u8 __reserved2[63];
+} __packed;
+struct bm_mc_command {
+ u8 __dont_write_directly__verb;
+ union {
+ struct bm_mcc_acquire acquire;
+ struct bm_mcc_query query;
+ };
+} __packed;
+#define BM_MCC_VERB_VBIT 0x80
+#define BM_MCC_VERB_CMD_MASK 0x70 /* where the verb contains; */
+#define BM_MCC_VERB_CMD_ACQUIRE 0x10
+#define BM_MCC_VERB_CMD_QUERY 0x40
+#define BM_MCC_VERB_ACQUIRE_BUFCOUNT 0x0f /* values 1..8 go here */
+
+/* See 1.5.3.3: "Acquire Response" */
+/* See 1.5.3.4: "Query Response" */
+struct bm_pool_state {
+ u8 __reserved1[32];
+ /* "availability state" and "depletion state" */
+ struct {
+ u8 __reserved1[8];
+ /* Access using bman_depletion_***() */
+ struct bman_depletion state;
+ } as, ds;
+};
+
+struct bm_mc_result {
+ union {
+ struct {
+ u8 verb;
+ u8 __reserved1[63];
+ };
+ union {
+ struct {
+ u8 __reserved1;
+ u8 bpid;
+ u8 __reserved2[62];
+ };
+ struct bm_buffer bufs[8];
+ } acquire;
+ struct bm_pool_state query;
+ };
+} __packed;
+#define BM_MCR_VERB_VBIT 0x80
+#define BM_MCR_VERB_CMD_MASK BM_MCC_VERB_CMD_MASK
+#define BM_MCR_VERB_CMD_ACQUIRE BM_MCC_VERB_CMD_ACQUIRE
+#define BM_MCR_VERB_CMD_QUERY BM_MCC_VERB_CMD_QUERY
+#define BM_MCR_VERB_CMD_ERR_INVALID 0x60
+#define BM_MCR_VERB_CMD_ERR_ECC 0x70
+#define BM_MCR_VERB_ACQUIRE_BUFCOUNT BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
+
+/* Portal and Buffer Pools */
+/* Represents a managed portal */
+struct bman_portal;
+
+/* This object type represents Bman buffer pools. */
+struct bman_pool;
+
+/* This struct specifies parameters for a bman_pool object. */
+struct bman_pool_params {
+ /* index of the buffer pool to encapsulate (0-63), ignored if
+ * BMAN_POOL_FLAG_DYNAMIC_BPID is set.
+ */
+ u32 bpid;
+ /* bit-mask of BMAN_POOL_FLAG_*** options */
+ u32 flags;
+ /* depletion-entry/exit thresholds, if BMAN_POOL_FLAG_THRESH is set. NB:
+ * this is only allowed if BMAN_POOL_FLAG_DYNAMIC_BPID is used *and*
+ * when run in the control plane (which controls Bman CCSR). This array
+ * matches the definition of bm_pool_set().
+ */
+ u32 thresholds[4];
+};
+
+/* Flags to bman_new_pool() */
+#define BMAN_POOL_FLAG_NO_RELEASE 0x00000001 /* can't release to pool */
+#define BMAN_POOL_FLAG_ONLY_RELEASE 0x00000002 /* can only release to pool */
+#define BMAN_POOL_FLAG_DYNAMIC_BPID 0x00000008 /* (de)allocate bpid */
+#define BMAN_POOL_FLAG_THRESH 0x00000010 /* set depletion thresholds */
+
+/* Flags to bman_release() */
+#define BMAN_RELEASE_FLAG_NOW 0x00000008 /* issue immediate release */
+
+
+/**
+ * bman_get_portal_index - get portal configuration index
+ */
+int bman_get_portal_index(void);
+
+/**
+ * bman_rcr_is_empty - Determine if portal's RCR is empty
+ *
+ * For use in situations where a cpu-affine caller needs to determine when all
+ * releases for the local portal have been processed by Bman but can't use the
+ * BMAN_RELEASE_FLAG_WAIT_SYNC flag to do this from the final bman_release().
+ * The function forces tracking of RCR consumption (which normally doesn't
+ * happen until release processing needs to find space to put new release
+ * commands), and returns zero if the ring still has unprocessed entries,
+ * non-zero if it is empty.
+ */
+int bman_rcr_is_empty(void);
+
+/**
+ * bman_alloc_bpid_range - Allocate a contiguous range of BPIDs
+ * @result: is set by the API to the base BPID of the allocated range
+ * @count: the number of BPIDs required
+ * @align: required alignment of the allocated range
+ * @partial: non-zero if the API can return fewer than @count BPIDs
+ *
+ * Returns the number of buffer pools allocated, or a negative error code. If
+ * @partial is non-zero, the allocation request may return a smaller range of
+ * BPs than requested (though alignment will be as requested). If @partial is
+ * zero, the return value will either be 'count' or negative.
+ */
+int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial);
+static inline int bman_alloc_bpid(u32 *result)
+{
+ int ret = bman_alloc_bpid_range(result, 1, 0, 0);
+
+ return (ret > 0) ? 0 : ret;
+}
+
+/**
+ * bman_release_bpid_range - Release the specified range of buffer pool IDs
+ * @bpid: the base BPID of the range to deallocate
+ * @count: the number of BPIDs in the range
+ *
+ * This function can also be used to seed the allocator with ranges of BPIDs
+ * that it can subsequently allocate from.
+ */
+void bman_release_bpid_range(u32 bpid, unsigned int count);
+static inline void bman_release_bpid(u32 bpid)
+{
+ bman_release_bpid_range(bpid, 1);
+}
+
+int bman_reserve_bpid_range(u32 bpid, unsigned int count);
+static inline int bman_reserve_bpid(u32 bpid)
+{
+ return bman_reserve_bpid_range(bpid, 1);
+}
+
+void bman_seed_bpid_range(u32 bpid, unsigned int count);
+
+int bman_shutdown_pool(u32 bpid);
+
+/**
+ * bman_new_pool - Allocates a Buffer Pool object
+ * @params: parameters specifying the buffer pool ID and behaviour
+ *
+ * Creates a pool object for the given @params. A portal and the depletion
+ * callback field of @params are only used if the BMAN_POOL_FLAG_DEPLETION flag
+ * is set. NB, the fields from @params are copied into the new pool object, so
+ * the structure provided by the caller can be released or reused after the
+ * function returns.
+ */
+__rte_internal
+struct bman_pool *bman_new_pool(const struct bman_pool_params *params);
+
+/**
+ * bman_free_pool - Deallocates a Buffer Pool object
+ * @pool: the pool object to release
+ */
+__rte_internal
+void bman_free_pool(struct bman_pool *pool);
+
+/**
+ * bman_get_params - Returns a pool object's parameters.
+ * @pool: the pool object
+ *
+ * The returned pointer refers to state within the pool object so must not be
+ * modified and can no longer be read once the pool object is destroyed.
+ */
+__rte_internal
+const struct bman_pool_params *bman_get_params(const struct bman_pool *pool);
+
+/**
+ * bman_release - Release buffer(s) to the buffer pool
+ * @pool: the buffer pool object to release to
+ * @bufs: an array of buffers to release
+ * @num: the number of buffers in @bufs (1-8)
+ * @flags: bit-mask of BMAN_RELEASE_FLAG_*** options
+ *
+ */
+__rte_internal
+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
+ u32 flags);
+
+/**
+ * bman_acquire - Acquire buffer(s) from a buffer pool
+ * @pool: the buffer pool object to acquire from
+ * @bufs: array for storing the acquired buffers
+ * @num: the number of buffers desired (@bufs is at least this big)
+ *
+ * Issues an "Acquire" command via the portal's management command interface.
+ * The return value will be the number of buffers obtained from the pool, or a
+ * negative error code if a h/w error or pool starvation was encountered.
+ */
+__rte_internal
+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
+ u32 flags);
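+
+/* Usage sketch (illustrative): release one buffer to a pool created with
+ * bman_new_pool() and later re-acquire it; 'pool' and 'dma_addr' are supplied
+ * by the caller and error handling is elided.
+ *
+ * struct bm_buffer buf;
+ * bm_buffer_set64(&buf, dma_addr);
+ * if (bman_release(pool, &buf, 1, 0))
+ * return;
+ * ...
+ * if (bman_acquire(pool, &buf, 1, 0) == 1)
+ * use_buffer(bm_buf_addr(&buf));
+ */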
+
+/**
+ * bman_query_pools - Query all buffer pool states
+ * @state: storage for the queried availability and depletion states
+ */
+int bman_query_pools(struct bm_pool_state *state);
+
+/**
+ * bman_query_free_buffers - Query how many free buffers are in buffer pool
+ * @pool: the buffer pool object to query
+ *
+ * Return the number of free buffers
+ */
+__rte_internal
+u32 bman_query_free_buffers(struct bman_pool *pool);
+
+/**
+ * bman_update_pool_thresholds - Change the buffer pool's depletion thresholds
+ * @pool: the buffer pool object to which the thresholds will be set
+ * @thresholds: the new thresholds
+ */
+int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds);
+
+/**
+ * bm_pool_set_hw_threshold - Change the buffer pool's thresholds
+ * @bpid: the buffer pool ID
+ * @low_thresh: low threshold
+ * @high_thresh: high threshold
+ */
+int bm_pool_set_hw_threshold(u32 bpid, const u32 low_thresh,
+ const u32 high_thresh);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FSL_BMAN_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_fman.h b/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_fman.h
new file mode 100644
index 000000000..6c87c8db0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_fman.h
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2017 NXP
+ *
+ */
+
+#ifndef __FSL_FMAN_H
+#define __FSL_FMAN_H
+
+#include <rte_compat.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* The status field in the FD is updated on the Rx side by FMAN with the
+ * following information. Refer to the field descriptions in the FM Block
+ * Guide (BG).
+ */
+struct fm_status_t {
+ unsigned int reserved0:3;
+ unsigned int dcl4c:1; /* Don't Check L4 Checksum */
+ unsigned int reserved1:1;
+ unsigned int ufd:1; /* Unsupported Format */
+ unsigned int lge:1; /* Length Error */
+ unsigned int dme:1; /* DMA Error */
+
+ unsigned int reserved2:4;
+ unsigned int fpe:1; /* Frame physical Error */
+ unsigned int fse:1; /* Frame Size Error */
+ unsigned int dis:1; /* Discard by Classification */
+ unsigned int reserved3:1;
+
+ unsigned int eof:1; /* Key Extraction goes out of frame */
+ unsigned int nss:1; /* No Scheme selected */
+ unsigned int kso:1; /* Key Size Overflow */
+ unsigned int reserved4:1;
+ unsigned int fcl:2; /* Frame Color */
+ unsigned int ipp:1; /* Illegal Policer Profile Selected */
+ unsigned int flm:1; /* Frame Length Mismatch */
+ unsigned int pte:1; /* Parser Timeout */
+ unsigned int isp:1; /* Invalid Soft Parser Instruction */
+ unsigned int phe:1; /* Header Error during parsing */
+ unsigned int frdr:1; /* Frame Dropped by disabled port */
+ unsigned int reserved5:4;
+} __rte_packed;
+
+/* Set MAC address for a particular interface */
+__rte_internal
+int fman_if_add_mac_addr(struct fman_if *p, uint8_t *eth, uint8_t addr_num);
+
+/* Remove a MAC address for a particular interface */
+__rte_internal
+void fman_if_clear_mac_addr(struct fman_if *p, uint8_t addr_num);
+
+/* Get the FMAN statistics */
+__rte_internal
+void fman_if_stats_get(struct fman_if *p, struct rte_eth_stats *stats);
+
+/* Reset the FMAN statistics */
+__rte_internal
+void fman_if_stats_reset(struct fman_if *p);
+
+/* Get all of the FMAN statistics */
+__rte_internal
+void fman_if_stats_get_all(struct fman_if *p, uint64_t *value, int n);
+
+/* Set ignore pause option for a specific interface */
+void fman_if_set_rx_ignore_pause_frames(struct fman_if *p, bool enable);
+
+/* Set max frame length */
+void fman_if_conf_max_frame_len(struct fman_if *p, unsigned int max_frame_len);
+
+/* Enable/disable Rx promiscuous mode on specified interface */
+__rte_internal
+void fman_if_promiscuous_enable(struct fman_if *p);
+__rte_internal
+void fman_if_promiscuous_disable(struct fman_if *p);
+
+/* Enable/disable Rx on specific interfaces */
+__rte_internal
+void fman_if_enable_rx(struct fman_if *p);
+__rte_internal
+void fman_if_disable_rx(struct fman_if *p);
+
+/* Enable/disable loopback on specific interfaces */
+__rte_internal
+void fman_if_loopback_enable(struct fman_if *p);
+__rte_internal
+void fman_if_loopback_disable(struct fman_if *p);
+
+/* Set buffer pool on specific interface */
+__rte_internal
+void fman_if_set_bp(struct fman_if *fm_if, unsigned int num, int bpid,
+ size_t bufsize);
+
+/* Get Flow Control threshold parameters on specific interface */
+__rte_internal
+int fman_if_get_fc_threshold(struct fman_if *fm_if);
+
+/* Enable and Set Flow Control threshold parameters on specific interface */
+__rte_internal
+int fman_if_set_fc_threshold(struct fman_if *fm_if,
+ u32 high_water, u32 low_water, u32 bpid);
+
+/* Get Flow Control pause quanta on specific interface */
+__rte_internal
+int fman_if_get_fc_quanta(struct fman_if *fm_if);
+
+/* Set Flow Control pause quanta on specific interface */
+__rte_internal
+int fman_if_set_fc_quanta(struct fman_if *fm_if, u16 pause_quanta);
+
+/* Set default error fqid on specific interface */
+void fman_if_set_err_fqid(struct fman_if *fm_if, uint32_t err_fqid);
+
+/* Get IC transfer params */
+int fman_if_get_ic_params(struct fman_if *fm_if, struct fman_if_ic_params *icp);
+
+/* Set IC transfer params */
+__rte_internal
+int fman_if_set_ic_params(struct fman_if *fm_if,
+ const struct fman_if_ic_params *icp);
+
+/* Get interface fd->offset value */
+__rte_internal
+int fman_if_get_fdoff(struct fman_if *fm_if);
+
+/* Set interface fd->offset value */
+__rte_internal
+void fman_if_set_fdoff(struct fman_if *fm_if, uint32_t fd_offset);
+
+/* Get interface SG enable status value */
+__rte_internal
+int fman_if_get_sg_enable(struct fman_if *fm_if);
+
+/* Set interface SG support mode */
+__rte_internal
+void fman_if_set_sg(struct fman_if *fm_if, int enable);
+
+/* Get interface Max Frame length (MTU) */
+uint16_t fman_if_get_maxfrm(struct fman_if *fm_if);
+
+/* Set interface Max Frame length (MTU) */
+__rte_internal
+void fman_if_set_maxfrm(struct fman_if *fm_if, uint16_t max_frm);
+
+/* Set interface next invoked action for dequeue operation */
+void fman_if_set_dnia(struct fman_if *fm_if, uint32_t nia);
+
+/* Discard error packets on Rx */
+__rte_internal
+void fman_if_discard_rx_errors(struct fman_if *fm_if);
+
+__rte_internal
+void fman_if_set_mcast_filter_table(struct fman_if *p);
+
+__rte_internal
+void fman_if_reset_mcast_filter_table(struct fman_if *p);
+
+int fman_if_add_hash_mac_addr(struct fman_if *p, uint8_t *eth);
+
+int fman_if_get_primary_mac_addr(struct fman_if *p, uint8_t *eth);
+
+
+/* Enable/disable Rx on all interfaces */
+static inline void fman_if_enable_all_rx(void)
+{
+ struct fman_if *__if;
+
+ list_for_each_entry(__if, fman_if_list, node)
+ fman_if_enable_rx(__if);
+}
+
+static inline void fman_if_disable_all_rx(void)
+{
+ struct fman_if *__if;
+
+ list_for_each_entry(__if, fman_if_list, node)
+ fman_if_disable_rx(__if);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FSL_FMAN_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_fman_crc64.h b/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_fman_crc64.h
new file mode 100644
index 000000000..08ad63042
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_fman_crc64.h
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2011 Freescale Semiconductor, Inc.
+ *
+ */
+
+#ifndef __FSL_FMAN_CRC64_H
+#define __FSL_FMAN_CRC64_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * The following definitions provide a software implementation of the CRC64
+ * algorithm implemented within Fman.
+ *
+ * The following example shows how to compute a CRC64 hash value based on
+ * SRC_IP, DST_IP and ESP_SPI values
+ *
+ * #define compute_hash(saddr,daddr,spi) \
+ * do { \
+ * uint64_t result; \
+ * result = fman_crc64_init(); \
+ * result = fman_crc64_compute_32bit(saddr, result); \
+ * result = fman_crc64_compute_32bit(daddr, result); \
+ * result = fman_crc64_compute_32bit(spi, result); \
+ * return (uint32_t) result & RC_HASH_MASK; \
+ * } while (0);
+ *
+ * If hashing over a different number of fields (or of different types) is
+ * required, this can be implemented using the following primitives.
+ */
+
+/* The following table provides the constants used by the Fman CRC64
+ * implementation. The table is instantiated within the DPAA fman driver.
+ * However if the application is not going to be linked against the DPAA fman
+ * driver but will use this Fman CRC64 implementation, then it will need to
+ * instantiate this table by using the DECLARE_FMAN_CRC64_TABLE() macro.
+ */
+struct fman_crc64_t {
+ uint64_t initial;
+ uint64_t table[1 << 8];
+};
+extern struct fman_crc64_t fman_crc64_ecma_182;
+#define DECLARE_FMAN_CRC64_TABLE() \
+struct fman_crc64_t fman_crc64_ecma_182 = { \
+ 0xFFFFFFFFFFFFFFFFULL, \
+ { \
+ 0x0000000000000000ULL, 0xb32e4cbe03a75f6fULL, \
+ 0xf4843657a840a05bULL, 0x47aa7ae9abe7ff34ULL, \
+ 0x7bd0c384ff8f5e33ULL, 0xc8fe8f3afc28015cULL, \
+ 0x8f54f5d357cffe68ULL, 0x3c7ab96d5468a107ULL, \
+ 0xf7a18709ff1ebc66ULL, 0x448fcbb7fcb9e309ULL, \
+ 0x0325b15e575e1c3dULL, 0xb00bfde054f94352ULL, \
+ 0x8c71448d0091e255ULL, 0x3f5f08330336bd3aULL, \
+ 0x78f572daa8d1420eULL, 0xcbdb3e64ab761d61ULL, \
+ 0x7d9ba13851336649ULL, 0xceb5ed8652943926ULL, \
+ 0x891f976ff973c612ULL, 0x3a31dbd1fad4997dULL, \
+ 0x064b62bcaebc387aULL, 0xb5652e02ad1b6715ULL, \
+ 0xf2cf54eb06fc9821ULL, 0x41e11855055bc74eULL, \
+ 0x8a3a2631ae2dda2fULL, 0x39146a8fad8a8540ULL, \
+ 0x7ebe1066066d7a74ULL, 0xcd905cd805ca251bULL, \
+ 0xf1eae5b551a2841cULL, 0x42c4a90b5205db73ULL, \
+ 0x056ed3e2f9e22447ULL, 0xb6409f5cfa457b28ULL, \
+ 0xfb374270a266cc92ULL, 0x48190ecea1c193fdULL, \
+ 0x0fb374270a266cc9ULL, 0xbc9d3899098133a6ULL, \
+ 0x80e781f45de992a1ULL, 0x33c9cd4a5e4ecdceULL, \
+ 0x7463b7a3f5a932faULL, 0xc74dfb1df60e6d95ULL, \
+ 0x0c96c5795d7870f4ULL, 0xbfb889c75edf2f9bULL, \
+ 0xf812f32ef538d0afULL, 0x4b3cbf90f69f8fc0ULL, \
+ 0x774606fda2f72ec7ULL, 0xc4684a43a15071a8ULL, \
+ 0x83c230aa0ab78e9cULL, 0x30ec7c140910d1f3ULL, \
+ 0x86ace348f355aadbULL, 0x3582aff6f0f2f5b4ULL, \
+ 0x7228d51f5b150a80ULL, 0xc10699a158b255efULL, \
+ 0xfd7c20cc0cdaf4e8ULL, 0x4e526c720f7dab87ULL, \
+ 0x09f8169ba49a54b3ULL, 0xbad65a25a73d0bdcULL, \
+ 0x710d64410c4b16bdULL, 0xc22328ff0fec49d2ULL, \
+ 0x85895216a40bb6e6ULL, 0x36a71ea8a7ace989ULL, \
+ 0x0adda7c5f3c4488eULL, 0xb9f3eb7bf06317e1ULL, \
+ 0xfe5991925b84e8d5ULL, 0x4d77dd2c5823b7baULL, \
+ 0x64b62bcaebc387a1ULL, 0xd7986774e864d8ceULL, \
+ 0x90321d9d438327faULL, 0x231c512340247895ULL, \
+ 0x1f66e84e144cd992ULL, 0xac48a4f017eb86fdULL, \
+ 0xebe2de19bc0c79c9ULL, 0x58cc92a7bfab26a6ULL, \
+ 0x9317acc314dd3bc7ULL, 0x2039e07d177a64a8ULL, \
+ 0x67939a94bc9d9b9cULL, 0xd4bdd62abf3ac4f3ULL, \
+ 0xe8c76f47eb5265f4ULL, 0x5be923f9e8f53a9bULL, \
+ 0x1c4359104312c5afULL, 0xaf6d15ae40b59ac0ULL, \
+ 0x192d8af2baf0e1e8ULL, 0xaa03c64cb957be87ULL, \
+ 0xeda9bca512b041b3ULL, 0x5e87f01b11171edcULL, \
+ 0x62fd4976457fbfdbULL, 0xd1d305c846d8e0b4ULL, \
+ 0x96797f21ed3f1f80ULL, 0x2557339fee9840efULL, \
+ 0xee8c0dfb45ee5d8eULL, 0x5da24145464902e1ULL, \
+ 0x1a083bacedaefdd5ULL, 0xa9267712ee09a2baULL, \
+ 0x955cce7fba6103bdULL, 0x267282c1b9c65cd2ULL, \
+ 0x61d8f8281221a3e6ULL, 0xd2f6b4961186fc89ULL, \
+ 0x9f8169ba49a54b33ULL, 0x2caf25044a02145cULL, \
+ 0x6b055fede1e5eb68ULL, 0xd82b1353e242b407ULL, \
+ 0xe451aa3eb62a1500ULL, 0x577fe680b58d4a6fULL, \
+ 0x10d59c691e6ab55bULL, 0xa3fbd0d71dcdea34ULL, \
+ 0x6820eeb3b6bbf755ULL, 0xdb0ea20db51ca83aULL, \
+ 0x9ca4d8e41efb570eULL, 0x2f8a945a1d5c0861ULL, \
+ 0x13f02d374934a966ULL, 0xa0de61894a93f609ULL, \
+ 0xe7741b60e174093dULL, 0x545a57dee2d35652ULL, \
+ 0xe21ac88218962d7aULL, 0x5134843c1b317215ULL, \
+ 0x169efed5b0d68d21ULL, 0xa5b0b26bb371d24eULL, \
+ 0x99ca0b06e7197349ULL, 0x2ae447b8e4be2c26ULL, \
+ 0x6d4e3d514f59d312ULL, 0xde6071ef4cfe8c7dULL, \
+ 0x15bb4f8be788911cULL, 0xa6950335e42fce73ULL, \
+ 0xe13f79dc4fc83147ULL, 0x521135624c6f6e28ULL, \
+ 0x6e6b8c0f1807cf2fULL, 0xdd45c0b11ba09040ULL, \
+ 0x9aefba58b0476f74ULL, 0x29c1f6e6b3e0301bULL, \
+ 0xc96c5795d7870f42ULL, 0x7a421b2bd420502dULL, \
+ 0x3de861c27fc7af19ULL, 0x8ec62d7c7c60f076ULL, \
+ 0xb2bc941128085171ULL, 0x0192d8af2baf0e1eULL, \
+ 0x4638a2468048f12aULL, 0xf516eef883efae45ULL, \
+ 0x3ecdd09c2899b324ULL, 0x8de39c222b3eec4bULL, \
+ 0xca49e6cb80d9137fULL, 0x7967aa75837e4c10ULL, \
+ 0x451d1318d716ed17ULL, 0xf6335fa6d4b1b278ULL, \
+ 0xb199254f7f564d4cULL, 0x02b769f17cf11223ULL, \
+ 0xb4f7f6ad86b4690bULL, 0x07d9ba1385133664ULL, \
+ 0x4073c0fa2ef4c950ULL, 0xf35d8c442d53963fULL, \
+ 0xcf273529793b3738ULL, 0x7c0979977a9c6857ULL, \
+ 0x3ba3037ed17b9763ULL, 0x888d4fc0d2dcc80cULL, \
+ 0x435671a479aad56dULL, 0xf0783d1a7a0d8a02ULL, \
+ 0xb7d247f3d1ea7536ULL, 0x04fc0b4dd24d2a59ULL, \
+ 0x3886b22086258b5eULL, 0x8ba8fe9e8582d431ULL, \
+ 0xcc0284772e652b05ULL, 0x7f2cc8c92dc2746aULL, \
+ 0x325b15e575e1c3d0ULL, 0x8175595b76469cbfULL, \
+ 0xc6df23b2dda1638bULL, 0x75f16f0cde063ce4ULL, \
+ 0x498bd6618a6e9de3ULL, 0xfaa59adf89c9c28cULL, \
+ 0xbd0fe036222e3db8ULL, 0x0e21ac88218962d7ULL, \
+ 0xc5fa92ec8aff7fb6ULL, 0x76d4de52895820d9ULL, \
+ 0x317ea4bb22bfdfedULL, 0x8250e80521188082ULL, \
+ 0xbe2a516875702185ULL, 0x0d041dd676d77eeaULL, \
+ 0x4aae673fdd3081deULL, 0xf9802b81de97deb1ULL, \
+ 0x4fc0b4dd24d2a599ULL, 0xfceef8632775faf6ULL, \
+ 0xbb44828a8c9205c2ULL, 0x086ace348f355aadULL, \
+ 0x34107759db5dfbaaULL, 0x873e3be7d8faa4c5ULL, \
+ 0xc094410e731d5bf1ULL, 0x73ba0db070ba049eULL, \
+ 0xb86133d4dbcc19ffULL, 0x0b4f7f6ad86b4690ULL, \
+ 0x4ce50583738cb9a4ULL, 0xffcb493d702be6cbULL, \
+ 0xc3b1f050244347ccULL, 0x709fbcee27e418a3ULL, \
+ 0x3735c6078c03e797ULL, 0x841b8ab98fa4b8f8ULL, \
+ 0xadda7c5f3c4488e3ULL, 0x1ef430e13fe3d78cULL, \
+ 0x595e4a08940428b8ULL, 0xea7006b697a377d7ULL, \
+ 0xd60abfdbc3cbd6d0ULL, 0x6524f365c06c89bfULL, \
+ 0x228e898c6b8b768bULL, 0x91a0c532682c29e4ULL, \
+ 0x5a7bfb56c35a3485ULL, 0xe955b7e8c0fd6beaULL, \
+ 0xaeffcd016b1a94deULL, 0x1dd181bf68bdcbb1ULL, \
+ 0x21ab38d23cd56ab6ULL, 0x9285746c3f7235d9ULL, \
+ 0xd52f0e859495caedULL, 0x6601423b97329582ULL, \
+ 0xd041dd676d77eeaaULL, 0x636f91d96ed0b1c5ULL, \
+ 0x24c5eb30c5374ef1ULL, 0x97eba78ec690119eULL, \
+ 0xab911ee392f8b099ULL, 0x18bf525d915feff6ULL, \
+ 0x5f1528b43ab810c2ULL, 0xec3b640a391f4fadULL, \
+ 0x27e05a6e926952ccULL, 0x94ce16d091ce0da3ULL, \
+ 0xd3646c393a29f297ULL, 0x604a2087398eadf8ULL, \
+ 0x5c3099ea6de60cffULL, 0xef1ed5546e415390ULL, \
+ 0xa8b4afbdc5a6aca4ULL, 0x1b9ae303c601f3cbULL, \
+ 0x56ed3e2f9e224471ULL, 0xe5c372919d851b1eULL, \
+ 0xa26908783662e42aULL, 0x114744c635c5bb45ULL, \
+ 0x2d3dfdab61ad1a42ULL, 0x9e13b115620a452dULL, \
+ 0xd9b9cbfcc9edba19ULL, 0x6a978742ca4ae576ULL, \
+ 0xa14cb926613cf817ULL, 0x1262f598629ba778ULL, \
+ 0x55c88f71c97c584cULL, 0xe6e6c3cfcadb0723ULL, \
+ 0xda9c7aa29eb3a624ULL, 0x69b2361c9d14f94bULL, \
+ 0x2e184cf536f3067fULL, 0x9d36004b35545910ULL, \
+ 0x2b769f17cf112238ULL, 0x9858d3a9ccb67d57ULL, \
+ 0xdff2a94067518263ULL, 0x6cdce5fe64f6dd0cULL, \
+ 0x50a65c93309e7c0bULL, 0xe388102d33392364ULL, \
+ 0xa4226ac498dedc50ULL, 0x170c267a9b79833fULL, \
+ 0xdcd7181e300f9e5eULL, 0x6ff954a033a8c131ULL, \
+ 0x28532e49984f3e05ULL, 0x9b7d62f79be8616aULL, \
+ 0xa707db9acf80c06dULL, 0x14299724cc279f02ULL, \
+ 0x5383edcd67c06036ULL, 0xe0ada17364673f59ULL} \
+}
+
+/*
+ * Return the initial CRC seed. Use the value returned from this API as the
+ * "crc" parameter to the first call to add data.
+ */
+static inline uint64_t fman_crc64_init(void)
+{
+ return fman_crc64_ecma_182.initial;
+}
+
+/* Updates the CRC with arbitrary data */
+static inline uint64_t fman_crc64_update(uint64_t crc,
+ void *data, unsigned int len)
+{
+ uint8_t *p = data;
+ while (len--)
+ crc = fman_crc64_ecma_182.table[(crc ^ *(p++)) & 0xff] ^
+ (crc >> 8);
+ return crc;
+}
+
+/* Shorthands for updating the CRC with 8/16/32 bits of data.
+ * IMPORTANT NOTE: the typed "data" arguments should not be mistaken for
+ * host-endian numerical values, the assumption is that these values contain
+ * big-endian (ie. network byte order) data.
+ */
+static inline uint64_t fman_crc64_compute_32bit(uint32_t data, uint64_t crc)
+{
+ return fman_crc64_update(crc, &data, sizeof(data));
+}
+static inline uint64_t fman_crc64_compute_16bit(uint16_t data, uint64_t crc)
+{
+ return fman_crc64_update(crc, &data, sizeof(data));
+}
+static inline uint64_t fman_crc64_compute_8bit(uint8_t data, uint64_t crc)
+{
+ return fman_crc64_update(crc, &data, sizeof(data));
+}
+
+/*
+ * Finalise the CRC (by bitwise inversion, i.e. 1's complement)
+ */
+static inline uint64_t fman_crc64_finish(uint64_t seed)
+{
+ return ~seed;
+}
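+
+/* End-to-end sketch (illustrative): hashing an arbitrary byte buffer with the
+ * primitives above. DECLARE_FMAN_CRC64_TABLE() must have been instantiated in
+ * one compilation unit if not linking against the DPAA fman driver.
+ *
+ * uint64_t crc = fman_crc64_init();
+ * crc = fman_crc64_update(crc, data, len);
+ * crc = fman_crc64_finish(crc);
+ */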
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FSL_FMAN_CRC64_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_qman.h b/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_qman.h
new file mode 100644
index 000000000..78b698f39
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_qman.h
@@ -0,0 +1,2139 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2012 Freescale Semiconductor, Inc.
+ * Copyright 2019 NXP
+ *
+ */
+
+#ifndef __FSL_QMAN_H
+#define __FSL_QMAN_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <dpaa_rbtree.h>
+#include <rte_eventdev.h>
+
+/* FQ lookups (turn this on for 64bit user-space) */
+#if (__WORDSIZE == 64)
+#define CONFIG_FSL_QMAN_FQ_LOOKUP
+/* if FQ lookups are supported, this controls the number of initialised,
+ * s/w-consumed FQs that can be supported at any one time.
+ */
+#define CONFIG_FSL_QMAN_FQ_LOOKUP_MAX (32 * 1024)
+#endif
+
+/* Last updated for v00.800 of the BG */
+
+/* Hardware constants */
+#define QM_CHANNEL_SWPORTAL0 0
+#define QMAN_CHANNEL_POOL1 0x21
+#define QMAN_CHANNEL_CAAM 0x80
+#define QMAN_CHANNEL_PME 0xa0
+#define QMAN_CHANNEL_POOL1_REV3 0x401
+#define QMAN_CHANNEL_CAAM_REV3 0x840
+#define QMAN_CHANNEL_PME_REV3 0x860
+extern u16 qm_channel_pool1;
+extern u16 qm_channel_caam;
+extern u16 qm_channel_pme;
+enum qm_dc_portal {
+ qm_dc_portal_fman0 = 0,
+ qm_dc_portal_fman1 = 1,
+ qm_dc_portal_caam = 2,
+ qm_dc_portal_pme = 3
+};
+
+__rte_internal
+u16 dpaa_get_qm_channel_caam(void);
+
+__rte_internal
+u16 dpaa_get_qm_channel_pool(void);
+
+/* Portal processing (interrupt) sources */
+#define QM_PIRQ_CCSCI 0x00200000 /* CEETM Congestion State Change */
+#define QM_PIRQ_CSCI 0x00100000 /* Congestion State Change */
+#define QM_PIRQ_EQCI 0x00080000 /* Enqueue Command Committed */
+#define QM_PIRQ_EQRI 0x00040000 /* EQCR Ring (below threshold) */
+#define QM_PIRQ_DQRI 0x00020000 /* DQRR Ring (non-empty) */
+#define QM_PIRQ_MRI 0x00010000 /* MR Ring (non-empty) */
+/*
+ * This mask contains all the interrupt sources that need handling except DQRI,
+ * ie. that if present should trigger slow-path processing.
+ */
+#define QM_PIRQ_SLOW (QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \
+ QM_PIRQ_MRI | QM_PIRQ_CCSCI)
+
+/* For qman_static_dequeue_*** APIs */
+#define QM_SDQCR_CHANNELS_POOL_MASK 0x00007fff
+/* for n in [1,15] */
+#define QM_SDQCR_CHANNELS_POOL(n) (0x00008000 >> (n))
+/* for conversion from n of qm_channel */
+static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel)
+{
+ return QM_SDQCR_CHANNELS_POOL(channel + 1 - dpaa_get_qm_channel_pool());
+}
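+
+/* Example (illustrative): the SDQCR bit for a pool channel, such as one
+ * returned by dpaa_get_qm_channel_pool(), is obtained via the conversion
+ * helper above:
+ *
+ * u32 sdqcr = QM_SDQCR_CHANNELS_POOL_CONV(channel);
+ */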
+
+/* For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
+ * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
+ * FQID(n) to fill in the frame queue ID.
+ */
+#define QM_VDQCR_PRECEDENCE_VDQCR 0x0
+#define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000
+#define QM_VDQCR_EXACT 0x40000000
+#define QM_VDQCR_NUMFRAMES_MASK 0x3f000000
+#define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24)
+#define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f)
+#define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0)
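+
+/* Example (illustrative): a volatile dequeue command for exactly 4 frames,
+ * taking precedence over SDQCR, could be composed as:
+ *
+ * u32 vdqcr = QM_VDQCR_PRECEDENCE_SDQCR | QM_VDQCR_EXACT |
+ * QM_VDQCR_NUMFRAMES_SET(4);
+ *
+ * with the FQID filled in as described above.
+ */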
+
+/* --- QMan data structures (and associated constants) --- */
+
+/* Represents s/w corenet portal mapped data structures */
+struct qm_eqcr_entry; /* EQCR (EnQueue Command Ring) entries */
+struct qm_dqrr_entry; /* DQRR (DeQueue Response Ring) entries */
+struct qm_mr_entry; /* MR (Message Ring) entries */
+struct qm_mc_command; /* MC (Management Command) command */
+struct qm_mc_result; /* MC result */
+
+#define QM_FD_FORMAT_SG 0x4
+#define QM_FD_FORMAT_LONG 0x2
+#define QM_FD_FORMAT_COMPOUND 0x1
+enum qm_fd_format {
+ /*
+ * 'contig' implies a contiguous buffer, whereas 'sg' implies a
+ * scatter-gather table. 'big' implies a 29-bit length with no offset
+ * field, otherwise length is 20-bit and offset is 9-bit. 'compound'
+ * implies a s/g-like table, where each entry itself represents a frame
+ * (contiguous or scatter-gather) and the 29-bit "length" is
+ * interpreted purely for congestion calculations, ie. a "congestion
+ * weight".
+ */
+ qm_fd_contig = 0,
+ qm_fd_contig_big = QM_FD_FORMAT_LONG,
+ qm_fd_sg = QM_FD_FORMAT_SG,
+ qm_fd_sg_big = QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG,
+ qm_fd_compound = QM_FD_FORMAT_COMPOUND
+};
+
+/* Capitalised versions are un-typed but can be used in static expressions */
+#define QM_FD_CONTIG 0
+#define QM_FD_CONTIG_BIG QM_FD_FORMAT_LONG
+#define QM_FD_SG QM_FD_FORMAT_SG
+#define QM_FD_SG_BIG (QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG)
+#define QM_FD_COMPOUND QM_FD_FORMAT_COMPOUND
+
+/* "Frame Descriptor (FD)" */
+struct qm_fd {
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 dd:2; /* dynamic debug */
+ u8 liodn_offset:6;
+ u8 bpid:8; /* Buffer Pool ID */
+ u8 eliodn_offset:4;
+ u8 __reserved:4;
+ u8 addr_hi; /* high 8-bits of 40-bit address */
+ u32 addr_lo; /* low 32-bits of 40-bit address */
+#else
+ u8 liodn_offset:6;
+ u8 dd:2; /* dynamic debug */
+ u8 bpid:8; /* Buffer Pool ID */
+ u8 __reserved:4;
+ u8 eliodn_offset:4;
+ u8 addr_hi; /* high 8-bits of 40-bit address */
+ u32 addr_lo; /* low 32-bits of 40-bit address */
+#endif
+ };
+ struct {
+ u64 __notaddress:24;
+ /* More efficient address accessor */
+ u64 addr:40;
+ };
+ u64 opaque_addr;
+ };
+ /* The 'format' field indicates the interpretation of the remaining 29
+ * bits of the 32-bit word. For packing reasons, it is duplicated in the
+ * other union elements. Note, union'd structs are difficult to use with
+ * static initialisation under gcc, in which case use the "opaque" form
+ * with one of the macros.
+ */
+ union {
+ /* For easier/faster copying of this part of the fd (eg. from a
+ * DQRR entry to an EQCR entry) copy 'opaque'
+ */
+ u32 opaque;
+ /* If 'format' is _contig or _sg, 20b length and 9b offset */
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ enum qm_fd_format format:3;
+ u16 offset:9;
+ u32 length20:20;
+#else
+ u32 length20:20;
+ u16 offset:9;
+ enum qm_fd_format format:3;
+#endif
+ };
+ /* If 'format' is _contig_big or _sg_big, 29b length */
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ enum qm_fd_format _format1:3;
+ u32 length29:29;
+#else
+ u32 length29:29;
+ enum qm_fd_format _format1:3;
+#endif
+ };
+ /* If 'format' is _compound, 29b "congestion weight" */
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ enum qm_fd_format _format2:3;
+ u32 cong_weight:29;
+#else
+ u32 cong_weight:29;
+ enum qm_fd_format _format2:3;
+#endif
+ };
+ };
+ union {
+ u32 cmd;
+ u32 status;
+ };
+} __rte_aligned(8);
+#define QM_FD_DD_NULL 0x00
+#define QM_FD_PID_MASK 0x3f
+static inline u64 qm_fd_addr_get64(const struct qm_fd *fd)
+{
+ return fd->addr;
+}
+
+static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd)
+{
+ return (dma_addr_t)fd->addr;
+}
+
+/* Macro, so we compile better if 'v' isn't always 64-bit */
+#define qm_fd_addr_set64(fd, v) \
+ do { \
+ struct qm_fd *__fd931 = (fd); \
+ __fd931->addr = v; \
+ } while (0)
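+
+/* Usage sketch (illustrative): prepare a contiguous frame descriptor, with
+ * 'dma_addr', 'off' (9-bit) and 'len' (20-bit) supplied by the caller. The
+ * opaque forms are zeroed first, per the note above about union'd structs and
+ * initialisation.
+ *
+ * struct qm_fd fd;
+ * fd.opaque_addr = 0;
+ * fd.opaque = 0;
+ * qm_fd_addr_set64(&fd, dma_addr);
+ * fd.format = qm_fd_contig;
+ * fd.offset = off;
+ * fd.length20 = len;
+ */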
+
+/* Scatter/Gather table entry */
+struct qm_sg_entry {
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 __reserved1[3];
+ u8 addr_hi; /* high 8-bits of 40-bit address */
+ u32 addr_lo; /* low 32-bits of 40-bit address */
+#else
+ u32 addr_lo; /* low 32-bits of 40-bit address */
+ u8 addr_hi; /* high 8-bits of 40-bit address */
+ u8 __reserved1[3];
+#endif
+ };
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u64 __notaddress:24;
+ u64 addr:40;
+#else
+ u64 addr:40;
+ u64 __notaddress:24;
+#endif
+ };
+ u64 opaque;
+ };
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u32 extension:1; /* Extension bit */
+ u32 final:1; /* Final bit */
+ u32 length:30;
+#else
+ u32 length:30;
+ u32 final:1; /* Final bit */
+ u32 extension:1; /* Extension bit */
+#endif
+ };
+ u32 val;
+ };
+ u8 __reserved2;
+ u8 bpid;
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 __reserved3:3;
+ u16 offset:13;
+#else
+ u16 offset:13;
+ u16 __reserved3:3;
+#endif
+ };
+ u16 val_off;
+ };
+} __packed;
+static inline u64 qm_sg_entry_get64(const struct qm_sg_entry *sg)
+{
+ return sg->addr;
+}
+
+static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg)
+{
+ return (dma_addr_t)sg->addr;
+}
+
+/* Macro, so we compile better if 'v' isn't always 64-bit */
+#define qm_sg_entry_set64(sg, v) \
+ do { \
+ struct qm_sg_entry *__sg931 = (sg); \
+ __sg931->addr = v; \
+ } while (0)
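+
+/* Usage sketch (illustrative): build a single-entry scatter/gather list, with
+ * 'dma_addr' and 'len' supplied by the caller.
+ *
+ * struct qm_sg_entry sg;
+ * memset(&sg, 0, sizeof(sg));
+ * qm_sg_entry_set64(&sg, dma_addr);
+ * sg.length = len;
+ * sg.final = 1;
+ *
+ * with 'final' marking the last entry in the list.
+ */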
+
+/* See 1.5.8.1: "Enqueue Command" */
+struct __rte_aligned(8) qm_eqcr_entry {
+ u8 __dont_write_directly__verb;
+ u8 dca;
+ u16 seqnum;
+ u32 orp; /* 24-bit */
+ u32 fqid; /* 24-bit */
+ u32 tag;
+ struct qm_fd fd; /* this has alignment 8 */
+ u8 __reserved3[32];
+} __packed;
+
+
+/* "Frame Dequeue Response" */
+struct __rte_aligned(8) qm_dqrr_entry {
+ u8 verb;
+ u8 stat;
+ u16 seqnum; /* 15-bit */
+ u8 tok;
+ u8 __reserved2[3];
+ u32 fqid; /* 24-bit */
+ u32 contextB;
+ struct qm_fd fd; /* this has alignment 8 */
+ u8 __reserved4[32];
+};
+
+#define QM_DQRR_VERB_VBIT 0x80
+#define QM_DQRR_VERB_MASK 0x7f /* where the verb contains; */
+#define QM_DQRR_VERB_FRAME_DEQUEUE 0x60 /* "this format" */
+#define QM_DQRR_STAT_FQ_EMPTY 0x80 /* FQ empty */
+#define QM_DQRR_STAT_FQ_HELDACTIVE 0x40 /* FQ held active */
+#define QM_DQRR_STAT_FQ_FORCEELIGIBLE 0x20 /* FQ was force-eligible'd */
+#define QM_DQRR_STAT_FD_VALID 0x10 /* has a non-NULL FD */
+#define QM_DQRR_STAT_UNSCHEDULED 0x02 /* Unscheduled dequeue */
+#define QM_DQRR_STAT_DQCR_EXPIRED 0x01 /* VDQCR or PDQCR expired*/
+
+
+/* "ERN Message Response" */
+/* "FQ State Change Notification" */
+struct qm_mr_entry {
+ union {
+ struct {
+ u8 verb;
+ u8 dca;
+ u16 seqnum;
+ u8 rc; /* Rejection Code */
+ u32 orp:24;
+ u32 fqid; /* 24-bit */
+ u32 tag;
+ struct qm_fd fd; /* this has alignment 8 */
+ } __packed __rte_aligned(8) ern;
+ struct {
+ u8 verb;
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 colour:2; /* See QM_MR_DCERN_COLOUR_* */
+ u8 __reserved1:4;
+ enum qm_dc_portal portal:2;
+#else
+			enum qm_dc_portal portal:2;
+			u8 __reserved1:4;
+ u8 colour:2; /* See QM_MR_DCERN_COLOUR_* */
+#endif
+ u16 __reserved2;
+ u8 rc; /* Rejection Code */
+ u32 __reserved3:24;
+ u32 fqid; /* 24-bit */
+ u32 tag;
+ struct qm_fd fd; /* this has alignment 8 */
+ } __packed __rte_aligned(8) dcern;
+ struct {
+ u8 verb;
+ u8 fqs; /* Frame Queue Status */
+ u8 __reserved1[6];
+ u32 fqid; /* 24-bit */
+ u32 contextB;
+ u8 __reserved2[16];
+ } __packed __rte_aligned(8) fq; /* FQRN/FQRNI/FQRL/FQPN */
+ };
+ u8 __reserved2[32];
+} __packed __rte_aligned(8);
+#define QM_MR_VERB_VBIT 0x80
+/*
+ * ERNs originating from direct-connect portals ("dcern") use 0x20 as a verb
+ * which would be invalid as a s/w enqueue verb. A s/w ERN can be distinguished
+ * from the other MR types by noting if the 0x20 bit is unset.
+ */
+#define QM_MR_VERB_TYPE_MASK 0x27
+#define QM_MR_VERB_DC_ERN 0x20
+#define QM_MR_VERB_FQRN 0x21
+#define QM_MR_VERB_FQRNI 0x22
+#define QM_MR_VERB_FQRL 0x23
+#define QM_MR_VERB_FQPN 0x24
+#define QM_MR_RC_MASK 0xf0 /* contains one of; */
+#define QM_MR_RC_CGR_TAILDROP 0x00
+#define QM_MR_RC_WRED 0x10
+#define QM_MR_RC_ERROR 0x20
+#define QM_MR_RC_ORPWINDOW_EARLY 0x30
+#define QM_MR_RC_ORPWINDOW_LATE 0x40
+#define QM_MR_RC_FQ_TAILDROP 0x50
+#define QM_MR_RC_ORPWINDOW_RETIRED 0x60
+#define QM_MR_RC_ORP_ZERO 0x70
+#define QM_MR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
+#define QM_MR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
+#define QM_MR_DCERN_COLOUR_GREEN 0x00
+#define QM_MR_DCERN_COLOUR_YELLOW 0x01
+#define QM_MR_DCERN_COLOUR_RED 0x02
+#define QM_MR_DCERN_COLOUR_OVERRIDE 0x03
+/*
+ * An identical structure of FQD fields is present in the "Init FQ" command and
+ * the "Query FQ" result, so it is split out into the "struct qm_fqd" type.
+ * Within that, the 'stashing' and 'taildrop' pieces are also factored out; the
+ * latter has two inlines to assist with converting to/from the mant+exp
+ * representation.
+ */
+struct qm_fqd_stashing {
+ /* See QM_STASHING_EXCL_<...> */
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 exclusive;
+ u8 __reserved1:2;
+ /* Numbers of cachelines */
+ u8 annotation_cl:2;
+ u8 data_cl:2;
+ u8 context_cl:2;
+#else
+ u8 context_cl:2;
+ u8 data_cl:2;
+ u8 annotation_cl:2;
+ u8 __reserved1:2;
+ u8 exclusive;
+#endif
+} __packed;
+struct qm_fqd_taildrop {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 __reserved1:3;
+ u16 mant:8;
+ u16 exp:5;
+#else
+ u16 exp:5;
+ u16 mant:8;
+ u16 __reserved1:3;
+#endif
+} __packed;
+struct qm_fqd_oac {
+ /* "Overhead Accounting Control", see QM_OAC_<...> */
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 oac:2; /* "Overhead Accounting Control" */
+ u8 __reserved1:6;
+#else
+ u8 __reserved1:6;
+ u8 oac:2; /* "Overhead Accounting Control" */
+#endif
+ /* Two's-complement value (-128 to +127) */
+ signed char oal; /* "Overhead Accounting Length" */
+} __packed;
+struct qm_fqd {
+ union {
+ u8 orpc;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 __reserved1:2;
+ u8 orprws:3;
+ u8 oa:1;
+ u8 olws:2;
+#else
+ u8 olws:2;
+ u8 oa:1;
+ u8 orprws:3;
+ u8 __reserved1:2;
+#endif
+ } __packed;
+ };
+ u8 cgid;
+ u16 fq_ctrl; /* See QM_FQCTRL_<...> */
+ union {
+ u16 dest_wq;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 channel:13; /* qm_channel */
+ u16 wq:3;
+#else
+ u16 wq:3;
+ u16 channel:13; /* qm_channel */
+#endif
+ } __packed dest;
+ };
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 __reserved2:1;
+ u16 ics_cred:15;
+#else
+	u16 ics_cred:15;
+	u16 __reserved2:1;
+#endif
+ /*
+ * For "Initialize Frame Queue" commands, the write-enable mask
+ * determines whether 'td' or 'oac_init' is observed. For query
+ * commands, this field is always 'td', and 'oac_query' (below) reflects
+ * the Overhead ACcounting values.
+ */
+ union {
+ uint16_t opaque_td;
+ struct qm_fqd_taildrop td;
+ struct qm_fqd_oac oac_init;
+ };
+ u32 context_b;
+ union {
+ /* Treat it as 64-bit opaque */
+ u64 opaque;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u32 hi;
+ u32 lo;
+#else
+ u32 lo;
+ u32 hi;
+#endif
+ };
+ /* Treat it as s/w portal stashing config */
+ /* see "FQD Context_A field used for [...]" */
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ struct qm_fqd_stashing stashing;
+ /*
+ * 48-bit address of FQ context to
+ * stash, must be cacheline-aligned
+ */
+ u16 context_hi;
+ u32 context_lo;
+#else
+ u32 context_lo;
+ u16 context_hi;
+ struct qm_fqd_stashing stashing;
+#endif
+ } __packed;
+ } context_a;
+ struct qm_fqd_oac oac_query;
+} __packed;
+/* 64-bit converters for context_hi/lo */
+static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd)
+{
+ return ((u64)fqd->context_a.context_hi << 32) |
+ (u64)fqd->context_a.context_lo;
+}
+
+static inline dma_addr_t qm_fqd_stashing_addr(const struct qm_fqd *fqd)
+{
+ return (dma_addr_t)qm_fqd_stashing_get64(fqd);
+}
+
+static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd)
+{
+ return ((u64)fqd->context_a.hi << 32) |
+ (u64)fqd->context_a.lo;
+}
+
+static inline void qm_fqd_stashing_set64(struct qm_fqd *fqd, u64 addr)
+{
+ fqd->context_a.context_hi = upper_32_bits(addr);
+ fqd->context_a.context_lo = lower_32_bits(addr);
+}
+
+static inline void qm_fqd_context_a_set64(struct qm_fqd *fqd, u64 addr)
+{
+ fqd->context_a.hi = upper_32_bits(addr);
+ fqd->context_a.lo = lower_32_bits(addr);
+}
+
+/* convert a threshold value into mant+exp representation */
+static inline int qm_fqd_taildrop_set(struct qm_fqd_taildrop *td, u32 val,
+ int roundup)
+{
+ u32 e = 0;
+ int oddbit = 0;
+
+ if (val > 0xe0000000)
+ return -ERANGE;
+ while (val > 0xff) {
+ oddbit = val & 1;
+ val >>= 1;
+ e++;
+ if (roundup && oddbit)
+ val++;
+ }
+ td->exp = e;
+ td->mant = val;
+ return 0;
+}
+
+/* and the other direction */
+static inline u32 qm_fqd_taildrop_get(const struct qm_fqd_taildrop *td)
+{
+ return (u32)td->mant << td->exp;
+}
+
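+/*
+ * Worked example of the mant+exp conversion above (illustrative only):
+ * qm_fqd_taildrop_set(&td, 0x12345, 1) halves 0x12345 nine times, rounding
+ * up whenever an odd bit is shifted out, leaving mant=0x92 and exp=9, so
+ * qm_fqd_taildrop_get(&td) returns 0x92 << 9 = 0x12400, the smallest
+ * representable threshold at or above the requested value.
+ */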
+
+/* See "Frame Queue Descriptor (FQD)" */
+/* Frame Queue Descriptor (FQD) field 'fq_ctrl' uses these constants */
+#define QM_FQCTRL_MASK 0x07ff /* 'fq_ctrl' flags; */
+#define QM_FQCTRL_CGE 0x0400 /* Congestion Group Enable */
+#define QM_FQCTRL_TDE 0x0200 /* Tail-Drop Enable */
+#define QM_FQCTRL_ORP 0x0100 /* ORP Enable */
+#define QM_FQCTRL_CTXASTASHING 0x0080 /* Context-A stashing */
+#define QM_FQCTRL_CPCSTASH 0x0040 /* CPC Stash Enable */
+#define QM_FQCTRL_FORCESFDR 0x0008 /* High-priority SFDRs */
+#define QM_FQCTRL_AVOIDBLOCK 0x0004 /* Don't block active */
+#define QM_FQCTRL_HOLDACTIVE 0x0002 /* Hold active in portal */
+#define QM_FQCTRL_PREFERINCACHE 0x0001 /* Aggressively cache FQD */
+#define QM_FQCTRL_LOCKINCACHE QM_FQCTRL_PREFERINCACHE /* older naming */
+
+/* See "FQD Context_A field used for [...] */
+/* Frame Queue Descriptor (FQD) field 'CONTEXT_A' uses these constants */
+#define QM_STASHING_EXCL_ANNOTATION 0x04
+#define QM_STASHING_EXCL_DATA 0x02
+#define QM_STASHING_EXCL_CTX 0x01
+
+/* See "Intra Class Scheduling" */
+/* FQD field 'OAC' (Overhead ACcounting) uses these constants */
+#define QM_OAC_ICS 0x2 /* Accounting for Intra-Class Scheduling */
+#define QM_OAC_CG 0x1 /* Accounting for Congestion Groups */
+
+/*
+ * This struct represents the 32-bit "WR_PARM_[GYR]" parameters in CGR fields
+ * and associated commands/responses. The WRED parameters are calculated from
+ * these fields as follows;
+ * MaxTH = MA * (2 ^ Mn)
+ * Slope = SA / (2 ^ Sn)
+ * MaxP = 4 * (Pn + 1)
+ */
+struct qm_cgr_wr_parm {
+ union {
+ u32 word;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u32 MA:8;
+ u32 Mn:5;
+ u32 SA:7; /* must be between 64-127 */
+ u32 Sn:6;
+ u32 Pn:6;
+#else
+ u32 Pn:6;
+ u32 Sn:6;
+ u32 SA:7; /* must be between 64-127 */
+ u32 Mn:5;
+ u32 MA:8;
+#endif
+ } __packed;
+ };
+} __packed;
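+/*
+ * For example (illustrative only), MA=64, Mn=4, SA=64, Sn=8, Pn=7 encodes
+ * MaxTH = 64 * (2 ^ 4) = 1024, Slope = 64 / (2 ^ 8) = 0.25 and
+ * MaxP = 4 * (7 + 1) = 32.
+ */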
+/*
+ * This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding
+ * management commands, this is padded to a 16-bit structure field, so that's
+ * how we represent it here. The congestion state threshold is calculated from
+ * these fields as follows;
+ * CS threshold = TA * (2 ^ Tn)
+ */
+struct qm_cgr_cs_thres {
+ union {
+ u16 hword;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 __reserved:3;
+ u16 TA:8;
+ u16 Tn:5;
+#else
+ u16 Tn:5;
+ u16 TA:8;
+ u16 __reserved:3;
+#endif
+ } __packed;
+ };
+} __packed;
+/*
+ * This identical structure of CGR fields is present in the "Init/Modify CGR"
+ * commands and the "Query CGR" result. It is factored out here into its own
+ * struct.
+ */
+struct __qm_mc_cgr {
+ struct qm_cgr_wr_parm wr_parm_g;
+ struct qm_cgr_wr_parm wr_parm_y;
+ struct qm_cgr_wr_parm wr_parm_r;
+ u8 wr_en_g; /* boolean, use QM_CGR_EN */
+ u8 wr_en_y; /* boolean, use QM_CGR_EN */
+ u8 wr_en_r; /* boolean, use QM_CGR_EN */
+ u8 cscn_en; /* boolean, use QM_CGR_EN */
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */
+ u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */
+#else
+ u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */
+ u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */
+#endif
+ };
+ u32 cscn_targ; /* use QM_CGR_TARG_* */
+ };
+ u8 cstd_en; /* boolean, use QM_CGR_EN */
+ u8 cs; /* boolean, only used in query response */
+ union {
+ struct qm_cgr_cs_thres cs_thres;
+ /* use qm_cgr_cs_thres_set64() */
+ u16 __cs_thres;
+ };
+ u8 mode; /* QMAN_CGR_MODE_FRAME not supported in rev1.0 */
+} __packed;
+#define QM_CGR_EN 0x01 /* For wr_en_*, cscn_en, cstd_en */
+#define QM_CGR_TARG_UDP_CTRL_WRITE_BIT 0x8000 /* value written to portal bit*/
+#define QM_CGR_TARG_UDP_CTRL_DCP 0x4000 /* 0: SWP, 1: DCP */
+#define QM_CGR_TARG_PORTAL(n) (0x80000000 >> (n)) /* s/w portal, 0-9 */
+#define QM_CGR_TARG_FMAN0 0x00200000 /* direct-connect portal: fman0 */
+#define QM_CGR_TARG_FMAN1 0x00100000 /* : fman1 */
+/* Convert CGR thresholds to/from "cs_thres" format */
+static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th)
+{
+ return (u64)th->TA << th->Tn;
+}
+
+static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
+ int roundup)
+{
+ u32 e = 0;
+ int oddbit = 0;
+
+ while (val > 0xff) {
+ oddbit = val & 1;
+ val >>= 1;
+ e++;
+ if (roundup && oddbit)
+ val++;
+ }
+ th->Tn = e;
+ th->TA = val;
+ return 0;
+}
+
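+/*
+ * The conversion above mirrors qm_fqd_taildrop_set(). For example
+ * (illustrative only), requesting a 1 MB congestion-state threshold:
+ *
+ *	struct qm_cgr_cs_thres th;
+ *
+ *	qm_cgr_cs_thres_set64(&th, 1 << 20, 1);
+ *
+ * leaves TA=0x80 and Tn=13, so qm_cgr_cs_thres_get64(&th) returns
+ * 0x80ULL << 13 = 1 << 20 exactly.
+ */
+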
+/* See 1.5.8.5.1: "Initialize FQ" */
+/* See 1.5.8.5.2: "Query FQ" */
+/* See 1.5.8.5.3: "Query FQ Non-Programmable Fields" */
+/* See 1.5.8.5.4: "Alter FQ State Commands " */
+/* See 1.5.8.6.1: "Initialize/Modify CGR" */
+/* See 1.5.8.6.2: "CGR Test Write" */
+/* See 1.5.8.6.3: "Query CGR" */
+/* See 1.5.8.6.4: "Query Congestion Group State" */
+struct qm_mcc_initfq {
+ u8 __reserved1;
+ u16 we_mask; /* Write Enable Mask */
+ u32 fqid; /* 24-bit */
+ u16 count; /* Initialises 'count+1' FQDs */
+ struct qm_fqd fqd; /* the FQD fields go here */
+ u8 __reserved3[30];
+} __packed;
+struct qm_mcc_queryfq {
+ u8 __reserved1[3];
+ u32 fqid; /* 24-bit */
+ u8 __reserved2[56];
+} __packed;
+struct qm_mcc_queryfq_np {
+ u8 __reserved1[3];
+ u32 fqid; /* 24-bit */
+ u8 __reserved2[56];
+} __packed;
+struct qm_mcc_alterfq {
+ u8 __reserved1[3];
+ u32 fqid; /* 24-bit */
+ u8 __reserved2;
+ u8 count; /* number of consecutive FQID */
+ u8 __reserved3[10];
+ u32 context_b; /* frame queue context b */
+ u8 __reserved4[40];
+} __packed;
+struct qm_mcc_initcgr {
+ u8 __reserved1;
+ u16 we_mask; /* Write Enable Mask */
+ struct __qm_mc_cgr cgr; /* CGR fields */
+ u8 __reserved2[2];
+ u8 cgid;
+ u8 __reserved4[32];
+} __packed;
+struct qm_mcc_cgrtestwrite {
+ u8 __reserved1[2];
+ u8 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
+ u32 i_bcnt_lo; /* low 32-bits of 40-bit */
+ u8 __reserved2[23];
+ u8 cgid;
+ u8 __reserved3[32];
+} __packed;
+struct qm_mcc_querycgr {
+ u8 __reserved1[30];
+ u8 cgid;
+ u8 __reserved2[32];
+} __packed;
+struct qm_mcc_querycongestion {
+ u8 __reserved[63];
+} __packed;
+struct qm_mcc_querywq {
+ u8 __reserved;
+ /* select channel if verb != QUERYWQ_DEDICATED */
+ union {
+ u16 channel_wq; /* ignores wq (3 lsbits) */
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 id:13; /* qm_channel */
+ u16 __reserved1:3;
+#else
+ u16 __reserved1:3;
+ u16 id:13; /* qm_channel */
+#endif
+ } __packed channel;
+ };
+ u8 __reserved2[60];
+} __packed;
+
+struct qm_mc_command {
+ u8 __dont_write_directly__verb;
+ union {
+ struct qm_mcc_initfq initfq;
+ struct qm_mcc_queryfq queryfq;
+ struct qm_mcc_queryfq_np queryfq_np;
+ struct qm_mcc_alterfq alterfq;
+ struct qm_mcc_initcgr initcgr;
+ struct qm_mcc_cgrtestwrite cgrtestwrite;
+ struct qm_mcc_querycgr querycgr;
+ struct qm_mcc_querycongestion querycongestion;
+ struct qm_mcc_querywq querywq;
+ };
+} __packed;
+
+/* INITFQ-specific flags */
+#define QM_INITFQ_WE_MASK 0x01ff /* 'Write Enable' flags; */
+#define QM_INITFQ_WE_OAC 0x0100
+#define QM_INITFQ_WE_ORPC 0x0080
+#define QM_INITFQ_WE_CGID 0x0040
+#define QM_INITFQ_WE_FQCTRL 0x0020
+#define QM_INITFQ_WE_DESTWQ 0x0010
+#define QM_INITFQ_WE_ICSCRED 0x0008
+#define QM_INITFQ_WE_TDTHRESH 0x0004
+#define QM_INITFQ_WE_CONTEXTB 0x0002
+#define QM_INITFQ_WE_CONTEXTA 0x0001
+/* INITCGR/MODIFYCGR-specific flags */
+#define QM_CGR_WE_MASK 0x07ff /* 'Write Enable Mask'; */
+#define QM_CGR_WE_WR_PARM_G 0x0400
+#define QM_CGR_WE_WR_PARM_Y 0x0200
+#define QM_CGR_WE_WR_PARM_R 0x0100
+#define QM_CGR_WE_WR_EN_G 0x0080
+#define QM_CGR_WE_WR_EN_Y 0x0040
+#define QM_CGR_WE_WR_EN_R 0x0020
+#define QM_CGR_WE_CSCN_EN 0x0010
+#define QM_CGR_WE_CSCN_TARG 0x0008
+#define QM_CGR_WE_CSTD_EN 0x0004
+#define QM_CGR_WE_CS_THRES 0x0002
+#define QM_CGR_WE_MODE 0x0001
+
+struct qm_mcr_initfq {
+ u8 __reserved1[62];
+} __packed;
+struct qm_mcr_queryfq {
+ u8 __reserved1[8];
+ struct qm_fqd fqd; /* the FQD fields are here */
+ u8 __reserved2[30];
+} __packed;
+struct qm_mcr_queryfq_np {
+ u8 __reserved1;
+ u8 state; /* QM_MCR_NP_STATE_*** */
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 __reserved2;
+ u32 fqd_link:24;
+ u16 __reserved3:2;
+ u16 odp_seq:14;
+ u16 __reserved4:2;
+ u16 orp_nesn:14;
+ u16 __reserved5:1;
+ u16 orp_ea_hseq:15;
+ u16 __reserved6:1;
+ u16 orp_ea_tseq:15;
+ u8 __reserved7;
+ u32 orp_ea_hptr:24;
+ u8 __reserved8;
+ u32 orp_ea_tptr:24;
+ u8 __reserved9;
+ u32 pfdr_hptr:24;
+ u8 __reserved10;
+ u32 pfdr_tptr:24;
+ u8 __reserved11[5];
+ u8 __reserved12:7;
+ u8 is:1;
+ u16 ics_surp;
+ u32 byte_cnt;
+ u8 __reserved13;
+ u32 frm_cnt:24;
+ u32 __reserved14;
+ u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
+ u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
+ u16 __reserved15;
+ u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
+ u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
+ u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
+#else
+ u8 __reserved2;
+ u32 fqd_link:24;
+
+ u16 odp_seq:14;
+ u16 __reserved3:2;
+
+ u16 orp_nesn:14;
+ u16 __reserved4:2;
+
+ u16 orp_ea_hseq:15;
+ u16 __reserved5:1;
+
+ u16 orp_ea_tseq:15;
+ u16 __reserved6:1;
+
+ u8 __reserved7;
+ u32 orp_ea_hptr:24;
+
+ u8 __reserved8;
+ u32 orp_ea_tptr:24;
+
+ u8 __reserved9;
+ u32 pfdr_hptr:24;
+
+ u8 __reserved10;
+ u32 pfdr_tptr:24;
+
+ u8 __reserved11[5];
+ u8 is:1;
+ u8 __reserved12:7;
+ u16 ics_surp;
+ u32 byte_cnt;
+ u8 __reserved13;
+ u32 frm_cnt:24;
+ u32 __reserved14;
+ u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
+ u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
+ u16 __reserved15;
+ u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
+ u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
+ u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
+#endif
+} __packed;
+
+struct qm_mcr_alterfq {
+ u8 fqs; /* Frame Queue Status */
+ u8 __reserved1[61];
+} __packed;
+struct qm_mcr_initcgr {
+ u8 __reserved1[62];
+} __packed;
+struct qm_mcr_cgrtestwrite {
+ u16 __reserved1;
+ struct __qm_mc_cgr cgr; /* CGR fields */
+ u8 __reserved2[3];
+ u32 __reserved3:24;
+ u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
+ u32 i_bcnt_lo; /* low 32-bits of 40-bit */
+ u32 __reserved4:24;
+ u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
+ u32 a_bcnt_lo; /* low 32-bits of 40-bit */
+ u16 lgt; /* Last Group Tick */
+ u16 wr_prob_g;
+ u16 wr_prob_y;
+ u16 wr_prob_r;
+ u8 __reserved5[8];
+} __packed;
+struct qm_mcr_querycgr {
+ u16 __reserved1;
+ struct __qm_mc_cgr cgr; /* CGR fields */
+ u8 __reserved2[3];
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u32 __reserved3:24;
+ u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
+ u32 i_bcnt_lo; /* low 32-bits of 40-bit */
+#else
+ u32 i_bcnt_lo; /* low 32-bits of 40-bit */
+ u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
+ u32 __reserved3:24;
+#endif
+ };
+ u64 i_bcnt;
+ };
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u32 __reserved4:24;
+ u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
+ u32 a_bcnt_lo; /* low 32-bits of 40-bit */
+#else
+ u32 a_bcnt_lo; /* low 32-bits of 40-bit */
+ u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
+ u32 __reserved4:24;
+#endif
+ };
+ u64 a_bcnt;
+ };
+ union {
+ u32 cscn_targ_swp[4];
+ u8 __reserved5[16];
+ };
+} __packed;
+
+struct __qm_mcr_querycongestion {
+ u32 state[8];
+};
+
+struct qm_mcr_querycongestion {
+ u8 __reserved[30];
+ /* Access this struct using QM_MCR_QUERYCONGESTION() */
+ struct __qm_mcr_querycongestion state;
+} __packed;
+struct qm_mcr_querywq {
+ union {
+ u16 channel_wq; /* ignores wq (3 lsbits) */
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 id:13; /* qm_channel */
+ u16 __reserved:3;
+#else
+ u16 __reserved:3;
+ u16 id:13; /* qm_channel */
+#endif
+ } __packed channel;
+ };
+ u8 __reserved[28];
+ u32 wq_len[8];
+} __packed;
+
+struct qm_mc_result {
+ u8 verb;
+ u8 result;
+ union {
+ struct qm_mcr_initfq initfq;
+ struct qm_mcr_queryfq queryfq;
+ struct qm_mcr_queryfq_np queryfq_np;
+ struct qm_mcr_alterfq alterfq;
+ struct qm_mcr_initcgr initcgr;
+ struct qm_mcr_cgrtestwrite cgrtestwrite;
+ struct qm_mcr_querycgr querycgr;
+ struct qm_mcr_querycongestion querycongestion;
+ struct qm_mcr_querywq querywq;
+ };
+} __packed;
+
+#define QM_MCR_VERB_RRID 0x80
+#define QM_MCR_VERB_MASK QM_MCC_VERB_MASK
+#define QM_MCR_VERB_INITFQ_PARKED QM_MCC_VERB_INITFQ_PARKED
+#define QM_MCR_VERB_INITFQ_SCHED QM_MCC_VERB_INITFQ_SCHED
+#define QM_MCR_VERB_QUERYFQ QM_MCC_VERB_QUERYFQ
+#define QM_MCR_VERB_QUERYFQ_NP QM_MCC_VERB_QUERYFQ_NP
+#define QM_MCR_VERB_QUERYWQ QM_MCC_VERB_QUERYWQ
+#define QM_MCR_VERB_QUERYWQ_DEDICATED QM_MCC_VERB_QUERYWQ_DEDICATED
+#define QM_MCR_VERB_ALTER_SCHED QM_MCC_VERB_ALTER_SCHED
+#define QM_MCR_VERB_ALTER_FE QM_MCC_VERB_ALTER_FE
+#define QM_MCR_VERB_ALTER_RETIRE QM_MCC_VERB_ALTER_RETIRE
+#define QM_MCR_VERB_ALTER_OOS QM_MCC_VERB_ALTER_OOS
+#define QM_MCR_RESULT_NULL 0x00
+#define QM_MCR_RESULT_OK 0xf0
+#define QM_MCR_RESULT_ERR_FQID 0xf1
+#define QM_MCR_RESULT_ERR_FQSTATE 0xf2
+#define QM_MCR_RESULT_ERR_NOTEMPTY 0xf3 /* OOS fails if FQ is !empty */
+#define QM_MCR_RESULT_ERR_BADCHANNEL 0xf4
+#define QM_MCR_RESULT_PENDING 0xf8
+#define QM_MCR_RESULT_ERR_BADCOMMAND 0xff
+#define QM_MCR_NP_STATE_FE 0x10
+#define QM_MCR_NP_STATE_R 0x08
+#define QM_MCR_NP_STATE_MASK 0x07 /* Reads FQD::STATE; */
+#define QM_MCR_NP_STATE_OOS 0x00
+#define QM_MCR_NP_STATE_RETIRED 0x01
+#define QM_MCR_NP_STATE_TEN_SCHED 0x02
+#define QM_MCR_NP_STATE_TRU_SCHED 0x03
+#define QM_MCR_NP_STATE_PARKED 0x04
+#define QM_MCR_NP_STATE_ACTIVE 0x05
+#define QM_MCR_NP_PTR_MASK 0x07ff /* for RA[12] & OD[123] */
+#define QM_MCR_NP_RA1_NRA(v) (((v) >> 14) & 0x3) /* FQD::NRA */
+#define QM_MCR_NP_RA2_IT(v) (((v) >> 14) & 0x1) /* FQD::IT */
+#define QM_MCR_NP_OD1_NOD(v) (((v) >> 14) & 0x3) /* FQD::NOD */
+#define QM_MCR_NP_OD3_NPC(v) (((v) >> 14) & 0x3) /* FQD::NPC */
+#define QM_MCR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
+#define QM_MCR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
+/* This extracts the state for congestion group 'n' from a query response.
+ * Eg.
+ * u8 cgr = [...];
+ * struct qm_mc_result *res = [...];
+ * printf("congestion group %d congestion state: %d\n", cgr,
+ * QM_MCR_QUERYCONGESTION(&res->querycongestion.state, cgr));
+ */
+#define __CGR_WORD(num) (num >> 5)
+#define __CGR_SHIFT(num) (num & 0x1f)
+#define __CGR_NUM (sizeof(struct __qm_mcr_querycongestion) << 3)
+static inline int QM_MCR_QUERYCONGESTION(struct __qm_mcr_querycongestion *p,
+ u8 cgr)
+{
+ return p->state[__CGR_WORD(cgr)] & (0x80000000 >> __CGR_SHIFT(cgr));
+}
+
+ /* Portal and Frame Queues */
+/* Represents a managed portal */
+struct qman_portal;
+
+/*
+ * This object type represents QMan frame queue descriptors (FQD), it is
+ * cacheline-aligned, and initialised by qman_create_fq(). The structure is
+ * defined further down.
+ */
+struct qman_fq;
+
+/*
+ * This object type represents a QMan congestion group, it is defined further
+ * down.
+ */
+struct qman_cgr;
+
+/*
+ * This enum, and the callback type that returns it, are used when handling
+ * dequeued frames via DQRR. Note that for "null" callbacks registered with the
+ * portal object (for handling dequeues that do not demux because context_b is
+ * NULL), the return value *MUST* be qman_cb_dqrr_consume.
+ */
+enum qman_cb_dqrr_result {
+ /* DQRR entry can be consumed */
+ qman_cb_dqrr_consume,
+ /* Like _consume, but requests parking - FQ must be held-active */
+ qman_cb_dqrr_park,
+ /* Does not consume, for DCA mode only. This allows out-of-order
+ * consumes by explicit calls to qman_dca() and/or the use of implicit
+ * DCA via EQCR entries.
+ */
+ qman_cb_dqrr_defer,
+ /*
+ * Stop processing without consuming this ring entry. Exits the current
+ * qman_p_poll_dqrr() or interrupt-handling, as appropriate. If within
+ * an interrupt handler, the callback would typically call
+ * qman_irqsource_remove(QM_PIRQ_DQRI) before returning this value,
+ * otherwise the interrupt will reassert immediately.
+ */
+ qman_cb_dqrr_stop,
+ /* Like qman_cb_dqrr_stop, but consumes the current entry. */
+ qman_cb_dqrr_consume_stop
+};
+
+typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr);
+
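+/*
+ * A minimal DQRR callback sketch (illustrative only; 'my_rx_handler' is
+ * hypothetical, and 'struct my_fq' refers to the example in the comment
+ * above struct qman_fq, further down this file):
+ *
+ *	static enum qman_cb_dqrr_result
+ *	my_rx_cb(struct qman_portal *qm, struct qman_fq *fq,
+ *		 const struct qm_dqrr_entry *dqrr)
+ *	{
+ *		my_rx_handler((struct my_fq *)fq, &dqrr->fd);
+ *		return qman_cb_dqrr_consume;
+ *	}
+ *
+ * The callback is installed via the 'cb.dqrr' field of the FQ object passed
+ * to qman_create_fq().
+ */
+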
+typedef enum qman_cb_dqrr_result (*qman_dpdk_cb_dqrr)(void *event,
+ struct qman_portal *qm,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr,
+ void **bd);
+
+/* This callback type is used when handling buffers in dpdk pull mode */
+typedef void (*qman_dpdk_pull_cb_dqrr)(struct qman_fq **fq,
+ struct qm_dqrr_entry **dqrr,
+ void **bufs,
+ int num_bufs);
+
+typedef void (*qman_dpdk_cb_prepare)(struct qm_dqrr_entry *dq, void **bufs);
+
+/*
+ * This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
+ * are always consumed after the callback returns.
+ */
+typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
+ const struct qm_mr_entry *msg);
+
+/* This callback type is used when handling DCP ERNs */
+typedef void (*qman_cb_dc_ern)(struct qman_portal *qm,
+ const struct qm_mr_entry *msg);
+/*
+ * s/w-visible states. Ie. tentatively scheduled + truly scheduled + active +
+ * held-active + held-suspended are just "sched". A state like "retired" will
+ * not be assumed until the transition is complete (ie. QMAN_FQ_STATE_CHANGING
+ * is set until then, to indicate it's completing and to gate retries of the retire
+ * command). Note, park commands do not set QMAN_FQ_STATE_CHANGING because it's
+ * technically impossible in the case of enqueue DCAs (which refer to DQRR ring
+ * index rather than the FQ that ring entry corresponds to), so repeated park
+ * commands are allowed (if you're silly enough to try) but won't change FQ
+ * state, and the resulting park notifications move FQs from "sched" to
+ * "parked".
+ */
+enum qman_fq_state {
+ qman_fq_state_oos,
+ qman_fq_state_parked,
+ qman_fq_state_sched,
+ qman_fq_state_retired
+};
+
+
+/*
+ * Frame queue objects (struct qman_fq) are stored within memory passed to
+ * qman_create_fq(), as this allows stashing of caller-provided demux callback
+ * pointers at no extra cost to stashing of (driver-internal) FQ state. If the
+ * caller wishes to add per-FQ state and have it benefit from dequeue-stashing,
+ * they should;
+ *
+ * (a) extend the qman_fq structure with their state; eg.
+ *
+ * // myfq is allocated and driver_fq callbacks filled in;
+ * struct my_fq {
+ * struct qman_fq base;
+ * int an_extra_field;
+ * [ ... add other fields to be associated with each FQ ...]
+ * } *myfq = some_my_fq_allocator();
+ * struct qman_fq *fq = qman_create_fq(fqid, flags, &myfq->base);
+ *
+ * // in a dequeue callback, access extra fields from 'fq' via a cast;
+ * struct my_fq *myfq = (struct my_fq *)fq;
+ * do_something_with(myfq->an_extra_field);
+ * [...]
+ *
+ * (b) when and if configuring the FQ for context stashing, specify how ever
+ * many cachelines are required to stash 'struct my_fq', to accelerate not
+ * only the QMan driver but the callback as well.
+ */
+
+struct qman_fq_cb {
+ union { /* for dequeued frames */
+ qman_dpdk_cb_dqrr dqrr_dpdk_cb;
+ qman_dpdk_pull_cb_dqrr dqrr_dpdk_pull_cb;
+ qman_cb_dqrr dqrr;
+ };
+ qman_dpdk_cb_prepare dqrr_prepare;
+ qman_cb_mr ern; /* for s/w ERNs */
+ qman_cb_mr fqs; /* frame-queue state changes*/
+};
+
+struct qman_fq {
+ /* Caller of qman_create_fq() provides these demux callbacks */
+ struct qman_fq_cb cb;
+
+ u32 fqid_le;
+ u32 fqid;
+
+ int q_fd;
+ u16 ch_id;
+ u8 cgr_groupid;
+ u8 is_static:4;
+ u8 qp_initialized:4;
+
+ /* DPDK Interface */
+ void *dpaa_intf;
+
+ struct rte_event ev;
+ /* affined portal in case of static queue */
+ struct qman_portal *qp;
+ struct dpaa_bp_info *bp_array;
+
+ volatile unsigned long flags;
+
+ enum qman_fq_state state;
+ spinlock_t fqlock;
+
+ struct rb_node node;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ void **qman_fq_lookup_table;
+ u32 key;
+#endif
+};
+
+/*
+ * This callback type is used when handling congestion group entry/exit.
+ * 'congested' is non-zero on congestion-entry, and zero on congestion-exit.
+ */
+typedef void (*qman_cb_cgr)(struct qman_portal *qm,
+ struct qman_cgr *cgr, int congested);
+
+struct qman_cgr {
+ /* Set these prior to qman_create_cgr() */
+ u32 cgrid; /* 0..255, but u32 to allow specials like -1, 256, etc.*/
+ qman_cb_cgr cb;
+ /* These are private to the driver */
+ u16 chan; /* portal channel this object is created on */
+ struct list_head node;
+};
+
+/* Flags to qman_create_fq() */
+#define QMAN_FQ_FLAG_NO_ENQUEUE 0x00000001 /* can't enqueue */
+#define QMAN_FQ_FLAG_NO_MODIFY 0x00000002 /* can only enqueue */
+#define QMAN_FQ_FLAG_TO_DCPORTAL 0x00000004 /* consumed by CAAM/PME/Fman */
+#define QMAN_FQ_FLAG_LOCKED 0x00000008 /* multi-core locking */
+#define QMAN_FQ_FLAG_AS_IS 0x00000010 /* query h/w state */
+#define QMAN_FQ_FLAG_DYNAMIC_FQID 0x00000020 /* (de)allocate fqid */
+
+/* Flags to qman_destroy_fq() */
+#define QMAN_FQ_DESTROY_PARKED 0x00000001 /* FQ can be parked or OOS */
+
+/* Flags from qman_fq_state() */
+#define QMAN_FQ_STATE_CHANGING 0x80000000 /* 'state' is changing */
+#define QMAN_FQ_STATE_NE 0x40000000 /* retired FQ isn't empty */
+#define QMAN_FQ_STATE_ORL 0x20000000 /* retired FQ has ORL */
+#define QMAN_FQ_STATE_BLOCKOOS 0xe0000000 /* if any are set, no OOS */
+#define QMAN_FQ_STATE_CGR_EN 0x10000000 /* CGR enabled */
+#define QMAN_FQ_STATE_VDQCR 0x08000000 /* being volatile dequeued */
+
+/* Flags to qman_init_fq() */
+#define QMAN_INITFQ_FLAG_SCHED 0x00000001 /* schedule rather than park */
+#define QMAN_INITFQ_FLAG_LOCAL 0x00000004 /* set dest portal */
+
+/* Flags to qman_enqueue(). NB, the strange numbering is to align with hardware,
+ * bit-wise. (NB: the PME API is sensitive to these precise numberings too, so
+ * any change here should be audited in PME.)
+ */
+#define QMAN_ENQUEUE_FLAG_WATCH_CGR 0x00080000 /* watch congestion state */
+#define QMAN_ENQUEUE_FLAG_DCA 0x00008000 /* perform enqueue-DCA */
+#define QMAN_ENQUEUE_FLAG_DCA_PARK 0x00004000 /* If DCA, requests park */
+#define QMAN_ENQUEUE_FLAG_DCA_PTR(p) /* If DCA, p is DQRR entry */ \
+ (((u32)(p) << 2) & 0x00000f00)
+#define QMAN_ENQUEUE_FLAG_C_GREEN 0x00000000 /* choose one C_*** flag */
+#define QMAN_ENQUEUE_FLAG_C_YELLOW 0x00000008
+#define QMAN_ENQUEUE_FLAG_C_RED 0x00000010
+#define QMAN_ENQUEUE_FLAG_C_OVERRIDE 0x00000018
+/* For the ORP-specific qman_enqueue_orp() variant;
+ * - this flag indicates "Not Last In Sequence", ie. all but the final fragment
+ * of a frame.
+ */
+#define QMAN_ENQUEUE_FLAG_NLIS 0x01000000
+/* - this flag performs no enqueue but fills in an ORP sequence number that
+ * would otherwise block it (eg. if a frame has been dropped).
+ */
+#define QMAN_ENQUEUE_FLAG_HOLE 0x02000000
+/* - this flag performs no enqueue but advances NESN to the given sequence
+ * number.
+ */
+#define QMAN_ENQUEUE_FLAG_NESN 0x04000000
+
+/* Flags to qman_modify_cgr() */
+#define QMAN_CGR_FLAG_USE_INIT 0x00000001
+#define QMAN_CGR_MODE_FRAME 0x00000001
+
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+__rte_internal
+void qman_set_fq_lookup_table(void **table);
+#endif
+
+/**
+ * qman_get_portal_index - get portal configuration index
+ */
+int qman_get_portal_index(void);
+
+__rte_internal
+u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
+ void **bufs);
+
+/**
+ * qman_irqsource_add - add processing sources to be interrupt-driven
+ * @bits: bitmask of QM_PIRQ_**I processing sources
+ *
+ * Adds processing sources that should be interrupt-driven (rather than
+ * processed via qman_poll_***() functions). Returns zero for success, or
+ * -EINVAL if the current CPU is sharing a portal hosted on another CPU.
+ */
+__rte_internal
+int qman_irqsource_add(u32 bits);
+
+/**
+ * qman_fq_portal_irqsource_add - similar to qman_irqsource_add(), but takes
+ * the portal (FQ-specific) as input rather than using the thread-affine
+ * portal.
+ */
+__rte_internal
+int qman_fq_portal_irqsource_add(struct qman_portal *p, u32 bits);
+
+/**
+ * qman_irqsource_remove - remove processing sources from being interrupt-driven
+ * @bits: bitmask of QM_PIRQ_**I processing sources
+ *
+ * Removes processing sources from being interrupt-driven, so that they will
+ * instead be processed via qman_poll_***() functions. Returns zero for success,
+ * or -EINVAL if the current CPU is sharing a portal hosted on another CPU.
+ */
+__rte_internal
+int qman_irqsource_remove(u32 bits);
+
+/**
+ * qman_fq_portal_irqsource_remove - similar to qman_irqsource_remove(), but
+ * takes the portal (FQ-specific) as input rather than using the thread-affine
+ * portal.
+ */
+__rte_internal
+int qman_fq_portal_irqsource_remove(struct qman_portal *p, u32 bits);
+
+/**
+ * qman_affine_channel - return the channel ID of a portal
+ * @cpu: the cpu whose affine portal is the subject of the query
+ *
+ * If @cpu is -1, the affine portal for the current CPU will be used. It is a
+ * bug to call this function for any value of @cpu (other than -1) that is not a
+ * member of the cpu mask.
+ */
+u16 qman_affine_channel(int cpu);
+
+__rte_internal
+unsigned int qman_portal_poll_rx(unsigned int poll_limit,
+ void **bufs, struct qman_portal *q);
+
+/**
+ * qman_set_vdq - Issue a volatile dequeue command
+ * @fq: Frame Queue on which the volatile dequeue command is issued
+ * @num: Number of Frames requested for volatile dequeue
+ * @vdqcr_flags: QM_VDQCR_EXACT flag for the VDQCR command
+ *
+ * This function will issue a volatile dequeue command to the QMAN.
+ */
+__rte_internal
+int qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags);
+
+/**
+ * qman_dequeue - Get the DQRR entry after volatile dequeue command
+ * @fq: Frame Queue on which the volatile dequeue command is issued
+ *
+ * This function will return the DQRR entry after a volatile dequeue command
+ * is issued. It returns one entry per call and returns NULL once no more
+ * packets are available on the DQRR.
+ */
+__rte_internal
+struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq);
+
+/**
+ * qman_dqrr_consume - Consume the DQRR entry after volatile dequeue
+ * @fq: Frame Queue on which the volatile dequeue command is issued
+ * @dq: DQRR entry to consume. This is the one which is provided by the
+ * 'qman_dequeue' command.
+ *
+ * This will consume the DQRR entry and make it available for the next
+ * volatile dequeue.
+ */
+__rte_internal
+void qman_dqrr_consume(struct qman_fq *fq,
+ struct qm_dqrr_entry *dq);
+
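+/*
+ * Sketch of a pull-mode dequeue loop built from the three calls above
+ * (illustrative only; 'process_fd' is a hypothetical consumer and error
+ * handling is omitted):
+ *
+ *	struct qm_dqrr_entry *dq;
+ *
+ *	qman_set_vdq(fq, 4, 0);
+ *	while ((dq = qman_dequeue(fq)) != NULL) {
+ *		process_fd(&dq->fd);
+ *		qman_dqrr_consume(fq, dq);
+ *	}
+ */
+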
+/**
+ * qman_poll_dqrr - process DQRR (fast-path) entries
+ * @limit: the maximum number of DQRR entries to process
+ *
+ * Use of this function requires that DQRR processing not be interrupt-driven.
+ * Ie. the value returned by qman_irqsource_get() should not include
+ * QM_PIRQ_DQRI. If the current CPU is sharing a portal hosted on another CPU,
+ * this function will return -EINVAL, otherwise the return value is >=0 and
+ * represents the number of DQRR entries processed.
+ */
+__rte_internal
+int qman_poll_dqrr(unsigned int limit);
+
+/**
+ * qman_poll
+ *
+ * Dispatcher logic on a cpu can use this to trigger any maintenance of the
+ * affine portal. There are two classes of portal processing in question;
+ * fast-path (which involves demuxing dequeue ring (DQRR) entries and tracking
+ * enqueue ring (EQCR) consumption), and slow-path (which involves EQCR
+ * thresholds, congestion state changes, etc). This function does whatever
+ * processing is not triggered by interrupts.
+ *
+ * Note, if DQRR and some slow-path processing are poll-driven (rather than
+ * interrupt-driven) then this function uses a heuristic to determine how often
+ * to run slow-path processing - as slow-path processing introduces at least a
+ * minimum latency each time it is run, whereas fast-path (DQRR) processing is
+ * close to zero-cost if there is no work to be done.
+ */
+void qman_poll(void);
+
+/**
+ * qman_stop_dequeues - Stop h/w dequeuing to the s/w portal
+ *
+ * Disables DQRR processing of the portal. This is reference-counted, so
+ * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
+ * truly re-enable dequeuing.
+ */
+void qman_stop_dequeues(void);
+
+/**
+ * qman_start_dequeues - (Re)start h/w dequeuing to the s/w portal
+ *
+ * Enables DQRR processing of the portal. This is reference-counted, so
+ * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
+ * truly re-enable dequeuing.
+ */
+void qman_start_dequeues(void);
+
+/**
+ * qman_static_dequeue_add - Add pool channels to the portal SDQCR
+ * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
+ *
+ * Adds a set of pool channels to the portal's static dequeue command register
+ * (SDQCR). The requested pools are limited to those the portal has dequeue
+ * access to.
+ */
+__rte_internal
+void qman_static_dequeue_add(u32 pools, struct qman_portal *qm);
+
+/**
+ * qman_static_dequeue_del - Remove pool channels from the portal SDQCR
+ * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
+ *
+ * Removes a set of pool channels from the portal's static dequeue command
+ * register (SDQCR). The requested pools are limited to those the portal has
+ * dequeue access to.
+ */
+void qman_static_dequeue_del(u32 pools, struct qman_portal *qp);
+
+/**
+ * qman_static_dequeue_get - return the portal's current SDQCR
+ *
+ * Returns the portal's current static dequeue command register (SDQCR). The
+ * entire register is returned, so if only the currently-enabled pool channels
+ * are desired, mask the return value with QM_SDQCR_CHANNELS_POOL_MASK.
+ */
+u32 qman_static_dequeue_get(struct qman_portal *qp);
+
+/**
+ * qman_dca - Perform a Discrete Consumption Acknowledgment
+ * @dq: the DQRR entry to be consumed
+ * @park_request: indicates whether the held-active @fq should be parked
+ *
+ * Only allowed in DCA-mode portals, for DQRR entries whose handler callback had
+ * previously returned 'qman_cb_dqrr_defer'. NB, as with the other APIs, this
+ * does not take a 'portal' argument but implies the core affine portal from the
+ * cpu that is currently executing the function. For reasons of locking, this
+ * function must be called from the same CPU as that which processed the DQRR
+ * entry in the first place.
+ */
+void qman_dca(const struct qm_dqrr_entry *dq, int park_request);
+
+/**
+ * qman_dca_index - Perform a Discrete Consumption Acknowledgment
+ * @index: the DQRR index to be consumed
+ * @park_request: indicates whether the held-active @fq should be parked
+ *
+ * Only allowed in DCA-mode portals, for DQRR entries whose handler callback had
+ * previously returned 'qman_cb_dqrr_defer'. NB, as with the other APIs, this
+ * does not take a 'portal' argument but implies the core affine portal from the
+ * cpu that is currently executing the function. For reasons of locking, this
+ * function must be called from the same CPU as that which processed the DQRR
+ * entry in the first place.
+ */
+__rte_internal
+void qman_dca_index(u8 index, int park_request);
+
+/**
+ * qman_eqcr_is_empty - Determine if portal's EQCR is empty
+ *
+ * For use in situations where a cpu-affine caller needs to determine when all
+ * enqueues for the local portal have been processed by Qman but can't use the
+ * QMAN_ENQUEUE_FLAG_WAIT_SYNC flag to do this from the final qman_enqueue().
+ * The function forces tracking of EQCR consumption (which normally doesn't
+ * happen until enqueue processing needs to find space to put new enqueue
+ * commands), and returns zero if the ring still has unprocessed entries,
+ * non-zero if it is empty.
+ */
+int qman_eqcr_is_empty(void);
+
+/**
+ * qman_set_dc_ern - Set the handler for DCP enqueue rejection notifications
+ * @handler: callback for processing DCP ERNs
+ * @affine: whether this handler is specific to the locally affine portal
+ *
+ * If a hardware block's interface to Qman (ie. its direct-connect portal, or
+ * DCP) is configured not to receive enqueue rejections, then any enqueues
+ * through that DCP that are rejected will be sent to a given software portal.
+ * If @affine is non-zero, then this handler will only be used for DCP ERNs
+ * received on the portal affine to the current CPU. If multiple CPUs share a
+ * portal and they all call this function, they will be setting the handler for
+ * the same portal! If @affine is zero, then this handler will be global to all
+ * portals handled by this instance of the driver. Only those portals that do
+ * not have their own affine handler will use the global handler.
+ */
+void qman_set_dc_ern(qman_cb_dc_ern handler, int affine);
+
+ /* FQ management */
+ /* ------------- */
+/**
+ * qman_create_fq - Allocates a FQ
+ * @fqid: the index of the FQD to encapsulate, must be "Out of Service"
+ * @flags: bit-mask of QMAN_FQ_FLAG_*** options
+ * @fq: memory for storing the 'fq', with callbacks filled in
+ *
+ * Creates a frame queue object for the given @fqid, unless the
+ * QMAN_FQ_FLAG_DYNAMIC_FQID flag is set in @flags, in which case a FQID is
+ * dynamically allocated (or the function fails if none are available). Once
+ * created, the caller should not touch the memory at 'fq' except as extended to
+ * adjacent memory for user-defined fields (see the definition of "struct
+ * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to
+ * pre-existing frame-queues that aren't to be otherwise interfered with, it
+ * prevents all other modifications to the frame queue. The TO_DCPORTAL flag
+ * causes the driver to honour any contextB modifications requested in the
+ * qman_init_fq() API, as this indicates the frame queue will be consumed by a
+ * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by
+ * software portals, the contextB field is controlled by the driver and can't be
+ * modified by the caller. If the AS_IS flag is specified, management commands
+ * will be used on portal @p to query state for frame queue @fqid and construct
+ * a frame queue object based on that, rather than assuming/requiring that it be
+ * Out of Service.
+ */
+__rte_internal
+int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
+
+/**
+ * qman_destroy_fq - Deallocates a FQ
+ * @fq: the frame queue object to release
+ * @flags: bit-mask of QMAN_FQ_FREE_*** options
+ *
+ * The memory for this frame queue object ('fq' provided in qman_create_fq()) is
+ * not deallocated but the caller regains ownership, to do with as desired. The
+ * FQ must be in the 'out-of-service' state unless the QMAN_FQ_FREE_PARKED flag
+ * is specified, in which case it may also be in the 'parked' state.
+ */
+void qman_destroy_fq(struct qman_fq *fq, u32 flags);
+
+/**
+ * qman_fq_fqid - Queries the frame queue ID of a FQ object
+ * @fq: the frame queue object to query
+ */
+__rte_internal
+u32 qman_fq_fqid(struct qman_fq *fq);
+
+/**
+ * qman_fq_state - Queries the state of a FQ object
+ * @fq: the frame queue object to query
+ * @state: pointer to state enum to return the FQ scheduling state
+ * @flags: pointer to state flags to receive QMAN_FQ_STATE_*** bitmask
+ *
+ * Queries the state of the FQ object, without performing any h/w commands.
+ * This captures the state, as seen by the driver, at the time the function
+ * executes.
+ */
+__rte_internal
+void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags);
+
+/**
+ * qman_init_fq - Initialises FQ fields, leaves the FQ "parked" or "scheduled"
+ * @fq: the frame queue object to modify, must be 'parked' or new.
+ * @flags: bit-mask of QMAN_INITFQ_FLAG_*** options
+ * @opts: the FQ-modification settings, as defined in the low-level API
+ *
+ * The @opts parameter comes from the low-level portal API. Select
+ * QMAN_INITFQ_FLAG_SCHED in @flags to cause the frame queue to be scheduled
+ * rather than parked. NB, @opts can be NULL.
+ *
+ * Note that some fields and options within @opts may be ignored or overwritten
+ * by the driver;
+ * 1. the 'count' and 'fqid' fields are always ignored (this operation only
+ * affects one frame queue: @fq).
+ * 2. the QM_INITFQ_WE_CONTEXTB option of the 'we_mask' field and the associated
+ * 'fqd' structure's 'context_b' field are sometimes overwritten;
+ * - if @fq was not created with QMAN_FQ_FLAG_TO_DCPORTAL, then context_b is
+ * initialised to a value used by the driver for demux.
+ * - if context_b is initialised for demux, so is context_a in case stashing
+ * is requested (see item 4).
+ * (So caller control of context_b is only possible for TO_DCPORTAL frame queue
+ * objects.)
+ * 3. if @flags contains QMAN_INITFQ_FLAG_LOCAL, the 'fqd' structure's
+ * 'dest::channel' field will be overwritten to match the portal used to issue
+ * the command. If the WE_DESTWQ write-enable bit had already been set by the
+ * caller, the channel workqueue will be left as-is, otherwise the write-enable
+ * bit is set and the workqueue is set to a default of 4. If the "LOCAL" flag
+ * isn't set, the destination channel/workqueue fields and the write-enable bit
+ * are left as-is.
+ * 4. if the driver overwrites context_a/b for demux, then if
+ * QM_INITFQ_WE_CONTEXTA is set, the driver will only overwrite
+ * context_a.address fields and will leave the stashing fields provided by the
+ * user alone, otherwise it will zero out the context_a.stashing fields.
+ */
+__rte_internal
+int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
+
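+/*
+ * Typical create/init sequence (an illustrative sketch only; error handling
+ * is omitted and 'myfq' is the hypothetical wrapper from the example in the
+ * comment above struct qman_fq):
+ *
+ *	struct qm_mcc_initfq opts;
+ *	struct qman_fq *fq = &myfq->base;
+ *
+ *	qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
+ *	memset(&opts, 0, sizeof(opts));
+ *	opts.we_mask = QM_INITFQ_WE_FQCTRL;
+ *	opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
+ *	qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
+ */
+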
+/**
+ * qman_schedule_fq - Schedules a FQ
+ * @fq: the frame queue object to schedule, must be 'parked'
+ *
+ * Schedules the frame queue, which must be Parked; this takes it to
+ * Tentatively-Scheduled or Truly-Scheduled depending on its fill-level.
+ */
+int qman_schedule_fq(struct qman_fq *fq);
+
+/**
+ * qman_retire_fq - Retires a FQ
+ * @fq: the frame queue object to retire
+ * @flags: FQ flags (as per qman_fq_state) if retirement completes immediately
+ *
+ * Retires the frame queue. This returns zero if it succeeds immediately, +1 if
+ * the retirement was started asynchronously, otherwise it returns negative for
+ * failure. When this function returns zero, @flags is set to indicate whether
+ * the retired FQ is empty and/or whether it has any ORL fragments (to show up
+ * as ERNs). Otherwise the corresponding flags will be known when a subsequent
+ * FQRN message shows up on the portal's message ring.
+ *
+ * NB, if the retirement is asynchronous (the FQ was in the Truly Scheduled or
+ * Active state), the completion will be via the message ring as a FQRN - but
+ * the corresponding callback may occur before this function returns!! Ie. the
+ * caller should be prepared to accept the callback as the function is called,
+ * not only once it has returned.
+ */
+__rte_internal
+int qman_retire_fq(struct qman_fq *fq, u32 *flags);
+
+/**
+ * qman_oos_fq - Puts a FQ "out of service"
+ * @fq: the frame queue object to be put out-of-service, must be 'retired'
+ *
+ * The frame queue must be retired and empty, and if any order restoration list
+ * was released as ERNs at the time of retirement, they must all be consumed.
+ */
+__rte_internal
+int qman_oos_fq(struct qman_fq *fq);
+
+/**
+ * qman_fq_flow_control - Set the XON/XOFF state of a FQ
+ * @fq: the frame queue object to be set to XON/XOFF state, must not be 'oos',
+ * or 'retired' or 'parked' state
+ * @xon: boolean to set fq in XON or XOFF state
+ *
+ * The frame queue should be in Tentatively Scheduled or Truly Scheduled state,
+ * otherwise the IFSI interrupt will be asserted.
+ */
+int qman_fq_flow_control(struct qman_fq *fq, int xon);
+
+/**
+ * qman_query_fq - Queries FQD fields (via h/w query command)
+ * @fq: the frame queue object to be queried
+ * @fqd: storage for the queried FQD fields
+ */
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
+
+/**
+ * qman_query_fq_has_pkts - Queries non-programmable FQD fields and returns '1'
+ * if packets are in the frame queue, or '0' if the frame queue contains no
+ * packets.
+ * @fq: the frame queue object to be queried
+ */
+int qman_query_fq_has_pkts(struct qman_fq *fq);
+
+/**
+ * qman_query_fq_np - Queries non-programmable FQD fields
+ * @fq: the frame queue object to be queried
+ * @np: storage for the queried FQD fields
+ */
+__rte_internal
+int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);
+
+/**
+ * qman_query_fq_frm_cnt - Queries the FQ frame count
+ * @fq: the frame queue object to be queried
+ * @frm_cnt: number of frames in the queue
+ */
+__rte_internal
+int qman_query_fq_frm_cnt(struct qman_fq *fq, u32 *frm_cnt);
+
+/**
+ * qman_query_wq - Queries work queue lengths
+ * @query_dedicated: If non-zero, query length of WQs in the channel dedicated
+ * to this software portal. Otherwise, query length of WQs in a
+ * channel specified in wq.
+ * @wq: storage for the queried WQ lengths. Also specifies the channel
+ * to query if query_dedicated is zero.
+ */
+int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq);
+
+/**
+ * qman_volatile_dequeue - Issue a volatile dequeue command
+ * @fq: the frame queue object to dequeue from
+ * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
+ * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
+ *
+ * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
+ * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
+ * the VDQCR is already in use, otherwise returns non-zero for failure. If
+ * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
+ * the VDQCR command has finished executing (ie. once the callback for the last
+ * DQRR entry resulting from the VDQCR command has been called). If not using
+ * the FINISH flag, completion can be determined either by detecting the
+ * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
+ * in the "stat" field of the "struct qm_dqrr_entry" passed to the FQ's dequeue
+ * callback, or by waiting for the QMAN_FQ_STATE_VDQCR bit to disappear from the
+ * "flags" retrieved from qman_fq_state().
+ */
+__rte_internal
+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
+
+/**
+ * qman_enqueue - Enqueue a frame to a frame queue
+ * @fq: the frame queue object to enqueue to
+ * @fd: a descriptor of the frame to be enqueued
+ * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
+ *
+ * Fills an entry in the EQCR of portal @qm to enqueue the frame described by
+ * @fd. The descriptor details are copied from @fd to the EQCR entry, the 'pid'
+ * field is ignored. The return value is non-zero on error, such as ring full
+ * (and FLAG_WAIT not specified), congestion avoidance (FLAG_WATCH_CGR
+ * specified), etc. If the ring is full and FLAG_WAIT is specified, this
+ * function will block. If FLAG_INTERRUPT is set, the EQCI bit of the portal
+ * interrupt will assert when Qman consumes the EQCR entry (subject to "status
+ * disable", "enable", and "inhibit" registers). If FLAG_DCA is set, Qman will
+ * perform an implied "discrete consumption acknowledgment" on the dequeue
+ * ring's (DQRR) entry, at the ring index specified by the FLAG_DCA_IDX(x)
+ * macro. (As an alternative to issuing explicit DCA actions on DQRR entries,
+ * this implicit DCA can delay the release of a "held active" frame queue
+ * corresponding to a DQRR entry until Qman consumes the EQCR entry - providing
+ * order-preservation semantics in packet-forwarding scenarios.) If FLAG_DCA is
+ * set, then FLAG_DCA_PARK can also be set to imply that the DQRR consumption
+ * acknowledgment should "park request" the "held active" frame queue. Ie.
+ * when the portal eventually releases that frame queue, it will be left in the
+ * Parked state rather than Tentatively Scheduled or Truly Scheduled. If the
+ * portal is watching congestion groups, the QMAN_ENQUEUE_FLAG_WATCH_CGR flag
+ * is requested, and the FQ is a member of a congestion group, then this
+ * function returns -EAGAIN if the congestion group is currently congested.
+ * Note, this does not eliminate ERNs, as the async interface means we can be
+ * sending enqueue commands to an un-congested FQ that becomes congested before
+ * the enqueue commands are processed, but it does minimise needless thrashing
+ * of an already busy hardware resource by throttling many of the to-be-dropped
+ * enqueues "at the source".
+ */
+__rte_internal
+int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags);
+
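+/*
+ * Minimal transmit sketch (illustrative only; 'do_backoff' is a
+ * hypothetical pause and 'fd' a previously-built frame descriptor):
+ *
+ *	while (qman_enqueue(fq, &fd, 0) != 0)
+ *		do_backoff();
+ */
+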
+__rte_internal
+int qman_enqueue_multi(struct qman_fq *fq, const struct qm_fd *fd, u32 *flags,
+ int frames_to_send);
+
+/**
+ * qman_enqueue_multi_fq - Enqueue multiple frames to their respective frame
+ * queues.
+ * @fq[]: Array of frame queue objects to enqueue to
+ * @fd: pointer to first descriptor of frame to be enqueued
+ * @frames_to_send: number of frames to be sent.
+ *
+ * This API is similar to qman_enqueue_multi(), but it takes fd which needs
+ * to be processed by different frame queues.
+ */
+__rte_internal
+int
+qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
+ u32 *flags, int frames_to_send);
+
+typedef int (*qman_cb_precommit) (void *arg);
+
+/**
+ * qman_enqueue_orp - Enqueue a frame to a frame queue using an ORP
+ * @fq: the frame queue object to enqueue to
+ * @fd: a descriptor of the frame to be enqueued
+ * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
+ * @orp: the frame queue object used as an order restoration point.
+ * @orp_seqnum: the sequence number of this frame in the order restoration path
+ *
+ * Similar to qman_enqueue(), but with the addition of an Order Restoration
+ * Point (@orp) and corresponding sequence number (@orp_seqnum) for this
+ * enqueue operation to employ order restoration. Each frame queue object acts
+ * as an Order Definition Point (ODP) by providing each frame dequeued from it
+ * with an incrementing sequence number; this value is generally ignored unless
+ * that sequence of dequeued frames will need order restoration later. Each
+ * frame queue object also encapsulates an Order Restoration Point (ORP), which
+ * is a re-assembly context for re-ordering frames relative to their sequence
+ * numbers as they are enqueued. The ORP does not have to be within the frame
+ * queue that receives the enqueued frame, in fact it is usually the frame
+ * queue from which the frames were originally dequeued. For the purposes of
+ * order restoration, multiple frames (or "fragments") can be enqueued for a
+ * single sequence number by setting the QMAN_ENQUEUE_FLAG_NLIS flag for all
+ * enqueues except the final fragment of a given sequence number. Ordering
+ * between sequence numbers is guaranteed, even if fragments of different
+ * sequence numbers are interlaced with one another. Fragments of the same
+ * sequence number will retain the order in which they are enqueued. If no
+ * enqueue is to be performed, QMAN_ENQUEUE_FLAG_HOLE indicates that the given
+ * sequence number is to be "skipped" by the ORP logic (eg. if a frame has been
+ * dropped from a sequence), or QMAN_ENQUEUE_FLAG_NESN indicates that the given
+ * sequence number should become the ORP's "Next Expected Sequence Number".
+ *
+ * Side note: a frame queue object can be used purely as an ORP, without
+ * carrying any frames at all. Care should be taken not to deallocate a frame
+ * queue object that is being actively used as an ORP, as a future allocation
+ * of the frame queue object may start using the internal ORP before the
+ * previous use has finished.
+ */
+int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
+ struct qman_fq *orp, u16 orp_seqnum);
+
+/**
+ * qman_alloc_fqid_range - Allocate a contiguous range of FQIDs
+ * @result: is set by the API to the base FQID of the allocated range
+ * @count: the number of FQIDs required
+ * @align: required alignment of the allocated range
+ * @partial: non-zero if the API can return fewer than @count FQIDs
+ *
+ * Returns the number of frame queues allocated, or a negative error code. If
+ * @partial is non-zero, the allocation request may return a smaller range of
+ * FQs than requested (though alignment will be as requested). If @partial is
+ * zero, the return value will either be 'count' or negative.
+ */
+int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial);
+static inline int qman_alloc_fqid(u32 *result)
+{
+ int ret = qman_alloc_fqid_range(result, 1, 0, 0);
+
+ return (ret > 0) ? 0 : ret;
+}
+
+/**
+ * qman_release_fqid_range - Release the specified range of frame queue IDs
+ * @fqid: the base FQID of the range to deallocate
+ * @count: the number of FQIDs in the range
+ *
+ * This function can also be used to seed the allocator with ranges of FQIDs
+ * that it can subsequently allocate from.
+ */
+void qman_release_fqid_range(u32 fqid, unsigned int count);
+static inline void qman_release_fqid(u32 fqid)
+{
+ qman_release_fqid_range(fqid, 1);
+}
+
+void qman_seed_fqid_range(u32 fqid, unsigned int count);
+
+int qman_shutdown_fq(u32 fqid);
+
+/**
+ * qman_reserve_fqid_range - Reserve the specified range of frame queue IDs
+ * @fqid: the base FQID of the range to deallocate
+ * @count: the number of FQIDs in the range
+ */
+__rte_internal
+int qman_reserve_fqid_range(u32 fqid, unsigned int count);
+static inline int qman_reserve_fqid(u32 fqid)
+{
+ return qman_reserve_fqid_range(fqid, 1);
+}
+
+/* Pool-channel management */
+/**
+ * qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs
+ * @result: is set by the API to the base pool-channel ID of the allocated range
+ * @count: the number of pool-channel IDs required
+ * @align: required alignment of the allocated range
+ * @partial: non-zero if the API can return fewer than @count
+ *
+ * Returns the number of pool-channel IDs allocated, or a negative error code.
+ * If @partial is non-zero, the allocation request may return a smaller range
+ * than requested (though alignment will be as requested). If @partial is zero,
+ * the return value will either be 'count' or negative.
+ */
+__rte_internal
+int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial);
+static inline int qman_alloc_pool(u32 *result)
+{
+ int ret = qman_alloc_pool_range(result, 1, 0, 0);
+
+ return (ret > 0) ? 0 : ret;
+}
+
+/**
+ * qman_release_pool_range - Release the specified range of pool-channel IDs
+ * @id: the base pool-channel ID of the range to deallocate
+ * @count: the number of pool-channel IDs in the range
+ */
+void qman_release_pool_range(u32 id, unsigned int count);
+static inline void qman_release_pool(u32 id)
+{
+ qman_release_pool_range(id, 1);
+}
+
+/**
+ * qman_reserve_pool_range - Reserve the specified range of pool-channel IDs
+ * @id: the base pool-channel ID of the range to reserve
+ * @count: the number of pool-channel IDs in the range
+ */
+int qman_reserve_pool_range(u32 id, unsigned int count);
+static inline int qman_reserve_pool(u32 id)
+{
+ return qman_reserve_pool_range(id, 1);
+}
+
+void qman_seed_pool_range(u32 id, unsigned int count);
+
+ /* CGR management */
+ /* -------------- */
+/**
+ * qman_create_cgr - Register a congestion group object
+ * @cgr: the 'cgr' object, with fields filled in
+ * @flags: QMAN_CGR_FLAG_* values
+ * @opts: optional state of CGR settings
+ *
+ * Registers this object to receive congestion entry/exit callbacks on the
+ * portal affine to the CPU on which this API is executed. If @opts is
+ * NULL then only the callback (cgr->cb) function is registered. If @flags
+ * contains QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset
+ * any unspecified parameters) will be used rather than a modify hw command
+ * (which only modifies the specified parameters).
+ */
+__rte_internal
+int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts);
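+
+/* Registration sketch (assumes only the fields documented above): set the
+ * callback, then create the CGR with an init command on the current portal.
+ * 'my_cgrid' and 'my_congestion_cb' are hypothetical.
+ *
+ *	struct qman_cgr cgr = { .cgrid = my_cgrid, .cb = my_congestion_cb };
+ *	struct qm_mcc_initcgr opts;
+ *
+ *	memset(&opts, 0, sizeof(opts));
+ *	if (qman_create_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, &opts))
+ *		handle_error();	// hypothetical
+ */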
+
+/**
+ * qman_create_cgr_to_dcp - Register a congestion group object to DCP portal
+ * @cgr: the 'cgr' object, with fields filled in
+ * @flags: QMAN_CGR_FLAG_* values
+ * @dcp_portal: the DCP portal to which the cgr object is registered.
+ * @opts: optional state of CGR settings
+ */
+int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
+ struct qm_mcc_initcgr *opts);
+
+/**
+ * qman_delete_cgr - Deregisters a congestion group object
+ * @cgr: the 'cgr' object to deregister
+ *
+ * "Unplugs" this CGR object from the portal affine to the cpu on which this API
+ * is executed. This must be executed on the same affine portal on which it was
+ * created.
+ */
+__rte_internal
+int qman_delete_cgr(struct qman_cgr *cgr);
+
+/**
+ * qman_modify_cgr - Modify CGR fields
+ * @cgr: the 'cgr' object to modify
+ * @flags: QMAN_CGR_FLAG_* values
+ * @opts: the CGR-modification settings
+ *
+ * The @opts parameter comes from the low-level portal API, and can be NULL.
+ * Note that some fields and options within @opts may be ignored or overwritten
+ * by the driver, in particular the 'cgrid' field is ignored (this operation
+ * only affects the given CGR object). If @flags contains
+ * QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset any
+ * unspecified parameters) will be used rather than a modify hw command (which
+ * only modifies the specified parameters).
+ */
+__rte_internal
+int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts);
+
+/**
+ * qman_query_cgr - Queries CGR fields
+ * @cgr: the 'cgr' object to query
+ * @result: storage for the queried congestion group record
+ */
+int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *result);
+
+/**
+ * qman_query_congestion - Queries the state of all congestion groups
+ * @congestion: storage for the queried state of all congestion groups
+ */
+int qman_query_congestion(struct qm_mcr_querycongestion *congestion);
+
+/**
+ * qman_alloc_cgrid_range - Allocate a contiguous range of CGR IDs
+ * @result: is set by the API to the base CGR ID of the allocated range
+ * @count: the number of CGR IDs required
+ * @align: required alignment of the allocated range
+ * @partial: non-zero if the API can return fewer than @count
+ *
+ * Returns the number of CGR IDs allocated, or a negative error code.
+ * If @partial is non-zero, the allocation request may return a smaller range
+ * than requested (though alignment will be as requested). If @partial is zero,
+ * the return value will either be 'count' or negative.
+ */
+__rte_internal
+int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial);
+static inline int qman_alloc_cgrid(u32 *result)
+{
+ int ret = qman_alloc_cgrid_range(result, 1, 0, 0);
+
+ return (ret > 0) ? 0 : ret;
+}
+
+/**
+ * qman_release_cgrid_range - Release the specified range of CGR IDs
+ * @id: the base CGR ID of the range to deallocate
+ * @count: the number of CGR IDs in the range
+ */
+__rte_internal
+void qman_release_cgrid_range(u32 id, unsigned int count);
+static inline void qman_release_cgrid(u32 id)
+{
+ qman_release_cgrid_range(id, 1);
+}
+
+/**
+ * qman_reserve_cgrid_range - Reserve the specified range of CGR IDs
+ * @id: the base CGR ID of the range to reserve
+ * @count: the number of CGR IDs in the range
+ */
+int qman_reserve_cgrid_range(u32 id, unsigned int count);
+static inline int qman_reserve_cgrid(u32 id)
+{
+ return qman_reserve_cgrid_range(id, 1);
+}
+
+void qman_seed_cgrid_range(u32 id, unsigned int count);
+
+ /* Helpers */
+ /* ------- */
+/**
+ * qman_poll_fq_for_init - Check if an FQ has been initialised from OOS
+ * @fqid: the FQID that will be initialised by other s/w
+ *
+ * In many situations, an FQID is provided for communication between s/w
+ * entities, and whilst the consumer is responsible for initialising and
+ * scheduling the FQ, the producer(s) generally create a wrapper FQ object and
+ * only call qman_enqueue() (no FQ initialisation, scheduling, etc). I.e.
+ *	qman_create_fq(..., QMAN_FQ_FLAG_NO_MODIFY, ...);
+ * However, data cannot be enqueued to the FQ until it is initialised out of
+ * the OOS state - this function polls for that condition. It is particularly
+ * useful for users of IPC functions - each endpoint's Rx FQ is the other
+ * endpoint's Tx FQ, so each side can initialise and schedule their Rx FQ object
+ * and then use this API on the (NO_MODIFY) Tx FQ object in order to
+ * synchronise. The function returns zero for success, +1 if the FQ is still in
+ * the OOS state, or negative if there was an error.
+ */
+static inline int qman_poll_fq_for_init(struct qman_fq *fq)
+{
+ struct qm_mcr_queryfq_np np;
+ int err;
+
+ err = qman_query_fq_np(fq, &np);
+ if (err)
+ return err;
+ if ((np.state & QM_MCR_NP_STATE_MASK) == QM_MCR_NP_STATE_OOS)
+ return 1;
+ return 0;
+}
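+
+/* Example polling loop (illustrative): wait for the remote endpoint to take
+ * the (NO_MODIFY) Tx FQ out of OOS before enqueueing.
+ *
+ *	int ret;
+ *
+ *	do {
+ *		ret = qman_poll_fq_for_init(tx_fq);
+ *	} while (ret == 1);	// still OOS, keep polling
+ *	if (ret < 0)
+ *		handle_error();	// hypothetical
+ */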
+
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define cpu_to_hw_sg(x)
+#define hw_sg_to_cpu(x)
+#else
+#define cpu_to_hw_sg(x) __cpu_to_hw_sg(x)
+#define hw_sg_to_cpu(x) __hw_sg_to_cpu(x)
+
+static inline void __cpu_to_hw_sg(struct qm_sg_entry *sgentry)
+{
+ sgentry->opaque = cpu_to_be64(sgentry->opaque);
+ sgentry->val = cpu_to_be32(sgentry->val);
+ sgentry->val_off = cpu_to_be16(sgentry->val_off);
+}
+
+static inline void __hw_sg_to_cpu(struct qm_sg_entry *sgentry)
+{
+ sgentry->opaque = be64_to_cpu(sgentry->opaque);
+ sgentry->val = be32_to_cpu(sgentry->val);
+ sgentry->val_off = be16_to_cpu(sgentry->val_off);
+}
+#endif
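+
+/* Usage note (illustrative): S/G entries handed to QMan must be in big-endian
+ * layout, so convert after filling in CPU order (and convert back before
+ * reading). On big-endian hosts the macros compile away.
+ *
+ *	struct qm_sg_entry sg;
+ *
+ *	memset(&sg, 0, sizeof(sg));
+ *	// ... fill address/length/offset fields in CPU byte order ...
+ *	cpu_to_hw_sg(&sg);
+ */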
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FSL_QMAN_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_usd.h b/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_usd.h
new file mode 100644
index 000000000..dcf35e4ad
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/include/fsl_usd.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2010-2011 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ * Copyright 2019 NXP
+ *
+ */
+
+#ifndef __FSL_USD_H
+#define __FSL_USD_H
+
+#include <compat.h>
+#include <dpaa_list.h>
+#include <fsl_qman.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Thread-entry/exit hooks */
+int qman_thread_init(void);
+int bman_thread_init(void);
+int qman_thread_finish(void);
+int bman_thread_finish(void);
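+
+/* Lifecycle sketch (illustrative; zero return is assumed to mean success):
+ * each I/O thread initialises its affine portals before use and tears them
+ * down on exit.
+ *
+ *	if (qman_thread_init() || bman_thread_init())
+ *		return -1;	// no portal available for this thread
+ *	// ... datapath work on this thread ...
+ *	qman_thread_finish();
+ *	bman_thread_finish();
+ */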
+
+#define QBMAN_ANY_PORTAL_IDX 0xffffffff
+
+/* Obtain and free raw (uninitialized) portals */
+
+struct dpaa_raw_portal {
+ /* inputs */
+
+	/* Set to non-zero to turn on stashing */
+ uint8_t enable_stash;
+ /* Stashing attributes for the portal */
+ uint32_t cpu;
+ uint32_t cache;
+ uint32_t window;
+
+ /* Specifies the stash request queue this portal should use */
+ uint8_t sdest;
+
+	/* Specifies a specific portal index to map or QBMAN_ANY_PORTAL_IDX
+ * for don't care. The portal index will be populated by the
+ * driver when the ioctl() successfully completes.
+ */
+ uint32_t index;
+
+ /* outputs */
+ uint64_t cinh;
+ uint64_t cena;
+};
+
+int qman_allocate_raw_portal(struct dpaa_raw_portal *portal);
+int qman_free_raw_portal(struct dpaa_raw_portal *portal);
+
+int bman_allocate_raw_portal(struct dpaa_raw_portal *portal);
+int bman_free_raw_portal(struct dpaa_raw_portal *portal);
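+
+/* Request sketch (an assumption based on the fields above; zero return is
+ * taken to mean success): map any free QMan portal without stashing.
+ *
+ *	struct dpaa_raw_portal p;
+ *
+ *	memset(&p, 0, sizeof(p));
+ *	p.index = QBMAN_ANY_PORTAL_IDX;
+ *	if (qman_allocate_raw_portal(&p) == 0) {
+ *		// p.index, p.cinh and p.cena are now populated
+ *		qman_free_raw_portal(&p);
+ *	}
+ */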
+
+/* Obtain thread-local UIO file-descriptors */
+__rte_internal
+int qman_thread_fd(void);
+int bman_thread_fd(void);
+
+/* Post-process interrupts. NB, the kernel IRQ handler disables the interrupt
+ * line before notifying us, and this post-processing re-enables it once
+ * processing is complete. As such, it is essential to call this before going
+ * into another blocking read/select/poll.
+ */
+__rte_internal
+void qman_thread_irq(void);
+
+__rte_internal
+void bman_thread_irq(void);
+__rte_internal
+void qman_fq_portal_thread_irq(struct qman_portal *qp);
+__rte_internal
+void qman_clear_irq(void);
+
+/* Global setup */
+int qman_global_init(void);
+int bman_global_init(void);
+
+/* Direct portal create and destroy */
+__rte_internal
+struct qman_portal *fsl_qman_fq_portal_create(int *fd);
+int fsl_qman_fq_portal_destroy(struct qman_portal *qp);
+int fsl_qman_fq_portal_init(struct qman_portal *qp);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FSL_USD_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/include/netcfg.h b/src/spdk/dpdk/drivers/bus/dpaa/include/netcfg.h
new file mode 100644
index 000000000..d7d1befd2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/include/netcfg.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2010-2012 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ */
+
+#ifndef __NETCFG_H
+#define __NETCFG_H
+
+#include <fman.h>
+#include <argp.h>
+
+/* Configuration information related to a specific ethernet port */
+struct fm_eth_port_cfg {
+	/** A list of PCD FQ ranges, obtained from FMC configuration */
+	struct list_head *list;
+	/** The "Rx default" FQID, obtained from FMC configuration */
+	uint32_t rx_def;
+	/** Other interface details are in the fman driver interface */
+	struct fman_if *fman_if;
+};
+
+struct netcfg_info {
+ uint8_t num_ethports;
+ /**< Number of ports */
+ struct fm_eth_port_cfg port_cfg[0];
+ /**< Variable structure array of size num_ethports */
+};
+
+struct interface_info {
+ char *name;
+ struct rte_ether_addr mac_addr;
+ struct rte_ether_addr peer_mac;
+ int mac_present;
+ int fman_enabled_mac_interface;
+};
+
+struct netcfg_interface {
+ uint8_t numof_netcfg_interface;
+ uint8_t numof_fman_enabled_macless;
+ struct interface_info interface_info[0];
+};
+
+/* Acquires the network configuration, derived from the FMC netpcd XML
+ * ("policy") file, which contains PCD information, and the FMC config XML
+ * file. Returns the configuration information in newly allocated memory.
+ */
+__rte_internal
+struct netcfg_info *netcfg_acquire(void);
+
+/* cfg_ptr: configuration information pointer.
+ * Frees the resources allocated by the configuration layer.
+ */
+__rte_internal
+void netcfg_release(struct netcfg_info *cfg_ptr);
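+
+/* Typical usage (illustrative; 'setup_port' is a hypothetical helper):
+ * acquire the configuration once at start-up, walk the detected ports, then
+ * release it.
+ *
+ *	struct netcfg_info *cfg = netcfg_acquire();
+ *
+ *	if (cfg) {
+ *		for (int i = 0; i < cfg->num_ethports; i++)
+ *			setup_port(&cfg->port_cfg[i]);
+ *		netcfg_release(cfg);
+ *	}
+ */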
+
+#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
+/* cfg_ptr: configuration information pointer.
+ * This function dumps configuration data to stdout.
+ */
+void dump_netcfg(struct netcfg_info *cfg_ptr);
+#endif
+
+#endif /* __NETCFG_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/include/process.h b/src/spdk/dpdk/drivers/bus/dpaa/include/process.h
new file mode 100644
index 000000000..d9ec94ee2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/include/process.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2010-2011 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ */
+
+#ifndef __PROCESS_H
+#define __PROCESS_H
+
+#include <compat.h>
+
+/* The process device underlies process-wide user/kernel interactions, such as
+ * mapping dma_mem memory and providing accompanying ioctl()s. (This isn't used
+ * for portals, which use one UIO device each.)
+ */
+#define PROCESS_PATH "/dev/fsl-usdpaa"
+
+/* Allocation of resource IDs uses a generic interface. This enum is used to
+ * distinguish between the type of underlying object being manipulated.
+ */
+enum dpaa_id_type {
+ dpaa_id_fqid,
+ dpaa_id_bpid,
+ dpaa_id_qpool,
+ dpaa_id_cgrid,
+ dpaa_id_max /* <-- not a valid type, represents the number of types */
+};
+
+int process_alloc(enum dpaa_id_type id_type, uint32_t *base, uint32_t num,
+ uint32_t align, int partial);
+void process_release(enum dpaa_id_type id_type, uint32_t base, uint32_t num);
+
+int process_reserve(enum dpaa_id_type id_type, uint32_t base, uint32_t num);
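+
+/* Sketch (illustrative; return semantics are assumed to mirror the *_range
+ * allocators in fsl_qman.h, i.e. number allocated or negative error):
+ *
+ *	uint32_t base;
+ *	int ret = process_alloc(dpaa_id_fqid, &base, 4, 0, 0);
+ *
+ *	if (ret > 0)
+ *		process_release(dpaa_id_fqid, base, ret);
+ */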
+
+/* Mapping and using QMan/BMan portals */
+enum dpaa_portal_type {
+ dpaa_portal_qman,
+ dpaa_portal_bman,
+};
+
+struct dpaa_portal_map {
+ void *cinh;
+ void *cena;
+};
+
+struct dpaa_ioctl_portal_map {
+	/* Input parameter: whether a QMan or BMan portal is required. */
+ enum dpaa_portal_type type;
+	/* Specifies a specific portal index to map or 0xffffffff
+ * for don't care.
+ */
+ uint32_t index;
+
+ /* Return value if the map succeeds, this gives the mapped
+ * cache-inhibited (cinh) and cache-enabled (cena) addresses.
+ */
+ struct dpaa_portal_map addr;
+
+ /* Qman-specific return values */
+ u16 channel;
+ uint32_t pools;
+};
+
+int process_portal_map(struct dpaa_ioctl_portal_map *params);
+int process_portal_unmap(struct dpaa_portal_map *map);
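+
+/* Mapping sketch (illustrative; zero return is assumed to mean success):
+ * map any free QMan portal, then unmap it via the returned addresses.
+ *
+ *	struct dpaa_ioctl_portal_map p;
+ *
+ *	memset(&p, 0, sizeof(p));
+ *	p.type = dpaa_portal_qman;
+ *	p.index = 0xffffffff;	// don't care which portal
+ *	if (process_portal_map(&p) == 0)
+ *		process_portal_unmap(&p.addr);
+ */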
+
+struct dpaa_ioctl_irq_map {
+ enum dpaa_portal_type type; /* Type of portal to map */
+ int fd; /* File descriptor that contains the portal */
+ void *portal_cinh; /* Cache inhibited area to identify the portal */
+};
+
+int process_portal_irq_map(int fd, struct dpaa_ioctl_irq_map *irq);
+int process_portal_irq_unmap(int fd);
+
+#endif /* __PROCESS_H */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/meson.build b/src/spdk/dpdk/drivers/bus/dpaa/meson.build
new file mode 100644
index 000000000..c2cffa494
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/meson.build
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if not is_linux
+ build = false
+ reason = 'only supported on linux'
+endif
+
+deps += ['common_dpaax', 'eventdev']
+sources = files('base/fman/fman.c',
+ 'base/fman/fman_hw.c',
+ 'base/fman/netcfg_layer.c',
+ 'base/qbman/bman.c',
+ 'base/qbman/bman_driver.c',
+ 'base/qbman/dpaa_alloc.c',
+ 'base/qbman/dpaa_sys.c',
+ 'base/qbman/process.c',
+ 'base/qbman/qman.c',
+ 'base/qbman/qman_driver.c',
+ 'dpaa_bus.c')
+
+if cc.has_argument('-Wno-cast-qual')
+ cflags += '-Wno-cast-qual'
+endif
+if cc.has_argument('-Wno-pointer-arith')
+ cflags += '-Wno-pointer-arith'
+endif
+
+includes += include_directories('include', 'base/qbman')
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/rte_bus_dpaa_version.map b/src/spdk/dpdk/drivers/bus/dpaa/rte_bus_dpaa_version.map
new file mode 100644
index 000000000..46d42f7d6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/rte_bus_dpaa_version.map
@@ -0,0 +1,93 @@
+DPDK_20.0 {
+ local: *;
+};
+
+INTERNAL {
+ global:
+
+ bman_acquire;
+ bman_free_pool;
+ bman_get_params;
+ bman_new_pool;
+ bman_query_free_buffers;
+ bman_release;
+ bman_thread_irq;
+ dpaa_get_eth_port_cfg;
+ dpaa_get_qm_channel_caam;
+ dpaa_get_qm_channel_pool;
+ dpaa_svr_family;
+ fman_dealloc_bufs_mask_hi;
+ fman_dealloc_bufs_mask_lo;
+ fman_if_add_mac_addr;
+ fman_if_clear_mac_addr;
+ fman_if_disable_rx;
+ fman_if_discard_rx_errors;
+ fman_if_enable_rx;
+ fman_if_get_fc_quanta;
+ fman_if_get_fc_threshold;
+ fman_if_get_fdoff;
+ fman_if_get_sg_enable;
+ fman_if_loopback_disable;
+ fman_if_loopback_enable;
+ fman_if_promiscuous_disable;
+ fman_if_promiscuous_enable;
+ fman_if_reset_mcast_filter_table;
+ fman_if_set_bp;
+ fman_if_set_fc_quanta;
+ fman_if_set_fc_threshold;
+ fman_if_set_fdoff;
+ fman_if_set_ic_params;
+ fman_if_set_maxfrm;
+ fman_if_set_mcast_filter_table;
+ fman_if_set_sg;
+ fman_if_stats_get;
+ fman_if_stats_get_all;
+ fman_if_stats_reset;
+ fman_ip_rev;
+ fsl_qman_fq_portal_create;
+ netcfg_acquire;
+ netcfg_release;
+ per_lcore_dpaa_io;
+ per_lcore_held_bufs;
+ qman_alloc_cgrid_range;
+ qman_alloc_pool_range;
+ qman_clear_irq;
+ qman_create_cgr;
+ qman_create_fq;
+ qman_dca_index;
+ qman_delete_cgr;
+ qman_dequeue;
+ qman_dqrr_consume;
+ qman_enqueue;
+ qman_enqueue_multi;
+ qman_enqueue_multi_fq;
+ qman_fq_fqid;
+ qman_fq_portal_irqsource_add;
+ qman_fq_portal_irqsource_remove;
+ qman_fq_portal_thread_irq;
+ qman_fq_state;
+ qman_init_fq;
+ qman_irqsource_add;
+ qman_irqsource_remove;
+ qman_modify_cgr;
+ qman_oos_fq;
+ qman_poll_dqrr;
+ qman_portal_dequeue;
+ qman_portal_poll_rx;
+ qman_query_fq_frm_cnt;
+ qman_query_fq_np;
+ qman_release_cgrid_range;
+ qman_reserve_fqid_range;
+ qman_retire_fq;
+ qman_set_fq_lookup_table;
+ qman_set_vdq;
+ qman_static_dequeue_add;
+ qman_thread_fd;
+ qman_thread_irq;
+ qman_volatile_dequeue;
+ rte_dpaa_driver_register;
+ rte_dpaa_driver_unregister;
+ rte_dpaa_portal_fq_close;
+ rte_dpaa_portal_fq_init;
+ rte_dpaa_portal_init;
+};
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/rte_dpaa_bus.h b/src/spdk/dpdk/drivers/bus/dpaa/rte_dpaa_bus.h
new file mode 100644
index 000000000..2a186d83f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/rte_dpaa_bus.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2017-2019 NXP
+ *
+ */
+#ifndef __RTE_DPAA_BUS_H__
+#define __RTE_DPAA_BUS_H__
+
+#include <rte_bus.h>
+#include <rte_mempool.h>
+#include <dpaax_iova_table.h>
+
+#include <dpaa_of.h>
+#include <fsl_usd.h>
+#include <fsl_qman.h>
+#include <fsl_bman.h>
+#include <netcfg.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define DPAA_MEMPOOL_OPS_NAME "dpaa"
+
+#define DEV_TO_DPAA_DEVICE(ptr) \
+ container_of(ptr, struct rte_dpaa_device, device)
+
+/* DPAA SoC identifier; if this is not available, it can be concluded
+ * that the board is non-DPAA. A single slot is currently supported.
+ */
+#define DPAA_SOC_ID_FILE "/sys/devices/soc0/soc_id"
+
+#define SVR_LS1043A_FAMILY 0x87920000
+#define SVR_LS1046A_FAMILY 0x87070000
+#define SVR_MASK 0xffff0000
+
+#define RTE_DEV_TO_DPAA_CONST(ptr) \
+ container_of(ptr, const struct rte_dpaa_device, device)
+
+extern unsigned int dpaa_svr_family;
+
+extern RTE_DEFINE_PER_LCORE(bool, dpaa_io);
+
+struct rte_dpaa_device;
+struct rte_dpaa_driver;
+
+/* DPAA Device and Driver lists for DPAA bus */
+TAILQ_HEAD(rte_dpaa_device_list, rte_dpaa_device);
+TAILQ_HEAD(rte_dpaa_driver_list, rte_dpaa_driver);
+
+enum rte_dpaa_type {
+ FSL_DPAA_ETH = 1,
+ FSL_DPAA_CRYPTO,
+};
+
+struct rte_dpaa_bus {
+ struct rte_bus bus;
+ struct rte_dpaa_device_list device_list;
+ struct rte_dpaa_driver_list driver_list;
+ int device_count;
+ int detected;
+};
+
+struct dpaa_device_id {
+ uint8_t fman_id; /**< Fman interface ID, for ETH type device */
+ uint8_t mac_id; /**< Fman MAC interface ID, for ETH type device */
+ uint16_t dev_id; /**< Device Identifier from DPDK */
+};
+
+struct rte_dpaa_device {
+ TAILQ_ENTRY(rte_dpaa_device) next;
+ struct rte_device device;
+ union {
+ struct rte_eth_dev *eth_dev;
+ struct rte_cryptodev *crypto_dev;
+ };
+ struct rte_dpaa_driver *driver;
+ struct dpaa_device_id id;
+ struct rte_intr_handle intr_handle;
+ enum rte_dpaa_type device_type; /**< Ethernet or crypto type device */
+ char name[RTE_ETH_NAME_MAX_LEN];
+};
+
+typedef int (*rte_dpaa_probe_t)(struct rte_dpaa_driver *dpaa_drv,
+ struct rte_dpaa_device *dpaa_dev);
+typedef int (*rte_dpaa_remove_t)(struct rte_dpaa_device *dpaa_dev);
+
+struct rte_dpaa_driver {
+ TAILQ_ENTRY(rte_dpaa_driver) next;
+ struct rte_driver driver;
+ struct rte_dpaa_bus *dpaa_bus;
+ enum rte_dpaa_type drv_type;
+ rte_dpaa_probe_t probe;
+ rte_dpaa_remove_t remove;
+};
+
+struct dpaa_portal {
+ uint32_t bman_idx; /**< BMAN Portal ID*/
+ uint32_t qman_idx; /**< QMAN Portal ID*/
+	uint64_t tid; /**< Parent thread ID for this portal */
+};
+
+/* Various structures representing contiguous memory maps */
+struct dpaa_memseg {
+ TAILQ_ENTRY(dpaa_memseg) next;
+ char *vaddr;
+ rte_iova_t iova;
+ size_t len;
+};
+
+TAILQ_HEAD(dpaa_memseg_list, dpaa_memseg);
+extern struct dpaa_memseg_list rte_dpaa_memsegs;
+
+/* Either iterate over the list of internal memseg references or fall back to
+ * EAL memseg based iova2virt.
+ */
+static inline void *rte_dpaa_mem_ptov(phys_addr_t paddr)
+{
+ struct dpaa_memseg *ms;
+ void *va;
+
+ va = dpaax_iova_table_get_va(paddr);
+ if (likely(va != NULL))
+ return va;
+
+ /* Check if the address is already part of the memseg list internally
+ * maintained by the dpaa driver.
+ */
+ TAILQ_FOREACH(ms, &rte_dpaa_memsegs, next) {
+ if (paddr >= ms->iova && paddr <
+ ms->iova + ms->len)
+ return RTE_PTR_ADD(ms->vaddr, (uintptr_t)(paddr - ms->iova));
+ }
+
+	/* If not, fall back to a full memseg list search */
+ va = rte_mem_iova2virt(paddr);
+
+ dpaax_iova_table_update(paddr, va, RTE_CACHE_LINE_SIZE);
+
+ return va;
+}
+
+static inline rte_iova_t
+rte_dpaa_mem_vtop(void *vaddr)
+{
+ const struct rte_memseg *ms;
+
+ ms = rte_mem_virt2memseg(vaddr, NULL);
+ if (ms)
+ return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
+
+ return (size_t)NULL;
+}
+
+/**
+ * Register a DPAA driver.
+ *
+ * @param driver
+ * A pointer to a rte_dpaa_driver structure describing the driver
+ * to be registered.
+ */
+__rte_internal
+void rte_dpaa_driver_register(struct rte_dpaa_driver *driver);
+
+/**
+ * Unregister a DPAA driver.
+ *
+ * @param driver
+ * A pointer to a rte_dpaa_driver structure describing the driver
+ * to be unregistered.
+ */
+__rte_internal
+void rte_dpaa_driver_unregister(struct rte_dpaa_driver *driver);
+
+/**
+ * Initialize a DPAA portal
+ *
+ * @param arg
+ * Per thread ID
+ *
+ * @return
+ * 0 in case of success, error otherwise
+ */
+__rte_internal
+int rte_dpaa_portal_init(void *arg);
+
+__rte_internal
+int rte_dpaa_portal_fq_init(void *arg, struct qman_fq *fq);
+
+__rte_internal
+int rte_dpaa_portal_fq_close(struct qman_fq *fq);
+
+/**
+ * Clean up a DPAA portal
+ */
+void dpaa_portal_finish(void *arg);
+
+/** Helper for DPAA device registration from driver (eth, crypto) instance */
+#define RTE_PMD_REGISTER_DPAA(nm, dpaa_drv) \
+RTE_INIT(dpaainitfn_ ##nm) \
+{\
+ (dpaa_drv).driver.name = RTE_STR(nm);\
+ rte_dpaa_driver_register(&dpaa_drv); \
+} \
+RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
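+
+/* Registration sketch (illustrative; 'my_probe'/'my_remove' are hypothetical
+ * callbacks): a PMD defines its driver object and registers it at constructor
+ * time via the helper above.
+ *
+ *	static struct rte_dpaa_driver dpaa_pmd = {
+ *		.drv_type = FSL_DPAA_ETH,
+ *		.probe = my_probe,
+ *		.remove = my_remove,
+ *	};
+ *	RTE_PMD_REGISTER_DPAA(net_dpaa_example, dpaa_pmd);
+ */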
+
+/* Create storage for dqrr entries per lcore */
+#define DPAA_PORTAL_DEQUEUE_DEPTH 16
+struct dpaa_portal_dqrr {
+ void *mbuf[DPAA_PORTAL_DEQUEUE_DEPTH];
+ uint64_t dqrr_held;
+ uint8_t dqrr_size;
+};
+
+RTE_DECLARE_PER_LCORE(struct dpaa_portal_dqrr, held_bufs);
+
+#define DPAA_PER_LCORE_DQRR_SIZE RTE_PER_LCORE(held_bufs).dqrr_size
+#define DPAA_PER_LCORE_DQRR_HELD RTE_PER_LCORE(held_bufs).dqrr_held
+#define DPAA_PER_LCORE_DQRR_MBUF(i) RTE_PER_LCORE(held_bufs).mbuf[i]
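+
+/* Access sketch (illustrative; DPAA_PER_LCORE_DQRR_HELD is assumed to be a
+ * bitmask of held entries): walk the per-lcore held DQRR slots.
+ *
+ *	for (int i = 0; i < DPAA_PORTAL_DEQUEUE_DEPTH; i++)
+ *		if (DPAA_PER_LCORE_DQRR_HELD & (1UL << i))
+ *			process_mbuf(DPAA_PER_LCORE_DQRR_MBUF(i));	// hypothetical
+ */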
+
+__rte_internal
+struct fm_eth_port_cfg *dpaa_get_eth_port_cfg(int dev_id);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __RTE_DPAA_BUS_H__ */
diff --git a/src/spdk/dpdk/drivers/bus/dpaa/rte_dpaa_logs.h b/src/spdk/dpdk/drivers/bus/dpaa/rte_dpaa_logs.h
new file mode 100644
index 000000000..149b12743
--- /dev/null
+++ b/src/spdk/dpdk/drivers/bus/dpaa/rte_dpaa_logs.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2017 NXP
+ *
+ */
+
+#ifndef _DPAA_LOGS_H_
+#define _DPAA_LOGS_H_
+
+#include <rte_log.h>
+
+extern int dpaa_logtype_bus;
+
+#define DPAA_BUS_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa_logtype_bus, "dpaa: " fmt "\n", ##args)
+
+#ifdef RTE_LIBRTE_DPAA_DEBUG_BUS
+#define DPAA_BUS_HWWARN(cond, fmt, args...) \
+ do {\
+ if (cond) \
+ DPAA_BUS_LOG(DEBUG, "WARN: " fmt, ##args); \
+ } while (0)
+#else
+#define DPAA_BUS_HWWARN(cond, fmt, args...) do { } while (0)
+#endif
+
+#define DPAA_BUS_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa_logtype_bus, "dpaa: %s(): " fmt "\n", \
+ __func__, ##args)
+
+#define BUS_INIT_FUNC_TRACE() DPAA_BUS_DEBUG(" >>")
+
+#define DPAA_BUS_INFO(fmt, args...) \
+ DPAA_BUS_LOG(INFO, fmt, ## args)
+#define DPAA_BUS_ERR(fmt, args...) \
+ DPAA_BUS_LOG(ERR, fmt, ## args)
+#define DPAA_BUS_WARN(fmt, args...) \
+ DPAA_BUS_LOG(WARNING, fmt, ## args)
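+
+/* Usage sketch (illustrative):
+ *
+ *	DPAA_BUS_ERR("device %s probe failed (%d)", name, ret);
+ *	DPAA_BUS_HWWARN(fd < 0, "portal fd invalid");
+ */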
+
+#endif /* _DPAA_LOGS_H_ */