path: root/src/spdk/dpdk/drivers/net/pfe
author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-21 11:54:28 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-21 11:54:28 +0000
commit    e6918187568dbd01842d8d1d2c808ce16a894239 (patch)
tree      64f88b554b444a49f656b6c656111a145cbbaa28 /src/spdk/dpdk/drivers/net/pfe
parent    Initial commit. (diff)
Adding upstream version 18.2.2. (tag: upstream/18.2.2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/dpdk/drivers/net/pfe')
-rw-r--r--src/spdk/dpdk/drivers/net/pfe/Makefile31
-rw-r--r--src/spdk/dpdk/drivers/net/pfe/base/cbus.h66
-rw-r--r--src/spdk/dpdk/drivers/net/pfe/base/cbus/bmu.h41
-rw-r--r--src/spdk/dpdk/drivers/net/pfe/base/cbus/class_csr.h277
-rw-r--r--src/spdk/dpdk/drivers/net/pfe/base/cbus/emac_mtip.h231
-rw-r--r--src/spdk/dpdk/drivers/net/pfe/base/cbus/gpi.h77
-rw-r--r--src/spdk/dpdk/drivers/net/pfe/base/cbus/hif.h86
-rw-r--r--src/spdk/dpdk/drivers/net/pfe/base/cbus/hif_nocpy.h36
-rw-r--r--src/spdk/dpdk/drivers/net/pfe/base/cbus/tmu_csr.h154
-rw-r--r--src/spdk/dpdk/drivers/net/pfe/base/cbus/util_csr.h47
-rw-r--r--src/spdk/dpdk/drivers/net/pfe/base/pfe.h422
-rw-r--r--src/spdk/dpdk/drivers/net/pfe/meson.build18
-rw-r--r--src/spdk/dpdk/drivers/net/pfe/pfe_eth.h76
-rw-r--r--src/spdk/dpdk/drivers/net/pfe/pfe_ethdev.c1190
-rw-r--r--src/spdk/dpdk/drivers/net/pfe/pfe_hal.c629
-rw-r--r--src/spdk/dpdk/drivers/net/pfe/pfe_hif.c868
-rw-r--r--src/spdk/dpdk/drivers/net/pfe/pfe_hif.h156
-rw-r--r--src/spdk/dpdk/drivers/net/pfe/pfe_hif_lib.c576
-rw-r--r--src/spdk/dpdk/drivers/net/pfe/pfe_hif_lib.h181
-rw-r--r--src/spdk/dpdk/drivers/net/pfe/pfe_logs.h31
-rw-r--r--src/spdk/dpdk/drivers/net/pfe/pfe_mod.h64
-rw-r--r--src/spdk/dpdk/drivers/net/pfe/rte_pmd_pfe_version.map3
22 files changed, 5260 insertions, 0 deletions
diff --git a/src/spdk/dpdk/drivers/net/pfe/Makefile b/src/spdk/dpdk/drivers/net/pfe/Makefile
new file mode 100644
index 000000000..75d30b01a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/pfe/Makefile
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018-2019 NXP
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_pfe.a
+
+CFLAGS += -O3 $(WERROR_FLAGS)
+CFLAGS += -Wno-pointer-arith
+CFLAGS += -I$(RTE_SDK)/drivers/net/pfe/base/
+CFLAGS += -I$(RTE_SDK)/drivers/common/dpaax
+
+EXPORT_MAP := rte_pmd_pfe_version.map
+
+# Interfaces with DPDK
+SRCS-$(CONFIG_RTE_LIBRTE_PFE_PMD) += pfe_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PFE_PMD) += pfe_hal.c
+SRCS-$(CONFIG_RTE_LIBRTE_PFE_PMD) += pfe_hif_lib.c
+SRCS-$(CONFIG_RTE_LIBRTE_PFE_PMD) += pfe_hif.c
+
+LDLIBS += -lrte_bus_vdev
+LDLIBS += -lrte_bus_dpaa
+LDLIBS += -lrte_common_dpaax
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool
+LDLIBS += -lrte_ethdev -lrte_kvargs
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/pfe/base/cbus.h b/src/spdk/dpdk/drivers/net/pfe/base/cbus.h
new file mode 100644
index 000000000..fe7ea6006
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/pfe/base/cbus.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018-2019 NXP
+ */
+
+#ifndef _CBUS_H_
+#define _CBUS_H_
+
+#include <compat.h>
+
+#define EMAC1_BASE_ADDR (CBUS_BASE_ADDR + 0x200000)
+#define EGPI1_BASE_ADDR (CBUS_BASE_ADDR + 0x210000)
+#define EMAC2_BASE_ADDR (CBUS_BASE_ADDR + 0x220000)
+#define EGPI2_BASE_ADDR (CBUS_BASE_ADDR + 0x230000)
+#define BMU1_BASE_ADDR (CBUS_BASE_ADDR + 0x240000)
+#define BMU2_BASE_ADDR (CBUS_BASE_ADDR + 0x250000)
+#define ARB_BASE_ADDR (CBUS_BASE_ADDR + 0x260000)
+#define DDR_CONFIG_BASE_ADDR (CBUS_BASE_ADDR + 0x270000)
+#define HIF_BASE_ADDR (CBUS_BASE_ADDR + 0x280000)
+#define HGPI_BASE_ADDR (CBUS_BASE_ADDR + 0x290000)
+#define LMEM_BASE_ADDR (CBUS_BASE_ADDR + 0x300000)
+#define LMEM_SIZE 0x10000
+#define LMEM_END (LMEM_BASE_ADDR + LMEM_SIZE)
+#define TMU_CSR_BASE_ADDR (CBUS_BASE_ADDR + 0x310000)
+#define CLASS_CSR_BASE_ADDR (CBUS_BASE_ADDR + 0x320000)
+#define HIF_NOCPY_BASE_ADDR (CBUS_BASE_ADDR + 0x350000)
+#define UTIL_CSR_BASE_ADDR (CBUS_BASE_ADDR + 0x360000)
+#define CBUS_GPT_BASE_ADDR (CBUS_BASE_ADDR + 0x370000)
+
+/*
+ * defgroup XXX_MEM_ACCESS_ADDR PE memory access through CSR
+ * XXX_MEM_ACCESS_ADDR register bit definitions.
+ */
+#define PE_MEM_ACCESS_WRITE BIT(31) /* Internal Memory Write. */
+#define PE_MEM_ACCESS_IMEM BIT(15)
+#define PE_MEM_ACCESS_DMEM BIT(16)
+
+/* Byte Enables of the Internal memory access. These are interpreted in big endian */
+#define PE_MEM_ACCESS_BYTE_ENABLE(offset, size) \
+ ({ typeof(size) size_ = (size); \
+ (((BIT(size_) - 1) << (4 - (offset) - (size_))) & 0xf) << 24; })
+
+#include "cbus/emac_mtip.h"
+#include "cbus/gpi.h"
+#include "cbus/bmu.h"
+#include "cbus/hif.h"
+#include "cbus/tmu_csr.h"
+#include "cbus/class_csr.h"
+#include "cbus/hif_nocpy.h"
+#include "cbus/util_csr.h"
+
+/* PFE cores states */
+#define CORE_DISABLE 0x00000000
+#define CORE_ENABLE 0x00000001
+#define CORE_SW_RESET 0x00000002
+
+/* LMEM defines */
+#define LMEM_HDR_SIZE 0x0010
+#define LMEM_BUF_SIZE_LN2 0x7
+#define LMEM_BUF_SIZE BIT(LMEM_BUF_SIZE_LN2)
+
+/* DDR defines */
+#define DDR_HDR_SIZE 0x0100
+#define DDR_BUF_SIZE_LN2 0xb
+#define DDR_BUF_SIZE BIT(DDR_BUF_SIZE_LN2)
+
+#endif /* _CBUS_H_ */
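
For illustration, the byte-enable macro above packs a lane mask into bits [27:24] of the PE memory-access command word whose layout is spelled out in tmu_csr.h ([31] write, [27:24] byte enables, [23:0] address). A hedged sketch, assuming the command word is built exactly from these fields:

/* Sketch only: compose a full 32-bit PE DMEM write command word.
 * PE_MEM_ACCESS_BYTE_ENABLE(0, 4) evaluates to 0xf << 24, i.e. all four
 * byte lanes enabled.
 */
static inline unsigned int pe_dmem_write_cmd(unsigned int dmem_addr)
{
	return PE_MEM_ACCESS_WRITE | PE_MEM_ACCESS_DMEM |
	       PE_MEM_ACCESS_BYTE_ENABLE(0, 4) |
	       (dmem_addr & 0xffffff); /* [23:0] internal memory address */
}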
diff --git a/src/spdk/dpdk/drivers/net/pfe/base/cbus/bmu.h b/src/spdk/dpdk/drivers/net/pfe/base/cbus/bmu.h
new file mode 100644
index 000000000..4821fd1f2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/pfe/base/cbus/bmu.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018-2019 NXP
+ */
+
+#ifndef _BMU_H_
+#define _BMU_H_
+
+#define BMU_VERSION 0x000
+#define BMU_CTRL 0x004
+#define BMU_UCAST_CONFIG 0x008
+#define BMU_UCAST_BASE_ADDR 0x00c
+#define BMU_BUF_SIZE 0x010
+#define BMU_BUF_CNT 0x014
+#define BMU_THRES 0x018
+#define BMU_INT_SRC 0x020
+#define BMU_INT_ENABLE 0x024
+#define BMU_ALLOC_CTRL 0x030
+#define BMU_FREE_CTRL 0x034
+#define BMU_FREE_ERR_ADDR 0x038
+#define BMU_CURR_BUF_CNT 0x03c
+#define BMU_MCAST_CNT 0x040
+#define BMU_MCAST_ALLOC_CTRL 0x044
+#define BMU_REM_BUF_CNT 0x048
+#define BMU_LOW_WATERMARK 0x050
+#define BMU_HIGH_WATERMARK 0x054
+#define BMU_INT_MEM_ACCESS 0x100
+
+struct BMU_CFG {
+ unsigned long baseaddr;
+ u32 count;
+ u32 size;
+ u32 low_watermark;
+ u32 high_watermark;
+};
+
+#define BMU1_BUF_SIZE LMEM_BUF_SIZE_LN2
+#define BMU2_BUF_SIZE DDR_BUF_SIZE_LN2
+
+#define BMU2_MCAST_ALLOC_CTRL (BMU2_BASE_ADDR + BMU_MCAST_ALLOC_CTRL)
+
+#endif /* _BMU_H_ */
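
A minimal sketch of filling struct BMU_CFG for BMU1 (the LMEM buffer manager) and passing it to bmu_init(), declared later in base/pfe.h. The buffer count follows from the LMEM constants in cbus.h; the watermark values are illustrative assumptions:

static void bmu1_setup(void)
{
	struct BMU_CFG cfg = {
		.baseaddr = (unsigned long)LMEM_BASE_ADDR,
		.count = LMEM_SIZE / LMEM_BUF_SIZE, /* 512 buffers of 128B */
		.size = BMU1_BUF_SIZE,		/* log2(buf size), i.e. 7 */
		.low_watermark = 10,		/* assumption */
		.high_watermark = 15,		/* assumption */
	};

	bmu_init((void *)BMU1_BASE_ADDR, &cfg);
}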
diff --git a/src/spdk/dpdk/drivers/net/pfe/base/cbus/class_csr.h b/src/spdk/dpdk/drivers/net/pfe/base/cbus/class_csr.h
new file mode 100644
index 000000000..a3f51c3a3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/pfe/base/cbus/class_csr.h
@@ -0,0 +1,277 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018-2019 NXP
+ */
+
+#ifndef _CLASS_CSR_H_
+#define _CLASS_CSR_H_
+
+#include <compat.h>
+
+/* @file class_csr.h.
+ * class_csr - block containing all the classifier control and status registers.
+ * Mapped on CBUS and accessible from all PE's and ARM.
+ */
+#define CLASS_VERSION (CLASS_CSR_BASE_ADDR + 0x000)
+#define CLASS_TX_CTRL (CLASS_CSR_BASE_ADDR + 0x004)
+#define CLASS_INQ_PKTPTR (CLASS_CSR_BASE_ADDR + 0x010)
+
+/* (ddr_hdr_size[24:16], lmem_hdr_size[5:0]) */
+#define CLASS_HDR_SIZE (CLASS_CSR_BASE_ADDR + 0x014)
+
+/* LMEM header size for the Classifier block. Data in the LMEM
+ * is written from this offset.
+ */
+#define CLASS_HDR_SIZE_LMEM(off) ((off) & 0x3f)
+
+/* DDR header size for the Classifier block. Data in the DDR
+ * is written from this offset.
+ */
+#define CLASS_HDR_SIZE_DDR(off) (((off) & 0x1ff) << 16)
+
+#define CLASS_PE0_QB_DM_ADDR0 (CLASS_CSR_BASE_ADDR + 0x020)
+
+/* DMEM address of first [15:0] and second [31:16] buffers on QB side. */
+#define CLASS_PE0_QB_DM_ADDR1 (CLASS_CSR_BASE_ADDR + 0x024)
+
+/* DMEM address of third [15:0] and fourth [31:16] buffers on QB side. */
+#define CLASS_PE0_RO_DM_ADDR0 (CLASS_CSR_BASE_ADDR + 0x060)
+
+/* DMEM address of first [15:0] and second [31:16] buffers on RO side. */
+#define CLASS_PE0_RO_DM_ADDR1 (CLASS_CSR_BASE_ADDR + 0x064)
+
+/* DMEM address of third [15:0] and fourth [31:16] buffers on RO side. */
+
+/* @name Class PE memory access. Allows external PE's and HOST to
+ * read/write PMEM/DMEM memory ranges for each classifier PE.
+ */
+/* {csr_pe_mem_cmd[31], csr_pe_mem_wren[27:24], csr_pe_mem_addr[23:0]},
+ * See \ref XXX_MEM_ACCESS_ADDR for details.
+ */
+#define CLASS_MEM_ACCESS_ADDR (CLASS_CSR_BASE_ADDR + 0x100)
+
+/* Internal Memory Access Write Data [31:0] */
+#define CLASS_MEM_ACCESS_WDATA (CLASS_CSR_BASE_ADDR + 0x104)
+
+/* Internal Memory Access Read Data [31:0] */
+#define CLASS_MEM_ACCESS_RDATA (CLASS_CSR_BASE_ADDR + 0x108)
+#define CLASS_TM_INQ_ADDR (CLASS_CSR_BASE_ADDR + 0x114)
+#define CLASS_PE_STATUS (CLASS_CSR_BASE_ADDR + 0x118)
+
+#define CLASS_PHY1_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x11c)
+#define CLASS_PHY1_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x120)
+#define CLASS_PHY1_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x124)
+#define CLASS_PHY1_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x128)
+#define CLASS_PHY1_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x12c)
+#define CLASS_PHY1_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x130)
+#define CLASS_PHY1_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x134)
+#define CLASS_PHY1_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x138)
+#define CLASS_PHY1_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x13c)
+#define CLASS_PHY1_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x140)
+#define CLASS_PHY2_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x144)
+#define CLASS_PHY2_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x148)
+#define CLASS_PHY2_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x14c)
+#define CLASS_PHY2_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x150)
+#define CLASS_PHY2_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x154)
+#define CLASS_PHY2_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x158)
+#define CLASS_PHY2_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x15c)
+#define CLASS_PHY2_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x160)
+#define CLASS_PHY2_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x164)
+#define CLASS_PHY2_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x168)
+#define CLASS_PHY3_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x16c)
+#define CLASS_PHY3_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x170)
+#define CLASS_PHY3_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x174)
+#define CLASS_PHY3_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x178)
+#define CLASS_PHY3_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x17c)
+#define CLASS_PHY3_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x180)
+#define CLASS_PHY3_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x184)
+#define CLASS_PHY3_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x188)
+#define CLASS_PHY3_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x18c)
+#define CLASS_PHY3_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x190)
+#define CLASS_PHY1_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x194)
+#define CLASS_PHY1_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x198)
+#define CLASS_PHY1_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x19c)
+#define CLASS_PHY1_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1a0)
+#define CLASS_PHY2_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1a4)
+#define CLASS_PHY2_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1a8)
+#define CLASS_PHY2_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x1ac)
+#define CLASS_PHY2_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1b0)
+#define CLASS_PHY3_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1b4)
+#define CLASS_PHY3_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1b8)
+#define CLASS_PHY3_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x1bc)
+#define CLASS_PHY3_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1c0)
+#define CLASS_PHY4_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1c4)
+#define CLASS_PHY4_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1c8)
+#define CLASS_PHY4_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x1cc)
+#define CLASS_PHY4_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1d0)
+#define CLASS_PHY4_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x1d4)
+#define CLASS_PHY4_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x1d8)
+#define CLASS_PHY4_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x1dc)
+#define CLASS_PHY4_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x1e0)
+#define CLASS_PHY4_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x1e4)
+#define CLASS_PHY4_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x1e8)
+#define CLASS_PHY4_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x1ec)
+#define CLASS_PHY4_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x1f0)
+#define CLASS_PHY4_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x1f4)
+#define CLASS_PHY4_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x1f8)
+
+#define CLASS_PE_SYS_CLK_RATIO (CLASS_CSR_BASE_ADDR + 0x200)
+#define CLASS_AFULL_THRES (CLASS_CSR_BASE_ADDR + 0x204)
+#define CLASS_GAP_BETWEEN_READS (CLASS_CSR_BASE_ADDR + 0x208)
+#define CLASS_MAX_BUF_CNT (CLASS_CSR_BASE_ADDR + 0x20c)
+#define CLASS_TSQ_FIFO_THRES (CLASS_CSR_BASE_ADDR + 0x210)
+#define CLASS_TSQ_MAX_CNT (CLASS_CSR_BASE_ADDR + 0x214)
+#define CLASS_IRAM_DATA_0 (CLASS_CSR_BASE_ADDR + 0x218)
+#define CLASS_IRAM_DATA_1 (CLASS_CSR_BASE_ADDR + 0x21c)
+#define CLASS_IRAM_DATA_2 (CLASS_CSR_BASE_ADDR + 0x220)
+#define CLASS_IRAM_DATA_3 (CLASS_CSR_BASE_ADDR + 0x224)
+
+#define CLASS_BUS_ACCESS_ADDR (CLASS_CSR_BASE_ADDR + 0x228)
+
+#define CLASS_BUS_ACCESS_WDATA (CLASS_CSR_BASE_ADDR + 0x22c)
+#define CLASS_BUS_ACCESS_RDATA (CLASS_CSR_BASE_ADDR + 0x230)
+
+/* (route_entry_size[9:0], route_hash_size[23:16]
+ * (this is actually ln2(size)))
+ */
+#define CLASS_ROUTE_HASH_ENTRY_SIZE (CLASS_CSR_BASE_ADDR + 0x234)
+
+#define CLASS_ROUTE_ENTRY_SIZE(size) ((size) & 0x1ff)
+#define CLASS_ROUTE_HASH_SIZE(hash_bits) (((hash_bits) & 0xff) << 16)
+
+#define CLASS_ROUTE_TABLE_BASE (CLASS_CSR_BASE_ADDR + 0x238)
+
+#define CLASS_ROUTE_MULTI (CLASS_CSR_BASE_ADDR + 0x23c)
+#define CLASS_SMEM_OFFSET (CLASS_CSR_BASE_ADDR + 0x240)
+#define CLASS_LMEM_BUF_SIZE (CLASS_CSR_BASE_ADDR + 0x244)
+#define CLASS_VLAN_ID (CLASS_CSR_BASE_ADDR + 0x248)
+#define CLASS_BMU1_BUF_FREE (CLASS_CSR_BASE_ADDR + 0x24c)
+#define CLASS_USE_TMU_INQ (CLASS_CSR_BASE_ADDR + 0x250)
+#define CLASS_VLAN_ID1 (CLASS_CSR_BASE_ADDR + 0x254)
+
+#define CLASS_BUS_ACCESS_BASE (CLASS_CSR_BASE_ADDR + 0x258)
+#define CLASS_BUS_ACCESS_BASE_MASK (0xFF000000)
+/* bit 31:24 of PE peripheral address are stored in CLASS_BUS_ACCESS_BASE */
+
+#define CLASS_HIF_PARSE (CLASS_CSR_BASE_ADDR + 0x25c)
+
+#define CLASS_HOST_PE0_GP (CLASS_CSR_BASE_ADDR + 0x260)
+#define CLASS_PE0_GP (CLASS_CSR_BASE_ADDR + 0x264)
+#define CLASS_HOST_PE1_GP (CLASS_CSR_BASE_ADDR + 0x268)
+#define CLASS_PE1_GP (CLASS_CSR_BASE_ADDR + 0x26c)
+#define CLASS_HOST_PE2_GP (CLASS_CSR_BASE_ADDR + 0x270)
+#define CLASS_PE2_GP (CLASS_CSR_BASE_ADDR + 0x274)
+#define CLASS_HOST_PE3_GP (CLASS_CSR_BASE_ADDR + 0x278)
+#define CLASS_PE3_GP (CLASS_CSR_BASE_ADDR + 0x27c)
+#define CLASS_HOST_PE4_GP (CLASS_CSR_BASE_ADDR + 0x280)
+#define CLASS_PE4_GP (CLASS_CSR_BASE_ADDR + 0x284)
+#define CLASS_HOST_PE5_GP (CLASS_CSR_BASE_ADDR + 0x288)
+#define CLASS_PE5_GP (CLASS_CSR_BASE_ADDR + 0x28c)
+
+#define CLASS_PE_INT_SRC (CLASS_CSR_BASE_ADDR + 0x290)
+#define CLASS_PE_INT_ENABLE (CLASS_CSR_BASE_ADDR + 0x294)
+
+#define CLASS_TPID0_TPID1 (CLASS_CSR_BASE_ADDR + 0x298)
+#define CLASS_TPID2 (CLASS_CSR_BASE_ADDR + 0x29c)
+
+#define CLASS_L4_CHKSUM_ADDR (CLASS_CSR_BASE_ADDR + 0x2a0)
+
+#define CLASS_PE0_DEBUG (CLASS_CSR_BASE_ADDR + 0x2a4)
+#define CLASS_PE1_DEBUG (CLASS_CSR_BASE_ADDR + 0x2a8)
+#define CLASS_PE2_DEBUG (CLASS_CSR_BASE_ADDR + 0x2ac)
+#define CLASS_PE3_DEBUG (CLASS_CSR_BASE_ADDR + 0x2b0)
+#define CLASS_PE4_DEBUG (CLASS_CSR_BASE_ADDR + 0x2b4)
+#define CLASS_PE5_DEBUG (CLASS_CSR_BASE_ADDR + 0x2b8)
+
+#define CLASS_STATE (CLASS_CSR_BASE_ADDR + 0x2bc)
+
+/* CLASS defines */
+#define CLASS_PBUF_SIZE 0x100 /* Fixed by hardware */
+#define CLASS_PBUF_HEADER_OFFSET 0x80 /* Can be configured */
+
+/* Can be configured */
+#define CLASS_PBUF0_BASE_ADDR 0x000
+/* Can be configured */
+#define CLASS_PBUF1_BASE_ADDR (CLASS_PBUF0_BASE_ADDR + CLASS_PBUF_SIZE)
+/* Can be configured */
+#define CLASS_PBUF2_BASE_ADDR (CLASS_PBUF1_BASE_ADDR + CLASS_PBUF_SIZE)
+/* Can be configured */
+#define CLASS_PBUF3_BASE_ADDR (CLASS_PBUF2_BASE_ADDR + CLASS_PBUF_SIZE)
+
+#define CLASS_PBUF0_HEADER_BASE_ADDR (CLASS_PBUF0_BASE_ADDR + \
+ CLASS_PBUF_HEADER_OFFSET)
+#define CLASS_PBUF1_HEADER_BASE_ADDR (CLASS_PBUF1_BASE_ADDR + \
+ CLASS_PBUF_HEADER_OFFSET)
+#define CLASS_PBUF2_HEADER_BASE_ADDR (CLASS_PBUF2_BASE_ADDR + \
+ CLASS_PBUF_HEADER_OFFSET)
+#define CLASS_PBUF3_HEADER_BASE_ADDR (CLASS_PBUF3_BASE_ADDR + \
+ CLASS_PBUF_HEADER_OFFSET)
+
+#define CLASS_PE0_RO_DM_ADDR0_VAL ((CLASS_PBUF1_BASE_ADDR << 16) | \
+ CLASS_PBUF0_BASE_ADDR)
+#define CLASS_PE0_RO_DM_ADDR1_VAL ((CLASS_PBUF3_BASE_ADDR << 16) | \
+ CLASS_PBUF2_BASE_ADDR)
+
+#define CLASS_PE0_QB_DM_ADDR0_VAL ((CLASS_PBUF1_HEADER_BASE_ADDR << 16) |\
+ CLASS_PBUF0_HEADER_BASE_ADDR)
+#define CLASS_PE0_QB_DM_ADDR1_VAL ((CLASS_PBUF3_HEADER_BASE_ADDR << 16) |\
+ CLASS_PBUF2_HEADER_BASE_ADDR)
+
+#define CLASS_ROUTE_SIZE 128
+#define CLASS_MAX_ROUTE_SIZE 256
+#define CLASS_ROUTE_HASH_BITS 20
+#define CLASS_ROUTE_HASH_MASK (BIT(CLASS_ROUTE_HASH_BITS) - 1)
+
+/* Can be configured */
+#define CLASS_ROUTE0_BASE_ADDR 0x400
+/* Can be configured */
+#define CLASS_ROUTE1_BASE_ADDR (CLASS_ROUTE0_BASE_ADDR + CLASS_ROUTE_SIZE)
+/* Can be configured */
+#define CLASS_ROUTE2_BASE_ADDR (CLASS_ROUTE1_BASE_ADDR + CLASS_ROUTE_SIZE)
+/* Can be configured */
+#define CLASS_ROUTE3_BASE_ADDR (CLASS_ROUTE2_BASE_ADDR + CLASS_ROUTE_SIZE)
+
+#define CLASS_SA_SIZE 128
+#define CLASS_IPSEC_SA0_BASE_ADDR 0x600
+/* not used */
+#define CLASS_IPSEC_SA1_BASE_ADDR (CLASS_IPSEC_SA0_BASE_ADDR + CLASS_SA_SIZE)
+/* not used */
+#define CLASS_IPSEC_SA2_BASE_ADDR (CLASS_IPSEC_SA1_BASE_ADDR + CLASS_SA_SIZE)
+/* not used */
+#define CLASS_IPSEC_SA3_BASE_ADDR (CLASS_IPSEC_SA2_BASE_ADDR + CLASS_SA_SIZE)
+
+/* general purpose free dmem buffer, last portion of 2K dmem pbuf */
+#define CLASS_GP_DMEM_BUF_SIZE (2048 - (CLASS_PBUF_SIZE * 4) - \
+ (CLASS_ROUTE_SIZE * 4) - (CLASS_SA_SIZE))
+#define CLASS_GP_DMEM_BUF ((void *)(CLASS_IPSEC_SA0_BASE_ADDR + \
+ CLASS_SA_SIZE))
+
+#define TWO_LEVEL_ROUTE BIT(0)
+#define PHYNO_IN_HASH BIT(1)
+#define HW_ROUTE_FETCH BIT(3)
+#define HW_BRIDGE_FETCH BIT(5)
+#define IP_ALIGNED BIT(6)
+#define ARC_HIT_CHECK_EN BIT(7)
+#define CLASS_TOE BIT(11)
+#define HASH_NORMAL (0 << 12)
+#define HASH_CRC_PORT BIT(12)
+#define HASH_CRC_IP (2 << 12)
+#define HASH_CRC_PORT_IP (3 << 12)
+#define QB2BUS_LE BIT(15)
+
+#define TCP_CHKSUM_DROP BIT(0)
+#define UDP_CHKSUM_DROP BIT(1)
+#define IPV4_CHKSUM_DROP BIT(9)
+
+/*CLASS_HIF_PARSE bits*/
+#define HIF_PKT_CLASS_EN BIT(0)
+#define HIF_PKT_OFFSET(ofst) (((ofst) & 0xF) << 1)
+
+struct class_cfg {
+ u32 toe_mode;
+ unsigned long route_table_baseaddr;
+ u32 route_table_hash_bits;
+ u32 pe_sys_clk_ratio;
+ u32 resume;
+};
+
+#endif /* _CLASS_CSR_H_ */
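
As a hedged example of how the field macros above combine, the LMEM [5:0] and DDR [24:16] header offsets can be packed into CLASS_HDR_SIZE in one write (writel() comes from base/pfe.h, the header-size constants from cbus.h; the pairing is an assumption based on the register comment):

static void class_set_hdr_sizes(void)
{
	/* LMEM_HDR_SIZE (0x10) fits the 6-bit field, DDR_HDR_SIZE (0x100)
	 * fits the 9-bit field.
	 */
	writel(CLASS_HDR_SIZE_LMEM(LMEM_HDR_SIZE) |
	       CLASS_HDR_SIZE_DDR(DDR_HDR_SIZE), CLASS_HDR_SIZE);
}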
diff --git a/src/spdk/dpdk/drivers/net/pfe/base/cbus/emac_mtip.h b/src/spdk/dpdk/drivers/net/pfe/base/cbus/emac_mtip.h
new file mode 100644
index 000000000..e1afc3148
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/pfe/base/cbus/emac_mtip.h
@@ -0,0 +1,231 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018-2019 NXP
+ */
+
+#ifndef _EMAC_H_
+#define _EMAC_H_
+
+/* This file is for Ethernet MAC registers and offsets
+ */
+
+#include <linux/ethtool.h>
+
+#define EMAC_IEVENT_REG 0x004
+#define EMAC_IMASK_REG 0x008
+#define EMAC_R_DES_ACTIVE_REG 0x010
+#define EMAC_X_DES_ACTIVE_REG 0x014
+#define EMAC_ECNTRL_REG 0x024
+#define EMAC_MII_DATA_REG 0x040
+#define EMAC_MII_CTRL_REG 0x044
+#define EMAC_MIB_CTRL_STS_REG 0x064
+#define EMAC_RCNTRL_REG 0x084
+#define EMAC_TCNTRL_REG 0x0C4
+#define EMAC_PHY_ADDR_LOW 0x0E4
+#define EMAC_PHY_ADDR_HIGH 0x0E8
+#define EMAC_GAUR 0x120
+#define EMAC_GALR 0x124
+#define EMAC_TFWR_STR_FWD 0x144
+#define EMAC_RX_SECTION_FULL 0x190
+#define EMAC_RX_SECTION_EMPTY 0x194
+#define EMAC_TX_SECTION_EMPTY 0x1A0
+#define EMAC_TRUNC_FL 0x1B0
+
+#define RMON_T_DROP 0x200 /* Count of frames not counted correctly */
+#define RMON_T_PACKETS 0x204 /* RMON TX packet count */
+#define RMON_T_BC_PKT 0x208 /* RMON TX broadcast pkts */
+#define RMON_T_MC_PKT 0x20c /* RMON TX multicast pkts */
+#define RMON_T_CRC_ALIGN 0x210 /* RMON TX pkts with CRC align err */
+#define RMON_T_UNDERSIZE 0x214 /* RMON TX pkts < 64 bytes, good CRC */
+#define RMON_T_OVERSIZE 0x218 /* RMON TX pkts > MAX_FL bytes good CRC */
+#define RMON_T_FRAG 0x21c /* RMON TX pkts < 64 bytes, bad CRC */
+#define RMON_T_JAB 0x220 /* RMON TX pkts > MAX_FL bytes, bad CRC */
+#define RMON_T_COL 0x224 /* RMON TX collision count */
+#define RMON_T_P64 0x228 /* RMON TX 64 byte pkts */
+#define RMON_T_P65TO127 0x22c /* RMON TX 65 to 127 byte pkts */
+#define RMON_T_P128TO255 0x230 /* RMON TX 128 to 255 byte pkts */
+#define RMON_T_P256TO511 0x234 /* RMON TX 256 to 511 byte pkts */
+#define RMON_T_P512TO1023 0x238 /* RMON TX 512 to 1023 byte pkts */
+#define RMON_T_P1024TO2047 0x23c /* RMON TX 1024 to 2047 byte pkts */
+#define RMON_T_P_GTE2048 0x240 /* RMON TX pkts > 2048 bytes */
+#define RMON_T_OCTETS 0x244 /* RMON TX octets */
+#define IEEE_T_DROP 0x248 /* Count of frames not counted correctly */
+#define IEEE_T_FRAME_OK 0x24c /* Frames tx'd OK */
+#define IEEE_T_1COL 0x250 /* Frames tx'd with single collision */
+#define IEEE_T_MCOL 0x254 /* Frames tx'd with multiple collision */
+#define IEEE_T_DEF 0x258 /* Frames tx'd after deferral delay */
+#define IEEE_T_LCOL 0x25c /* Frames tx'd with late collision */
+#define IEEE_T_EXCOL 0x260 /* Frames tx'd with excessive collisions */
+#define IEEE_T_MACERR 0x264 /* Frames tx'd with TX FIFO underrun */
+#define IEEE_T_CSERR 0x268 /* Frames tx'd with carrier sense err */
+#define IEEE_T_SQE 0x26c /* Frames tx'd with SQE err */
+#define IEEE_T_FDXFC 0x270 /* Flow control pause frames tx'd */
+#define IEEE_T_OCTETS_OK 0x274 /* Octet count for frames tx'd w/o err */
+#define RMON_R_PACKETS 0x284 /* RMON RX packet count */
+#define RMON_R_BC_PKT 0x288 /* RMON RX broadcast pkts */
+#define RMON_R_MC_PKT 0x28c /* RMON RX multicast pkts */
+#define RMON_R_CRC_ALIGN 0x290 /* RMON RX pkts with CRC alignment err */
+#define RMON_R_UNDERSIZE 0x294 /* RMON RX pkts < 64 bytes, good CRC */
+#define RMON_R_OVERSIZE 0x298 /* RMON RX pkts > MAX_FL bytes good CRC */
+#define RMON_R_FRAG 0x29c /* RMON RX pkts < 64 bytes, bad CRC */
+#define RMON_R_JAB 0x2a0 /* RMON RX pkts > MAX_FL bytes, bad CRC */
+#define RMON_R_RESVD_O 0x2a4 /* Reserved */
+#define RMON_R_P64 0x2a8 /* RMON RX 64 byte pkts */
+#define RMON_R_P65TO127 0x2ac /* RMON RX 65 to 127 byte pkts */
+#define RMON_R_P128TO255 0x2b0 /* RMON RX 128 to 255 byte pkts */
+#define RMON_R_P256TO511 0x2b4 /* RMON RX 256 to 511 byte pkts */
+#define RMON_R_P512TO1023 0x2b8 /* RMON RX 512 to 1023 byte pkts */
+#define RMON_R_P1024TO2047 0x2bc /* RMON RX 1024 to 2047 byte pkts */
+#define RMON_R_P_GTE2048 0x2c0 /* RMON RX pkts > 2048 bytes */
+#define RMON_R_OCTETS 0x2c4 /* RMON RX octets */
+#define IEEE_R_DROP 0x2c8 /* Count frames not counted correctly */
+#define IEEE_R_FRAME_OK 0x2cc /* Frames rx'd OK */
+#define IEEE_R_CRC 0x2d0 /* Frames rx'd with CRC err */
+#define IEEE_R_ALIGN 0x2d4 /* Frames rx'd with alignment err */
+#define IEEE_R_MACERR 0x2d8 /* Receive FIFO overflow count */
+#define IEEE_R_FDXFC 0x2dc /* Flow control pause frames rx'd */
+#define IEEE_R_OCTETS_OK 0x2e0 /* Octet cnt for frames rx'd w/o err */
+
+#define EMAC_SMAC_0_0 0x500 /*Supplemental MAC Address 0 (RW).*/
+#define EMAC_SMAC_0_1 0x504 /*Supplemental MAC Address 0 (RW).*/
+
+/* GEMAC definitions and settings */
+
+#define EMAC_PORT_0 0
+#define EMAC_PORT_1 1
+
+/* GEMAC Bit definitions */
+#define EMAC_IEVENT_HBERR 0x80000000
+#define EMAC_IEVENT_BABR 0x40000000
+#define EMAC_IEVENT_BABT 0x20000000
+#define EMAC_IEVENT_GRA 0x10000000
+#define EMAC_IEVENT_TXF 0x08000000
+#define EMAC_IEVENT_TXB 0x04000000
+#define EMAC_IEVENT_RXF 0x02000000
+#define EMAC_IEVENT_RXB 0x01000000
+#define EMAC_IEVENT_MII 0x00800000
+#define EMAC_IEVENT_EBERR 0x00400000
+#define EMAC_IEVENT_LC 0x00200000
+#define EMAC_IEVENT_RL 0x00100000
+#define EMAC_IEVENT_UN 0x00080000
+
+#define EMAC_IMASK_HBERR 0x80000000
+#define EMAC_IMASK_BABR 0x40000000
+#define EMAC_IMASKT_BABT 0x20000000
+#define EMAC_IMASK_GRA 0x10000000
+#define EMAC_IMASKT_TXF 0x08000000
+#define EMAC_IMASK_TXB 0x04000000
+#define EMAC_IMASKT_RXF 0x02000000
+#define EMAC_IMASK_RXB 0x01000000
+#define EMAC_IMASK_MII 0x00800000
+#define EMAC_IMASK_EBERR 0x00400000
+#define EMAC_IMASK_LC 0x00200000
+#define EMAC_IMASKT_RL 0x00100000
+#define EMAC_IMASK_UN 0x00080000
+
+#define EMAC_RCNTRL_MAX_FL_SHIFT 16
+#define EMAC_RCNTRL_LOOP 0x00000001
+#define EMAC_RCNTRL_DRT 0x00000002
+#define EMAC_RCNTRL_MII_MODE 0x00000004
+#define EMAC_RCNTRL_PROM 0x00000008
+#define EMAC_RCNTRL_BC_REJ 0x00000010
+#define EMAC_RCNTRL_FCE 0x00000020
+#define EMAC_RCNTRL_RGMII 0x00000040
+#define EMAC_RCNTRL_SGMII 0x00000080
+#define EMAC_RCNTRL_RMII 0x00000100
+#define EMAC_RCNTRL_RMII_10T 0x00000200
+#define EMAC_RCNTRL_CRC_FWD 0x00004000
+
+#define EMAC_TCNTRL_GTS 0x00000001
+#define EMAC_TCNTRL_HBC 0x00000002
+#define EMAC_TCNTRL_FDEN 0x00000004
+#define EMAC_TCNTRL_TFC_PAUSE 0x00000008
+#define EMAC_TCNTRL_RFC_PAUSE 0x00000010
+
+#define EMAC_ECNTRL_RESET 0x00000001 /* reset the EMAC */
+#define EMAC_ECNTRL_ETHER_EN 0x00000002 /* enable the EMAC */
+#define EMAC_ECNTRL_MAGIC_ENA 0x00000004
+#define EMAC_ECNTRL_SLEEP 0x00000008
+#define EMAC_ECNTRL_SPEED 0x00000020
+#define EMAC_ECNTRL_DBSWAP 0x00000100
+
+#define EMAC_X_WMRK_STRFWD 0x00000100
+
+#define EMAC_X_DES_ACTIVE_TDAR 0x01000000
+#define EMAC_R_DES_ACTIVE_RDAR 0x01000000
+
+#define EMAC_RX_SECTION_EMPTY_V 0x00010006
+/*
+ * The possible operating speeds of the MAC, currently supporting 10, 100 and
+ * 1000Mb modes.
+ */
+enum mac_speed {SPEED_10M, SPEED_100M, SPEED_1000M, SPEED_1000M_PCS};
+
+/* MII-related definitions */
+#define EMAC_MII_DATA_ST 0x40000000 /* Start of frame delimiter */
+#define EMAC_MII_DATA_OP_RD 0x20000000 /* Perform a read operation */
+#define EMAC_MII_DATA_OP_CL45_RD 0x30000000 /* Perform a read operation */
+#define EMAC_MII_DATA_OP_WR 0x10000000 /* Perform a write operation */
+#define EMAC_MII_DATA_OP_CL45_WR 0x10000000 /* Perform a write operation */
+#define EMAC_MII_DATA_PA_MSK 0x0f800000 /* PHY Address field mask */
+#define EMAC_MII_DATA_RA_MSK 0x007c0000 /* PHY Register field mask */
+#define EMAC_MII_DATA_TA 0x00020000 /* Turnaround */
+#define EMAC_MII_DATA_DATAMSK 0x0000ffff /* PHY data field */
+
+#define EMAC_MII_DATA_RA_SHIFT 18 /* MII Register address bits */
+#define EMAC_MII_DATA_RA_MASK 0x1F /* MII Register address mask */
+#define EMAC_MII_DATA_PA_SHIFT 23 /* MII PHY address bits */
+#define EMAC_MII_DATA_PA_MASK 0x1F /* MII PHY address mask */
+
+#define EMAC_MII_DATA_RA(v) (((v) & EMAC_MII_DATA_RA_MASK) << \
+ EMAC_MII_DATA_RA_SHIFT)
+#define EMAC_MII_DATA_PA(v) (((v) & EMAC_MII_DATA_PA_MASK) << \
+ EMAC_MII_DATA_PA_SHIFT)
+#define EMAC_MII_DATA(v) ((v) & 0xffff)
+
+#define EMAC_MII_SPEED_SHIFT 1
+#define EMAC_HOLDTIME_SHIFT 8
+#define EMAC_HOLDTIME_MASK 0x7
+#define EMAC_HOLDTIME(v) (((v) & EMAC_HOLDTIME_MASK) << \
+ EMAC_HOLDTIME_SHIFT)
+
+/*
+ * The Address organisation for the MAC device. All addresses are split into
+ * two 32-bit register fields. The first one (bottom) is the lower 32-bits of
+ * the address and the other field holds the high order bits - this may be
+ * 16-bits in the case of MAC addresses, or 32-bits for the hash address.
+ * In terms of memory storage, the first item (bottom) is assumed to be at a
+ * lower address location than 'top', i.e. top should be at the address of
+ * 'bottom' + 4 bytes.
+ */
+struct pfe_mac_addr {
+ u32 bottom; /* Lower 32-bits of address. */
+ u32 top; /* Upper 32-bits of address. */
+};
+
+/*
+ * The following is the organisation of the address filters section of the MAC
+ * registers. The Cadence MAC contains four possible specific address match
+ * addresses, if an incoming frame corresponds to any one of these four
+ * addresses then the frame will be copied to memory.
+ * It is not necessary for all four of the address match registers to be
+ * programmed, this is application dependent.
+ */
+struct spec_addr {
+ struct pfe_mac_addr one; /* Specific address register 1. */
+ struct pfe_mac_addr two; /* Specific address register 2. */
+ struct pfe_mac_addr three; /* Specific address register 3. */
+ struct pfe_mac_addr four; /* Specific address register 4. */
+};
+
+struct gemac_cfg {
+ u32 mode;
+ u32 speed;
+ u32 duplex;
+};
+
+/* EMAC Hash size */
+#define EMAC_HASH_REG_BITS 64
+
+#define EMAC_SPEC_ADDR_MAX 4
+
+#endif /* _EMAC_H_ */
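
A sketch of how the MII frame macros above compose a Clause-22 MDIO read command for EMAC_MII_DATA_REG; the completion-polling flow mentioned in the comment is an assumption, not shown in this patch:

/* Sketch only: the caller would write this frame to EMAC_MII_DATA_REG
 * and then wait for the MII event (EMAC_IEVENT_MII) before reading the
 * result back from the same register.
 */
static inline u32 emac_mii_read_frame(int phy_addr, int reg_addr)
{
	return EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_RD | EMAC_MII_DATA_TA |
	       EMAC_MII_DATA_PA(phy_addr) | EMAC_MII_DATA_RA(reg_addr);
}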
diff --git a/src/spdk/dpdk/drivers/net/pfe/base/cbus/gpi.h b/src/spdk/dpdk/drivers/net/pfe/base/cbus/gpi.h
new file mode 100644
index 000000000..3ebdef926
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/pfe/base/cbus/gpi.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018-2019 NXP
+ */
+
+#ifndef _GPI_H_
+#define _GPI_H_
+
+/* Generic Packet Interface: the generic packet interface block interfaces
+ * to blocks like the Ethernet MAC and the host interface, and converts
+ * data into WSP internal structures
+ */
+
+#define GPI_VERSION 0x00
+#define GPI_CTRL 0x04
+#define GPI_RX_CONFIG 0x08
+#define GPI_HDR_SIZE 0x0c
+#define GPI_BUF_SIZE 0x10
+#define GPI_LMEM_ALLOC_ADDR 0x14
+#define GPI_LMEM_FREE_ADDR 0x18
+#define GPI_DDR_ALLOC_ADDR 0x1c
+#define GPI_DDR_FREE_ADDR 0x20
+#define GPI_CLASS_ADDR 0x24
+#define GPI_DRX_FIFO 0x28
+#define GPI_TRX_FIFO 0x2c
+#define GPI_INQ_PKTPTR 0x30
+#define GPI_DDR_DATA_OFFSET 0x34
+#define GPI_LMEM_DATA_OFFSET 0x38
+#define GPI_TMLF_TX 0x4c
+#define GPI_DTX_ASEQ 0x50
+#define GPI_FIFO_STATUS 0x54
+#define GPI_FIFO_DEBUG 0x58
+#define GPI_TX_PAUSE_TIME 0x5c
+#define GPI_LMEM_SEC_BUF_DATA_OFFSET 0x60
+#define GPI_DDR_SEC_BUF_DATA_OFFSET 0x64
+#define GPI_TOE_CHKSUM_EN 0x68
+#define GPI_OVERRUN_DROPCNT 0x6c
+#define GPI_CSR_MTIP_PAUSE_REG 0x74
+#define GPI_CSR_MTIP_PAUSE_QUANTUM 0x78
+#define GPI_CSR_RX_CNT 0x7c
+#define GPI_CSR_TX_CNT 0x80
+#define GPI_CSR_DEBUG1 0x84
+#define GPI_CSR_DEBUG2 0x88
+
+struct gpi_cfg {
+ u32 lmem_rtry_cnt;
+ u32 tmlf_txthres;
+ u32 aseq_len;
+ u32 mtip_pause_reg;
+};
+
+/* GPI commons defines */
+#define GPI_LMEM_BUF_EN 0x1
+#define GPI_DDR_BUF_EN 0x1
+
+/* EGPI 1 defines */
+#define EGPI1_LMEM_RTRY_CNT 0x40
+#define EGPI1_TMLF_TXTHRES 0xBC
+#define EGPI1_ASEQ_LEN 0x50
+
+/* EGPI 2 defines */
+#define EGPI2_LMEM_RTRY_CNT 0x40
+#define EGPI2_TMLF_TXTHRES 0xBC
+#define EGPI2_ASEQ_LEN 0x40
+
+/* EGPI 3 defines */
+#define EGPI3_LMEM_RTRY_CNT 0x40
+#define EGPI3_TMLF_TXTHRES 0xBC
+#define EGPI3_ASEQ_LEN 0x40
+
+/* HGPI defines */
+#define HGPI_LMEM_RTRY_CNT 0x40
+#define HGPI_TMLF_TXTHRES 0xBC
+#define HGPI_ASEQ_LEN 0x40
+
+#define EGPI_PAUSE_TIME 0x000007D0
+#define EGPI_PAUSE_ENABLE 0x40000000
+#endif /* _GPI_H_ */
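
A hedged sketch of how the per-instance tuning constants above might populate struct gpi_cfg for EGPI1 before calling gpi_init() from base/pfe.h; the mtip_pause_reg composition is an assumption:

static void egpi1_setup(void)
{
	struct gpi_cfg cfg = {
		.lmem_rtry_cnt = EGPI1_LMEM_RTRY_CNT,
		.tmlf_txthres = EGPI1_TMLF_TXTHRES,
		.aseq_len = EGPI1_ASEQ_LEN,
		.mtip_pause_reg = EGPI_PAUSE_ENABLE | EGPI_PAUSE_TIME,
	};

	gpi_init((void *)EGPI1_BASE_ADDR, &cfg);
}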
diff --git a/src/spdk/dpdk/drivers/net/pfe/base/cbus/hif.h b/src/spdk/dpdk/drivers/net/pfe/base/cbus/hif.h
new file mode 100644
index 000000000..be821c2db
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/pfe/base/cbus/hif.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018-2019 NXP
+ */
+
+#ifndef _HIF_H_
+#define _HIF_H_
+
+/* @file hif.h.
+ * hif - PFE HIF block control and status registers.
+ * Mapped on CBUS and accessible from all PE's and ARM.
+ */
+#define HIF_VERSION (HIF_BASE_ADDR + 0x00)
+#define HIF_TX_CTRL (HIF_BASE_ADDR + 0x04)
+#define HIF_TX_CURR_BD_ADDR (HIF_BASE_ADDR + 0x08)
+#define HIF_TX_ALLOC (HIF_BASE_ADDR + 0x0c)
+#define HIF_TX_BDP_ADDR (HIF_BASE_ADDR + 0x10)
+#define HIF_TX_STATUS (HIF_BASE_ADDR + 0x14)
+#define HIF_RX_CTRL (HIF_BASE_ADDR + 0x20)
+#define HIF_RX_BDP_ADDR (HIF_BASE_ADDR + 0x24)
+#define HIF_RX_STATUS (HIF_BASE_ADDR + 0x30)
+#define HIF_INT_SRC (HIF_BASE_ADDR + 0x34)
+#define HIF_INT_ENABLE (HIF_BASE_ADDR + 0x38)
+#define HIF_POLL_CTRL (HIF_BASE_ADDR + 0x3c)
+#define HIF_RX_CURR_BD_ADDR (HIF_BASE_ADDR + 0x40)
+#define HIF_RX_ALLOC (HIF_BASE_ADDR + 0x44)
+#define HIF_TX_DMA_STATUS (HIF_BASE_ADDR + 0x48)
+#define HIF_RX_DMA_STATUS (HIF_BASE_ADDR + 0x4c)
+#define HIF_INT_COAL (HIF_BASE_ADDR + 0x50)
+
+/* HIF_INT_SRC/ HIF_INT_ENABLE control bits */
+#define HIF_INT BIT(0)
+#define HIF_RXBD_INT BIT(1)
+#define HIF_RXPKT_INT BIT(2)
+#define HIF_TXBD_INT BIT(3)
+#define HIF_TXPKT_INT BIT(4)
+
+/* HIF_TX_CTRL bits */
+#define HIF_CTRL_DMA_EN BIT(0)
+#define HIF_CTRL_BDP_POLL_CTRL_EN BIT(1)
+#define HIF_CTRL_BDP_CH_START_WSTB BIT(2)
+
+/* HIF_RX_STATUS bits */
+#define BDP_CSR_RX_DMA_ACTV BIT(16)
+
+/* HIF_INT_ENABLE bits */
+#define HIF_INT_EN BIT(0)
+#define HIF_RXBD_INT_EN BIT(1)
+#define HIF_RXPKT_INT_EN BIT(2)
+#define HIF_TXBD_INT_EN BIT(3)
+#define HIF_TXPKT_INT_EN BIT(4)
+
+/* HIF_POLL_CTRL bits*/
+#define HIF_RX_POLL_CTRL_CYCLE 0x0400
+#define HIF_TX_POLL_CTRL_CYCLE 0x0400
+
+/* HIF_INT_COAL bits*/
+#define HIF_INT_COAL_ENABLE BIT(31)
+
+/* Buffer descriptor control bits */
+#define BD_CTRL_BUFLEN_MASK 0x3fff
+#define BD_BUF_LEN(x) ((x) & BD_CTRL_BUFLEN_MASK)
+#define BD_CTRL_CBD_INT_EN BIT(16)
+#define BD_CTRL_PKT_INT_EN BIT(17)
+#define BD_CTRL_LIFM BIT(18)
+#define BD_CTRL_LAST_BD BIT(19)
+#define BD_CTRL_DIR BIT(20)
+#define BD_CTRL_LMEM_CPY BIT(21) /* Valid only for HIF_NOCPY */
+#define BD_CTRL_PKT_XFER BIT(24)
+#define BD_CTRL_DESC_EN BIT(31)
+#define BD_CTRL_PARSE_DISABLE BIT(25)
+#define BD_CTRL_BRFETCH_DISABLE BIT(26)
+#define BD_CTRL_RTFETCH_DISABLE BIT(27)
+
+/* Buffer descriptor status bits*/
+#define BD_STATUS_CONN_ID(x) ((x) & 0xffff)
+#define BD_STATUS_DIR_PROC_ID BIT(16)
+#define BD_STATUS_CONN_ID_EN BIT(17)
+#define BD_STATUS_PE2PROC_ID(x) (((x) & 7) << 18)
+#define BD_STATUS_LE_DATA BIT(21)
+#define BD_STATUS_CHKSUM_EN BIT(22)
+
+/* HIF Buffer descriptor status bits */
+#define DIR_PROC_ID BIT(16)
+#define PROC_ID(id) ((id) << 18)
+
+#endif /* _HIF_H_ */
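
To make the buffer-descriptor bits above concrete, here is a hedged sketch of a TX BD control word for a single-fragment packet; the combination actually used by the driver lives in pfe_hif.c and may differ:

/* DESC_EN hands the descriptor to hardware, LIFM marks the last (and
 * here only) fragment of the frame, PKT_INT_EN requests a per-packet
 * interrupt on completion.
 */
static inline u32 hif_tx_bd_ctrl(u16 pkt_len)
{
	return BD_CTRL_DESC_EN | BD_CTRL_LIFM | BD_CTRL_PKT_INT_EN |
	       BD_BUF_LEN(pkt_len);
}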
diff --git a/src/spdk/dpdk/drivers/net/pfe/base/cbus/hif_nocpy.h b/src/spdk/dpdk/drivers/net/pfe/base/cbus/hif_nocpy.h
new file mode 100644
index 000000000..f98f1a6be
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/pfe/base/cbus/hif_nocpy.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018-2019 NXP
+ */
+
+#ifndef _HIF_NOCPY_H_
+#define _HIF_NOCPY_H_
+
+#define HIF_NOCPY_VERSION (HIF_NOCPY_BASE_ADDR + 0x00)
+#define HIF_NOCPY_TX_CTRL (HIF_NOCPY_BASE_ADDR + 0x04)
+#define HIF_NOCPY_TX_CURR_BD_ADDR (HIF_NOCPY_BASE_ADDR + 0x08)
+#define HIF_NOCPY_TX_ALLOC (HIF_NOCPY_BASE_ADDR + 0x0c)
+#define HIF_NOCPY_TX_BDP_ADDR (HIF_NOCPY_BASE_ADDR + 0x10)
+#define HIF_NOCPY_TX_STATUS (HIF_NOCPY_BASE_ADDR + 0x14)
+#define HIF_NOCPY_RX_CTRL (HIF_NOCPY_BASE_ADDR + 0x20)
+#define HIF_NOCPY_RX_BDP_ADDR (HIF_NOCPY_BASE_ADDR + 0x24)
+#define HIF_NOCPY_RX_STATUS (HIF_NOCPY_BASE_ADDR + 0x30)
+#define HIF_NOCPY_INT_SRC (HIF_NOCPY_BASE_ADDR + 0x34)
+#define HIF_NOCPY_INT_ENABLE (HIF_NOCPY_BASE_ADDR + 0x38)
+#define HIF_NOCPY_POLL_CTRL (HIF_NOCPY_BASE_ADDR + 0x3c)
+#define HIF_NOCPY_RX_CURR_BD_ADDR (HIF_NOCPY_BASE_ADDR + 0x40)
+#define HIF_NOCPY_RX_ALLOC (HIF_NOCPY_BASE_ADDR + 0x44)
+#define HIF_NOCPY_TX_DMA_STATUS (HIF_NOCPY_BASE_ADDR + 0x48)
+#define HIF_NOCPY_RX_DMA_STATUS (HIF_NOCPY_BASE_ADDR + 0x4c)
+#define HIF_NOCPY_RX_INQ0_PKTPTR (HIF_NOCPY_BASE_ADDR + 0x50)
+#define HIF_NOCPY_RX_INQ1_PKTPTR (HIF_NOCPY_BASE_ADDR + 0x54)
+#define HIF_NOCPY_TX_PORT_NO (HIF_NOCPY_BASE_ADDR + 0x60)
+#define HIF_NOCPY_LMEM_ALLOC_ADDR (HIF_NOCPY_BASE_ADDR + 0x64)
+#define HIF_NOCPY_CLASS_ADDR (HIF_NOCPY_BASE_ADDR + 0x68)
+#define HIF_NOCPY_TMU_PORT0_ADDR (HIF_NOCPY_BASE_ADDR + 0x70)
+#define HIF_NOCPY_TMU_PORT1_ADDR (HIF_NOCPY_BASE_ADDR + 0x74)
+#define HIF_NOCPY_TMU_PORT2_ADDR (HIF_NOCPY_BASE_ADDR + 0x7c)
+#define HIF_NOCPY_TMU_PORT3_ADDR (HIF_NOCPY_BASE_ADDR + 0x80)
+#define HIF_NOCPY_TMU_PORT4_ADDR (HIF_NOCPY_BASE_ADDR + 0x84)
+#define HIF_NOCPY_INT_COAL (HIF_NOCPY_BASE_ADDR + 0x90)
+
+#endif /* _HIF_NOCPY_H_ */
diff --git a/src/spdk/dpdk/drivers/net/pfe/base/cbus/tmu_csr.h b/src/spdk/dpdk/drivers/net/pfe/base/cbus/tmu_csr.h
new file mode 100644
index 000000000..fd25a166f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/pfe/base/cbus/tmu_csr.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018-2019 NXP
+ */
+
+#ifndef _TMU_CSR_H_
+#define _TMU_CSR_H_
+
+#define TMU_VERSION (TMU_CSR_BASE_ADDR + 0x000)
+#define TMU_INQ_WATERMARK (TMU_CSR_BASE_ADDR + 0x004)
+#define TMU_PHY_INQ_PKTPTR (TMU_CSR_BASE_ADDR + 0x008)
+#define TMU_PHY_INQ_PKTINFO (TMU_CSR_BASE_ADDR + 0x00c)
+#define TMU_PHY_INQ_FIFO_CNT (TMU_CSR_BASE_ADDR + 0x010)
+#define TMU_SYS_GENERIC_CONTROL (TMU_CSR_BASE_ADDR + 0x014)
+#define TMU_SYS_GENERIC_STATUS (TMU_CSR_BASE_ADDR + 0x018)
+#define TMU_SYS_GEN_CON0 (TMU_CSR_BASE_ADDR + 0x01c)
+#define TMU_SYS_GEN_CON1 (TMU_CSR_BASE_ADDR + 0x020)
+#define TMU_SYS_GEN_CON2 (TMU_CSR_BASE_ADDR + 0x024)
+#define TMU_SYS_GEN_CON3 (TMU_CSR_BASE_ADDR + 0x028)
+#define TMU_SYS_GEN_CON4 (TMU_CSR_BASE_ADDR + 0x02c)
+#define TMU_TEQ_DISABLE_DROPCHK (TMU_CSR_BASE_ADDR + 0x030)
+#define TMU_TEQ_CTRL (TMU_CSR_BASE_ADDR + 0x034)
+#define TMU_TEQ_QCFG (TMU_CSR_BASE_ADDR + 0x038)
+#define TMU_TEQ_DROP_STAT (TMU_CSR_BASE_ADDR + 0x03c)
+#define TMU_TEQ_QAVG (TMU_CSR_BASE_ADDR + 0x040)
+#define TMU_TEQ_WREG_PROB (TMU_CSR_BASE_ADDR + 0x044)
+#define TMU_TEQ_TRANS_STAT (TMU_CSR_BASE_ADDR + 0x048)
+#define TMU_TEQ_HW_PROB_CFG0 (TMU_CSR_BASE_ADDR + 0x04c)
+#define TMU_TEQ_HW_PROB_CFG1 (TMU_CSR_BASE_ADDR + 0x050)
+#define TMU_TEQ_HW_PROB_CFG2 (TMU_CSR_BASE_ADDR + 0x054)
+#define TMU_TEQ_HW_PROB_CFG3 (TMU_CSR_BASE_ADDR + 0x058)
+#define TMU_TEQ_HW_PROB_CFG4 (TMU_CSR_BASE_ADDR + 0x05c)
+#define TMU_TEQ_HW_PROB_CFG5 (TMU_CSR_BASE_ADDR + 0x060)
+#define TMU_TEQ_HW_PROB_CFG6 (TMU_CSR_BASE_ADDR + 0x064)
+#define TMU_TEQ_HW_PROB_CFG7 (TMU_CSR_BASE_ADDR + 0x068)
+#define TMU_TEQ_HW_PROB_CFG8 (TMU_CSR_BASE_ADDR + 0x06c)
+#define TMU_TEQ_HW_PROB_CFG9 (TMU_CSR_BASE_ADDR + 0x070)
+#define TMU_TEQ_HW_PROB_CFG10 (TMU_CSR_BASE_ADDR + 0x074)
+#define TMU_TEQ_HW_PROB_CFG11 (TMU_CSR_BASE_ADDR + 0x078)
+#define TMU_TEQ_HW_PROB_CFG12 (TMU_CSR_BASE_ADDR + 0x07c)
+#define TMU_TEQ_HW_PROB_CFG13 (TMU_CSR_BASE_ADDR + 0x080)
+#define TMU_TEQ_HW_PROB_CFG14 (TMU_CSR_BASE_ADDR + 0x084)
+#define TMU_TEQ_HW_PROB_CFG15 (TMU_CSR_BASE_ADDR + 0x088)
+#define TMU_TEQ_HW_PROB_CFG16 (TMU_CSR_BASE_ADDR + 0x08c)
+#define TMU_TEQ_HW_PROB_CFG17 (TMU_CSR_BASE_ADDR + 0x090)
+#define TMU_TEQ_HW_PROB_CFG18 (TMU_CSR_BASE_ADDR + 0x094)
+#define TMU_TEQ_HW_PROB_CFG19 (TMU_CSR_BASE_ADDR + 0x098)
+#define TMU_TEQ_HW_PROB_CFG20 (TMU_CSR_BASE_ADDR + 0x09c)
+#define TMU_TEQ_HW_PROB_CFG21 (TMU_CSR_BASE_ADDR + 0x0a0)
+#define TMU_TEQ_HW_PROB_CFG22 (TMU_CSR_BASE_ADDR + 0x0a4)
+#define TMU_TEQ_HW_PROB_CFG23 (TMU_CSR_BASE_ADDR + 0x0a8)
+#define TMU_TEQ_HW_PROB_CFG24 (TMU_CSR_BASE_ADDR + 0x0ac)
+#define TMU_TEQ_HW_PROB_CFG25 (TMU_CSR_BASE_ADDR + 0x0b0)
+#define TMU_TDQ_IIFG_CFG (TMU_CSR_BASE_ADDR + 0x0b4)
+/* [9:0] Scheduler Enable for each of the schedulers in the TDQ.
+ * This is a global Enable for all schedulers in PHY0
+ */
+#define TMU_TDQ0_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x0b8)
+
+#define TMU_LLM_CTRL (TMU_CSR_BASE_ADDR + 0x0bc)
+#define TMU_LLM_BASE_ADDR (TMU_CSR_BASE_ADDR + 0x0c0)
+#define TMU_LLM_QUE_LEN (TMU_CSR_BASE_ADDR + 0x0c4)
+#define TMU_LLM_QUE_HEADPTR (TMU_CSR_BASE_ADDR + 0x0c8)
+#define TMU_LLM_QUE_TAILPTR (TMU_CSR_BASE_ADDR + 0x0cc)
+#define TMU_LLM_QUE_DROPCNT (TMU_CSR_BASE_ADDR + 0x0d0)
+#define TMU_INT_EN (TMU_CSR_BASE_ADDR + 0x0d4)
+#define TMU_INT_SRC (TMU_CSR_BASE_ADDR + 0x0d8)
+#define TMU_INQ_STAT (TMU_CSR_BASE_ADDR + 0x0dc)
+#define TMU_CTRL (TMU_CSR_BASE_ADDR + 0x0e0)
+
+/* [31] Mem Access Command. 0 = Internal Memory Read, 1 = Internal memory
+ * Write [27:24] Byte Enables of the Internal memory access [23:0] Address of
+ * the internal memory. This address is used to access both the PM and DM of
+ * all the PE's
+ */
+#define TMU_MEM_ACCESS_ADDR (TMU_CSR_BASE_ADDR + 0x0e4)
+
+/* Internal Memory Access Write Data */
+#define TMU_MEM_ACCESS_WDATA (TMU_CSR_BASE_ADDR + 0x0e8)
+/* Internal Memory Access Read Data. The commands are blocked
+ * at the mem_access only
+ */
+#define TMU_MEM_ACCESS_RDATA (TMU_CSR_BASE_ADDR + 0x0ec)
+
+/* [31:0] PHY0 in queue address (must be initialized with one of the
+ * xxx_INQ_PKTPTR cbus addresses)
+ */
+#define TMU_PHY0_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0f0)
+/* [31:0] PHY1 in queue address (must be initialized with one of the
+ * xxx_INQ_PKTPTR cbus addresses)
+ */
+#define TMU_PHY1_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0f4)
+/* [31:0] PHY2 in queue address (must be initialized with one of the
+ * xxx_INQ_PKTPTR cbus addresses)
+ */
+#define TMU_PHY2_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0f8)
+/* [31:0] PHY3 in queue address (must be initialized with one of the
+ * xxx_INQ_PKTPTR cbus addresses)
+ */
+#define TMU_PHY3_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0fc)
+#define TMU_BMU_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x100)
+#define TMU_TX_CTRL (TMU_CSR_BASE_ADDR + 0x104)
+
+#define TMU_BUS_ACCESS_WDATA (TMU_CSR_BASE_ADDR + 0x108)
+#define TMU_BUS_ACCESS (TMU_CSR_BASE_ADDR + 0x10c)
+#define TMU_BUS_ACCESS_RDATA (TMU_CSR_BASE_ADDR + 0x110)
+
+#define TMU_PE_SYS_CLK_RATIO (TMU_CSR_BASE_ADDR + 0x114)
+#define TMU_PE_STATUS (TMU_CSR_BASE_ADDR + 0x118)
+#define TMU_TEQ_MAX_THRESHOLD (TMU_CSR_BASE_ADDR + 0x11c)
+/* [31:0] PHY4 in queue address (must be initialized with one of the
+ * xxx_INQ_PKTPTR cbus addresses)
+ */
+#define TMU_PHY4_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x134)
+/* [9:0] Scheduler Enable for each of the schedulers in the TDQ.
+ * This is a global Enable for all schedulers in PHY1
+ */
+#define TMU_TDQ1_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x138)
+/* [9:0] Scheduler Enable for each of the schedulers in the TDQ.
+ * This is a global Enable for all schedulers in PHY2
+ */
+#define TMU_TDQ2_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x13c)
+/* [9:0] Scheduler Enable for each of the schedulers in the TDQ.
+ * This is a global Enable for all schedulers in PHY3
+ */
+#define TMU_TDQ3_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x140)
+#define TMU_BMU_BUF_SIZE (TMU_CSR_BASE_ADDR + 0x144)
+/* [31:0] PHY5 in queue address (must be initialized with one of the
+ * xxx_INQ_PKTPTR cbus addresses)
+ */
+#define TMU_PHY5_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x148)
+
+#define SW_RESET BIT(0) /* Global software reset */
+#define INQ_RESET BIT(2)
+#define TEQ_RESET BIT(3)
+#define TDQ_RESET BIT(4)
+#define PE_RESET BIT(5)
+#define MEM_INIT BIT(6)
+#define MEM_INIT_DONE BIT(7)
+#define LLM_INIT BIT(8)
+#define LLM_INIT_DONE BIT(9)
+#define ECC_MEM_INIT_DONE BIT(10)
+
+struct tmu_cfg {
+ u32 pe_sys_clk_ratio;
+ unsigned long llm_base_addr;
+ u32 llm_queue_len;
+};
+
+/* Not HW related for pfe_ctrl / pfe common defines */
+#define DEFAULT_MAX_QDEPTH 80
+#define DEFAULT_Q0_QDEPTH 511 /*We keep one large queue for host tx qos */
+#define DEFAULT_TMU3_QDEPTH 127
+
+#endif /* _TMU_CSR_H_ */
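
The repeated "must be initialized with one of the xxx_INQ_PKTPTR cbus addresses" notes above translate into writes of the following shape (writel() and CBUS_VIRT_TO_PFE() come from base/pfe.h; pairing PHY0 with EGPI1 is an assumption):

/* Sketch only: point TMU PHY0's in-queue at EGPI1's packet-pointer FIFO,
 * translated into the address map the PEs see.
 */
static void tmu_phy0_inq_setup(void)
{
	writel(CBUS_VIRT_TO_PFE(EGPI1_BASE_ADDR + GPI_INQ_PKTPTR),
	       TMU_PHY0_INQ_ADDR);
}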
diff --git a/src/spdk/dpdk/drivers/net/pfe/base/cbus/util_csr.h b/src/spdk/dpdk/drivers/net/pfe/base/cbus/util_csr.h
new file mode 100644
index 000000000..7a4124ab6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/pfe/base/cbus/util_csr.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018-2019 NXP
+ */
+
+#ifndef _UTIL_CSR_H_
+#define _UTIL_CSR_H_
+
+#define UTIL_VERSION (UTIL_CSR_BASE_ADDR + 0x000)
+#define UTIL_TX_CTRL (UTIL_CSR_BASE_ADDR + 0x004)
+#define UTIL_INQ_PKTPTR (UTIL_CSR_BASE_ADDR + 0x010)
+
+#define UTIL_HDR_SIZE (UTIL_CSR_BASE_ADDR + 0x014)
+
+#define UTIL_PE0_QB_DM_ADDR0 (UTIL_CSR_BASE_ADDR + 0x020)
+#define UTIL_PE0_QB_DM_ADDR1 (UTIL_CSR_BASE_ADDR + 0x024)
+#define UTIL_PE0_RO_DM_ADDR0 (UTIL_CSR_BASE_ADDR + 0x060)
+#define UTIL_PE0_RO_DM_ADDR1 (UTIL_CSR_BASE_ADDR + 0x064)
+
+#define UTIL_MEM_ACCESS_ADDR (UTIL_CSR_BASE_ADDR + 0x100)
+#define UTIL_MEM_ACCESS_WDATA (UTIL_CSR_BASE_ADDR + 0x104)
+#define UTIL_MEM_ACCESS_RDATA (UTIL_CSR_BASE_ADDR + 0x108)
+
+#define UTIL_TM_INQ_ADDR (UTIL_CSR_BASE_ADDR + 0x114)
+#define UTIL_PE_STATUS (UTIL_CSR_BASE_ADDR + 0x118)
+
+#define UTIL_PE_SYS_CLK_RATIO (UTIL_CSR_BASE_ADDR + 0x200)
+#define UTIL_AFULL_THRES (UTIL_CSR_BASE_ADDR + 0x204)
+#define UTIL_GAP_BETWEEN_READS (UTIL_CSR_BASE_ADDR + 0x208)
+#define UTIL_MAX_BUF_CNT (UTIL_CSR_BASE_ADDR + 0x20c)
+#define UTIL_TSQ_FIFO_THRES (UTIL_CSR_BASE_ADDR + 0x210)
+#define UTIL_TSQ_MAX_CNT (UTIL_CSR_BASE_ADDR + 0x214)
+#define UTIL_IRAM_DATA_0 (UTIL_CSR_BASE_ADDR + 0x218)
+#define UTIL_IRAM_DATA_1 (UTIL_CSR_BASE_ADDR + 0x21c)
+#define UTIL_IRAM_DATA_2 (UTIL_CSR_BASE_ADDR + 0x220)
+#define UTIL_IRAM_DATA_3 (UTIL_CSR_BASE_ADDR + 0x224)
+
+#define UTIL_BUS_ACCESS_ADDR (UTIL_CSR_BASE_ADDR + 0x228)
+#define UTIL_BUS_ACCESS_WDATA (UTIL_CSR_BASE_ADDR + 0x22c)
+#define UTIL_BUS_ACCESS_RDATA (UTIL_CSR_BASE_ADDR + 0x230)
+
+#define UTIL_INQ_AFULL_THRES (UTIL_CSR_BASE_ADDR + 0x234)
+
+struct util_cfg {
+ u32 pe_sys_clk_ratio;
+};
+
+#endif /* _UTIL_CSR_H_ */
diff --git a/src/spdk/dpdk/drivers/net/pfe/base/pfe.h b/src/spdk/dpdk/drivers/net/pfe/base/pfe.h
new file mode 100644
index 000000000..72741ba4a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/pfe/base/pfe.h
@@ -0,0 +1,422 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018-2019 NXP
+ */
+
+#ifndef _PFE_H_
+#define _PFE_H_
+
+#include "cbus.h"
+
+/*
+ * WARNING: non atomic version.
+ */
+static inline void
+set_bit(unsigned long nr, void *addr)
+{
+ int *m = ((int *)addr) + (nr >> 5);
+ *m |= 1 << (nr & 31);
+}
+
+static inline int
+test_bit(int nr, const void *addr)
+{
+ return (1UL & (((const int *)addr)[nr >> 5] >> (nr & 31))) != 0UL;
+}
+
+/*
+ * WARNING: non atomic version.
+ */
+static inline void
+clear_bit(unsigned long nr, void *addr)
+{
+ int *m = ((int *)addr) + (nr >> 5);
+ *m &= ~(1 << (nr & 31));
+}
+
+/*
+ * WARNING: non atomic version.
+ */
+static inline int
+test_and_clear_bit(unsigned long nr, void *addr)
+{
+ unsigned long mask = 1 << (nr & 0x1f);
+ int *m = ((int *)addr) + (nr >> 5);
+ int old = *m;
+
+ *m = old & ~mask;
+ return (old & mask) != 0;
+}
+
+/*
+ * WARNING: non atomic version.
+ */
+static inline int
+test_and_set_bit(unsigned long nr, void *addr)
+{
+ unsigned long mask = 1 << (nr & 0x1f);
+ int *m = ((int *)addr) + (nr >> 5);
+ int old = *m;
+
+ *m = old | mask;
+ return (old & mask) != 0;
+}
+
+#ifndef BIT
+#define BIT(nr) (1UL << (nr))
+#endif
+#define CLASS_DMEM_BASE_ADDR(i) (0x00000000 | ((i) << 20))
+/*
+ * Only valid for mem access register interface
+ */
+#define CLASS_IMEM_BASE_ADDR(i) (0x00000000 | ((i) << 20))
+#define CLASS_DMEM_SIZE 0x00002000
+#define CLASS_IMEM_SIZE 0x00008000
+
+#define TMU_DMEM_BASE_ADDR(i) (0x00000000 + ((i) << 20))
+/*
+ * Only valid for mem access register interface
+ */
+#define TMU_IMEM_BASE_ADDR(i) (0x00000000 + ((i) << 20))
+#define TMU_DMEM_SIZE 0x00000800
+#define TMU_IMEM_SIZE 0x00002000
+
+#define UTIL_DMEM_BASE_ADDR 0x00000000
+#define UTIL_DMEM_SIZE 0x00002000
+
+#define PE_LMEM_BASE_ADDR 0xc3010000
+#define PE_LMEM_SIZE 0x8000
+#define PE_LMEM_END (PE_LMEM_BASE_ADDR + PE_LMEM_SIZE)
+
+#define DMEM_BASE_ADDR 0x00000000
+#define DMEM_SIZE 0x2000 /* TMU has less... */
+#define DMEM_END (DMEM_BASE_ADDR + DMEM_SIZE)
+
+#define PMEM_BASE_ADDR 0x00010000
+#define PMEM_SIZE 0x8000 /* TMU has less... */
+#define PMEM_END (PMEM_BASE_ADDR + PMEM_SIZE)
+
+#define writel(v, p) ({*(volatile unsigned int *)(p) = (v); })
+#define readl(p) (*(const volatile unsigned int *)(p))
+
+/* These check memory ranges from PE point of view/memory map */
+#define IS_DMEM(addr, len) \
+ ({ typeof(addr) addr_ = (addr); \
+ ((unsigned long)(addr_) >= DMEM_BASE_ADDR) && \
+ (((unsigned long)(addr_) + (len)) <= DMEM_END); })
+
+#define IS_PMEM(addr, len) \
+ ({ typeof(addr) addr_ = (addr); \
+ ((unsigned long)(addr_) >= PMEM_BASE_ADDR) && \
+ (((unsigned long)(addr_) + (len)) <= PMEM_END); })
+
+#define IS_PE_LMEM(addr, len) \
+ ({ typeof(addr) addr_ = (addr); \
+ ((unsigned long)(addr_) >= \
+ PE_LMEM_BASE_ADDR) && \
+ (((unsigned long)(addr_) + \
+ (len)) <= PE_LMEM_END); })
+
+#define IS_PFE_LMEM(addr, len) \
+ ({ typeof(addr) addr_ = (addr); \
+ ((unsigned long)(addr_) >= \
+ CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR)) && \
+ (((unsigned long)(addr_) + (len)) <= \
+ CBUS_VIRT_TO_PFE(LMEM_END)); })
+
+#define __IS_PHYS_DDR(addr, len) \
+ ({ typeof(addr) addr_ = (addr); \
+ ((unsigned long)(addr_) >= \
+ DDR_PHYS_BASE_ADDR) && \
+ (((unsigned long)(addr_) + (len)) <= \
+ DDR_PHYS_END); })
+
+#define IS_PHYS_DDR(addr, len) __IS_PHYS_DDR(DDR_PFE_TO_PHYS(addr), len)
+
+/*
+ * If using a run-time virtual address for the cbus base address use this code
+ */
+extern void *cbus_base_addr;
+extern void *ddr_base_addr;
+extern unsigned long ddr_phys_base_addr;
+extern unsigned int ddr_size;
+
+#define CBUS_BASE_ADDR cbus_base_addr
+#define DDR_PHYS_BASE_ADDR ddr_phys_base_addr
+#define DDR_BASE_ADDR ddr_base_addr
+#define DDR_SIZE ddr_size
+
+#define DDR_PHYS_END (DDR_PHYS_BASE_ADDR + DDR_SIZE)
+
+#define LS1012A_PFE_RESET_WA /*
+ * PFE doesn't have a global reset, so re-init
+ * should take care of a few things to make PFE
+ * functional after reset
+ */
+#define PFE_CBUS_PHYS_BASE_ADDR 0xc0000000 /* CBUS physical base address
+ * as seen by PE's.
+ */
+/* CBUS physical base address as seen by PE's. */
+#define PFE_CBUS_PHYS_BASE_ADDR_FROM_PFE 0xc0000000
+
+#define DDR_PHYS_TO_PFE(p) (((unsigned long)(p)) & 0x7FFFFFFF)
+#define DDR_PFE_TO_PHYS(p) (((unsigned long)(p)) | 0x80000000)
+#define CBUS_PHYS_TO_PFE(p) (((p) - PFE_CBUS_PHYS_BASE_ADDR) + \
+ PFE_CBUS_PHYS_BASE_ADDR_FROM_PFE)
+/* Translates to PFE address map */
+
+#define DDR_PHYS_TO_VIRT(p) (((p) - DDR_PHYS_BASE_ADDR) + DDR_BASE_ADDR)
+#define DDR_VIRT_TO_PHYS(v) (((v) - DDR_BASE_ADDR) + DDR_PHYS_BASE_ADDR)
+#define DDR_VIRT_TO_PFE(p) (DDR_PHYS_TO_PFE(DDR_VIRT_TO_PHYS(p)))
+
+#define CBUS_VIRT_TO_PFE(v) (((v) - CBUS_BASE_ADDR) + \
+ PFE_CBUS_PHYS_BASE_ADDR)
+#define CBUS_PFE_TO_VIRT(p) (((unsigned long)(p) - \
+ PFE_CBUS_PHYS_BASE_ADDR) + CBUS_BASE_ADDR)
+
+/* The below part of the code is used by the QoS control driver from the host */
+#define TMU_APB_BASE_ADDR 0xc1000000 /* TMU base address seen by
+ * pe's
+ */
+
+enum {
+ CLASS0_ID = 0,
+ CLASS1_ID,
+ CLASS2_ID,
+ CLASS3_ID,
+ CLASS4_ID,
+ CLASS5_ID,
+ TMU0_ID,
+ TMU1_ID,
+ TMU2_ID,
+ TMU3_ID,
+#if !defined(CONFIG_FSL_PFE_UTIL_DISABLED)
+ UTIL_ID,
+#endif
+ MAX_PE
+};
+
+#define CLASS_MASK (BIT(CLASS0_ID) | BIT(CLASS1_ID) |\
+ BIT(CLASS2_ID) | BIT(CLASS3_ID) |\
+ BIT(CLASS4_ID) | BIT(CLASS5_ID))
+#define CLASS_MAX_ID CLASS5_ID
+
+#define TMU_MASK (BIT(TMU0_ID) | BIT(TMU1_ID) |\
+ BIT(TMU3_ID))
+
+#define TMU_MAX_ID TMU3_ID
+
+#if !defined(CONFIG_FSL_PFE_UTIL_DISABLED)
+#define UTIL_MASK BIT(UTIL_ID)
+#endif
+
+struct pe_status {
+ u32 cpu_state;
+ u32 activity_counter;
+ u32 rx;
+ union {
+ u32 tx;
+ u32 tmu_qstatus;
+ };
+ u32 drop;
+#if defined(CFG_PE_DEBUG)
+ u32 debug_indicator;
+ u32 debug[16];
+#endif
+} __rte_aligned(16);
+
+struct pe_sync_mailbox {
+ u32 stop;
+ u32 stopped;
+};
+
+/* Drop counter definitions */
+
+#define CLASS_NUM_DROP_COUNTERS 13
+#define UTIL_NUM_DROP_COUNTERS 8
+
+/* PE information.
+ * Structure containing PE-specific information. It is used to create
+ * generic C functions common to all PE's.
+ * Before using the library functions this structure needs to be initialized
+ * with the different registers' virtual addresses
+ * (according to the ARM MMU mapping). The default initialization supports a
+ * virtual == physical mapping.
+ */
+struct pe_info {
+ u32 dmem_base_addr; /* PE's dmem base address */
+ u32 pmem_base_addr; /* PE's pmem base address */
+ u32 pmem_size; /* PE's pmem size */
+
+ void *mem_access_wdata; /* PE's _MEM_ACCESS_WDATA register
+ * address
+ */
+ void *mem_access_addr; /* PE's _MEM_ACCESS_ADDR register
+ * address
+ */
+ void *mem_access_rdata; /* PE's _MEM_ACCESS_RDATA register
+ * address
+ */
+};
+
+void pe_lmem_read(u32 *dst, u32 len, u32 offset);
+void pe_lmem_write(u32 *src, u32 len, u32 offset);
+
+void pe_dmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len);
+void pe_pmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len);
+
+u32 pe_pmem_read(int id, u32 addr, u8 size);
+
+void pe_dmem_write(int id, u32 val, u32 addr, u8 size);
+u32 pe_dmem_read(int id, u32 addr, u8 size);
+void class_pe_lmem_memcpy_to32(u32 dst, const void *src, unsigned int len);
+void class_pe_lmem_memset(u32 dst, int val, unsigned int len);
+void class_bus_write(u32 val, u32 addr, u8 size);
+u32 class_bus_read(u32 addr, u8 size);
+
+#define class_bus_readl(addr) class_bus_read(addr, 4)
+#define class_bus_readw(addr) class_bus_read(addr, 2)
+#define class_bus_readb(addr) class_bus_read(addr, 1)
+
+#define class_bus_writel(val, addr) class_bus_write(val, addr, 4)
+#define class_bus_writew(val, addr) class_bus_write(val, addr, 2)
+#define class_bus_writeb(val, addr) class_bus_write(val, addr, 1)
+
+#define pe_dmem_readl(id, addr) pe_dmem_read(id, addr, 4)
+#define pe_dmem_readw(id, addr) pe_dmem_read(id, addr, 2)
+#define pe_dmem_readb(id, addr) pe_dmem_read(id, addr, 1)
+
+#define pe_dmem_writel(id, val, addr) pe_dmem_write(id, val, addr, 4)
+#define pe_dmem_writew(id, val, addr) pe_dmem_write(id, val, addr, 2)
+#define pe_dmem_writeb(id, val, addr) pe_dmem_write(id, val, addr, 1)
+
+/*int pe_load_elf_section(int id, const void *data, elf32_shdr *shdr); */
+//int pe_load_elf_section(int id, const void *data, struct elf32_shdr *shdr,
+// struct device *dev);
+
+void pfe_lib_init(void *cbus_base, void *ddr_base, unsigned long ddr_phys_base,
+ unsigned int ddr_size);
+void bmu_init(void *base, struct BMU_CFG *cfg);
+void bmu_reset(void *base);
+void bmu_enable(void *base);
+void bmu_disable(void *base);
+void bmu_set_config(void *base, struct BMU_CFG *cfg);
+
+/*
+ * An enumerated type for loopback values. This can be one of three values: no
+ * loopback (normal operation), local loopback through the internal loopback
+ * module of the MAC, or PHY loopback through the external PHY.
+ */
+#ifndef __MAC_LOOP_ENUM__
+#define __MAC_LOOP_ENUM__
+enum mac_loop {LB_NONE, LB_EXT, LB_LOCAL};
+#endif
+
+void gemac_init(void *base, void *config);
+void gemac_disable_rx_checksum_offload(void *base);
+void gemac_enable_rx_checksum_offload(void *base);
+void gemac_set_mdc_div(void *base, int mdc_div);
+void gemac_set_speed(void *base, enum mac_speed gem_speed);
+void gemac_set_duplex(void *base, int duplex);
+void gemac_set_mode(void *base, int mode);
+void gemac_enable(void *base);
+void gemac_tx_disable(void *base);
+void gemac_tx_enable(void *base);
+void gemac_disable(void *base);
+void gemac_reset(void *base);
+void gemac_set_address(void *base, struct spec_addr *addr);
+struct spec_addr gemac_get_address(void *base);
+void gemac_set_loop(void *base, enum mac_loop gem_loop);
+void gemac_set_laddr1(void *base, struct pfe_mac_addr *address);
+void gemac_set_laddr2(void *base, struct pfe_mac_addr *address);
+void gemac_set_laddr3(void *base, struct pfe_mac_addr *address);
+void gemac_set_laddr4(void *base, struct pfe_mac_addr *address);
+void gemac_set_laddrN(void *base, struct pfe_mac_addr *address,
+ unsigned int entry_index);
+void gemac_clear_laddr1(void *base);
+void gemac_clear_laddr2(void *base);
+void gemac_clear_laddr3(void *base);
+void gemac_clear_laddr4(void *base);
+void gemac_clear_laddrN(void *base, unsigned int entry_index);
+struct pfe_mac_addr gemac_get_hash(void *base);
+void gemac_set_hash(void *base, struct pfe_mac_addr *hash);
+struct pfe_mac_addr gem_get_laddr1(void *base);
+struct pfe_mac_addr gem_get_laddr2(void *base);
+struct pfe_mac_addr gem_get_laddr3(void *base);
+struct pfe_mac_addr gem_get_laddr4(void *base);
+struct pfe_mac_addr gem_get_laddrN(void *base, unsigned int entry_index);
+void gemac_set_config(void *base, struct gemac_cfg *cfg);
+void gemac_allow_broadcast(void *base);
+void gemac_no_broadcast(void *base);
+void gemac_enable_1536_rx(void *base);
+void gemac_disable_1536_rx(void *base);
+int gemac_set_rx(void *base, int mtu);
+void gemac_enable_rx_jmb(void *base);
+void gemac_disable_rx_jmb(void *base);
+void gemac_enable_stacked_vlan(void *base);
+void gemac_disable_stacked_vlan(void *base);
+void gemac_enable_pause_rx(void *base);
+void gemac_disable_pause_rx(void *base);
+void gemac_enable_pause_tx(void *base);
+void gemac_disable_pause_tx(void *base);
+void gemac_enable_copy_all(void *base);
+void gemac_disable_copy_all(void *base);
+void gemac_set_bus_width(void *base, int width);
+void gemac_set_wol(void *base, u32 wol_conf);
+
+void gpi_init(void *base, struct gpi_cfg *cfg);
+void gpi_reset(void *base);
+void gpi_enable(void *base);
+void gpi_disable(void *base);
+void gpi_set_config(void *base, struct gpi_cfg *cfg);
+
+void hif_init(void);
+void hif_tx_enable(void);
+void hif_tx_disable(void);
+void hif_rx_enable(void);
+void hif_rx_disable(void);
+
+/* Get Chip Revision level
+ *
+ */
+static inline unsigned int CHIP_REVISION(void)
+{
+ /* For LS1012A, always return 1 */
+ return 1;
+}
+
+/* Start HIF rx DMA
+ *
+ */
+static inline void hif_rx_dma_start(void)
+{
+ writel(HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB, HIF_RX_CTRL);
+}
+
+/* Start HIF Tx DMA */
+static inline void hif_tx_dma_start(void)
+{
+ writel(HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB, HIF_TX_CTRL);
+}
+
+static inline void *pfe_mem_ptov(phys_addr_t paddr)
+{
+ return rte_mem_iova2virt(paddr);
+}
+
+static phys_addr_t pfe_mem_vtop(uint64_t vaddr) __rte_unused;
+
+static inline phys_addr_t pfe_mem_vtop(uint64_t vaddr)
+{
+ const struct rte_memseg *memseg;
+
+ memseg = rte_mem_virt2memseg((void *)(uintptr_t)vaddr, NULL);
+ if (memseg)
+ return memseg->phys_addr + RTE_PTR_DIFF(vaddr, memseg->addr);
+
+	return (phys_addr_t)0;
+}
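+
+/*
+ * Illustrative round trip (a sketch, not part of the driver): for any
+ * address inside a DPDK memseg the two helpers above are inverses of
+ * each other:
+ *
+ *	void *va = pfe_mem_ptov(paddr);
+ *	phys_addr_t pa = pfe_mem_vtop((uint64_t)(uintptr_t)va);
+ *	(pa == paddr)
+ *
+ * An address outside every registered memseg translates to 0.
+ */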
+
+#endif /* _PFE_H_ */
diff --git a/src/spdk/dpdk/drivers/net/pfe/meson.build b/src/spdk/dpdk/drivers/net/pfe/meson.build
new file mode 100644
index 000000000..3e1a228a3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/pfe/meson.build
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2019 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+deps += ['common_dpaax']
+
+sources = files('pfe_ethdev.c',
+ 'pfe_hal.c',
+ 'pfe_hif_lib.c',
+ 'pfe_hif.c')
+
+if cc.has_argument('-Wno-pointer-arith')
+ cflags += '-Wno-pointer-arith'
+endif
+
+includes += include_directories('base')
diff --git a/src/spdk/dpdk/drivers/net/pfe/pfe_eth.h b/src/spdk/dpdk/drivers/net/pfe/pfe_eth.h
new file mode 100644
index 000000000..9820d7bf4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/pfe/pfe_eth.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018-2019 NXP
+ */
+
+#ifndef _PFE_ETH_H_
+#define _PFE_ETH_H_
+
+#include <compat.h>
+#include <rte_ethdev.h>
+#include <rte_ethdev_vdev.h>
+
+#define ETH_ALEN 6
+#define GEMAC_NO_PHY BIT(0)
+
+#define PFE_SOC_ID_FILE "/sys/devices/soc0/soc_id"
+extern unsigned int pfe_svr;
+#define SVR_LS1012A_REV2 0x87040020
+#define SVR_LS1012A_REV1 0x87040010
+
+#define PFE_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
+#define MAX_MTU_ON_REV1 1878
+struct ls1012a_eth_platform_data {
+ /* device specific information */
+ u32 device_flags;
+ char name[16];
+
+ /* board specific information */
+ u32 mii_config;
+ u32 phy_flags;
+ u32 gem_id;
+ u32 bus_id;
+ u32 phy_id;
+ u32 mdio_muxval;
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct ls1012a_mdio_platform_data {
+ int enabled;
+ int irq[32];
+ u32 phy_mask;
+ int mdc_div;
+};
+
+struct ls1012a_pfe_platform_data {
+ struct ls1012a_eth_platform_data ls1012a_eth_pdata[3];
+ struct ls1012a_mdio_platform_data ls1012a_mdio_pdata[3];
+};
+
+#define EMAC_TXQ_CNT 16
+#define EMAC_TXQ_DEPTH (HIF_TX_DESC_NT)
+
+#define JUMBO_FRAME_SIZE 10258
+#define EMAC_RXQ_CNT 1
+#define EMAC_RXQ_DEPTH HIF_RX_DESC_NT
+
+struct pfe_eth_priv_s {
+ struct pfe *pfe;
+ struct hif_client_s client;
+ int low_tmu_q;
+ int high_tmu_q;
+ struct rte_eth_dev *ndev;
+ struct rte_eth_stats stats;
+ int id;
+ int promisc;
+ int link_fd;
+
+ spinlock_t lock; /* protect member variables */
+ void *EMAC_baseaddr;
+ /* This points to the EMAC base from where we access PHY */
+ void *PHY_baseaddr;
+ void *GPI_baseaddr;
+
+ struct ls1012a_eth_platform_data *einfo;
+};
+
+#endif /* _PFE_ETH_H_ */
diff --git a/src/spdk/dpdk/drivers/net/pfe/pfe_ethdev.c b/src/spdk/dpdk/drivers/net/pfe/pfe_ethdev.c
new file mode 100644
index 000000000..b1de866d3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/pfe/pfe_ethdev.c
@@ -0,0 +1,1190 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018-2019 NXP
+ */
+
+#include <sys/ioctl.h>
+#include <sys/epoll.h>
+#include <rte_kvargs.h>
+#include <rte_ethdev_vdev.h>
+#include <rte_bus_vdev.h>
+#include <rte_ether.h>
+#include <dpaa_of.h>
+
+#include "pfe_logs.h"
+#include "pfe_mod.h"
+
+#define PFE_MAX_MACS 1 /* hardware supports up to 4 MACs per IF; the PMD exposes 1 */
+#define PFE_VDEV_GEM_ID_ARG "intf"
+
+struct pfe_vdev_init_params {
+ int8_t gem_id;
+};
+static struct pfe *g_pfe;
+/* Supported Rx offloads */
+static uint64_t dev_rx_offloads_sup =
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+
+/* Supported Tx offloads */
+static uint64_t dev_tx_offloads_sup =
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM;
+
+/* TODO: make pfe_svr a runtime option.
+ * Driver should be able to get the SVR
+ * information from HW.
+ */
+unsigned int pfe_svr = SVR_LS1012A_REV1;
+static void *cbus_emac_base[3];
+static void *cbus_gpi_base[3];
+
+int pfe_logtype_pmd;
+
+/* pfe_gemac_init
+ */
+static int
+pfe_gemac_init(struct pfe_eth_priv_s *priv)
+{
+ struct gemac_cfg cfg;
+
+ cfg.speed = SPEED_1000M;
+ cfg.duplex = DUPLEX_FULL;
+
+ gemac_set_config(priv->EMAC_baseaddr, &cfg);
+ gemac_allow_broadcast(priv->EMAC_baseaddr);
+ gemac_enable_1536_rx(priv->EMAC_baseaddr);
+ gemac_enable_stacked_vlan(priv->EMAC_baseaddr);
+ gemac_enable_pause_rx(priv->EMAC_baseaddr);
+ gemac_set_bus_width(priv->EMAC_baseaddr, 64);
+ gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);
+
+ return 0;
+}
+
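+/*
+ * Read the SoC revision from sysfs. The soc_id file is expected to hold a
+ * string of the form "svr:0x87040010" (this exact format is an assumption;
+ * the %x conversion below accepts the value with or without a 0x prefix).
+ */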
+static void
+pfe_soc_version_get(void)
+{
+ FILE *svr_file = NULL;
+ unsigned int svr_ver = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ svr_file = fopen(PFE_SOC_ID_FILE, "r");
+ if (!svr_file) {
+ PFE_PMD_ERR("Unable to open SoC device");
+ return; /* Not supported on this infra */
+ }
+
+ if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
+ pfe_svr = svr_ver;
+ else
+ PFE_PMD_ERR("Unable to read SoC device");
+
+ fclose(svr_file);
+}
+
+static int pfe_eth_start(struct pfe_eth_priv_s *priv)
+{
+ gpi_enable(priv->GPI_baseaddr);
+ gemac_enable(priv->EMAC_baseaddr);
+
+ return 0;
+}
+
+static void
+pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num,
+		  int __rte_unused from_tx, __rte_unused int n_desc)
+{
+ struct rte_mbuf *mbuf;
+ unsigned int flags;
+
+ /* Clean HIF and client queue */
+ while ((mbuf = hif_lib_tx_get_next_complete(&priv->client,
+ tx_q_num, &flags,
+ HIF_TX_DESC_NT))) {
+		/* mbuf is guaranteed non-NULL by the loop condition */
+		mbuf->next = NULL;
+		mbuf->nb_segs = 1;
+		rte_pktmbuf_free(mbuf);
+ }
+}
+
+static void
+pfe_eth_flush_tx(struct pfe_eth_priv_s *priv)
+{
+ unsigned int ii;
+
+ for (ii = 0; ii < emac_txq_cnt; ii++)
+ pfe_eth_flush_txQ(priv, ii, 0, 0);
+}
+
+static int
+pfe_eth_event_handler(void *data, int event, __rte_unused int qno)
+{
+ struct pfe_eth_priv_s *priv = data;
+
+ switch (event) {
+ case EVENT_TXDONE_IND:
+ pfe_eth_flush_tx(priv);
+ hif_lib_event_handler_start(&priv->client, EVENT_TXDONE_IND, 0);
+ break;
+ case EVENT_HIGH_RX_WM:
+ default:
+ break;
+ }
+
+ return 0;
+}
+
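+/*
+ * Rx burst used when the PFE_INTR_SUPPORT environment variable is set:
+ * identical to pfe_recv_pkts() below, except that when neither the HIF
+ * ring nor the client queue produced any work it re-arms the HIF Rx
+ * packet interrupt and dozes on the HIF epoll fd for up to 1 ms instead
+ * of spinning.
+ */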
+static uint16_t
+pfe_recv_pkts_on_intr(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct hif_client_rx_queue *queue = rxq;
+ struct pfe_eth_priv_s *priv = queue->priv;
+ struct epoll_event epoll_ev;
+ uint64_t ticks = 1; /* 1 msec */
+ int ret;
+ int have_something, work_done;
+
+#define RESET_STATUS (HIF_INT | HIF_RXPKT_INT)
+
+ /*TODO can we remove this cleanup from here?*/
+ pfe_tx_do_cleanup(priv->pfe);
+ have_something = pfe_hif_rx_process(priv->pfe, nb_pkts);
+ work_done = hif_lib_receive_pkt(rxq, priv->pfe->hif.shm->pool,
+ rx_pkts, nb_pkts);
+
+ if (!have_something || !work_done) {
+ writel(RESET_STATUS, HIF_INT_SRC);
+ writel(readl(HIF_INT_ENABLE) | HIF_RXPKT_INT, HIF_INT_ENABLE);
+ ret = epoll_wait(priv->pfe->hif.epoll_fd, &epoll_ev, 1, ticks);
+ if (ret < 0 && errno != EINTR)
+			PFE_PMD_ERR("epoll_wait failed with %d\n", errno);
+ }
+
+ return work_done;
+}
+
+static uint16_t
+pfe_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct hif_client_rx_queue *queue = rxq;
+ struct pfe_eth_priv_s *priv = queue->priv;
+ struct rte_mempool *pool;
+
+ /*TODO can we remove this cleanup from here?*/
+ pfe_tx_do_cleanup(priv->pfe);
+ pfe_hif_rx_process(priv->pfe, nb_pkts);
+ pool = priv->pfe->hif.shm->pool;
+
+ return hif_lib_receive_pkt(rxq, pool, rx_pkts, nb_pkts);
+}
+
+static uint16_t
+pfe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct hif_client_tx_queue *queue = tx_queue;
+ struct pfe_eth_priv_s *priv = queue->priv;
+ struct rte_eth_stats *stats = &priv->stats;
+ int i;
+
+ for (i = 0; i < nb_pkts; i++) {
+ if (tx_pkts[i]->nb_segs > 1) {
+ struct rte_mbuf *mbuf;
+ int j;
+
+ hif_lib_xmit_pkt(&priv->client, queue->queue_id,
+ (void *)(size_t)rte_pktmbuf_iova(tx_pkts[i]),
+ tx_pkts[i]->buf_addr + tx_pkts[i]->data_off,
+ tx_pkts[i]->data_len, 0x0, HIF_FIRST_BUFFER,
+ tx_pkts[i]);
+
+ mbuf = tx_pkts[i]->next;
+ for (j = 0; j < (tx_pkts[i]->nb_segs - 2); j++) {
+ hif_lib_xmit_pkt(&priv->client, queue->queue_id,
+ (void *)(size_t)rte_pktmbuf_iova(mbuf),
+ mbuf->buf_addr + mbuf->data_off,
+ mbuf->data_len,
+ 0x0, 0x0, mbuf);
+ mbuf = mbuf->next;
+ }
+
+ hif_lib_xmit_pkt(&priv->client, queue->queue_id,
+ (void *)(size_t)rte_pktmbuf_iova(mbuf),
+ mbuf->buf_addr + mbuf->data_off,
+ mbuf->data_len,
+ 0x0, HIF_LAST_BUFFER | HIF_DATA_VALID,
+ mbuf);
+ } else {
+ hif_lib_xmit_pkt(&priv->client, queue->queue_id,
+ (void *)(size_t)rte_pktmbuf_iova(tx_pkts[i]),
+ tx_pkts[i]->buf_addr + tx_pkts[i]->data_off,
+ tx_pkts[i]->pkt_len, 0 /*ctrl*/,
+ HIF_FIRST_BUFFER | HIF_LAST_BUFFER |
+ HIF_DATA_VALID,
+ tx_pkts[i]);
+ }
+ stats->obytes += tx_pkts[i]->pkt_len;
+ hif_tx_dma_start();
+ }
+ stats->opackets += nb_pkts;
+ pfe_tx_do_cleanup(priv->pfe);
+
+ return nb_pkts;
+}
+
+static uint16_t
+pfe_dummy_xmit_pkts(__rte_unused void *tx_queue,
+ __rte_unused struct rte_mbuf **tx_pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+static uint16_t
+pfe_dummy_recv_pkts(__rte_unused void *rxq,
+ __rte_unused struct rte_mbuf **rx_pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+static int
+pfe_eth_open(struct rte_eth_dev *dev)
+{
+ struct pfe_eth_priv_s *priv = dev->data->dev_private;
+ struct hif_client_s *client;
+ struct hif_shm *hif_shm;
+ int rc;
+
+ /* Register client driver with HIF */
+ client = &priv->client;
+
+ if (client->pfe) {
+ hif_shm = client->pfe->hif.shm;
+		/* TODO: remove the code in the if-block below once proper
+		 * cleanup is added in eth_close
+		 */
+ if (!test_bit(PFE_CL_GEM0 + priv->id,
+ &hif_shm->g_client_status[0])) {
+ /* Register client driver with HIF */
+ memset(client, 0, sizeof(*client));
+ client->id = PFE_CL_GEM0 + priv->id;
+ client->tx_qn = emac_txq_cnt;
+ client->rx_qn = EMAC_RXQ_CNT;
+ client->priv = priv;
+ client->pfe = priv->pfe;
+ client->port_id = dev->data->port_id;
+ client->event_handler = pfe_eth_event_handler;
+
+ client->tx_qsize = EMAC_TXQ_DEPTH;
+ client->rx_qsize = EMAC_RXQ_DEPTH;
+
+ rc = hif_lib_client_register(client);
+ if (rc) {
+ PFE_PMD_ERR("hif_lib_client_register(%d)"
+ " failed", client->id);
+ goto err0;
+ }
+ } else {
+			/* Free any packets that are already queued */
+ int ret = 0;
+ struct rte_mbuf *rx_pkts[32];
+ /* TODO multiqueue support */
+ ret = hif_lib_receive_pkt(&client->rx_q[0],
+ hif_shm->pool, rx_pkts, 32);
+ while (ret) {
+ int i;
+ for (i = 0; i < ret; i++)
+ rte_pktmbuf_free(rx_pkts[i]);
+ ret = hif_lib_receive_pkt(&client->rx_q[0],
+ hif_shm->pool,
+ rx_pkts, 32);
+ }
+ }
+ } else {
+ /* Register client driver with HIF */
+ memset(client, 0, sizeof(*client));
+ client->id = PFE_CL_GEM0 + priv->id;
+ client->tx_qn = emac_txq_cnt;
+ client->rx_qn = EMAC_RXQ_CNT;
+ client->priv = priv;
+ client->pfe = priv->pfe;
+ client->port_id = dev->data->port_id;
+ client->event_handler = pfe_eth_event_handler;
+
+ client->tx_qsize = EMAC_TXQ_DEPTH;
+ client->rx_qsize = EMAC_RXQ_DEPTH;
+
+ rc = hif_lib_client_register(client);
+ if (rc) {
+ PFE_PMD_ERR("hif_lib_client_register(%d) failed",
+ client->id);
+ goto err0;
+ }
+ }
+ rc = pfe_eth_start(priv);
+ dev->rx_pkt_burst = &pfe_recv_pkts;
+ dev->tx_pkt_burst = &pfe_xmit_pkts;
+	/* Use the interrupt-driven Rx path if requested via environment */
+ if (getenv("PFE_INTR_SUPPORT")) {
+ dev->rx_pkt_burst = &pfe_recv_pkts_on_intr;
+ PFE_PMD_INFO("PFE INTERRUPT Mode enabled");
+ }
+
+err0:
+ return rc;
+}
+
+static int
+pfe_eth_open_cdev(struct pfe_eth_priv_s *priv)
+{
+ int pfe_cdev_fd;
+
+ if (priv == NULL)
+ return -1;
+
+ pfe_cdev_fd = open(PFE_CDEV_PATH, O_RDONLY);
+ if (pfe_cdev_fd < 0) {
+ PFE_PMD_WARN("Unable to open PFE device file (%s).\n",
+ PFE_CDEV_PATH);
+ PFE_PMD_WARN("Link status update will not be available.\n");
+ priv->link_fd = PFE_CDEV_INVALID_FD;
+ return -1;
+ }
+
+ priv->link_fd = pfe_cdev_fd;
+
+ return 0;
+}
+
+static void
+pfe_eth_close_cdev(struct pfe_eth_priv_s *priv)
+{
+ if (priv == NULL)
+ return;
+
+ if (priv->link_fd != PFE_CDEV_INVALID_FD) {
+ close(priv->link_fd);
+ priv->link_fd = PFE_CDEV_INVALID_FD;
+ }
+}
+
+static void
+pfe_eth_stop(struct rte_eth_dev *dev/*, int wake*/)
+{
+ struct pfe_eth_priv_s *priv = dev->data->dev_private;
+
+ gemac_disable(priv->EMAC_baseaddr);
+ gpi_disable(priv->GPI_baseaddr);
+
+ dev->rx_pkt_burst = &pfe_dummy_recv_pkts;
+ dev->tx_pkt_burst = &pfe_dummy_xmit_pkts;
+}
+
+static void
+pfe_eth_exit(struct rte_eth_dev *dev, struct pfe *pfe)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ pfe_eth_stop(dev);
+ /* Close the device file for link status */
+ pfe_eth_close_cdev(dev->data->dev_private);
+
+ rte_eth_dev_release_port(dev);
+ pfe->nb_devs--;
+}
+
+static void
+pfe_eth_close(struct rte_eth_dev *dev)
+{
+ if (!dev)
+ return;
+
+ if (!g_pfe)
+ return;
+
+ pfe_eth_exit(dev, g_pfe);
+
+ if (g_pfe->nb_devs == 0) {
+ pfe_hif_exit(g_pfe);
+ pfe_hif_lib_exit(g_pfe);
+ rte_free(g_pfe);
+ g_pfe = NULL;
+ }
+}
+
+static int
+pfe_eth_configure(struct rte_eth_dev *dev __rte_unused)
+{
+ return 0;
+}
+
+static int
+pfe_eth_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct pfe_eth_priv_s *internals = dev->data->dev_private;
+
+ dev_info->if_index = internals->id;
+ dev_info->max_mac_addrs = PFE_MAX_MACS;
+ dev_info->max_rx_queues = dev->data->nb_rx_queues;
+ dev_info->max_tx_queues = dev->data->nb_tx_queues;
+ dev_info->min_rx_bufsize = HIF_RX_PKT_MIN_SIZE;
+ dev_info->min_mtu = RTE_ETHER_MIN_MTU;
+ dev_info->rx_offload_capa = dev_rx_offloads_sup;
+ dev_info->tx_offload_capa = dev_tx_offloads_sup;
+ if (pfe_svr == SVR_LS1012A_REV1) {
+ dev_info->max_rx_pktlen = MAX_MTU_ON_REV1 + PFE_ETH_OVERHEAD;
+ dev_info->max_mtu = MAX_MTU_ON_REV1;
+ } else {
+ dev_info->max_rx_pktlen = JUMBO_FRAME_SIZE;
+ dev_info->max_mtu = JUMBO_FRAME_SIZE - PFE_ETH_OVERHEAD;
+ }
+
+ return 0;
+}
+
+/* Only the mb_pool given on the first call of this API is used for the
+ * whole system; nb_rx_desc and rx_conf are unused parameters
+ */
+static int
+pfe_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ __rte_unused uint16_t nb_rx_desc,
+ __rte_unused unsigned int socket_id,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool)
+{
+ int rc = 0;
+ struct pfe *pfe;
+ struct pfe_eth_priv_s *priv = dev->data->dev_private;
+
+ pfe = priv->pfe;
+
+ if (queue_idx >= EMAC_RXQ_CNT) {
+ PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d",
+ queue_idx, EMAC_RXQ_CNT);
+ return -1;
+ }
+
+ if (!pfe->hif.setuped) {
+ rc = pfe_hif_shm_init(pfe->hif.shm, mb_pool);
+ if (rc) {
+ PFE_PMD_ERR("Could not allocate buffer descriptors");
+ return -1;
+ }
+
+ pfe->hif.shm->pool = mb_pool;
+ if (pfe_hif_init_buffers(&pfe->hif)) {
+ PFE_PMD_ERR("Could not initialize buffer descriptors");
+ return -1;
+ }
+ hif_init();
+ hif_rx_enable();
+ hif_tx_enable();
+ pfe->hif.setuped = 1;
+ }
+ dev->data->rx_queues[queue_idx] = &priv->client.rx_q[queue_idx];
+ priv->client.rx_q[queue_idx].queue_id = queue_idx;
+
+ return 0;
+}
+
+static void
+pfe_rx_queue_release(void *q __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+}
+
+static void
+pfe_tx_queue_release(void *q __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+}
+
+static int
+pfe_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ __rte_unused uint16_t nb_desc,
+ __rte_unused unsigned int socket_id,
+ __rte_unused const struct rte_eth_txconf *tx_conf)
+{
+ struct pfe_eth_priv_s *priv = dev->data->dev_private;
+
+ if (queue_idx >= emac_txq_cnt) {
+ PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d",
+ queue_idx, emac_txq_cnt);
+ return -1;
+ }
+ dev->data->tx_queues[queue_idx] = &priv->client.tx_q[queue_idx];
+ priv->client.tx_q[queue_idx].queue_id = queue_idx;
+ return 0;
+}
+
+static const uint32_t *
+pfe_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+		/* TODO: add more types */
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L3_IPV6_EXT,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_L4_SCTP
+ };
+
+ if (dev->rx_pkt_burst == pfe_recv_pkts ||
+ dev->rx_pkt_burst == pfe_recv_pkts_on_intr)
+ return ptypes;
+ return NULL;
+}
+
+static inline int
+pfe_eth_atomic_read_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = link;
+ struct rte_eth_link *src = &dev->data->dev_link;
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
+
+static inline int
+pfe_eth_atomic_write_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = &dev->data->dev_link;
+ struct rte_eth_link *src = link;
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
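+/*
+ * The two helpers above use rte_atomic64_cmpset() as an atomic 64-bit
+ * load/store: struct rte_eth_link fits in 64 bits, and passing the
+ * destination's current value as the expected value copies the whole
+ * link word in one shot. A racing writer makes the cmpset fail and the
+ * helper returns -1. A retrying variant would look like (sketch only):
+ *
+ *	do {
+ *		old = *(volatile uint64_t *)dst;
+ *	} while (!rte_atomic64_cmpset((uint64_t *)dst, old, new));
+ */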
+
+static int
+pfe_eth_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
+{
+ int ret, ioctl_cmd = 0;
+ struct pfe_eth_priv_s *priv = dev->data->dev_private;
+ struct rte_eth_link link, old;
+ unsigned int lstatus = 1;
+
+ if (dev == NULL) {
+ PFE_PMD_ERR("Invalid device in link_update.\n");
+ return 0;
+ }
+
+ memset(&old, 0, sizeof(old));
+ memset(&link, 0, sizeof(struct rte_eth_link));
+
+ pfe_eth_atomic_read_link_status(dev, &old);
+
+	/* Read the link status from the PFE CDEV, if the file was
+	 * opened successfully.
+	 */
+ if (priv->link_fd != PFE_CDEV_INVALID_FD) {
+ if (priv->id == 0)
+ ioctl_cmd = PFE_CDEV_ETH0_STATE_GET;
+ if (priv->id == 1)
+ ioctl_cmd = PFE_CDEV_ETH1_STATE_GET;
+
+ ret = ioctl(priv->link_fd, ioctl_cmd, &lstatus);
+ if (ret != 0) {
+ PFE_PMD_ERR("Unable to fetch link status (ioctl)\n");
+ /* use dummy link value */
+ link.link_status = 1;
+ }
+ PFE_PMD_DEBUG("Fetched link state (%d) for dev %d.\n",
+ lstatus, priv->id);
+ }
+
+ if (old.link_status == lstatus) {
+ /* no change in status */
+		PFE_PMD_DEBUG("No change in link status; not updating.\n");
+ return -1;
+ }
+
+ link.link_status = lstatus;
+ link.link_speed = ETH_LINK_SPEED_1G;
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_autoneg = ETH_LINK_AUTONEG;
+
+ pfe_eth_atomic_write_link_status(dev, &link);
+
+ PFE_PMD_INFO("Port (%d) link is %s\n", dev->data->port_id,
+ link.link_status ? "up" : "down");
+
+ return 0;
+}
+
+static int
+pfe_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct pfe_eth_priv_s *priv = dev->data->dev_private;
+
+ priv->promisc = 1;
+ dev->data->promiscuous = 1;
+ gemac_enable_copy_all(priv->EMAC_baseaddr);
+
+ return 0;
+}
+
+static int
+pfe_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct pfe_eth_priv_s *priv = dev->data->dev_private;
+
+ priv->promisc = 0;
+ dev->data->promiscuous = 0;
+ gemac_disable_copy_all(priv->EMAC_baseaddr);
+
+ return 0;
+}
+
+static int
+pfe_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct pfe_eth_priv_s *priv = dev->data->dev_private;
+ struct pfe_mac_addr hash_addr; /* hash register structure */
+
+ /* Set the hash to rx all multicast frames */
+ hash_addr.bottom = 0xFFFFFFFF;
+ hash_addr.top = 0xFFFFFFFF;
+ gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
+ dev->data->all_multicast = 1;
+
+ return 0;
+}
+
+static int
+pfe_link_down(struct rte_eth_dev *dev)
+{
+ pfe_eth_stop(dev);
+ return 0;
+}
+
+static int
+pfe_link_up(struct rte_eth_dev *dev)
+{
+ struct pfe_eth_priv_s *priv = dev->data->dev_private;
+
+ pfe_eth_start(priv);
+ return 0;
+}
+
+static int
+pfe_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ int ret;
+ struct pfe_eth_priv_s *priv = dev->data->dev_private;
+ uint16_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+
+ /*TODO Support VLAN*/
+ ret = gemac_set_rx(priv->EMAC_baseaddr, frame_size);
+ if (!ret)
+ dev->data->mtu = mtu;
+
+ return ret;
+}
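+/*
+ * Worked example: for the standard 1500-byte MTU the frame size passed to
+ * gemac_set_rx() above is 1500 + RTE_ETHER_HDR_LEN (14) +
+ * RTE_ETHER_CRC_LEN (4) = 1518 bytes, which is then programmed into the
+ * EMAC maximum frame length field.
+ */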
+
+/* pfe_eth_enet_addr_byte_mac
+ */
+static int
+pfe_eth_enet_addr_byte_mac(u8 *enet_byte_addr,
+ struct pfe_mac_addr *enet_addr)
+{
+ if (!enet_byte_addr || !enet_addr) {
+ return -1;
+
+ } else {
+ enet_addr->bottom = enet_byte_addr[0] |
+ (enet_byte_addr[1] << 8) |
+ (enet_byte_addr[2] << 16) |
+ (enet_byte_addr[3] << 24);
+ enet_addr->top = enet_byte_addr[4] |
+ (enet_byte_addr[5] << 8);
+ return 0;
+ }
+}
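+/*
+ * Worked example: for the MAC address 00:11:22:33:44:55 the packing above
+ * yields
+ *
+ *	enet_addr->bottom = 0x33221100
+ *	enet_addr->top    = 0x00005544
+ *
+ * i.e. the first four octets in 'bottom' and the last two in 'top', each
+ * in little-endian byte order.
+ */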
+
+static int
+pfe_dev_set_mac_addr(struct rte_eth_dev *dev,
+ struct rte_ether_addr *addr)
+{
+ struct pfe_eth_priv_s *priv = dev->data->dev_private;
+ struct pfe_mac_addr spec_addr;
+ int ret;
+
+ ret = pfe_eth_enet_addr_byte_mac(addr->addr_bytes, &spec_addr);
+ if (ret)
+ return ret;
+
+ gemac_set_laddrN(priv->EMAC_baseaddr,
+ (struct pfe_mac_addr *)&spec_addr, 1);
+ rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
+ return 0;
+}
+
+static int
+pfe_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats)
+{
+ struct pfe_eth_priv_s *priv = dev->data->dev_private;
+ struct rte_eth_stats *eth_stats = &priv->stats;
+
+ if (stats == NULL)
+ return -1;
+
+ memset(stats, 0, sizeof(struct rte_eth_stats));
+
+ stats->ipackets = eth_stats->ipackets;
+ stats->ibytes = eth_stats->ibytes;
+ stats->opackets = eth_stats->opackets;
+ stats->obytes = eth_stats->obytes;
+
+ return 0;
+}
+
+static const struct eth_dev_ops ops = {
+ .dev_start = pfe_eth_open,
+ .dev_stop = pfe_eth_stop,
+ .dev_close = pfe_eth_close,
+ .dev_configure = pfe_eth_configure,
+ .dev_infos_get = pfe_eth_info,
+ .rx_queue_setup = pfe_rx_queue_setup,
+ .rx_queue_release = pfe_rx_queue_release,
+ .tx_queue_setup = pfe_tx_queue_setup,
+ .tx_queue_release = pfe_tx_queue_release,
+ .dev_supported_ptypes_get = pfe_supported_ptypes_get,
+ .link_update = pfe_eth_link_update,
+ .promiscuous_enable = pfe_promiscuous_enable,
+ .promiscuous_disable = pfe_promiscuous_disable,
+ .allmulticast_enable = pfe_allmulticast_enable,
+ .dev_set_link_down = pfe_link_down,
+ .dev_set_link_up = pfe_link_up,
+ .mtu_set = pfe_mtu_set,
+ .mac_addr_set = pfe_dev_set_mac_addr,
+ .stats_get = pfe_stats_get,
+};
+
+static int
+pfe_eth_init(struct rte_vdev_device *vdev, struct pfe *pfe, int id)
+{
+ struct rte_eth_dev *eth_dev = NULL;
+ struct pfe_eth_priv_s *priv = NULL;
+ struct ls1012a_eth_platform_data *einfo;
+ struct ls1012a_pfe_platform_data *pfe_info;
+ struct rte_ether_addr addr;
+ int err;
+
+ eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*priv));
+ if (eth_dev == NULL)
+ return -ENOMEM;
+
+	/* Extract platform data */
+ pfe_info = (struct ls1012a_pfe_platform_data *)&pfe->platform_data;
+ if (!pfe_info) {
+ PFE_PMD_ERR("pfe missing additional platform data");
+ err = -ENODEV;
+ goto err0;
+ }
+
+ einfo = (struct ls1012a_eth_platform_data *)pfe_info->ls1012a_eth_pdata;
+
+	/* einfo should never be NULL, but there is no harm in this check */
+ if (!einfo) {
+ PFE_PMD_ERR("pfe missing additional gemacs platform data");
+ err = -ENODEV;
+ goto err0;
+ }
+
+ priv = eth_dev->data->dev_private;
+ priv->ndev = eth_dev;
+ priv->id = einfo[id].gem_id;
+ priv->pfe = pfe;
+
+ pfe->eth.eth_priv[id] = priv;
+
+ /* Set the info in the priv to the current info */
+ priv->einfo = &einfo[id];
+ priv->EMAC_baseaddr = cbus_emac_base[id];
+ priv->PHY_baseaddr = cbus_emac_base[id];
+ priv->GPI_baseaddr = cbus_gpi_base[id];
+
+#define HIF_GEMAC_TMUQ_BASE 6
+ priv->low_tmu_q = HIF_GEMAC_TMUQ_BASE + (id * 2);
+ priv->high_tmu_q = priv->low_tmu_q + 1;
+
+ rte_spinlock_init(&priv->lock);
+
+	/* Copy the station address into the dev structure */
+ eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
+ ETHER_ADDR_LEN * PFE_MAX_MACS, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PFE_PMD_ERR("Failed to allocate mem %d to store MAC addresses",
+ ETHER_ADDR_LEN * PFE_MAX_MACS);
+ err = -ENOMEM;
+ goto err0;
+ }
+
+ memcpy(addr.addr_bytes, priv->einfo->mac_addr,
+ ETH_ALEN);
+
+ pfe_dev_set_mac_addr(eth_dev, &addr);
+ rte_ether_addr_copy(&addr, &eth_dev->data->mac_addrs[0]);
+
+ eth_dev->data->mtu = 1500;
+ eth_dev->dev_ops = &ops;
+ pfe_eth_stop(eth_dev);
+ pfe_gemac_init(priv);
+
+ eth_dev->data->nb_rx_queues = 1;
+ eth_dev->data->nb_tx_queues = 1;
+
+ /* For link status, open the PFE CDEV; Error from this function
+ * is silently ignored; In case of error, the link status will not
+ * be available.
+ */
+ pfe_eth_open_cdev(priv);
+ rte_eth_dev_probing_finish(eth_dev);
+
+ return 0;
+err0:
+ rte_eth_dev_release_port(eth_dev);
+ return err;
+}
+
+static int
+pfe_get_gemac_if_properties(struct pfe *pfe,
+ __rte_unused const struct device_node *parent,
+ unsigned int port, unsigned int if_cnt,
+ struct ls1012a_pfe_platform_data *pdata)
+{
+ const struct device_node *gem = NULL;
+ size_t size;
+ unsigned int ii = 0, phy_id = 0;
+ const u32 *addr;
+ const void *mac_addr;
+
+ for (ii = 0; ii < if_cnt; ii++) {
+ gem = of_get_next_child(parent, gem);
+ if (!gem)
+ goto err;
+ addr = of_get_property(gem, "reg", &size);
+ if (addr && (rte_be_to_cpu_32((unsigned int)*addr) == port))
+ break;
+ }
+
+ if (ii >= if_cnt) {
+ PFE_PMD_ERR("Failed to find interface = %d", if_cnt);
+ goto err;
+ }
+
+ pdata->ls1012a_eth_pdata[port].gem_id = port;
+
+ mac_addr = of_get_mac_address(gem);
+
+ if (mac_addr) {
+ memcpy(pdata->ls1012a_eth_pdata[port].mac_addr, mac_addr,
+ ETH_ALEN);
+ }
+
+ addr = of_get_property(gem, "fsl,mdio-mux-val", &size);
+ if (!addr) {
+		PFE_PMD_ERR("Invalid mdio-mux-val");
+ } else {
+ phy_id = rte_be_to_cpu_32((unsigned int)*addr);
+ pdata->ls1012a_eth_pdata[port].mdio_muxval = phy_id;
+ }
+ if (pdata->ls1012a_eth_pdata[port].phy_id < 32)
+ pfe->mdio_muxval[pdata->ls1012a_eth_pdata[port].phy_id] =
+ pdata->ls1012a_eth_pdata[port].mdio_muxval;
+
+ return 0;
+
+err:
+ return -1;
+}
+
+/* Parse an integer argument */
+static int
+parse_integer_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ int i;
+ char *end;
+ errno = 0;
+
+ i = strtol(value, &end, 10);
+ if (*end != 0 || errno != 0 || i < 0 || i > 1) {
+ PFE_PMD_ERR("Supported Port IDS are 0 and 1");
+ return -EINVAL;
+ }
+
+ *((uint32_t *)extra_args) = i;
+
+ return 0;
+}
+
+static int
+pfe_parse_vdev_init_params(struct pfe_vdev_init_params *params,
+ struct rte_vdev_device *dev)
+{
+ struct rte_kvargs *kvlist = NULL;
+ int ret = 0;
+
+ static const char * const pfe_vdev_valid_params[] = {
+ PFE_VDEV_GEM_ID_ARG,
+ NULL
+ };
+
+ const char *input_args = rte_vdev_device_args(dev);
+
+ if (!input_args)
+ return -1;
+
+ kvlist = rte_kvargs_parse(input_args, pfe_vdev_valid_params);
+ if (kvlist == NULL)
+ return -1;
+
+ ret = rte_kvargs_process(kvlist,
+ PFE_VDEV_GEM_ID_ARG,
+ &parse_integer_arg,
+ &params->gem_id);
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+static int
+pmd_pfe_probe(struct rte_vdev_device *vdev)
+{
+ const u32 *prop;
+ const struct device_node *np;
+ const char *name;
+ const uint32_t *addr;
+ uint64_t cbus_addr, ddr_size, cbus_size;
+ int rc = -1, fd = -1, gem_id;
+ unsigned int ii, interface_count = 0;
+ size_t size = 0;
+ struct pfe_vdev_init_params init_params = {
+ .gem_id = -1
+ };
+
+ name = rte_vdev_device_name(vdev);
+ rc = pfe_parse_vdev_init_params(&init_params, vdev);
+ if (rc < 0)
+ return -EINVAL;
+
+ PFE_PMD_LOG(INFO, "Initializing pmd_pfe for %s Given gem-id %d",
+ name, init_params.gem_id);
+
+ if (g_pfe) {
+ if (g_pfe->nb_devs >= g_pfe->max_intf) {
+ PFE_PMD_ERR("PFE %d dev already created Max is %d",
+ g_pfe->nb_devs, g_pfe->max_intf);
+ return -EINVAL;
+ }
+ goto eth_init;
+ }
+
+ g_pfe = rte_zmalloc(NULL, sizeof(*g_pfe), RTE_CACHE_LINE_SIZE);
+ if (g_pfe == NULL)
+ return -EINVAL;
+
+ /* Load the device-tree driver */
+ rc = of_init();
+ if (rc) {
+ PFE_PMD_ERR("of_init failed with ret: %d", rc);
+ goto err;
+ }
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,pfe");
+ if (!np) {
+ PFE_PMD_ERR("Invalid device node");
+ rc = -EINVAL;
+ goto err;
+ }
+
+ addr = of_get_address(np, 0, &cbus_size, NULL);
+ if (!addr) {
+		PFE_PMD_ERR("of_get_address failed for cbus address\n");
+ goto err;
+ }
+ cbus_addr = of_translate_address(np, addr);
+ if (!cbus_addr) {
+ PFE_PMD_ERR("of_translate_address failed\n");
+ goto err;
+ }
+
+ addr = of_get_address(np, 1, &ddr_size, NULL);
+ if (!addr) {
+		PFE_PMD_ERR("of_get_address failed for ddr address\n");
+ goto err;
+ }
+
+ g_pfe->ddr_phys_baseaddr = of_translate_address(np, addr);
+ if (!g_pfe->ddr_phys_baseaddr) {
+ PFE_PMD_ERR("of_translate_address failed\n");
+ goto err;
+ }
+
+ g_pfe->ddr_baseaddr = pfe_mem_ptov(g_pfe->ddr_phys_baseaddr);
+ g_pfe->ddr_size = ddr_size;
+ g_pfe->cbus_size = cbus_size;
+
+	fd = open("/dev/mem", O_RDWR);
+	if (fd < 0) {
+		PFE_PMD_ERR("Can not open /dev/mem");
+		rc = -EINVAL;
+		goto err;
+	}
+ g_pfe->cbus_baseaddr = mmap(NULL, cbus_size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, cbus_addr);
+ close(fd);
+ if (g_pfe->cbus_baseaddr == MAP_FAILED) {
+ PFE_PMD_ERR("Can not map cbus base");
+ rc = -EINVAL;
+ goto err;
+ }
+
+ /* Read interface count */
+ prop = of_get_property(np, "fsl,pfe-num-interfaces", &size);
+ if (!prop) {
+ PFE_PMD_ERR("Failed to read number of interfaces");
+ rc = -ENXIO;
+ goto err_prop;
+ }
+
+ interface_count = rte_be_to_cpu_32((unsigned int)*prop);
+	if (interface_count == 0) {
+ PFE_PMD_ERR("No ethernet interface count : %d",
+ interface_count);
+ rc = -ENXIO;
+ goto err_prop;
+ }
+ PFE_PMD_INFO("num interfaces = %d ", interface_count);
+
+ g_pfe->max_intf = interface_count;
+ g_pfe->platform_data.ls1012a_mdio_pdata[0].phy_mask = 0xffffffff;
+
+ for (ii = 0; ii < interface_count; ii++) {
+		pfe_get_gemac_if_properties(g_pfe, np, ii, interface_count,
+ &g_pfe->platform_data);
+ }
+
+ pfe_lib_init(g_pfe->cbus_baseaddr, g_pfe->ddr_baseaddr,
+ g_pfe->ddr_phys_baseaddr, g_pfe->ddr_size);
+
+ PFE_PMD_INFO("CLASS version: %x", readl(CLASS_VERSION));
+ PFE_PMD_INFO("TMU version: %x", readl(TMU_VERSION));
+
+ PFE_PMD_INFO("BMU1 version: %x", readl(BMU1_BASE_ADDR + BMU_VERSION));
+ PFE_PMD_INFO("BMU2 version: %x", readl(BMU2_BASE_ADDR + BMU_VERSION));
+
+ PFE_PMD_INFO("EGPI1 version: %x", readl(EGPI1_BASE_ADDR + GPI_VERSION));
+ PFE_PMD_INFO("EGPI2 version: %x", readl(EGPI2_BASE_ADDR + GPI_VERSION));
+ PFE_PMD_INFO("HGPI version: %x", readl(HGPI_BASE_ADDR + GPI_VERSION));
+
+ PFE_PMD_INFO("HIF version: %x", readl(HIF_VERSION));
+ PFE_PMD_INFO("HIF NOPCY version: %x", readl(HIF_NOCPY_VERSION));
+
+ cbus_emac_base[0] = EMAC1_BASE_ADDR;
+ cbus_emac_base[1] = EMAC2_BASE_ADDR;
+
+ cbus_gpi_base[0] = EGPI1_BASE_ADDR;
+ cbus_gpi_base[1] = EGPI2_BASE_ADDR;
+
+ rc = pfe_hif_lib_init(g_pfe);
+ if (rc < 0)
+ goto err_hif_lib;
+
+ rc = pfe_hif_init(g_pfe);
+ if (rc < 0)
+ goto err_hif;
+ pfe_soc_version_get();
+eth_init:
+ if (init_params.gem_id < 0)
+ gem_id = g_pfe->nb_devs;
+ else
+ gem_id = init_params.gem_id;
+
+ PFE_PMD_LOG(INFO, "Init pmd_pfe for %s gem-id %d(given =%d)",
+ name, gem_id, init_params.gem_id);
+
+ rc = pfe_eth_init(vdev, g_pfe, gem_id);
+ if (rc < 0)
+ goto err_eth;
+ else
+ g_pfe->nb_devs++;
+
+ return 0;
+
+err_eth:
+ pfe_hif_exit(g_pfe);
+
+err_hif:
+ pfe_hif_lib_exit(g_pfe);
+
+err_hif_lib:
+err_prop:
+ munmap(g_pfe->cbus_baseaddr, cbus_size);
+err:
+ rte_free(g_pfe);
+ return rc;
+}
+
+static int
+pmd_pfe_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+ struct rte_eth_dev *eth_dev = NULL;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+	PFE_PMD_INFO("Closing pfe vdev device %s", name);
+
+ if (!g_pfe)
+ return 0;
+
+ eth_dev = rte_eth_dev_allocated(name);
+ if (eth_dev == NULL)
+ return -ENODEV;
+
+ pfe_eth_exit(eth_dev, g_pfe);
+ munmap(g_pfe->cbus_baseaddr, g_pfe->cbus_size);
+
+ if (g_pfe->nb_devs == 0) {
+ pfe_hif_exit(g_pfe);
+ pfe_hif_lib_exit(g_pfe);
+ rte_free(g_pfe);
+ g_pfe = NULL;
+ }
+ return 0;
+}
+
+static
+struct rte_vdev_driver pmd_pfe_drv = {
+ .probe = pmd_pfe_probe,
+ .remove = pmd_pfe_remove,
+};
+
+RTE_PMD_REGISTER_VDEV(PFE_NAME_PMD, pmd_pfe_drv);
+RTE_PMD_REGISTER_PARAM_STRING(PFE_NAME_PMD, PFE_VDEV_GEM_ID_ARG "=<int> ");
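+
+/*
+ * Usage sketch (the vdev name shown is an assumption; PFE_NAME_PMD is
+ * defined elsewhere in the driver): the gem id can be picked on the EAL
+ * command line, e.g.
+ *
+ *	./app --vdev=eth_pfe0,intf=1
+ *
+ * Without an "intf" argument the probe falls back to the next free
+ * device index (g_pfe->nb_devs).
+ */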
+
+RTE_INIT(pfe_pmd_init_log)
+{
+ pfe_logtype_pmd = rte_log_register("pmd.net.pfe");
+ if (pfe_logtype_pmd >= 0)
+ rte_log_set_level(pfe_logtype_pmd, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/net/pfe/pfe_hal.c b/src/spdk/dpdk/drivers/net/pfe/pfe_hal.c
new file mode 100644
index 000000000..0d25ec052
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/pfe/pfe_hal.c
@@ -0,0 +1,629 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018-2019 NXP
+ */
+
+#include "pfe_logs.h"
+#include "pfe_mod.h"
+
+#define PFE_MTU_RESET_MASK 0xC000FFFF
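+/*
+ * PFE_MTU_RESET_MASK clears the maximum-frame-length field of
+ * EMAC_RCNTRL_REG (bits 29:16 on this MAC, as used below) while keeping
+ * the remaining control bits, so callers can OR in a new length shifted
+ * left by 16, e.g.:
+ *
+ *	(readl(base + EMAC_RCNTRL_REG) & PFE_MTU_RESET_MASK) | (1536 << 16)
+ */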
+
+void *cbus_base_addr;
+void *ddr_base_addr;
+unsigned long ddr_phys_base_addr;
+unsigned int ddr_size;
+static struct pe_info pe[MAX_PE];
+
+/* Initializes the PFE library.
+ * Must be called before using any of the library functions.
+ *
+ * @param[in] cbus_base CBUS virtual base address (as mapped in
+ * the host CPU address space)
+ * @param[in] ddr_base PFE DDR range virtual base address (as
+ * mapped in the host CPU address space)
+ * @param[in] ddr_phys_base PFE DDR range physical base address (as
+ * mapped in platform)
+ * @param[in] size PFE DDR range size (as defined by the host
+ * software)
+ */
+void
+pfe_lib_init(void *cbus_base, void *ddr_base, unsigned long ddr_phys_base,
+ unsigned int size)
+{
+ cbus_base_addr = cbus_base;
+ ddr_base_addr = ddr_base;
+ ddr_phys_base_addr = ddr_phys_base;
+ ddr_size = size;
+
+ pe[CLASS0_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(0);
+ pe[CLASS0_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(0);
+ pe[CLASS0_ID].pmem_size = CLASS_IMEM_SIZE;
+ pe[CLASS0_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
+ pe[CLASS0_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
+ pe[CLASS0_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
+
+ pe[CLASS1_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(1);
+ pe[CLASS1_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(1);
+ pe[CLASS1_ID].pmem_size = CLASS_IMEM_SIZE;
+ pe[CLASS1_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
+ pe[CLASS1_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
+ pe[CLASS1_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
+
+ pe[CLASS2_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(2);
+ pe[CLASS2_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(2);
+ pe[CLASS2_ID].pmem_size = CLASS_IMEM_SIZE;
+ pe[CLASS2_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
+ pe[CLASS2_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
+ pe[CLASS2_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
+
+ pe[CLASS3_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(3);
+ pe[CLASS3_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(3);
+ pe[CLASS3_ID].pmem_size = CLASS_IMEM_SIZE;
+ pe[CLASS3_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
+ pe[CLASS3_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
+ pe[CLASS3_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
+
+ pe[CLASS4_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(4);
+ pe[CLASS4_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(4);
+ pe[CLASS4_ID].pmem_size = CLASS_IMEM_SIZE;
+ pe[CLASS4_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
+ pe[CLASS4_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
+ pe[CLASS4_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
+
+ pe[CLASS5_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(5);
+ pe[CLASS5_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(5);
+ pe[CLASS5_ID].pmem_size = CLASS_IMEM_SIZE;
+ pe[CLASS5_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
+ pe[CLASS5_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
+ pe[CLASS5_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
+
+ pe[TMU0_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(0);
+ pe[TMU0_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(0);
+ pe[TMU0_ID].pmem_size = TMU_IMEM_SIZE;
+ pe[TMU0_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
+ pe[TMU0_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
+ pe[TMU0_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
+
+ pe[TMU1_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(1);
+ pe[TMU1_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(1);
+ pe[TMU1_ID].pmem_size = TMU_IMEM_SIZE;
+ pe[TMU1_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
+ pe[TMU1_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
+ pe[TMU1_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
+
+ pe[TMU3_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(3);
+ pe[TMU3_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(3);
+ pe[TMU3_ID].pmem_size = TMU_IMEM_SIZE;
+ pe[TMU3_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
+ pe[TMU3_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
+ pe[TMU3_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
+
+#if !defined(CONFIG_FSL_PFE_UTIL_DISABLED)
+ pe[UTIL_ID].dmem_base_addr = UTIL_DMEM_BASE_ADDR;
+ pe[UTIL_ID].mem_access_wdata = UTIL_MEM_ACCESS_WDATA;
+ pe[UTIL_ID].mem_access_addr = UTIL_MEM_ACCESS_ADDR;
+ pe[UTIL_ID].mem_access_rdata = UTIL_MEM_ACCESS_RDATA;
+#endif
+}
+
+/**************************** MTIP GEMAC ***************************/
+
+/* Enable the Rx checksum engine. With this enabled, frames with bad IP,
+ * TCP or UDP checksums are discarded
+ *
+ * @param[in] base GEMAC base address.
+ */
+void
+gemac_enable_rx_checksum_offload(__rte_unused void *base)
+{
+	/* No configuration is available to enable this on the MTIP MAC */
+}
+
+/* Disable Rx Checksum Engine.
+ *
+ * @param[in] base GEMAC base address.
+ */
+void
+gemac_disable_rx_checksum_offload(__rte_unused void *base)
+{
+	/* No configuration is available to disable this on the MTIP MAC */
+}
+
+/* GEMAC set speed.
+ * @param[in] base GEMAC base address
+ * @param[in] speed GEMAC speed (10, 100 or 1000 Mbps)
+ */
+void
+gemac_set_speed(void *base, enum mac_speed gem_speed)
+{
+ u32 ecr = readl(base + EMAC_ECNTRL_REG) & ~EMAC_ECNTRL_SPEED;
+ u32 rcr = readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_RMII_10T;
+
+ switch (gem_speed) {
+ case SPEED_10M:
+ rcr |= EMAC_RCNTRL_RMII_10T;
+ break;
+
+ case SPEED_1000M:
+ ecr |= EMAC_ECNTRL_SPEED;
+ break;
+
+ case SPEED_100M:
+ default:
+		/* default: 100M mode */
+ break;
+ }
+ writel(ecr, (base + EMAC_ECNTRL_REG));
+ writel(rcr, (base + EMAC_RCNTRL_REG));
+}
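+/*
+ * Speed is thus encoded across two registers: SPEED_10M sets the RMII 10T
+ * bit in EMAC_RCNTRL_REG, SPEED_1000M sets the gigabit bit in
+ * EMAC_ECNTRL_REG, and clearing both (the default path above) leaves the
+ * MAC in 100M mode.
+ */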
+
+/* GEMAC set duplex.
+ * @param[in] base GEMAC base address
+ * @param[in] duplex GEMAC duplex mode (Full, Half)
+ */
+void
+gemac_set_duplex(void *base, int duplex)
+{
+ if (duplex == DUPLEX_HALF) {
+ writel(readl(base + EMAC_TCNTRL_REG) & ~EMAC_TCNTRL_FDEN, base
+ + EMAC_TCNTRL_REG);
+ writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_DRT, (base
+ + EMAC_RCNTRL_REG));
+ } else {
+ writel(readl(base + EMAC_TCNTRL_REG) | EMAC_TCNTRL_FDEN, base
+ + EMAC_TCNTRL_REG);
+ writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_DRT, (base
+ + EMAC_RCNTRL_REG));
+ }
+}
+
+/* GEMAC set mode.
+ * @param[in] base GEMAC base address
+ * @param[in] mode GEMAC operation mode (MII, RMII, RGMII, SGMII)
+ */
+void
+gemac_set_mode(void *base, __rte_unused int mode)
+{
+ u32 val = readl(base + EMAC_RCNTRL_REG);
+
+	/* Disable loopback */
+ val &= ~EMAC_RCNTRL_LOOP;
+
+ /*Enable flow control and MII mode*/
+ val |= (EMAC_RCNTRL_FCE | EMAC_RCNTRL_MII_MODE | EMAC_RCNTRL_CRC_FWD);
+
+ writel(val, base + EMAC_RCNTRL_REG);
+}
+
+/* GEMAC enable function.
+ * @param[in] base GEMAC base address
+ */
+void
+gemac_enable(void *base)
+{
+ writel(readl(base + EMAC_ECNTRL_REG) | EMAC_ECNTRL_ETHER_EN, base +
+ EMAC_ECNTRL_REG);
+}
+
+/* GEMAC disable function.
+ * @param[in] base GEMAC base address
+ */
+void
+gemac_disable(void *base)
+{
+ writel(readl(base + EMAC_ECNTRL_REG) & ~EMAC_ECNTRL_ETHER_EN, base +
+ EMAC_ECNTRL_REG);
+}
+
+/* GEMAC TX disable function.
+ * @param[in] base GEMAC base address
+ */
+void
+gemac_tx_disable(void *base)
+{
+ writel(readl(base + EMAC_TCNTRL_REG) | EMAC_TCNTRL_GTS, base +
+ EMAC_TCNTRL_REG);
+}
+
+void
+gemac_tx_enable(void *base)
+{
+ writel(readl(base + EMAC_TCNTRL_REG) & ~EMAC_TCNTRL_GTS, base +
+ EMAC_TCNTRL_REG);
+}
+
+/* Sets the hash register of the MAC.
+ * This register is used for matching unicast and multicast frames.
+ *
+ * @param[in] base GEMAC base address.
+ * @param[in] hash 64-bit hash to be configured.
+ */
+void
+gemac_set_hash(void *base, struct pfe_mac_addr *hash)
+{
+ writel(hash->bottom, base + EMAC_GALR);
+ writel(hash->top, base + EMAC_GAUR);
+}
+
+void
+gemac_set_laddrN(void *base, struct pfe_mac_addr *address,
+ unsigned int entry_index)
+{
+ if (entry_index < 1 || entry_index > EMAC_SPEC_ADDR_MAX)
+ return;
+
+ entry_index = entry_index - 1;
+ if (entry_index < 1) {
+ writel(htonl(address->bottom), base + EMAC_PHY_ADDR_LOW);
+ writel((htonl(address->top) | 0x8808), base +
+ EMAC_PHY_ADDR_HIGH);
+ } else {
+ writel(htonl(address->bottom), base + ((entry_index - 1) * 8)
+ + EMAC_SMAC_0_0);
+ writel((htonl(address->top) | 0x8808), base + ((entry_index -
+ 1) * 8) + EMAC_SMAC_0_1);
+ }
+}
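+/*
+ * Note on the mapping above: entry 1 lands in the primary address pair
+ * (EMAC_PHY_ADDR_LOW/HIGH) while entries 2..EMAC_SPEC_ADDR_MAX are spread
+ * over the supplemental EMAC_SMAC_n_0/_1 pairs at 8-byte strides. The
+ * 0x8808 OR-ed into the high word is the MAC control (pause) ethertype
+ * expected in that register's type field.
+ */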
+
+void
+gemac_clear_laddrN(void *base, unsigned int entry_index)
+{
+ if (entry_index < 1 || entry_index > EMAC_SPEC_ADDR_MAX)
+ return;
+
+ entry_index = entry_index - 1;
+ if (entry_index < 1) {
+ writel(0, base + EMAC_PHY_ADDR_LOW);
+ writel(0, base + EMAC_PHY_ADDR_HIGH);
+ } else {
+ writel(0, base + ((entry_index - 1) * 8) + EMAC_SMAC_0_0);
+ writel(0, base + ((entry_index - 1) * 8) + EMAC_SMAC_0_1);
+ }
+}
+
+/* Set the loopback mode of the MAC. This can be either no loopback for
+ * normal operation, local loopback through MAC internal loopback module or PHY
+ * loopback for external loopback through a PHY. This asserts the external
+ * loop pin.
+ *
+ * @param[in] base GEMAC base address.
+ * @param[in] gem_loop Loopback mode to be enabled. LB_LOCAL - MAC
+ * Loopback,
+ * LB_EXT - PHY Loopback.
+ */
+void
+gemac_set_loop(void *base, __rte_unused enum mac_loop gem_loop)
+{
+ pr_info("%s()\n", __func__);
+ writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_LOOP, (base +
+ EMAC_RCNTRL_REG));
+}
+
+/* GEMAC allow frames
+ * @param[in] base GEMAC base address
+ */
+void
+gemac_enable_copy_all(void *base)
+{
+ writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_PROM, (base +
+ EMAC_RCNTRL_REG));
+}
+
+/* GEMAC do not allow frames
+ * @param[in] base GEMAC base address
+ */
+void
+gemac_disable_copy_all(void *base)
+{
+ writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_PROM, (base +
+ EMAC_RCNTRL_REG));
+}
+
+/* GEMAC allow broadcast function.
+ * @param[in] base GEMAC base address
+ */
+void
+gemac_allow_broadcast(void *base)
+{
+ writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_BC_REJ, base +
+ EMAC_RCNTRL_REG);
+}
+
+/* GEMAC no broadcast function.
+ * @param[in] base GEMAC base address
+ */
+void
+gemac_no_broadcast(void *base)
+{
+ writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_BC_REJ, base +
+ EMAC_RCNTRL_REG);
+}
+
+/* GEMAC enable 1536 rx function.
+ * @param[in] base GEMAC base address
+ */
+void
+gemac_enable_1536_rx(void *base)
+{
+ /* Set 1536 as Maximum frame length */
+ writel((readl(base + EMAC_RCNTRL_REG) & PFE_MTU_RESET_MASK)
+ | (1536 << 16),
+ base + EMAC_RCNTRL_REG);
+}
+
+/* GEMAC set Max rx function.
+ * @param[in] base GEMAC base address
+ */
+int
+gemac_set_rx(void *base, int mtu)
+{
+ if (mtu < HIF_RX_PKT_MIN_SIZE || mtu > JUMBO_FRAME_SIZE) {
+		PFE_PMD_ERR("Invalid or unsupported MTU size");
+ return -1;
+ }
+
+ if (pfe_svr == SVR_LS1012A_REV1 &&
+ mtu > (MAX_MTU_ON_REV1 + PFE_ETH_OVERHEAD)) {
+ PFE_PMD_ERR("Max supported MTU on Rev1 is %d", MAX_MTU_ON_REV1);
+ return -1;
+ }
+
+ writel((readl(base + EMAC_RCNTRL_REG) & PFE_MTU_RESET_MASK)
+ | (mtu << 16),
+ base + EMAC_RCNTRL_REG);
+ return 0;
+}
+
+/* GEMAC enable jumbo function.
+ * @param[in] base GEMAC base address
+ */
+void
+gemac_enable_rx_jmb(void *base)
+{
+ if (pfe_svr == SVR_LS1012A_REV1) {
+ PFE_PMD_ERR("Jumbo not supported on Rev1");
+ return;
+ }
+
+ writel((readl(base + EMAC_RCNTRL_REG) & PFE_MTU_RESET_MASK) |
+ (JUMBO_FRAME_SIZE << 16), base + EMAC_RCNTRL_REG);
+}
+
+/* GEMAC enable stacked vlan function.
+ * @param[in] base GEMAC base address
+ */
+void
+gemac_enable_stacked_vlan(__rte_unused void *base)
+{
+ /* MTIP doesn't support stacked vlan */
+}
+
+/* GEMAC enable pause rx function.
+ * @param[in] base GEMAC base address
+ */
+void
+gemac_enable_pause_rx(void *base)
+{
+ writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_FCE,
+ base + EMAC_RCNTRL_REG);
+}
+
+/* GEMAC disable pause rx function.
+ * @param[in] base GEMAC base address
+ */
+void
+gemac_disable_pause_rx(void *base)
+{
+ writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_FCE,
+ base + EMAC_RCNTRL_REG);
+}
+
+/* GEMAC enable pause tx function.
+ * @param[in] base GEMAC base address
+ */
+void
+gemac_enable_pause_tx(void *base)
+{
+ writel(EMAC_RX_SECTION_EMPTY_V, base + EMAC_RX_SECTION_EMPTY);
+}
+
+/* GEMAC disable pause tx function.
+ * @param[in] base GEMAC base address
+ */
+void
+gemac_disable_pause_tx(void *base)
+{
+ writel(0x0, base + EMAC_RX_SECTION_EMPTY);
+}
+
+/* GEMAC wol configuration
+ * @param[in] base GEMAC base address
+ * @param[in] wol_conf WoL register configuration
+ */
+void
+gemac_set_wol(void *base, u32 wol_conf)
+{
+ u32 val = readl(base + EMAC_ECNTRL_REG);
+
+ if (wol_conf)
+ val |= (EMAC_ECNTRL_MAGIC_ENA | EMAC_ECNTRL_SLEEP);
+ else
+ val &= ~(EMAC_ECNTRL_MAGIC_ENA | EMAC_ECNTRL_SLEEP);
+ writel(val, base + EMAC_ECNTRL_REG);
+}
+
+/* Sets Gemac bus width to 64bit
+ * @param[in] base GEMAC base address
+ * @param[in] width gemac bus width to be set possible values are 32/64/128
+ */
+void
+gemac_set_bus_width(__rte_unused void *base, __rte_unused int width)
+{
+}
+
+/* Sets Gemac configuration.
+ * @param[in] base GEMAC base address
+ * @param[in] cfg GEMAC configuration
+ */
+void
+gemac_set_config(void *base, struct gemac_cfg *cfg)
+{
+ /*GEMAC config taken from VLSI */
+ writel(0x00000004, base + EMAC_TFWR_STR_FWD);
+ writel(0x00000005, base + EMAC_RX_SECTION_FULL);
+
+ if (pfe_svr == SVR_LS1012A_REV1)
+ writel(0x00000768, base + EMAC_TRUNC_FL);
+ else
+ writel(0x00003fff, base + EMAC_TRUNC_FL);
+
+ writel(0x00000030, base + EMAC_TX_SECTION_EMPTY);
+ writel(0x00000000, base + EMAC_MIB_CTRL_STS_REG);
+
+ gemac_set_mode(base, cfg->mode);
+
+ gemac_set_speed(base, cfg->speed);
+
+ gemac_set_duplex(base, cfg->duplex);
+}
+
+/**************************** GPI ***************************/
+
+/* Initializes a GPI block.
+ * @param[in] base GPI base address
+ * @param[in] cfg GPI configuration
+ */
+void
+gpi_init(void *base, struct gpi_cfg *cfg)
+{
+ gpi_reset(base);
+
+ gpi_disable(base);
+
+ gpi_set_config(base, cfg);
+}
+
+/* Resets a GPI block.
+ * @param[in] base GPI base address
+ */
+void
+gpi_reset(void *base)
+{
+ writel(CORE_SW_RESET, base + GPI_CTRL);
+}
+
+/* Enables a GPI block.
+ * @param[in] base GPI base address
+ */
+void
+gpi_enable(void *base)
+{
+ writel(CORE_ENABLE, base + GPI_CTRL);
+}
+
+/* Disables a GPI block.
+ * @param[in] base GPI base address
+ */
+void
+gpi_disable(void *base)
+{
+ writel(CORE_DISABLE, base + GPI_CTRL);
+}
+
+/* Sets the configuration of a GPI block.
+ * @param[in] base GPI base address
+ * @param[in] cfg GPI configuration
+ */
+void
+gpi_set_config(void *base, struct gpi_cfg *cfg)
+{
+ writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_ALLOC_CTRL), base
+ + GPI_LMEM_ALLOC_ADDR);
+ writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_FREE_CTRL), base
+ + GPI_LMEM_FREE_ADDR);
+ writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_ALLOC_CTRL), base
+ + GPI_DDR_ALLOC_ADDR);
+ writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL), base
+ + GPI_DDR_FREE_ADDR);
+ writel(CBUS_VIRT_TO_PFE(CLASS_INQ_PKTPTR), base + GPI_CLASS_ADDR);
+ writel(DDR_HDR_SIZE, base + GPI_DDR_DATA_OFFSET);
+ writel(LMEM_HDR_SIZE, base + GPI_LMEM_DATA_OFFSET);
+ writel(0, base + GPI_LMEM_SEC_BUF_DATA_OFFSET);
+ writel(0, base + GPI_DDR_SEC_BUF_DATA_OFFSET);
+ writel((DDR_HDR_SIZE << 16) | LMEM_HDR_SIZE, base + GPI_HDR_SIZE);
+ writel((DDR_BUF_SIZE << 16) | LMEM_BUF_SIZE, base + GPI_BUF_SIZE);
+
+ writel(((cfg->lmem_rtry_cnt << 16) | (GPI_DDR_BUF_EN << 1) |
+ GPI_LMEM_BUF_EN), base + GPI_RX_CONFIG);
+ writel(cfg->tmlf_txthres, base + GPI_TMLF_TX);
+ writel(cfg->aseq_len, base + GPI_DTX_ASEQ);
+ writel(1, base + GPI_TOE_CHKSUM_EN);
+
+ if (cfg->mtip_pause_reg) {
+ writel(cfg->mtip_pause_reg, base + GPI_CSR_MTIP_PAUSE_REG);
+ writel(EGPI_PAUSE_TIME, base + GPI_TX_PAUSE_TIME);
+ }
+}
+
+/**************************** HIF ***************************/
+/* Initializes HIF copy block.
+ *
+ */
+void
+hif_init(void)
+{
+ /*Initialize HIF registers*/
+ writel((HIF_RX_POLL_CTRL_CYCLE << 16) | HIF_TX_POLL_CTRL_CYCLE,
+ HIF_POLL_CTRL);
+}
+
+/* Enable hif tx DMA and interrupt
+ *
+ */
+void
+hif_tx_enable(void)
+{
+ writel(HIF_CTRL_DMA_EN, HIF_TX_CTRL);
+ writel((readl(HIF_INT_ENABLE) | HIF_INT_EN | HIF_TXPKT_INT_EN),
+ HIF_INT_ENABLE);
+}
+
+/* Disable hif tx DMA and interrupt
+ *
+ */
+void
+hif_tx_disable(void)
+{
+ u32 hif_int;
+
+ writel(0, HIF_TX_CTRL);
+
+ hif_int = readl(HIF_INT_ENABLE);
+	/* clear only the Tx packet interrupt enable bit */
+	hif_int &= ~HIF_TXPKT_INT_EN;
+ writel(hif_int, HIF_INT_ENABLE);
+}
+
+/* Enable hif rx DMA and interrupt
+ *
+ */
+void
+hif_rx_enable(void)
+{
+ hif_rx_dma_start();
+ writel((readl(HIF_INT_ENABLE) | HIF_INT_EN | HIF_RXPKT_INT_EN),
+ HIF_INT_ENABLE);
+}
+
+/* Disable hif rx DMA and interrupt
+ *
+ */
+void
+hif_rx_disable(void)
+{
+ u32 hif_int;
+
+ writel(0, HIF_RX_CTRL);
+
+ hif_int = readl(HIF_INT_ENABLE);
+	/* clear only the Rx packet interrupt enable bit */
+	hif_int &= ~HIF_RXPKT_INT_EN;
+ writel(hif_int, HIF_INT_ENABLE);
+}
diff --git a/src/spdk/dpdk/drivers/net/pfe/pfe_hif.c b/src/spdk/dpdk/drivers/net/pfe/pfe_hif.c
new file mode 100644
index 000000000..be5b2ada1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/pfe/pfe_hif.c
@@ -0,0 +1,868 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018-2019 NXP
+ */
+
+#include "pfe_logs.h"
+#include "pfe_mod.h"
+#include <sys/ioctl.h>
+#include <sys/epoll.h>
+#include <sys/eventfd.h>
+
+static int
+pfe_hif_alloc_descr(struct pfe_hif *hif)
+{
+ void *addr;
+ int err = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ addr = rte_zmalloc(NULL, HIF_RX_DESC_NT * sizeof(struct hif_desc) +
+ HIF_TX_DESC_NT * sizeof(struct hif_desc), RTE_CACHE_LINE_SIZE);
+ if (!addr) {
+ PFE_PMD_ERR("Could not allocate buffer descriptors!");
+ err = -ENOMEM;
+ goto err0;
+ }
+
+ hif->descr_baseaddr_p = pfe_mem_vtop((uintptr_t)addr);
+ hif->descr_baseaddr_v = addr;
+ hif->rx_ring_size = HIF_RX_DESC_NT;
+ hif->tx_ring_size = HIF_TX_DESC_NT;
+
+ return 0;
+
+err0:
+ return err;
+}
+
+static void
+pfe_hif_free_descr(struct pfe_hif *hif)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ rte_free(hif->descr_baseaddr_v);
+}
+
+/* pfe_hif_release_buffers */
+static void
+pfe_hif_release_buffers(struct pfe_hif *hif)
+{
+ struct hif_desc *desc;
+ uint32_t i = 0;
+ struct rte_mbuf *mbuf;
+ struct rte_pktmbuf_pool_private *mb_priv;
+
+ hif->rx_base = hif->descr_baseaddr_v;
+
+ /*Free Rx buffers */
+ desc = hif->rx_base;
+ mb_priv = rte_mempool_get_priv(hif->shm->pool);
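+	/*
+	 * Recover the rte_mbuf that owns each posted buffer:
+	 * rx_buf_vaddr[] points PFE_PKT_HEADER_SZ bytes before the packet
+	 * data, so stepping back over the headroom, the private area and
+	 * the mbuf header itself (the layout described in
+	 * pfe_hif_init_buffers()) yields the original mbuf pointer, which
+	 * is parked back into rx_buf_pool[].
+	 */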
+ for (i = 0; i < hif->rx_ring_size; i++) {
+ if (readl(&desc->data)) {
+ if (i < hif->shm->rx_buf_pool_cnt &&
+ !hif->shm->rx_buf_pool[i]) {
+ mbuf = hif->rx_buf_vaddr[i] + PFE_PKT_HEADER_SZ
+ - sizeof(struct rte_mbuf)
+ - RTE_PKTMBUF_HEADROOM
+ - mb_priv->mbuf_priv_size;
+ hif->shm->rx_buf_pool[i] = mbuf;
+ }
+ }
+ writel(0, &desc->data);
+ writel(0, &desc->status);
+ writel(0, &desc->ctrl);
+ desc++;
+ }
+}
+
+/*
+ * pfe_hif_init_buffers
+ * This function initializes the HIF Rx/Tx ring descriptors and
+ * fills the Rx queue with buffers.
+ */
+int
+pfe_hif_init_buffers(struct pfe_hif *hif)
+{
+ struct hif_desc *desc, *first_desc_p;
+ uint32_t i = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Check enough Rx buffers available in the shared memory */
+ if (hif->shm->rx_buf_pool_cnt < hif->rx_ring_size)
+ return -ENOMEM;
+
+ hif->rx_base = hif->descr_baseaddr_v;
+ memset(hif->rx_base, 0, hif->rx_ring_size * sizeof(struct hif_desc));
+
+ /*Initialize Rx descriptors */
+ desc = hif->rx_base;
+ first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p;
+
+ for (i = 0; i < hif->rx_ring_size; i++) {
+ /* Initialize Rx buffers from the shared memory */
+ struct rte_mbuf *mbuf =
+ (struct rte_mbuf *)hif->shm->rx_buf_pool[i];
+
+		/* The PFE mbuf layout is as follows:
+		 * ----------------------------------------------------------+
+		 * | mbuf  | priv | headroom (annotation + PFE data) | data  |
+		 * ----------------------------------------------------------+
+		 *
+		 * The PFE block delivers additional information (parse
+		 * results, eth id, queue id) along with the data, so each
+		 * packet posted to the HIF rx rings must include extra room
+		 * for the PFE block to write its headers. The pointer given
+		 * to the HIF rings is therefore:
+		 * mbuf->data pointer - required header size
+		 *
+		 * The HEADROOM area is used to receive the PFE block
+		 * headers. On packet reception the HIF driver uses the PFE
+		 * header information to pick the client and fill in the
+		 * parse results; afterwards the application can use or
+		 * overwrite the HEADROOM area.
+		 */
+ hif->rx_buf_vaddr[i] =
+ (void *)((size_t)mbuf->buf_addr + mbuf->data_off -
+ PFE_PKT_HEADER_SZ);
+ hif->rx_buf_addr[i] =
+ (void *)(size_t)(rte_pktmbuf_iova(mbuf) -
+ PFE_PKT_HEADER_SZ);
+ hif->rx_buf_len[i] = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
+
+ hif->shm->rx_buf_pool[i] = NULL;
+
+ writel(DDR_PHYS_TO_PFE(hif->rx_buf_addr[i]),
+ &desc->data);
+ writel(0, &desc->status);
+
+ /*
+ * Ensure everything else is written to DDR before
+ * writing bd->ctrl
+ */
+ rte_wmb();
+
+ writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM
+ | BD_CTRL_DIR | BD_CTRL_DESC_EN
+ | BD_BUF_LEN(hif->rx_buf_len[i])), &desc->ctrl);
+
+ /* Chain descriptors */
+ writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
+ desc++;
+ }
+
+ /* Overwrite last descriptor to chain it to first one*/
+ desc--;
+ writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
+
+ hif->rxtoclean_index = 0;
+
+ /*Initialize Rx buffer descriptor ring base address */
+ writel(DDR_PHYS_TO_PFE(hif->descr_baseaddr_p), HIF_RX_BDP_ADDR);
+
+ hif->tx_base = hif->rx_base + hif->rx_ring_size;
+ first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p +
+ hif->rx_ring_size;
+ memset(hif->tx_base, 0, hif->tx_ring_size * sizeof(struct hif_desc));
+
+ /*Initialize tx descriptors */
+ desc = hif->tx_base;
+
+ for (i = 0; i < hif->tx_ring_size; i++) {
+ /* Chain descriptors */
+ writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
+ writel(0, &desc->ctrl);
+ desc++;
+ }
+
+ /* Overwrite last descriptor to chain it to first one */
+ desc--;
+ writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
+ hif->txavail = hif->tx_ring_size;
+ hif->txtosend = 0;
+ hif->txtoclean = 0;
+ hif->txtoflush = 0;
+
+ /*Initialize Tx buffer descriptor ring base address */
+ writel((u32)DDR_PHYS_TO_PFE(first_desc_p), HIF_TX_BDP_ADDR);
+
+ return 0;
+}
+
+/*
+ * pfe_hif_client_register
+ *
+ * This function is used to register a client driver with the HIF driver.
+ *
+ * Return value:
+ * 0 - on Successful registration
+ */
+static int
+pfe_hif_client_register(struct pfe_hif *hif, u32 client_id,
+ struct hif_client_shm *client_shm)
+{
+ struct hif_client *client = &hif->client[client_id];
+ u32 i, cnt;
+ struct rx_queue_desc *rx_qbase;
+ struct tx_queue_desc *tx_qbase;
+ struct hif_rx_queue *rx_queue;
+ struct hif_tx_queue *tx_queue;
+ int err = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ rte_spinlock_lock(&hif->tx_lock);
+
+ if (test_bit(client_id, &hif->shm->g_client_status[0])) {
+ PFE_PMD_ERR("client %d already registered", client_id);
+ err = -1;
+ goto unlock;
+ }
+
+ memset(client, 0, sizeof(struct hif_client));
+
+ /* Initialize client Rx queues baseaddr, size */
+
+ cnt = CLIENT_CTRL_RX_Q_CNT(client_shm->ctrl);
+	/* Check if the client is requesting more queues than supported */
+ if (cnt > HIF_CLIENT_QUEUES_MAX)
+ cnt = HIF_CLIENT_QUEUES_MAX;
+
+ client->rx_qn = cnt;
+ rx_qbase = (struct rx_queue_desc *)client_shm->rx_qbase;
+ for (i = 0; i < cnt; i++) {
+ rx_queue = &client->rx_q[i];
+ rx_queue->base = rx_qbase + i * client_shm->rx_qsize;
+ rx_queue->size = client_shm->rx_qsize;
+ rx_queue->write_idx = 0;
+ }
+
+ /* Initialize client Tx queues baseaddr, size */
+ cnt = CLIENT_CTRL_TX_Q_CNT(client_shm->ctrl);
+
+	/* Check if the client is requesting more queues than supported */
+ if (cnt > HIF_CLIENT_QUEUES_MAX)
+ cnt = HIF_CLIENT_QUEUES_MAX;
+
+ client->tx_qn = cnt;
+ tx_qbase = (struct tx_queue_desc *)client_shm->tx_qbase;
+ for (i = 0; i < cnt; i++) {
+ tx_queue = &client->tx_q[i];
+ tx_queue->base = tx_qbase + i * client_shm->tx_qsize;
+ tx_queue->size = client_shm->tx_qsize;
+ tx_queue->ack_idx = 0;
+ }
+
+ set_bit(client_id, &hif->shm->g_client_status[0]);
+
+unlock:
+ rte_spinlock_unlock(&hif->tx_lock);
+
+ return err;
+}
+
+/*
+ * pfe_hif_client_unregister
+ *
+ * This function is used to unregister a client from the HIF driver.
+ *
+ */
+static void
+pfe_hif_client_unregister(struct pfe_hif *hif, u32 client_id)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ /*
+ * Mark client as no longer available (which prevents further packet
+ * receive for this client)
+ */
+ rte_spinlock_lock(&hif->tx_lock);
+
+ if (!test_bit(client_id, &hif->shm->g_client_status[0])) {
+ PFE_PMD_ERR("client %d not registered", client_id);
+
+ rte_spinlock_unlock(&hif->tx_lock);
+ return;
+ }
+
+ clear_bit(client_id, &hif->shm->g_client_status[0]);
+
+ rte_spinlock_unlock(&hif->tx_lock);
+}
+
+/*
+ * client_put_rxpacket-
+ */
+static struct rte_mbuf *
+client_put_rxpacket(struct hif_rx_queue *queue,
+ void *pkt, u32 len,
+ u32 flags, u32 client_ctrl,
+ struct rte_mempool *pool,
+ u32 *rem_len)
+{
+ struct rx_queue_desc *desc = queue->base + queue->write_idx;
+ struct rte_mbuf *mbuf = NULL;
+
+
+ if (readl(&desc->ctrl) & CL_DESC_OWN) {
+ mbuf = rte_pktmbuf_alloc(pool);
+ if (unlikely(!mbuf)) {
+ PFE_PMD_WARN("Buffer allocation failure\n");
+ return NULL;
+ }
+
+ desc->data = pkt;
+ desc->client_ctrl = client_ctrl;
+ /*
+ * Ensure everything else is written to DDR before
+ * writing bd->ctrl
+ */
+ rte_wmb();
+ writel(CL_DESC_BUF_LEN(len) | flags, &desc->ctrl);
+ queue->write_idx = (queue->write_idx + 1)
+ & (queue->size - 1);
+
+ *rem_len = mbuf->buf_len;
+ }
+
+ return mbuf;
+}
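+
+/*
+ * Ownership handshake sketch (illustrative, not driver code): the HIF side
+ * above fills a slot only while CL_DESC_OWN is set and then clears it via
+ * the ctrl write; the client side in hif_lib_receive_pkt() does the mirror
+ * image, consuming slots whose CL_DESC_OWN bit is clear and setting it
+ * again once the buffer has been taken over:
+ *
+ *	if (!(desc->ctrl & CL_DESC_OWN)) {
+ *		consume(desc->data);		// hypothetical client hook
+ *		desc->ctrl = CL_DESC_OWN;	// hand the slot back to HIF
+ *	}
+ */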
+
+/*
+ * pfe_hif_rx_process -
+ * Processes the HIF Rx queue: dequeues packets from the Rx ring, hands
+ * them to the corresponding client queue and refills each descriptor.
+ */
+int
+pfe_hif_rx_process(struct pfe *pfe, int budget)
+{
+ struct hif_desc *desc;
+ struct hif_hdr *pkt_hdr;
+ struct __hif_hdr hif_hdr;
+ void *free_buf;
+ int rtc, len, rx_processed = 0;
+ struct __hif_desc local_desc;
+ int flags = 0, wait_for_last = 0, retry = 0;
+ unsigned int buf_size = 0;
+ struct rte_mbuf *mbuf = NULL;
+ struct pfe_hif *hif = &pfe->hif;
+
+ rte_spinlock_lock(&hif->lock);
+
+ rtc = hif->rxtoclean_index;
+
+ while (rx_processed < budget) {
+ desc = hif->rx_base + rtc;
+
+ __memcpy12(&local_desc, desc);
+
+ /* Descriptor still owned by the HIF hardware: no packet to process */
+ if (local_desc.ctrl & BD_CTRL_DESC_EN) {
+ if (unlikely(wait_for_last))
+ continue;
+ else
+ break;
+ }
+
+ len = BD_BUF_LEN(local_desc.ctrl);
+ pkt_hdr = (struct hif_hdr *)hif->rx_buf_vaddr[rtc];
+
+ /* First buffer of a packet: capture the HIF header */
+ if (!hif->started) {
+ hif->started = 1;
+
+ __memcpy8(&hif_hdr, pkt_hdr);
+
+ hif->qno = hif_hdr.hdr.q_num;
+ hif->client_id = hif_hdr.hdr.client_id;
+ hif->client_ctrl = (hif_hdr.hdr.client_ctrl1 << 16) |
+ hif_hdr.hdr.client_ctrl;
+ flags = CL_DESC_FIRST;
+
+ } else {
+ flags = 0;
+ }
+
+ if (local_desc.ctrl & BD_CTRL_LIFM) {
+ flags |= CL_DESC_LAST;
+ wait_for_last = 0;
+ } else {
+ wait_for_last = 1;
+ }
+
+ /* Check that the client id is valid and still registered */
+ if (hif->client_id >= HIF_CLIENTS_MAX ||
+ !(test_bit(hif->client_id,
+ &hif->shm->g_client_status[0]))) {
+ PFE_PMD_INFO("packet with invalid client id %d qnum %d",
+ hif->client_id, hif->qno);
+
+ free_buf = hif->rx_buf_addr[rtc];
+
+ goto pkt_drop;
+ }
+
+ /* Check for a valid queue number */
+ if (hif->client[hif->client_id].rx_qn <= hif->qno) {
+ PFE_DP_LOG(DEBUG, "packet with invalid queue: %d",
+ hif->qno);
+ hif->qno = 0;
+ }
+
+retry:
+ mbuf =
+ client_put_rxpacket(&hif->client[hif->client_id].rx_q[hif->qno],
+ (void *)pkt_hdr, len, flags,
+ hif->client_ctrl, hif->shm->pool,
+ &buf_size);
+
+ if (unlikely(!mbuf)) {
+ if (!retry) {
+ pfe_tx_do_cleanup(pfe);
+ retry = 1;
+ goto retry;
+ }
+ rx_processed = budget;
+
+ if (flags & CL_DESC_FIRST)
+ hif->started = 0;
+
+ PFE_DP_LOG(DEBUG, "No buffers");
+ break;
+ }
+
+ retry = 0;
+
+ free_buf = (void *)(size_t)rte_pktmbuf_iova(mbuf);
+ free_buf = free_buf - PFE_PKT_HEADER_SZ;
+
+ /*Fill free buffer in the descriptor */
+ hif->rx_buf_addr[rtc] = free_buf;
+ hif->rx_buf_vaddr[rtc] = (void *)((size_t)mbuf->buf_addr +
+ mbuf->data_off - PFE_PKT_HEADER_SZ);
+ hif->rx_buf_len[rtc] = buf_size - RTE_PKTMBUF_HEADROOM;
+
+pkt_drop:
+ writel(DDR_PHYS_TO_PFE(free_buf), &desc->data);
+ /*
+ * Ensure everything else is written to DDR before
+ * writing bd->ctrl
+ */
+ rte_wmb();
+ writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM | BD_CTRL_DIR |
+ BD_CTRL_DESC_EN | BD_BUF_LEN(hif->rx_buf_len[rtc])),
+ &desc->ctrl);
+
+ rtc = (rtc + 1) & (hif->rx_ring_size - 1);
+
+ if (local_desc.ctrl & BD_CTRL_LIFM) {
+ if (!(hif->client_ctrl & HIF_CTRL_RX_CONTINUED))
+ rx_processed++;
+
+ hif->started = 0;
+ }
+ }
+
+
+ hif->rxtoclean_index = rtc;
+ rte_spinlock_unlock(&hif->lock);
+
+ /* we made some progress, re-start rx dma in case it stopped */
+ hif_rx_dma_start();
+
+ return rx_processed;
+}
+
+/*
+ * client_ack_txpacket -
+ * This function acks the Tx packet in the given client Tx queue by clearing
+ * the ownership bit in the descriptor.
+ */
+static int
+client_ack_txpacket(struct pfe_hif *hif, unsigned int client_id,
+ unsigned int q_no)
+{
+ struct hif_tx_queue *queue = &hif->client[client_id].tx_q[q_no];
+ struct tx_queue_desc *desc = queue->base + queue->ack_idx;
+
+ if (readl(&desc->ctrl) & CL_DESC_OWN) {
+ writel((readl(&desc->ctrl) & ~CL_DESC_OWN), &desc->ctrl);
+ queue->ack_idx = (queue->ack_idx + 1) & (queue->size - 1);
+
+ return 0;
+
+ } else {
+ /*This should not happen */
+ PFE_PMD_ERR("%d %d %d %d %d %p %d",
+ hif->txtosend, hif->txtoclean, hif->txavail,
+ client_id, q_no, queue, queue->ack_idx);
+ return 1;
+ }
+}
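+
+/*
+ * Tx descriptor lifecycle sketch (illustrative): the client sets
+ * CL_DESC_OWN when queueing a buffer (hif_lib_xmit_pkt), the HIF clears it
+ * above once the transmit is done, and the client reclaims the buffer in
+ * hif_lib_tx_get_next_complete() when it observes the bit cleared:
+ *
+ *	client: desc->ctrl = CL_DESC_OWN | CL_DESC_FLAGS(flags);  // enqueue
+ *	hif:    writel(ctrl & ~CL_DESC_OWN, &desc->ctrl);         // ack
+ *	client: if (!(desc->ctrl & CL_DESC_OWN)) reclaim(desc->data);
+ */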
+
+static void
+__hif_tx_done_process(struct pfe *pfe, int count)
+{
+ struct hif_desc *desc;
+ struct hif_desc_sw *desc_sw;
+ unsigned int ttc, tx_avl;
+ int pkts_done[HIF_CLIENTS_MAX] = {0, 0};
+ struct pfe_hif *hif = &pfe->hif;
+
+ ttc = hif->txtoclean;
+ tx_avl = hif->txavail;
+
+ while ((tx_avl < hif->tx_ring_size) && count--) {
+ desc = hif->tx_base + ttc;
+
+ if (readl(&desc->ctrl) & BD_CTRL_DESC_EN)
+ break;
+
+ desc_sw = &hif->tx_sw_queue[ttc];
+
+ if (desc_sw->client_id >= HIF_CLIENTS_MAX)
+ PFE_PMD_ERR("Invalid cl id %d", desc_sw->client_id);
+
+ pkts_done[desc_sw->client_id]++;
+
+ client_ack_txpacket(hif, desc_sw->client_id, desc_sw->q_no);
+
+ ttc = (ttc + 1) & (hif->tx_ring_size - 1);
+ tx_avl++;
+ }
+
+ if (pkts_done[0])
+ hif_lib_indicate_client(pfe->hif_client[0], EVENT_TXDONE_IND,
+ 0);
+ if (pkts_done[1])
+ hif_lib_indicate_client(pfe->hif_client[1], EVENT_TXDONE_IND,
+ 0);
+ hif->txtoclean = ttc;
+ hif->txavail = tx_avl;
+}
+
+static inline void
+hif_tx_done_process(struct pfe *pfe, int count)
+{
+ struct pfe_hif *hif = &pfe->hif;
+ rte_spinlock_lock(&hif->tx_lock);
+ __hif_tx_done_process(pfe, count);
+ rte_spinlock_unlock(&hif->tx_lock);
+}
+
+void
+pfe_tx_do_cleanup(struct pfe *pfe)
+{
+ hif_tx_done_process(pfe, HIF_TX_DESC_NT);
+}
+
+/*
+ * hif_xmit_pkt -
+ * This function puts one packet buffer in the HIF Tx queue
+ */
+void
+hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int
+ q_no, void *data, u32 len, unsigned int flags)
+{
+ struct hif_desc *desc;
+ struct hif_desc_sw *desc_sw;
+
+ desc = hif->tx_base + hif->txtosend;
+ desc_sw = &hif->tx_sw_queue[hif->txtosend];
+
+ desc_sw->len = len;
+ desc_sw->client_id = client_id;
+ desc_sw->q_no = q_no;
+ desc_sw->flags = flags;
+
+ writel((u32)DDR_PHYS_TO_PFE(data), &desc->data);
+
+ hif->txtosend = (hif->txtosend + 1) & (hif->tx_ring_size - 1);
+ hif->txavail--;
+
+ if ((!((flags & HIF_DATA_VALID) && (flags &
+ HIF_LAST_BUFFER))))
+ goto skip_tx;
+
+ /*
+ * Ensure everything else is written to DDR before
+ * writing bd->ctrl
+ */
+ rte_wmb();
+
+ do {
+ desc_sw = &hif->tx_sw_queue[hif->txtoflush];
+ desc = hif->tx_base + hif->txtoflush;
+
+ if (desc_sw->flags & HIF_LAST_BUFFER) {
+ writel((BD_CTRL_LIFM |
+ BD_CTRL_BRFETCH_DISABLE | BD_CTRL_RTFETCH_DISABLE
+ | BD_CTRL_PARSE_DISABLE | BD_CTRL_DESC_EN |
+ BD_BUF_LEN(desc_sw->len)),
+ &desc->ctrl);
+ } else {
+ writel((BD_CTRL_DESC_EN |
+ BD_BUF_LEN(desc_sw->len)), &desc->ctrl);
+ }
+ hif->txtoflush = (hif->txtoflush + 1) & (hif->tx_ring_size - 1);
+ } while (hif->txtoflush != hif->txtosend);
+
+skip_tx:
+ return;
+}
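+
+/*
+ * Usage sketch (assumed caller pattern, for illustration): a two-segment
+ * packet is pushed with HIF_FIRST_BUFFER on the first call and
+ * HIF_LAST_BUFFER | HIF_DATA_VALID on the final one; only the last call
+ * runs the batched descriptor-enable loop above:
+ *
+ *	hif_xmit_pkt(hif, id, q, seg0, len0, HIF_FIRST_BUFFER);
+ *	hif_xmit_pkt(hif, id, q, seg1, len1,
+ *		     HIF_LAST_BUFFER | HIF_DATA_VALID);
+ */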
+
+void
+hif_process_client_req(struct pfe_hif *hif, int req,
+ int data1, __rte_unused int data2)
+{
+ unsigned int client_id = data1;
+
+ if (client_id >= HIF_CLIENTS_MAX) {
+ PFE_PMD_ERR("client id %d out of bounds", client_id);
+ return;
+ }
+
+ switch (req) {
+ case REQUEST_CL_REGISTER:
+ /* Request to register a client */
+ PFE_PMD_INFO("register client_id %d", client_id);
+ pfe_hif_client_register(hif, client_id, (struct
+ hif_client_shm *)&hif->shm->client[client_id]);
+ break;
+
+ case REQUEST_CL_UNREGISTER:
+ PFE_PMD_INFO("unregister client_id %d", client_id);
+
+ /* Request to unregister a client */
+ pfe_hif_client_unregister(hif, client_id);
+
+ break;
+
+ default:
+ PFE_PMD_ERR("unsupported request %d", req);
+ break;
+ }
+
+ /*
+ * Process client Tx queues.
+ * Currently there is no check for pending Tx.
+ */
+}
+
+#if defined(LS1012A_PFE_RESET_WA)
+static void
+pfe_hif_disable_rx_desc(struct pfe_hif *hif)
+{
+ u32 ii;
+ struct hif_desc *desc = hif->rx_base;
+
+ /*Mark all descriptors as LAST_BD */
+ for (ii = 0; ii < hif->rx_ring_size; ii++) {
+ desc->ctrl |= BD_CTRL_LAST_BD;
+ desc++;
+ }
+}
+
+struct class_rx_hdr_t {
+ u32 next_ptr; /* ptr to the start of the first DDR buffer */
+ u16 length; /* total packet length */
+ u16 phyno; /* input physical port number */
+ u32 status; /* gemac status bits */
+ u32 status2; /* reserved for software usage */
+};
+
+/* STATUS_BAD_FRAME_ERR is set for all errors (including checksums if enabled)
+ * except overflow
+ */
+#define STATUS_BAD_FRAME_ERR BIT(16)
+#define STATUS_LENGTH_ERR BIT(17)
+#define STATUS_CRC_ERR BIT(18)
+#define STATUS_TOO_SHORT_ERR BIT(19)
+#define STATUS_TOO_LONG_ERR BIT(20)
+#define STATUS_CODE_ERR BIT(21)
+#define STATUS_MC_HASH_MATCH BIT(22)
+#define STATUS_CUMULATIVE_ARC_HIT BIT(23)
+#define STATUS_UNICAST_HASH_MATCH BIT(24)
+#define STATUS_IP_CHECKSUM_CORRECT BIT(25)
+#define STATUS_TCP_CHECKSUM_CORRECT BIT(26)
+#define STATUS_UDP_CHECKSUM_CORRECT BIT(27)
+#define STATUS_OVERFLOW_ERR BIT(28) /* GPI error */
+#define MIN_PKT_SIZE 64
+#define DUMMY_PKT_COUNT 128
+
+static inline void
+copy_to_lmem(u32 *dst, u32 *src, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i += sizeof(u32)) {
+ *dst = htonl(*src);
+ dst++; src++;
+ }
+}
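+
+/*
+ * Example (illustrative): copy_to_lmem() byte-swaps every 32-bit word on a
+ * little-endian host, since LMEM expects big-endian data:
+ *
+ *	u32 src = 0x11223344, dst;
+ *	copy_to_lmem(&dst, &src, sizeof(src));	// dst == 0x44332211 on LE
+ */
+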
+#if defined(RTE_TOOLCHAIN_GCC)
+__attribute__ ((optimize(1)))
+#endif
+static void
+send_dummy_pkt_to_hif(void)
+{
+ void *lmem_ptr, *ddr_ptr, *lmem_virt_addr;
+ u64 physaddr;
+ struct class_rx_hdr_t local_hdr;
+ static u32 dummy_pkt[] = {
+ 0x33221100, 0x2b785544, 0xd73093cb, 0x01000608,
+ 0x04060008, 0x2b780200, 0xd73093cb, 0x0a01a8c0,
+ 0x33221100, 0xa8c05544, 0x00000301, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0xbe86c51f };
+
+ ddr_ptr = (void *)(size_t)readl(BMU2_BASE_ADDR + BMU_ALLOC_CTRL);
+ if (!ddr_ptr)
+ return;
+
+ lmem_ptr = (void *)(size_t)readl(BMU1_BASE_ADDR + BMU_ALLOC_CTRL);
+ if (!lmem_ptr)
+ return;
+
+ PFE_PMD_INFO("Sending a dummy pkt to HIF %p %p", ddr_ptr, lmem_ptr);
+ physaddr = DDR_VIRT_TO_PFE(ddr_ptr);
+
+ lmem_virt_addr = (void *)CBUS_PFE_TO_VIRT((unsigned long)lmem_ptr);
+
+ local_hdr.phyno = htons(0); /* RX_PHY_0 */
+ local_hdr.length = htons(MIN_PKT_SIZE);
+
+ local_hdr.next_ptr = htonl((u32)physaddr);
+ /*Mark checksum is correct */
+ local_hdr.status = htonl((STATUS_IP_CHECKSUM_CORRECT |
+ STATUS_UDP_CHECKSUM_CORRECT |
+ STATUS_TCP_CHECKSUM_CORRECT |
+ STATUS_UNICAST_HASH_MATCH |
+ STATUS_CUMULATIVE_ARC_HIT));
+ copy_to_lmem((u32 *)lmem_virt_addr, (u32 *)&local_hdr,
+ sizeof(local_hdr));
+
+ copy_to_lmem((u32 *)(lmem_virt_addr + LMEM_HDR_SIZE), (u32 *)dummy_pkt,
+ 0x40);
+
+ writel((unsigned long)lmem_ptr, CLASS_INQ_PKTPTR);
+}
+
+void
+pfe_hif_rx_idle(struct pfe_hif *hif)
+{
+ int hif_stop_loop = DUMMY_PKT_COUNT;
+ u32 rx_status;
+
+ pfe_hif_disable_rx_desc(hif);
+ PFE_PMD_INFO("Bringing hif to idle state...");
+ writel(0, HIF_INT_ENABLE);
+ /*If HIF Rx BDP is busy send a dummy packet */
+ do {
+ rx_status = readl(HIF_RX_STATUS);
+ if (rx_status & BDP_CSR_RX_DMA_ACTV)
+ send_dummy_pkt_to_hif();
+
+ sleep(1);
+ } while (--hif_stop_loop);
+
+ if (readl(HIF_RX_STATUS) & BDP_CSR_RX_DMA_ACTV)
+ PFE_PMD_ERR("Failed\n");
+ else
+ PFE_PMD_INFO("Done\n");
+}
+#endif
+
+/*
+ * pfe_hif_init
+ * This function initializes the HIF descriptors, locks, and interrupt
+ * support (when PFE_INTR_SUPPORT is set in the environment).
+ */
+int
+pfe_hif_init(struct pfe *pfe)
+{
+ struct pfe_hif *hif = &pfe->hif;
+ int err;
+
+ PMD_INIT_FUNC_TRACE();
+
+#if defined(LS1012A_PFE_RESET_WA)
+ pfe_hif_rx_idle(hif);
+#endif
+
+ err = pfe_hif_alloc_descr(hif);
+ if (err)
+ goto err0;
+
+ rte_spinlock_init(&hif->tx_lock);
+ rte_spinlock_init(&hif->lock);
+
+ gpi_enable(HGPI_BASE_ADDR);
+ if (getenv("PFE_INTR_SUPPORT")) {
+ struct epoll_event epoll_ev;
+ int event_fd = -1, epoll_fd, pfe_cdev_fd;
+
+ pfe_cdev_fd = open(PFE_CDEV_PATH, O_RDWR);
+ if (pfe_cdev_fd < 0) {
+ PFE_PMD_WARN("Unable to open PFE device file (%s).\n",
+ PFE_CDEV_PATH);
+ pfe->cdev_fd = PFE_CDEV_INVALID_FD;
+ return -1;
+ }
+ pfe->cdev_fd = pfe_cdev_fd;
+
+ event_fd = eventfd(0, EFD_NONBLOCK);
+ /* hif interrupt enable */
+ err = ioctl(pfe->cdev_fd, PFE_CDEV_HIF_INTR_EN, &event_fd);
+ if (err) {
+ PFE_PMD_ERR("\nioctl failed for intr enable err: %d\n",
+ errno);
+ goto err0;
+ }
+ epoll_fd = epoll_create(1);
+ epoll_ev.events = EPOLLIN | EPOLLPRI | EPOLLET;
+ epoll_ev.data.fd = event_fd;
+ err = epoll_ctl(epoll_fd, EPOLL_CTL_ADD, event_fd, &epoll_ev);
+ if (err < 0) {
+ PFE_PMD_ERR("epoll_ctl failed with err = %d\n", errno);
+ goto err0;
+ }
+ pfe->hif.epoll_fd = epoll_fd;
+ }
+ return 0;
+err0:
+ return err;
+}
+
+/* pfe_hif_exit - disables clients and Rx/Tx, and releases HIF resources */
+void
+pfe_hif_exit(struct pfe *pfe)
+{
+ struct pfe_hif *hif = &pfe->hif;
+
+ PMD_INIT_FUNC_TRACE();
+
+ rte_spinlock_lock(&hif->lock);
+ hif->shm->g_client_status[0] = 0;
+ /* Make sure all clients are disabled*/
+ hif->shm->g_client_status[1] = 0;
+
+ rte_spinlock_unlock(&hif->lock);
+
+ if (hif->setuped) {
+#if defined(LS1012A_PFE_RESET_WA)
+ pfe_hif_rx_idle(hif);
+#endif
+ /*Disable Rx/Tx */
+ hif_rx_disable();
+ hif_tx_disable();
+
+ pfe_hif_release_buffers(hif);
+ pfe_hif_shm_clean(hif->shm);
+
+ pfe_hif_free_descr(hif);
+ pfe->hif.setuped = 0;
+ }
+ gpi_disable(HGPI_BASE_ADDR);
+}
diff --git a/src/spdk/dpdk/drivers/net/pfe/pfe_hif.h b/src/spdk/dpdk/drivers/net/pfe/pfe_hif.h
new file mode 100644
index 000000000..6aaf904bb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/pfe/pfe_hif.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018-2019 NXP
+ */
+
+#ifndef _PFE_HIF_H_
+#define _PFE_HIF_H_
+
+#define HIF_CLIENT_QUEUES_MAX 16
+#define HIF_RX_PKT_MIN_SIZE RTE_CACHE_LINE_SIZE
+/*
+ * The HIF_TX_DESC_NT value should always be greater than 4;
+ * otherwise HIF_TX_POLL_MARK will become zero.
+ */
+#define HIF_RX_DESC_NT 64
+#define HIF_TX_DESC_NT 2048
+
+#define HIF_FIRST_BUFFER BIT(0)
+#define HIF_LAST_BUFFER BIT(1)
+#define HIF_DONT_DMA_MAP BIT(2)
+#define HIF_DATA_VALID BIT(3)
+#define HIF_TSO BIT(4)
+
+enum {
+ PFE_CL_GEM0 = 0,
+ PFE_CL_GEM1,
+ HIF_CLIENTS_MAX
+};
+
+/*structure to store client queue info */
+struct hif_rx_queue {
+ struct rx_queue_desc *base;
+ u32 size;
+ u32 write_idx;
+};
+
+struct hif_tx_queue {
+ struct tx_queue_desc *base;
+ u32 size;
+ u32 ack_idx;
+};
+
+/*Structure to store the client info */
+struct hif_client {
+ unsigned int rx_qn;
+ struct hif_rx_queue rx_q[HIF_CLIENT_QUEUES_MAX];
+ unsigned int tx_qn;
+ struct hif_tx_queue tx_q[HIF_CLIENT_QUEUES_MAX];
+};
+
+/*HIF hardware buffer descriptor */
+struct hif_desc {
+ u32 ctrl;
+ u32 status;
+ u32 data;
+ u32 next;
+};
+
+struct __hif_desc {
+ u32 ctrl;
+ u32 status;
+ u32 data;
+};
+
+struct hif_desc_sw {
+ dma_addr_t data;
+ u16 len;
+ u8 client_id;
+ u8 q_no;
+ u16 flags;
+};
+
+struct hif_hdr {
+ u8 client_id;
+ u8 q_num;
+ u16 client_ctrl;
+ u16 client_ctrl1;
+};
+
+struct __hif_hdr {
+ union {
+ struct hif_hdr hdr;
+ u32 word[2];
+ };
+};
+
+struct hif_ipsec_hdr {
+ u16 sa_handle[2];
+} __packed;
+
+struct pfe_parse {
+ unsigned int packet_type;
+ uint16_t hash;
+ uint16_t parse_incomplete;
+ unsigned long long ol_flags;
+};
+
+/* HIF_CTRL_TX... defines */
+#define HIF_CTRL_TX_CHECKSUM BIT(2)
+
+/* HIF_CTRL_RX... defines */
+#define HIF_CTRL_RX_OFFSET_OFST (24)
+#define HIF_CTRL_RX_CHECKSUMMED BIT(2)
+#define HIF_CTRL_RX_CONTINUED BIT(1)
+
+struct pfe_hif {
+ /* To store registered clients in hif layer */
+ struct hif_client client[HIF_CLIENTS_MAX];
+ struct hif_shm *shm;
+
+ void *descr_baseaddr_v;
+ unsigned long descr_baseaddr_p;
+
+ struct hif_desc *rx_base;
+ u32 rx_ring_size;
+ u32 rxtoclean_index;
+ void *rx_buf_addr[HIF_RX_DESC_NT];
+ void *rx_buf_vaddr[HIF_RX_DESC_NT];
+ int rx_buf_len[HIF_RX_DESC_NT];
+ unsigned int qno;
+ unsigned int client_id;
+ unsigned int client_ctrl;
+ unsigned int started;
+ unsigned int setuped;
+
+ struct hif_desc *tx_base;
+ u32 tx_ring_size;
+ u32 txtosend;
+ u32 txtoclean;
+ u32 txavail;
+ u32 txtoflush;
+ struct hif_desc_sw tx_sw_queue[HIF_TX_DESC_NT];
+ int32_t epoll_fd; /**< File descriptor created for interrupt polling */
+
+/* tx_lock synchronizes hif packet tx as well as pfe_hif structure access */
+ rte_spinlock_t tx_lock;
+/* lock synchronizes hif rx queue processing */
+ rte_spinlock_t lock;
+ struct rte_device *dev;
+};
+
+void hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int
+ q_no, void *data, u32 len, unsigned int flags);
+void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int
+ data2);
+int pfe_hif_init(struct pfe *pfe);
+void pfe_hif_exit(struct pfe *pfe);
+void pfe_hif_rx_idle(struct pfe_hif *hif);
+int pfe_hif_rx_process(struct pfe *pfe, int budget);
+int pfe_hif_init_buffers(struct pfe_hif *hif);
+void pfe_tx_do_cleanup(struct pfe *pfe);
+
+#define __memcpy8(dst, src) memcpy(dst, src, 8)
+#define __memcpy12(dst, src) memcpy(dst, src, 12)
+#define __memcpy(dst, src, len) memcpy(dst, src, len)
+
+#endif /* _PFE_HIF_H_ */
diff --git a/src/spdk/dpdk/drivers/net/pfe/pfe_hif_lib.c b/src/spdk/dpdk/drivers/net/pfe/pfe_hif_lib.c
new file mode 100644
index 000000000..799050dce
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/pfe/pfe_hif_lib.c
@@ -0,0 +1,576 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018-2019 NXP
+ */
+
+#include "pfe_logs.h"
+#include "pfe_mod.h"
+
+unsigned int emac_txq_cnt;
+
+/*
+ * @pfe_hif_lib.c
+ * Common functions used by HIF client drivers
+ */
+
+/*HIF shared memory Global variable */
+struct hif_shm ghif_shm;
+
+/* Cleanup the HIF shared memory, release HIF rx_buffer_pool.
+ * This function should be called after pfe_hif_exit
+ *
+ * @param[in] hif_shm Shared memory address location in DDR
+ */
+void
+pfe_hif_shm_clean(struct hif_shm *hif_shm)
+{
+ unsigned int i;
+ void *pkt;
+
+ for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
+ pkt = hif_shm->rx_buf_pool[i];
+ if (pkt)
+ rte_pktmbuf_free((struct rte_mbuf *)pkt);
+ }
+}
+
+/* Initialize shared memory used between HIF driver and clients,
+ * allocate rx_buffer_pool required for HIF Rx descriptors.
+ * This function should be called before initializing HIF driver.
+ *
+ * @param[in] hif_shm Shared memory address location in DDR
+ * @return 0 on success, <0 on failure to initialize
+ */
+int
+pfe_hif_shm_init(struct hif_shm *hif_shm, struct rte_mempool *mb_pool)
+{
+ unsigned int i;
+ struct rte_mbuf *mbuf;
+
+ memset(hif_shm, 0, sizeof(struct hif_shm));
+ hif_shm->rx_buf_pool_cnt = HIF_RX_DESC_NT;
+
+ for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
+ mbuf = rte_pktmbuf_alloc(mb_pool);
+ if (mbuf)
+ hif_shm->rx_buf_pool[i] = mbuf;
+ else
+ goto err0;
+ }
+
+ return 0;
+
+err0:
+ PFE_PMD_ERR("Low memory");
+ pfe_hif_shm_clean(hif_shm);
+ return -ENOMEM;
+}
+
+/*This function sends an indication to the HIF driver
+ *
+ * @param[in] hif hif context
+ */
+static void
+hif_lib_indicate_hif(struct pfe_hif *hif, int req, int data1, int
+ data2)
+{
+ hif_process_client_req(hif, req, data1, data2);
+}
+
+void
+hif_lib_indicate_client(struct hif_client_s *client, int event_type,
+ int qno)
+{
+ if (!client || event_type >= HIF_EVENT_MAX ||
+ qno >= HIF_CLIENT_QUEUES_MAX)
+ return;
+
+ if (!test_and_set_bit(qno, &client->queue_mask[event_type]))
+ client->event_handler(client->priv, event_type, qno);
+}
+
+/*This function releases the Rx queue descriptor memory and pre-filled buffers
+ *
+ * @param[in] client hif_client context
+ */
+static void
+hif_lib_client_release_rx_buffers(struct hif_client_s *client)
+{
+ struct rte_mempool *pool;
+ struct rte_pktmbuf_pool_private *mb_priv;
+ struct rx_queue_desc *desc;
+ unsigned int qno, ii;
+ void *buf;
+
+ pool = client->pfe->hif.shm->pool;
+ mb_priv = rte_mempool_get_priv(pool);
+ for (qno = 0; qno < client->rx_qn; qno++) {
+ desc = client->rx_q[qno].base;
+
+ for (ii = 0; ii < client->rx_q[qno].size; ii++) {
+ buf = (void *)desc->data;
+ if (buf) {
+ /* Data pointer to mbuf pointer calculation:
+ * "data - user private data - headroom - mbuf size"
+ * The actual data pointer given to the HIF BDs was
+ * "mbuf->data_off - PFE_PKT_HEADER_SZ"
+ */
+ buf = buf + PFE_PKT_HEADER_SZ
+ - sizeof(struct rte_mbuf)
+ - RTE_PKTMBUF_HEADROOM
+ - mb_priv->mbuf_priv_size;
+ rte_pktmbuf_free((struct rte_mbuf *)buf);
+ desc->ctrl = 0;
+ }
+ desc++;
+ }
+ }
+ rte_free(client->rx_qbase);
+}
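+
+/*
+ * Worked example of the pointer recovery above (illustrative, assuming
+ * data_off == RTE_PKTMBUF_HEADROOM): the address handed to the HIF BDs was
+ *
+ *	data = buf_addr + data_off - PFE_PKT_HEADER_SZ
+ *
+ * and buf_addr itself sits right after the rte_mbuf struct and its private
+ * area, so stepping back
+ *
+ *	data + PFE_PKT_HEADER_SZ - sizeof(struct rte_mbuf)
+ *	     - RTE_PKTMBUF_HEADROOM - mb_priv->mbuf_priv_size
+ *
+ * lands on the struct rte_mbuf that rte_pktmbuf_free() expects.
+ */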
+
+/*This function allocates memory for the rxq descriptors and pre-fills the rx
+ * queues with buffers.
+ * @param[in] client client context
+ * @param[in] q_size size of each rxQ; all queues are of the same size
+ */
+static int
+hif_lib_client_init_rx_buffers(struct hif_client_s *client,
+ int q_size)
+{
+ struct rx_queue_desc *desc;
+ struct hif_client_rx_queue *queue;
+ unsigned int ii, qno;
+
+ /*Allocate memory for the client queues */
+ client->rx_qbase = rte_malloc(NULL, client->rx_qn * q_size *
+ sizeof(struct rx_queue_desc), RTE_CACHE_LINE_SIZE);
+ if (!client->rx_qbase)
+ goto err;
+
+ for (qno = 0; qno < client->rx_qn; qno++) {
+ queue = &client->rx_q[qno];
+
+ queue->base = client->rx_qbase + qno * q_size * sizeof(struct
+ rx_queue_desc);
+ queue->size = q_size;
+ queue->read_idx = 0;
+ queue->write_idx = 0;
+ queue->queue_id = 0;
+ queue->port_id = client->port_id;
+ queue->priv = client->priv;
+ PFE_PMD_DEBUG("rx queue: %d, base: %p, size: %d\n", qno,
+ queue->base, queue->size);
+ }
+
+ for (qno = 0; qno < client->rx_qn; qno++) {
+ queue = &client->rx_q[qno];
+ desc = queue->base;
+
+ for (ii = 0; ii < queue->size; ii++) {
+ desc->ctrl = CL_DESC_OWN;
+ desc++;
+ }
+ }
+
+ return 0;
+
+err:
+ return 1;
+}
+
+
+static void
+hif_lib_client_cleanup_tx_queue(struct hif_client_tx_queue *queue)
+{
+ /*
+ * Check if there are any pending packets. The client must flush the tx
+ * queues before unregistering, by calling
+ * hif_lib_tx_get_next_complete().
+ *
+ * HIF no longer services this queue since the client is unregistered.
+ */
+ if (queue->tx_pending)
+ PFE_PMD_ERR("pending transmit packet");
+}
+
+static void
+hif_lib_client_release_tx_buffers(struct hif_client_s *client)
+{
+ unsigned int qno;
+
+ for (qno = 0; qno < client->tx_qn; qno++)
+ hif_lib_client_cleanup_tx_queue(&client->tx_q[qno]);
+
+ rte_free(client->tx_qbase);
+}
+
+static int
+hif_lib_client_init_tx_buffers(struct hif_client_s *client, int
+ q_size)
+{
+ struct hif_client_tx_queue *queue;
+ unsigned int qno;
+
+ client->tx_qbase = rte_malloc(NULL, client->tx_qn * q_size *
+ sizeof(struct tx_queue_desc), RTE_CACHE_LINE_SIZE);
+ if (!client->tx_qbase)
+ return 1;
+
+ for (qno = 0; qno < client->tx_qn; qno++) {
+ queue = &client->tx_q[qno];
+
+ queue->base = client->tx_qbase + qno * q_size * sizeof(struct
+ tx_queue_desc);
+ queue->size = q_size;
+ queue->read_idx = 0;
+ queue->write_idx = 0;
+ queue->tx_pending = 0;
+ queue->nocpy_flag = 0;
+ queue->prev_tmu_tx_pkts = 0;
+ queue->done_tmu_tx_pkts = 0;
+ queue->priv = client->priv;
+ queue->queue_id = 0;
+ queue->port_id = client->port_id;
+
+ PFE_PMD_DEBUG("tx queue: %d, base: %p, size: %d", qno,
+ queue->base, queue->size);
+ }
+
+ return 0;
+}
+
+static int
+hif_lib_event_dummy(__rte_unused void *priv,
+ __rte_unused int event_type, __rte_unused int qno)
+{
+ return 0;
+}
+
+int
+hif_lib_client_register(struct hif_client_s *client)
+{
+ struct hif_shm *hif_shm;
+ struct hif_client_shm *client_shm;
+ int err, i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /*Allocate memory before spin_lock*/
+ if (hif_lib_client_init_rx_buffers(client, client->rx_qsize)) {
+ err = -ENOMEM;
+ goto err_rx;
+ }
+
+ if (hif_lib_client_init_tx_buffers(client, client->tx_qsize)) {
+ err = -ENOMEM;
+ goto err_tx;
+ }
+
+ rte_spinlock_lock(&client->pfe->hif.lock);
+ if (!(client->pfe) || client->id >= HIF_CLIENTS_MAX ||
+ client->pfe->hif_client[client->id]) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ hif_shm = client->pfe->hif.shm;
+
+ if (!client->event_handler)
+ client->event_handler = hif_lib_event_dummy;
+
+ /*Initialize client specific shared memory */
+ client_shm = (struct hif_client_shm *)&hif_shm->client[client->id];
+ client_shm->rx_qbase = (unsigned long)client->rx_qbase;
+ client_shm->rx_qsize = client->rx_qsize;
+ client_shm->tx_qbase = (unsigned long)client->tx_qbase;
+ client_shm->tx_qsize = client->tx_qsize;
+ client_shm->ctrl = (client->tx_qn << CLIENT_CTRL_TX_Q_CNT_OFST) |
+ (client->rx_qn << CLIENT_CTRL_RX_Q_CNT_OFST);
+
+ /* By default all events are unmasked */
+ for (i = 0; i < HIF_EVENT_MAX; i++)
+ client->queue_mask[i] = 0;
+
+ /*Indicate to HIF driver*/
+ hif_lib_indicate_hif(&client->pfe->hif, REQUEST_CL_REGISTER,
+ client->id, 0);
+
+ PFE_PMD_DEBUG("client: %p, client_id: %d, tx_qsize: %d, rx_qsize: %d",
+ client, client->id, client->tx_qsize, client->rx_qsize);
+
+ client->cpu_id = -1;
+
+ client->pfe->hif_client[client->id] = client;
+ rte_spinlock_unlock(&client->pfe->hif.lock);
+
+ return 0;
+
+err:
+ rte_spinlock_unlock(&client->pfe->hif.lock);
+ hif_lib_client_release_tx_buffers(client);
+
+err_tx:
+ hif_lib_client_release_rx_buffers(client);
+
+err_rx:
+ return err;
+}
+
+int
+hif_lib_client_unregister(struct hif_client_s *client)
+{
+ struct pfe *pfe = client->pfe;
+ u32 client_id = client->id;
+
+ PFE_PMD_INFO("client: %p, client_id: %d, txQ_depth: %d, rxQ_depth: %d",
+ client, client->id, client->tx_qsize, client->rx_qsize);
+
+ rte_spinlock_lock(&pfe->hif.lock);
+ hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_UNREGISTER, client->id, 0);
+
+ hif_lib_client_release_tx_buffers(client);
+ hif_lib_client_release_rx_buffers(client);
+ pfe->hif_client[client_id] = NULL;
+ rte_spinlock_unlock(&pfe->hif.lock);
+
+ return 0;
+}
+
+int
+hif_lib_event_handler_start(struct hif_client_s *client, int event,
+ int qno)
+{
+ struct hif_client_rx_queue *queue = &client->rx_q[qno];
+ struct rx_queue_desc *desc = queue->base + queue->read_idx;
+
+ if (event >= HIF_EVENT_MAX || qno >= HIF_CLIENT_QUEUES_MAX) {
+ PFE_PMD_WARN("Unsupported event : %d queue number : %d",
+ event, qno);
+ return -1;
+ }
+
+ test_and_clear_bit(qno, &client->queue_mask[event]);
+
+ switch (event) {
+ case EVENT_RX_PKT_IND:
+ if (!(desc->ctrl & CL_DESC_OWN))
+ hif_lib_indicate_client(client,
+ EVENT_RX_PKT_IND, qno);
+ break;
+
+ case EVENT_HIGH_RX_WM:
+ case EVENT_TXDONE_IND:
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+#ifdef RTE_LIBRTE_PFE_SW_PARSE
+static inline void
+pfe_sw_parse_pkt(struct rte_mbuf *mbuf)
+{
+ struct rte_net_hdr_lens hdr_lens;
+
+ mbuf->packet_type = rte_net_get_ptype(mbuf, &hdr_lens,
+ RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK
+ | RTE_PTYPE_L4_MASK);
+ mbuf->l2_len = hdr_lens.l2_len;
+ mbuf->l3_len = hdr_lens.l3_len;
+}
+#endif
+
+/*
+ * This function gets packets from the specified client queue.
+ * It also returns the consumed descriptors to the HIF driver.
+ */
+int
+hif_lib_receive_pkt(struct hif_client_rx_queue *queue,
+ struct rte_mempool *pool, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct rx_queue_desc *desc;
+ struct pfe_eth_priv_s *priv = queue->priv;
+ struct rte_pktmbuf_pool_private *mb_priv;
+ struct rte_mbuf *mbuf, *p_mbuf = NULL, *first_mbuf = NULL;
+ struct rte_eth_stats *stats = &priv->stats;
+ int i, wait_for_last = 0;
+#ifndef RTE_LIBRTE_PFE_SW_PARSE
+ struct pfe_parse *parse_res;
+#endif
+
+ for (i = 0; i < nb_pkts;) {
+ do {
+ desc = queue->base + queue->read_idx;
+ if ((desc->ctrl & CL_DESC_OWN)) {
+ stats->ipackets += i;
+ return i;
+ }
+
+ mb_priv = rte_mempool_get_priv(pool);
+
+ mbuf = desc->data + PFE_PKT_HEADER_SZ
+ - sizeof(struct rte_mbuf)
+ - RTE_PKTMBUF_HEADROOM
+ - mb_priv->mbuf_priv_size;
+ mbuf->next = NULL;
+ if (desc->ctrl & CL_DESC_FIRST) {
+ /* TODO size of priv data if present in
+ * descriptor
+ */
+ u16 size = 0;
+ mbuf->pkt_len = CL_DESC_BUF_LEN(desc->ctrl)
+ - PFE_PKT_HEADER_SZ - size;
+ mbuf->data_len = mbuf->pkt_len;
+ mbuf->port = queue->port_id;
+#ifdef RTE_LIBRTE_PFE_SW_PARSE
+ pfe_sw_parse_pkt(mbuf);
+#else
+ parse_res = (struct pfe_parse *)(desc->data +
+ PFE_HIF_SIZE);
+ mbuf->packet_type = parse_res->packet_type;
+#endif
+ mbuf->nb_segs = 1;
+ first_mbuf = mbuf;
+ rx_pkts[i++] = first_mbuf;
+ } else {
+ mbuf->data_len = CL_DESC_BUF_LEN(desc->ctrl);
+ mbuf->data_off = mbuf->data_off -
+ PFE_PKT_HEADER_SZ;
+ first_mbuf->pkt_len += mbuf->data_len;
+ first_mbuf->nb_segs++;
+ p_mbuf->next = mbuf;
+ }
+ stats->ibytes += mbuf->data_len;
+ p_mbuf = mbuf;
+
+ if (desc->ctrl & CL_DESC_LAST)
+ wait_for_last = 0;
+ else
+ wait_for_last = 1;
+ /*
+ * Needed so we don't free a buffer/page
+ * twice on module_exit
+ */
+ desc->data = NULL;
+
+ /*
+ * Ensure everything else is written to DDR before
+ * writing bd->ctrl
+ */
+ rte_wmb();
+
+ desc->ctrl = CL_DESC_OWN;
+ queue->read_idx = (queue->read_idx + 1) &
+ (queue->size - 1);
+ } while (wait_for_last);
+ }
+ stats->ipackets += i;
+ return i;
+}
+
+static inline void
+hif_hdr_write(struct hif_hdr *pkt_hdr, unsigned int
+ client_id, unsigned int qno,
+ u32 client_ctrl)
+{
+ /* Optimize the write since the destination may be non-cacheable */
+ if (!((unsigned long)pkt_hdr & 0x3)) {
+ ((u32 *)pkt_hdr)[0] = (client_ctrl << 16) | (qno << 8) |
+ client_id;
+ } else {
+ ((u16 *)pkt_hdr)[0] = (qno << 8) | (client_id & 0xFF);
+ ((u16 *)pkt_hdr)[1] = (client_ctrl & 0xFFFF);
+ }
+}
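+
+/*
+ * Layout sketch (assuming a little-endian host): the aligned fast path
+ * above packs the header as a single 32-bit store that matches struct
+ * hif_hdr byte for byte:
+ *
+ *	byte 0:    client_id
+ *	byte 1:    q_num
+ *	bytes 2-3: client_ctrl (low 16 bits)
+ */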
+
+/*This function puts the given packet in the specified client queue */
+void
+hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno,
+ void *data, void *data1, unsigned int len,
+ u32 client_ctrl, unsigned int flags, void *client_data)
+{
+ struct hif_client_tx_queue *queue = &client->tx_q[qno];
+ struct tx_queue_desc *desc = queue->base + queue->write_idx;
+
+ /* First buffer */
+ if (flags & HIF_FIRST_BUFFER) {
+ data1 -= PFE_HIF_SIZE;
+ data -= PFE_HIF_SIZE;
+ len += PFE_HIF_SIZE;
+
+ hif_hdr_write(data1, client->id, qno, client_ctrl);
+ }
+
+ desc->data = client_data;
+ desc->ctrl = CL_DESC_OWN | CL_DESC_FLAGS(flags);
+
+ hif_xmit_pkt(&client->pfe->hif, client->id, qno, data, len, flags);
+
+ queue->write_idx = (queue->write_idx + 1) & (queue->size - 1);
+
+ queue->tx_pending++;
+}
+
+void *
+hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno,
+ unsigned int *flags, __rte_unused int count)
+{
+ struct hif_client_tx_queue *queue = &client->tx_q[qno];
+ struct tx_queue_desc *desc = queue->base + queue->read_idx;
+
+ PFE_DP_LOG(DEBUG, "qno : %d rd_indx: %d pending:%d",
+ qno, queue->read_idx, queue->tx_pending);
+
+ if (!queue->tx_pending)
+ return NULL;
+
+ if (queue->nocpy_flag && !queue->done_tmu_tx_pkts) {
+ u32 tmu_tx_pkts = 0;
+
+ if (queue->prev_tmu_tx_pkts > tmu_tx_pkts)
+ queue->done_tmu_tx_pkts = UINT_MAX -
+ queue->prev_tmu_tx_pkts + tmu_tx_pkts;
+ else
+ queue->done_tmu_tx_pkts = tmu_tx_pkts -
+ queue->prev_tmu_tx_pkts;
+
+ queue->prev_tmu_tx_pkts = tmu_tx_pkts;
+
+ if (!queue->done_tmu_tx_pkts)
+ return NULL;
+ }
+
+ if (desc->ctrl & CL_DESC_OWN)
+ return NULL;
+
+ queue->read_idx = (queue->read_idx + 1) & (queue->size - 1);
+ queue->tx_pending--;
+
+ *flags = CL_DESC_GET_FLAGS(desc->ctrl);
+
+ if (queue->done_tmu_tx_pkts && (*flags & HIF_LAST_BUFFER))
+ queue->done_tmu_tx_pkts--;
+
+ return desc->data;
+}
+
+int
+pfe_hif_lib_init(struct pfe *pfe)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ emac_txq_cnt = EMAC_TXQ_CNT;
+ pfe->hif.shm = &ghif_shm;
+
+ return 0;
+}
+
+void
+pfe_hif_lib_exit(__rte_unused struct pfe *pfe)
+{
+ PMD_INIT_FUNC_TRACE();
+}
diff --git a/src/spdk/dpdk/drivers/net/pfe/pfe_hif_lib.h b/src/spdk/dpdk/drivers/net/pfe/pfe_hif_lib.h
new file mode 100644
index 000000000..d7c060694
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/pfe/pfe_hif_lib.h
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018-2019 NXP
+ */
+
+#ifndef _PFE_HIF_LIB_H_
+#define _PFE_HIF_LIB_H_
+
+#include "pfe_hif.h"
+
+#define HIF_CL_REQ_TIMEOUT 10
+#define GFP_DMA_PFE 0
+
+enum {
+ REQUEST_CL_REGISTER = 0,
+ REQUEST_CL_UNREGISTER,
+ HIF_REQUEST_MAX
+};
+
+enum {
+ /* Event to indicate that client rx queue is reached water mark level */
+ EVENT_HIGH_RX_WM = 0,
+ /* Event to indicate that, packet received for client */
+ EVENT_RX_PKT_IND,
+ /* Event to indicate that, packet tx done for client */
+ EVENT_TXDONE_IND,
+ HIF_EVENT_MAX
+};
+
+/*structure to store client queue info */
+struct hif_client_rx_queue {
+ struct rx_queue_desc *base;
+ u32 size;
+ u32 read_idx;
+ u32 write_idx;
+ u16 queue_id;
+ u16 port_id;
+ void *priv;
+};
+
+struct hif_client_tx_queue {
+ struct tx_queue_desc *base;
+ u32 size;
+ u32 read_idx;
+ u32 write_idx;
+ u32 tx_pending;
+ unsigned long jiffies_last_packet;
+ u32 nocpy_flag;
+ u32 prev_tmu_tx_pkts;
+ u32 done_tmu_tx_pkts;
+ u16 queue_id;
+ u16 port_id;
+ void *priv;
+};
+
+struct hif_client_s {
+ int id;
+ unsigned int tx_qn;
+ unsigned int rx_qn;
+ void *rx_qbase;
+ void *tx_qbase;
+ int tx_qsize;
+ int rx_qsize;
+ int cpu_id;
+ int port_id;
+ struct hif_client_tx_queue tx_q[HIF_CLIENT_QUEUES_MAX];
+ struct hif_client_rx_queue rx_q[HIF_CLIENT_QUEUES_MAX];
+ int (*event_handler)(void *data, int event, int qno);
+ unsigned long queue_mask[HIF_EVENT_MAX];
+ struct pfe *pfe;
+ void *priv;
+};
+
+/*
+ * Client specific shared memory
+ * It contains number of Rx/Tx queues, base addresses and queue sizes
+ */
+struct hif_client_shm {
+ u32 ctrl; /*0-7: number of Rx queues, 8-15: number of tx queues */
+ unsigned long rx_qbase; /*Rx queue base address */
+ u32 rx_qsize; /*each Rx queue size, all Rx queues are of same size */
+ unsigned long tx_qbase; /* Tx queue base address */
+ u32 tx_qsize; /*each Tx queue size, all Tx queues are of same size */
+};
+
+/*Client shared memory ctrl bit description */
+#define CLIENT_CTRL_RX_Q_CNT_OFST 0
+#define CLIENT_CTRL_TX_Q_CNT_OFST 8
+#define CLIENT_CTRL_RX_Q_CNT(ctrl) (((ctrl) >> CLIENT_CTRL_RX_Q_CNT_OFST) \
+ & 0xFF)
+#define CLIENT_CTRL_TX_Q_CNT(ctrl) (((ctrl) >> CLIENT_CTRL_TX_Q_CNT_OFST) \
+ & 0xFF)
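+
+/*
+ * Example (illustrative): a client with 2 Rx queues and 1 Tx queue encodes
+ *
+ *	ctrl = (1 << CLIENT_CTRL_TX_Q_CNT_OFST) |
+ *	       (2 << CLIENT_CTRL_RX_Q_CNT_OFST);	// 0x0102
+ *
+ * so CLIENT_CTRL_RX_Q_CNT(ctrl) == 2 and CLIENT_CTRL_TX_Q_CNT(ctrl) == 1.
+ */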
+
+/*
+ * Shared memory used to communicate between HIF driver and host/client drivers
+ * Before starting the hif driver, rx_buf_pool and rx_buf_pool_cnt should be
+ * initialized with host buffers and the buffer count in the pool.
+ * rx_buf_pool_cnt should be >= HIF_RX_DESC_NT.
+ *
+ */
+struct hif_shm {
+ u32 rx_buf_pool_cnt; /*Number of rx buffers available*/
+ /*Rx buffers required to initialize HIF rx descriptors */
+ struct rte_mempool *pool;
+ void *rx_buf_pool[HIF_RX_DESC_NT];
+ unsigned long g_client_status[2]; /*Global client status bit mask */
+ /* Client specific shared memory */
+ struct hif_client_shm client[HIF_CLIENTS_MAX];
+};
+
+/* Ownership bit: when set, the descriptor belongs to the HIF driver */
+#define CL_DESC_OWN BIT(31)
+/* Indicates the last buffer of a multi-buffer packet */
+#define CL_DESC_LAST BIT(30)
+/* Indicates the first buffer of a multi-buffer packet */
+#define CL_DESC_FIRST BIT(29)
+
+#define CL_DESC_BUF_LEN(x) ((x) & 0xFFFF)
+#define CL_DESC_FLAGS(x) (((x) & 0xF) << 16)
+#define CL_DESC_GET_FLAGS(x) (((x) >> 16) & 0xF)
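+
+/*
+ * Example ctrl word (illustrative): a single-buffer 1500-byte packet owned
+ * by the HIF driver would carry
+ *
+ *	CL_DESC_OWN | CL_DESC_FIRST | CL_DESC_LAST | CL_DESC_BUF_LEN(1500)
+ *
+ * and CL_DESC_GET_FLAGS() recovers the 4-bit flags field from bits 16-19.
+ */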
+
+struct rx_queue_desc {
+ void *data;
+ u32 ctrl; /*0-15bit len, 16-20bit flags, 31bit owner*/
+ u32 client_ctrl;
+};
+
+struct tx_queue_desc {
+ void *data;
+ u32 ctrl; /*0-15bit len, 16-20bit flags, 31bit owner*/
+};
+
+/* HIF Rx does not work properly for 2-byte aligned buffers, and the
+ * ip_header should be 4-byte aligned for better performance.
+ * "ip_header = 64 + 6(hif_header) + 14 (MAC header)" will be 4-byte aligned.
+ * With HW parse support:
+ * "ip_header = 64 + 6(hif_header) + 16 (parse) + 14 (MAC header)" will be
+ * 4-byte aligned.
+ */
+#define PFE_HIF_SIZE sizeof(struct hif_hdr)
+
+#ifdef RTE_LIBRTE_PFE_SW_PARSE
+#define PFE_PKT_HEADER_SZ PFE_HIF_SIZE
+#else
+#define PFE_PKT_HEADER_SZ (PFE_HIF_SIZE + sizeof(struct pfe_parse))
+#endif
+
+#define MAX_L2_HDR_SIZE 14 /* Not correct for VLAN/PPPoE */
+#define MAX_L3_HDR_SIZE 20 /* Not correct for IPv6 */
+#define MAX_L4_HDR_SIZE 60 /* TCP with maximum options */
+#define MAX_HDR_SIZE (MAX_L2_HDR_SIZE + MAX_L3_HDR_SIZE \
+ + MAX_L4_HDR_SIZE)
+/* Used in page mode to clamp packet size to the maximum supported by the hif
+ * hw interface (<16KiB)
+ */
+#define MAX_PFE_PKT_SIZE 16380UL
+
+extern unsigned int emac_txq_cnt;
+
+int pfe_hif_lib_init(struct pfe *pfe);
+void pfe_hif_lib_exit(struct pfe *pfe);
+int hif_lib_client_register(struct hif_client_s *client);
+int hif_lib_client_unregister(struct hif_client_s *client);
+void hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno,
+ void *data, void *data1, unsigned int len,
+ u32 client_ctrl, unsigned int flags, void *client_data);
+void hif_lib_indicate_client(struct hif_client_s *client, int event, int data);
+int hif_lib_event_handler_start(struct hif_client_s *client, int event, int
+ data);
+void *hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno,
+ unsigned int *flags, int count);
+int pfe_hif_shm_init(struct hif_shm *hif_shm, struct rte_mempool *mb_pool);
+void pfe_hif_shm_clean(struct hif_shm *hif_shm);
+
+int hif_lib_receive_pkt(struct hif_client_rx_queue *queue,
+ struct rte_mempool *pool,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+#endif /* _PFE_HIF_LIB_H_ */
diff --git a/src/spdk/dpdk/drivers/net/pfe/pfe_logs.h b/src/spdk/dpdk/drivers/net/pfe/pfe_logs.h
new file mode 100644
index 000000000..58d5e8e7c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/pfe/pfe_logs.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018-2019 NXP
+ */
+
+#ifndef _PFE_LOGS_H_
+#define _PFE_LOGS_H_
+
+extern int pfe_logtype_pmd;
+
+/* PMD related logs */
+#define PFE_PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, pfe_logtype_pmd, "pfe_net: %s() " \
+ fmt "\n", __func__, ##args)
+
+#define PMD_INIT_FUNC_TRACE() PFE_PMD_LOG(DEBUG, " >>")
+
+#define PFE_PMD_DEBUG(fmt, args...) \
+ PFE_PMD_LOG(DEBUG, fmt, ## args)
+#define PFE_PMD_ERR(fmt, args...) \
+ PFE_PMD_LOG(ERR, fmt, ## args)
+#define PFE_PMD_INFO(fmt, args...) \
+ PFE_PMD_LOG(INFO, fmt, ## args)
+
+#define PFE_PMD_WARN(fmt, args...) \
+ PFE_PMD_LOG(WARNING, fmt, ## args)
+
+/* DP Logs, toggled out at compile time if level is lower than current level */
+#define PFE_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, fmt, ## args)
+
+#endif /* _PFE_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/pfe/pfe_mod.h b/src/spdk/dpdk/drivers/net/pfe/pfe_mod.h
new file mode 100644
index 000000000..88d3d6ffc
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/pfe/pfe_mod.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018-2019 NXP
+ */
+
+#ifndef _PFE_MOD_H_
+#define _PFE_MOD_H_
+
+struct pfe;
+
+#include <rte_ethdev.h>
+
+#include "pfe.h"
+#include "pfe_hif.h"
+#include "pfe_hif_lib.h"
+#include "pfe_eth.h"
+
+#define PHYID_MAX_VAL 32
+
+/* PFE DPDK driver supports two interfaces.
+ */
+#define PFE_CDEV_ETH_COUNT 2
+
+/* PFE DPDK driver needs a kernel module named "pfe.ko". This module
+ * is required for PHY initialisation and creates a character device
+ * "pfe_us_cdev" for IOCTL support. PFE DPDK driver uses this character
+ * device for link status.
+ */
+#define PFE_CDEV_PATH "/dev/pfe_us_cdev"
+#define PFE_CDEV_INVALID_FD -1
+#define PFE_NAME_PMD net_pfe
+
+/* Used when a 'read' call is issued; it returns an array of
+ * PFE_CDEV_ETH_COUNT pfe_shared_info entries.
+ */
+struct pfe_shared_info {
+ uint32_t phy_id; /* Link phy ID */
+ uint8_t state; /* Link state: either 0 or 1 */
+};
+
+struct pfe_eth {
+ struct pfe_eth_priv_s *eth_priv[PFE_CDEV_ETH_COUNT];
+};
+
+struct pfe {
+ uint64_t ddr_phys_baseaddr;
+ void *ddr_baseaddr;
+ uint64_t ddr_size;
+ void *cbus_baseaddr;
+ uint64_t cbus_size;
+ struct ls1012a_pfe_platform_data platform_data;
+ struct pfe_hif hif;
+ struct pfe_eth eth;
+ struct hif_client_s *hif_client[HIF_CLIENTS_MAX];
+ int mdio_muxval[PHYID_MAX_VAL];
+ uint8_t nb_devs;
+ uint8_t max_intf;
+ int cdev_fd;
+};
+
+/* IOCTL Commands */
+#define PFE_CDEV_ETH0_STATE_GET _IOR('R', 0, int)
+#define PFE_CDEV_ETH1_STATE_GET _IOR('R', 1, int)
+#define PFE_CDEV_HIF_INTR_EN _IOWR('R', 2, int)
+#endif /* _PFE_MOD_H_ */
diff --git a/src/spdk/dpdk/drivers/net/pfe/rte_pmd_pfe_version.map b/src/spdk/dpdk/drivers/net/pfe/rte_pmd_pfe_version.map
new file mode 100644
index 000000000..f9f17e4f6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/pfe/rte_pmd_pfe_version.map
@@ -0,0 +1,3 @@
+DPDK_20.0 {
+ local: *;
+};