author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-21 11:54:28 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-21 11:54:28 +0000
commit    e6918187568dbd01842d8d1d2c808ce16a894239 (patch)
tree      64f88b554b444a49f656b6c656111a145cbbaa28 /src/spdk/dpdk/drivers/net/ipn3ke
parent    Initial commit. (diff)
Adding upstream version 18.2.2. (upstream/18.2.2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/dpdk/drivers/net/ipn3ke')
-rw-r--r--  src/spdk/dpdk/drivers/net/ipn3ke/Makefile                    |   38
-rw-r--r--  src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.c             |  596
-rw-r--r--  src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.h             | 1078
-rw-r--r--  src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_flow.c               | 1380
-rw-r--r--  src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_flow.h               |  106
-rw-r--r--  src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_logs.h               |   30
-rw-r--r--  src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_rawdev_api.h         |   74
-rw-r--r--  src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_representor.c        | 2985
-rw-r--r--  src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_tm.c                 | 2055
-rw-r--r--  src/spdk/dpdk/drivers/net/ipn3ke/meson.build                 |   28
-rw-r--r--  src/spdk/dpdk/drivers/net/ipn3ke/rte_pmd_ipn3ke_version.map  |    9
11 files changed, 8379 insertions(+), 0 deletions(-)
diff --git a/src/spdk/dpdk/drivers/net/ipn3ke/Makefile b/src/spdk/dpdk/drivers/net/ipn3ke/Makefile
new file mode 100644
index 000000000..40696dbde
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ipn3ke/Makefile
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2019 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_ipn3ke.a
+
+#
+# Add the experimental APIs called from this PMD
+# rte_eth_switch_domain_alloc()
+# rte_eth_dev_create()
+# rte_eth_dev_destroy()
+# rte_eth_switch_domain_free()
+#
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(RTE_SDK)/drivers/bus/ifpga
+CFLAGS += -I$(RTE_SDK)/drivers/raw/ifpga
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_ifpga
+LDLIBS += -lrte_bus_vdev
+LDLIBS += -lpthread
+
+EXPORT_MAP := rte_pmd_ipn3ke_version.map
+
+#
+# all sources are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_IPN3KE_PMD) += ipn3ke_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_IPN3KE_PMD) += ipn3ke_representor.c
+SRCS-$(CONFIG_RTE_LIBRTE_IPN3KE_PMD) += ipn3ke_tm.c
+SRCS-$(CONFIG_RTE_LIBRTE_IPN3KE_PMD) += ipn3ke_flow.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.c b/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.c
new file mode 100644
index 000000000..5b5510f08
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.c
@@ -0,0 +1,596 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <stdint.h>
+
+#include <rte_bus_pci.h>
+#include <rte_ethdev.h>
+#include <rte_pci.h>
+#include <rte_malloc.h>
+
+#include <rte_mbuf.h>
+#include <rte_sched.h>
+#include <rte_ethdev_driver.h>
+
+#include <rte_io.h>
+#include <rte_rawdev.h>
+#include <rte_rawdev_pmd.h>
+#include <rte_bus_ifpga.h>
+#include <ifpga_common.h>
+#include <ifpga_logs.h>
+#include <ifpga_rawdev.h>
+
+#include "ipn3ke_rawdev_api.h"
+#include "ipn3ke_flow.h"
+#include "ipn3ke_logs.h"
+#include "ipn3ke_ethdev.h"
+
+int ipn3ke_afu_logtype;
+
+static const struct rte_afu_uuid afu_uuid_ipn3ke_map[] = {
+ { MAP_UUID_10G_LOW, MAP_UUID_10G_HIGH },
+ { IPN3KE_UUID_10G_LOW, IPN3KE_UUID_10G_HIGH },
+ { IPN3KE_UUID_VBNG_LOW, IPN3KE_UUID_VBNG_HIGH},
+ { IPN3KE_UUID_25G_LOW, IPN3KE_UUID_25G_HIGH },
+ { 0, 0 /* sentinel */ },
+};
+
+struct ipn3ke_pub_func ipn3ke_bridge_func;
+
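+/*
+ * Indirect register access: a 64-bit command (RCMD/WCMD plus the target
+ * address in bits [63:32]) is written to the control word at eth-group
+ * BAR offset 0x10; completion is polled at offset 0x18, where bit 32
+ * flags that the low 32 bits hold valid read data.
+ */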
+static int
+ipn3ke_indirect_read(struct ipn3ke_hw *hw, uint32_t *rd_data,
+ uint32_t addr, uint32_t dev_sel, uint32_t eth_group_sel)
+{
+ uint32_t i, try_cnt;
+ uint64_t indirect_value;
+ volatile void *indirect_addrs;
+ uint64_t target_addr;
+ uint64_t read_data = 0;
+
+ if (eth_group_sel != 0 && eth_group_sel != 1)
+ return -1;
+
+ target_addr = addr | dev_sel << 17;
+
+ indirect_value = RCMD | target_addr << 32;
+ indirect_addrs = hw->eth_group_bar[eth_group_sel] + 0x10;
+
+ rte_delay_us(10);
+
+ rte_write64((rte_cpu_to_le_64(indirect_value)), indirect_addrs);
+
+ i = 0;
+ try_cnt = 10;
+ indirect_addrs = hw->eth_group_bar[eth_group_sel] +
+ 0x18;
+ do {
+ read_data = rte_read64(indirect_addrs);
+ if ((read_data >> 32) == 1)
+ break;
+ i++;
+ } while (i <= try_cnt);
+ if (i > try_cnt)
+ return -1;
+
+ *rd_data = rte_le_to_cpu_32(read_data);
+ return 0;
+}
+
+static int
+ipn3ke_indirect_write(struct ipn3ke_hw *hw, uint32_t wr_data,
+ uint32_t addr, uint32_t dev_sel, uint32_t eth_group_sel)
+{
+ volatile void *indirect_addrs;
+ uint64_t indirect_value;
+ uint64_t target_addr;
+
+ if (eth_group_sel != 0 && eth_group_sel != 1)
+ return -1;
+
+ target_addr = addr | dev_sel << 17;
+
+ indirect_value = WCMD | target_addr << 32 | wr_data;
+ indirect_addrs = hw->eth_group_bar[eth_group_sel] + 0x10;
+
+ rte_write64((rte_cpu_to_le_64(indirect_value)), indirect_addrs);
+ return 0;
+}
+
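+/*
+ * MAC registers go through the same indirect interface; MAC n within an
+ * eth group is addressed with device select 2 * n + 3.
+ */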
+static int
+ipn3ke_indirect_mac_read(struct ipn3ke_hw *hw, uint32_t *rd_data,
+ uint32_t addr, uint32_t mac_num, uint32_t eth_group_sel)
+{
+ uint32_t dev_sel;
+
+ if (mac_num >= hw->port_num)
+ return -1;
+
+ mac_num &= 0x7;
+ dev_sel = mac_num * 2 + 3;
+
+ return ipn3ke_indirect_read(hw, rd_data, addr, dev_sel, eth_group_sel);
+}
+
+static int
+ipn3ke_indirect_mac_write(struct ipn3ke_hw *hw, uint32_t wr_data,
+ uint32_t addr, uint32_t mac_num, uint32_t eth_group_sel)
+{
+ uint32_t dev_sel;
+
+ if (mac_num >= hw->port_num)
+ return -1;
+
+ mac_num &= 0x7;
+ dev_sel = mac_num * 2 + 3;
+
+ return ipn3ke_indirect_write(hw, wr_data, addr, dev_sel, eth_group_sel);
+}
+
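+/*
+ * Populate hw->hw_cap with the block offsets and sizes that the AFU
+ * advertises at fixed word offsets from IPN3KE_HW_BASE, then read the
+ * flow/RSS/MAC-map counts and the link speed from the capability and
+ * status blocks.
+ */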
+static void
+ipn3ke_hw_cap_init(struct ipn3ke_hw *hw)
+{
+ hw->hw_cap.version_number = IPN3KE_MASK_READ_REG(hw,
+ (IPN3KE_HW_BASE + 0), 0, 0xFFFF);
+ hw->hw_cap.capability_registers_block_offset = IPN3KE_MASK_READ_REG(hw,
+ (IPN3KE_HW_BASE + 0x8), 0, 0xFFFFFFFF);
+ hw->hw_cap.status_registers_block_offset = IPN3KE_MASK_READ_REG(hw,
+ (IPN3KE_HW_BASE + 0x10), 0, 0xFFFFFFFF);
+ hw->hw_cap.control_registers_block_offset = IPN3KE_MASK_READ_REG(hw,
+ (IPN3KE_HW_BASE + 0x18), 0, 0xFFFFFFFF);
+ hw->hw_cap.classify_offset = IPN3KE_MASK_READ_REG(hw,
+ (IPN3KE_HW_BASE + 0x20), 0, 0xFFFFFFFF);
+ hw->hw_cap.classy_size = IPN3KE_MASK_READ_REG(hw,
+ (IPN3KE_HW_BASE + 0x24), 0, 0xFFFF);
+ hw->hw_cap.policer_offset = IPN3KE_MASK_READ_REG(hw,
+ (IPN3KE_HW_BASE + 0x28), 0, 0xFFFFFFFF);
+ hw->hw_cap.policer_entry_size = IPN3KE_MASK_READ_REG(hw,
+ (IPN3KE_HW_BASE + 0x2C), 0, 0xFFFF);
+ hw->hw_cap.rss_key_array_offset = IPN3KE_MASK_READ_REG(hw,
+ (IPN3KE_HW_BASE + 0x30), 0, 0xFFFFFFFF);
+ hw->hw_cap.rss_key_entry_size = IPN3KE_MASK_READ_REG(hw,
+ (IPN3KE_HW_BASE + 0x34), 0, 0xFFFF);
+ hw->hw_cap.rss_indirection_table_array_offset = IPN3KE_MASK_READ_REG(hw,
+ (IPN3KE_HW_BASE + 0x38), 0, 0xFFFFFFFF);
+ hw->hw_cap.rss_indirection_table_entry_size = IPN3KE_MASK_READ_REG(hw,
+ (IPN3KE_HW_BASE + 0x3C), 0, 0xFFFF);
+ hw->hw_cap.dmac_map_offset = IPN3KE_MASK_READ_REG(hw,
+ (IPN3KE_HW_BASE + 0x40), 0, 0xFFFFFFFF);
+ hw->hw_cap.dmac_map_size = IPN3KE_MASK_READ_REG(hw,
+ (IPN3KE_HW_BASE + 0x44), 0, 0xFFFF);
+ hw->hw_cap.qm_offset = IPN3KE_MASK_READ_REG(hw,
+ (IPN3KE_HW_BASE + 0x48), 0, 0xFFFFFFFF);
+ hw->hw_cap.qm_size = IPN3KE_MASK_READ_REG(hw,
+ (IPN3KE_HW_BASE + 0x4C), 0, 0xFFFF);
+ hw->hw_cap.ccb_offset = IPN3KE_MASK_READ_REG(hw,
+ (IPN3KE_HW_BASE + 0x50), 0, 0xFFFFFFFF);
+ hw->hw_cap.ccb_entry_size = IPN3KE_MASK_READ_REG(hw,
+ (IPN3KE_HW_BASE + 0x54), 0, 0xFFFF);
+ hw->hw_cap.qos_offset = IPN3KE_MASK_READ_REG(hw,
+ (IPN3KE_HW_BASE + 0x58), 0, 0xFFFFFFFF);
+ hw->hw_cap.qos_size = IPN3KE_MASK_READ_REG(hw,
+ (IPN3KE_HW_BASE + 0x5C), 0, 0xFFFF);
+
+ hw->hw_cap.num_rx_flow = IPN3KE_MASK_READ_REG(hw,
+ IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
+ 0, 0xFFFF);
+ hw->hw_cap.num_rss_blocks = IPN3KE_MASK_READ_REG(hw,
+ IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
+ 4, 0xFFFF);
+ hw->hw_cap.num_dmac_map = IPN3KE_MASK_READ_REG(hw,
+ IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
+ 8, 0xFFFF);
+ hw->hw_cap.num_tx_flow = IPN3KE_MASK_READ_REG(hw,
+ IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
+ 0xC, 0xFFFF);
+ hw->hw_cap.num_smac_map = IPN3KE_MASK_READ_REG(hw,
+ IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
+ 0x10, 0xFFFF);
+
+ hw->hw_cap.link_speed_mbps = IPN3KE_MASK_READ_REG(hw,
+ IPN3KE_STATUS_REGISTERS_BLOCK_OFFSET,
+ 0, 0xFFFFF);
+}
+
+static int
+ipn3ke_vbng_init_done(struct ipn3ke_hw *hw)
+{
+ uint32_t timeout = 10000;
+ while (timeout > 0) {
+ if (IPN3KE_READ_REG(hw, IPN3KE_VBNG_INIT_STS)
+ == IPN3KE_VBNG_INIT_DONE)
+ break;
+ rte_delay_us(1000);
+ timeout--;
+ }
+
+ if (!timeout) {
+ IPN3KE_AFU_PMD_ERR("IPN3KE vBNG INIT timeout.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
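+/*
+ * Derive a common MTU from the TX and RX maximum frame lengths: take the
+ * smaller of the two and clamp it to the range [RTE_ETHER_MIN_MTU,
+ * IPN3KE_MAC_FRAME_SIZE_MAX - IPN3KE_ETH_OVERHEAD].
+ */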
+static uint32_t
+ipn3ke_mtu_cal(uint32_t tx, uint32_t rx)
+{
+ uint32_t tmp;
+ tmp = RTE_MIN(tx, rx);
+ tmp = RTE_MAX(tmp, (uint32_t)RTE_ETHER_MIN_MTU);
+ tmp = RTE_MIN(tmp, (uint32_t)(IPN3KE_MAC_FRAME_SIZE_MAX -
+ IPN3KE_ETH_OVERHEAD));
+ return tmp;
+}
+
+static void
+ipn3ke_mtu_set(struct ipn3ke_hw *hw, uint32_t mac_num,
+ uint32_t eth_group_sel, uint32_t txaddr, uint32_t rxaddr)
+{
+ uint32_t tx;
+ uint32_t rx;
+ uint32_t tmp;
+
+ if (!(*hw->f_mac_read) || !(*hw->f_mac_write))
+ return;
+
+ (*hw->f_mac_read)(hw,
+ &tx,
+ txaddr,
+ mac_num,
+ eth_group_sel);
+
+ (*hw->f_mac_read)(hw,
+ &rx,
+ rxaddr,
+ mac_num,
+ eth_group_sel);
+
+ tmp = ipn3ke_mtu_cal(tx, rx);
+
+ (*hw->f_mac_write)(hw,
+ tmp,
+ txaddr,
+ mac_num,
+ eth_group_sel);
+
+ (*hw->f_mac_write)(hw,
+ tmp,
+ rxaddr,
+ mac_num,
+ eth_group_sel);
+}
+
+static void
+ipn3ke_10G_mtu_setup(struct ipn3ke_hw *hw, uint32_t mac_num,
+ uint32_t eth_group_sel)
+{
+ ipn3ke_mtu_set(hw, mac_num, eth_group_sel,
+ IPN3KE_10G_TX_FRAME_MAXLENGTH, IPN3KE_10G_RX_FRAME_MAXLENGTH);
+}
+
+static void
+ipn3ke_25G_mtu_setup(struct ipn3ke_hw *hw, uint32_t mac_num,
+ uint32_t eth_group_sel)
+{
+ ipn3ke_mtu_set(hw, mac_num, eth_group_sel,
+ IPN3KE_25G_MAX_TX_SIZE_CONFIG, IPN3KE_25G_MAX_RX_SIZE_CONFIG);
+}
+
+static void
+ipn3ke_mtu_setup(struct ipn3ke_hw *hw)
+{
+ int i;
+ if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
+ for (i = 0; i < hw->port_num; i++) {
+ ipn3ke_10G_mtu_setup(hw, i, 0);
+ ipn3ke_10G_mtu_setup(hw, i, 1);
+ }
+ } else if (hw->retimer.mac_type ==
+ IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) {
+ for (i = 0; i < hw->port_num; i++) {
+ ipn3ke_25G_mtu_setup(hw, i, 0);
+ ipn3ke_25G_mtu_setup(hw, i, 1);
+ }
+ }
+}
+
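+/*
+ * One-time AFU bring-up: query the rawdev for the BAR indexes, port count
+ * and retimer MAC type; for the vBNG image, reset the IP and wait for the
+ * init-done handshake; enable the MAC paths and clear the statistics
+ * counters; program the MTUs; allocate a switch domain; and, for vBNG,
+ * initialize the traffic-manager and flow blocks.
+ */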
+static int
+ipn3ke_hw_init(struct rte_afu_device *afu_dev,
+ struct ipn3ke_hw *hw)
+{
+ struct rte_rawdev *rawdev;
+ int ret;
+ int i;
+ uint64_t port_num, mac_type, index;
+
+ rawdev = afu_dev->rawdev;
+
+ hw->afu_id.uuid.uuid_low = afu_dev->id.uuid.uuid_low;
+ hw->afu_id.uuid.uuid_high = afu_dev->id.uuid.uuid_high;
+ hw->afu_id.port = afu_dev->id.port;
+ hw->hw_addr = (uint8_t *)(afu_dev->mem_resource[0].addr);
+ hw->f_mac_read = ipn3ke_indirect_mac_read;
+ hw->f_mac_write = ipn3ke_indirect_mac_write;
+ hw->rawdev = rawdev;
+ rawdev->dev_ops->attr_get(rawdev,
+ "LineSideBARIndex", &index);
+ hw->eth_group_bar[0] = (uint8_t *)(afu_dev->mem_resource[index].addr);
+ rawdev->dev_ops->attr_get(rawdev,
+ "NICSideBARIndex", &index);
+ hw->eth_group_bar[1] = (uint8_t *)(afu_dev->mem_resource[index].addr);
+ rawdev->dev_ops->attr_get(rawdev,
+ "LineSideLinkPortNum", &port_num);
+ hw->retimer.port_num = (int)port_num;
+ hw->port_num = hw->retimer.port_num;
+ rawdev->dev_ops->attr_get(rawdev,
+ "LineSideMACType", &mac_type);
+ hw->retimer.mac_type = (int)mac_type;
+
+ hw->acc_tm = 0;
+ hw->acc_flow = 0;
+
+ if (afu_dev->id.uuid.uuid_low == IPN3KE_UUID_VBNG_LOW &&
+ afu_dev->id.uuid.uuid_high == IPN3KE_UUID_VBNG_HIGH) {
+ /* After power on, wait until init done */
+ if (ipn3ke_vbng_init_done(hw))
+ return -1;
+
+ ipn3ke_hw_cap_init(hw);
+
+ /* Reset vBNG IP */
+ IPN3KE_WRITE_REG(hw, IPN3KE_CTRL_RESET, 1);
+ rte_delay_us(10);
+ IPN3KE_WRITE_REG(hw, IPN3KE_CTRL_RESET, 0);
+
+ /* After reset, wait until init done */
+ if (ipn3ke_vbng_init_done(hw))
+ return -1;
+
+ hw->acc_tm = 1;
+ hw->acc_flow = 1;
+
+ IPN3KE_AFU_PMD_DEBUG("UPL_version is 0x%x\n",
+ IPN3KE_READ_REG(hw, 0));
+ }
+
+ if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
+ /* Enable inter connect channel */
+ for (i = 0; i < hw->port_num; i++) {
+ /* Enable the TX path */
+ ipn3ke_xmac_tx_enable(hw, i, 1);
+
+ /* Disables source address override */
+ ipn3ke_xmac_smac_ovd_dis(hw, i, 1);
+
+ /* Enable the RX path */
+ ipn3ke_xmac_rx_enable(hw, i, 1);
+
+ /* Clear NIC side TX statistics counters */
+ ipn3ke_xmac_tx_clr_10G_stcs(hw, i, 1);
+
+ /* Clear NIC side RX statistics counters */
+ ipn3ke_xmac_rx_clr_10G_stcs(hw, i, 1);
+
+ /* Clear line side TX statistics counters */
+ ipn3ke_xmac_tx_clr_10G_stcs(hw, i, 0);
+
+ /* Clear line RX statistics counters */
+ ipn3ke_xmac_rx_clr_10G_stcs(hw, i, 0);
+ }
+ } else if (hw->retimer.mac_type ==
+ IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) {
+ /* Enable inter connect channel */
+ for (i = 0; i < hw->port_num; i++) {
+ /* Clear NIC side TX statistics counters */
+ ipn3ke_xmac_tx_clr_25G_stcs(hw, i, 1);
+
+ /* Clear NIC side RX statistics counters */
+ ipn3ke_xmac_rx_clr_25G_stcs(hw, i, 1);
+
+ /* Clear line side TX statistics counters */
+ ipn3ke_xmac_tx_clr_25G_stcs(hw, i, 0);
+
+ /* Clear line side RX statistics counters */
+ ipn3ke_xmac_rx_clr_25G_stcs(hw, i, 0);
+ }
+ }
+
+ /* init mtu */
+ ipn3ke_mtu_setup(hw);
+
+ ret = rte_eth_switch_domain_alloc(&hw->switch_domain_id);
+ if (ret)
+ IPN3KE_AFU_PMD_WARN("failed to allocate switch domain for device %d",
+ ret);
+
+ hw->tm_hw_enable = 0;
+ hw->flow_hw_enable = 0;
+ if (afu_dev->id.uuid.uuid_low == IPN3KE_UUID_VBNG_LOW &&
+ afu_dev->id.uuid.uuid_high == IPN3KE_UUID_VBNG_HIGH) {
+ ret = ipn3ke_hw_tm_init(hw);
+ if (ret)
+ return ret;
+ hw->tm_hw_enable = 1;
+
+ ret = ipn3ke_flow_init(hw);
+ if (ret)
+ return ret;
+ hw->flow_hw_enable = 1;
+ }
+
+ return 0;
+}
+
+static void
+ipn3ke_hw_uninit(struct ipn3ke_hw *hw)
+{
+ int i;
+
+ if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
+ for (i = 0; i < hw->port_num; i++) {
+ /* Disable the TX path */
+ ipn3ke_xmac_tx_disable(hw, i, 1);
+
+ /* Disable the RX path */
+ ipn3ke_xmac_rx_disable(hw, i, 1);
+
+ /* Clear NIC side TX statistics counters */
+ ipn3ke_xmac_tx_clr_10G_stcs(hw, i, 1);
+
+ /* Clear NIC side RX statistics counters */
+ ipn3ke_xmac_rx_clr_10G_stcs(hw, i, 1);
+
+ /* Clear line side TX statistics counters */
+ ipn3ke_xmac_tx_clr_10G_stcs(hw, i, 0);
+
+ /* Clear line side RX statistics counters */
+ ipn3ke_xmac_rx_clr_10G_stcs(hw, i, 0);
+ }
+ } else if (hw->retimer.mac_type ==
+ IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) {
+ for (i = 0; i < hw->port_num; i++) {
+ /* Clear NIC side TX statistics counters */
+ ipn3ke_xmac_tx_clr_25G_stcs(hw, i, 1);
+
+ /* Clear NIC side RX statistics counters */
+ ipn3ke_xmac_rx_clr_25G_stcs(hw, i, 1);
+
+ /* Clear line side TX statistics counters */
+ ipn3ke_xmac_tx_clr_25G_stcs(hw, i, 0);
+
+ /* Clear line side RX statistics counters */
+ ipn3ke_xmac_rx_clr_25G_stcs(hw, i, 0);
+ }
+ }
+}
+
+static int ipn3ke_vswitch_probe(struct rte_afu_device *afu_dev)
+{
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct ipn3ke_hw *hw;
+ struct rte_eth_dev *i40e_eth;
+ struct ifpga_rawdev *ifpga_dev;
+ uint16_t port_id;
+ int i, j, retval;
+ char *fvl_bdf;
+
+ /* check if the AFU device has been probed already */
+ /* allocate shared mcp_vswitch structure */
+ if (!afu_dev->shared.data) {
+ snprintf(name, sizeof(name), "net_%s_hw",
+ afu_dev->device.name);
+ hw = rte_zmalloc_socket(name,
+ sizeof(struct ipn3ke_hw),
+ RTE_CACHE_LINE_SIZE,
+ afu_dev->device.numa_node);
+ if (!hw) {
+ IPN3KE_AFU_PMD_ERR("failed to allocate hardware data");
+ return -ENOMEM;
+ }
+ afu_dev->shared.data = hw;
+
+ rte_spinlock_init(&afu_dev->shared.lock);
+ } else {
+ hw = afu_dev->shared.data;
+ }
+
+ retval = ipn3ke_hw_init(afu_dev, hw);
+ if (retval)
+ return retval;
+
+ if (ipn3ke_bridge_func.get_ifpga_rawdev == NULL)
+ return -ENOMEM;
+ ifpga_dev = ipn3ke_bridge_func.get_ifpga_rawdev(hw->rawdev);
+ if (!ifpga_dev)
+ IPN3KE_AFU_PMD_ERR("failed to find ifpga_device.");
+
+ /* probe representor ports */
+ j = 0;
+ for (i = 0; i < hw->port_num; i++) {
+ struct ipn3ke_rpst rpst = {
+ .port_id = i,
+ .switch_domain_id = hw->switch_domain_id,
+ .hw = hw
+ };
+
+ /* representor port net_bdf_port */
+ snprintf(name, sizeof(name), "net_%s_representor_%d",
+ afu_dev->device.name, i);
+
+ for (; j < 8; j++) {
+ fvl_bdf = ifpga_dev->fvl_bdf[j];
+ retval = rte_eth_dev_get_port_by_name(fvl_bdf,
+ &port_id);
+ if (retval) {
+ continue;
+ } else {
+ i40e_eth = &rte_eth_devices[port_id];
+ rpst.i40e_pf_eth = i40e_eth;
+ rpst.i40e_pf_eth_port_id = port_id;
+
+ j++;
+ break;
+ }
+ }
+
+ retval = rte_eth_dev_create(&afu_dev->device, name,
+ sizeof(struct ipn3ke_rpst), NULL, NULL,
+ ipn3ke_rpst_init, &rpst);
+
+ if (retval)
+ IPN3KE_AFU_PMD_ERR("failed to create ipn3ke representor %s.",
+ name);
+
+ }
+
+ return 0;
+}
+
+static int ipn3ke_vswitch_remove(struct rte_afu_device *afu_dev)
+{
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct ipn3ke_hw *hw;
+ struct rte_eth_dev *ethdev;
+ int i, ret;
+
+ hw = afu_dev->shared.data;
+
+ /* remove representor ports */
+ for (i = 0; i < hw->port_num; i++) {
+ /* representor port net_bdf_port */
+ snprintf(name, sizeof(name), "net_%s_representor_%d",
+ afu_dev->device.name, i);
+
+ ethdev = rte_eth_dev_allocated(name);
+ if (!ethdev)
+ return -ENODEV;
+
+ rte_eth_dev_destroy(ethdev, ipn3ke_rpst_uninit);
+ }
+
+ ret = rte_eth_switch_domain_free(hw->switch_domain_id);
+ if (ret)
+ IPN3KE_AFU_PMD_WARN("failed to free switch domain: %d", ret);
+
+ /* hw uninit */
+ ipn3ke_hw_uninit(hw);
+
+ return 0;
+}
+
+static struct rte_afu_driver afu_ipn3ke_driver = {
+ .id_table = afu_uuid_ipn3ke_map,
+ .probe = ipn3ke_vswitch_probe,
+ .remove = ipn3ke_vswitch_remove,
+};
+
+RTE_PMD_REGISTER_AFU(net_ipn3ke_afu, afu_ipn3ke_driver);
+
+RTE_INIT(ipn3ke_afu_init_log)
+{
+ ipn3ke_afu_logtype = rte_log_register("pmd.afu.ipn3ke");
+ if (ipn3ke_afu_logtype >= 0)
+ rte_log_set_level(ipn3ke_afu_logtype, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.h b/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.h
new file mode 100644
index 000000000..9b0cf309c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.h
@@ -0,0 +1,1078 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _IPN3KE_ETHDEV_H_
+#define _IPN3KE_ETHDEV_H_
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <limits.h>
+#include <net/if.h>
+#include <netinet/in.h>
+#include <sys/queue.h>
+
+#include <rte_mbuf.h>
+#include <rte_flow_driver.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_vdev.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_bus_vdev.h>
+#include <rte_kvargs.h>
+#include <rte_spinlock.h>
+
+#include <rte_cycles.h>
+#include <rte_bus_ifpga.h>
+#include <rte_tm_driver.h>
+
+#define IPN3KE_TM_SCRATCH_RW 0
+
+/* TM Levels */
+enum ipn3ke_tm_node_level {
+ IPN3KE_TM_NODE_LEVEL_PORT,
+ IPN3KE_TM_NODE_LEVEL_VT,
+ IPN3KE_TM_NODE_LEVEL_COS,
+ IPN3KE_TM_NODE_LEVEL_MAX,
+};
+
+/* TM Shaper Profile */
+struct ipn3ke_tm_shaper_profile {
+ uint32_t valid;
+ uint32_t m;
+ uint32_t e;
+ uint64_t rate;
+ struct rte_tm_shaper_params params;
+};
+
+TAILQ_HEAD(ipn3ke_tm_shaper_profile_list, ipn3ke_tm_shaper_profile);
+
+
+#define IPN3KE_TDROP_TH1_MASK 0x1ffffff
+#define IPN3KE_TDROP_TH1_SHIFT (25)
+#define IPN3KE_TDROP_TH2_MASK 0x1ffffff
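+/* th1 and th2 are both 25-bit fields; the shift suggests a packed layout
+ * with th1 in bits [49:25] and th2 in bits [24:0].
+ */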
+
+/* TM TDROP Profile */
+struct ipn3ke_tm_tdrop_profile {
+ uint32_t tdrop_profile_id;
+ uint32_t th1;
+ uint32_t th2;
+ uint32_t n_users;
+ uint32_t valid;
+ struct rte_tm_wred_params params;
+};
+
+/* TM node priority */
+enum ipn3ke_tm_node_state {
+ IPN3KE_TM_NODE_STATE_IDLE = 0,
+ IPN3KE_TM_NODE_STATE_CONFIGURED_ADD,
+ IPN3KE_TM_NODE_STATE_CONFIGURED_DEL,
+ IPN3KE_TM_NODE_STATE_COMMITTED,
+ IPN3KE_TM_NODE_STATE_MAX,
+};
+
+TAILQ_HEAD(ipn3ke_tm_node_list, ipn3ke_tm_node);
+
+/* IPN3KE TM Node */
+struct ipn3ke_tm_node {
+ TAILQ_ENTRY(ipn3ke_tm_node) node;
+ uint32_t node_index;
+ uint32_t level;
+ uint32_t tm_id;
+ enum ipn3ke_tm_node_state node_state;
+ uint32_t parent_node_id;
+ uint32_t priority;
+ uint32_t weight;
+ struct ipn3ke_tm_node *parent_node;
+ struct ipn3ke_tm_shaper_profile shaper_profile;
+ struct ipn3ke_tm_tdrop_profile *tdrop_profile;
+ struct rte_tm_node_params params;
+ struct rte_tm_node_stats stats;
+ uint32_t n_children;
+ struct ipn3ke_tm_node_list children_node_list;
+};
+
+/* IPN3KE TM Hierarchy Specification */
+struct ipn3ke_tm_hierarchy {
+ struct ipn3ke_tm_node *port_node;
+ uint32_t n_shaper_profiles;
+ uint32_t n_tdrop_profiles;
+ uint32_t n_vt_nodes;
+ uint32_t n_cos_nodes;
+ struct ipn3ke_tm_node *port_commit_node;
+ struct ipn3ke_tm_node_list vt_commit_node_list;
+ struct ipn3ke_tm_node_list cos_commit_node_list;
+};
+
+struct ipn3ke_tm_internals {
+ /** Hierarchy specification
+ *
+ * - Hierarchy is unfrozen at init and when the port is stopped.
+ * - Hierarchy is frozen on successful hierarchy commit.
+ * - Run-time hierarchy changes are not allowed; it therefore makes
+ * sense to keep the hierarchy frozen after the port is started.
+ */
+ struct ipn3ke_tm_hierarchy h;
+ int hierarchy_frozen;
+ int tm_started;
+ uint32_t tm_id;
+};
+
+#define IPN3KE_TM_COS_NODE_NUM (64 * 1024)
+#define IPN3KE_TM_VT_NODE_NUM (IPN3KE_TM_COS_NODE_NUM / 8)
+#define IPN3KE_TM_10G_PORT_NODE_NUM (8)
+#define IPN3KE_TM_25G_PORT_NODE_NUM (4)
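+/* With 8 CoS nodes per VT node, the 64K CoS nodes map onto 8K VT nodes. */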
+
+#define IPN3KE_TM_NODE_LEVEL_MOD (100000)
+#define IPN3KE_TM_NODE_MOUNT_MAX (8)
+
+#define IPN3KE_TM_TDROP_PROFILE_NUM (2 * 1024)
+
+/* TM node priority */
+enum ipn3ke_tm_node_priority {
+ IPN3KE_TM_NODE_PRIORITY_NORMAL0 = 0,
+ IPN3KE_TM_NODE_PRIORITY_LOW,
+ IPN3KE_TM_NODE_PRIORITY_NORMAL1,
+ IPN3KE_TM_NODE_PRIORITY_HIGHEST,
+};
+
+#define IPN3KE_TM_NODE_WEIGHT_MAX UINT8_MAX
+
+/** Set a bit in the uint32 variable */
+#define IPN3KE_BIT_SET(var, pos) \
+ ((var) |= ((uint32_t)1 << ((pos))))
+
+/** Reset the bit in the variable */
+#define IPN3KE_BIT_RESET(var, pos) \
+ ((var) &= ~((uint32_t)1 << ((pos))))
+
+/** Check whether the bit is set in the variable */
+#define IPN3KE_BIT_ISSET(var, pos) \
+ (((var) & ((uint32_t)1 << ((pos)))) ? 1 : 0)
+
+struct ipn3ke_hw;
+
+#define IPN3KE_HW_BASE 0x4000000
+
+#define IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET \
+ (IPN3KE_HW_BASE + hw->hw_cap.capability_registers_block_offset)
+
+#define IPN3KE_STATUS_REGISTERS_BLOCK_OFFSET \
+ (IPN3KE_HW_BASE + hw->hw_cap.status_registers_block_offset)
+
+#define IPN3KE_CTRL_RESET \
+ (IPN3KE_HW_BASE + hw->hw_cap.control_registers_block_offset)
+
+#define IPN3KE_CTRL_MTU \
+ (IPN3KE_HW_BASE + hw->hw_cap.control_registers_block_offset + 4)
+
+#define IPN3KE_CLASSIFY_OFFSET \
+ (IPN3KE_HW_BASE + hw->hw_cap.classify_offset)
+
+#define IPN3KE_POLICER_OFFSET \
+ (IPN3KE_HW_BASE + hw->hw_cap.policer_offset)
+
+#define IPN3KE_RSS_KEY_ARRAY_OFFSET \
+ (IPN3KE_HW_BASE + hw->hw_cap.rss_key_array_offset)
+
+#define IPN3KE_RSS_INDIRECTION_TABLE_ARRAY_OFFSET \
+ (IPN3KE_HW_BASE + hw->hw_cap.rss_indirection_table_array_offset)
+
+#define IPN3KE_DMAC_MAP_OFFSET \
+ (IPN3KE_HW_BASE + hw->hw_cap.dmac_map_offset)
+
+#define IPN3KE_QM_OFFSET \
+ (IPN3KE_HW_BASE + hw->hw_cap.qm_offset)
+
+#define IPN3KE_CCB_OFFSET \
+ (IPN3KE_HW_BASE + hw->hw_cap.ccb_offset)
+
+#define IPN3KE_QOS_OFFSET \
+ (IPN3KE_HW_BASE + hw->hw_cap.qos_offset)
+
+struct ipn3ke_hw_cap {
+ uint32_t version_number;
+ uint32_t capability_registers_block_offset;
+ uint32_t status_registers_block_offset;
+ uint32_t control_registers_block_offset;
+ uint32_t classify_offset;
+ uint32_t classy_size;
+ uint32_t policer_offset;
+ uint32_t policer_entry_size;
+ uint32_t rss_key_array_offset;
+ uint32_t rss_key_entry_size;
+ uint32_t rss_indirection_table_array_offset;
+ uint32_t rss_indirection_table_entry_size;
+ uint32_t dmac_map_offset;
+ uint32_t dmac_map_size;
+ uint32_t qm_offset;
+ uint32_t qm_size;
+ uint32_t ccb_offset;
+ uint32_t ccb_entry_size;
+ uint32_t qos_offset;
+ uint32_t qos_size;
+
+ uint32_t num_rx_flow; /* Default: 64K */
+ uint32_t num_rss_blocks; /* Default: 512 */
+ uint32_t num_dmac_map; /* Default: 1K */
+ uint32_t num_tx_flow; /* Default: 64K */
+ uint32_t num_smac_map; /* Default: 1K */
+
+ uint32_t link_speed_mbps;
+};
+
+/**
+ * Structure to store private data for each representor instance
+ */
+struct ipn3ke_rpst {
+ TAILQ_ENTRY(ipn3ke_rpst) next; /**< Next in device list. */
+ uint16_t switch_domain_id;
+ /**< Switch ID */
+ uint16_t port_id;
+ /**< Port ID */
+ struct rte_eth_dev *ethdev;
+ struct ipn3ke_hw *hw;
+ struct rte_eth_dev *i40e_pf_eth;
+ /**< Ethdev of the associated physical function */
+ uint16_t i40e_pf_eth_port_id;
+ struct rte_eth_link ori_linfo;
+ struct ipn3ke_tm_internals tm;
+ struct rte_ether_addr mac_addr;
+};
+
+/* UUID IDs */
+#define MAP_UUID_10G_LOW 0xffffffffffffffff
+#define MAP_UUID_10G_HIGH 0xffffffffffffffff
+#define IPN3KE_UUID_10G_LOW 0xc000c9660d824272
+#define IPN3KE_UUID_10G_HIGH 0x9aeffe5f84570612
+#define IPN3KE_UUID_VBNG_LOW 0x8991165349d23ff9
+#define IPN3KE_UUID_VBNG_HIGH 0xb74cf419d15a481f
+#define IPN3KE_UUID_25G_LOW 0xb7d9bac566bfbc80
+#define IPN3KE_UUID_25G_HIGH 0xb07bac1aeef54d67
+
+#define IPN3KE_AFU_BUF_SIZE_MIN 1024
+#define IPN3KE_AFU_FRAME_SIZE_MAX 9728
+
+#define IPN3KE_RAWDEV_ATTR_LEN_MAX (64)
+
+typedef int (*ipn3ke_indirect_mac_read_t)(struct ipn3ke_hw *hw,
+ uint32_t *rd_data, uint32_t addr, uint32_t mac_num,
+ uint32_t eth_wrapper_sel);
+
+typedef int (*ipn3ke_indirect_mac_write_t)(struct ipn3ke_hw *hw,
+ uint32_t wr_data, uint32_t addr, uint32_t mac_num,
+ uint32_t eth_wrapper_sel);
+
+struct ipn3ke_hw {
+ struct rte_eth_dev *eth_dev;
+
+ /* afu info */
+ struct rte_afu_id afu_id;
+ struct rte_rawdev *rawdev;
+
+ struct ipn3ke_hw_cap hw_cap;
+
+ struct ifpga_rawdevg_retimer_info retimer;
+
+ uint16_t switch_domain_id;
+ uint16_t port_num;
+
+ uint32_t tm_hw_enable;
+ uint32_t flow_hw_enable;
+
+ uint32_t acc_tm;
+ uint32_t acc_flow;
+
+ struct ipn3ke_flow_list flow_list;
+ uint32_t flow_max_entries;
+ uint32_t flow_num_entries;
+
+ struct ipn3ke_tm_node *nodes;
+ struct ipn3ke_tm_node *port_nodes;
+ struct ipn3ke_tm_node *vt_nodes;
+ struct ipn3ke_tm_node *cos_nodes;
+
+ struct ipn3ke_tm_tdrop_profile *tdrop_profile;
+ uint32_t tdrop_profile_num;
+
+ uint32_t ccb_status;
+ uint32_t ccb_seg_free;
+ uint32_t ccb_seg_num;
+ uint32_t ccb_seg_k;
+
+ uint8_t *eth_group_bar[2];
+ /**< MAC Register read */
+ ipn3ke_indirect_mac_read_t f_mac_read;
+ /**< MAC Register write */
+ ipn3ke_indirect_mac_write_t f_mac_write;
+
+ uint8_t *hw_addr;
+};
+
+/**
+ * @internal
+ * Helper macro for drivers that need to convert to struct rte_afu_device.
+ */
+#define RTE_DEV_TO_AFU(ptr) \
+ container_of(ptr, struct rte_afu_device, device)
+
+#define RTE_DEV_TO_AFU_CONST(ptr) \
+ container_of(ptr, const struct rte_afu_device, device)
+
+#define RTE_ETH_DEV_TO_AFU(eth_dev) \
+ RTE_DEV_TO_AFU((eth_dev)->device)
+
+/**
+ * PCIe MMIO Access
+ */
+
+#define IPN3KE_PCI_REG(reg) rte_read32(reg)
+#define IPN3KE_PCI_REG_ADDR(a, reg) \
+ ((volatile uint32_t *)((char *)(a)->hw_addr + (reg)))
+static inline uint32_t ipn3ke_read_addr(volatile void *addr)
+{
+ return rte_le_to_cpu_32(IPN3KE_PCI_REG(addr));
+}
+
+#define WCMD 0x8000000000000000
+#define RCMD 0x4000000000000000
+#define INDRCT_CTRL 0x30
+#define INDRCT_STS 0x38
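+/*
+ * Indirect access through the AFU CSR window: the 25-bit byte address is
+ * converted to a word offset, packed into a 64-bit command at INDRCT_CTRL,
+ * and the result is polled at INDRCT_STS until bit 32 reports valid data.
+ * Unlike ipn3ke_indirect_read(), this poll loop is not bounded.
+ */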
+static inline uint32_t _ipn3ke_indrct_read(struct ipn3ke_hw *hw,
+ uint32_t addr)
+{
+ uint64_t word_offset;
+ uint64_t read_data = 0;
+ uint64_t indirect_value;
+ volatile void *indirect_addrs;
+
+ word_offset = (addr & 0x1FFFFFF) >> 2;
+ indirect_value = RCMD | word_offset << 32;
+ indirect_addrs = hw->hw_addr + (uint32_t)(INDRCT_CTRL);
+
+ rte_delay_us(10);
+
+ rte_write64((rte_cpu_to_le_64(indirect_value)), indirect_addrs);
+
+ indirect_addrs = hw->hw_addr + (uint32_t)(INDRCT_STS);
+ while ((read_data >> 32) != 1)
+ read_data = rte_read64(indirect_addrs);
+
+ return rte_le_to_cpu_32(read_data);
+}
+
+static inline void _ipn3ke_indrct_write(struct ipn3ke_hw *hw,
+ uint32_t addr, uint32_t value)
+{
+ uint64_t word_offset;
+ uint64_t indirect_value;
+ volatile void *indirect_addrs = 0;
+
+ word_offset = (addr & 0x1FFFFFF) >> 2;
+ indirect_value = WCMD | word_offset << 32 | value;
+ indirect_addrs = hw->hw_addr + (uint32_t)(INDRCT_CTRL);
+
+ rte_write64((rte_cpu_to_le_64(indirect_value)), indirect_addrs);
+ rte_delay_us(10);
+}
+
+#define IPN3KE_PCI_REG_WRITE(reg, value) \
+ rte_write32((rte_cpu_to_le_32(value)), reg)
+
+#define IPN3KE_PCI_REG_WRITE_RELAXED(reg, value) \
+ rte_write32_relaxed((rte_cpu_to_le_32(value)), reg)
+
+#define IPN3KE_READ_REG(hw, reg) \
+ _ipn3ke_indrct_read((hw), (reg))
+
+#define IPN3KE_WRITE_REG(hw, reg, value) \
+ _ipn3ke_indrct_write((hw), (reg), (value))
+
+#define IPN3KE_MASK_READ_REG(hw, reg, x, mask) \
+ ((mask) & IPN3KE_READ_REG((hw), ((reg) + (0x4 * (x)))))
+
+#define IPN3KE_MASK_WRITE_REG(hw, reg, x, value, mask) \
+ IPN3KE_WRITE_REG((hw), ((reg) + (0x4 * (x))), ((mask) & (value)))
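+/* Illustrative use (mirrors ipn3ke_hw_cap_init()): read the 16-bit version
+ * field at the base of the hardware block:
+ *   ver = IPN3KE_MASK_READ_REG(hw, IPN3KE_HW_BASE + 0, 0, 0xFFFF);
+ */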
+
+#define IPN3KE_DEV_PRIVATE_TO_HW(dev) \
+ (((struct ipn3ke_rpst *)(dev)->data->dev_private)->hw)
+
+#define IPN3KE_DEV_PRIVATE_TO_RPST(dev) \
+ ((struct ipn3ke_rpst *)(dev)->data->dev_private)
+
+#define IPN3KE_DEV_PRIVATE_TO_TM(dev) \
+ (&(((struct ipn3ke_rpst *)(dev)->data->dev_private)->tm))
+
+#define IPN3KE_VBNG_INIT_DONE (0x3)
+#define IPN3KE_VBNG_INIT_STS (0x204)
+
+/* Byte address of IPN3KE internal module */
+#define IPN3KE_TM_VERSION (IPN3KE_QM_OFFSET + 0x0000)
+#define IPN3KE_TM_SCRATCH (IPN3KE_QM_OFFSET + 0x0004)
+#define IPN3KE_TM_STATUS (IPN3KE_QM_OFFSET + 0x0008)
+#define IPN3KE_TM_MISC_STATUS (IPN3KE_QM_OFFSET + 0x0010)
+#define IPN3KE_TM_MISC_WARNING_0 (IPN3KE_QM_OFFSET + 0x0040)
+#define IPN3KE_TM_MISC_MON_0 (IPN3KE_QM_OFFSET + 0x0048)
+#define IPN3KE_TM_MISC_FATAL_0 (IPN3KE_QM_OFFSET + 0x0050)
+#define IPN3KE_TM_BW_MON_CTRL_1 (IPN3KE_QM_OFFSET + 0x0080)
+#define IPN3KE_TM_BW_MON_CTRL_2 (IPN3KE_QM_OFFSET + 0x0084)
+#define IPN3KE_TM_BW_MON_RATE (IPN3KE_QM_OFFSET + 0x0088)
+#define IPN3KE_TM_STATS_CTRL (IPN3KE_QM_OFFSET + 0x0100)
+#define IPN3KE_TM_STATS_DATA_0 (IPN3KE_QM_OFFSET + 0x0110)
+#define IPN3KE_TM_STATS_DATA_1 (IPN3KE_QM_OFFSET + 0x0114)
+#define IPN3KE_QM_UID_CONFIG_CTRL (IPN3KE_QM_OFFSET + 0x0200)
+#define IPN3KE_QM_UID_CONFIG_DATA (IPN3KE_QM_OFFSET + 0x0204)
+
+#define IPN3KE_BM_VERSION (IPN3KE_QM_OFFSET + 0x4000)
+#define IPN3KE_BM_STATUS (IPN3KE_QM_OFFSET + 0x4008)
+#define IPN3KE_BM_STORE_CTRL (IPN3KE_QM_OFFSET + 0x4010)
+#define IPN3KE_BM_STORE_STATUS (IPN3KE_QM_OFFSET + 0x4018)
+#define IPN3KE_BM_STORE_MON (IPN3KE_QM_OFFSET + 0x4028)
+#define IPN3KE_BM_WARNING_0 (IPN3KE_QM_OFFSET + 0x4040)
+#define IPN3KE_BM_MON_0 (IPN3KE_QM_OFFSET + 0x4048)
+#define IPN3KE_BM_FATAL_0 (IPN3KE_QM_OFFSET + 0x4050)
+#define IPN3KE_BM_DRAM_ACCESS_CTRL (IPN3KE_QM_OFFSET + 0x4100)
+#define IPN3KE_BM_DRAM_ACCESS_DATA_0 (IPN3KE_QM_OFFSET + 0x4120)
+#define IPN3KE_BM_DRAM_ACCESS_DATA_1 (IPN3KE_QM_OFFSET + 0x4124)
+#define IPN3KE_BM_DRAM_ACCESS_DATA_2 (IPN3KE_QM_OFFSET + 0x4128)
+#define IPN3KE_BM_DRAM_ACCESS_DATA_3 (IPN3KE_QM_OFFSET + 0x412C)
+#define IPN3KE_BM_DRAM_ACCESS_DATA_4 (IPN3KE_QM_OFFSET + 0x4130)
+#define IPN3KE_BM_DRAM_ACCESS_DATA_5 (IPN3KE_QM_OFFSET + 0x4134)
+#define IPN3KE_BM_DRAM_ACCESS_DATA_6 (IPN3KE_QM_OFFSET + 0x4138)
+
+#define IPN3KE_QM_VERSION (IPN3KE_QM_OFFSET + 0x8000)
+#define IPN3KE_QM_STATUS (IPN3KE_QM_OFFSET + 0x8008)
+#define IPN3KE_QM_LL_TABLE_MON (IPN3KE_QM_OFFSET + 0x8018)
+#define IPN3KE_QM_WARNING_0 (IPN3KE_QM_OFFSET + 0x8040)
+#define IPN3KE_QM_MON_0 (IPN3KE_QM_OFFSET + 0x8048)
+#define IPN3KE_QM_FATAL_0 (IPN3KE_QM_OFFSET + 0x8050)
+#define IPN3KE_QM_FATAL_1 (IPN3KE_QM_OFFSET + 0x8054)
+#define IPN3KE_LL_TABLE_ACCESS_CTRL (IPN3KE_QM_OFFSET + 0x8100)
+#define IPN3KE_LL_TABLE_ACCESS_DATA_0 (IPN3KE_QM_OFFSET + 0x8110)
+#define IPN3KE_LL_TABLE_ACCESS_DATA_1 (IPN3KE_QM_OFFSET + 0x8114)
+
+#define IPN3KE_CCB_ERROR (IPN3KE_CCB_OFFSET + 0x0008)
+#define IPN3KE_CCB_NSEGFREE (IPN3KE_CCB_OFFSET + 0x200000)
+#define IPN3KE_CCB_NSEGFREE_MASK 0x3FFFFF
+#define IPN3KE_CCB_PSEGMAX_COEF (IPN3KE_CCB_OFFSET + 0x200008)
+#define IPN3KE_CCB_PSEGMAX_COEF_MASK 0xFFFFF
+#define IPN3KE_CCB_NSEG_P (IPN3KE_CCB_OFFSET + 0x200080)
+#define IPN3KE_CCB_NSEG_MASK 0x3FFFFF
+#define IPN3KE_CCB_QPROFILE_Q (IPN3KE_CCB_OFFSET + 0x240000)
+#define IPN3KE_CCB_QPROFILE_MASK 0x7FF
+#define IPN3KE_CCB_PROFILE_P (IPN3KE_CCB_OFFSET + 0x280000)
+#define IPN3KE_CCB_PROFILE_MASK 0x1FFFFFF
+#define IPN3KE_CCB_PROFILE_MS (IPN3KE_CCB_OFFSET + 0xC)
+#define IPN3KE_CCB_PROFILE_MS_MASK 0x1FFFFFF
+#define IPN3KE_CCB_LR_LB_DBG_CTRL (IPN3KE_CCB_OFFSET + 0x2C0000)
+#define IPN3KE_CCB_LR_LB_DBG_DONE (IPN3KE_CCB_OFFSET + 0x2C0004)
+#define IPN3KE_CCB_LR_LB_DBG_RDATA (IPN3KE_CCB_OFFSET + 0x2C000C)
+
+#define IPN3KE_QOS_MAP_L1_X (IPN3KE_QOS_OFFSET + 0x000000)
+#define IPN3KE_QOS_MAP_L1_MASK 0x1FFF
+#define IPN3KE_QOS_MAP_L2_X (IPN3KE_QOS_OFFSET + 0x040000)
+#define IPN3KE_QOS_MAP_L2_MASK 0x7
+#define IPN3KE_QOS_TYPE_MASK 0x3
+#define IPN3KE_QOS_TYPE_L1_X (IPN3KE_QOS_OFFSET + 0x200000)
+#define IPN3KE_QOS_TYPE_L2_X (IPN3KE_QOS_OFFSET + 0x240000)
+#define IPN3KE_QOS_TYPE_L3_X (IPN3KE_QOS_OFFSET + 0x280000)
+#define IPN3KE_QOS_SCH_WT_MASK 0xFF
+#define IPN3KE_QOS_SCH_WT_L1_X (IPN3KE_QOS_OFFSET + 0x400000)
+#define IPN3KE_QOS_SCH_WT_L2_X (IPN3KE_QOS_OFFSET + 0x440000)
+#define IPN3KE_QOS_SCH_WT_L3_X (IPN3KE_QOS_OFFSET + 0x480000)
+#define IPN3KE_QOS_SHAP_WT_MASK 0x3FFF
+#define IPN3KE_QOS_SHAP_WT_L1_X (IPN3KE_QOS_OFFSET + 0x600000)
+#define IPN3KE_QOS_SHAP_WT_L2_X (IPN3KE_QOS_OFFSET + 0x640000)
+#define IPN3KE_QOS_SHAP_WT_L3_X (IPN3KE_QOS_OFFSET + 0x680000)
+
+#define IPN3KE_CLF_BASE_DST_MAC_ADDR_HI (IPN3KE_CLASSIFY_OFFSET + 0x0000)
+#define IPN3KE_CLF_BASE_DST_MAC_ADDR_LOW (IPN3KE_CLASSIFY_OFFSET + 0x0004)
+#define IPN3KE_CLF_QINQ_STAG (IPN3KE_CLASSIFY_OFFSET + 0x0008)
+#define IPN3KE_CLF_LKUP_ENABLE (IPN3KE_CLASSIFY_OFFSET + 0x000C)
+#define IPN3KE_CLF_DFT_FLOW_ID (IPN3KE_CLASSIFY_OFFSET + 0x0040)
+#define IPN3KE_CLF_RX_PARSE_CFG (IPN3KE_CLASSIFY_OFFSET + 0x0080)
+#define IPN3KE_CLF_RX_STATS_CFG (IPN3KE_CLASSIFY_OFFSET + 0x00C0)
+#define IPN3KE_CLF_RX_STATS_RPT (IPN3KE_CLASSIFY_OFFSET + 0x00C4)
+#define IPN3KE_CLF_RX_TEST (IPN3KE_CLASSIFY_OFFSET + 0x0400)
+
+#define IPN3KE_CLF_EM_VERSION (IPN3KE_CLASSIFY_OFFSET + 0x40000 + 0x0000)
+#define IPN3KE_CLF_EM_NUM (IPN3KE_CLASSIFY_OFFSET + 0x40000 + 0x0008)
+#define IPN3KE_CLF_EM_KEY_WDTH (IPN3KE_CLASSIFY_OFFSET + 0x40000 + 0x000C)
+#define IPN3KE_CLF_EM_RES_WDTH (IPN3KE_CLASSIFY_OFFSET + 0x40000 + 0x0010)
+#define IPN3KE_CLF_EM_ALARMS (IPN3KE_CLASSIFY_OFFSET + 0x40000 + 0x0014)
+#define IPN3KE_CLF_EM_DRC_RLAT (IPN3KE_CLASSIFY_OFFSET + 0x40000 + 0x0018)
+
+#define IPN3KE_CLF_MHL_VERSION (IPN3KE_CLASSIFY_OFFSET + 0x50000 + 0x0000)
+#define IPN3KE_CLF_MHL_GEN_CTRL (IPN3KE_CLASSIFY_OFFSET + 0x50000 + 0x0018)
+#define IPN3KE_CLF_MHL_MGMT_CTRL (IPN3KE_CLASSIFY_OFFSET + 0x50000 + 0x0020)
+#define IPN3KE_CLF_MHL_MGMT_CTRL_BIT_BUSY 31
+#define IPN3KE_CLF_MHL_MGMT_CTRL_FLUSH 0x0
+#define IPN3KE_CLF_MHL_MGMT_CTRL_INSERT 0x1
+#define IPN3KE_CLF_MHL_MGMT_CTRL_DELETE 0x2
+#define IPN3KE_CLF_MHL_MGMT_CTRL_SEARCH 0x3
+#define IPN3KE_CLF_MHL_FATAL_0 (IPN3KE_CLASSIFY_OFFSET + 0x50000 + 0x0050)
+#define IPN3KE_CLF_MHL_MON_0 (IPN3KE_CLASSIFY_OFFSET + 0x50000 + 0x0060)
+#define IPN3KE_CLF_MHL_TOTAL_ENTRIES (IPN3KE_CLASSIFY_OFFSET + \
+ 0x50000 + 0x0080)
+#define IPN3KE_CLF_MHL_ONEHIT_BUCKETS (IPN3KE_CLASSIFY_OFFSET + \
+ 0x50000 + 0x0084)
+#define IPN3KE_CLF_MHL_KEY_MASK 0xFFFFFFFF
+#define IPN3KE_CLF_MHL_KEY_0 (IPN3KE_CLASSIFY_OFFSET + 0x50000 + 0x1000)
+#define IPN3KE_CLF_MHL_KEY_1 (IPN3KE_CLASSIFY_OFFSET + 0x50000 + 0x1004)
+#define IPN3KE_CLF_MHL_KEY_2 (IPN3KE_CLASSIFY_OFFSET + 0x50000 + 0x1008)
+#define IPN3KE_CLF_MHL_KEY_3 (IPN3KE_CLASSIFY_OFFSET + 0x50000 + 0x100C)
+#define IPN3KE_CLF_MHL_RES_MASK 0xFFFFFFFF
+#define IPN3KE_CLF_MHL_RES (IPN3KE_CLASSIFY_OFFSET + 0x50000 + 0x2000)
+
+int
+ipn3ke_rpst_dev_set_link_up(struct rte_eth_dev *dev);
+int
+ipn3ke_rpst_dev_set_link_down(struct rte_eth_dev *dev);
+int
+ipn3ke_rpst_link_update(struct rte_eth_dev *ethdev,
+ __rte_unused int wait_to_complete);
+int
+ipn3ke_rpst_promiscuous_enable(struct rte_eth_dev *ethdev);
+int
+ipn3ke_rpst_promiscuous_disable(struct rte_eth_dev *ethdev);
+int
+ipn3ke_rpst_allmulticast_enable(struct rte_eth_dev *ethdev);
+int
+ipn3ke_rpst_allmulticast_disable(struct rte_eth_dev *ethdev);
+int
+ipn3ke_rpst_mac_addr_set(struct rte_eth_dev *ethdev,
+ struct rte_ether_addr *mac_addr);
+int
+ipn3ke_rpst_mtu_set(struct rte_eth_dev *ethdev, uint16_t mtu);
+
+int
+ipn3ke_rpst_init(struct rte_eth_dev *ethdev, void *init_params);
+int
+ipn3ke_rpst_uninit(struct rte_eth_dev *ethdev);
+int
+ipn3ke_hw_tm_init(struct ipn3ke_hw *hw);
+void
+ipn3ke_tm_init(struct ipn3ke_rpst *rpst);
+int
+ipn3ke_tm_ops_get(struct rte_eth_dev *ethdev,
+ void *arg);
+
+
+/* IPN3KE_MASK is a macro used on 32-bit registers */
+#define IPN3KE_MASK(mask, shift) ((mask) << (shift))
+
+#define IPN3KE_MAC_CTRL_BASE_0 0x00000000
+#define IPN3KE_MAC_CTRL_BASE_1 0x00008000
+
+#define IPN3KE_MAC_STATS_MASK 0xFFFFFFFFF
+
+/* All addresses are in 4-byte units */
+#define IPN3KE_MAC_PRIMARY_MAC_ADDR0 0x0010
+#define IPN3KE_MAC_PRIMARY_MAC_ADDR1 0x0011
+
+#define IPN3KE_MAC_MAC_RESET_CONTROL 0x001F
+#define IPN3KE_MAC_MAC_RESET_CONTROL_TX_SHIFT 0
+#define IPN3KE_MAC_MAC_RESET_CONTROL_TX_MASK \
+ IPN3KE_MASK(0x1, IPN3KE_MAC_MAC_RESET_CONTROL_TX_SHIFT)
+
+#define IPN3KE_MAC_MAC_RESET_CONTROL_RX_SHIFT 8
+#define IPN3KE_MAC_MAC_RESET_CONTROL_RX_MASK \
+ IPN3KE_MASK(0x1, IPN3KE_MAC_MAC_RESET_CONTROL_RX_SHIFT)
+
+#define IPN3KE_MAC_TX_PACKET_CONTROL 0x0020
+#define IPN3KE_MAC_TX_PACKET_CONTROL_SHIFT 0
+#define IPN3KE_MAC_TX_PACKET_CONTROL_MASK \
+ IPN3KE_MASK(0x1, IPN3KE_MAC_TX_PACKET_CONTROL_SHIFT)
+
+#define IPN3KE_MAC_TX_SRC_ADDR_OVERRIDE 0x002A
+#define IPN3KE_MAC_TX_SRC_ADDR_OVERRIDE_SHIFT 0
+#define IPN3KE_MAC_TX_SRC_ADDR_OVERRIDE_MASK \
+ IPN3KE_MASK(0x1, IPN3KE_MAC_TX_SRC_ADDR_OVERRIDE_SHIFT)
+
+#define IPN3KE_MAC_TX_FRAME_MAXLENGTH 0x002C
+#define IPN3KE_MAC_TX_FRAME_MAXLENGTH_SHIFT 0
+#define IPN3KE_MAC_TX_FRAME_MAXLENGTH_MASK \
+ IPN3KE_MASK(0xFFFF, IPN3KE_MAC_TX_FRAME_MAXLENGTH_SHIFT)
+
+#define IPN3KE_MAC_TX_PAUSEFRAME_CONTROL 0x0040
+#define IPN3KE_MAC_TX_PAUSEFRAME_CONTROL_SHIFT 0
+#define IPN3KE_MAC_TX_PAUSEFRAME_CONTROL_MASK \
+ IPN3KE_MASK(0x3, IPN3KE_MAC_TX_PAUSEFRAME_CONTROL_SHIFT)
+
+#define IPN3KE_MAC_TX_PAUSEFRAME_QUANTA 0x0042
+#define IPN3KE_MAC_TX_PAUSEFRAME_QUANTA_SHIFT 0
+#define IPN3KE_MAC_TX_PAUSEFRAME_QUANTA_MASK \
+ IPN3KE_MASK(0xFFFF, IPN3KE_MAC_TX_PAUSEFRAME_QUANTA_SHIFT)
+
+#define IPN3KE_MAC_TX_PAUSEFRAME_HOLDOFF_QUANTA 0x0043
+#define IPN3KE_MAC_TX_PAUSEFRAME_HOLDOFF_QUANTA_SHIFT 0
+#define IPN3KE_MAC_TX_PAUSEFRAME_HOLDOFF_QUANTA_MASK \
+ IPN3KE_MASK(0xFFFF, IPN3KE_MAC_TX_PAUSEFRAME_HOLDOFF_QUANTA_SHIFT)
+
+#define IPN3KE_MAC_TX_PAUSEFRAME_ENABLE 0x0044
+#define IPN3KE_MAC_TX_PAUSEFRAME_ENABLE_CFG_SHIFT 0
+#define IPN3KE_MAC_TX_PAUSEFRAME_ENABLE_CFG_MASK \
+ IPN3KE_MASK(0x1, IPN3KE_MAC_TX_PAUSEFRAME_ENABLE_CFG_SHIFT)
+
+#define IPN3KE_MAC_TX_PAUSEFRAME_ENABLE_TYPE_SHIFT 1
+#define IPN3KE_MAC_TX_PAUSEFRAME_ENABLE_TYPE_MASK \
+ IPN3KE_MASK(0x3, IPN3KE_MAC_TX_PAUSEFRAME_ENABLE_TYPE_SHIFT)
+
+#define IPN3KE_MAC_RX_TRANSFER_CONTROL 0x00A0
+#define IPN3KE_MAC_RX_TRANSFER_CONTROL_SHIFT 0x0
+#define IPN3KE_MAC_RX_TRANSFER_CONTROL_MASK \
+ IPN3KE_MASK(0x1, IPN3KE_MAC_RX_TRANSFER_CONTROL_SHIFT)
+
+#define IPN3KE_MAC_RX_FRAME_CONTROL 0x00AC
+#define IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLUCAST_SHIFT 0x0
+#define IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLUCAST_MASK \
+ IPN3KE_MASK(0x1, IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLUCAST_SHIFT)
+
+#define IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLMCAST_SHIFT 0x1
+#define IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLMCAST_MASK \
+ IPN3KE_MASK(0x1, IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLMCAST_SHIFT)
+
+#define IPN3KE_VLAN_TAG_SIZE 4
+/**
+ * The overhead from MTU to max frame size.
+ * Considering QinQ packet, the VLAN tag needs to be counted twice.
+ */
+#define IPN3KE_ETH_OVERHEAD \
+ (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + IPN3KE_VLAN_TAG_SIZE * 2)
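+/* With RTE_ETHER_HDR_LEN = 14 and RTE_ETHER_CRC_LEN = 4, the overhead is
+ * 14 + 4 + 2 * 4 = 26 bytes, so the largest usable MTU is
+ * IPN3KE_MAC_FRAME_SIZE_MAX - 26 = 9702 bytes.
+ */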
+
+#define IPN3KE_MAC_FRAME_SIZE_MAX 9728
+#define IPN3KE_MAC_RX_FRAME_MAXLENGTH 0x00AE
+#define IPN3KE_MAC_RX_FRAME_MAXLENGTH_SHIFT 0
+#define IPN3KE_MAC_RX_FRAME_MAXLENGTH_MASK \
+ IPN3KE_MASK(0xFFFF, IPN3KE_MAC_RX_FRAME_MAXLENGTH_SHIFT)
+
+#define IPN3KE_25G_MAX_TX_SIZE_CONFIG 0x407
+#define IPN3KE_25G_MAX_RX_SIZE_CONFIG 0x506
+
+#define IPN3KE_10G_TX_FRAME_MAXLENGTH 0x002C
+#define IPN3KE_10G_RX_FRAME_MAXLENGTH 0x00AE
+
+#define IPN3KE_REGISTER_WIDTH 32
+
+/* Bits[2:0]: Configuration of TX statistics counters:
+ * Bit[2]: Shadow request (active high): When set to the value of 1,
+ * TX statistics collection is paused. The underlying counters
+ * continue to operate, but the readable values reflect a snapshot at
+ * the time the pause flag was activated. Write a 0 to release.
+ * Bit[1]: Parity-error clear. When software sets this bit, the IP core
+ * clears the parity bit CNTR_TX_STATUS[0]. This bit
+ * (CNTR_TX_CONFIG[1]) is self-clearing.
+ * Bit[0]: Software can set this bit to the value of 1 to reset all of
+ * the TX statistics registers at the same time. This bit is self-clearing.
+ * Bits[31:3] are Reserved.
+ */
+#define IPN3KE_25G_TX_STATISTICS_CONFIG 0x845
+#define IPN3KE_25G_TX_STATISTICS_CONFIG_SHADOW_REQUEST_MASK 0x00000004
+
+/* Bit[1]: Indicates that the TX statistics registers are paused (while
+ * CNTR_TX_CONFIG[2] is asserted).
+ * Bit[0]: Indicates the presence of at least one parity error in the
+ * TX statistics counters.
+ * Bits[31:2] are Reserved.
+ */
+#define IPN3KE_25G_TX_STATISTICS_STATUS 0x846
+#define IPN3KE_25G_TX_STATISTICS_STATUS_SHADOW_REQUEST_MASK 0x00000002
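+/* Illustrative snapshot sequence (a sketch, not driver code): assert the
+ * shadow-request bit, read the frozen counters, then release it:
+ *   (*hw->f_mac_write)(hw, IPN3KE_25G_TX_STATISTICS_CONFIG_SHADOW_REQUEST_MASK,
+ *       IPN3KE_25G_TX_STATISTICS_CONFIG, mac_num, eth_group_sel);
+ *   ...read the IPN3KE_25G_CNTR_TX_* registers...
+ *   (*hw->f_mac_write)(hw, 0, IPN3KE_25G_TX_STATISTICS_CONFIG,
+ *       mac_num, eth_group_sel);
+ */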
+
+#define IPN3KE_25G_CNTR_TX_FRAGMENTS_LO 0x800
+#define IPN3KE_25G_CNTR_TX_FRAGMENTS_HI 0x801
+#define IPN3KE_25G_CNTR_TX_JABBERS_LO 0x802
+#define IPN3KE_25G_CNTR_TX_JABBERS_HI 0x803
+#define IPN3KE_25G_CNTR_TX_FCS_LO 0x804
+#define IPN3KE_25G_CNTR_TX_FCS_HI 0x805
+#define IPN3KE_25G_CNTR_TX_CRCERR_LO 0x806
+#define IPN3KE_25G_CNTR_TX_CRCERR_HI 0x807
+#define IPN3KE_25G_CNTR_TX_MCAST_DATA_ERR_LO 0x808
+#define IPN3KE_25G_CNTR_TX_MCAST_DATA_ERR_HI 0x809
+#define IPN3KE_25G_CNTR_TX_BCAST_DATA_ERR_LO 0x80A
+#define IPN3KE_25G_CNTR_TX_BCAST_DATA_ERR_HI 0x80B
+#define IPN3KE_25G_CNTR_TX_UCAST_DATA_ERR_LO 0x80C
+#define IPN3KE_25G_CNTR_TX_UCAST_DATA_ERR_HI 0x80D
+#define IPN3KE_25G_CNTR_TX_MCAST_CTRL_ERR_LO 0x80E
+#define IPN3KE_25G_CNTR_TX_MCAST_CTRL_ERR_HI 0x80F
+#define IPN3KE_25G_CNTR_TX_BCAST_CTRL_ERR_LO 0x810
+#define IPN3KE_25G_CNTR_TX_BCAST_CTRL_ERR_HI 0x811
+#define IPN3KE_25G_CNTR_TX_UCAST_CTRL_ERR_LO 0x812
+#define IPN3KE_25G_CNTR_TX_UCAST_CTRL_ERR_HI 0x813
+#define IPN3KE_25G_CNTR_TX_PAUSE_ERR_LO 0x814
+#define IPN3KE_25G_CNTR_TX_PAUSE_ERR_HI 0x815
+#define IPN3KE_25G_CNTR_TX_64B_LO 0x816
+#define IPN3KE_25G_CNTR_TX_64B_HI 0x817
+#define IPN3KE_25G_CNTR_TX_65_127B_LO 0x818
+#define IPN3KE_25G_CNTR_TX_65_127B_HI 0x819
+#define IPN3KE_25G_CNTR_TX_128_255B_LO 0x81A
+#define IPN3KE_25G_CNTR_TX_128_255B_HI 0x81B
+#define IPN3KE_25G_CNTR_TX_256_511B_LO 0x81C
+#define IPN3KE_25G_CNTR_TX_256_511B_HI 0x81D
+#define IPN3KE_25G_CNTR_TX_512_1023B_LO 0x81E
+#define IPN3KE_25G_CNTR_TX_512_1023B_HI 0x81F
+#define IPN3KE_25G_CNTR_TX_1024_1518B_LO 0x820
+#define IPN3KE_25G_CNTR_TX_1024_1518B_HI 0x821
+#define IPN3KE_25G_CNTR_TX_1519_MAXB_LO 0x822
+#define IPN3KE_25G_CNTR_TX_1519_MAXB_HI 0x823
+#define IPN3KE_25G_CNTR_TX_OVERSIZE_LO 0x824
+#define IPN3KE_25G_CNTR_TX_OVERSIZE_HI 0x825
+#define IPN3KE_25G_CNTR_TX_MCAST_DATA_OK_LO 0x826
+#define IPN3KE_25G_CNTR_TX_MCAST_DATA_OK_HI 0x827
+#define IPN3KE_25G_CNTR_TX_BCAST_DATA_OK_LO 0x828
+#define IPN3KE_25G_CNTR_TX_BCAST_DATA_OK_HI 0x829
+#define IPN3KE_25G_CNTR_TX_UCAST_DATA_OK_LO 0x82A
+#define IPN3KE_25G_CNTR_TX_UCAST_DATA_OK_HI 0x82B
+#define IPN3KE_25G_CNTR_TX_MCAST_CTRL_LO 0x82C
+#define IPN3KE_25G_CNTR_TX_MCAST_CTRL_HI 0x82D
+#define IPN3KE_25G_CNTR_TX_BCAST_CTRL_LO 0x82E
+#define IPN3KE_25G_CNTR_TX_BCAST_CTRL_HI 0x82F
+#define IPN3KE_25G_CNTR_TX_UCAST_CTRL_LO 0x830
+#define IPN3KE_25G_CNTR_TX_UCAST_CTRL_HI 0x831
+#define IPN3KE_25G_CNTR_TX_PAUSE_LO 0x832
+#define IPN3KE_25G_CNTR_TX_PAUSE_HI 0x833
+#define IPN3KE_25G_CNTR_TX_RUNT_LO 0x834
+#define IPN3KE_25G_CNTR_TX_RUNT_HI 0x835
+#define IPN3KE_25G_TX_PAYLOAD_OCTETS_OK_LO 0x860
+#define IPN3KE_25G_TX_PAYLOAD_OCTETS_OK_HI 0x861
+#define IPN3KE_25G_TX_FRAME_OCTETS_OK_LO 0x862
+#define IPN3KE_25G_TX_FRAME_OCTETS_OK_HI 0x863
+
+/* Bits[2:0]: Configuration of RX statistics counters:
+ * Bit[2]: Shadow request (active high): When set to the value of 1,
+ * RX statistics collection is paused. The underlying counters
+ * continue to operate, but the readable values reflect a snapshot
+ * at the time the pause flag was activated. Write a 0 to release.
+ * Bit[1]: Parity-error clear. When software sets this bit, the IP
+ * core clears the parity bit CNTR_RX_STATUS[0]. This bit
+ * (CNTR_RX_CONFIG[1]) is self-clearing.
+ * Bit[0]: Software can set this bit to the value of 1 to reset all of
+ * the RX statistics registers at the same time. This bit is self-clearing.
+ * Bits[31:3] are Reserved.
+ */
+#define IPN3KE_25G_RX_STATISTICS_CONFIG 0x945
+#define IPN3KE_25G_RX_STATISTICS_CONFIG_SHADOW_REQUEST_MASK 0x00000004
+
+/* Bit[1]: Indicates that the RX statistics registers are paused
+ * (while CNTR_RX_CONFIG[2] is asserted).
+ * Bit[0]: Indicates the presence of at least one parity error in the
+ * RX statistics counters.
+ * Bits[31:2] are Reserved.
+ */
+#define IPN3KE_25G_RX_STATISTICS_STATUS 0x946
+#define IPN3KE_25G_RX_STATISTICS_STATUS_SHADOW_REQUEST_MASK 0x00000002
+
+#define IPN3KE_25G_CNTR_RX_FRAGMENTS_LO 0x900
+#define IPN3KE_25G_CNTR_RX_FRAGMENTS_HI 0x901
+#define IPN3KE_25G_CNTR_RX_JABBERS_LO 0x902
+#define IPN3KE_25G_CNTR_RX_JABBERS_HI 0x903
+#define IPN3KE_25G_CNTR_RX_FCS_LO 0x904
+#define IPN3KE_25G_CNTR_RX_FCS_HI 0x905
+#define IPN3KE_25G_CNTR_RX_CRCERR_LO 0x906
+#define IPN3KE_25G_CNTR_RX_CRCERR_HI 0x907
+#define IPN3KE_25G_CNTR_RX_MCAST_DATA_ERR_LO 0x908
+#define IPN3KE_25G_CNTR_RX_MCAST_DATA_ERR_HI 0x909
+#define IPN3KE_25G_CNTR_RX_BCAST_DATA_ERR_LO 0x90A
+#define IPN3KE_25G_CNTR_RX_BCAST_DATA_ERR_HI 0x90B
+#define IPN3KE_25G_CNTR_RX_UCAST_DATA_ERR_LO 0x90C
+#define IPN3KE_25G_CNTR_RX_UCAST_DATA_ERR_HI 0x90D
+#define IPN3KE_25G_CNTR_RX_MCAST_CTRL_ERR_LO 0x90E
+#define IPN3KE_25G_CNTR_RX_MCAST_CTRL_ERR_HI 0x90F
+#define IPN3KE_25G_CNTR_RX_BCAST_CTRL_ERR_LO 0x910
+#define IPN3KE_25G_CNTR_RX_BCAST_CTRL_ERR_HI 0x911
+#define IPN3KE_25G_CNTR_RX_UCAST_CTRL_ERR_LO 0x912
+#define IPN3KE_25G_CNTR_RX_UCAST_CTRL_ERR_HI 0x913
+#define IPN3KE_25G_CNTR_RX_PAUSE_ERR_LO 0x914
+#define IPN3KE_25G_CNTR_RX_PAUSE_ERR_HI 0x915
+#define IPN3KE_25G_CNTR_RX_64B_LO 0x916
+#define IPN3KE_25G_CNTR_RX_64B_HI 0x917
+#define IPN3KE_25G_CNTR_RX_65_127B_LO 0x918
+#define IPN3KE_25G_CNTR_RX_65_127B_HI 0x919
+#define IPN3KE_25G_CNTR_RX_128_255B_LO 0x91A
+#define IPN3KE_25G_CNTR_RX_128_255B_HI 0x91B
+#define IPN3KE_25G_CNTR_RX_256_511B_LO 0x91C
+#define IPN3KE_25G_CNTR_RX_256_511B_HI 0x91D
+#define IPN3KE_25G_CNTR_RX_512_1023B_LO 0x91E
+#define IPN3KE_25G_CNTR_RX_512_1023B_HI 0x91F
+#define IPN3KE_25G_CNTR_RX_1024_1518B_LO 0x920
+#define IPN3KE_25G_CNTR_RX_1024_1518B_HI 0x921
+#define IPN3KE_25G_CNTR_RX_1519_MAXB_LO 0x922
+#define IPN3KE_25G_CNTR_RX_1519_MAXB_HI 0x923
+#define IPN3KE_25G_CNTR_RX_OVERSIZE_LO 0x924
+#define IPN3KE_25G_CNTR_RX_OVERSIZE_HI 0x925
+#define IPN3KE_25G_CNTR_RX_MCAST_DATA_OK_LO 0x926
+#define IPN3KE_25G_CNTR_RX_MCAST_DATA_OK_HI 0x927
+#define IPN3KE_25G_CNTR_RX_BCAST_DATA_OK_LO 0x928
+#define IPN3KE_25G_CNTR_RX_BCAST_DATA_OK_HI 0x929
+#define IPN3KE_25G_CNTR_RX_UCAST_DATA_OK_LO 0x92A
+#define IPN3KE_25G_CNTR_RX_UCAST_DATA_OK_HI 0x92B
+#define IPN3KE_25G_CNTR_RX_MCAST_CTRL_LO 0x92C
+#define IPN3KE_25G_CNTR_RX_MCAST_CTRL_HI 0x92D
+#define IPN3KE_25G_CNTR_RX_BCAST_CTRL_LO 0x92E
+#define IPN3KE_25G_CNTR_RX_BCAST_CTRL_HI 0x92F
+#define IPN3KE_25G_CNTR_RX_UCAST_CTRL_LO 0x930
+#define IPN3KE_25G_CNTR_RX_UCAST_CTRL_HI 0x931
+#define IPN3KE_25G_CNTR_RX_PAUSE_LO 0x932
+#define IPN3KE_25G_CNTR_RX_PAUSE_HI 0x933
+#define IPN3KE_25G_CNTR_RX_RUNT_LO 0x934
+#define IPN3KE_25G_CNTR_RX_RUNT_HI 0x935
+#define IPN3KE_25G_RX_PAYLOAD_OCTETS_OK_LO 0x960
+#define IPN3KE_25G_RX_PAYLOAD_OCTETS_OK_HI 0x961
+#define IPN3KE_25G_RX_FRAME_OCTETS_OK_LO 0x962
+#define IPN3KE_25G_RX_FRAME_OCTETS_OK_HI 0x963
+
+#define IPN3KE_10G_STATS_HI_VALID_MASK 0x0000000F
+
+#define IPN3KE_10G_TX_STATS_CLR 0x0140
+#define IPN3KE_10G_TX_STATS_CLR_CLEAR_SHIFT 0
+#define IPN3KE_10G_TX_STATS_CLR_CLEAR_MASK \
+ IPN3KE_MASK(0x1, IPN3KE_10G_TX_STATS_CLR_CLEAR_SHIFT)
+
+#define IPN3KE_10G_RX_STATS_CLR 0x01C0
+#define IPN3KE_10G_RX_STATS_CLR_CLEAR_SHIFT 0
+#define IPN3KE_10G_RX_STATS_CLR_CLEAR_MASK \
+ IPN3KE_MASK(0x1, IPN3KE_10G_RX_STATS_CLR_CLEAR_SHIFT)
+
+#define IPN3KE_10G_TX_STATS_FRAME_OK_LO 0x0142
+#define IPN3KE_10G_TX_STATS_FRAME_OK_HI 0x0143
+#define IPN3KE_10G_RX_STATS_FRAME_OK_LO 0x01C2
+#define IPN3KE_10G_RX_STATS_FRAME_OK_HI 0x01C3
+#define IPN3KE_10G_TX_STATS_FRAME_ERR_LO 0x0144
+#define IPN3KE_10G_TX_STATS_FRAME_ERR_HI 0x0145
+#define IPN3KE_10G_RX_STATS_FRAME_ERR_LO 0x01C4
+#define IPN3KE_10G_RX_STATS_FRAME_ERR_HI 0x01C5
+#define IPN3KE_10G_RX_STATS_FRAME_CRC_ERR_LO 0x01C6
+#define IPN3KE_10G_RX_STATS_FRAME_CRC_ERR_HI 0x01C7
+#define IPN3KE_10G_TX_STATS_OCTETS_OK_LO 0x0148
+#define IPN3KE_10G_TX_STATS_OCTETS_OK_HI 0x0149
+#define IPN3KE_10G_RX_STATS_OCTETS_OK_LO 0x01C8
+#define IPN3KE_10G_RX_STATS_OCTETS_OK_HI 0x01C9
+#define IPN3KE_10G_TX_STATS_PAUSE_MAC_CTRL_FRAMES_LO 0x014A
+#define IPN3KE_10G_TX_STATS_PAUSE_MAC_CTRL_FRAMES_HI 0x014B
+#define IPN3KE_10G_RX_STATS_PAUSE_MAC_CTRL_FRAMES_LO 0x01CA
+#define IPN3KE_10G_RX_STATS_PAUSE_MAC_CTRL_FRAMES_HI 0x01CB
+#define IPN3KE_10G_TX_STATS_IF_ERRORS_LO 0x014C
+#define IPN3KE_10G_TX_STATS_IF_ERRORS_HI 0x014D
+#define IPN3KE_10G_RX_STATS_IF_ERRORS_LO 0x01CC
+#define IPN3KE_10G_RX_STATS_IF_ERRORS_HI 0x01CD
+#define IPN3KE_10G_TX_STATS_UNICAST_FRAME_OK_LO 0x014E
+#define IPN3KE_10G_TX_STATS_UNICAST_FRAME_OK_HI 0x014F
+#define IPN3KE_10G_RX_STATS_UNICAST_FRAME_OK_LO 0x01CE
+#define IPN3KE_10G_RX_STATS_UNICAST_FRAME_OK_HI 0x01CF
+#define IPN3KE_10G_TX_STATS_UNICAST_FRAME_ERR_LO 0x0150
+#define IPN3KE_10G_TX_STATS_UNICAST_FRAME_ERR_HI 0x0151
+#define IPN3KE_10G_RX_STATS_UNICAST_FRAME_ERR_LO 0x01D0
+#define IPN3KE_10G_RX_STATS_UNICAST_FRAME_ERR_HI 0x01D1
+#define IPN3KE_10G_TX_STATS_MULTICAST_FRAME_OK_LO 0x0152
+#define IPN3KE_10G_TX_STATS_MULTICAST_FRAME_OK_HI 0x0153
+#define IPN3KE_10G_RX_STATS_MULTICAST_FRAME_OK_LO 0x01D2
+#define IPN3KE_10G_RX_STATS_MULTICAST_FRAME_OK_HI 0x01D3
+#define IPN3KE_10G_TX_STATS_MULTICAST_FRAME_ERR_LO 0x0154
+#define IPN3KE_10G_TX_STATS_MULTICAST_FRAME_ERR_HI 0x0155
+#define IPN3KE_10G_RX_STATS_MULTICAST_FRAME_ERR_LO 0x01D4
+#define IPN3KE_10G_RX_STATS_MULTICAST_FRAME_ERR_HI 0x01D5
+#define IPN3KE_10G_TX_STATS_BROADCAST_FRAME_OK_LO 0x0156
+#define IPN3KE_10G_TX_STATS_BROADCAST_FRAME_OK_HI 0x0157
+#define IPN3KE_10G_RX_STATS_BROADCAST_FRAME_OK_LO 0x01D6
+#define IPN3KE_10G_RX_STATS_BROADCAST_FRAME_OK_HI 0x01D7
+#define IPN3KE_10G_TX_STATS_BROADCAST_FRAME_ERR_LO 0x0158
+#define IPN3KE_10G_TX_STATS_BROADCAST_FRAME_ERR_HI 0x0159
+#define IPN3KE_10G_RX_STATS_BROADCAST_FRAME_ERR_LO 0x01D8
+#define IPN3KE_10G_RX_STATS_BROADCAST_FRAME_ERR_HI 0x01D9
+#define IPN3KE_10G_TX_STATS_ETHER_STATS_OCTETS_LO 0x015A
+#define IPN3KE_10G_TX_STATS_ETHER_STATS_OCTETS_HI 0x015B
+#define IPN3KE_10G_RX_STATS_ETHER_STATS_OCTETS_LO 0x01DA
+#define IPN3KE_10G_RX_STATS_ETHER_STATS_OCTETS_HI 0x01DB
+#define IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_LO 0x015C
+#define IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_HI 0x015D
+#define IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_LO 0x01DC
+#define IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_HI 0x01DD
+#define IPN3KE_10G_TX_STATS_ETHER_STATS_UNDER_SIZE_PKTS_LO 0x015E
+#define IPN3KE_10G_TX_STATS_ETHER_STATS_UNDER_SIZE_PKTS_HI 0x015F
+#define IPN3KE_10G_RX_STATS_ETHER_STATS_UNDER_SIZE_PKTS_LO 0x01DE
+#define IPN3KE_10G_RX_STATS_ETHER_STATS_UNDER_SIZE_PKTS_HI 0x01DF
+#define IPN3KE_10G_TX_STATS_ETHER_STATS_OVER_SIZE_PKTS_LO 0x0160
+#define IPN3KE_10G_TX_STATS_ETHER_STATS_OVER_SIZE_PKTS_HI 0x0161
+#define IPN3KE_10G_RX_STATS_ETHER_STATS_OVER_SIZE_PKTS_LO 0x01E0
+#define IPN3KE_10G_RX_STATS_ETHER_STATS_OVER_SIZE_PKTS_HI 0x01E1
+#define IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_64_OCTETS_LO 0x0162
+#define IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_64_OCTETS_HI 0x0163
+#define IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_64_OCTETS_LO 0x01E2
+#define IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_64_OCTETS_HI 0x01E3
+#define IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_65_127_OCTETS_LO 0x0164
+#define IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_65_127_OCTETS_HI 0x0165
+#define IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_65_127_OCTETS_LO 0x01E4
+#define IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_65_127_OCTETS_HI 0x01E5
+#define IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_128_255_OCTETS_LO 0x0166
+#define IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_128_255_OCTETS_HI 0x0167
+#define IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_128_255_OCTETS_LO 0x01E6
+#define IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_128_255_OCTETS_HI 0x01E7
+#define IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_256_511_OCTETS_LO 0x0168
+#define IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_256_511_OCTETS_HI 0x0169
+#define IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_256_511_OCTETS_LO 0x01E8
+#define IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_256_511_OCTETS_HI 0x01E9
+#define IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_512_1023_OCTETS_LO 0x016A
+#define IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_512_1023_OCTETS_HI 0x016B
+#define IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_512_1023_OCTETS_LO 0x01EA
+#define IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_512_1023_OCTETS_HI 0x01EB
+#define IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_1024_1518_OCTETS_LO 0x016C
+#define IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_1024_1518_OCTETS_HI 0x016D
+#define IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_1024_1518_OCTETS_LO 0x01EC
+#define IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_1024_1518_OCTETS_HI 0x01ED
+#define IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_1519_X_OCTETS_LO 0x016E
+#define IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_1519_X_OCTETS_HI 0x016F
+#define IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_1519_X_OCTETS_LO 0x01EE
+#define IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_1519_X_OCTETS_HI 0x01EF
+#define IPN3KE_10G_RX_STATS_ETHER_STATS_FRAGMENTS_LO 0x01F0
+#define IPN3KE_10G_RX_STATS_ETHER_STATS_FRAGMENTS_HI 0x01F1
+#define IPN3KE_10G_RX_STATS_ETHER_STATS_JABBERS_LO 0x01F2
+#define IPN3KE_10G_RX_STATS_ETHER_STATS_JABBERS_HI 0x01F3
+#define IPN3KE_10G_RX_STATS_ETHER_STATS_CRC_ERR_LO 0x01F4
+#define IPN3KE_10G_RX_STATS_ETHER_STATS_CRC_ERR_HI 0x01F5
+#define IPN3KE_10G_TX_STATS_UNICAST_MAC_CTRL_FRAMES_LO 0x0176
+#define IPN3KE_10G_TX_STATS_UNICAST_MAC_CTRL_FRAMES_HI 0x0177
+#define IPN3KE_10G_RX_STATS_UNICAST_MAC_CTRL_FRAMES_LO 0x01F6
+#define IPN3KE_10G_RX_STATS_UNICAST_MAC_CTRL_FRAMES_HI 0x01F7
+#define IPN3KE_10G_TX_STATS_MULTICAST_MAC_CTRL_FRAMES_LO 0x0178
+#define IPN3KE_10G_TX_STATS_MULTICAST_MAC_CTRL_FRAMES_HI 0x0179
+#define IPN3KE_10G_RX_STATS_MULTICAST_MAC_CTRL_FRAMES_LO 0x01F8
+#define IPN3KE_10G_RX_STATS_MULTICAST_MAC_CTRL_FRAMES_HI 0x01F9
+#define IPN3KE_10G_TX_STATS_BROADCAST_MAC_CTRL_FRAMES_LO 0x017A
+#define IPN3KE_10G_TX_STATS_BROADCAST_MAC_CTRL_FRAMES_HI 0x017B
+#define IPN3KE_10G_RX_STATS_BROADCAST_MAC_CTRL_FRAMES_LO 0x01FA
+#define IPN3KE_10G_RX_STATS_BROADCAST_MAC_CTRL_FRAMES_HI 0x01FB
+#define IPN3KE_10G_TX_STATS_PFC_MAC_CTRL_FRAMES_LO 0x017C
+#define IPN3KE_10G_TX_STATS_PFC_MAC_CTRL_FRAMES_HI 0x017D
+#define IPN3KE_10G_RX_STATS_PFC_MAC_CTRL_FRAMES_LO 0x01FC
+#define IPN3KE_10G_RX_STATS_PFC_MAC_CTRL_FRAMES_HI 0x01FD
+
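+/*
+ * For the 10G MAC control registers used below, writing 0 enables the
+ * datapath and writing 1 disables it, which is why the enable helpers
+ * write 0 and the disable helpers write 1.
+ */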
+static inline void ipn3ke_xmac_tx_enable(struct ipn3ke_hw *hw,
+ uint32_t mac_num, uint32_t eth_group_sel)
+{
+#define IPN3KE_XMAC_TX_ENABLE (0 & (IPN3KE_MAC_TX_PACKET_CONTROL_MASK))
+
+ (*hw->f_mac_write)(hw,
+ IPN3KE_XMAC_TX_ENABLE,
+ IPN3KE_MAC_TX_PACKET_CONTROL,
+ mac_num,
+ eth_group_sel);
+}
+
+static inline void ipn3ke_xmac_tx_disable(struct ipn3ke_hw *hw,
+ uint32_t mac_num, uint32_t eth_group_sel)
+{
+#define IPN3KE_XMAC_TX_DISABLE (1 & (IPN3KE_MAC_TX_PACKET_CONTROL_MASK))
+
+ (*hw->f_mac_write)(hw,
+ IPN3KE_XMAC_TX_DISABLE,
+ IPN3KE_MAC_TX_PACKET_CONTROL,
+ mac_num,
+ eth_group_sel);
+}
+
+static inline void ipn3ke_xmac_rx_enable(struct ipn3ke_hw *hw,
+ uint32_t mac_num, uint32_t eth_group_sel)
+{
+#define IPN3KE_XMAC_RX_ENABLE (0 & (IPN3KE_MAC_RX_TRANSFER_CONTROL_MASK))
+
+ (*hw->f_mac_write)(hw,
+ IPN3KE_XMAC_RX_ENABLE,
+ IPN3KE_MAC_RX_TRANSFER_CONTROL,
+ mac_num,
+ eth_group_sel);
+}
+
+static inline void ipn3ke_xmac_rx_disable(struct ipn3ke_hw *hw,
+ uint32_t mac_num, uint32_t eth_group_sel)
+{
+#define IPN3KE_XMAC_RX_DISABLE (1 & (IPN3KE_MAC_RX_TRANSFER_CONTROL_MASK))
+
+ (*hw->f_mac_write)(hw,
+ IPN3KE_XMAC_RX_DISABLE,
+ IPN3KE_MAC_RX_TRANSFER_CONTROL,
+ mac_num,
+ eth_group_sel);
+}
+
+static inline void ipn3ke_xmac_smac_ovd_dis(struct ipn3ke_hw *hw,
+ uint32_t mac_num, uint32_t eth_group_sel)
+{
+#define IPN3KE_XMAC_SMAC_OVERRIDE_DISABLE (0 & \
+ (IPN3KE_MAC_TX_SRC_ADDR_OVERRIDE_MASK))
+
+ (*hw->f_mac_write)(hw,
+ IPN3KE_XMAC_SMAC_OVERRIDE_DISABLE,
+ IPN3KE_MAC_TX_SRC_ADDR_OVERRIDE,
+ mac_num,
+ eth_group_sel);
+}
+
+static inline void ipn3ke_xmac_tx_clr_10G_stcs
+(struct ipn3ke_hw *hw, uint32_t mac_num, uint32_t eth_group_sel)
+{
+ uint32_t tmp;
+ tmp = 0x00000000;
+ (*hw->f_mac_read)(hw,
+ &tmp,
+ IPN3KE_10G_TX_STATS_CLR,
+ mac_num,
+ eth_group_sel);
+ tmp |= 0x00000001;
+ (*hw->f_mac_write)(hw,
+ tmp,
+ IPN3KE_10G_TX_STATS_CLR,
+ mac_num,
+ eth_group_sel);
+}
+
+static inline void ipn3ke_xmac_rx_clr_10G_stcs
+(struct ipn3ke_hw *hw, uint32_t mac_num, uint32_t eth_group_sel)
+{
+ uint32_t tmp;
+ tmp = 0x00000000;
+ (*hw->f_mac_read)(hw,
+ &tmp,
+ IPN3KE_10G_RX_STATS_CLR,
+ mac_num,
+ eth_group_sel);
+ tmp |= 0x00000001;
+ (*hw->f_mac_write)(hw,
+ tmp,
+ IPN3KE_10G_RX_STATS_CLR,
+ mac_num,
+ eth_group_sel);
+}
+
+static inline void ipn3ke_xmac_tx_clr_25G_stcs
+(struct ipn3ke_hw *hw, uint32_t mac_num, uint32_t eth_group_sel)
+{
+ uint32_t tmp = 0x00000001;
+
+ /* Bit[0]: Software can set this bit to the value of 1
+ * to reset all of the TX statistics registers at the same time.
+	 * This bit is self-clearing.
+ */
+ (*hw->f_mac_write)(hw,
+ tmp,
+ IPN3KE_25G_TX_STATISTICS_CONFIG,
+ mac_num,
+ eth_group_sel);
+}
+
+static inline void ipn3ke_xmac_rx_clr_25G_stcs
+(struct ipn3ke_hw *hw, uint32_t mac_num, uint32_t eth_group_sel)
+{
+ uint32_t tmp = 0x00000001;
+
+ /* Bit[0]: Software can set this bit to the value of 1
+ * to reset all of the RX statistics registers at the same time.
+	 * This bit is self-clearing.
+ */
+ (*hw->f_mac_write)(hw,
+ tmp,
+ IPN3KE_25G_RX_STATISTICS_CONFIG,
+ mac_num,
+ eth_group_sel);
+}
+
+#endif /* _IPN3KE_ETHDEV_H_ */
diff --git a/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_flow.c b/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_flow.c
new file mode 100644
index 000000000..f857e64af
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_flow.c
@@ -0,0 +1,1380 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_io.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_rawdev.h>
+#include <rte_rawdev_pmd.h>
+#include <rte_bus_ifpga.h>
+#include <ifpga_common.h>
+#include <ifpga_logs.h>
+#include <ifpga_rawdev.h>
+
+#include "ipn3ke_rawdev_api.h"
+#include "ipn3ke_flow.h"
+#include "ipn3ke_logs.h"
+#include "ipn3ke_ethdev.h"
+
+/** Static initializer for items. */
+#define FLOW_PATTERNS(...) \
+ ((const enum rte_flow_item_type []) { \
+ __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
+ })
+
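+/* Each enumerator doubles as the index into ipn3ke_supported_patterns[]
+ * and as the 3-bit rule id (IPN3KE_FLOW_KEY_ID_BITS) programmed into
+ * the flow key.
+ */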
+enum IPN3KE_HASH_KEY_TYPE {
+ IPN3KE_HASH_KEY_VXLAN,
+ IPN3KE_HASH_KEY_MAC,
+ IPN3KE_HASH_KEY_QINQ,
+ IPN3KE_HASH_KEY_MPLS,
+ IPN3KE_HASH_KEY_IP_TCP,
+ IPN3KE_HASH_KEY_IP_UDP,
+ IPN3KE_HASH_KEY_IP_NVGRE,
+ IPN3KE_HASH_KEY_VXLAN_IP_UDP,
+};
+
+struct ipn3ke_flow_parse {
+ uint32_t mark:1; /**< Set if the flow is marked. */
+ uint32_t drop:1; /**< ACL drop. */
+ uint32_t key_type:IPN3KE_FLOW_KEY_ID_BITS;
+ uint32_t mark_id:IPN3KE_FLOW_RESULT_UID_BITS; /**< Mark identifier. */
+	uint8_t key_len; /**< Length in bits. */
+ uint8_t key[BITS_TO_BYTES(IPN3KE_FLOW_KEY_DATA_BITS)];
+ /**< key1, key2 */
+};
+
+typedef int (*pattern_filter_t)(const struct rte_flow_item patterns[],
+ struct rte_flow_error *error, struct ipn3ke_flow_parse *parser);
+
+
+struct ipn3ke_flow_pattern {
+ const enum rte_flow_item_type *const items;
+
+ pattern_filter_t filter;
+};
+
+/*
+ * @ RTL definition:
+ * typedef struct packed {
+ * logic [47:0] vxlan_inner_mac;
+ * logic [23:0] vxlan_vni;
+ * } Hash_Key_Vxlan_t;
+ *
+ * @ flow items:
+ * RTE_FLOW_ITEM_TYPE_VXLAN
+ * RTE_FLOW_ITEM_TYPE_ETH
+ */
+static int
+ipn3ke_pattern_vxlan(const struct rte_flow_item patterns[],
+ struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
+{
+ const struct rte_flow_item_vxlan *vxlan = NULL;
+ const struct rte_flow_item_eth *eth = NULL;
+ const struct rte_flow_item *item;
+
+ for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (/*!item->spec || item->mask || */item->last) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Only support item with 'spec'");
+ return -rte_errno;
+ }
+
+ switch (item->type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth = item->spec;
+
+ rte_memcpy(&parser->key[0],
+ eth->src.addr_bytes,
+ RTE_ETHER_ADDR_LEN);
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ vxlan = item->spec;
+
+ rte_memcpy(&parser->key[6], vxlan->vni, 3);
+ break;
+
+ default:
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+				"Unsupported item type");
+ return -rte_errno;
+ }
+ }
+
+ if (vxlan != NULL && eth != NULL) {
+ parser->key_len = 48 + 24;
+ return 0;
+ }
+
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ patterns,
+ "Missed some patterns");
+ return -rte_errno;
+}
+
+/*
+ * @ RTL definition:
+ * typedef struct packed {
+ * logic [47:0] eth_smac;
+ * } Hash_Key_Mac_t;
+ *
+ * @ flow items:
+ * RTE_FLOW_ITEM_TYPE_ETH
+ */
+static int
+ipn3ke_pattern_mac(const struct rte_flow_item patterns[],
+ struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
+{
+ const struct rte_flow_item_eth *eth = NULL;
+ const struct rte_flow_item *item;
+
+ for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (!item->spec || item->mask || item->last) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Only support item with 'spec'");
+ return -rte_errno;
+ }
+
+ switch (item->type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth = item->spec;
+
+ rte_memcpy(parser->key,
+ eth->src.addr_bytes,
+ RTE_ETHER_ADDR_LEN);
+ break;
+
+ default:
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+				"Unsupported item type");
+ return -rte_errno;
+ }
+ }
+
+ if (eth != NULL) {
+ parser->key_len = 48;
+ return 0;
+ }
+
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ patterns,
+ "Missed some patterns");
+ return -rte_errno;
+}
+
+/*
+ * @ RTL definition:
+ * typedef struct packed {
+ * logic [11:0] outer_vlan_id;
+ * logic [11:0] inner_vlan_id;
+ * } Hash_Key_QinQ_t;
+ *
+ * @ flow items:
+ * RTE_FLOW_ITEM_TYPE_VLAN
+ * RTE_FLOW_ITEM_TYPE_VLAN
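+ *
+ * The two 12-bit VLAN IDs are packed MSB-first into key[0..2]:
+ * key[0] = outer[11:4], key[1] = (outer[3:0] << 4) | inner[11:8],
+ * key[2] = inner[7:0].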
+ */
+static int
+ipn3ke_pattern_qinq(const struct rte_flow_item patterns[],
+ struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
+{
+ const struct rte_flow_item_vlan *outer_vlan = NULL;
+ const struct rte_flow_item_vlan *inner_vlan = NULL;
+ const struct rte_flow_item *item;
+ uint16_t tci;
+
+ for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (!item->spec || item->mask || item->last) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Only support item with 'spec'");
+ return -rte_errno;
+ }
+
+ switch (item->type) {
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ if (!outer_vlan) {
+ outer_vlan = item->spec;
+
+ tci = rte_be_to_cpu_16(outer_vlan->tci);
+ parser->key[0] = (tci & 0xff0) >> 4;
+ parser->key[1] |= (tci & 0x00f) << 4;
+ } else {
+ inner_vlan = item->spec;
+
+ tci = rte_be_to_cpu_16(inner_vlan->tci);
+ parser->key[1] |= (tci & 0xf00) >> 8;
+ parser->key[2] = (tci & 0x0ff);
+ }
+ break;
+
+ default:
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+				"Unsupported item type");
+ return -rte_errno;
+ }
+ }
+
+ if (outer_vlan != NULL && inner_vlan != NULL) {
+ parser->key_len = 12 + 12;
+ return 0;
+ }
+
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ patterns,
+ "Missed some patterns");
+ return -rte_errno;
+}
+
+/*
+ * @ RTL definition:
+ * typedef struct packed {
+ * logic [19:0] mpls_label1;
+ * logic [19:0] mpls_label2;
+ * } Hash_Key_Mpls_t;
+ *
+ * @ flow items:
+ * RTE_FLOW_ITEM_TYPE_MPLS
+ * RTE_FLOW_ITEM_TYPE_MPLS
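+ *
+ * Each label_tc_s[] holds the 20-bit label in its top 20 bits; the two
+ * labels are packed back to back into key[0..4] (40 bits).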
+ */
+static int
+ipn3ke_pattern_mpls(const struct rte_flow_item patterns[],
+ struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
+{
+ const struct rte_flow_item_mpls *mpls1 = NULL;
+ const struct rte_flow_item_mpls *mpls2 = NULL;
+ const struct rte_flow_item *item;
+
+ for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (!item->spec || item->mask || item->last) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Only support item with 'spec'");
+ return -rte_errno;
+ }
+
+ switch (item->type) {
+ case RTE_FLOW_ITEM_TYPE_MPLS:
+ if (!mpls1) {
+ mpls1 = item->spec;
+
+ parser->key[0] = mpls1->label_tc_s[0];
+ parser->key[1] = mpls1->label_tc_s[1];
+ parser->key[2] = mpls1->label_tc_s[2] & 0xf0;
+ } else {
+ mpls2 = item->spec;
+
+ parser->key[2] |=
+ ((mpls2->label_tc_s[0] & 0xf0) >> 4);
+ parser->key[3] =
+ ((mpls2->label_tc_s[0] & 0xf) << 4) |
+ ((mpls2->label_tc_s[1] & 0xf0) >> 4);
+ parser->key[4] =
+ ((mpls2->label_tc_s[1] & 0xf) << 4) |
+ ((mpls2->label_tc_s[2] & 0xf0) >> 4);
+ }
+ break;
+
+ default:
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+				"Unsupported item type");
+ return -rte_errno;
+ }
+ }
+
+ if (mpls1 != NULL && mpls2 != NULL) {
+ parser->key_len = 20 + 20;
+ return 0;
+ }
+
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ patterns,
+ "Missed some patterns");
+ return -rte_errno;
+}
+
+/*
+ * @ RTL definition:
+ * typedef struct packed {
+ * logic [31:0] ip_sa;
+ * logic [15:0] tcp_sport;
+ * } Hash_Key_Ip_Tcp_t;
+ *
+ * @ flow items:
+ * RTE_FLOW_ITEM_TYPE_IPV4
+ * RTE_FLOW_ITEM_TYPE_TCP
+ */
+static int
+ipn3ke_pattern_ip_tcp(const struct rte_flow_item patterns[],
+ struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
+{
+ const struct rte_flow_item_ipv4 *ipv4 = NULL;
+ const struct rte_flow_item_tcp *tcp = NULL;
+ const struct rte_flow_item *item;
+
+ for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (!item->spec || item->mask || item->last) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Only support item with 'spec'");
+ return -rte_errno;
+ }
+
+ switch (item->type) {
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ ipv4 = item->spec;
+
+ rte_memcpy(&parser->key[0], &ipv4->hdr.src_addr, 4);
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ tcp = item->spec;
+
+ rte_memcpy(&parser->key[4], &tcp->hdr.src_port, 2);
+ break;
+
+ default:
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+				"Unsupported item type");
+ return -rte_errno;
+ }
+ }
+
+ if (ipv4 != NULL && tcp != NULL) {
+ parser->key_len = 32 + 16;
+ return 0;
+ }
+
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ patterns,
+ "Missed some patterns");
+ return -rte_errno;
+}
+
+/*
+ * @ RTL definition:
+ * typedef struct packed {
+ * logic [31:0] ip_sa;
+ * logic [15:0] udp_sport;
+ * } Hash_Key_Ip_Udp_t;
+ *
+ * @ flow items:
+ * RTE_FLOW_ITEM_TYPE_IPV4
+ * RTE_FLOW_ITEM_TYPE_UDP
+ */
+static int
+ipn3ke_pattern_ip_udp(const struct rte_flow_item patterns[],
+ struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
+{
+ const struct rte_flow_item_ipv4 *ipv4 = NULL;
+ const struct rte_flow_item_udp *udp = NULL;
+ const struct rte_flow_item *item;
+
+ for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (!item->spec || item->mask || item->last) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Only support item with 'spec'");
+ return -rte_errno;
+ }
+
+ switch (item->type) {
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ ipv4 = item->spec;
+
+ rte_memcpy(&parser->key[0], &ipv4->hdr.src_addr, 4);
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ udp = item->spec;
+
+ rte_memcpy(&parser->key[4], &udp->hdr.src_port, 2);
+ break;
+
+ default:
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+				"Unsupported item type");
+ return -rte_errno;
+ }
+ }
+
+ if (ipv4 != NULL && udp != NULL) {
+ parser->key_len = 32 + 16;
+ return 0;
+ }
+
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ patterns,
+ "Missed some patterns");
+ return -rte_errno;
+}
+
+/*
+ * @ RTL definition:
+ * typedef struct packed {
+ * logic [31:0] ip_sa;
+ * logic [15:0] udp_sport;
+ * logic [23:0] vsid;
+ * } Hash_Key_Ip_Nvgre_t;
+ *
+ * @ flow items:
+ * RTE_FLOW_ITEM_TYPE_IPV4
+ * RTE_FLOW_ITEM_TYPE_UDP
+ * RTE_FLOW_ITEM_TYPE_NVGRE
+ */
+static int
+ipn3ke_pattern_ip_nvgre(const struct rte_flow_item patterns[],
+ struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
+{
+ const struct rte_flow_item_nvgre *nvgre = NULL;
+ const struct rte_flow_item_ipv4 *ipv4 = NULL;
+ const struct rte_flow_item_udp *udp = NULL;
+ const struct rte_flow_item *item;
+
+ for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (!item->spec || item->mask || item->last) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Only support item with 'spec'");
+ return -rte_errno;
+ }
+
+ switch (item->type) {
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ ipv4 = item->spec;
+
+ rte_memcpy(&parser->key[0], &ipv4->hdr.src_addr, 4);
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ udp = item->spec;
+
+ rte_memcpy(&parser->key[4], &udp->hdr.src_port, 2);
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ nvgre = item->spec;
+
+ rte_memcpy(&parser->key[6], nvgre->tni, 3);
+ break;
+
+ default:
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+				"Unsupported item type");
+ return -rte_errno;
+ }
+ }
+
+ if (ipv4 != NULL && udp != NULL && nvgre != NULL) {
+ parser->key_len = 32 + 16 + 24;
+ return 0;
+ }
+
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ patterns,
+ "Missed some patterns");
+ return -rte_errno;
+}
+
+/*
+ * @ RTL definition:
+ * typedef struct packed{
+ * logic [23:0] vxlan_vni;
+ * logic [31:0] ip_sa;
+ * logic [15:0] udp_sport;
+ * } Hash_Key_Vxlan_Ip_Udp_t;
+ *
+ * @ flow items:
+ * RTE_FLOW_ITEM_TYPE_VXLAN
+ * RTE_FLOW_ITEM_TYPE_IPV4
+ * RTE_FLOW_ITEM_TYPE_UDP
+ */
+static int
+ipn3ke_pattern_vxlan_ip_udp(const struct rte_flow_item patterns[],
+ struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
+{
+ const struct rte_flow_item_vxlan *vxlan = NULL;
+ const struct rte_flow_item_ipv4 *ipv4 = NULL;
+ const struct rte_flow_item_udp *udp = NULL;
+ const struct rte_flow_item *item;
+
+ for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (!item->spec || item->mask || item->last) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Only support item with 'spec'");
+ return -rte_errno;
+ }
+
+ switch (item->type) {
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ vxlan = item->spec;
+
+ rte_memcpy(&parser->key[0], vxlan->vni, 3);
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ ipv4 = item->spec;
+
+ rte_memcpy(&parser->key[3], &ipv4->hdr.src_addr, 4);
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ udp = item->spec;
+
+ rte_memcpy(&parser->key[7], &udp->hdr.src_port, 2);
+ break;
+
+ default:
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+				"Unsupported item type");
+ return -rte_errno;
+ }
+ }
+
+ if (vxlan != NULL && ipv4 != NULL && udp != NULL) {
+ parser->key_len = 24 + 32 + 16;
+ return 0;
+ }
+
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ patterns,
+ "Missed some patterns");
+ return -rte_errno;
+}
+
+static const struct ipn3ke_flow_pattern ipn3ke_supported_patterns[] = {
+ [IPN3KE_HASH_KEY_VXLAN] = {
+ .items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH),
+ .filter = ipn3ke_pattern_vxlan,
+ },
+
+ [IPN3KE_HASH_KEY_MAC] = {
+ .items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_ETH),
+ .filter = ipn3ke_pattern_mac,
+ },
+
+ [IPN3KE_HASH_KEY_QINQ] = {
+ .items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_VLAN),
+ .filter = ipn3ke_pattern_qinq,
+ },
+
+ [IPN3KE_HASH_KEY_MPLS] = {
+ .items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_MPLS,
+ RTE_FLOW_ITEM_TYPE_MPLS),
+ .filter = ipn3ke_pattern_mpls,
+ },
+
+ [IPN3KE_HASH_KEY_IP_TCP] = {
+ .items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP),
+ .filter = ipn3ke_pattern_ip_tcp,
+ },
+
+ [IPN3KE_HASH_KEY_IP_UDP] = {
+ .items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP),
+ .filter = ipn3ke_pattern_ip_udp,
+ },
+
+ [IPN3KE_HASH_KEY_IP_NVGRE] = {
+ .items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_NVGRE),
+ .filter = ipn3ke_pattern_ip_nvgre,
+ },
+
+ [IPN3KE_HASH_KEY_VXLAN_IP_UDP] = {
+ .items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP),
+ .filter = ipn3ke_pattern_vxlan_ip_udp,
+ },
+};
+
+static int
+ipn3ke_flow_convert_attributes(const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ if (!attr) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL,
+ "NULL attribute.");
+ return -rte_errno;
+ }
+
+ if (attr->group) {
+ rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ NULL,
+ "groups are not supported");
+ return -rte_errno;
+ }
+
+ if (attr->egress) {
+ rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ NULL,
+ "egress is not supported");
+ return -rte_errno;
+ }
+
+ if (attr->transfer) {
+ rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ NULL,
+ "transfer is not supported");
+ return -rte_errno;
+ }
+
+ if (!attr->ingress) {
+ rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ NULL,
+ "only ingress is supported");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+ipn3ke_flow_convert_actions(const struct rte_flow_action actions[],
+ struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
+{
+ const struct rte_flow_action_mark *mark = NULL;
+
+ if (!actions) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL,
+ "NULL action.");
+ return -rte_errno;
+ }
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ if (mark) {
+ rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "duplicated mark");
+ return -rte_errno;
+ }
+
+ mark = actions->conf;
+ if (!mark) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "mark must be defined");
+ return -rte_errno;
+ } else if (mark->id > IPN3KE_FLOW_RESULT_UID_MAX) {
+ rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "mark id is out of range");
+ return -rte_errno;
+ }
+
+ parser->mark = 1;
+ parser->mark_id = mark->id;
+ break;
+
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ parser->drop = 1;
+ break;
+
+ default:
+ rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "invalid action");
+ return -rte_errno;
+ }
+ }
+
+ if (!parser->drop && !parser->mark) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "no valid actions");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
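+/* Return true when the item types in @input exactly match @patterns,
+ * both arrays being terminated by RTE_FLOW_ITEM_TYPE_END.
+ */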
+static bool
+ipn3ke_match_pattern(const enum rte_flow_item_type *patterns,
+ const struct rte_flow_item *input)
+{
+ const struct rte_flow_item *item = input;
+
+ while ((*patterns == item->type) &&
+ (*patterns != RTE_FLOW_ITEM_TYPE_END)) {
+ patterns++;
+ item++;
+ }
+
+ return (*patterns == RTE_FLOW_ITEM_TYPE_END &&
+ item->type == RTE_FLOW_ITEM_TYPE_END);
+}
+
+static pattern_filter_t
+ipn3ke_find_filter_func(const struct rte_flow_item *input,
+ uint32_t *idx)
+{
+ pattern_filter_t filter = NULL;
+ uint32_t i;
+
+ for (i = 0; i < RTE_DIM(ipn3ke_supported_patterns); i++) {
+ if (ipn3ke_match_pattern(ipn3ke_supported_patterns[i].items,
+ input)) {
+ filter = ipn3ke_supported_patterns[i].filter;
+ *idx = i;
+ break;
+ }
+ }
+
+ return filter;
+}
+
+static int
+ipn3ke_flow_convert_items(const struct rte_flow_item items[],
+ struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
+{
+ pattern_filter_t filter = NULL;
+ uint32_t idx;
+
+ if (!items) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL,
+ "NULL pattern.");
+ return -rte_errno;
+ }
+
+ filter = ipn3ke_find_filter_func(items, &idx);
+
+ if (!filter) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ items,
+ "Unsupported pattern");
+ return -rte_errno;
+ }
+
+ parser->key_type = idx;
+
+ return filter(items, error, parser);
+}
+
+/* Put the least significant @nbits of @data into the @dst bit stream at
+ * @offset; bit offsets run from the MSB to the LSB within each byte.
+ *
+ * MSB LSB
+ * +------+------+------+------+
+ * | | | | |
+ * +------+------+------+------+
+ * ^ ^
+ * |<- data: nbits ->|
+ * |
+ * offset
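+ *
+ * Illustrative example: with a zeroed @dst, copy_data_bits(dst, 0x5,
+ * 17, 3) writes the 3-bit value 101b at bit offsets 17..19, leaving
+ * dst[2] == 0x50.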
+ */
+static void
+copy_data_bits(uint8_t *dst, uint64_t data,
+ uint32_t offset, uint8_t nbits)
+{
+ uint8_t set, *p = &dst[offset / BITS_PER_BYTE];
+ uint8_t bits_to_set = BITS_PER_BYTE - (offset % BITS_PER_BYTE);
+ uint8_t mask_to_set = 0xff >> (offset % BITS_PER_BYTE);
+ uint32_t size = offset + nbits;
+
+ if (nbits > (sizeof(data) * BITS_PER_BYTE)) {
+ IPN3KE_AFU_PMD_ERR("nbits is out of range");
+ return;
+ }
+
+ while (nbits - bits_to_set >= 0) {
+ set = data >> (nbits - bits_to_set);
+
+ *p &= ~mask_to_set;
+ *p |= (set & mask_to_set);
+
+ nbits -= bits_to_set;
+ bits_to_set = BITS_PER_BYTE;
+ mask_to_set = 0xff;
+ p++;
+ }
+
+ if (nbits) {
+ uint8_t shift = BITS_PER_BYTE - (size % BITS_PER_BYTE);
+
+ set = data << shift;
+ mask_to_set = 0xff << shift;
+
+ *p &= ~mask_to_set;
+ *p |= (set & mask_to_set);
+ }
+}
+
+static void
+ipn3ke_flow_key_generation(struct ipn3ke_flow_parse *parser,
+ struct rte_flow *flow)
+{
+ uint32_t i, shift_bytes, len_in_bytes, offset;
+ uint64_t key;
+ uint8_t *dst;
+
+ dst = flow->rule.key;
+
+ copy_data_bits(dst,
+ parser->key_type,
+ IPN3KE_FLOW_KEY_ID_OFFSET,
+ IPN3KE_FLOW_KEY_ID_BITS);
+
+	/* The MSBs of the key are zero-filled when the key is shorter
+	 * than IPN3KE_FLOW_KEY_DATA_BITS bits. The parsed key data is
+	 * saved MSB byte first in the array, so the bits have to be
+	 * shifted into place before formatting.
+	 */
+ key = 0;
+ shift_bytes = 0;
+ len_in_bytes = BITS_TO_BYTES(parser->key_len);
+ offset = (IPN3KE_FLOW_KEY_DATA_OFFSET +
+ IPN3KE_FLOW_KEY_DATA_BITS -
+ parser->key_len);
+
+ for (i = 0; i < len_in_bytes; i++) {
+ key = (key << 8) | parser->key[i];
+
+ if (++shift_bytes == sizeof(key)) {
+ shift_bytes = 0;
+
+ copy_data_bits(dst, key, offset,
+ sizeof(key) * BITS_PER_BYTE);
+ offset += sizeof(key) * BITS_PER_BYTE;
+ key = 0;
+ }
+ }
+
+ if (shift_bytes != 0) {
+ uint32_t rem_bits;
+
+ rem_bits = parser->key_len % (sizeof(key) * BITS_PER_BYTE);
+ key >>= (shift_bytes * 8 - rem_bits);
+ copy_data_bits(dst, key, offset, rem_bits);
+ }
+}
+
+static void
+ipn3ke_flow_result_generation(struct ipn3ke_flow_parse *parser,
+ struct rte_flow *flow)
+{
+ uint8_t *dst;
+
+ if (parser->drop)
+ return;
+
+ dst = flow->rule.result;
+
+ copy_data_bits(dst,
+ 1,
+ IPN3KE_FLOW_RESULT_ACL_OFFSET,
+ IPN3KE_FLOW_RESULT_ACL_BITS);
+
+ copy_data_bits(dst,
+ parser->mark_id,
+ IPN3KE_FLOW_RESULT_UID_OFFSET,
+ IPN3KE_FLOW_RESULT_UID_BITS);
+}
+
+#define MHL_COMMAND_TIME_COUNT 0xFFFF
+#define MHL_COMMAND_TIME_INTERVAL_US 10
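+/* Worst-case wait for the MHL engine to go idle: 0xFFFF polls at
+ * 10 us per poll, roughly 655 ms.
+ */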
+
+static int
+ipn3ke_flow_hw_update(struct ipn3ke_hw *hw,
+ struct rte_flow *flow, uint32_t is_add)
+{
+ uint32_t *pdata = NULL;
+ uint32_t data;
+ uint32_t time_out = MHL_COMMAND_TIME_COUNT;
+ uint32_t i;
+
+ IPN3KE_AFU_PMD_DEBUG("IPN3KE flow dump start\n");
+
+ pdata = (uint32_t *)flow->rule.key;
+ IPN3KE_AFU_PMD_DEBUG(" - key :");
+
+ for (i = 0; i < RTE_DIM(flow->rule.key); i++)
+ IPN3KE_AFU_PMD_DEBUG(" %02x", flow->rule.key[i]);
+
+ for (i = 0; i < 4; i++)
+ IPN3KE_AFU_PMD_DEBUG(" %02x", ipn3ke_swap32(pdata[3 - i]));
+ IPN3KE_AFU_PMD_DEBUG("\n");
+
+ pdata = (uint32_t *)flow->rule.result;
+ IPN3KE_AFU_PMD_DEBUG(" - result:");
+
+ for (i = 0; i < RTE_DIM(flow->rule.result); i++)
+ IPN3KE_AFU_PMD_DEBUG(" %02x", flow->rule.result[i]);
+
+ for (i = 0; i < 1; i++)
+ IPN3KE_AFU_PMD_DEBUG(" %02x", pdata[i]);
+ IPN3KE_AFU_PMD_DEBUG("IPN3KE flow dump end\n");
+
+ pdata = (uint32_t *)flow->rule.key;
+
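+	/* Program the 128-bit key as four 32-bit words: rule.key[] is
+	 * stored MSB byte first, so after the byte swap KEY_0 receives
+	 * the least significant word and KEY_3 the most significant.
+	 */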
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_CLF_MHL_KEY_0,
+ 0,
+ ipn3ke_swap32(pdata[3]),
+ IPN3KE_CLF_MHL_KEY_MASK);
+
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_CLF_MHL_KEY_1,
+ 0,
+ ipn3ke_swap32(pdata[2]),
+ IPN3KE_CLF_MHL_KEY_MASK);
+
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_CLF_MHL_KEY_2,
+ 0,
+ ipn3ke_swap32(pdata[1]),
+ IPN3KE_CLF_MHL_KEY_MASK);
+
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_CLF_MHL_KEY_3,
+ 0,
+ ipn3ke_swap32(pdata[0]),
+ IPN3KE_CLF_MHL_KEY_MASK);
+
+ pdata = (uint32_t *)flow->rule.result;
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_CLF_MHL_RES,
+ 0,
+ ipn3ke_swap32(pdata[0]),
+ IPN3KE_CLF_MHL_RES_MASK);
+
+ /* insert/delete the key and result */
+ data = 0;
+ data = IPN3KE_MASK_READ_REG(hw,
+ IPN3KE_CLF_MHL_MGMT_CTRL,
+ 0,
+ 0x80000000);
+ time_out = MHL_COMMAND_TIME_COUNT;
+ while (IPN3KE_BIT_ISSET(data, IPN3KE_CLF_MHL_MGMT_CTRL_BIT_BUSY) &&
+ (time_out > 0)) {
+ data = IPN3KE_MASK_READ_REG(hw,
+ IPN3KE_CLF_MHL_MGMT_CTRL,
+ 0,
+ 0x80000000);
+ time_out--;
+ rte_delay_us(MHL_COMMAND_TIME_INTERVAL_US);
+ }
+ if (!time_out)
+ return -1;
+ if (is_add)
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_CLF_MHL_MGMT_CTRL,
+ 0,
+ IPN3KE_CLF_MHL_MGMT_CTRL_INSERT,
+ 0x3);
+ else
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_CLF_MHL_MGMT_CTRL,
+ 0,
+ IPN3KE_CLF_MHL_MGMT_CTRL_DELETE,
+ 0x3);
+
+ return 0;
+}
+
+static int
+ipn3ke_flow_hw_flush(struct ipn3ke_hw *hw)
+{
+ uint32_t data;
+ uint32_t time_out = MHL_COMMAND_TIME_COUNT;
+
+ /* flush the MHL lookup table */
+ data = 0;
+ data = IPN3KE_MASK_READ_REG(hw,
+ IPN3KE_CLF_MHL_MGMT_CTRL,
+ 0,
+ 0x80000000);
+ time_out = MHL_COMMAND_TIME_COUNT;
+ while (IPN3KE_BIT_ISSET(data, IPN3KE_CLF_MHL_MGMT_CTRL_BIT_BUSY) &&
+ (time_out > 0)) {
+ data = IPN3KE_MASK_READ_REG(hw,
+ IPN3KE_CLF_MHL_MGMT_CTRL,
+ 0,
+ 0x80000000);
+ time_out--;
+ rte_delay_us(MHL_COMMAND_TIME_INTERVAL_US);
+ }
+ if (!time_out)
+ return -1;
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_CLF_MHL_MGMT_CTRL,
+ 0,
+ IPN3KE_CLF_MHL_MGMT_CTRL_FLUSH,
+ 0x3);
+
+ return 0;
+}
+
+static void
+ipn3ke_flow_convert_finalise(struct ipn3ke_hw *hw,
+ struct ipn3ke_flow_parse *parser, struct rte_flow *flow)
+{
+ ipn3ke_flow_key_generation(parser, flow);
+ ipn3ke_flow_result_generation(parser, flow);
+ ipn3ke_flow_hw_update(hw, flow, 1);
+}
+
+static int
+ipn3ke_flow_convert(const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[], struct rte_flow_error *error,
+ struct ipn3ke_flow_parse *parser)
+{
+ int ret;
+
+ ret = ipn3ke_flow_convert_attributes(attr, error);
+ if (ret)
+ return ret;
+
+ ret = ipn3ke_flow_convert_actions(actions, error, parser);
+ if (ret)
+ return ret;
+
+ ret = ipn3ke_flow_convert_items(items, error, parser);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int
+ipn3ke_flow_validate(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr, const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[], struct rte_flow_error *error)
+{
+ struct ipn3ke_flow_parse parser = {0};
+ return ipn3ke_flow_convert(attr, pattern, actions, error, &parser);
+}
+
+static struct rte_flow *
+ipn3ke_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr, const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[], struct rte_flow_error *error)
+{
+ struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
+ struct ipn3ke_flow_parse parser = {0};
+ struct rte_flow *flow;
+ int ret;
+
+ if (hw->flow_num_entries == hw->flow_max_entries) {
+ rte_flow_error_set(error,
+ ENOBUFS,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "The flow table is full.");
+ return NULL;
+ }
+
+ ret = ipn3ke_flow_convert(attr, pattern, actions, error, &parser);
+ if (ret < 0) {
+ rte_flow_error_set(error,
+ -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "Failed to create flow.");
+ return NULL;
+ }
+
+ flow = rte_zmalloc("ipn3ke_flow", sizeof(struct rte_flow), 0);
+ if (!flow) {
+ rte_flow_error_set(error,
+ ENOMEM,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "Failed to allocate memory");
+ return flow;
+ }
+
+ ipn3ke_flow_convert_finalise(hw, &parser, flow);
+
+ TAILQ_INSERT_TAIL(&hw->flow_list, flow, next);
+
+ return flow;
+}
+
+static int
+ipn3ke_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow, struct rte_flow_error *error)
+{
+ struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
+ int ret = 0;
+
+ ret = ipn3ke_flow_hw_update(hw, flow, 0);
+ if (!ret) {
+ TAILQ_REMOVE(&hw->flow_list, flow, next);
+ rte_free(flow);
+ } else {
+ rte_flow_error_set(error,
+ -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "Failed to destroy flow.");
+ }
+
+ return ret;
+}
+
+static int
+ipn3ke_flow_flush(struct rte_eth_dev *dev,
+ __rte_unused struct rte_flow_error *error)
+{
+ struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
+ struct rte_flow *flow, *temp;
+
+ TAILQ_FOREACH_SAFE(flow, &hw->flow_list, next, temp) {
+ TAILQ_REMOVE(&hw->flow_list, flow, next);
+ rte_free(flow);
+ }
+
+ return ipn3ke_flow_hw_flush(hw);
+}
+
+int ipn3ke_flow_init(void *dev)
+{
+ struct ipn3ke_hw *hw = (struct ipn3ke_hw *)dev;
+ uint32_t data;
+
+ /* disable rx classifier bypass */
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_CLF_RX_TEST,
+ 0, 0, 0x1);
+
+ data = 0;
+ data = IPN3KE_MASK_READ_REG(hw,
+ IPN3KE_CLF_RX_TEST,
+ 0,
+ 0x1);
+ IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_RX_TEST: %x\n", data);
+
+ /* configure base mac address */
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_CLF_BASE_DST_MAC_ADDR_HI,
+ 0,
+ 0x2457,
+ 0xFFFF);
+
+ data = 0;
+ data = IPN3KE_MASK_READ_REG(hw,
+ IPN3KE_CLF_BASE_DST_MAC_ADDR_HI,
+ 0,
+ 0xFFFF);
+ IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_BASE_DST_MAC_ADDR_HI: %x\n", data);
+
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_CLF_BASE_DST_MAC_ADDR_LOW,
+ 0,
+ 0x9bdf1000,
+ 0xFFFFFFFF);
+
+ data = 0;
+ data = IPN3KE_MASK_READ_REG(hw,
+ IPN3KE_CLF_BASE_DST_MAC_ADDR_LOW,
+ 0,
+ 0xFFFFFFFF);
+ IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_BASE_DST_MAC_ADDR_LOW: %x\n", data);
+
+
+ /* configure hash lookup rules enable */
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_CLF_LKUP_ENABLE,
+ 0,
+ 0xFD,
+ 0xFF);
+
+ data = 0;
+ data = IPN3KE_MASK_READ_REG(hw,
+ IPN3KE_CLF_LKUP_ENABLE,
+ 0,
+ 0xFF);
+ IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_LKUP_ENABLE: %x\n", data);
+
+
+	/* configure rx parse config, settings associated with VxLAN */
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_CLF_RX_PARSE_CFG,
+ 0,
+ 0x212b5,
+ 0x3FFFF);
+
+ data = 0;
+ data = IPN3KE_MASK_READ_REG(hw,
+ IPN3KE_CLF_RX_PARSE_CFG,
+ 0,
+ 0x3FFFF);
+ IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_RX_PARSE_CFG: %x\n", data);
+
+
+ /* configure QinQ S-Tag */
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_CLF_QINQ_STAG,
+ 0,
+ 0x88a8,
+ 0xFFFF);
+
+ data = 0;
+ data = IPN3KE_MASK_READ_REG(hw,
+ IPN3KE_CLF_QINQ_STAG,
+ 0,
+ 0xFFFF);
+ IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_QINQ_STAG: %x\n", data);
+
+
+ /* configure gen ctrl */
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_CLF_MHL_GEN_CTRL,
+ 0,
+ 0x3,
+ 0x3);
+
+ data = 0;
+ data = IPN3KE_MASK_READ_REG(hw,
+ IPN3KE_CLF_MHL_GEN_CTRL,
+ 0,
+ 0x1F);
+ IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_MHL_GEN_CTRL: %x\n", data);
+
+
+ /* clear monitoring register */
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_CLF_MHL_MON_0,
+ 0,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF);
+
+ data = 0;
+ data = IPN3KE_MASK_READ_REG(hw,
+ IPN3KE_CLF_MHL_MON_0,
+ 0,
+ 0xFFFFFFFF);
+ IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_MHL_MON_0: %x\n", data);
+
+
+ ipn3ke_flow_hw_flush(hw);
+
+ TAILQ_INIT(&hw->flow_list);
+ hw->flow_max_entries = IPN3KE_MASK_READ_REG(hw,
+ IPN3KE_CLF_EM_NUM,
+ 0,
+ 0xFFFFFFFF);
+	IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_EM_NUM: %x\n", hw->flow_max_entries);
+ hw->flow_num_entries = 0;
+
+ return 0;
+}
+
+const struct rte_flow_ops ipn3ke_flow_ops = {
+ .validate = ipn3ke_flow_validate,
+ .create = ipn3ke_flow_create,
+ .destroy = ipn3ke_flow_destroy,
+ .flush = ipn3ke_flow_flush,
+};
diff --git a/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_flow.h b/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_flow.h
new file mode 100644
index 000000000..ef1a61f60
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_flow.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _IPN3KE_FLOW_H_
+#define _IPN3KE_FLOW_H_
+
+/**
+ * The 'Unused' field pads the total length to DWORD alignment.
+ *
+ * FLOW KEY:
+ * | Unused | Rule id (id) | Key1 Key2 … (data) |
+ * |--------+---------------+--------------------|
+ * | 17bits | 3 bits | Total 108 bits |
+ * MSB ---> LSB
+ *
+ * Note: the MSBs of the key data are zero-filled when the key is
+ * shorter than 108 bits.
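+ *
+ * (17 + 3 + 108 = 128 bits in total, i.e. four 32-bit words.)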
+ */
+#define IPN3KE_FLOW_KEY_UNUSED_BITS 17
+#define IPN3KE_FLOW_KEY_ID_BITS 3
+#define IPN3KE_FLOW_KEY_DATA_BITS 108
+
+#define IPN3KE_FLOW_KEY_TOTAL_BITS \
+ (IPN3KE_FLOW_KEY_UNUSED_BITS + \
+ IPN3KE_FLOW_KEY_ID_BITS + \
+ IPN3KE_FLOW_KEY_DATA_BITS)
+
+#define IPN3KE_FLOW_KEY_ID_OFFSET \
+ (IPN3KE_FLOW_KEY_UNUSED_BITS)
+
+#define IPN3KE_FLOW_KEY_DATA_OFFSET \
+ (IPN3KE_FLOW_KEY_ID_OFFSET + IPN3KE_FLOW_KEY_ID_BITS)
+
+/**
+ * The 'Unused' field pads the total length to DWORD alignment.
+ *
+ * FLOW RESULT:
+ * | Unused | enable (acl) | uid |
+ * |---------+--------------+--------------|
+ * | 15 bits | 1 bit | 16 bits |
+ * MSB ---> LSB
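+ *
+ * (15 + 1 + 16 = 32 bits in total, i.e. one 32-bit word.)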
+ */
+
+#define IPN3KE_FLOW_RESULT_UNUSED_BITS 15
+#define IPN3KE_FLOW_RESULT_ACL_BITS 1
+#define IPN3KE_FLOW_RESULT_UID_BITS 16
+
+#define IPN3KE_FLOW_RESULT_TOTAL_BITS \
+ (IPN3KE_FLOW_RESULT_UNUSED_BITS + \
+ IPN3KE_FLOW_RESULT_ACL_BITS + \
+ IPN3KE_FLOW_RESULT_UID_BITS)
+
+#define IPN3KE_FLOW_RESULT_ACL_OFFSET \
+ (IPN3KE_FLOW_RESULT_UNUSED_BITS)
+
+#define IPN3KE_FLOW_RESULT_UID_OFFSET \
+ (IPN3KE_FLOW_RESULT_ACL_OFFSET + IPN3KE_FLOW_RESULT_ACL_BITS)
+
+#define IPN3KE_FLOW_RESULT_UID_MAX \
+ ((1UL << IPN3KE_FLOW_RESULT_UID_BITS) - 1)
+
+#ifndef BITS_PER_BYTE
+#define BITS_PER_BYTE 8
+#endif
+#define BITS_TO_BYTES(bits) \
+ (((bits) + BITS_PER_BYTE - 1) / BITS_PER_BYTE)
+
+struct ipn3ke_flow_rule {
+ uint8_t key[BITS_TO_BYTES(IPN3KE_FLOW_KEY_TOTAL_BITS)];
+ uint8_t result[BITS_TO_BYTES(IPN3KE_FLOW_RESULT_TOTAL_BITS)];
+};
+
+struct rte_flow {
+ TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
+
+ struct ipn3ke_flow_rule rule;
+};
+
+TAILQ_HEAD(ipn3ke_flow_list, rte_flow);
+
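+/* Byte swap of a 16-bit word, e.g. 0x1122 -> 0x2211. */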
+static inline uint16_t ipn3ke_swap16(uint16_t x)
+{
+ return ((x & 0xff) << 8) | ((x >> 8) & 0xff);
+}
+
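+/* Full byte swap of a 32-bit word built from two 16-bit swaps,
+ * e.g. 0x11223344 -> 0x44332211.
+ */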
+static inline uint32_t ipn3ke_swap32(uint32_t x)
+{
+ uint32_t high, low;
+ uint32_t high1, low1;
+
+ high = (x >> 16) & 0xffff;
+ low = x & 0xffff;
+ high1 = ipn3ke_swap16(low);
+ high1 = high1 << 16;
+ low1 = ipn3ke_swap16(high);
+ low1 = low1 & 0xffff;
+
+ return high1 | low1;
+}
+
+extern const struct rte_flow_ops ipn3ke_flow_ops;
+
+int ipn3ke_flow_init(void *dev);
+
+#endif /* _IPN3KE_FLOW_H_ */
diff --git a/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_logs.h b/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_logs.h
new file mode 100644
index 000000000..147fd8039
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_logs.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _IPN3KE_LOGS_H_
+#define _IPN3KE_LOGS_H_
+
+#include <rte_log.h>
+
+extern int ipn3ke_afu_logtype;
+
+#define IPN3KE_AFU_PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, ipn3ke_afu_logtype, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+#define IPN3KE_AFU_PMD_FUNC_TRACE() IPN3KE_AFU_PMD_LOG(DEBUG, ">>")
+
+#define IPN3KE_AFU_PMD_DEBUG(fmt, args...) \
+ IPN3KE_AFU_PMD_LOG(DEBUG, fmt, ## args)
+
+#define IPN3KE_AFU_PMD_INFO(fmt, args...) \
+ IPN3KE_AFU_PMD_LOG(INFO, fmt, ## args)
+
+#define IPN3KE_AFU_PMD_ERR(fmt, args...) \
+ IPN3KE_AFU_PMD_LOG(ERR, fmt, ## args)
+
+#define IPN3KE_AFU_PMD_WARN(fmt, args...) \
+ IPN3KE_AFU_PMD_LOG(WARNING, fmt, ## args)
+
+#endif /* _IPN3KE_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_rawdev_api.h b/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_rawdev_api.h
new file mode 100644
index 000000000..fd2393fe6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_rawdev_api.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_RAWDEV_API_H_
+#define _IFPGA_RAWDEV_API_H_
+
+#include <rte_ether.h>
+
+enum ifpga_rawdev_retimer_media_type {
+ IFPGA_RAWDEV_RETIMER_MEDIA_TYPE_UNKNOWN = 0,
+ IFPGA_RAWDEV_RETIMER_MEDIA_TYPE_100GBASE_LR4,
+ IFPGA_RAWDEV_RETIMER_MEDIA_TYPE_100GBASE_SR4,
+ IFPGA_RAWDEV_RETIMER_MEDIA_TYPE_100GBASE_CR4,
+ IFPGA_RAWDEV_RETIMER_MEDIA_TYPE_40GBASE_LR4,
+ IFPGA_RAWDEV_RETIMER_MEDIA_TYPE_400GBASE_SR4,
+ IFPGA_RAWDEV_RETIMER_MEDIA_TYPE_40GBASE_CR4,
+ IFPGA_RAWDEV_RETIMER_MEDIA_TYPE_25GBASE_SR,
+ IFPGA_RAWDEV_RETIMER_MEDIA_TYPE_25GBASE_CR,
+ IFPGA_RAWDEV_RETIMER_MEDIA_TYPE_10GBASE_LR,
+ IFPGA_RAWDEV_RETIMER_MEDIA_TYPE_10GBASE_SR,
+ IFPGA_RAWDEV_RETIMER_MEDIA_TYPE_10GBASE_DAC,
+ IFPGA_RAWDEV_RETIMER_MEDIA_TYPE_DEFAULT
+};
+
+enum ifpga_rawdev_retimer_mac_type {
+ IFPGA_RAWDEV_RETIMER_MAC_TYPE_UNKNOWN = 0,
+ IFPGA_RAWDEV_RETIMER_MAC_TYPE_100GE_CAUI,
+ IFPGA_RAWDEVG_RETIMER_MAC_TYPE_40GE_XLAUI,
+ IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI,
+ IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI,
+ IFPGA_RAWDEV_RETIMER_MAC_TYPE_DEFAULT
+};
+
+#define IFPGA_RAWDEV_LINK_SPEED_10GB_SHIFT 0x0
+#define IFPGA_RAWDEV_LINK_SPEED_40GB_SHIFT 0x1
+#define IFPGA_RAWDEV_LINK_SPEED_25GB_SHIFT 0x2
+
+enum ifpga_rawdev_link_speed {
+ IFPGA_RAWDEV_LINK_SPEED_UNKNOWN = 0,
+ IFPGA_RAWDEV_LINK_SPEED_10GB =
+ (1 << IFPGA_RAWDEV_LINK_SPEED_10GB_SHIFT),
+ IFPGA_RAWDEV_LINK_SPEED_40GB =
+ (1 << IFPGA_RAWDEV_LINK_SPEED_40GB_SHIFT),
+ IFPGA_RAWDEV_LINK_SPEED_25GB =
+ (1 << IFPGA_RAWDEV_LINK_SPEED_25GB_SHIFT),
+};
+
+struct ifpga_rawdevg_retimer_info {
+ int retimer_num;
+ int port_num;
+ enum ifpga_rawdev_retimer_media_type media_type;
+ enum ifpga_rawdev_retimer_mac_type mac_type;
+};
+
+struct ifpga_rawdevg_link_info {
+ int port;
+ int link_up;
+ enum ifpga_rawdev_link_speed link_speed;
+};
+
+struct ipn3ke_pub_func {
+ struct ifpga_rawdev *(*get_ifpga_rawdev)(const struct rte_rawdev *rdv);
+ int (*set_i40e_sw_dev)(uint16_t port_id, struct rte_eth_dev *sw_dev);
+};
+
+/**
+ * @internal
+ * The public functions that bridge the PAC N3000 FPGA and the i40e driver.
+ */
+extern struct ipn3ke_pub_func ipn3ke_bridge_func;
+
+
+#endif /* _IFPGA_RAWDEV_API_H_ */
diff --git a/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_representor.c b/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_representor.c
new file mode 100644
index 000000000..b673c4914
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_representor.c
@@ -0,0 +1,2985 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <stdint.h>
+#include <unistd.h>
+
+#include <rte_bus_pci.h>
+#include <rte_ethdev.h>
+#include <rte_pci.h>
+#include <rte_malloc.h>
+
+#include <rte_mbuf.h>
+#include <rte_sched.h>
+#include <rte_ethdev_driver.h>
+#include <rte_spinlock.h>
+
+#include <rte_io.h>
+#include <rte_rawdev.h>
+#include <rte_rawdev_pmd.h>
+#include <rte_bus_ifpga.h>
+#include <ifpga_logs.h>
+
+#include "ipn3ke_rawdev_api.h"
+#include "ipn3ke_flow.h"
+#include "ipn3ke_logs.h"
+#include "ipn3ke_ethdev.h"
+
+static int ipn3ke_rpst_scan_num;
+static pthread_t ipn3ke_rpst_scan_thread;
+
+/** Doubly linked list of representor ports. */
+TAILQ_HEAD(ipn3ke_rpst_list, ipn3ke_rpst);
+
+static struct ipn3ke_rpst_list ipn3ke_rpst_list =
+ TAILQ_HEAD_INITIALIZER(ipn3ke_rpst_list);
+
+static rte_spinlock_t ipn3ke_link_notify_list_lk = RTE_SPINLOCK_INITIALIZER;
+
+static int
+ipn3ke_rpst_link_check(struct ipn3ke_rpst *rpst);
+
+static int
+ipn3ke_rpst_dev_infos_get(struct rte_eth_dev *ethdev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
+ struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
+
+ dev_info->speed_capa =
+ (hw->retimer.mac_type ==
+ IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) ?
+ ETH_LINK_SPEED_10G :
+ ((hw->retimer.mac_type ==
+ IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) ?
+ ETH_LINK_SPEED_25G :
+ ETH_LINK_SPEED_AUTONEG);
+
+ dev_info->max_rx_queues = 1;
+ dev_info->max_tx_queues = 1;
+ dev_info->min_rx_bufsize = IPN3KE_AFU_BUF_SIZE_MIN;
+ dev_info->max_rx_pktlen = IPN3KE_AFU_FRAME_SIZE_MAX;
+ dev_info->max_mac_addrs = hw->port_num;
+ dev_info->max_vfs = 0;
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .offloads = 0,
+ };
+ dev_info->rx_queue_offload_capa = 0;
+ dev_info->rx_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_QINQ_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_VLAN_EXTEND |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ dev_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_QINQ_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO |
+ DEV_TX_OFFLOAD_IPIP_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_MULTI_SEGS |
+ dev_info->tx_queue_offload_capa;
+
+ dev_info->dev_capa =
+ RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
+ RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+
+ dev_info->switch_info.name = ethdev->device->name;
+ dev_info->switch_info.domain_id = rpst->switch_domain_id;
+ dev_info->switch_info.port_id = rpst->port_id;
+
+ return 0;
+}
+
+static int
+ipn3ke_rpst_dev_configure(__rte_unused struct rte_eth_dev *dev)
+{
+ return 0;
+}
+
+static int
+ipn3ke_rpst_dev_start(struct rte_eth_dev *dev)
+{
+ struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
+ struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
+ struct rte_rawdev *rawdev;
+ uint64_t base_mac;
+ uint32_t val;
+ char attr_name[IPN3KE_RAWDEV_ATTR_LEN_MAX];
+
+ rawdev = hw->rawdev;
+
+ memset(attr_name, 0, sizeof(attr_name));
+ snprintf(attr_name, IPN3KE_RAWDEV_ATTR_LEN_MAX, "%s",
+ "LineSideBaseMAC");
+ rawdev->dev_ops->attr_get(rawdev, attr_name, &base_mac);
+ rte_ether_addr_copy((struct rte_ether_addr *)&base_mac,
+ &rpst->mac_addr);
+
+ rte_ether_addr_copy(&rpst->mac_addr, &dev->data->mac_addrs[0]);
+ dev->data->mac_addrs->addr_bytes[RTE_ETHER_ADDR_LEN - 1] =
+ (uint8_t)rpst->port_id + 1;
+
+ if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
+ /* Set mac address */
+ rte_memcpy(((char *)(&val)),
+ (char *)&dev->data->mac_addrs->addr_bytes[0],
+ sizeof(uint32_t));
+ (*hw->f_mac_write)(hw,
+ val,
+ IPN3KE_MAC_PRIMARY_MAC_ADDR0,
+ rpst->port_id,
+ 0);
+ rte_memcpy(((char *)(&val)),
+ (char *)&dev->data->mac_addrs->addr_bytes[4],
+ sizeof(uint16_t));
+ (*hw->f_mac_write)(hw,
+ val,
+ IPN3KE_MAC_PRIMARY_MAC_ADDR1,
+ rpst->port_id,
+ 0);
+
+ /* Enable the TX path */
+ ipn3ke_xmac_tx_enable(hw, rpst->port_id, 0);
+
+ /* Disables source address override */
+ ipn3ke_xmac_smac_ovd_dis(hw, rpst->port_id, 0);
+
+ /* Enable the RX path */
+ ipn3ke_xmac_rx_enable(hw, rpst->port_id, 0);
+
+ /* Clear line side TX statistics counters */
+ ipn3ke_xmac_tx_clr_10G_stcs(hw, rpst->port_id, 0);
+
+ /* Clear line side RX statistics counters */
+ ipn3ke_xmac_rx_clr_10G_stcs(hw, rpst->port_id, 0);
+
+ /* Clear NIC side TX statistics counters */
+ ipn3ke_xmac_tx_clr_10G_stcs(hw, rpst->port_id, 1);
+
+ /* Clear NIC side RX statistics counters */
+ ipn3ke_xmac_rx_clr_10G_stcs(hw, rpst->port_id, 1);
+ } else if (hw->retimer.mac_type ==
+ IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) {
+ /* Clear line side TX statistics counters */
+ ipn3ke_xmac_tx_clr_25G_stcs(hw, rpst->port_id, 0);
+
+ /* Clear line side RX statistics counters */
+ ipn3ke_xmac_rx_clr_25G_stcs(hw, rpst->port_id, 0);
+
+ /* Clear NIC side TX statistics counters */
+ ipn3ke_xmac_tx_clr_25G_stcs(hw, rpst->port_id, 1);
+
+ /* Clear NIC side RX statistics counters */
+ ipn3ke_xmac_rx_clr_25G_stcs(hw, rpst->port_id, 1);
+ }
+
+ ipn3ke_rpst_link_update(dev, 0);
+
+ return 0;
+}
+
+static void
+ipn3ke_rpst_dev_stop(struct rte_eth_dev *dev)
+{
+ struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
+ struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
+
+ if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
+ /* Disable the TX path */
+ ipn3ke_xmac_tx_disable(hw, rpst->port_id, 0);
+
+ /* Disable the RX path */
+ ipn3ke_xmac_rx_disable(hw, rpst->port_id, 0);
+ }
+}
+
+static void
+ipn3ke_rpst_dev_close(struct rte_eth_dev *dev)
+{
+ struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
+ struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
+
+ if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
+ /* Disable the TX path */
+ ipn3ke_xmac_tx_disable(hw, rpst->port_id, 0);
+
+ /* Disable the RX path */
+ ipn3ke_xmac_rx_disable(hw, rpst->port_id, 0);
+ }
+}
+
+/*
+ * Reset the PF device only to re-initialize resources in the PMD layer.
+ */
+static int
+ipn3ke_rpst_dev_reset(struct rte_eth_dev *dev)
+{
+ struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
+ struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
+
+ if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
+ /* Disable the TX path */
+ ipn3ke_xmac_tx_disable(hw, rpst->port_id, 0);
+
+ /* Disable the RX path */
+ ipn3ke_xmac_rx_disable(hw, rpst->port_id, 0);
+ }
+
+ return 0;
+}
+
+static int
+ipn3ke_rpst_rx_queue_start(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t rx_queue_id)
+{
+ return 0;
+}
+
+static int
+ipn3ke_rpst_rx_queue_stop(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t rx_queue_id)
+{
+ return 0;
+}
+
+static int
+ipn3ke_rpst_tx_queue_start(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t tx_queue_id)
+{
+ return 0;
+}
+
+static int
+ipn3ke_rpst_tx_queue_stop(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t tx_queue_id)
+{
+ return 0;
+}
+
+static int
+ipn3ke_rpst_rx_queue_setup(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t queue_idx, __rte_unused uint16_t nb_desc,
+ __rte_unused unsigned int socket_id,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
+ __rte_unused struct rte_mempool *mp)
+{
+ return 0;
+}
+
+static void
+ipn3ke_rpst_rx_queue_release(__rte_unused void *rxq)
+{
+}
+
+static int
+ipn3ke_rpst_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t queue_idx, __rte_unused uint16_t nb_desc,
+ __rte_unused unsigned int socket_id,
+ __rte_unused const struct rte_eth_txconf *tx_conf)
+{
+ return 0;
+}
+
+static void
+ipn3ke_rpst_tx_queue_release(__rte_unused void *txq)
+{
+}
+
+/* Statistics collected by each port, VSI, VEB, and S-channel */
+struct ipn3ke_rpst_eth_stats {
+ uint64_t tx_bytes; /* gotc */
+ uint64_t tx_multicast; /* mptc */
+ uint64_t tx_broadcast; /* bptc */
+ uint64_t tx_unicast; /* uptc */
+ uint64_t tx_discards; /* tdpc */
+ uint64_t tx_errors; /* tepc */
+ uint64_t rx_bytes; /* gorc */
+ uint64_t rx_multicast; /* mprc */
+ uint64_t rx_broadcast; /* bprc */
+ uint64_t rx_unicast; /* uprc */
+ uint64_t rx_discards; /* rdpc */
+ uint64_t rx_unknown_protocol; /* rupp */
+};
+
+/* Store a statistic's name and its offset in the stats structure. */
+struct ipn3ke_rpst_xstats_name_offset {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ unsigned int offset;
+};
+
+static const struct ipn3ke_rpst_xstats_name_offset
+ipn3ke_rpst_stats_strings[] = {
+ {"tx_multicast_packets", offsetof(struct ipn3ke_rpst_eth_stats,
+ tx_multicast)},
+ {"tx_broadcast_packets", offsetof(struct ipn3ke_rpst_eth_stats,
+ tx_broadcast)},
+ {"tx_unicast_packets", offsetof(struct ipn3ke_rpst_eth_stats,
+ tx_unicast)},
+ {"tx_dropped_packets", offsetof(struct ipn3ke_rpst_eth_stats,
+ tx_discards)},
+ {"rx_multicast_packets", offsetof(struct ipn3ke_rpst_eth_stats,
+ rx_multicast)},
+ {"rx_broadcast_packets", offsetof(struct ipn3ke_rpst_eth_stats,
+ rx_broadcast)},
+ {"rx_unicast_packets", offsetof(struct ipn3ke_rpst_eth_stats,
+ rx_unicast)},
+ {"rx_dropped_packets", offsetof(struct ipn3ke_rpst_eth_stats,
+ rx_discards)},
+ {"rx_unknown_protocol_packets", offsetof(struct ipn3ke_rpst_eth_stats,
+ rx_unknown_protocol)},
+};
+
+#define IPN3KE_RPST_ETH_XSTATS_CNT (sizeof(ipn3ke_rpst_stats_strings) / \
+ sizeof(ipn3ke_rpst_stats_strings[0]))
+
+#define IPN3KE_RPST_PRIO_XSTATS_CNT 8
+
+/* Statistics collected by the MAC */
+struct ipn3ke_rpst_hw_port_stats {
+ /* eth stats collected by the port */
+ struct ipn3ke_rpst_eth_stats eth;
+
+ /* additional port specific stats */
+ uint64_t tx_dropped_link_down;
+ uint64_t crc_errors;
+ uint64_t illegal_bytes;
+ uint64_t error_bytes;
+ uint64_t mac_local_faults;
+ uint64_t mac_remote_faults;
+ uint64_t rx_length_errors;
+ uint64_t link_xon_rx;
+ uint64_t link_xoff_rx;
+ uint64_t priority_xon_rx[IPN3KE_RPST_PRIO_XSTATS_CNT];
+ uint64_t priority_xoff_rx[IPN3KE_RPST_PRIO_XSTATS_CNT];
+ uint64_t link_xon_tx;
+ uint64_t link_xoff_tx;
+ uint64_t priority_xon_tx[IPN3KE_RPST_PRIO_XSTATS_CNT];
+ uint64_t priority_xoff_tx[IPN3KE_RPST_PRIO_XSTATS_CNT];
+ uint64_t priority_xon_2_xoff[IPN3KE_RPST_PRIO_XSTATS_CNT];
+ uint64_t rx_size_64;
+ uint64_t rx_size_65_127;
+ uint64_t rx_size_128_255;
+ uint64_t rx_size_256_511;
+ uint64_t rx_size_512_1023;
+ uint64_t rx_size_1024_1518;
+ uint64_t rx_size_big;
+ uint64_t rx_undersize;
+ uint64_t rx_fragments;
+ uint64_t rx_oversize;
+ uint64_t rx_jabber;
+ uint64_t tx_size_64;
+ uint64_t tx_size_65_127;
+ uint64_t tx_size_128_255;
+ uint64_t tx_size_256_511;
+ uint64_t tx_size_512_1023;
+ uint64_t tx_size_1024_1518;
+ uint64_t tx_size_1519_to_max;
+ uint64_t mac_short_packet_dropped;
+ uint64_t checksum_error;
+ /* flow director stats */
+ uint64_t fd_atr_match;
+ uint64_t fd_sb_match;
+ uint64_t fd_atr_tunnel_match;
+ uint32_t fd_atr_status;
+ uint32_t fd_sb_status;
+ /* EEE LPI */
+ uint32_t tx_lpi_status;
+ uint32_t rx_lpi_status;
+ uint64_t tx_lpi_count;
+ uint64_t rx_lpi_count;
+};
+
+static const struct ipn3ke_rpst_xstats_name_offset
+ipn3ke_rpst_hw_port_strings[] = {
+ {"tx_link_down_dropped", offsetof(struct ipn3ke_rpst_hw_port_stats,
+ tx_dropped_link_down)},
+ {"rx_crc_errors", offsetof(struct ipn3ke_rpst_hw_port_stats,
+ crc_errors)},
+ {"rx_illegal_byte_errors", offsetof(struct ipn3ke_rpst_hw_port_stats,
+ illegal_bytes)},
+ {"rx_error_bytes", offsetof(struct ipn3ke_rpst_hw_port_stats,
+ error_bytes)},
+ {"mac_local_errors", offsetof(struct ipn3ke_rpst_hw_port_stats,
+ mac_local_faults)},
+ {"mac_remote_errors", offsetof(struct ipn3ke_rpst_hw_port_stats,
+ mac_remote_faults)},
+ {"rx_length_errors", offsetof(struct ipn3ke_rpst_hw_port_stats,
+ rx_length_errors)},
+ {"tx_xon_packets", offsetof(struct ipn3ke_rpst_hw_port_stats,
+ link_xon_tx)},
+ {"rx_xon_packets", offsetof(struct ipn3ke_rpst_hw_port_stats,
+ link_xon_rx)},
+ {"tx_xoff_packets", offsetof(struct ipn3ke_rpst_hw_port_stats,
+ link_xoff_tx)},
+ {"rx_xoff_packets", offsetof(struct ipn3ke_rpst_hw_port_stats,
+ link_xoff_rx)},
+ {"rx_size_64_packets", offsetof(struct ipn3ke_rpst_hw_port_stats,
+ rx_size_64)},
+ {"rx_size_65_to_127_packets", offsetof(struct ipn3ke_rpst_hw_port_stats,
+ rx_size_65_127)},
+ {"rx_size_128_to_255_packets",
+ offsetof(struct ipn3ke_rpst_hw_port_stats,
+ rx_size_128_255)},
+ {"rx_size_256_to_511_packets",
+ offsetof(struct ipn3ke_rpst_hw_port_stats,
+ rx_size_256_511)},
+ {"rx_size_512_to_1023_packets",
+ offsetof(struct ipn3ke_rpst_hw_port_stats,
+ rx_size_512_1023)},
+ {"rx_size_1024_to_1518_packets",
+ offsetof(struct ipn3ke_rpst_hw_port_stats,
+ rx_size_1024_1518)},
+ {"rx_size_1519_to_max_packets",
+ offsetof(struct ipn3ke_rpst_hw_port_stats,
+ rx_size_big)},
+ {"rx_undersized_errors", offsetof(struct ipn3ke_rpst_hw_port_stats,
+ rx_undersize)},
+ {"rx_oversize_errors", offsetof(struct ipn3ke_rpst_hw_port_stats,
+ rx_oversize)},
+ {"rx_mac_short_dropped", offsetof(struct ipn3ke_rpst_hw_port_stats,
+ mac_short_packet_dropped)},
+ {"rx_fragmented_errors", offsetof(struct ipn3ke_rpst_hw_port_stats,
+ rx_fragments)},
+ {"rx_jabber_errors", offsetof(struct ipn3ke_rpst_hw_port_stats,
+ rx_jabber)},
+ {"tx_size_64_packets", offsetof(struct ipn3ke_rpst_hw_port_stats,
+ tx_size_64)},
+ {"tx_size_65_to_127_packets",
+ offsetof(struct ipn3ke_rpst_hw_port_stats,
+ tx_size_65_127)},
+ {"tx_size_128_to_255_packets",
+ offsetof(struct ipn3ke_rpst_hw_port_stats,
+ tx_size_128_255)},
+ {"tx_size_256_to_511_packets",
+ offsetof(struct ipn3ke_rpst_hw_port_stats,
+ tx_size_256_511)},
+ {"tx_size_512_to_1023_packets",
+ offsetof(struct ipn3ke_rpst_hw_port_stats,
+ tx_size_512_1023)},
+ {"tx_size_1024_to_1518_packets",
+ offsetof(struct ipn3ke_rpst_hw_port_stats,
+ tx_size_1024_1518)},
+ {"tx_size_1519_to_max_packets",
+ offsetof(struct ipn3ke_rpst_hw_port_stats,
+ tx_size_1519_to_max)},
+ {"rx_flow_director_atr_match_packets",
+ offsetof(struct ipn3ke_rpst_hw_port_stats,
+ fd_atr_match)},
+ {"rx_flow_director_sb_match_packets",
+ offsetof(struct ipn3ke_rpst_hw_port_stats,
+ fd_sb_match)},
+ {"tx_low_power_idle_status", offsetof(struct ipn3ke_rpst_hw_port_stats,
+ tx_lpi_status)},
+ {"rx_low_power_idle_status", offsetof(struct ipn3ke_rpst_hw_port_stats,
+ rx_lpi_status)},
+ {"tx_low_power_idle_count", offsetof(struct ipn3ke_rpst_hw_port_stats,
+ tx_lpi_count)},
+ {"rx_low_power_idle_count", offsetof(struct ipn3ke_rpst_hw_port_stats,
+ rx_lpi_count)},
+};
+
+#define IPN3KE_RPST_HW_PORT_XSTATS_CNT (sizeof(ipn3ke_rpst_hw_port_strings) \
+ / sizeof(ipn3ke_rpst_hw_port_strings[0]))
+
+static const struct ipn3ke_rpst_xstats_name_offset
+ipn3ke_rpst_rxq_prio_strings[] = {
+ {"xon_packets", offsetof(struct ipn3ke_rpst_hw_port_stats,
+ priority_xon_rx)},
+ {"xoff_packets", offsetof(struct ipn3ke_rpst_hw_port_stats,
+ priority_xoff_rx)},
+};
+
+#define IPN3KE_RPST_RXQ_PRIO_XSTATS_CNT (sizeof(ipn3ke_rpst_rxq_prio_strings) \
+ / sizeof(ipn3ke_rpst_rxq_prio_strings[0]))
+
+static const struct ipn3ke_rpst_xstats_name_offset
+ipn3ke_rpst_txq_prio_strings[] = {
+ {"xon_packets", offsetof(struct ipn3ke_rpst_hw_port_stats,
+ priority_xon_tx)},
+ {"xoff_packets", offsetof(struct ipn3ke_rpst_hw_port_stats,
+ priority_xoff_tx)},
+ {"xon_to_xoff_packets", offsetof(struct ipn3ke_rpst_hw_port_stats,
+ priority_xon_2_xoff)},
+};
+
+#define IPN3KE_RPST_TXQ_PRIO_XSTATS_CNT (sizeof(ipn3ke_rpst_txq_prio_strings) \
+ / sizeof(ipn3ke_rpst_txq_prio_strings[0]))
+
+static uint32_t
+ipn3ke_rpst_xstats_calc_num(void)
+{
+ return IPN3KE_RPST_ETH_XSTATS_CNT
+ + IPN3KE_RPST_HW_PORT_XSTATS_CNT
+ + (IPN3KE_RPST_RXQ_PRIO_XSTATS_CNT
+ * IPN3KE_RPST_PRIO_XSTATS_CNT)
+ + (IPN3KE_RPST_TXQ_PRIO_XSTATS_CNT
+ * IPN3KE_RPST_PRIO_XSTATS_CNT);
+}
+
+static void
+ipn3ke_rpst_25g_nic_side_tx_stats_reset(struct ipn3ke_hw *hw,
+uint16_t port_id)
+{
+ uint32_t tmp = 0x00000001;
+ /* Bit[0]: Software can set this bit to the value of 1
+ * to reset all of the TX statistics registers at the same time.
+	 * This bit is self-clearing.
+ */
+ (*hw->f_mac_write)(hw,
+ tmp,
+ IPN3KE_25G_TX_STATISTICS_CONFIG,
+ port_id,
+ 1);
+
+ while (tmp & 0x00000001) {
+ tmp = 0x00000000;
+ (*hw->f_mac_read)(hw,
+ &tmp,
+ IPN3KE_25G_TX_STATISTICS_CONFIG,
+ port_id,
+ 1);
+ if (tmp & 0x00000001)
+ usleep(5);
+ else
+ return;
+ }
+}
+
+static void
+ipn3ke_rpst_25g_nic_side_rx_stats_reset(struct ipn3ke_hw *hw,
+uint16_t port_id)
+{
+ uint32_t tmp = 0x00000001;
+ /* Bit[0]: Software can set this bit to the value of 1
+ * to reset all of the RX statistics registers at the same time.
+	 * This bit is self-clearing.
+ */
+ (*hw->f_mac_write)(hw,
+ tmp,
+ IPN3KE_25G_RX_STATISTICS_CONFIG,
+ port_id,
+ 1);
+
+ while (tmp & 0x00000001) {
+ tmp = 0x00000000;
+ (*hw->f_mac_read)(hw,
+ &tmp,
+ IPN3KE_25G_RX_STATISTICS_CONFIG,
+ port_id,
+ 1);
+ if (tmp & 0x00000001)
+ usleep(5);
+ else
+ return;
+ }
+}
+
+static void
+ipn3ke_rpst_10g_nic_side_tx_stats_reset(struct ipn3ke_hw *hw,
+uint16_t port_id)
+{
+ uint32_t tmp;
+
+	/* Bit [0]: Set this register to 1 to clear all TX statistics
+	 * counters. The IP core clears this bit when all counters are
+	 * cleared.
+	 * Bits [31:1]: Reserved.
+	 */
+ tmp = 0x00000000;
+ (*hw->f_mac_read)(hw,
+ &tmp,
+ IPN3KE_10G_TX_STATS_CLR,
+ port_id,
+ 1);
+ tmp |= 0x00000001;
+ (*hw->f_mac_write)(hw,
+ tmp,
+ IPN3KE_10G_TX_STATS_CLR,
+ port_id,
+ 1);
+}
+
+static void
+ipn3ke_rpst_10g_nic_side_rx_stats_reset(struct ipn3ke_hw *hw,
+uint16_t port_id)
+{
+ uint32_t tmp;
+
+ /*Bit [0]: Set this register to 1 to clear all RX statistics
+ *counters.
+ *The IP core clears this bit when all counters are cleared.
+ *Bits [31:1]: Reserved
+ */
+ tmp = 0x00000000;
+ (*hw->f_mac_read)(hw,
+ &tmp,
+ IPN3KE_10G_RX_STATS_CLR,
+ port_id,
+ 1);
+ tmp |= 0x00000001;
+ (*hw->f_mac_write)(hw,
+ tmp,
+ IPN3KE_10G_RX_STATS_CLR,
+ port_id,
+ 1);
+}
+
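+/* Each 64-bit statistics counter is exposed as a pair of 32-bit
+ * registers; the value is composed as (hi << IPN3KE_REGISTER_WIDTH) | lo,
+ * where IPN3KE_REGISTER_WIDTH is expected to be 32 for this MAC.
+ */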
+static uint64_t
+ipn3ke_rpst_read_64bits_statistics_register(uint32_t addr_lo,
+uint32_t addr_hi, struct ipn3ke_hw *hw, uint16_t port_id)
+{
+ uint32_t statistics_lo = 0x00000000;
+ uint32_t statistics_hi = 0x00000000;
+ uint64_t statistics = 0x0000000000000000;
+
+ (*hw->f_mac_read)(hw,
+ &statistics_lo,
+ addr_lo,
+ port_id,
+ 0);
+
+ (*hw->f_mac_read)(hw,
+ &statistics_hi,
+ addr_hi,
+ port_id,
+ 0);
+
+ statistics += statistics_hi;
+ statistics = statistics << IPN3KE_REGISTER_WIDTH;
+ statistics += statistics_lo;
+	return statistics;
+}
+
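+/* Read the complete 25G line-side counter set. The sequence is: clear
+ * any pending shadow-copy request, pause both counter banks (bit[2] of
+ * the statistics config registers) so that a consistent snapshot can
+ * be read, accumulate the counters into hw_stats, then resume
+ * real-time counting. Counters with no matching field in
+ * ipn3ke_rpst_hw_port_stats are read but their values are discarded.
+ */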
+static int
+ipn3ke_rpst_read_25g_lineside_stats_registers
+(struct ipn3ke_hw *hw,
+uint16_t port_id,
+struct ipn3ke_rpst_hw_port_stats *hw_stats)
+{
+ uint32_t tmp;
+ uint64_t statistics;
+
+ memset(hw_stats, 0, sizeof(*hw_stats));
+
+	/* Check whether the Tx statistics are in real-time mode;
+	 * if the counters have been paused, switch them back to real time.
+	 */
+ tmp = 0x00000000;
+ (*hw->f_mac_read)(hw,
+ &tmp,
+ IPN3KE_25G_TX_STATISTICS_CONFIG,
+ port_id,
+ 0);
+
+ if (tmp & IPN3KE_25G_TX_STATISTICS_CONFIG_SHADOW_REQUEST_MASK) {
+ tmp &= 0xfffffffb;
+ (*hw->f_mac_write)(hw,
+ tmp,
+ IPN3KE_25G_TX_STATISTICS_CONFIG,
+ port_id,
+ 0);
+ }
+
+ tmp = 0x00000000;
+ (*hw->f_mac_read)(hw,
+ &tmp,
+ IPN3KE_25G_TX_STATISTICS_STATUS,
+ port_id,
+ 0);
+ if (tmp & IPN3KE_25G_TX_STATISTICS_STATUS_SHADOW_REQUEST_MASK) {
+ tmp = 0x00000000;
+ (*hw->f_mac_read)(hw,
+ &tmp,
+ IPN3KE_25G_TX_STATISTICS_CONFIG,
+ port_id,
+ 0);
+ tmp &= 0xfffffffb;
+ (*hw->f_mac_write)(hw,
+ tmp,
+ IPN3KE_25G_TX_STATISTICS_CONFIG,
+ port_id,
+ 0);
+ }
+
+	/* Check whether the Rx statistics are in real-time mode;
+	 * if the counters have been paused, switch them back to real time.
+	 */
+ tmp = 0x00000000;
+ (*hw->f_mac_read)(hw,
+ &tmp,
+ IPN3KE_25G_RX_STATISTICS_CONFIG,
+ port_id,
+ 0);
+ if (tmp & IPN3KE_25G_RX_STATISTICS_CONFIG_SHADOW_REQUEST_MASK) {
+ tmp &= 0xfffffffb;
+ (*hw->f_mac_write)(hw,
+ tmp,
+ IPN3KE_25G_RX_STATISTICS_CONFIG,
+ port_id,
+ 0);
+ }
+
+ tmp = 0x00000000;
+ (*hw->f_mac_read)(hw,
+ &tmp,
+ IPN3KE_25G_RX_STATISTICS_STATUS,
+ port_id,
+ 0);
+
+ if (tmp & IPN3KE_25G_RX_STATISTICS_STATUS_SHADOW_REQUEST_MASK) {
+ tmp = 0x00000000;
+ (*hw->f_mac_read)(hw,
+ &tmp,
+ IPN3KE_25G_RX_STATISTICS_CONFIG,
+ port_id,
+ 0);
+ tmp &= 0xfffffffb;
+ (*hw->f_mac_write)(hw,
+ tmp,
+ IPN3KE_25G_RX_STATISTICS_CONFIG,
+ port_id,
+ 0);
+ }
+
+ /* pause Tx counter to read the statistics */
+ tmp = 0x00000000;
+ (*hw->f_mac_read)(hw,
+ &tmp,
+ IPN3KE_25G_TX_STATISTICS_CONFIG,
+ port_id,
+ 0);
+ tmp |= 0x00000004;
+ (*hw->f_mac_write)(hw,
+ tmp,
+ IPN3KE_25G_TX_STATISTICS_CONFIG,
+ port_id,
+ 0);
+
+ /* pause Rx counter to read the statistics */
+ tmp = 0x00000000;
+ (*hw->f_mac_read)(hw,
+ &tmp,
+ IPN3KE_25G_RX_STATISTICS_CONFIG,
+ port_id,
+ 0);
+ tmp |= 0x00000004;
+ (*hw->f_mac_write)(hw,
+ tmp,
+ IPN3KE_25G_RX_STATISTICS_CONFIG,
+ port_id,
+ 0);
+
+ /*Number of transmitted frames less than 64 bytes
+ *and reporting a CRC error
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_TX_FRAGMENTS_LO,
+ IPN3KE_25G_CNTR_TX_FRAGMENTS_HI,
+ hw, port_id);
+ hw_stats->eth.tx_errors += statistics;
+ hw_stats->crc_errors += statistics;
+
+ /*Number of transmitted oversized frames reporting a CRC error*/
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_TX_JABBERS_LO,
+ IPN3KE_25G_CNTR_TX_JABBERS_HI,
+ hw, port_id);
+ hw_stats->eth.tx_errors += statistics;
+ hw_stats->crc_errors += statistics;
+
+ /* Number of transmitted packets with FCS errors */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_TX_FCS_LO,
+ IPN3KE_25G_CNTR_TX_FCS_HI,
+ hw, port_id);
+ hw_stats->eth.tx_errors += statistics;
+ hw_stats->checksum_error += statistics;
+
+ /*Number of transmitted frames with a frame of length at
+ *least 64 reporting a CRC error
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_TX_CRCERR_LO,
+ IPN3KE_25G_CNTR_TX_CRCERR_HI,
+ hw, port_id);
+ hw_stats->eth.tx_errors += statistics;
+ hw_stats->crc_errors += statistics;
+
+ /*Number of errored multicast frames transmitted,
+ *excluding control frames
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_TX_MCAST_DATA_ERR_LO,
+ IPN3KE_25G_CNTR_TX_MCAST_DATA_ERR_HI,
+ hw, port_id);
+ hw_stats->eth.tx_errors += statistics;
+
+ /*Number of errored broadcast frames transmitted,
+ *excluding control frames
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_TX_BCAST_DATA_ERR_LO,
+ IPN3KE_25G_CNTR_TX_BCAST_DATA_ERR_HI,
+ hw, port_id);
+ hw_stats->eth.tx_errors += statistics;
+
+ /*Number of errored unicast frames transmitted,
+ *excluding control frames
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_TX_UCAST_DATA_ERR_LO,
+ IPN3KE_25G_CNTR_TX_UCAST_DATA_ERR_HI,
+ hw, port_id);
+ hw_stats->eth.tx_errors += statistics;
+
+ /* Number of errored multicast control frames transmitted */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_TX_MCAST_CTRL_ERR_LO,
+ IPN3KE_25G_CNTR_TX_MCAST_CTRL_ERR_HI,
+ hw, port_id);
+ hw_stats->eth.tx_errors += statistics;
+
+ /* Number of errored broadcast control frames transmitted */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_TX_BCAST_CTRL_ERR_LO,
+ IPN3KE_25G_CNTR_TX_BCAST_CTRL_ERR_HI,
+ hw, port_id);
+ hw_stats->eth.tx_errors += statistics;
+
+ /* Number of errored unicast control frames transmitted */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_TX_UCAST_CTRL_ERR_LO,
+ IPN3KE_25G_CNTR_TX_UCAST_CTRL_ERR_HI,
+ hw, port_id);
+ hw_stats->eth.tx_errors += statistics;
+
+ /* Number of errored pause frames transmitted */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_TX_PAUSE_ERR_LO,
+ IPN3KE_25G_CNTR_TX_PAUSE_ERR_HI,
+ hw, port_id);
+ hw_stats->eth.tx_errors += statistics;
+
+ /*Number of 64-byte transmitted frames,
+ *including the CRC field but excluding the preamble
+ *and SFD bytes
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_TX_64B_LO,
+ IPN3KE_25G_CNTR_TX_64B_HI,
+ hw, port_id);
+ hw_stats->tx_size_64 += statistics;
+
+ /* Number of transmitted frames between 65 and 127 bytes */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_TX_65_127B_LO,
+ IPN3KE_25G_CNTR_TX_65_127B_HI,
+ hw, port_id);
+ hw_stats->tx_size_65_127 += statistics;
+
+ /* Number of transmitted frames between 128 and 255 bytes */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_TX_128_255B_LO,
+ IPN3KE_25G_CNTR_TX_128_255B_HI,
+ hw, port_id);
+ hw_stats->tx_size_128_255 += statistics;
+
+ /* Number of transmitted frames between 256 and 511 bytes */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_TX_256_511B_LO,
+ IPN3KE_25G_CNTR_TX_256_511B_HI,
+ hw, port_id);
+ hw_stats->tx_size_256_511 += statistics;
+
+ /* Number of transmitted frames between 512 and 1023 bytes */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_TX_512_1023B_LO,
+ IPN3KE_25G_CNTR_TX_512_1023B_HI,
+ hw, port_id);
+ hw_stats->tx_size_512_1023 += statistics;
+
+ /* Number of transmitted frames between 1024 and 1518 bytes */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_TX_1024_1518B_LO,
+ IPN3KE_25G_CNTR_TX_1024_1518B_HI,
+ hw, port_id);
+ hw_stats->tx_size_1024_1518 += statistics;
+
+ /*Number of transmitted frames of size between 1519 bytes
+ *and the number of bytes specified in the MAX_TX_SIZE_CONFIG
+ *register
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_TX_1519_MAXB_LO,
+ IPN3KE_25G_CNTR_TX_1519_MAXB_HI,
+ hw, port_id);
+ hw_stats->tx_size_1519_to_max += statistics;
+
+ /*Number of oversized frames (frames with more bytes than the
+ *number specified in the MAX_TX_SIZE_CONFIG register)
+ *transmitted
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_TX_OVERSIZE_LO,
+ IPN3KE_25G_CNTR_TX_OVERSIZE_HI,
+ hw, port_id);
+
+ /*Number of valid multicast frames transmitted,
+ *excluding control frames
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_TX_MCAST_DATA_OK_LO,
+ IPN3KE_25G_CNTR_TX_MCAST_DATA_OK_HI,
+ hw, port_id);
+ hw_stats->eth.tx_multicast += statistics;
+
+ /*Number of valid broadcast frames transmitted,
+ *excluding control frames
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_TX_BCAST_DATA_OK_LO,
+ IPN3KE_25G_CNTR_TX_BCAST_DATA_OK_HI,
+ hw, port_id);
+ hw_stats->eth.tx_broadcast += statistics;
+
+ /*Number of valid unicast frames transmitted,
+ *excluding control frames
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_TX_UCAST_DATA_OK_LO,
+ IPN3KE_25G_CNTR_TX_UCAST_DATA_OK_HI,
+ hw, port_id);
+ hw_stats->eth.tx_unicast += statistics;
+
+ /*Number of valid multicast frames transmitted,
+ *excluding data frames
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_TX_MCAST_CTRL_LO,
+ IPN3KE_25G_CNTR_TX_MCAST_CTRL_HI,
+ hw, port_id);
+ hw_stats->eth.tx_multicast += statistics;
+
+ /*Number of valid broadcast frames transmitted,
+ *excluding data frames
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_TX_BCAST_CTRL_LO,
+ IPN3KE_25G_CNTR_TX_BCAST_CTRL_HI,
+ hw, port_id);
+ hw_stats->eth.tx_broadcast += statistics;
+
+ /*Number of valid unicast frames transmitted,
+ *excluding data frames
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_TX_UCAST_CTRL_LO,
+ IPN3KE_25G_CNTR_TX_UCAST_CTRL_HI,
+ hw, port_id);
+ hw_stats->eth.tx_unicast += statistics;
+
+ /* Number of valid pause frames transmitted */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_TX_PAUSE_LO,
+ IPN3KE_25G_CNTR_TX_PAUSE_HI,
+ hw, port_id);
+
+ /*Number of transmitted runt packets. The IP core does not
+ *transmit frames of length less than nine bytes.
+	 *The IP core pads frames between nine and 63 bytes in length
+	 *out to 64 bytes. Therefore, this counter does not
+ *increment in normal operating conditions.
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_TX_RUNT_LO,
+ IPN3KE_25G_CNTR_TX_RUNT_HI,
+ hw, port_id);
+
+ /*Number of transmitted payload bytes in frames with no FCS,
+ *undersized, oversized, or payload length errors.
+ *If VLAN detection is turned off for the TX MAC (bit[1]
+ *of the TX_MAC_CONTROL register at offset 0x40A has
+ *the value of 1), the IP core counts the VLAN header bytes
+ *(4 bytes for VLAN and 8 bytes for stacked VLAN)
+ *as payload bytes. This register is compliant with
+ *the requirements for aOctetsTransmittedOK in section
+ *5.2.2.1.8 of the IEEE Standard 802.3-2008.
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_TX_PAYLOAD_OCTETS_OK_LO,
+ IPN3KE_25G_TX_PAYLOAD_OCTETS_OK_HI,
+ hw, port_id);
+ hw_stats->eth.tx_bytes += statistics;
+
+ /*Number of transmitted bytes in frames with no FCS, undersized,
+ *oversized, or payload length errors. This register is
+ *compliant with the requirements for ifOutOctets in RFC3635
+ *(Managed Objects for Ethernet-like Interface Types)
+ *and TX etherStatsOctets in RFC2819(Remote Network Monitoring
+ *Management Information Base (RMON)).
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_TX_FRAME_OCTETS_OK_LO,
+ IPN3KE_25G_TX_FRAME_OCTETS_OK_HI,
+ hw, port_id);
+
+ /*Number of received frames less than 64 bytes
+ *and reporting a CRC error
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_RX_FRAGMENTS_LO,
+ IPN3KE_25G_CNTR_RX_FRAGMENTS_HI,
+ hw, port_id);
+ hw_stats->eth.rx_discards += statistics;
+ hw_stats->crc_errors += statistics;
+ hw_stats->rx_length_errors += statistics;
+
+ /* Number of received oversized frames reporting a CRC error */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_RX_JABBERS_LO,
+ IPN3KE_25G_CNTR_RX_JABBERS_HI,
+ hw, port_id);
+ hw_stats->eth.rx_discards += statistics;
+ hw_stats->crc_errors += statistics;
+ hw_stats->rx_length_errors += statistics;
+
+ /*Number of received packets with FCS errors.
+ *This register maintains a count of the number of pulses
+ *on the "l<n>_rx_fcs_error" or "rx_fcs_error" output signal
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_RX_FCS_LO,
+ IPN3KE_25G_CNTR_RX_FCS_HI,
+ hw, port_id);
+ hw_stats->eth.rx_discards += statistics;
+ hw_stats->checksum_error += statistics;
+
+ /*Number of received frames with a frame of length at least 64
+ *with CRC error
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_RX_CRCERR_LO,
+ IPN3KE_25G_CNTR_RX_CRCERR_HI,
+ hw, port_id);
+ hw_stats->eth.rx_discards += statistics;
+ hw_stats->crc_errors += statistics;
+
+ /*Number of errored multicast frames received,
+ *excluding control frames
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_RX_MCAST_DATA_ERR_LO,
+ IPN3KE_25G_CNTR_RX_MCAST_DATA_ERR_HI,
+ hw, port_id);
+ hw_stats->eth.rx_discards += statistics;
+
+ /*Number of errored broadcast frames received,
+ *excluding control frames
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_RX_BCAST_DATA_ERR_LO,
+ IPN3KE_25G_CNTR_RX_BCAST_DATA_ERR_HI,
+ hw, port_id);
+ hw_stats->eth.rx_discards += statistics;
+
+ /*Number of errored unicast frames received,
+ *excluding control frames
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_RX_UCAST_DATA_ERR_LO,
+ IPN3KE_25G_CNTR_RX_UCAST_DATA_ERR_HI,
+ hw, port_id);
+ hw_stats->eth.rx_discards += statistics;
+
+ /* Number of errored multicast control frames received */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_RX_MCAST_CTRL_ERR_LO,
+ IPN3KE_25G_CNTR_RX_MCAST_CTRL_ERR_HI,
+ hw, port_id);
+ hw_stats->eth.rx_discards += statistics;
+
+ /* Number of errored broadcast control frames received */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_RX_BCAST_CTRL_ERR_LO,
+ IPN3KE_25G_CNTR_RX_BCAST_CTRL_ERR_HI,
+ hw, port_id);
+ hw_stats->eth.rx_discards += statistics;
+
+ /* Number of errored unicast control frames received */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_RX_UCAST_CTRL_ERR_LO,
+ IPN3KE_25G_CNTR_RX_UCAST_CTRL_ERR_HI,
+ hw, port_id);
+ hw_stats->eth.rx_discards += statistics;
+
+ /* Number of errored pause frames received */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_RX_PAUSE_ERR_LO,
+ IPN3KE_25G_CNTR_RX_PAUSE_ERR_HI,
+ hw, port_id);
+ hw_stats->eth.rx_discards += statistics;
+
+ /*Number of 64-byte received frames,
+ *including the CRC field but excluding the preamble
+ *and SFD bytes
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_RX_64B_LO,
+ IPN3KE_25G_CNTR_RX_64B_HI,
+ hw, port_id);
+ hw_stats->rx_size_64 += statistics;
+
+ /*Number of received frames between 65 and 127 bytes */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_RX_65_127B_LO,
+ IPN3KE_25G_CNTR_RX_65_127B_HI,
+ hw, port_id);
+ hw_stats->rx_size_65_127 += statistics;
+
+ /*Number of received frames between 128 and 255 bytes
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_RX_128_255B_LO,
+ IPN3KE_25G_CNTR_RX_128_255B_HI,
+ hw, port_id);
+ hw_stats->rx_size_128_255 += statistics;
+
+ /*Number of received frames between 256 and 511 bytes
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_RX_256_511B_LO,
+ IPN3KE_25G_CNTR_RX_256_511B_HI,
+ hw, port_id);
+ hw_stats->rx_size_256_511 += statistics;
+
+ /*Number of received frames between 512 and 1023 bytes
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_RX_512_1023B_LO,
+ IPN3KE_25G_CNTR_RX_512_1023B_HI,
+ hw, port_id);
+ hw_stats->rx_size_512_1023 += statistics;
+
+ /*Number of received frames between 1024 and 1518 bytes
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_RX_1024_1518B_LO,
+ IPN3KE_25G_CNTR_RX_1024_1518B_HI,
+ hw, port_id);
+ hw_stats->rx_size_1024_1518 += statistics;
+
+ /*Number of received frames of size between 1519 bytes
+ *and the number of bytes specified in the MAX_TX_SIZE_CONFIG
+ *register
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_RX_1519_MAXB_LO,
+ IPN3KE_25G_CNTR_RX_1519_MAXB_HI,
+ hw, port_id);
+ hw_stats->rx_size_big += statistics;
+
+ /*Number of oversized frames (frames with more bytes
+ *than the number specified in the MAX_TX_SIZE_CONFIG register)
+ *received
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_RX_OVERSIZE_LO,
+ IPN3KE_25G_CNTR_RX_OVERSIZE_HI,
+ hw, port_id);
+ hw_stats->rx_jabber += statistics;
+
+ /*Number of valid multicast frames received,
+ *excluding control frames
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_RX_MCAST_DATA_OK_LO,
+ IPN3KE_25G_CNTR_RX_MCAST_DATA_OK_HI,
+ hw, port_id);
+ hw_stats->eth.rx_multicast += statistics;
+
+ /*Number of valid broadcast frames received,
+ *excluding control frames
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_RX_BCAST_DATA_OK_LO,
+ IPN3KE_25G_CNTR_RX_BCAST_DATA_OK_HI,
+ hw, port_id);
+ hw_stats->eth.rx_broadcast += statistics;
+
+ /*Number of valid unicast frames received,
+ *excluding control frames
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_RX_UCAST_DATA_OK_LO,
+ IPN3KE_25G_CNTR_RX_UCAST_DATA_OK_HI,
+ hw, port_id);
+ hw_stats->eth.rx_unicast += statistics;
+
+ /*Number of valid multicast frames received,
+ *excluding data frames
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_RX_MCAST_CTRL_LO,
+ IPN3KE_25G_CNTR_RX_MCAST_CTRL_HI,
+ hw, port_id);
+ hw_stats->eth.rx_multicast += statistics;
+
+ /*Number of valid broadcast frames received,
+ *excluding data frames
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_RX_BCAST_CTRL_LO,
+ IPN3KE_25G_CNTR_RX_BCAST_CTRL_HI,
+ hw, port_id);
+ hw_stats->eth.rx_broadcast += statistics;
+
+ /*Number of valid unicast frames received,
+ *excluding data frames
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_RX_UCAST_CTRL_LO,
+ IPN3KE_25G_CNTR_RX_UCAST_CTRL_HI,
+ hw, port_id);
+ hw_stats->eth.rx_unicast += statistics;
+
+ /*Number of received pause frames, with or without error
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_RX_PAUSE_LO,
+ IPN3KE_25G_CNTR_RX_PAUSE_HI,
+ hw, port_id);
+
+ /*Number of received runt packets. A runt is a packet of size
+ *less than 64 bytes but greater than eight bytes.
+ *If a packet is eight bytes or smaller, it is considered
+ *a decoding error and not a runt frame, and the IP core
+ *does not flag it nor count it as a runt.
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_CNTR_RX_RUNT_LO,
+ IPN3KE_25G_CNTR_RX_RUNT_HI,
+ hw, port_id);
+
+ /*Number of received payload bytes in frames with no FCS,
+ *undersized, oversized, or payload length errors.
+ *If VLAN detection is turned off for the RX MAC (bit [1] of the
+ *"RXMAC_CONTROL" register at offset 0x50A has the value of 1),
+ *the IP core counts the VLAN header bytes (4 bytes for VLAN and
+ *8 bytes for stacked VLAN) as payload bytes.
+ *This register is compliant with the requirements for
+ *aOctetsReceivedOK in section 5.2.2.1.14 of the IEEE Standard
+ *802.3-2008
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_RX_PAYLOAD_OCTETS_OK_LO,
+ IPN3KE_25G_RX_PAYLOAD_OCTETS_OK_HI,
+ hw, port_id);
+ hw_stats->eth.rx_bytes += statistics;
+
+ /*Number of received bytes in frames with no FCS, undersized,
+ *oversized, or payload length errors.
+ *This register is compliant with the requirements for
+ *ifInOctets in RFC3635 (Managed Objects for Ethernet-like
+ *Interface Types) and RX etherStatsOctets in RFC2819
+ *(Remote Network Monitoring Management Information Base
+ *(RMON)).
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_25G_RX_FRAME_OCTETS_OK_LO,
+ IPN3KE_25G_RX_FRAME_OCTETS_OK_HI,
+ hw, port_id);
+
+ /*resume Tx counter to real time
+ */
+ tmp = 0x00000000;
+ (*hw->f_mac_read)(hw,
+ &tmp,
+ IPN3KE_25G_TX_STATISTICS_CONFIG,
+ port_id,
+ 0);
+ tmp &= 0xfffffffb;
+ (*hw->f_mac_write)(hw,
+ tmp,
+ IPN3KE_25G_TX_STATISTICS_CONFIG,
+ port_id,
+ 0);
+
+ /*resume Rx counter to real time
+ */
+ tmp = 0x00000000;
+ (*hw->f_mac_read)(hw,
+ &tmp,
+ IPN3KE_25G_RX_STATISTICS_CONFIG,
+ port_id,
+ 0);
+ tmp &= 0xfffffffb;
+ (*hw->f_mac_write)(hw,
+ tmp,
+ IPN3KE_25G_RX_STATISTICS_CONFIG,
+ port_id,
+ 0);
+
+ return 0;
+}
+
+static void
+ipn3ke_rpst_25g_lineside_tx_stats_reset(struct ipn3ke_hw *hw,
+uint16_t port_id)
+{
+ uint32_t tmp = 0x00000001;
+ /* Bit[0]: Software can set this bit to the value of 1
+ * to reset all of the TX statistics registers at the same time.
+	 * This bit is self-clearing.
+ */
+ (*hw->f_mac_write)(hw,
+ tmp,
+ IPN3KE_25G_TX_STATISTICS_CONFIG,
+ port_id,
+ 0);
+
+ while (tmp & 0x00000001) {
+ tmp = 0x00000000;
+ (*hw->f_mac_read)(hw,
+ &tmp,
+ IPN3KE_25G_TX_STATISTICS_CONFIG,
+ port_id,
+ 0);
+ if (tmp & 0x00000001)
+ usleep(5);
+ else
+ return;
+ }
+}
+
+static void
+ipn3ke_rpst_25g_lineside_rx_stats_reset(struct ipn3ke_hw *hw,
+uint16_t port_id)
+{
+ uint32_t tmp = 0x00000001;
+ /* Bit[0]: Software can set this bit to the value of 1
+ * to reset all of the RX statistics registers at the same time.
+	 * This bit is self-clearing.
+ */
+ (*hw->f_mac_write)(hw,
+ tmp,
+ IPN3KE_25G_RX_STATISTICS_CONFIG,
+ port_id,
+ 0);
+
+ while (tmp & 0x00000001) {
+ tmp = 0x00000000;
+ (*hw->f_mac_read)(hw,
+ &tmp,
+ IPN3KE_25G_RX_STATISTICS_CONFIG,
+ port_id,
+ 0);
+ if (tmp & 0x00000001)
+ usleep(5);
+ else
+ return;
+ }
+}
+
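+/* The 10G MAC exposes 36-bit counters as a 32-bit low register plus a
+ * high register in which only the low bits are valid; the high word is
+ * masked with IPN3KE_10G_STATS_HI_VALID_MASK before being combined
+ * with the low word.
+ */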
+static uint64_t
+ipn3ke_rpst_read_36bits_statistics_register(uint32_t addr_lo,
+uint32_t addr_hi, struct ipn3ke_hw *hw, uint16_t port_id)
+{
+ uint32_t statistics_lo = 0x00000000;
+ uint32_t statistics_hi = 0x00000000;
+ uint64_t statistics = 0x0000000000000000;
+
+ (*hw->f_mac_read)(hw,
+ &statistics_lo,
+ addr_lo,
+ port_id,
+ 0);
+ (*hw->f_mac_read)(hw,
+ &statistics_hi,
+ addr_hi,
+ port_id,
+ 0);
+ statistics_hi &= IPN3KE_10G_STATS_HI_VALID_MASK;
+ statistics += statistics_hi;
+ statistics = statistics << IPN3KE_REGISTER_WIDTH;
+ statistics += statistics_lo;
+ return statistics;
+}
+
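+/* Unlike the 25G path, the 10G line-side reader also fills in the
+ * generic rte_eth_stats structure directly (packet, byte and error
+ * totals) while it walks the counters, in addition to hw_stats.
+ */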
+static int
+ipn3ke_rpst_read_10g_lineside_stats_registers
+(struct ipn3ke_hw *hw,
+uint16_t port_id,
+struct ipn3ke_rpst_hw_port_stats *hw_stats,
+struct rte_eth_stats *stats)
+{
+ uint64_t statistics = 0;
+
+ memset(hw_stats, 0, sizeof(*hw_stats));
+ memset(stats, 0, sizeof(*stats));
+
+ /*36-bit statistics counter that collects the number of frames
+ *that are successfully transmitted, including control frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_TX_STATS_FRAME_OK_LO,
+ IPN3KE_10G_TX_STATS_FRAME_OK_HI,
+ hw, port_id);
+ stats->opackets = statistics;
+
+ /*36-bit statistics counter that collects the number of frames
+ *that are successfully received, including control frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_RX_STATS_FRAME_OK_LO,
+ IPN3KE_10G_RX_STATS_FRAME_OK_HI,
+ hw, port_id);
+ stats->ipackets = statistics;
+
+ /*36-bit statistics counter that collects the number of frames
+ *transmitted with error, including control frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_TX_STATS_FRAME_ERR_LO,
+ IPN3KE_10G_TX_STATS_FRAME_ERR_HI,
+ hw, port_id);
+ stats->oerrors = statistics;
+ hw_stats->eth.tx_errors = statistics;
+
+ /*36-bit statistics counter that collects the number of frames
+ *received with error, including control frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_RX_STATS_FRAME_ERR_LO,
+ IPN3KE_10G_RX_STATS_FRAME_ERR_HI,
+ hw, port_id);
+ stats->ierrors = statistics;
+ hw_stats->eth.rx_discards = statistics;
+
+ /*36-bit statistics counter that collects the number
+ *of RX frames with CRC error.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_RX_STATS_FRAME_CRC_ERR_LO,
+ IPN3KE_10G_RX_STATS_FRAME_CRC_ERR_HI,
+ hw, port_id);
+ hw_stats->crc_errors = statistics;
+
+ /*64-bit statistics counter that collects the payload length,
+ *including the bytes in control frames.
+ *The payload length is the number of data and padding bytes
+ *transmitted.
+ *If the tx_vlan_detection[0] register bit is set to 1,
+ *the VLAN and stacked VLAN tags are counted as part of
+ *the TX payload.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_TX_STATS_OCTETS_OK_LO,
+ IPN3KE_10G_TX_STATS_OCTETS_OK_HI,
+ hw, port_id);
+ stats->obytes = statistics;
+ hw_stats->eth.tx_bytes = statistics;
+
+ /*64-bit statistics counter that collects the payload length,
+ *including the bytes in control frames.
+ *The payload length is the number of data and padding bytes
+ *received.
+ *If the rx_vlan_detection[0] register bit is set to 1,
+ *the VLAN and stacked VLAN tags are counted as part of
+ *the RX payload.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_RX_STATS_OCTETS_OK_LO,
+ IPN3KE_10G_RX_STATS_OCTETS_OK_HI,
+ hw, port_id);
+ stats->ibytes = statistics;
+ hw_stats->eth.rx_bytes = statistics;
+
+ /*36-bit statistics counter that collects the number of
+ *valid pause frames transmitted.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_TX_STATS_PAUSE_MAC_CTRL_FRAMES_LO,
+ IPN3KE_10G_TX_STATS_PAUSE_MAC_CTRL_FRAMES_HI,
+ hw, port_id);
+
+ /*36-bit statistics counter that collects the number of
+ *valid pause frames received.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_RX_STATS_PAUSE_MAC_CTRL_FRAMES_LO,
+ IPN3KE_10G_RX_STATS_PAUSE_MAC_CTRL_FRAMES_HI,
+ hw, port_id);
+
+ /*36-bit statistics counter that collects the number of frames
+ *transmitted that are invalid and with error.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_TX_STATS_IF_ERRORS_LO,
+ IPN3KE_10G_TX_STATS_IF_ERRORS_HI,
+ hw, port_id);
+
+ /*36-bit statistics counter that collects the number of frames
+ *received that are invalid and with error.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_RX_STATS_IF_ERRORS_LO,
+ IPN3KE_10G_RX_STATS_IF_ERRORS_HI,
+ hw, port_id);
+
+ /*36-bit statistics counter that collects the number of
+ *good unicast frames transmitted,
+ *excluding control frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_TX_STATS_UNICAST_FRAME_OK_LO,
+ IPN3KE_10G_TX_STATS_UNICAST_FRAME_OK_HI,
+ hw, port_id);
+ hw_stats->eth.tx_unicast = statistics;
+
+ /*36-bit statistics counter that collects the number of
+ *good unicast frames received,
+ *excluding control frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_RX_STATS_UNICAST_FRAME_OK_LO,
+ IPN3KE_10G_RX_STATS_UNICAST_FRAME_OK_HI,
+ hw, port_id);
+ hw_stats->eth.rx_unicast = statistics;
+
+ /*36-bit statistics counter that collects the number of
+ *unicast frames transmitted with error,
+ *excluding control frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_TX_STATS_UNICAST_FRAME_ERR_LO,
+ IPN3KE_10G_TX_STATS_UNICAST_FRAME_ERR_HI,
+ hw, port_id);
+
+ /*36-bit statistics counter that collects the number of
+ *unicast frames received with error,
+ *excluding control frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_RX_STATS_UNICAST_FRAME_ERR_LO,
+ IPN3KE_10G_RX_STATS_UNICAST_FRAME_ERR_HI,
+ hw, port_id);
+
+ /*36-bit statistics counter that collects the number of
+ *good multicast frames transmitted,
+ *excluding control frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_TX_STATS_MULTICAST_FRAME_OK_LO,
+ IPN3KE_10G_TX_STATS_MULTICAST_FRAME_OK_HI,
+ hw, port_id);
+ hw_stats->eth.tx_multicast = statistics;
+
+ /*36-bit statistics counter that collects the number of
+ *good multicast frames received,
+ *excluding control frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_RX_STATS_MULTICAST_FRAME_OK_LO,
+ IPN3KE_10G_RX_STATS_MULTICAST_FRAME_OK_HI,
+ hw, port_id);
+ hw_stats->eth.rx_multicast = statistics;
+
+ /*36-bit statistics counter that collects the number of
+ *multicast frames transmitted with error,
+ *excluding control frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_TX_STATS_MULTICAST_FRAME_ERR_LO,
+ IPN3KE_10G_TX_STATS_MULTICAST_FRAME_ERR_HI,
+ hw, port_id);
+
+ /*36-bit statistics counter that collects the number
+ *of multicast frames received with error,
+ *excluding control frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_RX_STATS_MULTICAST_FRAME_ERR_LO,
+ IPN3KE_10G_RX_STATS_MULTICAST_FRAME_ERR_HI,
+ hw, port_id);
+
+ /*36-bit statistics counter that collects the number of
+ *good broadcast frames transmitted,
+ *excluding control frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_TX_STATS_BROADCAST_FRAME_OK_LO,
+ IPN3KE_10G_TX_STATS_BROADCAST_FRAME_OK_HI,
+ hw, port_id);
+ hw_stats->eth.tx_broadcast = statistics;
+
+ /*36-bit statistics counter that collects the number of
+ *good broadcast frames received,
+ *excluding control frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_RX_STATS_BROADCAST_FRAME_OK_LO,
+ IPN3KE_10G_RX_STATS_BROADCAST_FRAME_OK_HI,
+ hw, port_id);
+ hw_stats->eth.rx_broadcast = statistics;
+
+ /*36-bit statistics counter that collects the number
+ *of broadcast frames transmitted with error,
+ *excluding control frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_TX_STATS_BROADCAST_FRAME_ERR_LO,
+ IPN3KE_10G_TX_STATS_BROADCAST_FRAME_ERR_HI,
+ hw, port_id);
+
+ /*36-bit statistics counter that collects the number of
+ *broadcast frames received with error,
+ *excluding control frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_RX_STATS_BROADCAST_FRAME_ERR_LO,
+ IPN3KE_10G_RX_STATS_BROADCAST_FRAME_ERR_HI,
+ hw, port_id);
+
+ /*64-bit statistics counter that collects the total number of
+ *octets transmitted.
+ *This count includes good, errored, and invalid frames.
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_10G_TX_STATS_ETHER_STATS_OCTETS_LO,
+ IPN3KE_10G_TX_STATS_ETHER_STATS_OCTETS_HI,
+ hw, port_id);
+
+ /*64-bit statistics counter that collects the total number of
+ *octets received.
+ *This count includes good, errored, and invalid frames.
+ */
+ statistics = ipn3ke_rpst_read_64bits_statistics_register(
+ IPN3KE_10G_RX_STATS_ETHER_STATS_OCTETS_LO,
+ IPN3KE_10G_RX_STATS_ETHER_STATS_OCTETS_HI,
+ hw, port_id);
+
+ /*36-bit statistics counter that collects the total number of
+ *good, errored, and invalid frames transmitted.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_LO,
+ IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_HI,
+ hw, port_id);
+
+ /*36-bit statistics counter that collects the total number of
+ *good, errored, and invalid frames received.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_LO,
+ IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_HI,
+ hw, port_id);
+
+ /*36-bit statistics counter that collects the number of
+ *undersized TX frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_TX_STATS_ETHER_STATS_UNDER_SIZE_PKTS_LO,
+ IPN3KE_10G_TX_STATS_ETHER_STATS_UNDER_SIZE_PKTS_HI,
+ hw, port_id);
+
+ /*36-bit statistics counter that collects the number of
+ *undersized RX frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_RX_STATS_ETHER_STATS_UNDER_SIZE_PKTS_LO,
+ IPN3KE_10G_RX_STATS_ETHER_STATS_UNDER_SIZE_PKTS_HI,
+ hw, port_id);
+ hw_stats->rx_undersize = statistics;
+
+ /*36-bit statistics counter that collects the number of
+ *TX frames whose length exceeds the maximum frame length
+ *specified.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_TX_STATS_ETHER_STATS_OVER_SIZE_PKTS_LO,
+ IPN3KE_10G_TX_STATS_ETHER_STATS_OVER_SIZE_PKTS_HI,
+ hw, port_id);
+
+ /*36-bit statistics counter that collects the number of
+ *RX frames whose length exceeds the maximum frame length
+ *specified.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_RX_STATS_ETHER_STATS_OVER_SIZE_PKTS_LO,
+ IPN3KE_10G_RX_STATS_ETHER_STATS_OVER_SIZE_PKTS_HI,
+ hw, port_id);
+ hw_stats->rx_oversize = statistics;
+
+ /*36-bit statistics counter that collects the number of
+ *64-byte TX frames,
+ *including the CRC field
+ *but excluding the preamble and SFD bytes.
+ *This count includes good, errored, and invalid frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_64_OCTETS_LO,
+ IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_64_OCTETS_HI,
+ hw, port_id);
+ hw_stats->tx_size_64 = statistics;
+
+ /*36-bit statistics counter that collects the number of
+ *64-byte RX frames,
+ *including the CRC field
+ *but excluding the preamble and SFD bytes.
+ *This count includes good, errored, and invalid frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_64_OCTETS_LO,
+ IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_64_OCTETS_HI,
+ hw, port_id);
+ hw_stats->rx_size_64 = statistics;
+
+ /*36-bit statistics counter that collects the number of
+ *TX frames between the length of 65 and 127 bytes,
+ *including the CRC field
+ *but excluding the preamble and SFD bytes.
+ *This count includes good, errored, and invalid frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_65_127_OCTETS_LO,
+ IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_65_127_OCTETS_HI,
+ hw, port_id);
+ hw_stats->tx_size_65_127 = statistics;
+
+ /*36-bit statistics counter that collects the number of
+ *RX frames between the length of 65 and 127 bytes,
+ *including the CRC field
+ *but excluding the preamble and SFD bytes.
+ *This count includes good, errored, and invalid frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_65_127_OCTETS_LO,
+ IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_65_127_OCTETS_HI,
+ hw, port_id);
+ hw_stats->rx_size_65_127 = statistics;
+
+ /*36-bit statistics counter that collects the number of
+ *TX frames between the length of 128 and 255 bytes,
+ *including the CRC field
+ *but excluding the preamble and SFD bytes.
+ *This count includes good, errored, and invalid frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_128_255_OCTETS_LO,
+ IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_128_255_OCTETS_HI,
+ hw, port_id);
+ hw_stats->tx_size_128_255 = statistics;
+
+ /*36-bit statistics counter that collects the number of
+ *RX frames between the length of 128 and 255 bytes,
+ *including the CRC field
+ *but excluding the preamble and SFD bytes.
+ *This count includes good, errored, and invalid frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_128_255_OCTETS_LO,
+ IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_128_255_OCTETS_HI,
+ hw, port_id);
+ hw_stats->rx_size_128_255 = statistics;
+
+ /*36-bit statistics counter that collects the number of
+ *TX frames between the length of 256 and 511 bytes,
+ *including the CRC field
+ *but excluding the preamble and SFD bytes.
+ *This count includes good, errored, and invalid frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_256_511_OCTETS_LO,
+ IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_256_511_OCTETS_HI,
+ hw, port_id);
+ hw_stats->tx_size_256_511 = statistics;
+
+ /*36-bit statistics counter that collects the number of
+ *RX frames between the length of 256 and 511 bytes,
+ *including the CRC field
+ *but excluding the preamble and SFD bytes.
+ *This count includes good, errored, and invalid frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_256_511_OCTETS_LO,
+ IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_256_511_OCTETS_HI,
+ hw, port_id);
+ hw_stats->rx_size_256_511 = statistics;
+
+ /*36-bit statistics counter that collects the number of
+ *TX frames between the length of 512 and 1023 bytes,
+ *including the CRC field
+ *but excluding the preamble and SFD bytes.
+ *This count includes good, errored, and invalid frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_512_1023_OCTETS_LO,
+ IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_512_1023_OCTETS_HI,
+ hw, port_id);
+ hw_stats->tx_size_512_1023 = statistics;
+
+ /*36-bit statistics counter that collects the number of
+ *RX frames between the length of 512 and 1023 bytes,
+ *including the CRC field
+ *but excluding the preamble and SFD bytes.
+ *This count includes good, errored, and invalid frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_512_1023_OCTETS_LO,
+ IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_512_1023_OCTETS_HI,
+ hw, port_id);
+ hw_stats->rx_size_512_1023 = statistics;
+
+ /*36-bit statistics counter that collects the number of
+ *TX frames between the length of 1024 and 1518 bytes,
+ *including the CRC field but
+ *excluding the preamble and SFD bytes.
+ *This count includes good, errored, and invalid frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_1024_1518_OCTETS_LO,
+ IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_1024_1518_OCTETS_HI,
+ hw, port_id);
+ hw_stats->tx_size_1024_1518 = statistics;
+
+ /*36-bit statistics counter that collects the number of
+ *RX frames between the length of 1024 and 1518 bytes,
+ *including the CRC field
+ *but excluding the preamble and SFD bytes.
+ *This count includes good, errored, and invalid frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_1024_1518_OCTETS_LO,
+ IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_1024_1518_OCTETS_HI,
+ hw, port_id);
+ hw_stats->rx_size_1024_1518 = statistics;
+
+ /*36-bit statistics counter that collects the number of
+ *TX frames equal or more than the length of 1,519 bytes,
+ *including the CRC field
+ *but excluding the preamble and SFD bytes.
+ *This count includes good, errored, and invalid frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_1519_X_OCTETS_LO,
+ IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_1519_X_OCTETS_HI,
+ hw, port_id);
+ hw_stats->tx_size_1519_to_max = statistics;
+
+ /*36-bit statistics counter that collects the number of
+ *RX frames equal or more than the length of 1,519 bytes,
+ *including the CRC field
+ *but excluding the preamble and SFD bytes.
+ *This count includes good,
+ *errored, and invalid frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_1519_X_OCTETS_LO,
+ IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_1519_X_OCTETS_HI,
+ hw, port_id);
+ hw_stats->rx_size_big = statistics;
+
+ /*36-bit statistics counter that collects the total number of
+ *RX frames with length less than 64 bytes and CRC error.
+ *The MAC does not drop these frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_RX_STATS_ETHER_STATS_FRAGMENTS_LO,
+ IPN3KE_10G_RX_STATS_ETHER_STATS_FRAGMENTS_HI,
+ hw, port_id);
+
+ /*36-bit statistics counter that collects the number of
+ *oversized RX frames with CRC error.
+ *The MAC does not drop these frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_RX_STATS_ETHER_STATS_JABBERS_LO,
+ IPN3KE_10G_RX_STATS_ETHER_STATS_JABBERS_HI,
+ hw, port_id);
+
+ /*36-bit statistics counter that collects the number of
+ *RX frames with CRC error,
+ *whose length is between 64 and the maximum frame length
+ *specified in the register.
+ *The MAC does not drop these frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_RX_STATS_ETHER_STATS_CRC_ERR_LO,
+ IPN3KE_10G_RX_STATS_ETHER_STATS_CRC_ERR_HI,
+ hw, port_id);
+
+ /*36-bit statistics counter that collects the number of
+ *valid TX unicast control frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_TX_STATS_UNICAST_MAC_CTRL_FRAMES_LO,
+ IPN3KE_10G_TX_STATS_UNICAST_MAC_CTRL_FRAMES_HI,
+ hw, port_id);
+ hw_stats->eth.tx_unicast += statistics;
+
+ /*36-bit statistics counter that collects the number of
+ *valid RX unicast control frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_RX_STATS_UNICAST_MAC_CTRL_FRAMES_LO,
+ IPN3KE_10G_RX_STATS_UNICAST_MAC_CTRL_FRAMES_HI,
+ hw, port_id);
+ hw_stats->eth.rx_unicast += statistics;
+
+ /*36-bit statistics counter that collects the number of
+ *valid TX multicast control frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_TX_STATS_MULTICAST_MAC_CTRL_FRAMES_LO,
+ IPN3KE_10G_TX_STATS_MULTICAST_MAC_CTRL_FRAMES_HI,
+ hw, port_id);
+ hw_stats->eth.tx_multicast += statistics;
+
+ /*36-bit statistics counter that collects the number of
+ *valid RX multicast control frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_RX_STATS_MULTICAST_MAC_CTRL_FRAMES_LO,
+ IPN3KE_10G_RX_STATS_MULTICAST_MAC_CTRL_FRAMES_HI,
+ hw, port_id);
+ hw_stats->eth.rx_multicast += statistics;
+
+ /*36-bit statistics counter that collects the number of
+ *valid TX broadcast control frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_TX_STATS_BROADCAST_MAC_CTRL_FRAMES_LO,
+ IPN3KE_10G_TX_STATS_BROADCAST_MAC_CTRL_FRAMES_HI,
+ hw, port_id);
+ hw_stats->eth.tx_broadcast += statistics;
+
+ /*36-bit statistics counter that collects the number of
+ *valid RX broadcast control frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_RX_STATS_BROADCAST_MAC_CTRL_FRAMES_LO,
+ IPN3KE_10G_RX_STATS_BROADCAST_MAC_CTRL_FRAMES_HI,
+ hw, port_id);
+ hw_stats->eth.rx_broadcast += statistics;
+
+ /*36-bit statistics counter that collects the number of
+ *valid TX PFC frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_TX_STATS_PFC_MAC_CTRL_FRAMES_LO,
+ IPN3KE_10G_TX_STATS_PFC_MAC_CTRL_FRAMES_HI,
+ hw, port_id);
+
+ /*36-bit statistics counter that collects the number of
+ *valid RX PFC frames.
+ */
+ statistics = ipn3ke_rpst_read_36bits_statistics_register(
+ IPN3KE_10G_RX_STATS_PFC_MAC_CTRL_FRAMES_LO,
+ IPN3KE_10G_RX_STATS_PFC_MAC_CTRL_FRAMES_HI,
+ hw, port_id);
+
+ return 0;
+}
+
+static void
+ipn3ke_rpst_10g_lineside_tx_stats_reset(struct ipn3ke_hw *hw,
+uint16_t port_id)
+{
+ uint32_t tmp;
+
+ /*Bit [0]: Set this register to 1 to clear all TX statistics
+ *counters.
+ *The IP core clears this bit when all counters are cleared.
+ *Bits [31:1]: Reserved.
+ */
+ tmp = 0x00000000;
+ (*hw->f_mac_read)(hw,
+ &tmp,
+ IPN3KE_10G_TX_STATS_CLR,
+ port_id,
+ 0);
+ tmp |= 0x00000001;
+ (*hw->f_mac_write)(hw,
+ tmp,
+ IPN3KE_10G_TX_STATS_CLR,
+ port_id,
+ 0);
+}
+
+static void
+ipn3ke_rpst_10g_lineside_rx_stats_reset(struct ipn3ke_hw *hw,
+uint16_t port_id)
+{
+ uint32_t tmp;
+
+ /*Bit [0]: Set this register to 1 to clear all RX statistics
+ *counters.
+ *The IP core clears this bit when all counters are cleared.
+ *Bits [31:1]: Reserved
+ */
+ tmp = 0x00000000;
+ (*hw->f_mac_read)(hw,
+ &tmp,
+ IPN3KE_10G_RX_STATS_CLR,
+ port_id,
+ 0);
+ tmp |= 0x00000001;
+ (*hw->f_mac_write)(hw,
+ tmp,
+ IPN3KE_10G_RX_STATS_CLR,
+ port_id,
+ 0);
+}
+
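+/* Reset all NIC-side and line-side counters for one port. The port id
+ * is recovered from the ethdev name, which is assumed to carry the
+ * port number after its third underscore (see the parsing loop below).
+ */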
+static int
+ipn3ke_rpst_stats_reset(struct rte_eth_dev *ethdev)
+{
+ uint16_t port_id = 0;
+ char *ch;
+ int cnt = 0;
+ struct rte_afu_device *afu_dev = NULL;
+ struct ipn3ke_hw *hw = NULL;
+
+ if (!ethdev) {
+ IPN3KE_AFU_PMD_ERR("ethernet device to reset is NULL!");
+ return -EINVAL;
+ }
+
+ afu_dev = RTE_ETH_DEV_TO_AFU(ethdev);
+ if (!afu_dev) {
+ IPN3KE_AFU_PMD_ERR("afu device to reset is NULL!");
+ return -EINVAL;
+ }
+
+ if (!afu_dev->shared.data) {
+ IPN3KE_AFU_PMD_ERR("hardware data to reset is NULL!");
+ return -EINVAL;
+ }
+
+ hw = afu_dev->shared.data;
+
+ ch = ethdev->data->name;
+ if (!ch) {
+ IPN3KE_AFU_PMD_ERR("ethdev name is NULL!");
+ return -EINVAL;
+ }
+	while (*ch) {
+		if (*ch == '_')
+			cnt++;
+		ch++;
+		if (cnt == 3)
+			break;
+	}
+	if (cnt != 3) {
+		IPN3KE_AFU_PMD_ERR("Cannot get port_id from ethdev name!");
+		return -EINVAL;
+	}
+ port_id = atoi(ch);
+
+ if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) {
+ ipn3ke_rpst_25g_nic_side_tx_stats_reset(hw, port_id);
+ ipn3ke_rpst_25g_nic_side_rx_stats_reset(hw, port_id);
+ ipn3ke_rpst_25g_lineside_tx_stats_reset(hw, port_id);
+ ipn3ke_rpst_25g_lineside_rx_stats_reset(hw, port_id);
+ } else if (hw->retimer.mac_type ==
+ IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
+ ipn3ke_rpst_10g_nic_side_tx_stats_reset(hw, port_id);
+ ipn3ke_rpst_10g_nic_side_rx_stats_reset(hw, port_id);
+ ipn3ke_rpst_10g_lineside_tx_stats_reset(hw, port_id);
+ ipn3ke_rpst_10g_lineside_rx_stats_reset(hw, port_id);
+ }
+
+ return 0;
+}
+
+static int
+ipn3ke_rpst_stats_get
+(struct rte_eth_dev *ethdev, struct rte_eth_stats *stats)
+{
+ uint16_t port_id = 0;
+ char *ch;
+ int cnt = 0;
+ int i = 0;
+ struct rte_afu_device *afu_dev = NULL;
+ struct ipn3ke_hw *hw = NULL;
+ struct ipn3ke_rpst_hw_port_stats hw_stats;
+
+ if (!ethdev) {
+ IPN3KE_AFU_PMD_ERR("ethernet device to get statistics is NULL");
+ return -EINVAL;
+ }
+ if (!stats) {
+ IPN3KE_AFU_PMD_ERR("Address to return statistics is NULL!");
+ return -EINVAL;
+ }
+
+ afu_dev = RTE_ETH_DEV_TO_AFU(ethdev);
+ if (!afu_dev) {
+ IPN3KE_AFU_PMD_ERR("afu device to get statistics is NULL!");
+ return -EINVAL;
+ }
+
+ if (!afu_dev->shared.data) {
+ IPN3KE_AFU_PMD_ERR("hardware data to get statistics is NULL!");
+ return -EINVAL;
+ }
+
+ hw = afu_dev->shared.data;
+
+ ch = ethdev->data->name;
+ if (!ch) {
+ IPN3KE_AFU_PMD_ERR("ethdev name is NULL!");
+ return -EINVAL;
+ }
+	while (*ch) {
+		if (*ch == '_')
+			cnt++;
+		ch++;
+		if (cnt == 3)
+			break;
+	}
+	if (cnt != 3) {
+		IPN3KE_AFU_PMD_ERR("Cannot get port_id from ethdev name!");
+		return -EINVAL;
+	}
+ port_id = atoi(ch);
+
+ if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) {
+ ipn3ke_rpst_read_25g_lineside_stats_registers(hw,
+ port_id,
+ &hw_stats);
+
+ stats->ipackets = hw_stats.rx_size_64
+ + hw_stats.rx_size_65_127
+ + hw_stats.rx_size_128_255
+ + hw_stats.rx_size_256_511
+ + hw_stats.rx_size_512_1023
+ + hw_stats.rx_size_1024_1518
+ + hw_stats.rx_size_big
+ + hw_stats.rx_undersize
+ + hw_stats.rx_fragments
+ + hw_stats.rx_oversize
+ + hw_stats.rx_jabber;
+ stats->opackets = hw_stats.tx_size_64
+ + hw_stats.tx_size_65_127
+ + hw_stats.tx_size_128_255
+ + hw_stats.tx_size_256_511
+ + hw_stats.tx_size_512_1023
+ + hw_stats.tx_size_1024_1518
+ + hw_stats.tx_size_1519_to_max;
+ stats->ibytes = hw_stats.eth.rx_bytes;
+ stats->obytes = hw_stats.eth.tx_bytes;
+ stats->imissed = 0;
+ stats->ierrors = hw_stats.eth.rx_discards
+ + hw_stats.eth.rx_unknown_protocol;
+ stats->oerrors = hw_stats.eth.tx_discards
+ + hw_stats.eth.tx_errors;
+ stats->rx_nombuf = 0;
+ for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
+ stats->q_ipackets[i] = 0;
+ stats->q_opackets[i] = 0;
+ stats->q_ibytes[i] = 0;
+ stats->q_obytes[i] = 0;
+ stats->q_errors[i] = 0;
+ }
+ } else {
+ ipn3ke_rpst_read_10g_lineside_stats_registers(hw,
+ port_id,
+ &hw_stats,
+ stats);
+ }
+
+ return 0;
+}
+
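+/* Fill the xstats array. Entries are emitted in the same order as the
+ * names reported by ipn3ke_rpst_xstats_get_names() below: basic
+ * Ethernet stats first, then per-port HW stats, then the per-priority
+ * RX and TX counters.
+ */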
+static int
+ipn3ke_rpst_xstats_get
+(struct rte_eth_dev *ethdev, struct rte_eth_xstat *xstats, unsigned int n)
+{
+ uint16_t port_id = 0;
+ char *ch = NULL;
+ int cnt = 0;
+ unsigned int i, count, prio;
+ struct rte_afu_device *afu_dev = NULL;
+ struct ipn3ke_hw *hw = NULL;
+ struct ipn3ke_rpst_hw_port_stats hw_stats;
+ struct rte_eth_stats stats;
+
+	if (!xstats)
+		return ipn3ke_rpst_xstats_calc_num();
+
+ if (!ethdev) {
+ IPN3KE_AFU_PMD_ERR("ethernet device to get statistics is NULL");
+ return -EINVAL;
+ }
+
+ afu_dev = RTE_ETH_DEV_TO_AFU(ethdev);
+ if (!afu_dev) {
+ IPN3KE_AFU_PMD_ERR("afu device to get statistics is NULL!");
+ return -EINVAL;
+ }
+
+ if (!afu_dev->shared.data) {
+ IPN3KE_AFU_PMD_ERR("hardware data to get statistics is NULL!");
+ return -EINVAL;
+ }
+
+ hw = afu_dev->shared.data;
+
+ ch = ethdev->data->name;
+ if (!ch) {
+ IPN3KE_AFU_PMD_ERR("ethdev name is NULL!");
+ return -EINVAL;
+ }
+	while (*ch) {
+		if (*ch == '_')
+			cnt++;
+		ch++;
+		if (cnt == 3)
+			break;
+	}
+	if (cnt != 3) {
+		IPN3KE_AFU_PMD_ERR("Cannot get port_id from ethdev name!");
+		return -EINVAL;
+	}
+ port_id = atoi(ch);
+
+ count = ipn3ke_rpst_xstats_calc_num();
+ if (n < count)
+ return count;
+
+ if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) {
+ ipn3ke_rpst_read_25g_lineside_stats_registers(hw,
+ port_id,
+ &hw_stats);
+ } else {
+ ipn3ke_rpst_read_10g_lineside_stats_registers(hw,
+ port_id,
+ &hw_stats,
+ &stats);
+ }
+
+ count = 0;
+
+ /* Get stats from ipn3ke_rpst_stats */
+ for (i = 0; i < IPN3KE_RPST_ETH_XSTATS_CNT; i++) {
+ xstats[count].value = *(uint64_t *)(((char *)&hw_stats.eth)
+ + ipn3ke_rpst_stats_strings[i].offset);
+ xstats[count].id = count;
+ count++;
+ }
+
+	/* Get individual stats from ipn3ke_rpst_hw_port */
+ for (i = 0; i < IPN3KE_RPST_HW_PORT_XSTATS_CNT; i++) {
+ xstats[count].value = *(uint64_t *)(((char *)(&hw_stats)) +
+ ipn3ke_rpst_hw_port_strings[i].offset);
+ xstats[count].id = count;
+ count++;
+ }
+
+	/* Get individual stats from ipn3ke_rpst_rxq_pri */
+ for (i = 0; i < IPN3KE_RPST_RXQ_PRIO_XSTATS_CNT; i++) {
+ for (prio = 0; prio < IPN3KE_RPST_PRIO_XSTATS_CNT; prio++) {
+ xstats[count].value =
+ *(uint64_t *)(((char *)(&hw_stats)) +
+ ipn3ke_rpst_rxq_prio_strings[i].offset +
+ (sizeof(uint64_t) * prio));
+ xstats[count].id = count;
+ count++;
+ }
+ }
+
+	/* Get individual stats from ipn3ke_rpst_txq_prio */
+ for (i = 0; i < IPN3KE_RPST_TXQ_PRIO_XSTATS_CNT; i++) {
+ for (prio = 0; prio < IPN3KE_RPST_PRIO_XSTATS_CNT; prio++) {
+ xstats[count].value =
+ *(uint64_t *)(((char *)(&hw_stats)) +
+ ipn3ke_rpst_txq_prio_strings[i].offset +
+ (sizeof(uint64_t) * prio));
+ xstats[count].id = count;
+ count++;
+ }
+ }
+
+ return count;
+}
+
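+/* Report the xstats names. The ordering here must mirror
+ * ipn3ke_rpst_xstats_get() above so that names and values line up.
+ */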
+static int
+ipn3ke_rpst_xstats_get_names
+(__rte_unused struct rte_eth_dev *dev,
+struct rte_eth_xstat_name *xstats_names,
+__rte_unused unsigned int limit)
+{
+ unsigned int count = 0;
+ unsigned int i, prio;
+
+ if (!xstats_names)
+ return ipn3ke_rpst_xstats_calc_num();
+
+	/* Note: limit checked in rte_eth_xstats_get_names() */
+
+ /* Get stats from ipn3ke_rpst_stats */
+ for (i = 0; i < IPN3KE_RPST_ETH_XSTATS_CNT; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s",
+ ipn3ke_rpst_stats_strings[i].name);
+ count++;
+ }
+
+	/* Get individual stats from ipn3ke_rpst_hw_port */
+ for (i = 0; i < IPN3KE_RPST_HW_PORT_XSTATS_CNT; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s",
+ ipn3ke_rpst_hw_port_strings[i].name);
+ count++;
+ }
+
+	/* Get individual stats from ipn3ke_rpst_rxq_pri */
+ for (i = 0; i < IPN3KE_RPST_RXQ_PRIO_XSTATS_CNT; i++) {
+		for (prio = 0; prio < IPN3KE_RPST_PRIO_XSTATS_CNT; prio++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "rx_priority%u_%s",
+ prio,
+ ipn3ke_rpst_rxq_prio_strings[i].name);
+ count++;
+ }
+ }
+
+	/* Get individual stats from ipn3ke_rpst_txq_prio */
+ for (i = 0; i < IPN3KE_RPST_TXQ_PRIO_XSTATS_CNT; i++) {
+		for (prio = 0; prio < IPN3KE_RPST_PRIO_XSTATS_CNT; prio++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "tx_priority%u_%s",
+ prio,
+ ipn3ke_rpst_txq_prio_strings[i].name);
+ count++;
+ }
+ }
+ return count;
+}
+
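+/* Query the line-side link state through the ifpga rawdev attributes:
+ * "LineSideLinkStatus" returns a bitmap with one bit per line-side
+ * port, and "LineSideLinkSpeed" returns the speed enumeration. Both
+ * are read through the 64-bit attr_get() interface.
+ */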
+static void
+ipn3ke_update_link(struct rte_rawdev *rawdev,
+ uint16_t port, struct rte_eth_link *link)
+{
+ uint64_t line_link_bitmap = 0;
+	uint64_t link_speed = 0;
+
+ rawdev->dev_ops->attr_get(rawdev,
+ "LineSideLinkStatus",
+		&line_link_bitmap);
+
+ /* Parse the link status */
+ if ((1 << port) & line_link_bitmap)
+ link->link_status = 1;
+ else
+ link->link_status = 0;
+
+ IPN3KE_AFU_PMD_DEBUG("port is %d\n", port);
+ IPN3KE_AFU_PMD_DEBUG("link->link_status is %d\n", link->link_status);
+
+ rawdev->dev_ops->attr_get(rawdev,
+ "LineSideLinkSpeed",
+		&link_speed);
+ switch (link_speed) {
+ case IFPGA_RAWDEV_LINK_SPEED_10GB:
+ link->link_speed = ETH_SPEED_NUM_10G;
+ break;
+ case IFPGA_RAWDEV_LINK_SPEED_25GB:
+ link->link_speed = ETH_SPEED_NUM_25G;
+ break;
+ default:
+ IPN3KE_AFU_PMD_ERR("Unknown link speed info %u", link_speed);
+ break;
+ }
+}
+
+/*
+ * Set device link up.
+ */
+int
+ipn3ke_rpst_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
+ struct rte_eth_dev *pf;
+ int ret = 0;
+
+ if (rpst->i40e_pf_eth) {
+ ret = rte_eth_dev_set_link_up(rpst->i40e_pf_eth_port_id);
+ pf = rpst->i40e_pf_eth;
+ (*rpst->i40e_pf_eth->dev_ops->link_update)(pf, 1);
+ }
+
+ return ret;
+}
+
+/*
+ * Set device link down.
+ */
+int
+ipn3ke_rpst_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
+ struct rte_eth_dev *pf;
+ int ret = 0;
+
+ if (rpst->i40e_pf_eth) {
+ ret = rte_eth_dev_set_link_down(rpst->i40e_pf_eth_port_id);
+ pf = rpst->i40e_pf_eth;
+ (*rpst->i40e_pf_eth->dev_ops->link_update)(pf, 1);
+ }
+
+ return ret;
+}
+
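+/* Synchronize the representor link state with the line side and, on a
+ * status transition, propagate the change to the attached i40e PF port
+ * so that the NIC side follows the retimer.
+ */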
+int
+ipn3ke_rpst_link_update(struct rte_eth_dev *ethdev,
+ __rte_unused int wait_to_complete)
+{
+ struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
+ struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
+ struct rte_rawdev *rawdev;
+ struct rte_eth_link link;
+ struct rte_eth_dev *pf;
+
+ memset(&link, 0, sizeof(link));
+
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_autoneg = !(ethdev->data->dev_conf.link_speeds &
+ ETH_LINK_SPEED_FIXED);
+
+ rawdev = hw->rawdev;
+ ipn3ke_update_link(rawdev, rpst->port_id, &link);
+
+ if (!rpst->ori_linfo.link_status &&
+ link.link_status) {
+ IPN3KE_AFU_PMD_DEBUG("Update Rpst %d Up\n", rpst->port_id);
+ rpst->ori_linfo.link_status = link.link_status;
+ rpst->ori_linfo.link_speed = link.link_speed;
+
+ rte_eth_linkstatus_set(ethdev, &link);
+
+ if (rpst->i40e_pf_eth) {
+ IPN3KE_AFU_PMD_DEBUG("Update FVL PF %d Up\n",
+ rpst->i40e_pf_eth_port_id);
+ rte_eth_dev_set_link_up(rpst->i40e_pf_eth_port_id);
+ pf = rpst->i40e_pf_eth;
+ (*rpst->i40e_pf_eth->dev_ops->link_update)(pf, 1);
+ }
+ } else if (rpst->ori_linfo.link_status &&
+ !link.link_status) {
+ IPN3KE_AFU_PMD_DEBUG("Update Rpst %d Down\n",
+ rpst->port_id);
+ rpst->ori_linfo.link_status = link.link_status;
+ rpst->ori_linfo.link_speed = link.link_speed;
+
+ rte_eth_linkstatus_set(ethdev, &link);
+
+ if (rpst->i40e_pf_eth) {
+ IPN3KE_AFU_PMD_DEBUG("Update FVL PF %d Down\n",
+ rpst->i40e_pf_eth_port_id);
+ rte_eth_dev_set_link_down(rpst->i40e_pf_eth_port_id);
+ pf = rpst->i40e_pf_eth;
+ (*rpst->i40e_pf_eth->dev_ops->link_update)(pf, 1);
+ }
+ }
+
+ return 0;
+}
+
+static int
+ipn3ke_rpst_link_check(struct ipn3ke_rpst *rpst)
+{
+ struct ipn3ke_hw *hw;
+ struct rte_rawdev *rawdev;
+ struct rte_eth_link link;
+ struct rte_eth_dev *pf;
+
+ if (rpst == NULL)
+ return -1;
+
+ hw = rpst->hw;
+
+ memset(&link, 0, sizeof(link));
+
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_autoneg = !(rpst->ethdev->data->dev_conf.link_speeds &
+ ETH_LINK_SPEED_FIXED);
+
+ rawdev = hw->rawdev;
+ ipn3ke_update_link(rawdev, rpst->port_id, &link);
+
+ if (!rpst->ori_linfo.link_status &&
+ link.link_status) {
+ IPN3KE_AFU_PMD_DEBUG("Check Rpst %d Up\n", rpst->port_id);
+ rpst->ori_linfo.link_status = link.link_status;
+ rpst->ori_linfo.link_speed = link.link_speed;
+
+ rte_eth_linkstatus_set(rpst->ethdev, &link);
+
+ if (rpst->i40e_pf_eth) {
+ IPN3KE_AFU_PMD_DEBUG("Check FVL PF %d Up\n",
+ rpst->i40e_pf_eth_port_id);
+ rte_eth_dev_set_link_up(rpst->i40e_pf_eth_port_id);
+ pf = rpst->i40e_pf_eth;
+ (*rpst->i40e_pf_eth->dev_ops->link_update)(pf, 1);
+ }
+ } else if (rpst->ori_linfo.link_status &&
+ !link.link_status) {
+ IPN3KE_AFU_PMD_DEBUG("Check Rpst %d Down\n", rpst->port_id);
+ rpst->ori_linfo.link_status = link.link_status;
+ rpst->ori_linfo.link_speed = link.link_speed;
+
+ rte_eth_linkstatus_set(rpst->ethdev, &link);
+
+ if (rpst->i40e_pf_eth) {
+ IPN3KE_AFU_PMD_DEBUG("Check FVL PF %d Down\n",
+ rpst->i40e_pf_eth_port_id);
+ rte_eth_dev_set_link_down(rpst->i40e_pf_eth_port_id);
+ pf = rpst->i40e_pf_eth;
+ (*rpst->i40e_pf_eth->dev_ops->link_update)(pf, 1);
+ }
+ }
+
+ return 0;
+}
+
+static void *
+ipn3ke_rpst_scan_handle_request(__rte_unused void *param)
+{
+ struct ipn3ke_rpst *rpst;
+ int num = 0;
+#define MS 1000
+#define SCAN_NUM 32
+
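+ /* Poll every registered representor for link changes; yield for
+ * 1 ms after each batch of SCAN_NUM ports and sleep 50 ms between
+ * full scan rounds.
+ */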
+ for (;;) {
+ num = 0;
+ TAILQ_FOREACH(rpst, &ipn3ke_rpst_list, next) {
+ if (rpst->i40e_pf_eth &&
+ rpst->ethdev->data->dev_started &&
+ rpst->i40e_pf_eth->data->dev_started)
+ ipn3ke_rpst_link_check(rpst);
+
+ if (++num > SCAN_NUM)
+ rte_delay_us(1 * MS);
+ }
+ rte_delay_us(50 * MS);
+
+ if (num == 0xffffff)
+ return NULL;
+ }
+
+ return NULL;
+}
+
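+/*
+ * The scan thread is created when the first representor registers
+ * (ipn3ke_rpst_scan_num reaches 1) and cancelled/joined when the
+ * last one unregisters (the count drops back to 0).
+ */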
+static int
+ipn3ke_rpst_scan_check(void)
+{
+ int ret;
+
+ if (ipn3ke_rpst_scan_num == 1) {
+ ret = rte_ctrl_thread_create(&ipn3ke_rpst_scan_thread,
+ "ipn3ke scanner",
+ NULL,
+ ipn3ke_rpst_scan_handle_request, NULL);
+ if (ret) {
+ IPN3KE_AFU_PMD_ERR("Fail to create ipn3ke rpst scan thread");
+ return -1;
+ }
+ } else if (ipn3ke_rpst_scan_num == 0) {
+ ret = pthread_cancel(ipn3ke_rpst_scan_thread);
+ if (ret)
+ IPN3KE_AFU_PMD_ERR("Can't cancel the thread");
+
+ ret = pthread_join(ipn3ke_rpst_scan_thread, NULL);
+ if (ret)
+ IPN3KE_AFU_PMD_ERR("Can't join the thread");
+
+ return ret;
+ }
+
+ return 0;
+}
+
+int
+ipn3ke_rpst_promiscuous_enable(struct rte_eth_dev *ethdev)
+{
+ struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
+ struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
+ uint32_t rddata, val;
+
+ if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
+ /* Enable all unicast */
+ (*hw->f_mac_read)(hw,
+ &rddata,
+ IPN3KE_MAC_RX_FRAME_CONTROL,
+ rpst->port_id,
+ 0);
+ val = 1;
+ val &= IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLUCAST_MASK;
+ val |= rddata;
+ (*hw->f_mac_write)(hw,
+ val,
+ IPN3KE_MAC_RX_FRAME_CONTROL,
+ rpst->port_id,
+ 0);
+ }
+
+ return 0;
+}
+
+int
+ipn3ke_rpst_promiscuous_disable(struct rte_eth_dev *ethdev)
+{
+ struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
+ struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
+ uint32_t rddata, val;
+
+ if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
+ /* Disable all unicast */
+ (*hw->f_mac_read)(hw,
+ &rddata,
+ IPN3KE_MAC_RX_FRAME_CONTROL,
+ rpst->port_id,
+ 0);
+ val = rddata;
+ val &= ~IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLUCAST_MASK;
+ (*hw->f_mac_write)(hw,
+ val,
+ IPN3KE_MAC_RX_FRAME_CONTROL,
+ rpst->port_id,
+ 0);
+ }
+
+ return 0;
+}
+
+int
+ipn3ke_rpst_allmulticast_enable(struct rte_eth_dev *ethdev)
+{
+ struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
+ struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
+ uint32_t rddata, val;
+
+ if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
+ /* Enable all multicast */
+ (*hw->f_mac_read)(hw,
+ &rddata,
+ IPN3KE_MAC_RX_FRAME_CONTROL,
+ rpst->port_id,
+ 0);
+ val = 1;
+ val <<= IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLMCAST_SHIFT;
+ val &= IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLMCAST_MASK;
+ val |= rddata;
+ (*hw->f_mac_write)(hw,
+ val,
+ IPN3KE_MAC_RX_FRAME_CONTROL,
+ rpst->port_id,
+ 0);
+ }
+
+ return 0;
+}
+
+int
+ipn3ke_rpst_allmulticast_disable(struct rte_eth_dev *ethdev)
+{
+ struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
+ struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
+ uint32_t rddata, val;
+
+ if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
+ /* Disable all multicast */
+ (*hw->f_mac_read)(hw,
+ &rddata,
+ IPN3KE_MAC_RX_FRAME_CONTROL,
+ rpst->port_id,
+ 0);
+ val = rddata;
+ val &= ~IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLMCAST_MASK;
+ (*hw->f_mac_write)(hw,
+ val,
+ IPN3KE_MAC_RX_FRAME_CONTROL,
+ rpst->port_id,
+ 0);
+ }
+
+ return 0;
+}
+
+int
+ipn3ke_rpst_mac_addr_set(struct rte_eth_dev *ethdev,
+ struct rte_ether_addr *mac_addr)
+{
+ struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
+ struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
+ uint32_t val;
+
+ if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
+ IPN3KE_AFU_PMD_ERR("Tried to set invalid MAC address.");
+ return -EINVAL;
+ }
+
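+ /* The 48-bit MAC is split across two registers: the low 32 bits
+ * go into PRIMARY_MAC_ADDR0 and the high 16 bits into
+ * PRIMARY_MAC_ADDR1.
+ */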
+ if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
+ rte_ether_addr_copy(&mac_addr[0], &rpst->mac_addr);
+
+ /* Set mac address */
+ rte_memcpy(((char *)(&val)), &mac_addr->addr_bytes[0],
+ sizeof(uint32_t));
+ (*hw->f_mac_write)(hw,
+ val,
+ IPN3KE_MAC_PRIMARY_MAC_ADDR0,
+ rpst->port_id,
+ 0);
+ val = 0;
+ rte_memcpy(((char *)(&val)), &mac_addr->addr_bytes[4],
+ sizeof(uint16_t));
+ (*hw->f_mac_write)(hw,
+ val,
+ IPN3KE_MAC_PRIMARY_MAC_ADDR1,
+ rpst->port_id,
+ 0);
+ }
+
+ return 0;
+}
+
+int
+ipn3ke_rpst_mtu_set(struct rte_eth_dev *ethdev, uint16_t mtu)
+{
+ int ret = 0;
+ struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
+ struct rte_eth_dev_data *dev_data = ethdev->data;
+ uint32_t frame_size = mtu + IPN3KE_ETH_OVERHEAD;
+
+ /* check if mtu is within the allowed range */
+ if (mtu < RTE_ETHER_MIN_MTU ||
+ frame_size > IPN3KE_MAC_FRAME_SIZE_MAX)
+ return -EINVAL;
+
+ /* mtu setting is forbidden if port is start */
+ /* make sure NIC port is stopped */
+ if (rpst->i40e_pf_eth && rpst->i40e_pf_eth->data->dev_started) {
+ IPN3KE_AFU_PMD_ERR("NIC port %d must "
+ "be stopped before configuration",
+ rpst->i40e_pf_eth->data->port_id);
+ return -EBUSY;
+ }
+ /* mtu setting is forbidden if port is start */
+ if (dev_data->dev_started) {
+ IPN3KE_AFU_PMD_ERR("FPGA port %d must "
+ "be stopped before configuration",
+ dev_data->port_id);
+ return -EBUSY;
+ }
+
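+ /* Keep the jumbo-frame Rx offload flag consistent with the new
+ * maximum frame size.
+ */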
+ if (frame_size > RTE_ETHER_MAX_LEN)
+ dev_data->dev_conf.rxmode.offloads |=
+ (uint64_t)(DEV_RX_OFFLOAD_JUMBO_FRAME);
+ else
+ dev_data->dev_conf.rxmode.offloads &=
+ (uint64_t)(~DEV_RX_OFFLOAD_JUMBO_FRAME);
+
+ dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+ if (rpst->i40e_pf_eth) {
+ ret = rpst->i40e_pf_eth->dev_ops->mtu_set(rpst->i40e_pf_eth,
+ mtu);
+ if (!ret)
+ rpst->i40e_pf_eth->data->mtu = mtu;
+ }
+
+ return ret;
+}
+
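+/*
+ * Filter control: when the AFU flow accelerator is enabled, expose
+ * the ipn3ke rte_flow ops; otherwise delegate to the attached
+ * i40e PF.
+ */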
+static int
+ipn3ke_afu_filter_ctrl(struct rte_eth_dev *ethdev,
+ enum rte_filter_type filter_type, enum rte_filter_op filter_op,
+ void *arg)
+{
+ int ret = 0;
+ struct ipn3ke_hw *hw;
+ struct ipn3ke_rpst *rpst;
+
+ if (ethdev == NULL)
+ return -EINVAL;
+
+ hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
+ rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
+
+ if (hw->acc_flow)
+ switch (filter_type) {
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &ipn3ke_flow_ops;
+ break;
+ default:
+ IPN3KE_AFU_PMD_WARN("Filter type (%d) not supported",
+ filter_type);
+ ret = -EINVAL;
+ break;
+ }
+ else if (rpst->i40e_pf_eth)
+ ret = (*rpst->i40e_pf_eth->dev_ops->filter_ctrl)(ethdev,
+ filter_type,
+ filter_op,
+ arg);
+ else
+ return -EINVAL;
+
+ return ret;
+}
+
+static const struct eth_dev_ops ipn3ke_rpst_dev_ops = {
+ .dev_infos_get = ipn3ke_rpst_dev_infos_get,
+
+ .dev_configure = ipn3ke_rpst_dev_configure,
+ .dev_start = ipn3ke_rpst_dev_start,
+ .dev_stop = ipn3ke_rpst_dev_stop,
+ .dev_close = ipn3ke_rpst_dev_close,
+ .dev_reset = ipn3ke_rpst_dev_reset,
+
+ .stats_get = ipn3ke_rpst_stats_get,
+ .xstats_get = ipn3ke_rpst_xstats_get,
+ .xstats_get_names = ipn3ke_rpst_xstats_get_names,
+ .stats_reset = ipn3ke_rpst_stats_reset,
+ .xstats_reset = ipn3ke_rpst_stats_reset,
+
+ .filter_ctrl = ipn3ke_afu_filter_ctrl,
+
+ .rx_queue_start = ipn3ke_rpst_rx_queue_start,
+ .rx_queue_stop = ipn3ke_rpst_rx_queue_stop,
+ .tx_queue_start = ipn3ke_rpst_tx_queue_start,
+ .tx_queue_stop = ipn3ke_rpst_tx_queue_stop,
+ .rx_queue_setup = ipn3ke_rpst_rx_queue_setup,
+ .rx_queue_release = ipn3ke_rpst_rx_queue_release,
+ .tx_queue_setup = ipn3ke_rpst_tx_queue_setup,
+ .tx_queue_release = ipn3ke_rpst_tx_queue_release,
+
+ .dev_set_link_up = ipn3ke_rpst_dev_set_link_up,
+ .dev_set_link_down = ipn3ke_rpst_dev_set_link_down,
+ .link_update = ipn3ke_rpst_link_update,
+
+ .promiscuous_enable = ipn3ke_rpst_promiscuous_enable,
+ .promiscuous_disable = ipn3ke_rpst_promiscuous_disable,
+ .allmulticast_enable = ipn3ke_rpst_allmulticast_enable,
+ .allmulticast_disable = ipn3ke_rpst_allmulticast_disable,
+ .mac_addr_set = ipn3ke_rpst_mac_addr_set,
+ .mtu_set = ipn3ke_rpst_mtu_set,
+
+ .tm_ops_get = ipn3ke_tm_ops_get,
+};
+
+static uint16_t ipn3ke_rpst_recv_pkts(__rte_unused void *rx_q,
+ __rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+static uint16_t
+ipn3ke_rpst_xmit_pkts(__rte_unused void *tx_queue,
+ __rte_unused struct rte_mbuf **tx_pkts, __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+int
+ipn3ke_rpst_init(struct rte_eth_dev *ethdev, void *init_params)
+{
+ struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
+ struct ipn3ke_rpst *representor_param =
+ (struct ipn3ke_rpst *)init_params;
+
+ if (representor_param->port_id >= representor_param->hw->port_num)
+ return -ENODEV;
+
+ if (ipn3ke_bridge_func.set_i40e_sw_dev == NULL)
+ return -ENOMEM;
+
+ rpst->ethdev = ethdev;
+ rpst->switch_domain_id = representor_param->switch_domain_id;
+ rpst->port_id = representor_param->port_id;
+ rpst->hw = representor_param->hw;
+ rpst->i40e_pf_eth = representor_param->i40e_pf_eth;
+ rpst->i40e_pf_eth_port_id = representor_param->i40e_pf_eth_port_id;
+ if (rpst->i40e_pf_eth)
+ ipn3ke_bridge_func.set_i40e_sw_dev(rpst->i40e_pf_eth_port_id,
+ rpst->ethdev);
+
+ ethdev->data->mac_addrs = rte_zmalloc("ipn3ke", RTE_ETHER_ADDR_LEN, 0);
+ if (!ethdev->data->mac_addrs) {
+ IPN3KE_AFU_PMD_ERR("Failed to "
+ "allocated memory for storing mac address");
+ return -ENODEV;
+ }
+
+ if (rpst->hw->tm_hw_enable)
+ ipn3ke_tm_init(rpst);
+
+ /* Set representor device ops */
+ ethdev->dev_ops = &ipn3ke_rpst_dev_ops;
+
+ /* No data-path, but need stub Rx/Tx functions to avoid crash
+ * when testing with the likes of testpmd.
+ */
+ ethdev->rx_pkt_burst = ipn3ke_rpst_recv_pkts;
+ ethdev->tx_pkt_burst = ipn3ke_rpst_xmit_pkts;
+
+ ethdev->data->nb_rx_queues = 1;
+ ethdev->data->nb_tx_queues = 1;
+
+ ethdev->data->mac_addrs = rte_zmalloc("ipn3ke_afu_representor",
+ RTE_ETHER_ADDR_LEN,
+ 0);
+ if (!ethdev->data->mac_addrs) {
+ IPN3KE_AFU_PMD_ERR("Failed to "
+ "allocated memory for storing mac address");
+ return -ENODEV;
+ }
+
+ ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+
+ rte_spinlock_lock(&ipn3ke_link_notify_list_lk);
+ TAILQ_INSERT_TAIL(&ipn3ke_rpst_list, rpst, next);
+ ipn3ke_rpst_scan_num++;
+ ipn3ke_rpst_scan_check();
+ rte_spinlock_unlock(&ipn3ke_link_notify_list_lk);
+
+ return 0;
+}
+
+int
+ipn3ke_rpst_uninit(struct rte_eth_dev *ethdev)
+{
+ struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
+
+ rte_spinlock_lock(&ipn3ke_link_notify_list_lk);
+ TAILQ_REMOVE(&ipn3ke_rpst_list, rpst, next);
+ ipn3ke_rpst_scan_num--;
+ ipn3ke_rpst_scan_check();
+ rte_spinlock_unlock(&ipn3ke_link_notify_list_lk);
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_tm.c b/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_tm.c
new file mode 100644
index 000000000..5a16c5f96
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_tm.c
@@ -0,0 +1,2055 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_bus_pci.h>
+#include <rte_ethdev.h>
+#include <rte_pci.h>
+#include <rte_malloc.h>
+#include <rte_tm_driver.h>
+
+#include <rte_mbuf.h>
+#include <rte_sched.h>
+#include <rte_ethdev_driver.h>
+
+#include <rte_io.h>
+#include <rte_rawdev.h>
+#include <rte_rawdev_pmd.h>
+#include <rte_bus_ifpga.h>
+#include <ifpga_logs.h>
+
+#include "ipn3ke_rawdev_api.h"
+#include "ipn3ke_flow.h"
+#include "ipn3ke_logs.h"
+#include "ipn3ke_ethdev.h"
+
+#define BYTES_IN_MBPS (1000 * 1000 / 8)
+#define SUBPORT_TC_PERIOD 10
+#define PIPE_TC_PERIOD 40
+
+struct ipn3ke_tm_shaper_params_range_type {
+ uint32_t m1;
+ uint32_t m2;
+ uint32_t exp;
+ uint32_t exp2;
+ uint32_t low;
+ uint32_t high;
+};
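+/*
+ * Rate-encoding lookup table: a requested shaper rate in [low, high]
+ * is encoded as a mantissa/exponent pair with m = (rate / 4) / exp2
+ * and e = exp (see ipn3ke_tm_shaper_parame_trans()).
+ */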
+struct ipn3ke_tm_shaper_params_range_type ipn3ke_tm_shaper_params_rang[] = {
+ { 0, 1, 0, 1, 0, 4},
+ { 2, 3, 0, 1, 8, 12},
+ { 4, 7, 0, 1, 16, 28},
+ { 8, 15, 0, 1, 32, 60},
+ { 16, 31, 0, 1, 64, 124},
+ { 32, 63, 0, 1, 128, 252},
+ { 64, 127, 0, 1, 256, 508},
+ {128, 255, 0, 1, 512, 1020},
+ {256, 511, 0, 1, 1024, 2044},
+ {512, 1023, 0, 1, 2048, 4092},
+ {512, 1023, 1, 2, 4096, 8184},
+ {512, 1023, 2, 4, 8192, 16368},
+ {512, 1023, 3, 8, 16384, 32736},
+ {512, 1023, 4, 16, 32768, 65472},
+ {512, 1023, 5, 32, 65536, 130944},
+ {512, 1023, 6, 64, 131072, 261888},
+ {512, 1023, 7, 128, 262144, 523776},
+ {512, 1023, 8, 256, 524288, 1047552},
+ {512, 1023, 9, 512, 1048576, 2095104},
+ {512, 1023, 10, 1024, 2097152, 4190208},
+ {512, 1023, 11, 2048, 4194304, 8380416},
+ {512, 1023, 12, 4096, 8388608, 16760832},
+ {512, 1023, 13, 8192, 16777216, 33521664},
+ {512, 1023, 14, 16384, 33554432, 67043328},
+ {512, 1023, 15, 32768, 67108864, 134086656},
+};
+
+#define IPN3KE_TM_SHAPER_RANGE_NUM (sizeof(ipn3ke_tm_shaper_params_rang) / \
+ sizeof(struct ipn3ke_tm_shaper_params_range_type))
+
+#define IPN3KE_TM_SHAPER_COMMITTED_RATE_MAX \
+ (ipn3ke_tm_shaper_params_rang[IPN3KE_TM_SHAPER_RANGE_NUM - 1].high)
+
+#define IPN3KE_TM_SHAPER_PEAK_RATE_MAX \
+ (ipn3ke_tm_shaper_params_rang[IPN3KE_TM_SHAPER_RANGE_NUM - 1].high)
+
+int
+ipn3ke_hw_tm_init(struct ipn3ke_hw *hw)
+{
+#define SCRATCH_DATA 0xABCDEF
+ struct ipn3ke_tm_node *nodes;
+ struct ipn3ke_tm_tdrop_profile *tdrop_profile;
+ int node_num;
+ int i;
+
+ if (hw == NULL)
+ return -EINVAL;
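+ /* Optional sanity check: write a known pattern to the TM scratch
+ * register and read it back to verify register access.
+ */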
+#if IPN3KE_TM_SCRATCH_RW
+ uint32_t scratch_data;
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_TM_SCRATCH,
+ 0,
+ SCRATCH_DATA,
+ 0xFFFFFFFF);
+ scratch_data = IPN3KE_MASK_READ_REG(hw,
+ IPN3KE_TM_SCRATCH,
+ 0,
+ 0xFFFFFFFF);
+ if (scratch_data != SCRATCH_DATA)
+ return -EINVAL;
+#endif
+ /* alloc memory for all hierarchy nodes */
+ node_num = hw->port_num +
+ IPN3KE_TM_VT_NODE_NUM +
+ IPN3KE_TM_COS_NODE_NUM;
+
+ nodes = rte_zmalloc("ipn3ke_tm_nodes",
+ sizeof(struct ipn3ke_tm_node) * node_num,
+ 0);
+ if (!nodes)
+ return -ENOMEM;
+
+ /* alloc memory for Tail Drop Profile */
+ tdrop_profile = rte_zmalloc("ipn3ke_tm_tdrop_profile",
+ sizeof(struct ipn3ke_tm_tdrop_profile) *
+ IPN3KE_TM_TDROP_PROFILE_NUM,
+ 0);
+ if (!tdrop_profile) {
+ rte_free(nodes);
+ return -ENOMEM;
+ }
+
+ hw->nodes = nodes;
+ hw->port_nodes = nodes;
+ hw->vt_nodes = hw->port_nodes + hw->port_num;
+ hw->cos_nodes = hw->vt_nodes + IPN3KE_TM_VT_NODE_NUM;
+ hw->tdrop_profile = tdrop_profile;
+ hw->tdrop_profile_num = IPN3KE_TM_TDROP_PROFILE_NUM;
+
+ for (i = 0, nodes = hw->port_nodes;
+ i < hw->port_num;
+ i++, nodes++) {
+ nodes->node_index = i;
+ nodes->level = IPN3KE_TM_NODE_LEVEL_PORT;
+ nodes->tm_id = RTE_TM_NODE_ID_NULL;
+ nodes->node_state = IPN3KE_TM_NODE_STATE_IDLE;
+ nodes->parent_node_id = RTE_TM_NODE_ID_NULL;
+ nodes->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
+ nodes->weight = 0;
+ nodes->parent_node = NULL;
+ nodes->shaper_profile.valid = 0;
+ nodes->tdrop_profile = NULL;
+ nodes->n_children = 0;
+ TAILQ_INIT(&nodes->children_node_list);
+ }
+
+ for (i = 0, nodes = hw->vt_nodes;
+ i < IPN3KE_TM_VT_NODE_NUM;
+ i++, nodes++) {
+ nodes->node_index = i;
+ nodes->level = IPN3KE_TM_NODE_LEVEL_VT;
+ nodes->tm_id = RTE_TM_NODE_ID_NULL;
+ nodes->node_state = IPN3KE_TM_NODE_STATE_IDLE;
+ nodes->parent_node_id = RTE_TM_NODE_ID_NULL;
+ nodes->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
+ nodes->weight = 0;
+ nodes->parent_node = NULL;
+ nodes->shaper_profile.valid = 0;
+ nodes->tdrop_profile = NULL;
+ nodes->n_children = 0;
+ TAILQ_INIT(&nodes->children_node_list);
+ }
+
+ for (i = 0, nodes = hw->cos_nodes;
+ i < IPN3KE_TM_COS_NODE_NUM;
+ i++, nodes++) {
+ nodes->node_index = i;
+ nodes->level = IPN3KE_TM_NODE_LEVEL_COS;
+ nodes->tm_id = RTE_TM_NODE_ID_NULL;
+ nodes->node_state = IPN3KE_TM_NODE_STATE_IDLE;
+ nodes->parent_node_id = RTE_TM_NODE_ID_NULL;
+ nodes->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
+ nodes->weight = 0;
+ nodes->parent_node = NULL;
+ nodes->shaper_profile.valid = 0;
+ nodes->tdrop_profile = NULL;
+ nodes->n_children = 0;
+ TAILQ_INIT(&nodes->children_node_list);
+ }
+
+ for (i = 0, tdrop_profile = hw->tdrop_profile;
+ i < IPN3KE_TM_TDROP_PROFILE_NUM;
+ i++, tdrop_profile++) {
+ tdrop_profile->tdrop_profile_id = i;
+ tdrop_profile->n_users = 0;
+ tdrop_profile->valid = 0;
+ }
+
+ return 0;
+}
+
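+/*
+ * Per-port TM state init: bind the representor to its pre-allocated
+ * port node and start with empty profile counts and commit lists.
+ */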
+void
+ipn3ke_tm_init(struct ipn3ke_rpst *rpst)
+{
+ struct ipn3ke_tm_internals *tm;
+ struct ipn3ke_tm_node *port_node;
+
+ tm = &rpst->tm;
+
+ port_node = &rpst->hw->port_nodes[rpst->port_id];
+ tm->h.port_node = port_node;
+
+ tm->h.n_shaper_profiles = 0;
+ tm->h.n_tdrop_profiles = 0;
+ tm->h.n_vt_nodes = 0;
+ tm->h.n_cos_nodes = 0;
+
+ tm->h.port_commit_node = NULL;
+ TAILQ_INIT(&tm->h.vt_commit_node_list);
+ TAILQ_INIT(&tm->h.cos_commit_node_list);
+
+ tm->hierarchy_frozen = 0;
+ tm->tm_started = 1;
+ tm->tm_id = rpst->port_id;
+}
+
+static struct ipn3ke_tm_shaper_profile *
+ipn3ke_hw_tm_shaper_profile_search(struct ipn3ke_hw *hw,
+ uint32_t shaper_profile_id, struct rte_tm_error *error)
+{
+ struct ipn3ke_tm_shaper_profile *sp = NULL;
+ uint32_t level_of_node_id;
+ uint32_t node_index;
+
+ /* Shaper profile ID must not be NONE. */
+ if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE) {
+ rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ return NULL;
+ }
+
+ level_of_node_id = shaper_profile_id / IPN3KE_TM_NODE_LEVEL_MOD;
+ node_index = shaper_profile_id % IPN3KE_TM_NODE_LEVEL_MOD;
+
+ switch (level_of_node_id) {
+ case IPN3KE_TM_NODE_LEVEL_PORT:
+ if (node_index >= hw->port_num)
+ rte_tm_error_set(error,
+ EEXIST,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EEXIST));
+ else
+ sp = &hw->port_nodes[node_index].shaper_profile;
+
+ break;
+
+ case IPN3KE_TM_NODE_LEVEL_VT:
+ if (node_index >= IPN3KE_TM_VT_NODE_NUM)
+ rte_tm_error_set(error,
+ EEXIST,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EEXIST));
+ else
+ sp = &hw->vt_nodes[node_index].shaper_profile;
+
+ break;
+
+ case IPN3KE_TM_NODE_LEVEL_COS:
+ if (node_index >= IPN3KE_TM_COS_NODE_NUM)
+ rte_tm_error_set(error,
+ EEXIST,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EEXIST));
+ else
+ sp = &hw->cos_nodes[node_index].shaper_profile;
+
+ break;
+ default:
+ rte_tm_error_set(error,
+ EEXIST,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EEXIST));
+ }
+
+ return sp;
+}
+
+static struct ipn3ke_tm_tdrop_profile *
+ipn3ke_hw_tm_tdrop_profile_search(struct ipn3ke_hw *hw,
+ uint32_t tdrop_profile_id)
+{
+ struct ipn3ke_tm_tdrop_profile *tdrop_profile;
+
+ if (tdrop_profile_id >= hw->tdrop_profile_num)
+ return NULL;
+
+ tdrop_profile = &hw->tdrop_profile[tdrop_profile_id];
+ if (tdrop_profile->valid)
+ return tdrop_profile;
+
+ return NULL;
+}
+
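+/*
+ * Node IDs encode level and index: level = node_id / LEVEL_MOD,
+ * index = node_id % LEVEL_MOD. A node is returned only when its
+ * current state is set in state_mask and it is either idle or
+ * belongs to the given tm_id.
+ */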
+static struct ipn3ke_tm_node *
+ipn3ke_hw_tm_node_search(struct ipn3ke_hw *hw, uint32_t tm_id,
+ uint32_t node_id, uint32_t state_mask)
+{
+ uint32_t level_of_node_id;
+ uint32_t node_index;
+ struct ipn3ke_tm_node *n;
+
+ level_of_node_id = node_id / IPN3KE_TM_NODE_LEVEL_MOD;
+ node_index = node_id % IPN3KE_TM_NODE_LEVEL_MOD;
+
+ switch (level_of_node_id) {
+ case IPN3KE_TM_NODE_LEVEL_PORT:
+ if (node_index >= hw->port_num)
+ return NULL;
+ n = &hw->port_nodes[node_index];
+
+ break;
+ case IPN3KE_TM_NODE_LEVEL_VT:
+ if (node_index >= IPN3KE_TM_VT_NODE_NUM)
+ return NULL;
+ n = &hw->vt_nodes[node_index];
+
+ break;
+ case IPN3KE_TM_NODE_LEVEL_COS:
+ if (node_index >= IPN3KE_TM_COS_NODE_NUM)
+ return NULL;
+ n = &hw->cos_nodes[node_index];
+
+ break;
+ default:
+ return NULL;
+ }
+
+ /* Check tm node status */
+ if (n->node_state == IPN3KE_TM_NODE_STATE_IDLE) {
+ if (n->tm_id != RTE_TM_NODE_ID_NULL ||
+ n->parent_node_id != RTE_TM_NODE_ID_NULL ||
+ n->parent_node != NULL ||
+ n->n_children > 0) {
+ IPN3KE_AFU_PMD_ERR("tm node check error %d", 1);
+ }
+ } else if (n->node_state < IPN3KE_TM_NODE_STATE_MAX) {
+ if (n->tm_id == RTE_TM_NODE_ID_NULL ||
+ (level_of_node_id != IPN3KE_TM_NODE_LEVEL_PORT &&
+ n->parent_node_id == RTE_TM_NODE_ID_NULL) ||
+ (level_of_node_id != IPN3KE_TM_NODE_LEVEL_PORT &&
+ n->parent_node == NULL)) {
+ IPN3KE_AFU_PMD_ERR("tm node check error %d", 1);
+ }
+ } else {
+ IPN3KE_AFU_PMD_ERR("tm node check error %d", 1);
+ }
+
+ if (IPN3KE_BIT_ISSET(state_mask, n->node_state)) {
+ if (n->node_state == IPN3KE_TM_NODE_STATE_IDLE)
+ return n;
+ else if (n->tm_id == tm_id)
+ return n;
+ else
+ return NULL;
+ } else {
+ return NULL;
+ }
+}
+
+/* Traffic manager node type get */
+static int
+ipn3ke_pmd_tm_node_type_get(struct rte_eth_dev *dev,
+ uint32_t node_id, int *is_leaf, struct rte_tm_error *error)
+{
+ struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
+ struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
+ uint32_t tm_id;
+ struct ipn3ke_tm_node *node;
+ uint32_t state_mask;
+
+ if (is_leaf == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+
+ tm_id = tm->tm_id;
+
+ state_mask = 0;
+ IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
+ node = ipn3ke_hw_tm_node_search(hw, tm_id, node_id, state_mask);
+ if (node_id == RTE_TM_NODE_ID_NULL ||
+ node == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ *is_leaf = (node->level == IPN3KE_TM_NODE_LEVEL_COS) ? 1 : 0;
+
+ return 0;
+}
+
+#define WRED_SUPPORTED 0
+
+#define STATS_MASK_DEFAULT \
+ (RTE_TM_STATS_N_PKTS | \
+ RTE_TM_STATS_N_BYTES | \
+ RTE_TM_STATS_N_PKTS_GREEN_DROPPED | \
+ RTE_TM_STATS_N_BYTES_GREEN_DROPPED)
+
+#define STATS_MASK_QUEUE \
+ (STATS_MASK_DEFAULT | RTE_TM_STATS_N_PKTS_QUEUED)
+
+/* Traffic manager capabilities get */
+static int
+ipn3ke_tm_capabilities_get(__rte_unused struct rte_eth_dev *dev,
+ struct rte_tm_capabilities *cap, struct rte_tm_error *error)
+{
+ if (cap == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_CAPABILITIES,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* set all the parameters to 0 first. */
+ memset(cap, 0, sizeof(*cap));
+
+ cap->n_nodes_max = 1 + IPN3KE_TM_COS_NODE_NUM + IPN3KE_TM_VT_NODE_NUM;
+ cap->n_levels_max = IPN3KE_TM_NODE_LEVEL_MAX;
+
+ cap->non_leaf_nodes_identical = 0;
+ cap->leaf_nodes_identical = 1;
+
+ cap->shaper_n_max = 1 + IPN3KE_TM_VT_NODE_NUM;
+ cap->shaper_private_n_max = 1 + IPN3KE_TM_VT_NODE_NUM;
+ cap->shaper_private_dual_rate_n_max = 0;
+ cap->shaper_private_rate_min = 1;
+ cap->shaper_private_rate_max = 1 + IPN3KE_TM_VT_NODE_NUM;
+
+ cap->shaper_shared_n_max = 0;
+ cap->shaper_shared_n_nodes_per_shaper_max = 0;
+ cap->shaper_shared_n_shapers_per_node_max = 0;
+ cap->shaper_shared_dual_rate_n_max = 0;
+ cap->shaper_shared_rate_min = 0;
+ cap->shaper_shared_rate_max = 0;
+
+ cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
+ cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
+
+ cap->sched_n_children_max = IPN3KE_TM_COS_NODE_NUM;
+ cap->sched_sp_n_priorities_max = 3;
+ cap->sched_wfq_n_children_per_group_max = UINT32_MAX;
+ cap->sched_wfq_n_groups_max = 1;
+ cap->sched_wfq_weight_max = UINT32_MAX;
+
+ cap->cman_wred_packet_mode_supported = 0;
+ cap->cman_wred_byte_mode_supported = 0;
+ cap->cman_head_drop_supported = 0;
+ cap->cman_wred_context_n_max = 0;
+ cap->cman_wred_context_private_n_max = 0;
+ cap->cman_wred_context_shared_n_max = 0;
+ cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
+ cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
+
+ /**
+ * cap->mark_vlan_dei_supported = {0, 0, 0};
+ * cap->mark_ip_ecn_tcp_supported = {0, 0, 0};
+ * cap->mark_ip_ecn_sctp_supported = {0, 0, 0};
+ * cap->mark_ip_dscp_supported = {0, 0, 0};
+ */
+
+ cap->dynamic_update_mask = 0;
+
+ cap->stats_mask = 0;
+
+ return 0;
+}
+
+/* Traffic manager level capabilities get */
+static int
+ipn3ke_tm_level_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t level_id, struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
+
+ if (cap == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_CAPABILITIES,
+ NULL,
+ rte_strerror(EINVAL));
+
+ if (level_id >= IPN3KE_TM_NODE_LEVEL_MAX)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* set all the parameters to 0 first. */
+ memset(cap, 0, sizeof(*cap));
+
+ switch (level_id) {
+ case IPN3KE_TM_NODE_LEVEL_PORT:
+ cap->n_nodes_max = hw->port_num;
+ cap->n_nodes_nonleaf_max = IPN3KE_TM_VT_NODE_NUM;
+ cap->n_nodes_leaf_max = 0;
+ cap->non_leaf_nodes_identical = 0;
+ cap->leaf_nodes_identical = 0;
+
+ cap->nonleaf.shaper_private_supported = 0;
+ cap->nonleaf.shaper_private_dual_rate_supported = 0;
+ cap->nonleaf.shaper_private_rate_min = 1;
+ cap->nonleaf.shaper_private_rate_max = UINT32_MAX;
+ cap->nonleaf.shaper_shared_n_max = 0;
+
+ cap->nonleaf.sched_n_children_max = IPN3KE_TM_VT_NODE_NUM;
+ cap->nonleaf.sched_sp_n_priorities_max = 1;
+ cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+ cap->nonleaf.sched_wfq_n_groups_max = 0;
+ cap->nonleaf.sched_wfq_weight_max = 0;
+
+ cap->nonleaf.stats_mask = STATS_MASK_DEFAULT;
+ break;
+
+ case IPN3KE_TM_NODE_LEVEL_VT:
+ cap->n_nodes_max = IPN3KE_TM_VT_NODE_NUM;
+ cap->n_nodes_nonleaf_max = IPN3KE_TM_COS_NODE_NUM;
+ cap->n_nodes_leaf_max = 0;
+ cap->non_leaf_nodes_identical = 0;
+ cap->leaf_nodes_identical = 0;
+
+ cap->nonleaf.shaper_private_supported = 0;
+ cap->nonleaf.shaper_private_dual_rate_supported = 0;
+ cap->nonleaf.shaper_private_rate_min = 1;
+ cap->nonleaf.shaper_private_rate_max = UINT32_MAX;
+ cap->nonleaf.shaper_shared_n_max = 0;
+
+ cap->nonleaf.sched_n_children_max = IPN3KE_TM_COS_NODE_NUM;
+ cap->nonleaf.sched_sp_n_priorities_max = 1;
+ cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+ cap->nonleaf.sched_wfq_n_groups_max = 0;
+ cap->nonleaf.sched_wfq_weight_max = 0;
+
+ cap->nonleaf.stats_mask = STATS_MASK_DEFAULT;
+ break;
+
+ case IPN3KE_TM_NODE_LEVEL_COS:
+ cap->n_nodes_max = IPN3KE_TM_COS_NODE_NUM;
+ cap->n_nodes_nonleaf_max = 0;
+ cap->n_nodes_leaf_max = IPN3KE_TM_COS_NODE_NUM;
+ cap->non_leaf_nodes_identical = 0;
+ cap->leaf_nodes_identical = 0;
+
+ cap->leaf.shaper_private_supported = 0;
+ cap->leaf.shaper_private_dual_rate_supported = 0;
+ cap->leaf.shaper_private_rate_min = 0;
+ cap->leaf.shaper_private_rate_max = 0;
+ cap->leaf.shaper_shared_n_max = 0;
+
+ cap->leaf.cman_head_drop_supported = 0;
+ cap->leaf.cman_wred_packet_mode_supported = WRED_SUPPORTED;
+ cap->leaf.cman_wred_byte_mode_supported = 0;
+ cap->leaf.cman_wred_context_private_supported = WRED_SUPPORTED;
+ cap->leaf.cman_wred_context_shared_n_max = 0;
+
+ cap->leaf.stats_mask = STATS_MASK_QUEUE;
+ break;
+
+ default:
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ break;
+ }
+
+ return 0;
+}
+
+/* Traffic manager node capabilities get */
+static int
+ipn3ke_tm_node_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t node_id, struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct ipn3ke_rpst *representor = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
+ struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
+ struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
+ uint32_t tm_id;
+ struct ipn3ke_tm_node *tm_node;
+ uint32_t state_mask;
+
+ if (cap == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_CAPABILITIES,
+ NULL,
+ rte_strerror(EINVAL));
+
+ tm_id = tm->tm_id;
+
+ state_mask = 0;
+ IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
+ tm_node = ipn3ke_hw_tm_node_search(hw, tm_id, node_id, state_mask);
+ if (tm_node == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ if (tm_node->tm_id != representor->port_id)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* set all the parameters to 0 first. */
+ memset(cap, 0, sizeof(*cap));
+
+ switch (tm_node->level) {
+ case IPN3KE_TM_NODE_LEVEL_PORT:
+ cap->shaper_private_supported = 1;
+ cap->shaper_private_dual_rate_supported = 0;
+ cap->shaper_private_rate_min = 1;
+ cap->shaper_private_rate_max = UINT32_MAX;
+ cap->shaper_shared_n_max = 0;
+
+ cap->nonleaf.sched_n_children_max = IPN3KE_TM_VT_NODE_NUM;
+ cap->nonleaf.sched_sp_n_priorities_max = 1;
+ cap->nonleaf.sched_wfq_n_children_per_group_max =
+ IPN3KE_TM_VT_NODE_NUM;
+ cap->nonleaf.sched_wfq_n_groups_max = 1;
+ cap->nonleaf.sched_wfq_weight_max = 1;
+
+ cap->stats_mask = STATS_MASK_DEFAULT;
+ break;
+
+ case IPN3KE_TM_NODE_LEVEL_VT:
+ cap->shaper_private_supported = 1;
+ cap->shaper_private_dual_rate_supported = 0;
+ cap->shaper_private_rate_min = 1;
+ cap->shaper_private_rate_max = UINT32_MAX;
+ cap->shaper_shared_n_max = 0;
+
+ cap->nonleaf.sched_n_children_max = IPN3KE_TM_COS_NODE_NUM;
+ cap->nonleaf.sched_sp_n_priorities_max = 1;
+ cap->nonleaf.sched_wfq_n_children_per_group_max =
+ IPN3KE_TM_COS_NODE_NUM;
+ cap->nonleaf.sched_wfq_n_groups_max = 1;
+ cap->nonleaf.sched_wfq_weight_max = 1;
+
+ cap->stats_mask = STATS_MASK_DEFAULT;
+ break;
+
+ case IPN3KE_TM_NODE_LEVEL_COS:
+ cap->shaper_private_supported = 0;
+ cap->shaper_private_dual_rate_supported = 0;
+ cap->shaper_private_rate_min = 0;
+ cap->shaper_private_rate_max = 0;
+ cap->shaper_shared_n_max = 0;
+
+ cap->leaf.cman_head_drop_supported = 0;
+ cap->leaf.cman_wred_packet_mode_supported = WRED_SUPPORTED;
+ cap->leaf.cman_wred_byte_mode_supported = 0;
+ cap->leaf.cman_wred_context_private_supported = WRED_SUPPORTED;
+ cap->leaf.cman_wred_context_shared_n_max = 0;
+
+ cap->stats_mask = STATS_MASK_QUEUE;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
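+/*
+ * Translate a rate into the hardware mantissa/exponent encoding:
+ * m = (rate / 4) / 2^e, i.e. rate is approximately 4 * m * 2^e.
+ */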
+static int
+ipn3ke_tm_shaper_parame_trans(struct rte_tm_shaper_params *profile,
+ struct ipn3ke_tm_shaper_profile *local_profile,
+ const struct ipn3ke_tm_shaper_params_range_type *ref_data)
+{
+ uint32_t i;
+ const struct ipn3ke_tm_shaper_params_range_type *r;
+ uint64_t rate;
+
+ rate = profile->peak.rate;
+ for (i = 0, r = ref_data; i < IPN3KE_TM_SHAPER_RANGE_NUM; i++, r++) {
+ if (rate >= r->low &&
+ rate <= r->high) {
+ local_profile->m = (rate / 4) / r->exp2;
+ local_profile->e = r->exp;
+ local_profile->rate = rate;
+
+ return 0;
+ }
+ }
+
+ return -1;
+}
+
+static int
+ipn3ke_tm_shaper_profile_add(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id, struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error)
+{
+ struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
+ struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
+ struct ipn3ke_tm_shaper_profile *sp;
+
+ /* Shaper profile must not exist. */
+ sp = ipn3ke_hw_tm_shaper_profile_search(hw, shaper_profile_id, error);
+ if (!sp || sp->valid)
+ return -rte_tm_error_set(error,
+ EEXIST,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EEXIST));
+
+ /* Profile must not be NULL. */
+ if (profile == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Peak rate: non-zero, within the supported range */
+ if (profile->peak.rate == 0 ||
+ profile->peak.rate > IPN3KE_TM_SHAPER_PEAK_RATE_MAX)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Peak size: not supported, must be zero */
+ if (profile->peak.size != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Dual-rate profiles are not supported. */
+ if (profile->committed.rate > IPN3KE_TM_SHAPER_COMMITTED_RATE_MAX)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Packet length adjust: not supported, must be zero */
+ if (profile->pkt_length_adjust != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN,
+ NULL,
+ rte_strerror(EINVAL));
+
+ if (ipn3ke_tm_shaper_parame_trans(profile,
+ sp,
+ ipn3ke_tm_shaper_params_rang)) {
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
+ NULL,
+ rte_strerror(EINVAL));
+ } else {
+ sp->valid = 1;
+ rte_memcpy(&sp->params, profile, sizeof(sp->params));
+ }
+
+ tm->h.n_shaper_profiles++;
+
+ return 0;
+}
+
+/* Traffic manager shaper profile delete */
+static int
+ipn3ke_tm_shaper_profile_delete(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id, struct rte_tm_error *error)
+{
+ struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
+ struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
+ struct ipn3ke_tm_shaper_profile *sp;
+
+ /* Check existing */
+ sp = ipn3ke_hw_tm_shaper_profile_search(hw, shaper_profile_id, error);
+ if (!sp || !sp->valid)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ sp->valid = 0;
+ tm->h.n_shaper_profiles--;
+
+ return 0;
+}
+
+static int
+ipn3ke_tm_tdrop_profile_check(__rte_unused struct rte_eth_dev *dev,
+ uint32_t tdrop_profile_id, struct rte_tm_wred_params *profile,
+ struct rte_tm_error *error)
+{
+ enum rte_color color;
+
+ /* TDROP profile ID must not be NONE. */
+ if (tdrop_profile_id == RTE_TM_WRED_PROFILE_ID_NONE)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Profile must not be NULL. */
+ if (profile == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* TDROP profile should be in packet mode */
+ if (profile->packet_mode != 0)
+ return -rte_tm_error_set(error,
+ ENOTSUP,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE,
+ NULL,
+ rte_strerror(ENOTSUP));
+
+ /* Only the GREEN profile is used: min_th must fit within the
+ * th1/th2 hardware fields and max_th must be zero.
+ */
+ for (color = RTE_COLOR_GREEN; color <= RTE_COLOR_GREEN; color++) {
+ uint64_t min_th = profile->red_params[color].min_th;
+ uint64_t max_th = profile->red_params[color].max_th;
+
+ if (((min_th >> IPN3KE_TDROP_TH1_SHIFT) >>
+ IPN3KE_TDROP_TH1_SHIFT) ||
+ max_th != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ return 0;
+}
+
+static int
+ipn3ke_hw_tm_tdrop_wr(struct ipn3ke_hw *hw,
+ struct ipn3ke_tm_tdrop_profile *tp)
+{
+ if (tp->valid) {
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_CCB_PROFILE_MS,
+ 0,
+ tp->th2,
+ IPN3KE_CCB_PROFILE_MS_MASK);
+
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_CCB_PROFILE_P,
+ tp->tdrop_profile_id,
+ tp->th1,
+ IPN3KE_CCB_PROFILE_MASK);
+ } else {
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_CCB_PROFILE_MS,
+ 0,
+ 0,
+ IPN3KE_CCB_PROFILE_MS_MASK);
+
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_CCB_PROFILE_P,
+ tp->tdrop_profile_id,
+ 0,
+ IPN3KE_CCB_PROFILE_MASK);
+ }
+
+ return 0;
+}
+
+/* Traffic manager TDROP profile add */
+static int
+ipn3ke_tm_tdrop_profile_add(struct rte_eth_dev *dev,
+ uint32_t tdrop_profile_id, struct rte_tm_wred_params *profile,
+ struct rte_tm_error *error)
+{
+ struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
+ struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
+ struct ipn3ke_tm_tdrop_profile *tp;
+ int status;
+ uint64_t min_th;
+ uint32_t th1, th2;
+
+ /* Check input params */
+ status = ipn3ke_tm_tdrop_profile_check(dev,
+ tdrop_profile_id,
+ profile,
+ error);
+ if (status)
+ return status;
+
+ /* Memory allocation */
+ tp = &hw->tdrop_profile[tdrop_profile_id];
+
+ /* Fill in */
+ tp->valid = 1;
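+ /* Split the green minimum threshold into the two hardware fields:
+ * th1 takes the low bits, th2 the bits above IPN3KE_TDROP_TH1_SHIFT.
+ */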
+ min_th = profile->red_params[RTE_COLOR_GREEN].min_th;
+ th1 = (uint32_t)(min_th & IPN3KE_TDROP_TH1_MASK);
+ th2 = (uint32_t)((min_th >> IPN3KE_TDROP_TH1_SHIFT) &
+ IPN3KE_TDROP_TH2_MASK);
+ tp->th1 = th1;
+ tp->th2 = th2;
+ rte_memcpy(&tp->params, profile, sizeof(tp->params));
+
+ /* Add to list */
+ tm->h.n_tdrop_profiles++;
+
+ /* Write FPGA */
+ ipn3ke_hw_tm_tdrop_wr(hw, tp);
+
+ return 0;
+}
+
+/* Traffic manager TDROP profile delete */
+static int
+ipn3ke_tm_tdrop_profile_delete(struct rte_eth_dev *dev,
+ uint32_t tdrop_profile_id, struct rte_tm_error *error)
+{
+ struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
+ struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
+ struct ipn3ke_tm_tdrop_profile *tp;
+
+ /* Check existing */
+ tp = ipn3ke_hw_tm_tdrop_profile_search(hw, tdrop_profile_id);
+ if (tp == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Check unused */
+ if (tp->n_users)
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
+ NULL,
+ rte_strerror(EBUSY));
+
+ /* Set free */
+ tp->valid = 0;
+ tm->h.n_tdrop_profiles--;
+
+ /* Write FPGA */
+ ipn3ke_hw_tm_tdrop_wr(hw, tp);
+
+ return 0;
+}
+
+static int
+ipn3ke_tm_node_add_check_parameter(uint32_t tm_id,
+ uint32_t node_id, uint32_t parent_node_id, uint32_t priority,
+ uint32_t weight, uint32_t level_id, struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ uint32_t level_of_node_id;
+ uint32_t node_index;
+ uint32_t parent_level_id;
+
+ if (node_id == RTE_TM_NODE_ID_NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* priority: must be 0, 1, 2, 3 */
+ if (priority > IPN3KE_TM_NODE_PRIORITY_HIGHEST)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PRIORITY,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* weight: must be 1 .. 255 */
+ if (weight > IPN3KE_TM_NODE_WEIGHT_MAX)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_WEIGHT,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* check node id and parent id*/
+ level_of_node_id = node_id / IPN3KE_TM_NODE_LEVEL_MOD;
+ if (level_of_node_id != level_id)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ node_index = node_id % IPN3KE_TM_NODE_LEVEL_MOD;
+ parent_level_id = parent_node_id / IPN3KE_TM_NODE_LEVEL_MOD;
+ switch (level_id) {
+ case IPN3KE_TM_NODE_LEVEL_PORT:
+ if (node_index != tm_id)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ if (parent_node_id != RTE_TM_NODE_ID_NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ break;
+
+ case IPN3KE_TM_NODE_LEVEL_VT:
+ if (node_index >= IPN3KE_TM_VT_NODE_NUM)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ if (parent_level_id != IPN3KE_TM_NODE_LEVEL_PORT)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ break;
+
+ case IPN3KE_TM_NODE_LEVEL_COS:
+ if (node_index >= IPN3KE_TM_COS_NODE_NUM)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ if (parent_level_id != IPN3KE_TM_NODE_LEVEL_VT)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ break;
+ default:
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ /* params: must not be NULL */
+ if (params == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS,
+ NULL,
+ rte_strerror(EINVAL));
+ /* No shared shapers */
+ if (params->n_shared_shapers != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
+ NULL,
+ rte_strerror(EINVAL));
+ return 0;
+}
+
+static int
+ipn3ke_tm_node_add_check_mount(uint32_t tm_id,
+ uint32_t node_id, uint32_t parent_node_id, uint32_t level_id,
+ struct rte_tm_error *error)
+{
+ uint32_t node_index;
+ uint32_t parent_index;
+ uint32_t parent_index1;
+
+ node_index = node_id % IPN3KE_TM_NODE_LEVEL_MOD;
+ parent_index = parent_node_id % IPN3KE_TM_NODE_LEVEL_MOD;
+ parent_index1 = node_index / IPN3KE_TM_NODE_MOUNT_MAX;
+ switch (level_id) {
+ case IPN3KE_TM_NODE_LEVEL_PORT:
+ break;
+
+ case IPN3KE_TM_NODE_LEVEL_VT:
+ if (parent_index != tm_id)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ break;
+
+ case IPN3KE_TM_NODE_LEVEL_COS:
+ if (parent_index != parent_index1)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ break;
+ default:
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ return 0;
+}
+
+/* Traffic manager node add */
+static int
+ipn3ke_tm_node_add(struct rte_eth_dev *dev,
+ uint32_t node_id, uint32_t parent_node_id, uint32_t priority,
+ uint32_t weight, uint32_t level_id, struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
+ struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
+ uint32_t tm_id;
+ struct ipn3ke_tm_node *n, *parent_node;
+ uint32_t node_state, state_mask;
+ int status;
+
+ /* Checks */
+ if (tm->hierarchy_frozen)
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EBUSY));
+
+ tm_id = tm->tm_id;
+
+ status = ipn3ke_tm_node_add_check_parameter(tm_id,
+ node_id,
+ parent_node_id,
+ priority,
+ weight,
+ level_id,
+ params,
+ error);
+ if (status)
+ return status;
+
+ status = ipn3ke_tm_node_add_check_mount(tm_id,
+ node_id,
+ parent_node_id,
+ level_id,
+ error);
+ if (status)
+ return status;
+
+ /* Shaper profile ID must not be NONE. */
+ if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE &&
+ params->shaper_profile_id != node_id)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Memory allocation */
+ state_mask = 0;
+ IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_IDLE);
+ IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_CONFIGURED_DEL);
+ n = ipn3ke_hw_tm_node_search(hw, tm_id, node_id, state_mask);
+ if (!n)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ node_state = n->node_state;
+
+ /* Check parent node */
+ state_mask = 0;
+ IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_CONFIGURED_ADD);
+ IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
+ if (parent_node_id != RTE_TM_NODE_ID_NULL) {
+ parent_node = ipn3ke_hw_tm_node_search(hw,
+ tm_id,
+ parent_node_id,
+ state_mask);
+ if (!parent_node)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ } else {
+ parent_node = NULL;
+ }
+
+ switch (level_id) {
+ case IPN3KE_TM_NODE_LEVEL_PORT:
+ n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_ADD;
+ n->tm_id = tm_id;
+ tm->h.port_commit_node = n;
+ break;
+
+ case IPN3KE_TM_NODE_LEVEL_VT:
+ if (node_state == IPN3KE_TM_NODE_STATE_IDLE) {
+ TAILQ_INSERT_TAIL(&tm->h.vt_commit_node_list, n, node);
+ if (parent_node)
+ parent_node->n_children++;
+ tm->h.n_vt_nodes++;
+ } else if (node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
+ if (parent_node)
+ parent_node->n_children++;
+ tm->h.n_vt_nodes++;
+ }
+ n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_ADD;
+ n->parent_node_id = parent_node_id;
+ n->tm_id = tm_id;
+ n->parent_node = parent_node;
+
+ break;
+
+ case IPN3KE_TM_NODE_LEVEL_COS:
+ if (node_state == IPN3KE_TM_NODE_STATE_IDLE) {
+ TAILQ_INSERT_TAIL(&tm->h.cos_commit_node_list,
+ n, node);
+ if (parent_node)
+ parent_node->n_children++;
+ tm->h.n_cos_nodes++;
+ } else if (node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
+ if (parent_node)
+ parent_node->n_children++;
+ tm->h.n_cos_nodes++;
+ }
+ n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_ADD;
+ n->parent_node_id = parent_node_id;
+ n->tm_id = tm_id;
+ n->parent_node = parent_node;
+
+ break;
+ default:
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ /* Fill in */
+ n->priority = priority;
+ n->weight = weight;
+
+ if (n->level == IPN3KE_TM_NODE_LEVEL_COS &&
+ params->leaf.cman == RTE_TM_CMAN_TAIL_DROP)
+ n->tdrop_profile = ipn3ke_hw_tm_tdrop_profile_search(hw,
+ params->leaf.wred.wred_profile_id);
+
+ rte_memcpy(&n->params, params, sizeof(n->params));
+
+ return 0;
+}
+
+static int
+ipn3ke_tm_node_del_check_parameter(uint32_t tm_id,
+ uint32_t node_id, struct rte_tm_error *error)
+{
+ uint32_t level_of_node_id;
+ uint32_t node_index;
+
+ if (node_id == RTE_TM_NODE_ID_NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* check node id and parent id*/
+ level_of_node_id = node_id / IPN3KE_TM_NODE_LEVEL_MOD;
+ node_index = node_id % IPN3KE_TM_NODE_LEVEL_MOD;
+ switch (level_of_node_id) {
+ case IPN3KE_TM_NODE_LEVEL_PORT:
+ if (node_index != tm_id)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ break;
+
+ case IPN3KE_TM_NODE_LEVEL_VT:
+ if (node_index >= IPN3KE_TM_VT_NODE_NUM)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ break;
+
+ case IPN3KE_TM_NODE_LEVEL_COS:
+ if (node_index >= IPN3KE_TM_COS_NODE_NUM)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ break;
+ default:
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ return 0;
+}
+
+/* Traffic manager node delete */
+static int
+ipn3ke_pmd_tm_node_delete(struct rte_eth_dev *dev,
+ uint32_t node_id, struct rte_tm_error *error)
+{
+ struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
+ struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
+ struct ipn3ke_tm_node *n, *parent_node;
+ uint32_t tm_id;
+ int status;
+ uint32_t level_of_node_id;
+ uint32_t node_state;
+ uint32_t state_mask;
+
+ /* Check hierarchy changes are currently allowed */
+ if (tm->hierarchy_frozen)
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EBUSY));
+
+ tm_id = tm->tm_id;
+
+ status = ipn3ke_tm_node_del_check_parameter(tm_id,
+ node_id,
+ error);
+ if (status)
+ return status;
+
+ /* Check existing */
+ state_mask = 0;
+ IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_CONFIGURED_ADD);
+ IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
+ n = ipn3ke_hw_tm_node_search(hw, tm_id, node_id, state_mask);
+ if (n == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ if (n->n_children > 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ node_state = n->node_state;
+
+ level_of_node_id = node_id / IPN3KE_TM_NODE_LEVEL_MOD;
+
+ /* Check parent node */
+ if (n->parent_node_id != RTE_TM_NODE_ID_NULL) {
+ state_mask = 0;
+ IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_CONFIGURED_ADD);
+ IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED);
+ parent_node = ipn3ke_hw_tm_node_search(hw,
+ tm_id,
+ n->parent_node_id,
+ state_mask);
+ if (!parent_node)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ if (n->parent_node != parent_node)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ } else {
+ parent_node = NULL;
+ }
+
+ switch (level_of_node_id) {
+ case IPN3KE_TM_NODE_LEVEL_PORT:
+ if (tm->h.port_node != n)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_DEL;
+ tm->h.port_commit_node = n;
+
+ break;
+
+ case IPN3KE_TM_NODE_LEVEL_VT:
+ if (node_state == IPN3KE_TM_NODE_STATE_COMMITTED) {
+ if (parent_node)
+ TAILQ_REMOVE(&parent_node->children_node_list,
+ n, node);
+ TAILQ_INSERT_TAIL(&tm->h.vt_commit_node_list, n, node);
+ if (parent_node)
+ parent_node->n_children--;
+ tm->h.n_vt_nodes--;
+ } else if (node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
+ if (parent_node)
+ parent_node->n_children--;
+ tm->h.n_vt_nodes--;
+ }
+ n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_DEL;
+
+ break;
+
+ case IPN3KE_TM_NODE_LEVEL_COS:
+ if (node_state == IPN3KE_TM_NODE_STATE_COMMITTED) {
+ if (parent_node)
+ TAILQ_REMOVE(&parent_node->children_node_list,
+ n, node);
+ TAILQ_INSERT_TAIL(&tm->h.cos_commit_node_list,
+ n, node);
+ if (parent_node)
+ parent_node->n_children--;
+ tm->h.n_cos_nodes--;
+ } else if (node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
+ if (parent_node)
+ parent_node->n_children--;
+ tm->h.n_cos_nodes--;
+ }
+ n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_DEL;
+
+ break;
+ default:
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ return 0;
+}
+
+static int
+ipn3ke_tm_hierarchy_commit_check(struct rte_eth_dev *dev,
+ struct rte_tm_error *error)
+{
+ struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
+ uint32_t tm_id;
+ struct ipn3ke_tm_node_list *nl;
+ struct ipn3ke_tm_node *n, *parent_node;
+
+ tm_id = tm->tm_id;
+
+ nl = &tm->h.cos_commit_node_list;
+ TAILQ_FOREACH(n, nl, node) {
+ parent_node = n->parent_node;
+ if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
+ if (n->parent_node_id == RTE_TM_NODE_ID_NULL ||
+ n->level != IPN3KE_TM_NODE_LEVEL_COS ||
+ n->tm_id != tm_id ||
+ parent_node == NULL ||
+ (parent_node &&
+ parent_node->node_state ==
+ IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) ||
+ (parent_node &&
+ parent_node->node_state ==
+ IPN3KE_TM_NODE_STATE_IDLE) ||
+ n->shaper_profile.valid == 0) {
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+ } else if (n->node_state ==
+ IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
+ if (n->level != IPN3KE_TM_NODE_LEVEL_COS ||
+ n->n_children != 0) {
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+ }
+ }
+
+ nl = &tm->h.vt_commit_node_list;
+ TAILQ_FOREACH(n, nl, node) {
+ parent_node = n->parent_node;
+ if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
+ if (n->parent_node_id == RTE_TM_NODE_ID_NULL ||
+ n->level != IPN3KE_TM_NODE_LEVEL_VT ||
+ n->tm_id != tm_id ||
+ parent_node == NULL ||
+ (parent_node &&
+ parent_node->node_state ==
+ IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) ||
+ (parent_node &&
+ parent_node->node_state ==
+ IPN3KE_TM_NODE_STATE_IDLE) ||
+ n->shaper_profile.valid == 0) {
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+ } else if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_DEL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ n = tm->h.port_commit_node;
+ if (n &&
+ (n->parent_node_id != RTE_TM_NODE_ID_NULL ||
+ n->level != IPN3KE_TM_NODE_LEVEL_PORT ||
+ n->tm_id != tm_id ||
+ n->parent_node != NULL ||
+ n->shaper_profile.valid == 0)) {
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ return 0;
+}
+
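+/*
+ * Program one node into the FPGA QoS registers for its level:
+ * scheduling type, scheduler weight, shaper weight, the mapping to
+ * the parent level and, for COS leaves, the tail-drop profile and
+ * queue-to-port binding.
+ */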
+static int
+ipn3ke_hw_tm_node_wr(struct ipn3ke_hw *hw,
+ struct ipn3ke_tm_node *n,
+ struct ipn3ke_tm_node *parent_node)
+{
+ uint32_t level;
+
+ level = n->level;
+
+ switch (level) {
+ case IPN3KE_TM_NODE_LEVEL_PORT:
+ /**
+ * Configure Type
+ */
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_QOS_TYPE_L3_X,
+ n->node_index,
+ n->priority,
+ IPN3KE_QOS_TYPE_MASK);
+
+ /**
+ * Configure Sch_wt
+ */
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_QOS_SCH_WT_L3_X,
+ n->node_index,
+ n->weight,
+ IPN3KE_QOS_SCH_WT_MASK);
+
+ /**
+ * Configure Shap_wt
+ */
+ if (n->shaper_profile.valid)
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_QOS_SHAP_WT_L3_X,
+ n->node_index,
+ ((n->shaper_profile.e << 10) |
+ n->shaper_profile.m),
+ IPN3KE_QOS_SHAP_WT_MASK);
+
+ break;
+ case IPN3KE_TM_NODE_LEVEL_VT:
+ /**
+ * Configure Type
+ */
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_QOS_TYPE_L2_X,
+ n->node_index,
+ n->priority,
+ IPN3KE_QOS_TYPE_MASK);
+
+ /**
+ * Configure Sch_wt
+ */
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_QOS_SCH_WT_L2_X,
+ n->node_index,
+ n->weight,
+ IPN3KE_QOS_SCH_WT_MASK);
+
+ /**
+ * Configure Shap_wt
+ */
+ if (n->shaper_profile.valid)
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_QOS_SHAP_WT_L2_X,
+ n->node_index,
+ ((n->shaper_profile.e << 10) |
+ n->shaper_profile.m),
+ IPN3KE_QOS_SHAP_WT_MASK);
+
+ /**
+ * Configure Map
+ */
+ if (parent_node)
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_QOS_MAP_L2_X,
+ n->node_index,
+ parent_node->node_index,
+ IPN3KE_QOS_MAP_L2_MASK);
+
+ break;
+ case IPN3KE_TM_NODE_LEVEL_COS:
+ /**
+ * Configure Tail Drop mapping
+ */
+ if (n->tdrop_profile && n->tdrop_profile->valid) {
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_CCB_QPROFILE_Q,
+ n->node_index,
+ n->tdrop_profile->tdrop_profile_id,
+ IPN3KE_CCB_QPROFILE_MASK);
+ }
+
+ /**
+ * Configure Type
+ */
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_QOS_TYPE_L1_X,
+ n->node_index,
+ n->priority,
+ IPN3KE_QOS_TYPE_MASK);
+
+ /**
+ * Configure Sch_wt
+ */
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_QOS_SCH_WT_L1_X,
+ n->node_index,
+ n->weight,
+ IPN3KE_QOS_SCH_WT_MASK);
+
+ /**
+ * Configure Shap_wt
+ */
+ if (n->shaper_profile.valid)
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_QOS_SHAP_WT_L1_X,
+ n->node_index,
+ ((n->shaper_profile.e << 10) |
+ n->shaper_profile.m),
+ IPN3KE_QOS_SHAP_WT_MASK);
+
+ /**
+ * Configure COS queue to port
+ */
+ while (IPN3KE_MASK_READ_REG(hw,
+ IPN3KE_QM_UID_CONFIG_CTRL,
+ 0,
+ 0x80000000))
+ ;
+
+ if (parent_node && parent_node->parent_node)
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_QM_UID_CONFIG_DATA,
+ 0,
+ (1 << 8 | parent_node->parent_node->node_index),
+ 0x1FF);
+
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_QM_UID_CONFIG_CTRL,
+ 0,
+ n->node_index,
+ 0xFFFFF);
+
+ while (IPN3KE_MASK_READ_REG(hw,
+ IPN3KE_QM_UID_CONFIG_CTRL,
+ 0,
+ 0x80000000))
+ ;
+
+ /**
+ * Configure Map
+ */
+ if (parent_node)
+ IPN3KE_MASK_WRITE_REG(hw,
+ IPN3KE_QOS_MAP_L1_X,
+ n->node_index,
+ parent_node->node_index,
+ IPN3KE_QOS_MAP_L1_MASK);
+
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
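+/*
+ * Flush the pending commit lists to hardware: CONFIGURED_ADD nodes
+ * become COMMITTED and are linked under their parents; CONFIGURED_DEL
+ * nodes are reset to IDLE. Register state is rewritten either way.
+ */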
+static int
+ipn3ke_tm_hierarchy_hw_commit(struct rte_eth_dev *dev,
+ struct rte_tm_error *error)
+{
+ struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
+ struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
+ struct ipn3ke_tm_node_list *nl;
+ struct ipn3ke_tm_node *n, *nn, *parent_node;
+
+ n = tm->h.port_commit_node;
+ if (n) {
+ if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
+ tm->h.port_commit_node = NULL;
+
+ n->node_state = IPN3KE_TM_NODE_STATE_COMMITTED;
+ } else if (n->node_state ==
+ IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
+ tm->h.port_commit_node = NULL;
+
+ n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
+ n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
+ n->weight = 0;
+ n->tm_id = RTE_TM_NODE_ID_NULL;
+ } else {
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+ parent_node = n->parent_node;
+ ipn3ke_hw_tm_node_wr(hw, n, parent_node);
+ }
+
+ nl = &tm->h.vt_commit_node_list;
+ for (n = TAILQ_FIRST(nl); n != NULL; n = nn) {
+ nn = TAILQ_NEXT(n, node);
+ if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
+ n->node_state = IPN3KE_TM_NODE_STATE_COMMITTED;
+ parent_node = n->parent_node;
+ TAILQ_REMOVE(nl, n, node);
+ TAILQ_INSERT_TAIL(&parent_node->children_node_list,
+ n, node);
+ } else if (n->node_state ==
+ IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
+ parent_node = n->parent_node;
+ TAILQ_REMOVE(nl, n, node);
+
+ n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
+ n->parent_node_id = RTE_TM_NODE_ID_NULL;
+ n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
+ n->weight = 0;
+ n->tm_id = RTE_TM_NODE_ID_NULL;
+ n->parent_node = NULL;
+ } else {
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+ ipn3ke_hw_tm_node_wr(hw, n, parent_node);
+ }
+
+ nl = &tm->h.cos_commit_node_list;
+ for (n = TAILQ_FIRST(nl); n != NULL; n = nn) {
+ nn = TAILQ_NEXT(n, node);
+ if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) {
+ n->node_state = IPN3KE_TM_NODE_STATE_COMMITTED;
+ parent_node = n->parent_node;
+ TAILQ_REMOVE(nl, n, node);
+ TAILQ_INSERT_TAIL(&parent_node->children_node_list,
+ n, node);
+ } else if (n->node_state ==
+ IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) {
+ parent_node = n->parent_node;
+ TAILQ_REMOVE(nl, n, node);
+
+ n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
+ n->parent_node_id = RTE_TM_NODE_ID_NULL;
+ n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
+ n->weight = 0;
+ n->tm_id = RTE_TM_NODE_ID_NULL;
+ n->parent_node = NULL;
+
+ if (n->tdrop_profile)
+ n->tdrop_profile->n_users--;
+ } else {
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+ ipn3ke_hw_tm_node_wr(hw, n, parent_node);
+ }
+
+ return 0;
+}
+
+static int
+ipn3ke_tm_hierarchy_commit_clear(struct rte_eth_dev *dev)
+{
+ struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
+ struct ipn3ke_tm_node_list *nl;
+ struct ipn3ke_tm_node *n;
+ struct ipn3ke_tm_node *nn;
+
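+ /*
+ * Roll back all uncommitted nodes to idle; this touches software
+ * state only, no hardware registers are written here.
+ */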
+ n = tm->h.port_commit_node;
+ if (n) {
+ n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
+ n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
+ n->weight = 0;
+ n->tm_id = RTE_TM_NODE_ID_NULL;
+ n->n_children = 0;
+
+ tm->h.port_commit_node = NULL;
+ }
+
+ nl = &tm->h.vt_commit_node_list;
+ for (n = TAILQ_FIRST(nl); n != NULL; n = nn) {
+ nn = TAILQ_NEXT(n, node);
+
+ n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
+ n->parent_node_id = RTE_TM_NODE_ID_NULL;
+ n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
+ n->weight = 0;
+ n->tm_id = RTE_TM_NODE_ID_NULL;
+ n->parent_node = NULL;
+ n->n_children = 0;
+ tm->h.n_vt_nodes--;
+
+ TAILQ_REMOVE(nl, n, node);
+ }
+
+ nl = &tm->h.cos_commit_node_list;
+ for (n = TAILQ_FIRST(nl); n != NULL; n = nn) {
+ nn = TAILQ_NEXT(n, node);
+
+ n->node_state = IPN3KE_TM_NODE_STATE_IDLE;
+ n->parent_node_id = RTE_TM_NODE_ID_NULL;
+ n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0;
+ n->weight = 0;
+ n->tm_id = RTE_TM_NODE_ID_NULL;
+ n->parent_node = NULL;
+ tm->h.n_cos_nodes--;
+
+ TAILQ_REMOVE(nl, n, node);
+ }
+
+ return 0;
+}
+
+static void
+ipn3ke_tm_show(struct rte_eth_dev *dev)
+{
+ struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
+ uint32_t tm_id;
+ struct ipn3ke_tm_node_list *vt_nl, *cos_nl;
+ struct ipn3ke_tm_node *port_n, *vt_n, *cos_n;
+ const char *str_state[IPN3KE_TM_NODE_STATE_MAX] = {"Idle",
+ "CfgAdd",
+ "CfgDel",
+ "Committed"};
+
+ tm_id = tm->tm_id;
+
+ IPN3KE_AFU_PMD_DEBUG("***HQoS Tree(%d)***\n", tm_id);
+
+ port_n = tm->h.port_node;
+ IPN3KE_AFU_PMD_DEBUG("Port: (%d|%s)\n", port_n->node_index,
+ str_state[port_n->node_state]);
+
+ vt_nl = &tm->h.port_node->children_node_list;
+ TAILQ_FOREACH(vt_n, vt_nl, node) {
+ cos_nl = &vt_n->children_node_list;
+ IPN3KE_AFU_PMD_DEBUG(" VT%d: ", vt_n->node_index);
+ TAILQ_FOREACH(cos_n, cos_nl, node) {
+ if (cos_n->parent_node_id !=
+ (vt_n->node_index + IPN3KE_TM_NODE_LEVEL_MOD))
+ IPN3KE_AFU_PMD_ERR("(%d|%s), ",
+ cos_n->node_index,
+ str_state[cos_n->node_state]);
+ }
+ IPN3KE_AFU_PMD_DEBUG("\n");
+ }
+}
+
+static void
+ipn3ke_tm_show_commit(struct rte_eth_dev *dev)
+{
+ struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
+ uint32_t tm_id;
+ struct ipn3ke_tm_node_list *nl;
+ struct ipn3ke_tm_node *n;
+ const char *str_state[IPN3KE_TM_NODE_STATE_MAX] = {"Idle",
+ "CfgAdd",
+ "CfgDel",
+ "Committed"};
+
+ tm_id = tm->tm_id;
+
+ IPN3KE_AFU_PMD_DEBUG("***Commit Tree(%d)***\n", tm_id);
+ n = tm->h.port_commit_node;
+ IPN3KE_AFU_PMD_DEBUG("Port: ");
+ if (n)
+ IPN3KE_AFU_PMD_DEBUG("(%d|%s)",
+ n->node_index,
+ str_state[n->node_state]);
+ IPN3KE_AFU_PMD_DEBUG("\n");
+
+ nl = &tm->h.vt_commit_node_list;
+ IPN3KE_AFU_PMD_DEBUG("VT : ");
+ TAILQ_FOREACH(n, nl, node) {
+ IPN3KE_AFU_PMD_DEBUG("(%d|%s), ",
+ n->node_index,
+ str_state[n->node_state]);
+ }
+ IPN3KE_AFU_PMD_DEBUG("\n");
+
+ nl = &tm->h.cos_commit_node_list;
+ IPN3KE_AFU_PMD_DEBUG("COS : ");
+ TAILQ_FOREACH(n, nl, node) {
+ IPN3KE_AFU_PMD_DEBUG("(%d|%s), ",
+ n->node_index,
+ str_state[n->node_state]);
+ }
+ IPN3KE_AFU_PMD_DEBUG("\n");
+}
+
+/* Traffic manager hierarchy commit */
+static int
+ipn3ke_tm_hierarchy_commit(struct rte_eth_dev *dev,
+ int clear_on_fail, struct rte_tm_error *error)
+{
+ struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev);
+ int status;
+
+ /* Checks */
+ if (tm->hierarchy_frozen)
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EBUSY));
+
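+ /* Dump the pending commit lists for debugging before validation */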
+ ipn3ke_tm_show_commit(dev);
+
+ status = ipn3ke_tm_hierarchy_commit_check(dev, error);
+ if (status) {
+ if (clear_on_fail)
+ ipn3ke_tm_hierarchy_commit_clear(dev);
+ return status;
+ }
+
+ status = ipn3ke_tm_hierarchy_hw_commit(dev, error);
+ if (status) {
+ if (clear_on_fail)
+ ipn3ke_tm_hierarchy_commit_clear(dev);
+ return status;
+ }
+
+ ipn3ke_tm_show(dev);
+
+ return 0;
+}
+
+const struct rte_tm_ops ipn3ke_tm_ops = {
+ .node_type_get = ipn3ke_pmd_tm_node_type_get,
+ .capabilities_get = ipn3ke_tm_capabilities_get,
+ .level_capabilities_get = ipn3ke_tm_level_capabilities_get,
+ .node_capabilities_get = ipn3ke_tm_node_capabilities_get,
+
+ .wred_profile_add = ipn3ke_tm_tdrop_profile_add,
+ .wred_profile_delete = ipn3ke_tm_tdrop_profile_delete,
+ .shared_wred_context_add_update = NULL,
+ .shared_wred_context_delete = NULL,
+
+ .shaper_profile_add = ipn3ke_tm_shaper_profile_add,
+ .shaper_profile_delete = ipn3ke_tm_shaper_profile_delete,
+ .shared_shaper_add_update = NULL,
+ .shared_shaper_delete = NULL,
+
+ .node_add = ipn3ke_tm_node_add,
+ .node_delete = ipn3ke_pmd_tm_node_delete,
+ .node_suspend = NULL,
+ .node_resume = NULL,
+ .hierarchy_commit = ipn3ke_tm_hierarchy_commit,
+
+ .node_parent_update = NULL,
+ .node_shaper_update = NULL,
+ .node_shared_shaper_update = NULL,
+ .node_stats_update = NULL,
+ .node_wfq_weight_mode_update = NULL,
+ .node_cman_update = NULL,
+ .node_wred_context_update = NULL,
+ .node_shared_wred_context_update = NULL,
+
+ .node_stats_read = NULL,
+};
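+
+/*
+ * These handlers are not called directly by applications; they are
+ * reached through the generic rte_tm API once ipn3ke_tm_ops_get()
+ * below returns this table. A minimal sketch, assuming port_id is a
+ * hypothetical ipn3ke representor port with a configured hierarchy:
+ *
+ *	struct rte_tm_error err = { 0 };
+ *
+ *	if (rte_tm_hierarchy_commit(port_id, 1, &err) != 0)
+ *		printf("TM commit failed: %s\n",
+ *			err.message ? err.message : "unknown");
+ */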
+
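+/*
+ * Resolve which TM ops table to expose: the ipn3ke table above when the
+ * AFU carries the hardware QoS block (hw->acc_tm), otherwise the bound
+ * i40e PF ethdev's own tm_ops_get callback; -EINVAL when neither is
+ * available.
+ */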
+int
+ipn3ke_tm_ops_get(struct rte_eth_dev *ethdev,
+ void *arg)
+{
+ struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
+ struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
+ struct rte_eth_dev *i40e_pf_eth;
+ const struct rte_tm_ops *ops;
+
+ if (!arg)
+ return -EINVAL;
+
+ if (hw->acc_tm) {
+ *(const void **)arg = &ipn3ke_tm_ops;
+ } else if (rpst->i40e_pf_eth) {
+ i40e_pf_eth = rpst->i40e_pf_eth;
+ if (i40e_pf_eth->dev_ops->tm_ops_get == NULL ||
+ i40e_pf_eth->dev_ops->tm_ops_get(i40e_pf_eth,
+ &ops) != 0 ||
+ ops == NULL) {
+ return -EINVAL;
+ }
+ *(const void **)arg = ops;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/ipn3ke/meson.build b/src/spdk/dpdk/drivers/net/ipn3ke/meson.build
new file mode 100644
index 000000000..ec9cb7daf
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ipn3ke/meson.build
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2019 Intel Corporation
+
+#
+# Add the experimental APIs called from this PMD
+# rte_eth_switch_domain_alloc()
+# rte_eth_dev_create()
+# rte_eth_dev_destroy()
+# rte_eth_switch_domain_free()
+#
+
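+# Try the pkg-config dependency first, then fall back to probing the
+# library directly; if neither is found, skip building this PMD.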
+dep = dependency('libfdt', required: false)
+if not dep.found()
+ dep = cc.find_library('libfdt', required: false)
+endif
+if not dep.found()
+ build = false
+ reason = 'missing dependency, "libfdt"'
+ subdir_done()
+endif
+
+includes += include_directories('../../raw/ifpga')
+
+sources += files('ipn3ke_ethdev.c',
+ 'ipn3ke_representor.c',
+ 'ipn3ke_tm.c',
+ 'ipn3ke_flow.c')
+deps += ['bus_ifpga', 'ethdev', 'sched']
diff --git a/src/spdk/dpdk/drivers/net/ipn3ke/rte_pmd_ipn3ke_version.map b/src/spdk/dpdk/drivers/net/ipn3ke/rte_pmd_ipn3ke_version.map
new file mode 100644
index 000000000..7e348e99b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/ipn3ke/rte_pmd_ipn3ke_version.map
@@ -0,0 +1,9 @@
+DPDK_20.0 {
+ local: *;
+};
+
+EXPERIMENTAL {
+ global:
+
+ ipn3ke_bridge_func;
+};