author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit    2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree      848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/net/ethernet/marvell/octeontx2/af
parent    Initial commit. (diff)
Adding upstream version 6.1.76. (tag: upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2/af')
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/Makefile  14
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.c  1870
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.h  185
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h  266
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/common.h  234
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h  158
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.c  416
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.h  2153
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mcs.c  1617
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mcs.h  246
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c  277
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h  1129
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c  926
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc.h  626
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h  15266
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/ptp.c  530
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/ptp.h  32
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rpm.c  502
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rpm.h  100
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c  3426
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h  912
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c  1254
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c  543
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c  1086
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c  3423
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c  1632
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h  82
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c  5440
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c  601
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c  3463
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c  1666
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h  21
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c  2053
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h  239
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c  68
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h  733
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c  111
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h  823
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c  262
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c  13
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h  106
41 files changed, 54504 insertions, 0 deletions
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
new file mode 100644
index 000000000..3cf4c8285
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Marvell's RVU Admin Function driver
+#
+
+ccflags-y += -I$(src)
+obj-$(CONFIG_OCTEONTX2_MBOX) += rvu_mbox.o
+obj-$(CONFIG_OCTEONTX2_AF) += rvu_af.o
+
+rvu_mbox-y := mbox.o rvu_trace.o
+rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
+ rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \
+ rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o \
+ rvu_sdp.o rvu_npc_hash.o mcs.o mcs_rvu_if.o mcs_cnf10kb.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
new file mode 100644
index 000000000..90be87dc1
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -0,0 +1,1870 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 CGX driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+
+#include "cgx.h"
+#include "rvu.h"
+#include "lmac_common.h"
+
+#define DRV_NAME "Marvell-CGX/RPM"
+#define DRV_STRING "Marvell CGX/RPM Driver"
+
+static LIST_HEAD(cgx_list);
+
+/* Convert firmware speed encoding to user format (Mbps) */
+static const u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX] = {
+ [CGX_LINK_NONE] = 0,
+ [CGX_LINK_10M] = 10,
+ [CGX_LINK_100M] = 100,
+ [CGX_LINK_1G] = 1000,
+ [CGX_LINK_2HG] = 2500,
+ [CGX_LINK_5G] = 5000,
+ [CGX_LINK_10G] = 10000,
+ [CGX_LINK_20G] = 20000,
+ [CGX_LINK_25G] = 25000,
+ [CGX_LINK_40G] = 40000,
+ [CGX_LINK_50G] = 50000,
+ [CGX_LINK_80G] = 80000,
+ [CGX_LINK_100G] = 100000,
+};
+
+/* Convert firmware lmac type encoding to string */
+static const char *cgx_lmactype_string[LMAC_MODE_MAX] = {
+ [LMAC_MODE_SGMII] = "SGMII",
+ [LMAC_MODE_XAUI] = "XAUI",
+ [LMAC_MODE_RXAUI] = "RXAUI",
+ [LMAC_MODE_10G_R] = "10G_R",
+ [LMAC_MODE_40G_R] = "40G_R",
+ [LMAC_MODE_QSGMII] = "QSGMII",
+ [LMAC_MODE_25G_R] = "25G_R",
+ [LMAC_MODE_50G_R] = "50G_R",
+ [LMAC_MODE_100G_R] = "100G_R",
+ [LMAC_MODE_USXGMII] = "USXGMII",
+};
+
+/* CGX PHY management internal APIs */
+static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);
+
+/* Supported devices */
+static const struct pci_device_id cgx_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) },
+ { 0, } /* end of table */
+};
+
+MODULE_DEVICE_TABLE(pci, cgx_id_table);
+
+static bool is_dev_rpm(void *cgxd)
+{
+ struct cgx *cgx = cgxd;
+
+ return (cgx->pdev->device == PCI_DEVID_CN10K_RPM);
+}
+
+bool is_lmac_valid(struct cgx *cgx, int lmac_id)
+{
+ if (!cgx || lmac_id < 0 || lmac_id >= cgx->max_lmac_per_mac)
+ return false;
+ return test_bit(lmac_id, &cgx->lmac_bmap);
+}
+
+/* Helper function to get the sequential index of an enabled
+ * LMAC within a CGX
+ */
+static int get_sequence_id_of_lmac(struct cgx *cgx, int lmac_id)
+{
+ int tmp, id = 0;
+
+ for_each_set_bit(tmp, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
+ if (tmp == lmac_id)
+ break;
+ id++;
+ }
+
+ return id;
+}
+
+struct mac_ops *get_mac_ops(void *cgxd)
+{
+ if (!cgxd)
+ return cgxd;
+
+ return ((struct cgx *)cgxd)->mac_ops;
+}
+
+void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
+{
+ writeq(val, cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
+ offset);
+}
+
+u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
+{
+ return readq(cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
+ offset);
+}
+
+struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
+{
+ if (!cgx || lmac_id >= cgx->max_lmac_per_mac)
+ return NULL;
+
+ return cgx->lmac_idmap[lmac_id];
+}
+
+int cgx_get_cgxcnt_max(void)
+{
+ struct cgx *cgx_dev;
+ int idmax = -ENODEV;
+
+ list_for_each_entry(cgx_dev, &cgx_list, cgx_list)
+ if (cgx_dev->cgx_id > idmax)
+ idmax = cgx_dev->cgx_id;
+
+ if (idmax < 0)
+ return 0;
+
+ return idmax + 1;
+}
+
+int cgx_get_lmac_cnt(void *cgxd)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!cgx)
+ return -ENODEV;
+
+ return cgx->lmac_count;
+}
+
+void *cgx_get_pdata(int cgx_id)
+{
+ struct cgx *cgx_dev;
+
+ list_for_each_entry(cgx_dev, &cgx_list, cgx_list) {
+ if (cgx_dev->cgx_id == cgx_id)
+ return cgx_dev;
+ }
+ return NULL;
+}
+
+void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+
+ /* Software must not access disabled LMAC registers */
+ if (!is_lmac_valid(cgx_dev, lmac_id))
+ return;
+ cgx_write(cgx_dev, lmac_id, offset, val);
+}
+
+u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+
+ /* Software must not access disabled LMAC registers */
+ if (!is_lmac_valid(cgx_dev, lmac_id))
+ return 0;
+
+ return cgx_read(cgx_dev, lmac_id, offset);
+}
+
+int cgx_get_cgxid(void *cgxd)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!cgx)
+ return -EINVAL;
+
+ return cgx->cgx_id;
+}
+
+u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ u64 cfg;
+
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_CFG);
+
+ return (cfg & CMR_P2X_SEL_MASK) >> CMR_P2X_SEL_SHIFT;
+}
+
+/* Ensure the required lock for the event queue (where asynchronous events are
+ * posted) is acquired before calling this API. Otherwise an asynchronous
+ * event (with the latest link status) can reach the destination before this
+ * function returns and make the link status appear wrong.
+ */
+int cgx_get_link_info(void *cgxd, int lmac_id,
+ struct cgx_link_user_info *linfo)
+{
+ struct lmac *lmac = lmac_pdata(lmac_id, cgxd);
+
+ if (!lmac)
+ return -ENODEV;
+
+ *linfo = lmac->link_info;
+ return 0;
+}
+
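+/* Pack a 6-byte MAC address into the u64 layout used by the DMAC CAM registers */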
+static u64 mac2u64(u8 *mac_addr)
+{
+ u64 mac = 0;
+ int index;
+
+ for (index = ETH_ALEN - 1; index >= 0; index--)
+ mac |= ((u64)*mac_addr++) << (8 * index);
+ return mac;
+}
+
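+/* Unpack a DMAC CAM register value back into a 6-byte MAC address */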
+static void cfg2mac(u64 cfg, u8 *mac_addr)
+{
+ int i, index = 0;
+
+ for (i = ETH_ALEN - 1; i >= 0; i--, index++)
+ mac_addr[i] = (cfg >> (8 * index)) & 0xFF;
+}
+
+int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+ struct mac_ops *mac_ops;
+ int index, id;
+ u64 cfg;
+
+ /* access mac_ops to know csr_offset */
+ mac_ops = cgx_dev->mac_ops;
+
+	/* Copy the 6-byte MAC address into register format */
+	cfg = mac2u64(mac_addr);
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max;
+
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)),
+ cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49));
+
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg |= (CGX_DMAC_CTL0_CAM_ENABLE | CGX_DMAC_BCAST_MODE |
+ CGX_DMAC_MCAST_MODE);
+ cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+
+ return 0;
+}
+
+u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id)
+{
+ struct mac_ops *mac_ops;
+ struct cgx *cgx = cgxd;
+
+ if (!cgxd || !is_lmac_valid(cgxd, lmac_id))
+ return 0;
+
+ cgx = cgxd;
+ /* Get mac_ops to know csr offset */
+ mac_ops = cgx->mac_ops;
+
+ return cgx_read(cgxd, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+}
+
+u64 cgx_read_dmac_entry(void *cgxd, int index)
+{
+ struct mac_ops *mac_ops;
+ struct cgx *cgx;
+
+ if (!cgxd)
+ return 0;
+
+ cgx = cgxd;
+ mac_ops = cgx->mac_ops;
+ return cgx_read(cgx, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 8)));
+}
+
+int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+ struct mac_ops *mac_ops;
+ int index, idx;
+ u64 cfg = 0;
+ int id;
+
+ if (!lmac)
+ return -ENODEV;
+
+ mac_ops = cgx_dev->mac_ops;
+ /* Get available index where entry is to be installed */
+ idx = rvu_alloc_rsrc(&lmac->mac_to_index_bmap);
+ if (idx < 0)
+ return idx;
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max + idx;
+
+	cfg = mac2u64(mac_addr);
+ cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
+ cfg |= ((u64)lmac_id << 49);
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
+
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_CAM_ACCEPT);
+
+ if (is_multicast_ether_addr(mac_addr)) {
+ cfg &= ~GENMASK_ULL(2, 1);
+ cfg |= CGX_DMAC_MCAST_MODE_CAM;
+ lmac->mcast_filters_count++;
+ } else if (!lmac->mcast_filters_count) {
+ cfg |= CGX_DMAC_MCAST_MODE;
+ }
+
+ cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+
+ return idx;
+}
+
+int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+ struct mac_ops *mac_ops;
+ u8 index = 0, id;
+ u64 cfg;
+
+ if (!lmac)
+ return -ENODEV;
+
+ mac_ops = cgx_dev->mac_ops;
+ /* Restore index 0 to its default init value as done during
+ * cgx_lmac_init
+ */
+ set_bit(0, lmac->mac_to_index_bmap.bmap);
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max + index;
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);
+
+ /* Reset CGXX_CMRX_RX_DMAC_CTL0 register to default state */
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg &= ~CGX_DMAC_CAM_ACCEPT;
+ cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
+ cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+
+ return 0;
+}
+
+/* Allows the caller to change the MAC address associated with an index
+ * in the DMAC filter table, including index 0 which is reserved for the
+ * interface MAC address
+ */
+int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct mac_ops *mac_ops;
+ struct lmac *lmac;
+ u64 cfg;
+ int id;
+
+ lmac = lmac_pdata(lmac_id, cgx_dev);
+ if (!lmac)
+ return -ENODEV;
+
+ mac_ops = cgx_dev->mac_ops;
+ /* Validate the index */
+ if (index >= lmac->mac_to_index_bmap.max)
+ return -EINVAL;
+
+ /* ensure index is already set */
+ if (!test_bit(index, lmac->mac_to_index_bmap.bmap))
+ return -EINVAL;
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max + index;
+
+ cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
+ cfg &= ~CGX_RX_DMAC_ADR_MASK;
+	cfg |= mac2u64(mac_addr);
+
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
+ return 0;
+}
+
+int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+ struct mac_ops *mac_ops;
+ u8 mac[ETH_ALEN];
+ u64 cfg;
+ int id;
+
+ if (!lmac)
+ return -ENODEV;
+
+ mac_ops = cgx_dev->mac_ops;
+ /* Validate the index */
+ if (index >= lmac->mac_to_index_bmap.max)
+ return -EINVAL;
+
+ /* Skip deletion for reserved index i.e. index 0 */
+ if (index == 0)
+ return 0;
+
+ rvu_free_rsrc(&lmac->mac_to_index_bmap, index);
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max + index;
+
+ /* Read MAC address to check whether it is ucast or mcast */
+ cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
+
+ cfg2mac(cfg, mac);
+ if (is_multicast_ether_addr(mac))
+ lmac->mcast_filters_count--;
+
+ if (!lmac->mcast_filters_count) {
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg &= ~GENMASK_ULL(2, 1);
+ cfg |= CGX_DMAC_MCAST_MODE;
+ cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+ }
+
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);
+
+ return 0;
+}
+
+int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+
+ if (lmac)
+ return lmac->mac_to_index_bmap.max;
+
+ return 0;
+}
+
+u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+ struct mac_ops *mac_ops;
+ int index;
+ u64 cfg;
+ int id;
+
+ mac_ops = cgx_dev->mac_ops;
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max;
+
+ cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8);
+ return cfg & CGX_RX_DMAC_ADR_MASK;
+}
+
+int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ cgx_write(cgx, lmac_id, CGXX_CMRX_RX_ID_MAP, (pkind & 0x3F));
+ return 0;
+}
+
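+/* Get the LMAC type (SGMII, XAUI, 10G_R, ...) programmed in CGXX_CMRX_CFG */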
+static u8 cgx_get_lmac_type(void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
+ return (cfg >> CGX_LMAC_TYPE_SHIFT) & CGX_LMAC_TYPE_MASK;
+}
+
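+/* The Rx FIFO is shared across LMACs; the per-LMAC share depends on the LMAC count */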
+static u32 cgx_get_lmac_fifo_len(void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ u8 num_lmacs;
+ u32 fifo_len;
+
+ fifo_len = cgx->mac_ops->fifo_len;
+ num_lmacs = cgx->mac_ops->get_nr_lmacs(cgx);
+
+ switch (num_lmacs) {
+ case 1:
+ return fifo_len;
+ case 2:
+ return fifo_len / 2;
+ case 3:
+		/* LMAC0 gets half of the FIFO, the others get 1/4th each */
+ if (lmac_id == 0)
+ return fifo_len / 2;
+ return fifo_len / 4;
+ case 4:
+ default:
+ return fifo_len / 4;
+ }
+ return 0;
+}
+
+/* Configure CGX LMAC in internal loopback mode */
+int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ u8 lmac_type;
+ u64 cfg;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac_id);
+ if (lmac_type == LMAC_MODE_SGMII || lmac_type == LMAC_MODE_QSGMII) {
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL);
+ if (enable)
+ cfg |= CGXX_GMP_PCS_MRX_CTL_LBK;
+ else
+ cfg &= ~CGXX_GMP_PCS_MRX_CTL_LBK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL, cfg);
+ } else {
+ cfg = cgx_read(cgx, lmac_id, CGXX_SPUX_CONTROL1);
+ if (enable)
+ cfg |= CGXX_SPUX_CONTROL1_LBK;
+ else
+ cfg &= ~CGXX_SPUX_CONTROL1_LBK;
+ cgx_write(cgx, lmac_id, CGXX_SPUX_CONTROL1, cfg);
+ }
+ return 0;
+}
+
+void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
+{
+	struct cgx *cgx = cgx_get_pdata(cgx_id);
+	struct lmac *lmac = lmac_pdata(lmac_id, cgx);
+	struct mac_ops *mac_ops;
+	u16 max_dmac;
+	int index, i;
+	u64 cfg = 0;
+	int id;
+
+	/* Validate cgx and lmac before dereferencing them */
+	if (!cgx || !lmac)
+		return;
+
+	max_dmac = lmac->mac_to_index_bmap.max;
+
+ id = get_sequence_id_of_lmac(cgx, lmac_id);
+
+ mac_ops = cgx->mac_ops;
+ if (enable) {
+ /* Enable promiscuous mode on LMAC */
+ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg &= ~CGX_DMAC_CAM_ACCEPT;
+ cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
+ cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+
+ for (i = 0; i < max_dmac; i++) {
+ index = id * max_dmac + i;
+ cfg = cgx_read(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
+ cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
+ cgx_write(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8), cfg);
+ }
+ } else {
+ /* Disable promiscuous mode */
+ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE;
+ cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+ for (i = 0; i < max_dmac; i++) {
+ index = id * max_dmac + i;
+ cfg = cgx_read(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
+ if ((cfg & CGX_RX_DMAC_ADR_MASK) != 0) {
+ cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
+ cgx_write(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 +
+ index * 0x8),
+ cfg);
+ }
+ }
+ }
+}
+
+static int cgx_lmac_get_pause_frm_status(void *cgxd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (is_dev_rpm(cgx))
+ return 0;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ *rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
+ *tx_pause = !!(cfg & CGX_SMUX_TX_CTL_L2P_BP_CONV);
+ return 0;
+}
+
+/* Enable or disable forwarding received pause frames to Tx block */
+void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ u8 rx_pause, tx_pause;
+ bool is_pfc_enabled;
+ struct lmac *lmac;
+ u64 cfg;
+
+ if (!cgx)
+ return;
+
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (!lmac)
+ return;
+
+	/* Pause frames are not enabled, just return */
+ if (!bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max))
+ return;
+
+ cgx_lmac_get_pause_frm_status(cgx, lmac_id, &rx_pause, &tx_pause);
+	is_pfc_enabled = !rx_pause;
+
+ if (enable) {
+ if (!is_pfc_enabled) {
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ } else {
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+ cfg |= CGXX_SMUX_CBFC_CTL_BCK_EN;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
+ }
+ } else {
+
+ if (!is_pfc_enabled) {
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ } else {
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+ cfg &= ~CGXX_SMUX_CBFC_CTL_BCK_EN;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
+ }
+ }
+}
+
+int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+ *rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
+ return 0;
+}
+
+int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+ *tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8));
+ return 0;
+}
+
+u64 cgx_features_get(void *cgxd)
+{
+ return ((struct cgx *)cgxd)->hw_features;
+}
+
+static int cgx_set_fec_stats_count(struct cgx_link_user_info *linfo)
+{
+ if (!linfo->fec)
+ return 0;
+
+ switch (linfo->lmac_type_id) {
+ case LMAC_MODE_SGMII:
+ case LMAC_MODE_XAUI:
+ case LMAC_MODE_RXAUI:
+ case LMAC_MODE_QSGMII:
+ return 0;
+ case LMAC_MODE_10G_R:
+ case LMAC_MODE_25G_R:
+ case LMAC_MODE_100G_R:
+ case LMAC_MODE_USXGMII:
+ return 1;
+ case LMAC_MODE_40G_R:
+ return 4;
+ case LMAC_MODE_50G_R:
+ if (linfo->fec == OTX2_FEC_BASER)
+ return 2;
+ else
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
+{
+ int stats, fec_stats_count = 0;
+ int corr_reg, uncorr_reg;
+ struct cgx *cgx = cgxd;
+
+ if (!cgx || lmac_id >= cgx->lmac_count)
+ return -ENODEV;
+ fec_stats_count =
+ cgx_set_fec_stats_count(&cgx->lmac_idmap[lmac_id]->link_info);
+ if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) {
+ corr_reg = CGXX_SPUX_LNX_FEC_CORR_BLOCKS;
+ uncorr_reg = CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS;
+ } else {
+ corr_reg = CGXX_SPUX_RSFEC_CORR;
+ uncorr_reg = CGXX_SPUX_RSFEC_UNCORR;
+ }
+ for (stats = 0; stats < fec_stats_count; stats++) {
+ rsp->fec_corr_blks +=
+ cgx_read(cgx, lmac_id, corr_reg + (stats * 8));
+ rsp->fec_uncorr_blks +=
+ cgx_read(cgx, lmac_id, uncorr_reg + (stats * 8));
+ }
+ return 0;
+}
+
+int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
+ if (enable)
+ cfg |= DATA_PKT_RX_EN | DATA_PKT_TX_EN;
+ else
+ cfg &= ~(DATA_PKT_RX_EN | DATA_PKT_TX_EN);
+ cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
+ return 0;
+}
+
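+/* Enable or disable packet Tx for an LMAC and return the previous Tx enable state */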
+int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg, last;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
+ last = cfg;
+ if (enable)
+ cfg |= DATA_PKT_TX_EN;
+ else
+ cfg &= ~DATA_PKT_TX_EN;
+
+ if (cfg != last)
+ cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
+ return !!(last & DATA_PKT_TX_EN);
+}
+
+static int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id,
+ u8 tx_pause, u8 rx_pause)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (is_dev_rpm(cgx))
+ return 0;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
+ cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
+ cfg |= tx_pause ? CGX_SMUX_TX_CTL_L2P_BP_CONV : 0x0;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+
+ cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
+ if (tx_pause) {
+ cfg &= ~CGX_CMR_RX_OVR_BP_EN(lmac_id);
+ } else {
+ cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
+ cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
+ }
+ cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
+ return 0;
+}
+
+static void cgx_lmac_pause_frm_config(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return;
+
+ if (enable) {
+ /* Set pause time and interval */
+ cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_TIME,
+ DEFAULT_PAUSE_TIME);
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL);
+ cfg &= ~0xFFFFULL;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL,
+ cfg | (DEFAULT_PAUSE_TIME / 2));
+
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_TIME,
+ DEFAULT_PAUSE_TIME);
+
+ cfg = cgx_read(cgx, lmac_id,
+ CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL);
+ cfg &= ~0xFFFFULL;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL,
+ cfg | (DEFAULT_PAUSE_TIME / 2));
+ }
+
+ /* ALL pause frames received are completely ignored */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ /* Disable pause frames transmission */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
+ cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+
+ cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
+ cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
+ cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
+ cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
+
+ /* Disable all PFC classes by default */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+ cfg = FIELD_SET(CGX_PFC_CLASS_MASK, 0, cfg);
+ cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
+}
+
+int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
+ int pfvf_idx)
+{
+ struct cgx *cgx = cgxd;
+ struct lmac *lmac;
+
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (!lmac)
+ return -ENODEV;
+
+ if (!rx_pause)
+ clear_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap);
+ else
+ set_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap);
+
+ if (!tx_pause)
+ clear_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap);
+ else
+ set_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap);
+
+ /* check if other pfvfs are using flow control */
+ if (!rx_pause && bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max)) {
+ dev_warn(&cgx->pdev->dev,
+			 "Receive Flow control disable not permitted as it is used by other PFVFs\n");
+ return -EPERM;
+ }
+
+ if (!tx_pause && bitmap_weight(lmac->tx_fc_pfvf_bmap.bmap, lmac->tx_fc_pfvf_bmap.max)) {
+ dev_warn(&cgx->pdev->dev,
+			 "Transmit Flow control disable not permitted as it is used by other PFVFs\n");
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+int cgx_lmac_pfc_config(void *cgxd, int lmac_id, u8 tx_pause,
+ u8 rx_pause, u16 pfc_en)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ /* Return as no traffic classes are requested */
+ if (tx_pause && !pfc_en)
+ return 0;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+ pfc_en |= FIELD_GET(CGX_PFC_CLASS_MASK, cfg);
+
+ if (rx_pause) {
+ cfg |= (CGXX_SMUX_CBFC_CTL_RX_EN |
+ CGXX_SMUX_CBFC_CTL_BCK_EN |
+ CGXX_SMUX_CBFC_CTL_DRP_EN);
+ } else {
+ cfg &= ~(CGXX_SMUX_CBFC_CTL_RX_EN |
+ CGXX_SMUX_CBFC_CTL_BCK_EN |
+ CGXX_SMUX_CBFC_CTL_DRP_EN);
+ }
+
+ if (tx_pause) {
+ cfg |= CGXX_SMUX_CBFC_CTL_TX_EN;
+ cfg = FIELD_SET(CGX_PFC_CLASS_MASK, pfc_en, cfg);
+ } else {
+ cfg &= ~CGXX_SMUX_CBFC_CTL_TX_EN;
+ cfg = FIELD_SET(CGX_PFC_CLASS_MASK, 0, cfg);
+ }
+
+ cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
+
+ /* Write source MAC address which will be filled into PFC packet */
+ cfg = cgx_lmac_addr_get(cgx->cgx_id, lmac_id);
+ cgx_write(cgx, lmac_id, CGXX_SMUX_SMAC, cfg);
+
+ return 0;
+}
+
+int cgx_lmac_get_pfc_frm_cfg(void *cgxd, int lmac_id, u8 *tx_pause,
+ u8 *rx_pause)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+
+ *rx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_RX_EN);
+ *tx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_TX_EN);
+
+ return 0;
+}
+
+void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!cgx)
+ return;
+
+ if (enable) {
+ /* Enable inbound PTP timestamping */
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg |= CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg |= CGX_SMUX_RX_FRM_CTL_PTP_MODE;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ } else {
+ /* Disable inbound PTP stamping */
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_PTP_MODE;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ }
+}
+
+/* CGX Firmware interface low level support */
+int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
+{
+ struct cgx *cgx = lmac->cgx;
+ struct device *dev;
+ int err = 0;
+ u64 cmd;
+
+ /* Ensure no other command is in progress */
+ err = mutex_lock_interruptible(&lmac->cmd_lock);
+ if (err)
+ return err;
+
+ /* Ensure command register is free */
+ cmd = cgx_read(cgx, lmac->lmac_id, CGX_COMMAND_REG);
+ if (FIELD_GET(CMDREG_OWN, cmd) != CGX_CMD_OWN_NS) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ /* Update ownership in command request */
+ req = FIELD_SET(CMDREG_OWN, CGX_CMD_OWN_FIRMWARE, req);
+
+ /* Mark this lmac as pending, before we start */
+ lmac->cmd_pend = true;
+
+ /* Start command in hardware */
+ cgx_write(cgx, lmac->lmac_id, CGX_COMMAND_REG, req);
+
+ /* Ensure command is completed without errors */
+ if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend,
+ msecs_to_jiffies(CGX_CMD_TIMEOUT))) {
+ dev = &cgx->pdev->dev;
+ dev_err(dev, "cgx port %d:%d cmd %lld timeout\n",
+ cgx->cgx_id, lmac->lmac_id, FIELD_GET(CMDREG_ID, req));
+ err = LMAC_AF_ERR_CMD_TIMEOUT;
+ goto unlock;
+ }
+
+ /* we have a valid command response */
+ smp_rmb(); /* Ensure the latest updates are visible */
+ *resp = lmac->resp;
+
+unlock:
+ mutex_unlock(&lmac->cmd_lock);
+
+ return err;
+}
+
+int cgx_fwi_cmd_generic(u64 req, u64 *resp, struct cgx *cgx, int lmac_id)
+{
+ struct lmac *lmac;
+ int err;
+
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (!lmac)
+ return -ENODEV;
+
+ err = cgx_fwi_cmd_send(req, resp, lmac);
+
+ /* Check for valid response */
+ if (!err) {
+ if (FIELD_GET(EVTREG_STAT, *resp) == CGX_STAT_FAIL)
+ return -EIO;
+ else
+ return 0;
+ }
+
+ return err;
+}
+
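+/* Map an ethtool speed value (Mbps) to the firmware CGX_LINK_* speed encoding */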
+static int cgx_link_usertable_index_map(int speed)
+{
+ switch (speed) {
+ case SPEED_10:
+ return CGX_LINK_10M;
+ case SPEED_100:
+ return CGX_LINK_100M;
+ case SPEED_1000:
+ return CGX_LINK_1G;
+ case SPEED_2500:
+ return CGX_LINK_2HG;
+ case SPEED_5000:
+ return CGX_LINK_5G;
+ case SPEED_10000:
+ return CGX_LINK_10G;
+ case SPEED_20000:
+ return CGX_LINK_20G;
+ case SPEED_25000:
+ return CGX_LINK_25G;
+ case SPEED_40000:
+ return CGX_LINK_40G;
+ case SPEED_50000:
+ return CGX_LINK_50G;
+ case 80000:
+ return CGX_LINK_80G;
+ case SPEED_100000:
+ return CGX_LINK_100G;
+ case SPEED_UNKNOWN:
+ return CGX_LINK_NONE;
+ }
+ return CGX_LINK_NONE;
+}
+
+static void set_mod_args(struct cgx_set_link_mode_args *args,
+ u32 speed, u8 duplex, u8 autoneg, u64 mode)
+{
+	/* Fill in default values in case the user did not pass
+	 * valid parameters
+	 */
+ if (args->duplex == DUPLEX_UNKNOWN)
+ args->duplex = duplex;
+ if (args->speed == SPEED_UNKNOWN)
+ args->speed = speed;
+ if (args->an == AUTONEG_UNKNOWN)
+ args->an = autoneg;
+ args->mode = mode;
+ args->ports = 0;
+}
+
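+/* Translate an ethtool link mode bit into firmware speed/duplex/autoneg/mode arguments */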
+static void otx2_map_ethtool_link_modes(u64 bitmask,
+ struct cgx_set_link_mode_args *args)
+{
+ switch (bitmask) {
+ case ETHTOOL_LINK_MODE_10baseT_Half_BIT:
+ set_mod_args(args, 10, 1, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case ETHTOOL_LINK_MODE_10baseT_Full_BIT:
+ set_mod_args(args, 10, 0, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case ETHTOOL_LINK_MODE_100baseT_Half_BIT:
+ set_mod_args(args, 100, 1, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case ETHTOOL_LINK_MODE_100baseT_Full_BIT:
+ set_mod_args(args, 100, 0, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case ETHTOOL_LINK_MODE_1000baseT_Half_BIT:
+ set_mod_args(args, 1000, 1, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case ETHTOOL_LINK_MODE_1000baseT_Full_BIT:
+ set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case ETHTOOL_LINK_MODE_1000baseX_Full_BIT:
+ set_mod_args(args, 1000, 0, 0, BIT_ULL(CGX_MODE_1000_BASEX));
+ break;
+ case ETHTOOL_LINK_MODE_10000baseT_Full_BIT:
+ set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_QSGMII));
+ break;
+ case ETHTOOL_LINK_MODE_10000baseSR_Full_BIT:
+ set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_10000baseLR_Full_BIT:
+ set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2M));
+ break;
+ case ETHTOOL_LINK_MODE_10000baseKR_Full_BIT:
+ set_mod_args(args, 10000, 0, 1, BIT_ULL(CGX_MODE_10G_KR));
+ break;
+ case ETHTOOL_LINK_MODE_25000baseSR_Full_BIT:
+ set_mod_args(args, 25000, 0, 0, BIT_ULL(CGX_MODE_25G_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_25000baseCR_Full_BIT:
+ set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_CR));
+ break;
+ case ETHTOOL_LINK_MODE_25000baseKR_Full_BIT:
+ set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_KR));
+ break;
+ case ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT:
+ set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT:
+ set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2M));
+ break;
+ case ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT:
+ set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_CR4));
+ break;
+ case ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT:
+ set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_KR4));
+ break;
+ case ETHTOOL_LINK_MODE_50000baseSR_Full_BIT:
+ set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT:
+ set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2M));
+ break;
+ case ETHTOOL_LINK_MODE_50000baseCR_Full_BIT:
+ set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_CR));
+ break;
+ case ETHTOOL_LINK_MODE_50000baseKR_Full_BIT:
+ set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_KR));
+ break;
+ case ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT:
+ set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT:
+ set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2M));
+ break;
+ case ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT:
+ set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_CR4));
+ break;
+ case ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT:
+ set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_KR4));
+ break;
+ default:
+ set_mod_args(args, 0, 1, 0, BIT_ULL(CGX_MODE_MAX));
+ break;
+ }
+}
+
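+/* Convert the raw firmware link status word into the user-facing cgx_link_user_info format */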
+static inline void link_status_user_format(u64 lstat,
+ struct cgx_link_user_info *linfo,
+ struct cgx *cgx, u8 lmac_id)
+{
+ const char *lmac_string;
+
+ linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
+ linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
+ linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
+ linfo->an = FIELD_GET(RESP_LINKSTAT_AN, lstat);
+ linfo->fec = FIELD_GET(RESP_LINKSTAT_FEC, lstat);
+ linfo->lmac_type_id = cgx_get_lmac_type(cgx, lmac_id);
+
+ if (linfo->lmac_type_id >= LMAC_MODE_MAX) {
+ dev_err(&cgx->pdev->dev, "Unknown lmac_type_id %d reported by firmware on cgx port%d:%d",
+ linfo->lmac_type_id, cgx->cgx_id, lmac_id);
+ strncpy(linfo->lmac_type, "Unknown", LMACTYPE_STR_LEN - 1);
+ return;
+ }
+
+ lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
+ strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
+}
+
+/* Hardware event handlers */
+static inline void cgx_link_change_handler(u64 lstat,
+ struct lmac *lmac)
+{
+ struct cgx_link_user_info *linfo;
+ struct cgx *cgx = lmac->cgx;
+ struct cgx_link_event event;
+ struct device *dev;
+ int err_type;
+
+ dev = &cgx->pdev->dev;
+
+ link_status_user_format(lstat, &event.link_uinfo, cgx, lmac->lmac_id);
+ err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat);
+
+ event.cgx_id = cgx->cgx_id;
+ event.lmac_id = lmac->lmac_id;
+
+ /* update the local copy of link status */
+ lmac->link_info = event.link_uinfo;
+ linfo = &lmac->link_info;
+
+ if (err_type == CGX_ERR_SPEED_CHANGE_INVALID)
+ return;
+
+ /* Ensure callback doesn't get unregistered until we finish it */
+ spin_lock(&lmac->event_cb_lock);
+
+ if (!lmac->event_cb.notify_link_chg) {
+ dev_dbg(dev, "cgx port %d:%d Link change handler null",
+ cgx->cgx_id, lmac->lmac_id);
+ if (err_type != CGX_ERR_NONE) {
+ dev_err(dev, "cgx port %d:%d Link error %d\n",
+ cgx->cgx_id, lmac->lmac_id, err_type);
+ }
+ dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n",
+ cgx->cgx_id, lmac->lmac_id,
+ linfo->link_up ? "UP" : "DOWN", linfo->speed);
+ goto err;
+ }
+
+ if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data))
+ dev_err(dev, "event notification failure\n");
+err:
+ spin_unlock(&lmac->event_cb_lock);
+}
+
+static inline bool cgx_cmdresp_is_linkevent(u64 event)
+{
+ u8 id;
+
+ id = FIELD_GET(EVTREG_ID, event);
+ if (id == CGX_CMD_LINK_BRING_UP ||
+ id == CGX_CMD_LINK_BRING_DOWN ||
+ id == CGX_CMD_MODE_CHANGE)
+ return true;
+ else
+ return false;
+}
+
+static inline bool cgx_event_is_linkevent(u64 event)
+{
+ if (FIELD_GET(EVTREG_ID, event) == CGX_EVT_LINK_CHANGE)
+ return true;
+ else
+ return false;
+}
+
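+/* Firmware event IRQ handler: processes command responses and asynchronous link events */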
+static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
+{
+ u64 event, offset, clear_bit;
+ struct lmac *lmac = data;
+ struct cgx *cgx;
+
+ cgx = lmac->cgx;
+
+ /* Clear SW_INT for RPM and CMR_INT for CGX */
+ offset = cgx->mac_ops->int_register;
+ clear_bit = cgx->mac_ops->int_ena_bit;
+
+ event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG);
+
+ if (!FIELD_GET(EVTREG_ACK, event))
+ return IRQ_NONE;
+
+ switch (FIELD_GET(EVTREG_EVT_TYPE, event)) {
+ case CGX_EVT_CMD_RESP:
+ /* Copy the response. Since only one command is active at a
+ * time, there is no way a response can get overwritten
+ */
+ lmac->resp = event;
+ /* Ensure response is updated before thread context starts */
+ smp_wmb();
+
+		/* There won't be separate events for link changes initiated from
+		 * software; hence report the command responses as events
+		 */
+ if (cgx_cmdresp_is_linkevent(event))
+ cgx_link_change_handler(event, lmac);
+
+ /* Release thread waiting for completion */
+ lmac->cmd_pend = false;
+ wake_up_interruptible(&lmac->wq_cmd_cmplt);
+ break;
+ case CGX_EVT_ASYNC:
+ if (cgx_event_is_linkevent(event))
+ cgx_link_change_handler(event, lmac);
+ break;
+ }
+
+ /* Any new event or command response will be posted by firmware
+ * only after the current status is acked.
+ * Ack the interrupt register as well.
+ */
+ cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0);
+ cgx_write(lmac->cgx, lmac->lmac_id, offset, clear_bit);
+
+ return IRQ_HANDLED;
+}
+
+/* APIs for PHY management using CGX firmware interface */
+
+/* callback registration for hardware events like link change */
+int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ struct lmac *lmac;
+
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (!lmac)
+ return -ENODEV;
+
+ lmac->event_cb = *cb;
+
+ return 0;
+}
+
+int cgx_lmac_evh_unregister(void *cgxd, int lmac_id)
+{
+ struct lmac *lmac;
+ unsigned long flags;
+ struct cgx *cgx = cgxd;
+
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (!lmac)
+ return -ENODEV;
+
+ spin_lock_irqsave(&lmac->event_cb_lock, flags);
+ lmac->event_cb.notify_link_chg = NULL;
+ lmac->event_cb.data = NULL;
+ spin_unlock_irqrestore(&lmac->event_cb_lock, flags);
+
+ return 0;
+}
+
+int cgx_get_fwdata_base(u64 *base)
+{
+ u64 req = 0, resp;
+ struct cgx *cgx;
+ int first_lmac;
+ int err;
+
+ cgx = list_first_entry_or_null(&cgx_list, struct cgx, cgx_list);
+ if (!cgx)
+ return -ENXIO;
+
+ first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req);
+ err = cgx_fwi_cmd_generic(req, &resp, cgx, first_lmac);
+ if (!err)
+ *base = FIELD_GET(RESP_FWD_BASE, resp);
+
+ return err;
+}
+
+int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args,
+ int cgx_id, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ u64 req = 0, resp;
+
+ if (!cgx)
+ return -ENODEV;
+
+ if (args.mode)
+ otx2_map_ethtool_link_modes(args.mode, &args);
+ if (!args.speed && args.duplex && !args.an)
+ return -EINVAL;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_MODE_CHANGE, req);
+ req = FIELD_SET(CMDMODECHANGE_SPEED,
+ cgx_link_usertable_index_map(args.speed), req);
+ req = FIELD_SET(CMDMODECHANGE_DUPLEX, args.duplex, req);
+ req = FIELD_SET(CMDMODECHANGE_AN, args.an, req);
+ req = FIELD_SET(CMDMODECHANGE_PORT, args.ports, req);
+ req = FIELD_SET(CMDMODECHANGE_FLAGS, args.mode, req);
+
+ return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+}
+
+int cgx_set_fec(u64 fec, int cgx_id, int lmac_id)
+{
+ u64 req = 0, resp;
+ struct cgx *cgx;
+ int err = 0;
+
+ cgx = cgx_get_pdata(cgx_id);
+ if (!cgx)
+ return -ENXIO;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_SET_FEC, req);
+ req = FIELD_SET(CMDSETFEC, fec, req);
+ err = cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+ if (err)
+ return err;
+
+ cgx->lmac_idmap[lmac_id]->link_info.fec =
+ FIELD_GET(RESP_LINKSTAT_FEC, resp);
+ return cgx->lmac_idmap[lmac_id]->link_info.fec;
+}
+
+int cgx_get_phy_fec_stats(void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ u64 req = 0, resp;
+
+ if (!cgx)
+ return -ENODEV;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_PHY_FEC_STATS, req);
+ return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+}
+
+static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
+{
+ u64 req = 0;
+ u64 resp;
+
+ if (enable) {
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req);
+		/* On CN10K, firmware offloads link bring up/down operations to ECP.
+		 * On OcteonTx2, link operations are handled by the firmware itself,
+		 * which can cause mbox errors, so configure the maximum time the
+		 * firmware polls for the link as 1000 ms.
+		 */
+ if (!is_dev_rpm(cgx))
+ req = FIELD_SET(LINKCFG_TIMEOUT, 1000, req);
+
+ } else {
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req);
+ }
+ return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+}
+
+static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
+{
+ int first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
+ u64 req = 0;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
+ return cgx_fwi_cmd_generic(req, resp, cgx, first_lmac);
+}
+
+static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
+{
+ struct device *dev = &cgx->pdev->dev;
+ int major_ver, minor_ver;
+ u64 resp;
+ int err;
+
+ if (!cgx->lmac_count)
+ return 0;
+
+ err = cgx_fwi_read_version(&resp, cgx);
+ if (err)
+ return err;
+
+ major_ver = FIELD_GET(RESP_MAJOR_VER, resp);
+ minor_ver = FIELD_GET(RESP_MINOR_VER, resp);
+ dev_dbg(dev, "Firmware command interface version = %d.%d\n",
+ major_ver, minor_ver);
+ if (major_ver != CGX_FIRMWARE_MAJOR_VER)
+ return -EIO;
+ else
+ return 0;
+}
+
+static void cgx_lmac_linkup_work(struct work_struct *work)
+{
+ struct cgx *cgx = container_of(work, struct cgx, cgx_cmd_work);
+ struct device *dev = &cgx->pdev->dev;
+ int i, err;
+
+ /* Do Link up for all the enabled lmacs */
+ for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
+ err = cgx_fwi_link_change(cgx, i, true);
+ if (err)
+ dev_info(dev, "cgx port %d:%d Link up command failed\n",
+ cgx->cgx_id, i);
+ }
+}
+
+int cgx_lmac_linkup_start(void *cgxd)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!cgx)
+ return -ENODEV;
+
+ queue_work(cgx->cgx_cmd_workq, &cgx->cgx_cmd_work);
+
+ return 0;
+}
+
+static int cgx_configure_interrupt(struct cgx *cgx, struct lmac *lmac,
+ int cnt, bool req_free)
+{
+ struct mac_ops *mac_ops = cgx->mac_ops;
+ u64 offset, ena_bit;
+ unsigned int irq;
+ int err;
+
+ irq = pci_irq_vector(cgx->pdev, mac_ops->lmac_fwi +
+ cnt * mac_ops->irq_offset);
+ offset = mac_ops->int_set_reg;
+ ena_bit = mac_ops->int_ena_bit;
+
+ if (req_free) {
+ free_irq(irq, lmac);
+ return 0;
+ }
+
+ err = request_irq(irq, cgx_fwi_event_handler, 0, lmac->name, lmac);
+ if (err)
+ return err;
+
+ /* Enable interrupt */
+ cgx_write(cgx, lmac->lmac_id, offset, ena_bit);
+ return 0;
+}
+
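+/* Get the number of LMACs on this CGX from CGXX_CMRX_RX_LMACS */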
+int cgx_get_nr_lmacs(void *cgxd)
+{
+ struct cgx *cgx = cgxd;
+
+ return cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7ULL;
+}
+
+u8 cgx_get_lmacid(void *cgxd, u8 lmac_index)
+{
+ struct cgx *cgx = cgxd;
+
+ return cgx->lmac_idmap[lmac_index]->lmac_id;
+}
+
+unsigned long cgx_get_lmac_bmap(void *cgxd)
+{
+ struct cgx *cgx = cgxd;
+
+ return cgx->lmac_bmap;
+}
+
+static int cgx_lmac_init(struct cgx *cgx)
+{
+ struct lmac *lmac;
+ u64 lmac_list;
+ int i, err;
+
+ /* lmac_list specifies which lmacs are enabled
+ * when bit n is set to 1, LMAC[n] is enabled
+ */
+ if (cgx->mac_ops->non_contiguous_serdes_lane)
+ lmac_list = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0xFULL;
+
+ if (cgx->lmac_count > cgx->max_lmac_per_mac)
+ cgx->lmac_count = cgx->max_lmac_per_mac;
+
+ for (i = 0; i < cgx->lmac_count; i++) {
+ lmac = kzalloc(sizeof(struct lmac), GFP_KERNEL);
+ if (!lmac)
+ return -ENOMEM;
+ lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
+ if (!lmac->name) {
+ err = -ENOMEM;
+ goto err_lmac_free;
+ }
+ sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
+ if (cgx->mac_ops->non_contiguous_serdes_lane) {
+ lmac->lmac_id = __ffs64(lmac_list);
+ lmac_list &= ~BIT_ULL(lmac->lmac_id);
+ } else {
+ lmac->lmac_id = i;
+ }
+
+ lmac->cgx = cgx;
+ lmac->mac_to_index_bmap.max =
+ MAX_DMAC_ENTRIES_PER_CGX / cgx->lmac_count;
+ err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap);
+ if (err)
+ goto err_name_free;
+
+ /* Reserve first entry for default MAC address */
+ set_bit(0, lmac->mac_to_index_bmap.bmap);
+
+ lmac->rx_fc_pfvf_bmap.max = 128;
+ err = rvu_alloc_bitmap(&lmac->rx_fc_pfvf_bmap);
+ if (err)
+ goto err_dmac_bmap_free;
+
+ lmac->tx_fc_pfvf_bmap.max = 128;
+ err = rvu_alloc_bitmap(&lmac->tx_fc_pfvf_bmap);
+ if (err)
+ goto err_rx_fc_bmap_free;
+
+ init_waitqueue_head(&lmac->wq_cmd_cmplt);
+ mutex_init(&lmac->cmd_lock);
+ spin_lock_init(&lmac->event_cb_lock);
+ err = cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, false);
+ if (err)
+ goto err_bitmap_free;
+
+ /* Add reference */
+ cgx->lmac_idmap[lmac->lmac_id] = lmac;
+ set_bit(lmac->lmac_id, &cgx->lmac_bmap);
+ cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
+ }
+
+ return cgx_lmac_verify_fwi_version(cgx);
+
+err_bitmap_free:
+ rvu_free_bitmap(&lmac->tx_fc_pfvf_bmap);
+err_rx_fc_bmap_free:
+ rvu_free_bitmap(&lmac->rx_fc_pfvf_bmap);
+err_dmac_bmap_free:
+ rvu_free_bitmap(&lmac->mac_to_index_bmap);
+err_name_free:
+ kfree(lmac->name);
+err_lmac_free:
+ kfree(lmac);
+ return err;
+}
+
+static int cgx_lmac_exit(struct cgx *cgx)
+{
+ struct lmac *lmac;
+ int i;
+
+ if (cgx->cgx_cmd_workq) {
+ destroy_workqueue(cgx->cgx_cmd_workq);
+ cgx->cgx_cmd_workq = NULL;
+ }
+
+ /* Free all lmac related resources */
+ for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
+ lmac = cgx->lmac_idmap[i];
+ if (!lmac)
+ continue;
+ cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, false);
+ cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true);
+ kfree(lmac->mac_to_index_bmap.bmap);
+ kfree(lmac->name);
+ kfree(lmac);
+ }
+
+ return 0;
+}
+
+static void cgx_populate_features(struct cgx *cgx)
+{
+ u64 cfg;
+
+ cfg = cgx_read(cgx, 0, CGX_CONST);
+ cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
+ cgx->max_lmac_per_mac = FIELD_GET(CGX_CONST_MAX_LMACS, cfg);
+
+ if (is_dev_rpm(cgx))
+ cgx->hw_features = (RVU_LMAC_FEAT_DMACF | RVU_MAC_RPM |
+ RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP);
+ else
+ cgx->hw_features = (RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_HIGIG2 |
+ RVU_LMAC_FEAT_PTP | RVU_LMAC_FEAT_DMACF);
+}
+
+static struct mac_ops cgx_mac_ops = {
+ .name = "cgx",
+ .csr_offset = 0,
+ .lmac_offset = 18,
+ .int_register = CGXX_CMRX_INT,
+ .int_set_reg = CGXX_CMRX_INT_ENA_W1S,
+ .irq_offset = 9,
+ .int_ena_bit = FW_CGX_INT,
+ .lmac_fwi = CGX_LMAC_FWI,
+ .non_contiguous_serdes_lane = false,
+ .rx_stats_cnt = 9,
+ .tx_stats_cnt = 18,
+ .get_nr_lmacs = cgx_get_nr_lmacs,
+ .get_lmac_type = cgx_get_lmac_type,
+ .lmac_fifo_len = cgx_get_lmac_fifo_len,
+ .mac_lmac_intl_lbk = cgx_lmac_internal_loopback,
+ .mac_get_rx_stats = cgx_get_rx_stats,
+ .mac_get_tx_stats = cgx_get_tx_stats,
+ .mac_enadis_rx_pause_fwding = cgx_lmac_enadis_rx_pause_fwding,
+ .mac_get_pause_frm_status = cgx_lmac_get_pause_frm_status,
+ .mac_enadis_pause_frm = cgx_lmac_enadis_pause_frm,
+ .mac_pause_frm_config = cgx_lmac_pause_frm_config,
+ .mac_enadis_ptp_config = cgx_lmac_ptp_config,
+ .mac_rx_tx_enable = cgx_lmac_rx_tx_enable,
+ .mac_tx_enable = cgx_lmac_tx_enable,
+ .pfc_config = cgx_lmac_pfc_config,
+ .mac_get_pfc_frm_cfg = cgx_lmac_get_pfc_frm_cfg,
+};
+
+static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct cgx *cgx;
+ int err, nvec;
+
+ cgx = devm_kzalloc(dev, sizeof(*cgx), GFP_KERNEL);
+ if (!cgx)
+ return -ENOMEM;
+ cgx->pdev = pdev;
+
+ pci_set_drvdata(pdev, cgx);
+
+ /* Use mac_ops to get MAC specific features */
+ if (pdev->device == PCI_DEVID_CN10K_RPM)
+ cgx->mac_ops = rpm_get_mac_ops();
+ else
+ cgx->mac_ops = &cgx_mac_ops;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ pci_set_drvdata(pdev, NULL);
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto err_disable_device;
+ }
+
+ /* MAP configuration registers */
+ cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!cgx->reg_base) {
+ dev_err(dev, "CGX: Cannot map CSR memory space, aborting\n");
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx);
+ if (!cgx->lmac_count) {
+ dev_notice(dev, "CGX %d LMAC count is zero, skipping probe\n", cgx->cgx_id);
+ err = -EOPNOTSUPP;
+ goto err_release_regions;
+ }
+
+ nvec = pci_msix_vec_count(cgx->pdev);
+ err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
+ if (err < 0 || err != nvec) {
+ dev_err(dev, "Request for %d msix vectors failed, err %d\n",
+ nvec, err);
+ goto err_release_regions;
+ }
+
+ cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
+ & CGX_ID_MASK;
+
+ /* init wq for processing linkup requests */
+ INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work);
+ cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0);
+ if (!cgx->cgx_cmd_workq) {
+		dev_err(dev, "alloc workqueue failed for cgx cmd\n");
+ err = -ENOMEM;
+ goto err_free_irq_vectors;
+ }
+
+ list_add(&cgx->cgx_list, &cgx_list);
+
+ cgx_populate_features(cgx);
+
+ mutex_init(&cgx->lock);
+
+ err = cgx_lmac_init(cgx);
+ if (err)
+ goto err_release_lmac;
+
+ return 0;
+
+err_release_lmac:
+ cgx_lmac_exit(cgx);
+ list_del(&cgx->cgx_list);
+err_free_irq_vectors:
+ pci_free_irq_vectors(pdev);
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void cgx_remove(struct pci_dev *pdev)
+{
+ struct cgx *cgx = pci_get_drvdata(pdev);
+
+ if (cgx) {
+ cgx_lmac_exit(cgx);
+ list_del(&cgx->cgx_list);
+ }
+ pci_free_irq_vectors(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+struct pci_driver cgx_driver = {
+ .name = DRV_NAME,
+ .id_table = cgx_id_table,
+ .probe = cgx_probe,
+ .remove = cgx_remove,
+};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
new file mode 100644
index 000000000..09ddb00f6
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 CGX driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#ifndef CGX_H
+#define CGX_H
+
+#include "mbox.h"
+#include "cgx_fw_if.h"
+#include "rpm.h"
+
+/* PCI device IDs */
+#define PCI_DEVID_OCTEONTX2_CGX 0xA059
+
+/* PCI BAR nos */
+#define PCI_CFG_REG_BAR_NUM 0
+
+#define CGX_ID_MASK 0xF
+#define MAX_DMAC_ENTRIES_PER_CGX 32
+
+/* Registers */
+#define CGXX_CMRX_CFG 0x00
+#define CMR_P2X_SEL_MASK GENMASK_ULL(61, 59)
+#define CMR_P2X_SEL_SHIFT 59ULL
+#define CMR_P2X_SEL_NIX0 1ULL
+#define CMR_P2X_SEL_NIX1 2ULL
+#define DATA_PKT_TX_EN BIT_ULL(53)
+#define DATA_PKT_RX_EN BIT_ULL(54)
+#define CGX_LMAC_TYPE_SHIFT 40
+#define CGX_LMAC_TYPE_MASK 0xF
+#define CGXX_CMRX_INT 0x040
+#define FW_CGX_INT BIT_ULL(1)
+#define CGXX_CMRX_INT_ENA_W1S 0x058
+#define CGXX_CMRX_RX_ID_MAP 0x060
+#define CGXX_CMRX_RX_STAT0 0x070
+#define CGXX_CMRX_RX_LMACS 0x128
+#define CGXX_CMRX_RX_DMAC_CTL0 (0x1F8 + mac_ops->csr_offset)
+#define CGX_DMAC_CTL0_CAM_ENABLE BIT_ULL(3)
+#define CGX_DMAC_CAM_ACCEPT BIT_ULL(3)
+#define CGX_DMAC_MCAST_MODE_CAM BIT_ULL(2)
+#define CGX_DMAC_MCAST_MODE BIT_ULL(1)
+#define CGX_DMAC_BCAST_MODE BIT_ULL(0)
+#define CGXX_CMRX_RX_DMAC_CAM0 (0x200 + mac_ops->csr_offset)
+#define CGX_DMAC_CAM_ADDR_ENABLE BIT_ULL(48)
+#define CGX_DMAC_CAM_ENTRY_LMACID GENMASK_ULL(50, 49)
+#define CGXX_CMRX_RX_DMAC_CAM1 0x400
+#define CGX_RX_DMAC_ADR_MASK GENMASK_ULL(47, 0)
+#define CGXX_CMRX_TX_STAT0 0x700
+#define CGXX_SCRATCH0_REG 0x1050
+#define CGXX_SCRATCH1_REG 0x1058
+#define CGX_CONST 0x2000
+#define CGX_CONST_RXFIFO_SIZE GENMASK_ULL(23, 0)
+#define CGX_CONST_MAX_LMACS GENMASK_ULL(31, 24)
+#define CGXX_SPUX_CONTROL1 0x10000
+#define CGXX_SPUX_LNX_FEC_CORR_BLOCKS 0x10700
+#define CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS 0x10800
+#define CGXX_SPUX_RSFEC_CORR 0x10088
+#define CGXX_SPUX_RSFEC_UNCORR 0x10090
+
+#define CGXX_SPUX_CONTROL1_LBK BIT_ULL(14)
+#define CGXX_GMP_PCS_MRX_CTL 0x30000
+#define CGXX_GMP_PCS_MRX_CTL_LBK BIT_ULL(14)
+
+#define CGXX_SMUX_RX_FRM_CTL 0x20020
+#define CGX_SMUX_RX_FRM_CTL_CTL_BCK BIT_ULL(3)
+#define CGX_SMUX_RX_FRM_CTL_PTP_MODE BIT_ULL(12)
+#define CGXX_GMP_GMI_RXX_FRM_CTL 0x38028
+#define CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK BIT_ULL(3)
+#define CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE BIT_ULL(12)
+#define CGXX_SMUX_TX_CTL 0x20178
+#define CGXX_SMUX_TX_PAUSE_PKT_TIME 0x20110
+#define CGXX_SMUX_TX_PAUSE_PKT_INTERVAL 0x20120
+#define CGXX_SMUX_SMAC 0x20108
+#define CGXX_SMUX_CBFC_CTL 0x20218
+#define CGXX_SMUX_CBFC_CTL_RX_EN BIT_ULL(0)
+#define CGXX_SMUX_CBFC_CTL_TX_EN BIT_ULL(1)
+#define CGXX_SMUX_CBFC_CTL_DRP_EN BIT_ULL(2)
+#define CGXX_SMUX_CBFC_CTL_BCK_EN BIT_ULL(3)
+#define CGX_PFC_CLASS_MASK GENMASK_ULL(47, 32)
+#define CGXX_GMP_GMI_TX_PAUSE_PKT_TIME 0x38230
+#define CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL 0x38248
+#define CGX_SMUX_TX_CTL_L2P_BP_CONV BIT_ULL(7)
+#define CGXX_CMR_RX_OVR_BP 0x130
+#define CGX_CMR_RX_OVR_BP_EN(X) BIT_ULL(((X) + 8))
+#define CGX_CMR_RX_OVR_BP_BP(X) BIT_ULL(((X) + 4))
+
+#define CGX_COMMAND_REG CGXX_SCRATCH1_REG
+#define CGX_EVENT_REG CGXX_SCRATCH0_REG
+#define CGX_CMD_TIMEOUT 5000 /* msecs */
+#define DEFAULT_PAUSE_TIME 0x7FF
+
+#define CGX_LMAC_FWI 0
+
+enum cgx_nix_stat_type {
+ NIX_STATS_RX,
+ NIX_STATS_TX,
+};
+
+enum LMAC_TYPE {
+ LMAC_MODE_SGMII = 0,
+ LMAC_MODE_XAUI = 1,
+ LMAC_MODE_RXAUI = 2,
+ LMAC_MODE_10G_R = 3,
+ LMAC_MODE_40G_R = 4,
+ LMAC_MODE_QSGMII = 6,
+ LMAC_MODE_25G_R = 7,
+ LMAC_MODE_50G_R = 8,
+ LMAC_MODE_100G_R = 9,
+ LMAC_MODE_USXGMII = 10,
+ LMAC_MODE_MAX,
+};
+
+struct cgx_link_event {
+ struct cgx_link_user_info link_uinfo;
+ u8 cgx_id;
+ u8 lmac_id;
+};
+
+/**
+ * struct cgx_event_cb
+ * @notify_link_chg: callback for link change notification
+ * @data: data passed to callback function
+ */
+struct cgx_event_cb {
+ int (*notify_link_chg)(struct cgx_link_event *event, void *data);
+ void *data;
+};
+
+extern struct pci_driver cgx_driver;
+
+int cgx_get_cgxcnt_max(void);
+int cgx_get_cgxid(void *cgxd);
+int cgx_get_lmac_cnt(void *cgxd);
+void *cgx_get_pdata(int cgx_id);
+int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind);
+int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id);
+int cgx_lmac_evh_unregister(void *cgxd, int lmac_id);
+int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat);
+int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat);
+int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable);
+int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable);
+int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
+int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id);
+u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id);
+int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
+int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index);
+int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id);
+void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable);
+void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable);
+int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable);
+int cgx_get_link_info(void *cgxd, int lmac_id,
+ struct cgx_link_user_info *linfo);
+int cgx_lmac_linkup_start(void *cgxd);
+int cgx_get_fwdata_base(u64 *base);
+int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause);
+int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
+ u8 tx_pause, u8 rx_pause);
+void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable);
+u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id);
+int cgx_set_fec(u64 fec, int cgx_id, int lmac_id);
+int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp);
+int cgx_get_phy_fec_stats(void *cgxd, int lmac_id);
+int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args,
+ int cgx_id, int lmac_id);
+u64 cgx_features_get(void *cgxd);
+struct mac_ops *get_mac_ops(void *cgxd);
+int cgx_get_nr_lmacs(void *cgxd);
+u8 cgx_get_lmacid(void *cgxd, u8 lmac_index);
+unsigned long cgx_get_lmac_bmap(void *cgxd);
+void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val);
+u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset);
+int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index);
+u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id);
+u64 cgx_read_dmac_entry(void *cgxd, int index);
+int cgx_lmac_pfc_config(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
+ u16 pfc_en);
+int cgx_lmac_get_pfc_frm_cfg(void *cgxd, int lmac_id, u8 *tx_pause,
+ u8 *rx_pause);
+int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
+ int pfvf_idx);
+#endif /* CGX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
new file mode 100644
index 000000000..d4a27c882
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 CGX driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#ifndef __CGX_FW_INTF_H__
+#define __CGX_FW_INTF_H__
+
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+
+#define CGX_FIRMWARE_MAJOR_VER 1
+#define CGX_FIRMWARE_MINOR_VER 0
+
+#define CGX_EVENT_ACK 1UL
+
+/* CGX error types. Set in the cmd response status when it is CGX_STAT_FAIL */
+enum cgx_error_type {
+ CGX_ERR_NONE,
+ CGX_ERR_LMAC_NOT_ENABLED,
+ CGX_ERR_LMAC_MODE_INVALID,
+ CGX_ERR_REQUEST_ID_INVALID,
+ CGX_ERR_PREV_ACK_NOT_CLEAR,
+ CGX_ERR_PHY_LINK_DOWN,
+ CGX_ERR_PCS_RESET_FAIL,
+ CGX_ERR_AN_CPT_FAIL,
+ CGX_ERR_TX_NOT_IDLE,
+ CGX_ERR_RX_NOT_IDLE,
+ CGX_ERR_SPUX_BR_BLKLOCK_FAIL,
+ CGX_ERR_SPUX_RX_ALIGN_FAIL,
+ CGX_ERR_SPUX_TX_FAULT,
+ CGX_ERR_SPUX_RX_FAULT,
+ CGX_ERR_SPUX_RESET_FAIL,
+ CGX_ERR_SPUX_AN_RESET_FAIL,
+ CGX_ERR_SPUX_USX_AN_RESET_FAIL,
+ CGX_ERR_SMUX_RX_LINK_NOT_OK,
+ CGX_ERR_PCS_RECV_LINK_FAIL,
+ CGX_ERR_TRAINING_FAIL,
+ CGX_ERR_RX_EQU_FAIL,
+ CGX_ERR_SPUX_BER_FAIL,
+ CGX_ERR_SPUX_RSFEC_ALGN_FAIL,
+ CGX_ERR_SPUX_MARKER_LOCK_FAIL,
+ CGX_ERR_SET_FEC_INVALID,
+ CGX_ERR_SET_FEC_FAIL,
+ CGX_ERR_MODULE_INVALID,
+ CGX_ERR_MODULE_NOT_PRESENT,
+ CGX_ERR_SPEED_CHANGE_INVALID,
+};
+
+/* LINK speed types */
+enum cgx_link_speed {
+ CGX_LINK_NONE,
+ CGX_LINK_10M,
+ CGX_LINK_100M,
+ CGX_LINK_1G,
+ CGX_LINK_2HG,
+ CGX_LINK_5G,
+ CGX_LINK_10G,
+ CGX_LINK_20G,
+ CGX_LINK_25G,
+ CGX_LINK_40G,
+ CGX_LINK_50G,
+ CGX_LINK_80G,
+ CGX_LINK_100G,
+ CGX_LINK_SPEED_MAX,
+};
+
+enum CGX_MODE_ {
+ CGX_MODE_SGMII,
+ CGX_MODE_1000_BASEX,
+ CGX_MODE_QSGMII,
+ CGX_MODE_10G_C2C,
+ CGX_MODE_10G_C2M,
+ CGX_MODE_10G_KR,
+ CGX_MODE_20G_C2C,
+ CGX_MODE_25G_C2C,
+ CGX_MODE_25G_C2M,
+ CGX_MODE_25G_2_C2C,
+ CGX_MODE_25G_CR,
+ CGX_MODE_25G_KR,
+ CGX_MODE_40G_C2C,
+ CGX_MODE_40G_C2M,
+ CGX_MODE_40G_CR4,
+ CGX_MODE_40G_KR4,
+ CGX_MODE_40GAUI_C2C,
+ CGX_MODE_50G_C2C,
+ CGX_MODE_50G_C2M,
+ CGX_MODE_50G_4_C2C,
+ CGX_MODE_50G_CR,
+ CGX_MODE_50G_KR,
+ CGX_MODE_80GAUI_C2C,
+ CGX_MODE_100G_C2C,
+ CGX_MODE_100G_C2M,
+ CGX_MODE_100G_CR4,
+ CGX_MODE_100G_KR4,
+ CGX_MODE_MAX /* = 29 */
+};
+/* REQUEST ID types. Input to firmware */
+enum cgx_cmd_id {
+ CGX_CMD_NONE,
+ CGX_CMD_GET_FW_VER,
+ CGX_CMD_GET_MAC_ADDR,
+ CGX_CMD_SET_MTU,
+ CGX_CMD_GET_LINK_STS, /* optional to user */
+ CGX_CMD_LINK_BRING_UP,
+ CGX_CMD_LINK_BRING_DOWN,
+ CGX_CMD_INTERNAL_LBK,
+ CGX_CMD_EXTERNAL_LBK,
+ CGX_CMD_HIGIG,
+ CGX_CMD_LINK_STAT_CHANGE,
+ CGX_CMD_MODE_CHANGE, /* hot plug support */
+ CGX_CMD_INTF_SHUTDOWN,
+ CGX_CMD_GET_MKEX_PRFL_SIZE,
+ CGX_CMD_GET_MKEX_PRFL_ADDR,
+ CGX_CMD_GET_FWD_BASE, /* get base address of shared FW data */
+ CGX_CMD_GET_LINK_MODES, /* Supported Link Modes */
+ CGX_CMD_SET_LINK_MODE,
+ CGX_CMD_GET_SUPPORTED_FEC,
+ CGX_CMD_SET_FEC,
+ CGX_CMD_GET_AN,
+ CGX_CMD_SET_AN,
+ CGX_CMD_GET_ADV_LINK_MODES,
+ CGX_CMD_GET_ADV_FEC,
+ CGX_CMD_GET_PHY_MOD_TYPE, /* line-side modulation type: NRZ or PAM4 */
+ CGX_CMD_SET_PHY_MOD_TYPE,
+ CGX_CMD_PRBS,
+ CGX_CMD_DISPLAY_EYE,
+ CGX_CMD_GET_PHY_FEC_STATS,
+};
+
+/* async event ids */
+enum cgx_evt_id {
+ CGX_EVT_NONE,
+ CGX_EVT_LINK_CHANGE,
+};
+
+/* event types - cause of interrupt */
+enum cgx_evt_type {
+ CGX_EVT_ASYNC,
+ CGX_EVT_CMD_RESP
+};
+
+enum cgx_stat {
+ CGX_STAT_SUCCESS,
+ CGX_STAT_FAIL
+};
+
+enum cgx_cmd_own {
+ CGX_CMD_OWN_NS,
+ CGX_CMD_OWN_FIRMWARE,
+};
+
+/* m - bit mask
+ * y - value to be written in the bitrange
+ * x - input value whose bitrange is to be modified
+ */
+#define FIELD_SET(m, y, x) \
+ (((x) & ~(m)) | \
+ FIELD_PREP((m), (y)))
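+
+/* Illustrative sketch (not part of this interface): FIELD_SET() is intended
+ * for composing the 64-bit command word written to CGX_COMMAND_REG. Assuming
+ * the cgx_fwi_cmd_generic() helper declared in lmac_common.h, a firmware
+ * version query could look like:
+ *
+ *	u64 req = 0, resp;
+ *	int err, major_ver;
+ *
+ *	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
+ *	err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
+ *	if (!err)
+ *		major_ver = FIELD_GET(RESP_MAJOR_VER, resp);
+ */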
+
+/* scratchx(0) CSR used for ATF->non-secure SW communication.
+ * This acts as the status register and provides details on command
+ * ack/status, command response and error details.
+ */
+#define EVTREG_ACK BIT_ULL(0)
+#define EVTREG_EVT_TYPE BIT_ULL(1)
+#define EVTREG_STAT BIT_ULL(2)
+#define EVTREG_ID GENMASK_ULL(8, 3)
+
+/* Response to command IDs with command status as CGX_STAT_FAIL
+ *
+ * Not applicable for commands:
+ * CGX_CMD_LINK_BRING_UP/DOWN/CGX_EVT_LINK_CHANGE
+ */
+#define EVTREG_ERRTYPE GENMASK_ULL(18, 9)
+
+/* Response to cmd ID as CGX_CMD_GET_FW_VER with cmd status as
+ * CGX_STAT_SUCCESS
+ */
+#define RESP_MAJOR_VER GENMASK_ULL(12, 9)
+#define RESP_MINOR_VER GENMASK_ULL(16, 13)
+
+/* Response to cmd ID as CGX_CMD_GET_MAC_ADDR with cmd status as
+ * CGX_STAT_SUCCESS
+ */
+#define RESP_MAC_ADDR GENMASK_ULL(56, 9)
+
+/* Response to cmd ID as CGX_CMD_GET_MKEX_PRFL_SIZE with cmd status as
+ * CGX_STAT_SUCCESS
+ */
+#define RESP_MKEX_PRFL_SIZE GENMASK_ULL(63, 9)
+
+/* Response to cmd ID as CGX_CMD_GET_MKEX_PRFL_ADDR with cmd status as
+ * CGX_STAT_SUCCESS
+ */
+#define RESP_MKEX_PRFL_ADDR GENMASK_ULL(63, 9)
+
+/* Response to cmd ID as CGX_CMD_GET_FWD_BASE with cmd status as
+ * CGX_STAT_SUCCESS
+ */
+#define RESP_FWD_BASE GENMASK_ULL(56, 9)
+#define RESP_LINKSTAT_LMAC_TYPE GENMASK_ULL(35, 28)
+
+/* Response to cmd ID - CGX_CMD_LINK_BRING_UP/DOWN, event ID CGX_EVT_LINK_CHANGE
+ * status can be either CGX_STAT_FAIL or CGX_STAT_SUCCESS
+ *
+ * In case of CGX_STAT_FAIL, it indicates CGX configuration failed
+ * when processing link up/down/change command.
+ * Both err_type and current link status will be updated
+ *
+ * In case of CGX_STAT_SUCCESS, err_type will be CGX_ERR_NONE and current
+ * link status will be updated
+ */
+struct cgx_lnk_sts {
+ uint64_t reserved1:9;
+ uint64_t link_up:1;
+ uint64_t full_duplex:1;
+ uint64_t speed:4; /* cgx_link_speed */
+ uint64_t err_type:10;
+ uint64_t an:1; /* AN supported or not */
+ uint64_t fec:2; /* FEC type if enabled, if not 0 */
+ uint64_t port:8;
+ uint64_t reserved2:28;
+};
+
+#define RESP_LINKSTAT_UP GENMASK_ULL(9, 9)
+#define RESP_LINKSTAT_FDUPLEX GENMASK_ULL(10, 10)
+#define RESP_LINKSTAT_SPEED GENMASK_ULL(14, 11)
+#define RESP_LINKSTAT_ERRTYPE GENMASK_ULL(24, 15)
+#define RESP_LINKSTAT_AN GENMASK_ULL(25, 25)
+#define RESP_LINKSTAT_FEC GENMASK_ULL(27, 26)
+#define RESP_LINKSTAT_PORT GENMASK_ULL(35, 28)
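+
+/* Illustrative sketch (not part of this interface): these masks mirror
+ * struct cgx_lnk_sts and can be used with FIELD_GET() to decode the event
+ * register value; speed comes back as an enum cgx_link_speed value:
+ *
+ *	link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
+ *	speed = FIELD_GET(RESP_LINKSTAT_SPEED, lstat);
+ *	err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat);
+ */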
+
+/* scratchx(1) CSR used for non-secure SW->ATF communication
+ * This CSR acts as a command register
+ */
+#define CMDREG_OWN BIT_ULL(0)
+#define CMDREG_ID GENMASK_ULL(7, 2)
+
+/* Any command using enable/disable as an argument needs
+ * to set this bitfield.
+ * Ex: Loopback, HiGig...
+ */
+#define CMDREG_ENABLE BIT_ULL(8)
+
+/* command argument to be passed for cmd ID - CGX_CMD_SET_MTU */
+#define CMDMTU_SIZE GENMASK_ULL(23, 8)
+
+/* command argument to be passed for cmd ID - CGX_CMD_LINK_CHANGE */
+#define CMDLINKCHANGE_LINKUP BIT_ULL(8)
+#define CMDLINKCHANGE_FULLDPLX BIT_ULL(9)
+#define CMDLINKCHANGE_SPEED GENMASK_ULL(13, 10)
+
+#define CMDSETFEC GENMASK_ULL(9, 8)
+/* command argument to be passed for cmd ID - CGX_CMD_MODE_CHANGE */
+#define CMDMODECHANGE_SPEED GENMASK_ULL(11, 8)
+#define CMDMODECHANGE_DUPLEX GENMASK_ULL(12, 12)
+#define CMDMODECHANGE_AN GENMASK_ULL(13, 13)
+#define CMDMODECHANGE_PORT GENMASK_ULL(21, 14)
+#define CMDMODECHANGE_FLAGS GENMASK_ULL(63, 22)
+
+/* LINK_BRING_UP command timeout */
+#define LINKCFG_TIMEOUT GENMASK_ULL(21, 8)
+#endif /* __CGX_FW_INTF_H__ */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
new file mode 100644
index 000000000..8931864ee
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell.
+ */
+
+#ifndef COMMON_H
+#define COMMON_H
+
+#include "rvu_struct.h"
+
+#define OTX2_ALIGN 128 /* Align to cacheline */
+
+#define Q_SIZE_16 0ULL /* 16 entries */
+#define Q_SIZE_64 1ULL /* 64 entries */
+#define Q_SIZE_256 2ULL
+#define Q_SIZE_1K 3ULL
+#define Q_SIZE_4K 4ULL
+#define Q_SIZE_16K 5ULL
+#define Q_SIZE_64K 6ULL
+#define Q_SIZE_256K 7ULL
+#define Q_SIZE_1M 8ULL /* Million entries */
+#define Q_SIZE_MIN Q_SIZE_16
+#define Q_SIZE_MAX Q_SIZE_1M
+
+#define Q_COUNT(x) (16ULL << (2 * x))
+#define Q_SIZE(x, n) ((ilog2(x) - (n)) / 2)
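+/* For example, Q_COUNT(Q_SIZE_4K) = 16 << 8 = 4096 entries, and the inverse
+ * Q_SIZE(4096, 4) = (12 - 4) / 2 = Q_SIZE_4K (4 being log2 of the minimum
+ * 16-entry queue).
+ */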
+
+/* Admin queue info */
+
+/* Since we intend to add only one instruction at a time,
+ * keep the queue size at its minimum.
+ */
+#define AQ_SIZE Q_SIZE_16
+/* HW head & tail pointer mask */
+#define AQ_PTR_MASK 0xFFFFF
+
+struct qmem {
+ void *base;
+ dma_addr_t iova;
+ int alloc_sz;
+ u16 entry_sz;
+ u8 align;
+ u32 qsize;
+};
+
+static inline int qmem_alloc(struct device *dev, struct qmem **q,
+ int qsize, int entry_sz)
+{
+ struct qmem *qmem;
+ int aligned_addr;
+
+ if (!qsize)
+ return -EINVAL;
+
+ *q = devm_kzalloc(dev, sizeof(*qmem), GFP_KERNEL);
+ if (!*q)
+ return -ENOMEM;
+ qmem = *q;
+
+ qmem->entry_sz = entry_sz;
+ qmem->alloc_sz = (qsize * entry_sz) + OTX2_ALIGN;
+ qmem->base = dma_alloc_attrs(dev, qmem->alloc_sz, &qmem->iova,
+ GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS);
+ if (!qmem->base)
+ return -ENOMEM;
+
+ qmem->qsize = qsize;
+
+ aligned_addr = ALIGN((u64)qmem->iova, OTX2_ALIGN);
+ qmem->align = (aligned_addr - qmem->iova);
+ qmem->base += qmem->align;
+ qmem->iova += qmem->align;
+ return 0;
+}
+
+static inline void qmem_free(struct device *dev, struct qmem *qmem)
+{
+ if (!qmem)
+ return;
+
+ if (qmem->base)
+ dma_free_attrs(dev, qmem->alloc_sz,
+ qmem->base - qmem->align,
+ qmem->iova - qmem->align,
+ DMA_ATTR_FORCE_CONTIGUOUS);
+ devm_kfree(dev, qmem);
+}
+
+struct admin_queue {
+ struct qmem *inst;
+ struct qmem *res;
+ spinlock_t lock; /* Serialize inst enqueue from PFs */
+};
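+
+/* Illustrative sketch (hypothetical caller; inst_size/res_size stand in for
+ * the HW instruction/result sizes): given a struct admin_queue *aq, its
+ * instruction and result memory can be backed with qmem as follows:
+ *
+ *	err = qmem_alloc(dev, &aq->inst, Q_COUNT(AQ_SIZE), inst_size);
+ *	if (err)
+ *		return err;
+ *	err = qmem_alloc(dev, &aq->res, Q_COUNT(AQ_SIZE), res_size);
+ *	if (err)
+ *		qmem_free(dev, aq->inst);
+ */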
+
+/* NPA aura count */
+enum npa_aura_sz {
+ NPA_AURA_SZ_0,
+ NPA_AURA_SZ_128,
+ NPA_AURA_SZ_256,
+ NPA_AURA_SZ_512,
+ NPA_AURA_SZ_1K,
+ NPA_AURA_SZ_2K,
+ NPA_AURA_SZ_4K,
+ NPA_AURA_SZ_8K,
+ NPA_AURA_SZ_16K,
+ NPA_AURA_SZ_32K,
+ NPA_AURA_SZ_64K,
+ NPA_AURA_SZ_128K,
+ NPA_AURA_SZ_256K,
+ NPA_AURA_SZ_512K,
+ NPA_AURA_SZ_1M,
+ NPA_AURA_SZ_MAX,
+};
+
+#define NPA_AURA_COUNT(x) (1ULL << ((x) + 6))
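+/* e.g. NPA_AURA_COUNT(NPA_AURA_SZ_128) = 1ULL << (1 + 6) = 128 auras */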
+
+/* NPA AQ result structure for init/read/write of aura HW contexts */
+struct npa_aq_aura_res {
+ struct npa_aq_res_s res;
+ struct npa_aura_s aura_ctx;
+ struct npa_aura_s ctx_mask;
+};
+
+/* NPA AQ result structure for init/read/write of pool HW contexts */
+struct npa_aq_pool_res {
+ struct npa_aq_res_s res;
+ struct npa_pool_s pool_ctx;
+ struct npa_pool_s ctx_mask;
+};
+
+/* NIX Transmit schedulers */
+enum nix_scheduler {
+ NIX_TXSCH_LVL_SMQ = 0x0,
+ NIX_TXSCH_LVL_MDQ = 0x0,
+ NIX_TXSCH_LVL_TL4 = 0x1,
+ NIX_TXSCH_LVL_TL3 = 0x2,
+ NIX_TXSCH_LVL_TL2 = 0x3,
+ NIX_TXSCH_LVL_TL1 = 0x4,
+ NIX_TXSCH_LVL_CNT = 0x5,
+};
+
+#define TXSCH_RR_QTM_MAX ((1 << 24) - 1)
+#define TXSCH_TL1_DFLT_RR_QTM TXSCH_RR_QTM_MAX
+#define TXSCH_TL1_DFLT_RR_PRIO (0x1ull)
+#define CN10K_MAX_DWRR_WEIGHT 16384 /* Weight is 14bit on CN10K */
+
+/* Min/Max packet sizes, excluding FCS */
+#define NIC_HW_MIN_FRS 40
+#define NIC_HW_MAX_FRS 9212
+#define SDP_HW_MAX_FRS 65535
+#define CN10K_LMAC_LINK_MAX_FRS 16380 /* 16k - FCS */
+#define CN10K_LBK_LINK_MAX_FRS 65535 /* 64k */
+
+/* NIX RX action operation */
+#define NIX_RX_ACTIONOP_DROP (0x0ull)
+#define NIX_RX_ACTIONOP_UCAST (0x1ull)
+#define NIX_RX_ACTIONOP_UCAST_IPSEC (0x2ull)
+#define NIX_RX_ACTIONOP_MCAST (0x3ull)
+#define NIX_RX_ACTIONOP_RSS (0x4ull)
+/* Use the RX action set in the default unicast entry */
+#define NIX_RX_ACTION_DEFAULT (0xfull)
+
+/* NIX TX action operation */
+#define NIX_TX_ACTIONOP_DROP (0x0ull)
+#define NIX_TX_ACTIONOP_UCAST_DEFAULT (0x1ull)
+#define NIX_TX_ACTIONOP_UCAST_CHAN (0x2ull)
+#define NIX_TX_ACTIONOP_MCAST (0x3ull)
+#define NIX_TX_ACTIONOP_DROP_VIOL (0x5ull)
+
+#define NPC_MCAM_KEY_X1 0
+#define NPC_MCAM_KEY_X2 1
+#define NPC_MCAM_KEY_X4 2
+
+#define NIX_INTFX_RX(a) (0x0ull | (a) << 1)
+#define NIX_INTFX_TX(a) (0x1ull | (a) << 1)
+
+/* Default interfaces are NIX0_RX and NIX0_TX */
+#define NIX_INTF_RX NIX_INTFX_RX(0)
+#define NIX_INTF_TX NIX_INTFX_TX(0)
+
+#define NIX_INTF_TYPE_CGX 0
+#define NIX_INTF_TYPE_LBK 1
+#define NIX_INTF_TYPE_SDP 2
+
+#define MAX_LMAC_PKIND 12
+#define NIX_LINK_CGX_LMAC(a, b) (0 + 4 * (a) + (b))
+#define NIX_LINK_LBK(a) (12 + (a))
+#define NIX_CHAN_CGX_LMAC_CHX(a, b, c) (0x800 + 0x100 * (a) + 0x10 * (b) + (c))
+#define NIX_CHAN_LBK_CHX(a, b) (0 + 0x100 * (a) + (b))
+#define NIX_CHAN_SDP_CH_START (0x700ull)
+#define NIX_CHAN_SDP_CHX(a) (NIX_CHAN_SDP_CH_START + (a))
+#define NIX_CHAN_SDP_NUM_CHANS 256
+#define NIX_CHAN_CPT_CH_START (0x800ull)
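+
+/* For example, channel 3 of LMAC 2 on CGX 1 maps to link
+ * NIX_LINK_CGX_LMAC(1, 2) = 6 and to channel number
+ * NIX_CHAN_CGX_LMAC_CHX(1, 2, 3) = 0x800 + 0x100 + 0x20 + 3 = 0x923.
+ */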
+
+/* The mask is to extract lower 10-bits of channel number
+ * which CPT will pass to X2P.
+ */
+#define NIX_CHAN_CPT_X2P_MASK (0x3ffull)
+
+/* NIX LSO format indices.
+ * As of now TSO is the only user, so indices are assigned statically.
+ */
+#define NIX_LSO_FORMAT_IDX_TSOV4 0
+#define NIX_LSO_FORMAT_IDX_TSOV6 1
+
+/* RSS info */
+#define MAX_RSS_GROUPS 8
+/* Group 0 has to be used in default pkt forwarding MCAM entries
+ * reserved for NIXLFs. Groups 1-7 can be used for RSS for ntuple
+ * filters.
+ */
+#define DEFAULT_RSS_CONTEXT_GROUP 0
+#define MAX_RSS_INDIR_TBL_SIZE 256 /* 1 << Max adder bits */
+
+/* NDC info */
+enum ndc_idx_e {
+ NIX0_RX = 0x0,
+ NIX0_TX = 0x1,
+ NPA0_U = 0x2,
+ NIX1_RX = 0x4,
+ NIX1_TX = 0x5,
+};
+
+enum ndc_ctype_e {
+ CACHING = 0x0,
+ BYPASS = 0x1,
+};
+
+#define NDC_MAX_PORT 6
+#define NDC_READ_TRANS 0
+#define NDC_WRITE_TRANS 1
+
+#endif /* COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
new file mode 100644
index 000000000..697cfec74
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell CN10K RPM driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef LMAC_COMMON_H
+#define LMAC_COMMON_H
+
+#include "rvu.h"
+#include "cgx.h"
+/**
+ * struct lmac - per lmac locks and properties
+ * @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion
+ * @cmd_lock: Lock to serialize the command interface
+ * @resp: command response
+ * @link_info: link related information
+ * @mac_to_index_bmap: Mac address to CGX table index mapping
+ * @rx_fc_pfvf_bmap: Receive flow control enabled netdev mapping
+ * @tx_fc_pfvf_bmap: Transmit flow control enabled netdev mapping
+ * @event_cb: callback for linkchange events
+ * @event_cb_lock: lock for serializing callback with unregister
+ * @cgx: parent cgx port
+ * @mcast_filters_count: Number of multicast filters installed
+ * @lmac_id: lmac port id
+ * @cmd_pend: flag set before new command is started
+ * flag cleared after command response is received
+ * @name: lmac port name
+ */
+struct lmac {
+ wait_queue_head_t wq_cmd_cmplt;
+ /* Lock to serialize the command interface */
+ struct mutex cmd_lock;
+ u64 resp;
+ struct cgx_link_user_info link_info;
+ struct rsrc_bmap mac_to_index_bmap;
+ struct rsrc_bmap rx_fc_pfvf_bmap;
+ struct rsrc_bmap tx_fc_pfvf_bmap;
+ struct cgx_event_cb event_cb;
+ /* lock for serializing callback with unregister */
+ spinlock_t event_cb_lock;
+ struct cgx *cgx;
+ u8 mcast_filters_count;
+ u8 lmac_id;
+ bool cmd_pend;
+ char *name;
+};
+
+/* CGX & RPM have different feature sets;
+ * update the structure fields accordingly.
+ */
+struct mac_ops {
+ char *name;
+	/* CSRs for features like RXSTAT, TXSTAT and DMAC FILTER differ by a
+	 * fixed BAR offset, for example:
+ * CGX DMAC_CTL0 0x1f8
+ * RPM DMAC_CTL0 0x4ff8
+ */
+ u64 csr_offset;
+	/* There is no dedicated interrupt defined for ATF to send events to
+	 * the kernel, hence CGX uses the OVERFLOW bit in CMR_INT. The RPM
+	 * block supports SW_INT, so ATF triggers this interrupt after
+	 * processing the requested command.
+ */
+ u64 int_register;
+ u64 int_set_reg;
+	/* lmac offset is different in RPM */
+ u8 lmac_offset;
+ u8 irq_offset;
+ u8 int_ena_bit;
+ u8 lmac_fwi;
+ u32 fifo_len;
+ bool non_contiguous_serdes_lane;
+	/* RPM & CGX differ in the number of receive/transmit stats */
+ u8 rx_stats_cnt;
+ u8 tx_stats_cnt;
+	/* In case of RPM, get the number of lmacs from
+	 * RPMX_CMR_RX_LMACS[LMAC_EXIST]; the number of set bits in LMAC_EXIST
+	 * gives the number of lmacs.
+ */
+ int (*get_nr_lmacs)(void *cgx);
+ u8 (*get_lmac_type)(void *cgx, int lmac_id);
+ u32 (*lmac_fifo_len)(void *cgx, int lmac_id);
+ int (*mac_lmac_intl_lbk)(void *cgx, int lmac_id,
+ bool enable);
+ /* Register Stats related functions */
+ int (*mac_get_rx_stats)(void *cgx, int lmac_id,
+ int idx, u64 *rx_stat);
+ int (*mac_get_tx_stats)(void *cgx, int lmac_id,
+ int idx, u64 *tx_stat);
+
+ /* Enable LMAC Pause Frame Configuration */
+ void (*mac_enadis_rx_pause_fwding)(void *cgxd,
+ int lmac_id,
+ bool enable);
+
+ int (*mac_get_pause_frm_status)(void *cgxd,
+ int lmac_id,
+ u8 *tx_pause,
+ u8 *rx_pause);
+
+ int (*mac_enadis_pause_frm)(void *cgxd,
+ int lmac_id,
+ u8 tx_pause,
+ u8 rx_pause);
+
+ void (*mac_pause_frm_config)(void *cgxd,
+ int lmac_id,
+ bool enable);
+
+ /* Enable/Disable Inbound PTP */
+ void (*mac_enadis_ptp_config)(void *cgxd,
+ int lmac_id,
+ bool enable);
+
+ int (*mac_rx_tx_enable)(void *cgxd, int lmac_id, bool enable);
+ int (*mac_tx_enable)(void *cgxd, int lmac_id, bool enable);
+ int (*pfc_config)(void *cgxd, int lmac_id,
+ u8 tx_pause, u8 rx_pause, u16 pfc_en);
+
+ int (*mac_get_pfc_frm_cfg)(void *cgxd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause);
+
+};
+
+struct cgx {
+ void __iomem *reg_base;
+ struct pci_dev *pdev;
+ u8 cgx_id;
+ u8 lmac_count;
+ /* number of LMACs per MAC could be 4 or 8 */
+ u8 max_lmac_per_mac;
+#define MAX_LMAC_COUNT 8
+ struct lmac *lmac_idmap[MAX_LMAC_COUNT];
+ struct work_struct cgx_cmd_work;
+ struct workqueue_struct *cgx_cmd_workq;
+ struct list_head cgx_list;
+ u64 hw_features;
+ struct mac_ops *mac_ops;
+ unsigned long lmac_bmap; /* bitmap of enabled lmacs */
+ /* Lock to serialize read/write of global csrs like
+ * RPMX_MTI_STAT_DATA_HI_CDC etc
+ */
+ struct mutex lock;
+};
+
+typedef struct cgx rpm_t;
+
+/* Function Declarations */
+void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val);
+u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset);
+struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx);
+int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac);
+int cgx_fwi_cmd_generic(u64 req, u64 *resp, struct cgx *cgx, int lmac_id);
+bool is_lmac_valid(struct cgx *cgx, int lmac_id);
+struct mac_ops *rpm_get_mac_ops(void);
+
+#endif /* LMAC_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
new file mode 100644
index 000000000..9690ac01f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -0,0 +1,416 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+
+#include "rvu_reg.h"
+#include "mbox.h"
+#include "rvu_trace.h"
+
+static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+
+void __otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct mbox_hdr *tx_hdr, *rx_hdr;
+ void *hw_mbase = mdev->hwbase;
+
+ tx_hdr = hw_mbase + mbox->tx_start;
+ rx_hdr = hw_mbase + mbox->rx_start;
+
+ mdev->msg_size = 0;
+ mdev->rsp_size = 0;
+ tx_hdr->num_msgs = 0;
+ tx_hdr->msg_size = 0;
+ rx_hdr->num_msgs = 0;
+ rx_hdr->msg_size = 0;
+}
+EXPORT_SYMBOL(__otx2_mbox_reset);
+
+void otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+
+ spin_lock(&mdev->mbox_lock);
+ __otx2_mbox_reset(mbox, devid);
+ spin_unlock(&mdev->mbox_lock);
+}
+EXPORT_SYMBOL(otx2_mbox_reset);
+
+void otx2_mbox_destroy(struct otx2_mbox *mbox)
+{
+ mbox->reg_base = NULL;
+ mbox->hwbase = NULL;
+
+ kfree(mbox->dev);
+ mbox->dev = NULL;
+}
+EXPORT_SYMBOL(otx2_mbox_destroy);
+
+static int otx2_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ void *reg_base, int direction, int ndevs)
+{
+ switch (direction) {
+ case MBOX_DIR_AFPF:
+ case MBOX_DIR_PFVF:
+ mbox->tx_start = MBOX_DOWN_TX_START;
+ mbox->rx_start = MBOX_DOWN_RX_START;
+ mbox->tx_size = MBOX_DOWN_TX_SIZE;
+ mbox->rx_size = MBOX_DOWN_RX_SIZE;
+ break;
+ case MBOX_DIR_PFAF:
+ case MBOX_DIR_VFPF:
+ mbox->tx_start = MBOX_DOWN_RX_START;
+ mbox->rx_start = MBOX_DOWN_TX_START;
+ mbox->tx_size = MBOX_DOWN_RX_SIZE;
+ mbox->rx_size = MBOX_DOWN_TX_SIZE;
+ break;
+ case MBOX_DIR_AFPF_UP:
+ case MBOX_DIR_PFVF_UP:
+ mbox->tx_start = MBOX_UP_TX_START;
+ mbox->rx_start = MBOX_UP_RX_START;
+ mbox->tx_size = MBOX_UP_TX_SIZE;
+ mbox->rx_size = MBOX_UP_RX_SIZE;
+ break;
+ case MBOX_DIR_PFAF_UP:
+ case MBOX_DIR_VFPF_UP:
+ mbox->tx_start = MBOX_UP_RX_START;
+ mbox->rx_start = MBOX_UP_TX_START;
+ mbox->tx_size = MBOX_UP_RX_SIZE;
+ mbox->rx_size = MBOX_UP_TX_SIZE;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ switch (direction) {
+ case MBOX_DIR_AFPF:
+ case MBOX_DIR_AFPF_UP:
+ mbox->trigger = RVU_AF_AFPF_MBOX0;
+ mbox->tr_shift = 4;
+ break;
+ case MBOX_DIR_PFAF:
+ case MBOX_DIR_PFAF_UP:
+ mbox->trigger = RVU_PF_PFAF_MBOX1;
+ mbox->tr_shift = 0;
+ break;
+ case MBOX_DIR_PFVF:
+ case MBOX_DIR_PFVF_UP:
+ mbox->trigger = RVU_PF_VFX_PFVF_MBOX0;
+ mbox->tr_shift = 12;
+ break;
+ case MBOX_DIR_VFPF:
+ case MBOX_DIR_VFPF_UP:
+ mbox->trigger = RVU_VF_VFPF_MBOX1;
+ mbox->tr_shift = 0;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ mbox->reg_base = reg_base;
+ mbox->pdev = pdev;
+
+ mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL);
+ if (!mbox->dev) {
+ otx2_mbox_destroy(mbox);
+ return -ENOMEM;
+ }
+ mbox->ndevs = ndevs;
+
+ return 0;
+}
+
+int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
+ void *reg_base, int direction, int ndevs)
+{
+ struct otx2_mbox_dev *mdev;
+ int devid, err;
+
+ err = otx2_mbox_setup(mbox, pdev, reg_base, direction, ndevs);
+ if (err)
+ return err;
+
+ mbox->hwbase = hwbase;
+
+ for (devid = 0; devid < ndevs; devid++) {
+ mdev = &mbox->dev[devid];
+ mdev->mbase = mbox->hwbase + (devid * MBOX_SIZE);
+ mdev->hwbase = mdev->mbase;
+ spin_lock_init(&mdev->mbox_lock);
+ /* Init header to reset value */
+ otx2_mbox_reset(mbox, devid);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(otx2_mbox_init);
+
+/* Initialize mailbox with the set of mailbox region addresses
+ * in the array hwbase.
+ */
+int otx2_mbox_regions_init(struct otx2_mbox *mbox, void **hwbase,
+ struct pci_dev *pdev, void *reg_base,
+ int direction, int ndevs, unsigned long *pf_bmap)
+{
+ struct otx2_mbox_dev *mdev;
+ int devid, err;
+
+ err = otx2_mbox_setup(mbox, pdev, reg_base, direction, ndevs);
+ if (err)
+ return err;
+
+ mbox->hwbase = hwbase[0];
+
+ for (devid = 0; devid < ndevs; devid++) {
+ if (!test_bit(devid, pf_bmap))
+ continue;
+
+ mdev = &mbox->dev[devid];
+ mdev->mbase = hwbase[devid];
+ mdev->hwbase = hwbase[devid];
+ spin_lock_init(&mdev->mbox_lock);
+ /* Init header to reset value */
+ otx2_mbox_reset(mbox, devid);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(otx2_mbox_regions_init);
+
+int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT);
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct device *sender = &mbox->pdev->dev;
+
+ while (!time_after(jiffies, timeout)) {
+ if (mdev->num_msgs == mdev->msgs_acked)
+ return 0;
+ usleep_range(800, 1000);
+ }
+ dev_dbg(sender, "timed out while waiting for rsp\n");
+ return -EIO;
+}
+EXPORT_SYMBOL(otx2_mbox_wait_for_rsp);
+
+int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ unsigned long timeout = jiffies + 1 * HZ;
+
+ while (!time_after(jiffies, timeout)) {
+ if (mdev->num_msgs == mdev->msgs_acked)
+ return 0;
+ cpu_relax();
+ }
+ return -EIO;
+}
+EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);
+
+void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct mbox_hdr *tx_hdr, *rx_hdr;
+ void *hw_mbase = mdev->hwbase;
+
+ tx_hdr = hw_mbase + mbox->tx_start;
+ rx_hdr = hw_mbase + mbox->rx_start;
+
+ /* If bounce buffer is implemented copy mbox messages from
+ * bounce buffer to hw mbox memory.
+ */
+ if (mdev->mbase != hw_mbase)
+ memcpy(hw_mbase + mbox->tx_start + msgs_offset,
+ mdev->mbase + mbox->tx_start + msgs_offset,
+ mdev->msg_size);
+
+ spin_lock(&mdev->mbox_lock);
+
+ tx_hdr->msg_size = mdev->msg_size;
+
+ /* Reset header for next messages */
+ mdev->msg_size = 0;
+ mdev->rsp_size = 0;
+ mdev->msgs_acked = 0;
+
+ /* Sync mbox data into memory */
+ smp_wmb();
+
+ /* num_msgs != 0 signals to the peer that the buffer has a number of
+ * messages. So this should be written after writing all the messages
+ * to the shared memory.
+ */
+ tx_hdr->num_msgs = mdev->num_msgs;
+ rx_hdr->num_msgs = 0;
+
+ trace_otx2_msg_send(mbox->pdev, tx_hdr->num_msgs, tx_hdr->msg_size);
+
+ spin_unlock(&mdev->mbox_lock);
+
+ /* The interrupt should be fired after num_msgs is written
+ * to the shared memory
+ */
+ writeq(1, (void __iomem *)mbox->reg_base +
+ (mbox->trigger | (devid << mbox->tr_shift)));
+}
+EXPORT_SYMBOL(otx2_mbox_msg_send);
+
+struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
+ int size, int size_rsp)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct mbox_msghdr *msghdr = NULL;
+
+ spin_lock(&mdev->mbox_lock);
+ size = ALIGN(size, MBOX_MSG_ALIGN);
+ size_rsp = ALIGN(size_rsp, MBOX_MSG_ALIGN);
+ /* Check if there is space in mailbox */
+ if ((mdev->msg_size + size) > mbox->tx_size - msgs_offset)
+ goto exit;
+ if ((mdev->rsp_size + size_rsp) > mbox->rx_size - msgs_offset)
+ goto exit;
+
+ if (mdev->msg_size == 0)
+ mdev->num_msgs = 0;
+ mdev->num_msgs++;
+
+ msghdr = mdev->mbase + mbox->tx_start + msgs_offset + mdev->msg_size;
+
+ /* Clear the whole msg region */
+ memset(msghdr, 0, size);
+ /* Init message header with reset values */
+ msghdr->ver = OTX2_MBOX_VERSION;
+ mdev->msg_size += size;
+ mdev->rsp_size += size_rsp;
+ msghdr->next_msgoff = mdev->msg_size + msgs_offset;
+exit:
+ spin_unlock(&mdev->mbox_lock);
+
+ return msghdr;
+}
+EXPORT_SYMBOL(otx2_mbox_alloc_msg_rsp);
+
+struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
+ struct mbox_msghdr *msg)
+{
+ unsigned long imsg = mbox->tx_start + msgs_offset;
+ unsigned long irsp = mbox->rx_start + msgs_offset;
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ u16 msgs;
+
+ spin_lock(&mdev->mbox_lock);
+
+ if (mdev->num_msgs != mdev->msgs_acked)
+ goto error;
+
+ for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
+ struct mbox_msghdr *pmsg = mdev->mbase + imsg;
+ struct mbox_msghdr *prsp = mdev->mbase + irsp;
+
+ if (msg == pmsg) {
+ if (pmsg->id != prsp->id)
+ goto error;
+ spin_unlock(&mdev->mbox_lock);
+ return prsp;
+ }
+
+ imsg = mbox->tx_start + pmsg->next_msgoff;
+ irsp = mbox->rx_start + prsp->next_msgoff;
+ }
+
+error:
+ spin_unlock(&mdev->mbox_lock);
+ return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL(otx2_mbox_get_rsp);
+
+int otx2_mbox_check_rsp_msgs(struct otx2_mbox *mbox, int devid)
+{
+ unsigned long ireq = mbox->tx_start + msgs_offset;
+ unsigned long irsp = mbox->rx_start + msgs_offset;
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ int rc = -ENODEV;
+ u16 msgs;
+
+ spin_lock(&mdev->mbox_lock);
+
+ if (mdev->num_msgs != mdev->msgs_acked)
+ goto exit;
+
+ for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
+ struct mbox_msghdr *preq = mdev->mbase + ireq;
+ struct mbox_msghdr *prsp = mdev->mbase + irsp;
+
+ if (preq->id != prsp->id) {
+ trace_otx2_msg_check(mbox->pdev, preq->id,
+ prsp->id, prsp->rc);
+ goto exit;
+ }
+ if (prsp->rc) {
+ rc = prsp->rc;
+ trace_otx2_msg_check(mbox->pdev, preq->id,
+ prsp->id, prsp->rc);
+ goto exit;
+ }
+
+ ireq = mbox->tx_start + preq->next_msgoff;
+ irsp = mbox->rx_start + prsp->next_msgoff;
+ }
+ rc = 0;
+exit:
+ spin_unlock(&mdev->mbox_lock);
+ return rc;
+}
+EXPORT_SYMBOL(otx2_mbox_check_rsp_msgs);
+
+int
+otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, u16 pcifunc, u16 id)
+{
+ struct msg_rsp *rsp;
+
+ rsp = (struct msg_rsp *)
+ otx2_mbox_alloc_msg(mbox, devid, sizeof(*rsp));
+ if (!rsp)
+ return -ENOMEM;
+ rsp->hdr.id = id;
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
+ rsp->hdr.rc = MBOX_MSG_INVALID;
+ rsp->hdr.pcifunc = pcifunc;
+ return 0;
+}
+EXPORT_SYMBOL(otx2_reply_invalid_msg);
+
+bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ bool ret;
+
+ spin_lock(&mdev->mbox_lock);
+ ret = mdev->num_msgs != 0;
+ spin_unlock(&mdev->mbox_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(otx2_mbox_nonempty);
+
+const char *otx2_mbox_id2name(u16 id)
+{
+ switch (id) {
+#define M(_name, _id, _1, _2, _3) case _id: return # _name;
+ MBOX_MESSAGES
+#undef M
+ default:
+ return "INVALID ID";
+ }
+}
+EXPORT_SYMBOL(otx2_mbox_id2name);
+
+MODULE_AUTHOR("Marvell.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
new file mode 100644
index 000000000..03ebabd61
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -0,0 +1,2153 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#ifndef MBOX_H
+#define MBOX_H
+
+#include <linux/etherdevice.h>
+#include <linux/sizes.h>
+
+#include "rvu_struct.h"
+#include "common.h"
+
+#define MBOX_SIZE SZ_64K
+
+/* AF/PF mbox: PF initiated, PF/VF mbox: VF initiated */
+#define MBOX_DOWN_RX_START 0
+#define MBOX_DOWN_RX_SIZE (46 * SZ_1K)
+#define MBOX_DOWN_TX_START (MBOX_DOWN_RX_START + MBOX_DOWN_RX_SIZE)
+#define MBOX_DOWN_TX_SIZE (16 * SZ_1K)
+/* AF/PF mbox: AF initiated, PF/VF mbox: PF initiated */
+#define MBOX_UP_RX_START (MBOX_DOWN_TX_START + MBOX_DOWN_TX_SIZE)
+#define MBOX_UP_RX_SIZE SZ_1K
+#define MBOX_UP_TX_START (MBOX_UP_RX_START + MBOX_UP_RX_SIZE)
+#define MBOX_UP_TX_SIZE SZ_1K
+
+#if MBOX_UP_TX_SIZE + MBOX_UP_TX_START != MBOX_SIZE
+# error "incorrect mailbox area sizes"
+#endif
+
+#define INTR_MASK(pfvfs) ((pfvfs < 64) ? (BIT_ULL(pfvfs) - 1) : (~0ull))
+
+#define MBOX_RSP_TIMEOUT 6000 /* Time(ms) to wait for mbox response */
+
+#define MBOX_MSG_ALIGN 16 /* Align mbox msg start to 16bytes */
+
+/* Mailbox directions */
+#define MBOX_DIR_AFPF 0 /* AF replies to PF */
+#define MBOX_DIR_PFAF 1 /* PF sends messages to AF */
+#define MBOX_DIR_PFVF 2 /* PF replies to VF */
+#define MBOX_DIR_VFPF 3 /* VF sends messages to PF */
+#define MBOX_DIR_AFPF_UP 4 /* AF sends messages to PF */
+#define MBOX_DIR_PFAF_UP 5 /* PF replies to AF */
+#define MBOX_DIR_PFVF_UP 6 /* PF sends messages to VF */
+#define MBOX_DIR_VFPF_UP 7 /* VF replies to PF */
+
+struct otx2_mbox_dev {
+ void *mbase; /* This dev's mbox region */
+ void *hwbase;
+ spinlock_t mbox_lock;
+ u16 msg_size; /* Total msg size to be sent */
+	u16 rsp_size; /* Total expected rsp size, to ensure replies fit */
+ u16 num_msgs; /* No of msgs sent or waiting for response */
+ u16 msgs_acked; /* No of msgs for which response is received */
+};
+
+struct otx2_mbox {
+ struct pci_dev *pdev;
+ void *hwbase; /* Mbox region advertised by HW */
+ void *reg_base;/* CSR base for this dev */
+ u64 trigger; /* Trigger mbox notification */
+ u16 tr_shift; /* Mbox trigger shift */
+ u64 rx_start; /* Offset of Rx region in mbox memory */
+ u64 tx_start; /* Offset of Tx region in mbox memory */
+ u16 rx_size; /* Size of Rx region */
+ u16 tx_size; /* Size of Tx region */
+ u16 ndevs; /* The number of peers */
+ struct otx2_mbox_dev *dev;
+};
+
+/* Header which precedes all mbox messages */
+struct mbox_hdr {
+ u64 msg_size; /* Total msgs size embedded */
+ u16 num_msgs; /* No of msgs embedded */
+};
+
+/* Header which precedes every msg and is also part of it */
+struct mbox_msghdr {
+ u16 pcifunc; /* Who's sending this msg */
+ u16 id; /* Mbox message ID */
+#define OTX2_MBOX_REQ_SIG (0xdead)
+#define OTX2_MBOX_RSP_SIG (0xbeef)
+ u16 sig; /* Signature, for validating corrupted msgs */
+#define OTX2_MBOX_VERSION (0x000a)
+ u16 ver; /* Version of msg's structure for this ID */
+ u16 next_msgoff; /* Offset of next msg within mailbox region */
+ int rc; /* Msg process'ed response code */
+};
+
+void otx2_mbox_reset(struct otx2_mbox *mbox, int devid);
+void __otx2_mbox_reset(struct otx2_mbox *mbox, int devid);
+void otx2_mbox_destroy(struct otx2_mbox *mbox);
+int otx2_mbox_init(struct otx2_mbox *mbox, void __force *hwbase,
+ struct pci_dev *pdev, void __force *reg_base,
+ int direction, int ndevs);
+
+int otx2_mbox_regions_init(struct otx2_mbox *mbox, void __force **hwbase,
+ struct pci_dev *pdev, void __force *reg_base,
+ int direction, int ndevs, unsigned long *bmap);
+void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid);
+int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid);
+int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid);
+struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
+ int size, int size_rsp);
+struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
+ struct mbox_msghdr *msg);
+int otx2_mbox_check_rsp_msgs(struct otx2_mbox *mbox, int devid);
+int otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid,
+ u16 pcifunc, u16 id);
+bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid);
+const char *otx2_mbox_id2name(u16 id);
+static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
+ int devid, int size)
+{
+ return otx2_mbox_alloc_msg_rsp(mbox, devid, size, 0);
+}
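+
+/* Illustrative sketch (hypothetical caller, not part of this header): the
+ * usual request/response flow over a PF->AF mailbox using the helpers above:
+ *
+ *	struct msg_req *req;
+ *	struct mbox_msghdr *rsp;
+ *
+ *	req = (struct msg_req *)otx2_mbox_alloc_msg(mbox, 0, sizeof(*req));
+ *	if (!req)
+ *		return -ENOMEM;
+ *	req->hdr.id = MBOX_MSG_NIX_LF_START_RX;
+ *	req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ *	otx2_mbox_msg_send(mbox, 0);
+ *	if (!otx2_mbox_wait_for_rsp(mbox, 0))
+ *		rsp = otx2_mbox_get_rsp(mbox, 0, &req->hdr);
+ */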
+
+/* Mailbox message types */
+#define MBOX_MSG_MASK 0xFFFF
+#define MBOX_MSG_INVALID 0xFFFE
+#define MBOX_MSG_MAX 0xFFFF
+
+#define MBOX_MESSAGES \
+/* Generic mbox IDs (range 0x000 - 0x1FF) */ \
+M(READY, 0x001, ready, msg_req, ready_msg_rsp) \
+M(ATTACH_RESOURCES, 0x002, attach_resources, rsrc_attach, msg_rsp) \
+M(DETACH_RESOURCES, 0x003, detach_resources, rsrc_detach, msg_rsp) \
+M(FREE_RSRC_CNT, 0x004, free_rsrc_cnt, msg_req, free_rsrcs_rsp) \
+M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \
+M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
+M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp) \
+M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \
+M(LMTST_TBL_SETUP, 0x00a, lmtst_tbl_setup, lmtst_tbl_setup_req, \
+ msg_rsp) \
+M(SET_VF_PERM, 0x00b, set_vf_perm, set_vf_perm, msg_rsp) \
+/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
+M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \
+M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp) \
+M(CGX_STATS, 0x202, cgx_stats, msg_req, cgx_stats_rsp) \
+M(CGX_MAC_ADDR_SET, 0x203, cgx_mac_addr_set, cgx_mac_addr_set_or_get, \
+ cgx_mac_addr_set_or_get) \
+M(CGX_MAC_ADDR_GET, 0x204, cgx_mac_addr_get, cgx_mac_addr_set_or_get, \
+ cgx_mac_addr_set_or_get) \
+M(CGX_PROMISC_ENABLE, 0x205, cgx_promisc_enable, msg_req, msg_rsp) \
+M(CGX_PROMISC_DISABLE, 0x206, cgx_promisc_disable, msg_req, msg_rsp) \
+M(CGX_START_LINKEVENTS, 0x207, cgx_start_linkevents, msg_req, msg_rsp) \
+M(CGX_STOP_LINKEVENTS, 0x208, cgx_stop_linkevents, msg_req, msg_rsp) \
+M(CGX_GET_LINKINFO, 0x209, cgx_get_linkinfo, msg_req, cgx_link_info_msg) \
+M(CGX_INTLBK_ENABLE, 0x20A, cgx_intlbk_enable, msg_req, msg_rsp) \
+M(CGX_INTLBK_DISABLE, 0x20B, cgx_intlbk_disable, msg_req, msg_rsp) \
+M(CGX_PTP_RX_ENABLE, 0x20C, cgx_ptp_rx_enable, msg_req, msg_rsp) \
+M(CGX_PTP_RX_DISABLE, 0x20D, cgx_ptp_rx_disable, msg_req, msg_rsp) \
+M(CGX_CFG_PAUSE_FRM, 0x20E, cgx_cfg_pause_frm, cgx_pause_frm_cfg, \
+ cgx_pause_frm_cfg) \
+M(CGX_FW_DATA_GET, 0x20F, cgx_get_aux_link_info, msg_req, cgx_fw_data) \
+M(CGX_FEC_SET, 0x210, cgx_set_fec_param, fec_mode, fec_mode) \
+M(CGX_MAC_ADDR_ADD, 0x211, cgx_mac_addr_add, cgx_mac_addr_add_req, \
+ cgx_mac_addr_add_rsp) \
+M(CGX_MAC_ADDR_DEL, 0x212, cgx_mac_addr_del, cgx_mac_addr_del_req, \
+ msg_rsp) \
+M(CGX_MAC_MAX_ENTRIES_GET, 0x213, cgx_mac_max_entries_get, msg_req, \
+ cgx_max_dmac_entries_get_rsp) \
+M(CGX_FEC_STATS, 0x217, cgx_fec_stats, msg_req, cgx_fec_stats_rsp) \
+M(CGX_SET_LINK_MODE, 0x218, cgx_set_link_mode, cgx_set_link_mode_req,\
+ cgx_set_link_mode_rsp) \
+M(CGX_GET_PHY_FEC_STATS, 0x219, cgx_get_phy_fec_stats, msg_req, msg_rsp) \
+M(CGX_FEATURES_GET, 0x21B, cgx_features_get, msg_req, \
+ cgx_features_info_msg) \
+M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp) \
+M(CGX_MAC_ADDR_RESET, 0x21D, cgx_mac_addr_reset, cgx_mac_addr_reset_req, \
+ msg_rsp) \
+M(CGX_MAC_ADDR_UPDATE, 0x21E, cgx_mac_addr_update, cgx_mac_addr_update_req, \
+ cgx_mac_addr_update_rsp) \
+M(CGX_PRIO_FLOW_CTRL_CFG, 0x21F, cgx_prio_flow_ctrl_cfg, cgx_pfc_cfg, \
+ cgx_pfc_rsp) \
+/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
+M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \
+ npa_lf_alloc_req, npa_lf_alloc_rsp) \
+M(NPA_LF_FREE, 0x401, npa_lf_free, msg_req, msg_rsp) \
+M(NPA_AQ_ENQ, 0x402, npa_aq_enq, npa_aq_enq_req, npa_aq_enq_rsp) \
+M(NPA_HWCTX_DISABLE, 0x403, npa_hwctx_disable, hwctx_disable_req, msg_rsp)\
+/* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */ \
+/* TIM mbox IDs (range 0x800 - 0x9FF) */ \
+/* CPT mbox IDs (range 0xA00 - 0xBFF) */ \
+M(CPT_LF_ALLOC, 0xA00, cpt_lf_alloc, cpt_lf_alloc_req_msg, \
+ msg_rsp) \
+M(CPT_LF_FREE, 0xA01, cpt_lf_free, msg_req, msg_rsp) \
+M(CPT_RD_WR_REGISTER, 0xA02, cpt_rd_wr_register, cpt_rd_wr_reg_msg, \
+ cpt_rd_wr_reg_msg) \
+M(CPT_INLINE_IPSEC_CFG, 0xA04, cpt_inline_ipsec_cfg, \
+ cpt_inline_ipsec_cfg_msg, msg_rsp) \
+M(CPT_STATS, 0xA05, cpt_sts, cpt_sts_req, cpt_sts_rsp) \
+M(CPT_RXC_TIME_CFG, 0xA06, cpt_rxc_time_cfg, cpt_rxc_time_cfg_req, \
+ msg_rsp) \
+M(CPT_CTX_CACHE_SYNC, 0xA07, cpt_ctx_cache_sync, msg_req, msg_rsp) \
+/* SDP mbox IDs (range 0x1000 - 0x11FF) */ \
+M(SET_SDP_CHAN_INFO, 0x1000, set_sdp_chan_info, sdp_chan_info_msg, msg_rsp) \
+M(GET_SDP_CHAN_INFO, 0x1001, get_sdp_chan_info, msg_req, sdp_get_chan_info_msg) \
+/* NPC mbox IDs (range 0x6000 - 0x7FFF) */ \
+M(NPC_MCAM_ALLOC_ENTRY, 0x6000, npc_mcam_alloc_entry, npc_mcam_alloc_entry_req,\
+ npc_mcam_alloc_entry_rsp) \
+M(NPC_MCAM_FREE_ENTRY, 0x6001, npc_mcam_free_entry, \
+ npc_mcam_free_entry_req, msg_rsp) \
+M(NPC_MCAM_WRITE_ENTRY, 0x6002, npc_mcam_write_entry, \
+ npc_mcam_write_entry_req, msg_rsp) \
+M(NPC_MCAM_ENA_ENTRY, 0x6003, npc_mcam_ena_entry, \
+ npc_mcam_ena_dis_entry_req, msg_rsp) \
+M(NPC_MCAM_DIS_ENTRY, 0x6004, npc_mcam_dis_entry, \
+ npc_mcam_ena_dis_entry_req, msg_rsp) \
+M(NPC_MCAM_SHIFT_ENTRY, 0x6005, npc_mcam_shift_entry, npc_mcam_shift_entry_req,\
+ npc_mcam_shift_entry_rsp) \
+M(NPC_MCAM_ALLOC_COUNTER, 0x6006, npc_mcam_alloc_counter, \
+ npc_mcam_alloc_counter_req, \
+ npc_mcam_alloc_counter_rsp) \
+M(NPC_MCAM_FREE_COUNTER, 0x6007, npc_mcam_free_counter, \
+ npc_mcam_oper_counter_req, msg_rsp) \
+M(NPC_MCAM_UNMAP_COUNTER, 0x6008, npc_mcam_unmap_counter, \
+ npc_mcam_unmap_counter_req, msg_rsp) \
+M(NPC_MCAM_CLEAR_COUNTER, 0x6009, npc_mcam_clear_counter, \
+ npc_mcam_oper_counter_req, msg_rsp) \
+M(NPC_MCAM_COUNTER_STATS, 0x600a, npc_mcam_counter_stats, \
+ npc_mcam_oper_counter_req, \
+ npc_mcam_oper_counter_rsp) \
+M(NPC_MCAM_ALLOC_AND_WRITE_ENTRY, 0x600b, npc_mcam_alloc_and_write_entry, \
+ npc_mcam_alloc_and_write_entry_req, \
+ npc_mcam_alloc_and_write_entry_rsp) \
+M(NPC_GET_KEX_CFG, 0x600c, npc_get_kex_cfg, \
+ msg_req, npc_get_kex_cfg_rsp) \
+M(NPC_INSTALL_FLOW, 0x600d, npc_install_flow, \
+ npc_install_flow_req, npc_install_flow_rsp) \
+M(NPC_DELETE_FLOW, 0x600e, npc_delete_flow, \
+ npc_delete_flow_req, npc_delete_flow_rsp) \
+M(NPC_MCAM_READ_ENTRY, 0x600f, npc_mcam_read_entry, \
+ npc_mcam_read_entry_req, \
+ npc_mcam_read_entry_rsp) \
+M(NPC_SET_PKIND, 0x6010, npc_set_pkind, \
+ npc_set_pkind, msg_rsp) \
+M(NPC_MCAM_READ_BASE_RULE, 0x6011, npc_read_base_steer_rule, \
+ msg_req, npc_mcam_read_base_rule_rsp) \
+M(NPC_MCAM_GET_STATS, 0x6012, npc_mcam_entry_stats, \
+ npc_mcam_get_stats_req, \
+ npc_mcam_get_stats_rsp) \
+M(NPC_GET_FIELD_HASH_INFO, 0x6013, npc_get_field_hash_info, \
+ npc_get_field_hash_info_req, \
+ npc_get_field_hash_info_rsp) \
+M(NPC_GET_FIELD_STATUS, 0x6014, npc_get_field_status, \
+ npc_get_field_status_req, \
+ npc_get_field_status_rsp) \
+/* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \
+M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc, \
+ nix_lf_alloc_req, nix_lf_alloc_rsp) \
+M(NIX_LF_FREE, 0x8001, nix_lf_free, nix_lf_free_req, msg_rsp) \
+M(NIX_AQ_ENQ, 0x8002, nix_aq_enq, nix_aq_enq_req, nix_aq_enq_rsp) \
+M(NIX_HWCTX_DISABLE, 0x8003, nix_hwctx_disable, \
+ hwctx_disable_req, msg_rsp) \
+M(NIX_TXSCH_ALLOC, 0x8004, nix_txsch_alloc, \
+ nix_txsch_alloc_req, nix_txsch_alloc_rsp) \
+M(NIX_TXSCH_FREE, 0x8005, nix_txsch_free, nix_txsch_free_req, msg_rsp) \
+M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_cfg, nix_txschq_config, \
+ nix_txschq_config) \
+M(NIX_STATS_RST, 0x8007, nix_stats_rst, msg_req, msg_rsp) \
+M(NIX_VTAG_CFG, 0x8008, nix_vtag_cfg, nix_vtag_config, \
+ nix_vtag_config_rsp) \
+M(NIX_RSS_FLOWKEY_CFG, 0x8009, nix_rss_flowkey_cfg, \
+ nix_rss_flowkey_cfg, \
+ nix_rss_flowkey_cfg_rsp) \
+M(NIX_SET_MAC_ADDR, 0x800a, nix_set_mac_addr, nix_set_mac_addr, msg_rsp) \
+M(NIX_SET_RX_MODE, 0x800b, nix_set_rx_mode, nix_rx_mode, msg_rsp) \
+M(NIX_SET_HW_FRS, 0x800c, nix_set_hw_frs, nix_frs_cfg, msg_rsp) \
+M(NIX_LF_START_RX, 0x800d, nix_lf_start_rx, msg_req, msg_rsp) \
+M(NIX_LF_STOP_RX, 0x800e, nix_lf_stop_rx, msg_req, msg_rsp) \
+M(NIX_MARK_FORMAT_CFG, 0x800f, nix_mark_format_cfg, \
+ nix_mark_format_cfg, \
+ nix_mark_format_cfg_rsp) \
+M(NIX_SET_RX_CFG, 0x8010, nix_set_rx_cfg, nix_rx_cfg, msg_rsp) \
+M(NIX_LSO_FORMAT_CFG, 0x8011, nix_lso_format_cfg, \
+ nix_lso_format_cfg, \
+ nix_lso_format_cfg_rsp) \
+M(NIX_LF_PTP_TX_ENABLE, 0x8013, nix_lf_ptp_tx_enable, msg_req, msg_rsp) \
+M(NIX_LF_PTP_TX_DISABLE, 0x8014, nix_lf_ptp_tx_disable, msg_req, msg_rsp) \
+M(NIX_BP_ENABLE, 0x8016, nix_bp_enable, nix_bp_cfg_req, \
+ nix_bp_cfg_rsp) \
+M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp) \
+M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, nix_get_mac_addr_rsp) \
+M(NIX_INLINE_IPSEC_CFG, 0x8019, nix_inline_ipsec_cfg, \
+ nix_inline_ipsec_cfg, msg_rsp) \
+M(NIX_INLINE_IPSEC_LF_CFG, 0x801a, nix_inline_ipsec_lf_cfg, \
+ nix_inline_ipsec_lf_cfg, msg_rsp) \
+M(NIX_CN10K_AQ_ENQ, 0x801b, nix_cn10k_aq_enq, nix_cn10k_aq_enq_req, \
+ nix_cn10k_aq_enq_rsp) \
+M(NIX_GET_HW_INFO, 0x801c, nix_get_hw_info, msg_req, nix_hw_info) \
+M(NIX_BANDPROF_ALLOC, 0x801d, nix_bandprof_alloc, nix_bandprof_alloc_req, \
+ nix_bandprof_alloc_rsp) \
+M(NIX_BANDPROF_FREE, 0x801e, nix_bandprof_free, nix_bandprof_free_req, \
+ msg_rsp) \
+M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \
+ nix_bandprof_get_hwinfo_rsp) \
+/* MCS mbox IDs (range 0xA000 - 0xBFFF) */ \
+M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req, \
+ mcs_alloc_rsrc_rsp) \
+M(MCS_FREE_RESOURCES, 0xa001, mcs_free_resources, mcs_free_rsrc_req, msg_rsp) \
+M(MCS_FLOWID_ENTRY_WRITE, 0xa002, mcs_flowid_entry_write, mcs_flowid_entry_write_req, \
+ msg_rsp) \
+M(MCS_SECY_PLCY_WRITE, 0xa003, mcs_secy_plcy_write, mcs_secy_plcy_write_req, \
+ msg_rsp) \
+M(MCS_RX_SC_CAM_WRITE, 0xa004, mcs_rx_sc_cam_write, mcs_rx_sc_cam_write_req, \
+ msg_rsp) \
+M(MCS_SA_PLCY_WRITE, 0xa005, mcs_sa_plcy_write, mcs_sa_plcy_write_req, \
+ msg_rsp) \
+M(MCS_TX_SC_SA_MAP_WRITE, 0xa006, mcs_tx_sc_sa_map_write, mcs_tx_sc_sa_map, \
+ msg_rsp) \
+M(MCS_RX_SC_SA_MAP_WRITE, 0xa007, mcs_rx_sc_sa_map_write, mcs_rx_sc_sa_map, \
+ msg_rsp) \
+M(MCS_FLOWID_ENA_ENTRY, 0xa008, mcs_flowid_ena_entry, mcs_flowid_ena_dis_entry, \
+ msg_rsp) \
+M(MCS_PN_TABLE_WRITE, 0xa009, mcs_pn_table_write, mcs_pn_table_write_req, \
+ msg_rsp) \
+M(MCS_SET_ACTIVE_LMAC, 0xa00a, mcs_set_active_lmac, mcs_set_active_lmac, \
+ msg_rsp) \
+M(MCS_GET_HW_INFO, 0xa00b, mcs_get_hw_info, msg_req, mcs_hw_info) \
+M(MCS_GET_FLOWID_STATS, 0xa00c, mcs_get_flowid_stats, mcs_stats_req, \
+ mcs_flowid_stats) \
+M(MCS_GET_SECY_STATS, 0xa00d, mcs_get_secy_stats, mcs_stats_req, \
+ mcs_secy_stats) \
+M(MCS_GET_SC_STATS, 0xa00e, mcs_get_sc_stats, mcs_stats_req, mcs_sc_stats) \
+M(MCS_GET_SA_STATS, 0xa00f, mcs_get_sa_stats, mcs_stats_req, mcs_sa_stats) \
+M(MCS_GET_PORT_STATS, 0xa010, mcs_get_port_stats, mcs_stats_req, \
+ mcs_port_stats) \
+M(MCS_CLEAR_STATS, 0xa011, mcs_clear_stats, mcs_clear_stats, msg_rsp) \
+M(MCS_INTR_CFG, 0xa012, mcs_intr_cfg, mcs_intr_cfg, msg_rsp) \
+M(MCS_SET_LMAC_MODE, 0xa013, mcs_set_lmac_mode, mcs_set_lmac_mode, msg_rsp) \
+M(MCS_SET_PN_THRESHOLD, 0xa014, mcs_set_pn_threshold, mcs_set_pn_threshold, \
+ msg_rsp) \
+M(MCS_ALLOC_CTRL_PKT_RULE, 0xa015, mcs_alloc_ctrl_pkt_rule, \
+ mcs_alloc_ctrl_pkt_rule_req, \
+ mcs_alloc_ctrl_pkt_rule_rsp) \
+M(MCS_FREE_CTRL_PKT_RULE, 0xa016, mcs_free_ctrl_pkt_rule, \
+ mcs_free_ctrl_pkt_rule_req, msg_rsp) \
+M(MCS_CTRL_PKT_RULE_WRITE, 0xa017, mcs_ctrl_pkt_rule_write, \
+ mcs_ctrl_pkt_rule_write_req, msg_rsp) \
+M(MCS_PORT_RESET, 0xa018, mcs_port_reset, mcs_port_reset_req, msg_rsp) \
+M(MCS_PORT_CFG_SET, 0xa019, mcs_port_cfg_set, mcs_port_cfg_set_req, msg_rsp)\
+M(MCS_PORT_CFG_GET, 0xa020, mcs_port_cfg_get, mcs_port_cfg_get_req, \
+ mcs_port_cfg_get_rsp) \
+M(MCS_CUSTOM_TAG_CFG_GET, 0xa021, mcs_custom_tag_cfg_get, \
+ mcs_custom_tag_cfg_get_req, \
+ mcs_custom_tag_cfg_get_rsp)
+
+/* Messages initiated by AF (range 0xC00 - 0xEFF) */
+#define MBOX_UP_CGX_MESSAGES \
+M(CGX_LINK_EVENT, 0xC00, cgx_link_event, cgx_link_info_msg, msg_rsp)
+
+#define MBOX_UP_CPT_MESSAGES \
+M(CPT_INST_LMTST, 0xD00, cpt_inst_lmtst, cpt_inst_lmtst_req, msg_rsp)
+
+#define MBOX_UP_MCS_MESSAGES \
+M(MCS_INTR_NOTIFY, 0xE00, mcs_intr_notify, mcs_intr_info, msg_rsp)
+
+enum {
+#define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id,
+MBOX_MESSAGES
+MBOX_UP_CGX_MESSAGES
+MBOX_UP_CPT_MESSAGES
+MBOX_UP_MCS_MESSAGES
+#undef M
+};
+
+/* Mailbox message formats */
+
+#define RVU_DEFAULT_PF_FUNC 0xFFFF
+
+/* Generic request msg used for those mbox messages which
+ * don't send any data in the request.
+ */
+struct msg_req {
+ struct mbox_msghdr hdr;
+};
+
+/* Generic response msg used as an ack or response for those mbox
+ * messages which don't have a specific rsp msg format.
+ */
+struct msg_rsp {
+ struct mbox_msghdr hdr;
+};
+
+/* RVU mailbox error codes
+ * Range 256 - 300.
+ */
+enum rvu_af_status {
+ RVU_INVALID_VF_ID = -256,
+};
+
+struct ready_msg_rsp {
+ struct mbox_msghdr hdr;
+ u16 sclk_freq; /* SCLK frequency (in MHz) */
+ u16 rclk_freq; /* RCLK frequency (in MHz) */
+};
+
+/* Structure for requesting resource provisioning.
+ * The 'modify' flag is to be used when either requesting more of a
+ * certain resource type or detaching part of it.
+ * The rest of the fields specify how many of which type are to
+ * be attached.
+ * To request LFs from two blocks of same type this mailbox
+ * can be sent twice as below:
+ * struct rsrc_attach *attach;
+ * .. Allocate memory for message ..
+ * attach->cptlfs = 3; <3 LFs from CPT0>
+ * .. Send message ..
+ * .. Allocate memory for message ..
+ * attach->modify = 1;
+ * attach->cpt_blkaddr = BLKADDR_CPT1;
+ * attach->cptlfs = 2; <2 LFs from CPT1>
+ * .. Send message ..
+ */
+struct rsrc_attach {
+ struct mbox_msghdr hdr;
+ u8 modify:1;
+ u8 npalf:1;
+ u8 nixlf:1;
+ u16 sso;
+ u16 ssow;
+ u16 timlfs;
+ u16 cptlfs;
+ int cpt_blkaddr; /* BLKADDR_CPT0/BLKADDR_CPT1 or 0 for BLKADDR_CPT0 */
+};
+
+/* Structure for relinquishing resources.
+ * 'partial' flag to be used when relinquishing all resources
+ * but only of a certain type. If not set, all resources of all
+ * types provisioned to the RVU function will be detached.
+ */
+struct rsrc_detach {
+ struct mbox_msghdr hdr;
+ u8 partial:1;
+ u8 npalf:1;
+ u8 nixlf:1;
+ u8 sso:1;
+ u8 ssow:1;
+ u8 timlfs:1;
+ u8 cptlfs:1;
+};
+
+/* Number of resources available to the caller.
+ * In reply to MBOX_MSG_FREE_RSRC_CNT.
+ */
+struct free_rsrcs_rsp {
+ struct mbox_msghdr hdr;
+ u16 schq[NIX_TXSCH_LVL_CNT];
+ u16 sso;
+ u16 tim;
+ u16 ssow;
+ u16 cpt;
+ u8 npa;
+ u8 nix;
+ u16 schq_nix1[NIX_TXSCH_LVL_CNT];
+ u8 nix1;
+ u8 cpt1;
+ u8 ree0;
+ u8 ree1;
+};
+
+#define MSIX_VECTOR_INVALID 0xFFFF
+#define MAX_RVU_BLKLF_CNT 256
+
+struct msix_offset_rsp {
+ struct mbox_msghdr hdr;
+ u16 npa_msixoff;
+ u16 nix_msixoff;
+ u16 sso;
+ u16 ssow;
+ u16 timlfs;
+ u16 cptlfs;
+ u16 sso_msixoff[MAX_RVU_BLKLF_CNT];
+ u16 ssow_msixoff[MAX_RVU_BLKLF_CNT];
+ u16 timlf_msixoff[MAX_RVU_BLKLF_CNT];
+ u16 cptlf_msixoff[MAX_RVU_BLKLF_CNT];
+ u16 cpt1_lfs;
+ u16 ree0_lfs;
+ u16 ree1_lfs;
+ u16 cpt1_lf_msixoff[MAX_RVU_BLKLF_CNT];
+ u16 ree0_lf_msixoff[MAX_RVU_BLKLF_CNT];
+ u16 ree1_lf_msixoff[MAX_RVU_BLKLF_CNT];
+};
+
+struct get_hw_cap_rsp {
+ struct mbox_msghdr hdr;
+ u8 nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */
+ u8 nix_shaping; /* Is shaping and coloring supported */
+ u8 npc_hash_extract; /* Is hash extract supported */
+};
+
+/* CGX mbox message formats */
+
+struct cgx_stats_rsp {
+ struct mbox_msghdr hdr;
+#define CGX_RX_STATS_COUNT 9
+#define CGX_TX_STATS_COUNT 18
+ u64 rx_stats[CGX_RX_STATS_COUNT];
+ u64 tx_stats[CGX_TX_STATS_COUNT];
+};
+
+struct cgx_fec_stats_rsp {
+ struct mbox_msghdr hdr;
+ u64 fec_corr_blks;
+ u64 fec_uncorr_blks;
+};
+/* Structure for requesting the operation for
+ * setting/getting mac address in the CGX interface
+ */
+struct cgx_mac_addr_set_or_get {
+ struct mbox_msghdr hdr;
+ u8 mac_addr[ETH_ALEN];
+ u32 index;
+};
+
+/* Structure for requesting the operation to
+ * add DMAC filter entry into CGX interface
+ */
+struct cgx_mac_addr_add_req {
+ struct mbox_msghdr hdr;
+ u8 mac_addr[ETH_ALEN];
+};
+
+/* Structure for response against the operation to
+ * add DMAC filter entry into CGX interface
+ */
+struct cgx_mac_addr_add_rsp {
+ struct mbox_msghdr hdr;
+ u32 index;
+};
+
+/* Structure for requesting the operation to
+ * delete DMAC filter entry from CGX interface
+ */
+struct cgx_mac_addr_del_req {
+ struct mbox_msghdr hdr;
+ u32 index;
+};
+
+/* Structure for response against the operation to
+ * get maximum supported DMAC filter entries
+ */
+struct cgx_max_dmac_entries_get_rsp {
+ struct mbox_msghdr hdr;
+ u32 max_dmac_filters;
+};
+
+struct cgx_link_user_info {
+ uint64_t link_up:1;
+ uint64_t full_duplex:1;
+ uint64_t lmac_type_id:4;
+ uint64_t speed:20; /* speed in Mbps */
+ uint64_t an:1; /* AN supported or not */
+ uint64_t fec:2; /* FEC type if enabled else 0 */
+#define LMACTYPE_STR_LEN 16
+ char lmac_type[LMACTYPE_STR_LEN];
+};
+
+struct cgx_link_info_msg {
+ struct mbox_msghdr hdr;
+ struct cgx_link_user_info link_info;
+};
+
+struct cgx_pause_frm_cfg {
+ struct mbox_msghdr hdr;
+ u8 set;
+ /* set = 1 if the request is to config pause frames */
+ /* set = 0 if the request is to fetch pause frames config */
+ u8 rx_pause;
+ u8 tx_pause;
+};
+
+enum fec_type {
+ OTX2_FEC_NONE,
+ OTX2_FEC_BASER,
+ OTX2_FEC_RS,
+ OTX2_FEC_STATS_CNT = 2,
+ OTX2_FEC_OFF,
+};
+
+struct fec_mode {
+ struct mbox_msghdr hdr;
+ int fec;
+};
+
+struct sfp_eeprom_s {
+#define SFP_EEPROM_SIZE 256
+ u16 sff_id;
+ u8 buf[SFP_EEPROM_SIZE];
+ u64 reserved;
+};
+
+struct phy_s {
+ struct {
+ u64 can_change_mod_type:1;
+ u64 mod_type:1;
+ u64 has_fec_stats:1;
+ } misc;
+ struct fec_stats_s {
+ u32 rsfec_corr_cws;
+ u32 rsfec_uncorr_cws;
+ u32 brfec_corr_blks;
+ u32 brfec_uncorr_blks;
+ } fec_stats;
+};
+
+struct cgx_lmac_fwdata_s {
+ u16 rw_valid;
+ u64 supported_fec;
+ u64 supported_an;
+ u64 supported_link_modes;
+ /* only applicable if AN is supported */
+ u64 advertised_fec;
+ u64 advertised_link_modes;
+ /* Only applicable if SFP/QSFP slot is present */
+ struct sfp_eeprom_s sfp_eeprom;
+ struct phy_s phy;
+#define LMAC_FWDATA_RESERVED_MEM 1021
+ u64 reserved[LMAC_FWDATA_RESERVED_MEM];
+};
+
+struct cgx_fw_data {
+ struct mbox_msghdr hdr;
+ struct cgx_lmac_fwdata_s fwdata;
+};
+
+struct cgx_set_link_mode_args {
+ u32 speed;
+ u8 duplex;
+ u8 an;
+ u8 ports;
+ u64 mode;
+};
+
+struct cgx_set_link_mode_req {
+#define AUTONEG_UNKNOWN 0xff
+ struct mbox_msghdr hdr;
+ struct cgx_set_link_mode_args args;
+};
+
+struct cgx_set_link_mode_rsp {
+ struct mbox_msghdr hdr;
+ int status;
+};
+
+struct cgx_mac_addr_reset_req {
+ struct mbox_msghdr hdr;
+ u32 index;
+};
+
+struct cgx_mac_addr_update_req {
+ struct mbox_msghdr hdr;
+ u8 mac_addr[ETH_ALEN];
+ u32 index;
+};
+
+struct cgx_mac_addr_update_rsp {
+ struct mbox_msghdr hdr;
+ u32 index;
+};
+
+#define RVU_LMAC_FEAT_FC BIT_ULL(0) /* pause frames */
+#define RVU_LMAC_FEAT_HIGIG2 BIT_ULL(1)
+ /* flow control from physical link higig2 messages */
+#define RVU_LMAC_FEAT_PTP		BIT_ULL(2) /* precision time protocol */
+#define RVU_LMAC_FEAT_DMACF BIT_ULL(3) /* DMAC FILTER */
+#define RVU_MAC_VERSION BIT_ULL(4)
+#define RVU_MAC_CGX BIT_ULL(5)
+#define RVU_MAC_RPM BIT_ULL(6)
+
+struct cgx_features_info_msg {
+ struct mbox_msghdr hdr;
+ u64 lmac_features;
+};
+
+struct rpm_stats_rsp {
+ struct mbox_msghdr hdr;
+#define RPM_RX_STATS_COUNT 43
+#define RPM_TX_STATS_COUNT 34
+ u64 rx_stats[RPM_RX_STATS_COUNT];
+ u64 tx_stats[RPM_TX_STATS_COUNT];
+};
+
+struct cgx_pfc_cfg {
+ struct mbox_msghdr hdr;
+ u8 rx_pause;
+ u8 tx_pause;
+ u16 pfc_en; /* bitmap indicating pfc enabled traffic classes */
+};
+
+struct cgx_pfc_rsp {
+ struct mbox_msghdr hdr;
+ u8 rx_pause;
+ u8 tx_pause;
+};
+
+/* NPC mbox message formats */
+
+struct npc_set_pkind {
+ struct mbox_msghdr hdr;
+#define OTX2_PRIV_FLAGS_DEFAULT BIT_ULL(0)
+#define OTX2_PRIV_FLAGS_CUSTOM BIT_ULL(63)
+ u64 mode;
+#define PKIND_TX BIT_ULL(0)
+#define PKIND_RX BIT_ULL(1)
+ u8 dir;
+	u8 pkind; /* valid only in case of custom flag */
+ u8 var_len_off; /* Offset of custom header length field.
+ * Valid only for pkind NPC_RX_CUSTOM_PRE_L2_PKIND
+ */
+	u8 var_len_off_mask; /* Mask for length within offset */
+ u8 shift_dir; /* shift direction to get length of the header at var_len_off */
+};
+
+/* NPA mbox message formats */
+
+/* NPA mailbox error codes
+ * Range 301 - 400.
+ */
+enum npa_af_status {
+ NPA_AF_ERR_PARAM = -301,
+ NPA_AF_ERR_AQ_FULL = -302,
+ NPA_AF_ERR_AQ_ENQUEUE = -303,
+ NPA_AF_ERR_AF_LF_INVALID = -304,
+ NPA_AF_ERR_AF_LF_ALLOC = -305,
+ NPA_AF_ERR_LF_RESET = -306,
+};
+
+/* For NPA LF context alloc and init */
+struct npa_lf_alloc_req {
+ struct mbox_msghdr hdr;
+ int node;
+ int aura_sz; /* No of auras */
+ u32 nr_pools; /* No of pools */
+ u64 way_mask;
+};
+
+struct npa_lf_alloc_rsp {
+ struct mbox_msghdr hdr;
+ u32 stack_pg_ptrs; /* No of ptrs per stack page */
+ u32 stack_pg_bytes; /* Size of stack page */
+ u16 qints; /* NPA_AF_CONST::QINTS */
+	u8 cache_lines; /* BATCH ALLOC DMA */
+};
+
+/* NPA AQ enqueue msg */
+struct npa_aq_enq_req {
+ struct mbox_msghdr hdr;
+ u32 aura_id;
+ u8 ctype;
+ u8 op;
+ union {
+ /* Valid when op == WRITE/INIT and ctype == AURA.
+ * LF fills the pool_id in aura.pool_addr. AF will translate
+ * the pool_id to pool context pointer.
+ */
+ struct npa_aura_s aura;
+ /* Valid when op == WRITE/INIT and ctype == POOL */
+ struct npa_pool_s pool;
+ };
+ /* Mask data when op == WRITE (1=write, 0=don't write) */
+ union {
+ /* Valid when op == WRITE and ctype == AURA */
+ struct npa_aura_s aura_mask;
+ /* Valid when op == WRITE and ctype == POOL */
+ struct npa_pool_s pool_mask;
+ };
+};
+
+struct npa_aq_enq_rsp {
+ struct mbox_msghdr hdr;
+ union {
+ /* Valid when op == READ and ctype == AURA */
+ struct npa_aura_s aura;
+ /* Valid when op == READ and ctype == POOL */
+ struct npa_pool_s pool;
+ };
+};
+
+/* Disable all contexts of type 'ctype' */
+struct hwctx_disable_req {
+ struct mbox_msghdr hdr;
+ u8 ctype;
+};
+
+/* NIX mbox message formats */
+
+/* NIX mailbox error codes
+ * Range 401 - 500.
+ */
+enum nix_af_status {
+ NIX_AF_ERR_PARAM = -401,
+ NIX_AF_ERR_AQ_FULL = -402,
+ NIX_AF_ERR_AQ_ENQUEUE = -403,
+ NIX_AF_ERR_AF_LF_INVALID = -404,
+ NIX_AF_ERR_AF_LF_ALLOC = -405,
+ NIX_AF_ERR_TLX_ALLOC_FAIL = -406,
+ NIX_AF_ERR_TLX_INVALID = -407,
+ NIX_AF_ERR_RSS_SIZE_INVALID = -408,
+ NIX_AF_ERR_RSS_GRPS_INVALID = -409,
+ NIX_AF_ERR_FRS_INVALID = -410,
+ NIX_AF_ERR_RX_LINK_INVALID = -411,
+ NIX_AF_INVAL_TXSCHQ_CFG = -412,
+ NIX_AF_SMQ_FLUSH_FAILED = -413,
+ NIX_AF_ERR_LF_RESET = -414,
+ NIX_AF_ERR_RSS_NOSPC_FIELD = -415,
+ NIX_AF_ERR_RSS_NOSPC_ALGO = -416,
+ NIX_AF_ERR_MARK_CFG_FAIL = -417,
+ NIX_AF_ERR_LSO_CFG_FAIL = -418,
+ NIX_AF_INVAL_NPA_PF_FUNC = -419,
+ NIX_AF_INVAL_SSO_PF_FUNC = -420,
+ NIX_AF_ERR_TX_VTAG_NOSPC = -421,
+ NIX_AF_ERR_RX_VTAG_INUSE = -422,
+ NIX_AF_ERR_PTP_CONFIG_FAIL = -423,
+ NIX_AF_ERR_NPC_KEY_NOT_SUPP = -424,
+ NIX_AF_ERR_INVALID_NIXBLK = -425,
+ NIX_AF_ERR_INVALID_BANDPROF = -426,
+ NIX_AF_ERR_IPOLICER_NOTSUPP = -427,
+ NIX_AF_ERR_BANDPROF_INVAL_REQ = -428,
+ NIX_AF_ERR_CQ_CTX_WRITE_ERR = -429,
+ NIX_AF_ERR_AQ_CTX_RETRY_WRITE = -430,
+ NIX_AF_ERR_LINK_CREDITS = -431,
+};
+
+/* For NIX RX vtag action */
+enum nix_rx_vtag0_type {
+ NIX_AF_LFX_RX_VTAG_TYPE0, /* reserved for rx vlan offload */
+ NIX_AF_LFX_RX_VTAG_TYPE1,
+ NIX_AF_LFX_RX_VTAG_TYPE2,
+ NIX_AF_LFX_RX_VTAG_TYPE3,
+ NIX_AF_LFX_RX_VTAG_TYPE4,
+ NIX_AF_LFX_RX_VTAG_TYPE5,
+ NIX_AF_LFX_RX_VTAG_TYPE6,
+ NIX_AF_LFX_RX_VTAG_TYPE7,
+};
+
+/* For NIX LF context alloc and init */
+struct nix_lf_alloc_req {
+ struct mbox_msghdr hdr;
+ int node;
+ u32 rq_cnt; /* No of receive queues */
+ u32 sq_cnt; /* No of send queues */
+ u32 cq_cnt; /* No of completion queues */
+ u8 xqe_sz;
+ u16 rss_sz;
+ u8 rss_grps;
+ u16 npa_func;
+ u16 sso_func;
+ u64 rx_cfg; /* See NIX_AF_LF(0..127)_RX_CFG */
+ u64 way_mask;
+#define NIX_LF_RSS_TAG_LSB_AS_ADDER BIT_ULL(0)
+#define NIX_LF_LBK_BLK_SEL BIT_ULL(1)
+ u64 flags;
+};
+
+struct nix_lf_alloc_rsp {
+ struct mbox_msghdr hdr;
+ u16 sqb_size;
+ u16 rx_chan_base;
+ u16 tx_chan_base;
+ u8 rx_chan_cnt; /* total number of RX channels */
+ u8 tx_chan_cnt; /* total number of TX channels */
+ u8 lso_tsov4_idx;
+ u8 lso_tsov6_idx;
+ u8 mac_addr[ETH_ALEN];
+ u8 lf_rx_stats; /* NIX_AF_CONST1::LF_RX_STATS */
+ u8 lf_tx_stats; /* NIX_AF_CONST1::LF_TX_STATS */
+ u16 cints; /* NIX_AF_CONST2::CINTS */
+ u16 qints; /* NIX_AF_CONST2::QINTS */
+ u8 cgx_links; /* No. of CGX links present in HW */
+ u8 lbk_links; /* No. of LBK links present in HW */
+ u8 sdp_links; /* No. of SDP links present in HW */
+ u8 tx_link; /* Transmit channel link number */
+};
+
+struct nix_lf_free_req {
+ struct mbox_msghdr hdr;
+#define NIX_LF_DISABLE_FLOWS BIT_ULL(0)
+#define NIX_LF_DONT_FREE_TX_VTAG BIT_ULL(1)
+ u64 flags;
+};
+
+/* CN10K NIX AQ enqueue msg */
+struct nix_cn10k_aq_enq_req {
+ struct mbox_msghdr hdr;
+ u32 qidx;
+ u8 ctype;
+ u8 op;
+ union {
+ struct nix_cn10k_rq_ctx_s rq;
+ struct nix_cn10k_sq_ctx_s sq;
+ struct nix_cq_ctx_s cq;
+ struct nix_rsse_s rss;
+ struct nix_rx_mce_s mce;
+ struct nix_bandprof_s prof;
+ };
+ union {
+ struct nix_cn10k_rq_ctx_s rq_mask;
+ struct nix_cn10k_sq_ctx_s sq_mask;
+ struct nix_cq_ctx_s cq_mask;
+ struct nix_rsse_s rss_mask;
+ struct nix_rx_mce_s mce_mask;
+ struct nix_bandprof_s prof_mask;
+ };
+};
+
+struct nix_cn10k_aq_enq_rsp {
+ struct mbox_msghdr hdr;
+ union {
+ struct nix_cn10k_rq_ctx_s rq;
+ struct nix_cn10k_sq_ctx_s sq;
+ struct nix_cq_ctx_s cq;
+ struct nix_rsse_s rss;
+ struct nix_rx_mce_s mce;
+ struct nix_bandprof_s prof;
+ };
+};
+
+/* NIX AQ enqueue msg */
+struct nix_aq_enq_req {
+ struct mbox_msghdr hdr;
+ u32 qidx;
+ u8 ctype;
+ u8 op;
+ union {
+ struct nix_rq_ctx_s rq;
+ struct nix_sq_ctx_s sq;
+ struct nix_cq_ctx_s cq;
+ struct nix_rsse_s rss;
+ struct nix_rx_mce_s mce;
+ u64 prof;
+ };
+ union {
+ struct nix_rq_ctx_s rq_mask;
+ struct nix_sq_ctx_s sq_mask;
+ struct nix_cq_ctx_s cq_mask;
+ struct nix_rsse_s rss_mask;
+ struct nix_rx_mce_s mce_mask;
+ u64 prof_mask;
+ };
+};
+
+struct nix_aq_enq_rsp {
+ struct mbox_msghdr hdr;
+ union {
+ struct nix_rq_ctx_s rq;
+ struct nix_sq_ctx_s sq;
+ struct nix_cq_ctx_s cq;
+ struct nix_rsse_s rss;
+ struct nix_rx_mce_s mce;
+ struct nix_bandprof_s prof;
+ };
+};
+
+/* Tx scheduler/shaper mailbox messages */
+
+#define MAX_TXSCHQ_PER_FUNC 128
+
+struct nix_txsch_alloc_req {
+ struct mbox_msghdr hdr;
+ /* Scheduler queue count request at each level */
+ u16 schq_contig[NIX_TXSCH_LVL_CNT]; /* No of contiguous queues */
+ u16 schq[NIX_TXSCH_LVL_CNT]; /* No of non-contiguous queues */
+};
+
+struct nix_txsch_alloc_rsp {
+ struct mbox_msghdr hdr;
+ /* Scheduler queue count allocated at each level */
+ u16 schq_contig[NIX_TXSCH_LVL_CNT];
+ u16 schq[NIX_TXSCH_LVL_CNT];
+ /* Scheduler queue list allocated at each level */
+ u16 schq_contig_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ u16 schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ u8 aggr_level; /* Traffic aggregation scheduler level */
+ u8 aggr_lvl_rr_prio; /* Aggregation lvl's RR_PRIO config */
+ u8 link_cfg_lvl; /* LINKX_CFG CSRs mapped to TL3 or TL2's index ? */
+};
+
+struct nix_txsch_free_req {
+ struct mbox_msghdr hdr;
+#define TXSCHQ_FREE_ALL BIT_ULL(0)
+ u16 flags;
+ /* Scheduler queue level to be freed */
+ u16 schq_lvl;
+ /* List of scheduler queues to be freed */
+ u16 schq;
+};
+
+struct nix_txschq_config {
+ struct mbox_msghdr hdr;
+ u8 lvl; /* SMQ/MDQ/TL4/TL3/TL2/TL1 */
+ u8 read;
+#define TXSCHQ_IDX_SHIFT 16
+#define TXSCHQ_IDX_MASK (BIT_ULL(10) - 1)
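+/* Helper to recover the 10-bit scheduler queue index encoded in a CSR
+ * offset (bits [25:16] when 'shift' is TXSCHQ_IDX_SHIFT).
+ */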
+#define TXSCHQ_IDX(reg, shift) (((reg) >> (shift)) & TXSCHQ_IDX_MASK)
+ u8 num_regs;
+#define MAX_REGS_PER_MBOX_MSG 20
+ u64 reg[MAX_REGS_PER_MBOX_MSG];
+ u64 regval[MAX_REGS_PER_MBOX_MSG];
+ /* All 0's => overwrite with new value */
+ u64 regval_mask[MAX_REGS_PER_MBOX_MSG];
+};
+
+struct nix_vtag_config {
+ struct mbox_msghdr hdr;
+ /* '0' for 4 octet VTAG, '1' for 8 octet VTAG */
+ u8 vtag_size;
+ /* cfg_type is '0' for tx vlan cfg
+ * cfg_type is '1' for rx vlan cfg
+ */
+ u8 cfg_type;
+ union {
+ /* valid when cfg_type is '0' */
+ struct {
+ u64 vtag0;
+ u64 vtag1;
+
+ /* cfg_vtag0 & cfg_vtag1 fields are valid
+ * when free_vtag0 & free_vtag1 are '0's.
+ */
+ /* cfg_vtag0 = 1 to configure vtag0 */
+ u8 cfg_vtag0 :1;
+ /* cfg_vtag1 = 1 to configure vtag1 */
+ u8 cfg_vtag1 :1;
+
+ /* vtag0_idx & vtag1_idx are only valid when
+ * both cfg_vtag0 & cfg_vtag1 are '0's,
+ * these fields are used along with free_vtag0
+ * & free_vtag1 to free the nix lf's tx_vlan
+ * configuration.
+ *
+ * Denotes the indices of tx_vtag def registers
+ * that needs to be cleared and freed.
+ */
+ int vtag0_idx;
+ int vtag1_idx;
+
+ /* free_vtag0 & free_vtag1 fields are valid
+ * when cfg_vtag0 & cfg_vtag1 are '0's.
+ */
+ /* free_vtag0 = 1 clears vtag0 configuration
+ * vtag0_idx denotes the index to be cleared.
+ */
+ u8 free_vtag0 :1;
+ /* free_vtag1 = 1 clears vtag1 configuration
+ * vtag1_idx denotes the index to be cleared.
+ */
+ u8 free_vtag1 :1;
+ } tx;
+
+ /* valid when cfg_type is '1' */
+ struct {
+ /* rx vtag type index, valid values are in 0..7 range */
+ u8 vtag_type;
+ /* rx vtag strip */
+ u8 strip_vtag :1;
+ /* rx vtag capture */
+ u8 capture_vtag :1;
+ } rx;
+ };
+};
+
+struct nix_vtag_config_rsp {
+ struct mbox_msghdr hdr;
+ int vtag0_idx;
+ int vtag1_idx;
+ /* Indices of tx_vtag def registers used to configure
+ * tx vtag0 & vtag1 headers, these indices are valid
+ * when nix_vtag_config mbox requested for vtag0 and/
+ * or vtag1 configuration.
+ */
+};
+
+struct nix_rss_flowkey_cfg {
+ struct mbox_msghdr hdr;
+ int mcam_index; /* MCAM entry index to modify */
+#define NIX_FLOW_KEY_TYPE_PORT BIT(0)
+#define NIX_FLOW_KEY_TYPE_IPV4 BIT(1)
+#define NIX_FLOW_KEY_TYPE_IPV6 BIT(2)
+#define NIX_FLOW_KEY_TYPE_TCP BIT(3)
+#define NIX_FLOW_KEY_TYPE_UDP BIT(4)
+#define NIX_FLOW_KEY_TYPE_SCTP BIT(5)
+#define NIX_FLOW_KEY_TYPE_NVGRE BIT(6)
+#define NIX_FLOW_KEY_TYPE_VXLAN BIT(7)
+#define NIX_FLOW_KEY_TYPE_GENEVE BIT(8)
+#define NIX_FLOW_KEY_TYPE_ETH_DMAC BIT(9)
+#define NIX_FLOW_KEY_TYPE_IPV6_EXT BIT(10)
+#define NIX_FLOW_KEY_TYPE_GTPU BIT(11)
+#define NIX_FLOW_KEY_TYPE_INNR_IPV4 BIT(12)
+#define NIX_FLOW_KEY_TYPE_INNR_IPV6 BIT(13)
+#define NIX_FLOW_KEY_TYPE_INNR_TCP BIT(14)
+#define NIX_FLOW_KEY_TYPE_INNR_UDP BIT(15)
+#define NIX_FLOW_KEY_TYPE_INNR_SCTP BIT(16)
+#define NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC BIT(17)
+#define NIX_FLOW_KEY_TYPE_VLAN BIT(20)
+#define NIX_FLOW_KEY_TYPE_IPV4_PROTO BIT(21)
+#define NIX_FLOW_KEY_TYPE_AH BIT(22)
+#define NIX_FLOW_KEY_TYPE_ESP BIT(23)
+ u32 flowkey_cfg; /* Flowkey types selected */
+ u8 group; /* RSS context or group */
+};
+
+struct nix_rss_flowkey_cfg_rsp {
+ struct mbox_msghdr hdr;
+ u8 alg_idx; /* Selected algo index */
+};
+
+struct nix_set_mac_addr {
+ struct mbox_msghdr hdr;
+ u8 mac_addr[ETH_ALEN]; /* MAC address to be set for this pcifunc */
+};
+
+struct nix_get_mac_addr_rsp {
+ struct mbox_msghdr hdr;
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct nix_mark_format_cfg {
+ struct mbox_msghdr hdr;
+ u8 offset;
+ u8 y_mask;
+ u8 y_val;
+ u8 r_mask;
+ u8 r_val;
+};
+
+struct nix_mark_format_cfg_rsp {
+ struct mbox_msghdr hdr;
+ u8 mark_format_idx;
+};
+
+struct nix_rx_mode {
+ struct mbox_msghdr hdr;
+#define NIX_RX_MODE_UCAST BIT(0)
+#define NIX_RX_MODE_PROMISC BIT(1)
+#define NIX_RX_MODE_ALLMULTI BIT(2)
+#define NIX_RX_MODE_USE_MCE BIT(3)
+ u16 mode;
+};
+
+struct nix_rx_cfg {
+ struct mbox_msghdr hdr;
+#define NIX_RX_OL3_VERIFY BIT(0)
+#define NIX_RX_OL4_VERIFY BIT(1)
+ u8 len_verify; /* Outer L3/L4 len check */
+#define NIX_RX_CSUM_OL4_VERIFY BIT(0)
+ u8 csum_verify; /* Outer L4 checksum verification */
+};
+
+struct nix_frs_cfg {
+ struct mbox_msghdr hdr;
+ u8 update_smq; /* Update SMQ's min/max lens */
+ u8 update_minlen; /* Set minlen also */
+ u8 sdp_link; /* Set SDP RX link */
+ u16 maxlen;
+ u16 minlen;
+};
+
+struct nix_lso_format_cfg {
+ struct mbox_msghdr hdr;
+ u64 field_mask;
+#define NIX_LSO_FIELD_MAX 8
+ u64 fields[NIX_LSO_FIELD_MAX];
+};
+
+struct nix_lso_format_cfg_rsp {
+ struct mbox_msghdr hdr;
+ u8 lso_format_idx;
+};
+
+struct nix_bp_cfg_req {
+ struct mbox_msghdr hdr;
+ u16 chan_base; /* Starting channel number */
+ u8 chan_cnt; /* Number of channels */
+ u8 bpid_per_chan;
+ /* bpid_per_chan = 0 assigns single bp id for range of channels */
+ /* bpid_per_chan = 1 assigns separate bp id for each channel */
+};
+
+/* PF can be mapped to either CGX or LBK interface,
+ * so maximum 64 channels are possible.
+ */
+#define NIX_MAX_BPID_CHAN 64
+struct nix_bp_cfg_rsp {
+ struct mbox_msghdr hdr;
+ u16 chan_bpid[NIX_MAX_BPID_CHAN]; /* Channel and bpid mapping */
+ u8 chan_cnt; /* Number of channels for which bpids are assigned */
+};
+
+/* Global NIX inline IPSec configuration */
+struct nix_inline_ipsec_cfg {
+ struct mbox_msghdr hdr;
+ u32 cpt_credit;
+ struct {
+ u8 egrp;
+ u8 opcode;
+ u16 param1;
+ u16 param2;
+ } gen_cfg;
+ struct {
+ u16 cpt_pf_func;
+ u8 cpt_slot;
+ } inst_qsel;
+ u8 enable;
+};
+
+/* Per NIX LF inline IPSec configuration */
+struct nix_inline_ipsec_lf_cfg {
+ struct mbox_msghdr hdr;
+ u64 sa_base_addr;
+ struct {
+ u32 tag_const;
+ u16 lenm1_max;
+ u8 sa_pow2_size;
+ u8 tt;
+ } ipsec_cfg0;
+ struct {
+ u32 sa_idx_max;
+ u8 sa_idx_w;
+ } ipsec_cfg1;
+ u8 enable;
+};
+
+struct nix_hw_info {
+ struct mbox_msghdr hdr;
+ u16 rsvs16;
+ u16 max_mtu;
+ u16 min_mtu;
+ u32 rpm_dwrr_mtu;
+ u32 sdp_dwrr_mtu;
+ u64 rsvd[16]; /* Add reserved fields for future expansion */
+};
+
+struct nix_bandprof_alloc_req {
+ struct mbox_msghdr hdr;
+ /* Count of profiles needed per layer */
+ u16 prof_count[BAND_PROF_NUM_LAYERS];
+};
+
+struct nix_bandprof_alloc_rsp {
+ struct mbox_msghdr hdr;
+ u16 prof_count[BAND_PROF_NUM_LAYERS];
+
+ /* There is no need to allocate more than one bandwidth profile
+ * per RQ of a PF_FUNC's NIXLF. So limit the maximum
+ * profiles to 64 per PF_FUNC.
+ */
+#define MAX_BANDPROF_PER_PFFUNC 64
+ u16 prof_idx[BAND_PROF_NUM_LAYERS][MAX_BANDPROF_PER_PFFUNC];
+};
+
+struct nix_bandprof_free_req {
+ struct mbox_msghdr hdr;
+ u8 free_all;
+ u16 prof_count[BAND_PROF_NUM_LAYERS];
+ u16 prof_idx[BAND_PROF_NUM_LAYERS][MAX_BANDPROF_PER_PFFUNC];
+};
+
+struct nix_bandprof_get_hwinfo_rsp {
+ struct mbox_msghdr hdr;
+ u16 prof_count[BAND_PROF_NUM_LAYERS];
+ u32 policer_timeunit;
+};
+
+/* NPC mbox message structs */
+
+#define NPC_MCAM_ENTRY_INVALID 0xFFFF
+#define NPC_MCAM_INVALID_MAP 0xFFFF
+
+/* NPC mailbox error codes
+ * Range 701 - 800.
+ */
+enum npc_af_status {
+ NPC_MCAM_INVALID_REQ = -701,
+ NPC_MCAM_ALLOC_DENIED = -702,
+ NPC_MCAM_ALLOC_FAILED = -703,
+ NPC_MCAM_PERM_DENIED = -704,
+ NPC_FLOW_INTF_INVALID = -707,
+ NPC_FLOW_CHAN_INVALID = -708,
+ NPC_FLOW_NO_NIXLF = -709,
+ NPC_FLOW_NOT_SUPPORTED = -710,
+ NPC_FLOW_VF_PERM_DENIED = -711,
+ NPC_FLOW_VF_NOT_INIT = -712,
+ NPC_FLOW_VF_OVERLAP = -713,
+};
+
+struct npc_mcam_alloc_entry_req {
+ struct mbox_msghdr hdr;
+#define NPC_MAX_NONCONTIG_ENTRIES 256
+ u8 contig; /* Contiguous entries ? */
+#define NPC_MCAM_ANY_PRIO 0
+#define NPC_MCAM_LOWER_PRIO 1
+#define NPC_MCAM_HIGHER_PRIO 2
+ u8 priority; /* Lower or higher w.r.t ref_entry */
+ u16 ref_entry;
+ u16 count; /* Number of entries requested */
+};
+
+struct npc_mcam_alloc_entry_rsp {
+ struct mbox_msghdr hdr;
+ u16 entry; /* Entry allocated or start index if contiguous.
+ * Invalid in case of non-contiguous allocation.
+ */
+ u16 count; /* Number of entries allocated */
+ u16 free_count; /* Number of entries available */
+ u16 entry_list[NPC_MAX_NONCONTIG_ENTRIES];
+};
+
+struct npc_mcam_free_entry_req {
+ struct mbox_msghdr hdr;
+ u16 entry; /* Entry index to be freed */
+ u8 all; /* Set to free all entries allocated to this PFVF */
+};
+
+struct mcam_entry {
+#define NPC_MAX_KWS_IN_KEY 7 /* Number of keywords in max keywidth */
+ u64 kw[NPC_MAX_KWS_IN_KEY];
+ u64 kw_mask[NPC_MAX_KWS_IN_KEY];
+ u64 action;
+ u64 vtag_action;
+};
+
+struct npc_mcam_write_entry_req {
+ struct mbox_msghdr hdr;
+ struct mcam_entry entry_data;
+ u16 entry; /* MCAM entry to write this match key */
+ u16 cntr; /* Counter for this MCAM entry */
+ u8 intf; /* Rx or Tx interface */
+ u8 enable_entry;/* Enable this MCAM entry ? */
+ u8 set_cntr; /* Set counter for this entry ? */
+};
+
+/* Enable/Disable a given entry */
+struct npc_mcam_ena_dis_entry_req {
+ struct mbox_msghdr hdr;
+ u16 entry;
+};
+
+struct npc_mcam_shift_entry_req {
+ struct mbox_msghdr hdr;
+#define NPC_MCAM_MAX_SHIFTS 64
+ u16 curr_entry[NPC_MCAM_MAX_SHIFTS];
+ u16 new_entry[NPC_MCAM_MAX_SHIFTS];
+ u16 shift_count; /* Number of entries to shift */
+};
+
+struct npc_mcam_shift_entry_rsp {
+ struct mbox_msghdr hdr;
+ u16 failed_entry_idx; /* Index in 'curr_entry', not entry itself */
+};
+
+struct npc_mcam_alloc_counter_req {
+ struct mbox_msghdr hdr;
+ u8 contig; /* Contiguous counters ? */
+#define NPC_MAX_NONCONTIG_COUNTERS 64
+ u16 count; /* Number of counters requested */
+};
+
+struct npc_mcam_alloc_counter_rsp {
+ struct mbox_msghdr hdr;
+ u16 cntr; /* Counter allocated or start index if contiguous.
+ * Invalid in case of non-contiguous allocation.
+ */
+ u16 count; /* Number of counters allocated */
+ u16 cntr_list[NPC_MAX_NONCONTIG_COUNTERS];
+};
+
+struct npc_mcam_oper_counter_req {
+ struct mbox_msghdr hdr;
+ u16 cntr; /* Free a counter or clear/fetch its stats */
+};
+
+struct npc_mcam_oper_counter_rsp {
+ struct mbox_msghdr hdr;
+ u64 stat; /* valid only while fetching counter's stats */
+};
+
+struct npc_mcam_unmap_counter_req {
+ struct mbox_msghdr hdr;
+ u16 cntr;
+ u16 entry; /* Entry and counter to be unmapped */
+ u8 all; /* Unmap all entries using this counter ? */
+};
+
+struct npc_mcam_alloc_and_write_entry_req {
+ struct mbox_msghdr hdr;
+ struct mcam_entry entry_data;
+ u16 ref_entry;
+ u8 priority; /* Lower or higher w.r.t ref_entry */
+ u8 intf; /* Rx or Tx interface */
+ u8 enable_entry;/* Enable this MCAM entry ? */
+ u8 alloc_cntr; /* Allocate counter and map ? */
+};
+
+struct npc_mcam_alloc_and_write_entry_rsp {
+ struct mbox_msghdr hdr;
+ u16 entry;
+ u16 cntr;
+};
+
+struct npc_get_kex_cfg_rsp {
+ struct mbox_msghdr hdr;
+ u64 rx_keyx_cfg; /* NPC_AF_INTF(0)_KEX_CFG */
+ u64 tx_keyx_cfg; /* NPC_AF_INTF(1)_KEX_CFG */
+#define NPC_MAX_INTF 2
+#define NPC_MAX_LID 8
+#define NPC_MAX_LT 16
+#define NPC_MAX_LD 2
+#define NPC_MAX_LFL 16
+ /* NPC_AF_KEX_LDATA(0..1)_FLAGS_CFG */
+ u64 kex_ld_flags[NPC_MAX_LD];
+ /* NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG */
+ u64 intf_lid_lt_ld[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD];
+ /* NPC_AF_INTF(0..1)_LDATA(0..1)_FLAGS(0..15)_CFG */
+ u64 intf_ld_flags[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL];
+#define MKEX_NAME_LEN 128
+ u8 mkex_pfl_name[MKEX_NAME_LEN];
+};
+
+struct flow_msg {
+ unsigned char dmac[6];
+ unsigned char smac[6];
+ __be16 etype;
+ __be16 vlan_etype;
+ __be16 vlan_tci;
+ union {
+ __be32 ip4src;
+ __be32 ip6src[4];
+ };
+ union {
+ __be32 ip4dst;
+ __be32 ip6dst[4];
+ };
+ u8 tos;
+ u8 ip_ver;
+ u8 ip_proto;
+ u8 tc;
+ __be16 sport;
+ __be16 dport;
+};
+
+struct npc_install_flow_req {
+ struct mbox_msghdr hdr;
+ struct flow_msg packet;
+ struct flow_msg mask;
+ u64 features;
+ u16 entry;
+ u16 channel;
+ u16 chan_mask;
+ u8 intf;
+ u8 set_cntr; /* If a counter is available, set it for this entry? */
+ u8 default_rule;
+ u8 append; /* overwrite(0) or append(1) flow to default rule? */
+ u16 vf;
+ /* action */
+ u32 index;
+ u16 match_id;
+ u8 flow_key_alg;
+ u8 op;
+ /* vtag rx action */
+ u8 vtag0_type;
+ u8 vtag0_valid;
+ u8 vtag1_type;
+ u8 vtag1_valid;
+ /* vtag tx action */
+ u16 vtag0_def;
+ u8 vtag0_op;
+ u16 vtag1_def;
+ u8 vtag1_op;
+ /* old counter value */
+ u16 cntr_val;
+};
+
+struct npc_install_flow_rsp {
+ struct mbox_msghdr hdr;
+ int counter; /* negative if no counter else counter number */
+};
+
+struct npc_delete_flow_req {
+ struct mbox_msghdr hdr;
+ u16 entry;
+ u16 start; /* Disable a range of entries: start index */
+ u16 end;
+ u8 all; /* PF + VFs */
+};
+
+struct npc_delete_flow_rsp {
+ struct mbox_msghdr hdr;
+ u16 cntr_val;
+};
+
+struct npc_mcam_read_entry_req {
+ struct mbox_msghdr hdr;
+ u16 entry; /* MCAM entry to read */
+};
+
+struct npc_mcam_read_entry_rsp {
+ struct mbox_msghdr hdr;
+ struct mcam_entry entry_data;
+ u8 intf;
+ u8 enable;
+};
+
+struct npc_mcam_read_base_rule_rsp {
+ struct mbox_msghdr hdr;
+ struct mcam_entry entry;
+};
+
+struct npc_mcam_get_stats_req {
+ struct mbox_msghdr hdr;
+ u16 entry; /* mcam entry */
+};
+
+struct npc_mcam_get_stats_rsp {
+ struct mbox_msghdr hdr;
+ u64 stat; /* counter stats */
+ u8 stat_ena; /* enabled */
+};
+
+struct npc_get_field_hash_info_req {
+ struct mbox_msghdr hdr;
+ u8 intf;
+};
+
+struct npc_get_field_hash_info_rsp {
+ struct mbox_msghdr hdr;
+ u64 secret_key[3];
+#define NPC_MAX_HASH 2
+#define NPC_MAX_HASH_MASK 2
+ /* NPC_AF_INTF(0..1)_HASH(0..1)_MASK(0..1) */
+ u64 hash_mask[NPC_MAX_INTF][NPC_MAX_HASH][NPC_MAX_HASH_MASK];
+ /* NPC_AF_INTF(0..1)_HASH(0..1)_RESULT_CTRL */
+ u64 hash_ctrl[NPC_MAX_INTF][NPC_MAX_HASH];
+};
+
+enum ptp_op {
+ PTP_OP_ADJFINE = 0,
+ PTP_OP_GET_CLOCK = 1,
+ PTP_OP_GET_TSTMP = 2,
+ PTP_OP_SET_THRESH = 3,
+ PTP_OP_EXTTS_ON = 4,
+};
+
+struct ptp_req {
+ struct mbox_msghdr hdr;
+ u8 op;
+ s64 scaled_ppm;
+ u64 thresh;
+ int extts_on;
+};
+
+struct ptp_rsp {
+ struct mbox_msghdr hdr;
+ u64 clk;
+};
+
+struct npc_get_field_status_req {
+ struct mbox_msghdr hdr;
+ u8 intf;
+ u8 field;
+};
+
+struct npc_get_field_status_rsp {
+ struct mbox_msghdr hdr;
+ u8 enable;
+};
+
+struct set_vf_perm {
+ struct mbox_msghdr hdr;
+ u16 vf;
+#define RESET_VF_PERM BIT_ULL(0)
+#define VF_TRUSTED BIT_ULL(1)
+ u64 flags;
+};
+
+struct lmtst_tbl_setup_req {
+ struct mbox_msghdr hdr;
+ u64 dis_sched_early_comp :1;
+ u64 sch_ena :1;
+ u64 dis_line_pref :1;
+ u64 ssow_pf_func :13;
+ u16 base_pcifunc;
+ u8 use_local_lmt_region;
+ u64 lmt_iova;
+ u64 rsvd[4];
+};
+
+/* CPT mailbox error codes
+ * Range 901 - 1000.
+ */
+enum cpt_af_status {
+ CPT_AF_ERR_PARAM = -901,
+ CPT_AF_ERR_GRP_INVALID = -902,
+ CPT_AF_ERR_LF_INVALID = -903,
+ CPT_AF_ERR_ACCESS_DENIED = -904,
+ CPT_AF_ERR_SSO_PF_FUNC_INVALID = -905,
+ CPT_AF_ERR_NIX_PF_FUNC_INVALID = -906,
+ CPT_AF_ERR_INLINE_IPSEC_INB_ENA = -907,
+ CPT_AF_ERR_INLINE_IPSEC_OUT_ENA = -908
+};
+
+/* CPT mbox message formats */
+struct cpt_rd_wr_reg_msg {
+ struct mbox_msghdr hdr;
+ u64 reg_offset;
+ u64 *ret_val;
+ u64 val;
+ u8 is_write;
+ int blkaddr;
+};
+
+struct cpt_lf_alloc_req_msg {
+ struct mbox_msghdr hdr;
+ u16 nix_pf_func;
+ u16 sso_pf_func;
+ u16 eng_grpmsk;
+ int blkaddr;
+};
+
+#define CPT_INLINE_INBOUND 0
+#define CPT_INLINE_OUTBOUND 1
+
+/* Mailbox message request format for CPT IPsec
+ * inline inbound and outbound configuration.
+ */
+struct cpt_inline_ipsec_cfg_msg {
+ struct mbox_msghdr hdr;
+ u8 enable;
+ u8 slot;
+ u8 dir;
+ u8 sso_pf_func_ovrd;
+ u16 sso_pf_func; /* inbound path SSO_PF_FUNC */
+ u16 nix_pf_func; /* outbound path NIX_PF_FUNC */
+};
+
+/* Mailbox message request and response format for CPT stats. */
+struct cpt_sts_req {
+ struct mbox_msghdr hdr;
+ u8 blkaddr;
+};
+
+struct cpt_sts_rsp {
+ struct mbox_msghdr hdr;
+ u64 inst_req_pc;
+ u64 inst_lat_pc;
+ u64 rd_req_pc;
+ u64 rd_lat_pc;
+ u64 rd_uc_pc;
+ u64 active_cycles_pc;
+ u64 ctx_mis_pc;
+ u64 ctx_hit_pc;
+ u64 ctx_aop_pc;
+ u64 ctx_aop_lat_pc;
+ u64 ctx_ifetch_pc;
+ u64 ctx_ifetch_lat_pc;
+ u64 ctx_ffetch_pc;
+ u64 ctx_ffetch_lat_pc;
+ u64 ctx_wback_pc;
+ u64 ctx_wback_lat_pc;
+ u64 ctx_psh_pc;
+ u64 ctx_psh_lat_pc;
+ u64 ctx_err;
+ u64 ctx_enc_id;
+ u64 ctx_flush_timer;
+ u64 rxc_time;
+ u64 rxc_time_cfg;
+ u64 rxc_active_sts;
+ u64 rxc_zombie_sts;
+ u64 busy_sts_ae;
+ u64 free_sts_ae;
+ u64 busy_sts_se;
+ u64 free_sts_se;
+ u64 busy_sts_ie;
+ u64 free_sts_ie;
+ u64 exe_err_info;
+ u64 cptclk_cnt;
+ u64 diag;
+ u64 rxc_dfrg;
+ u64 x2p_link_cfg0;
+ u64 x2p_link_cfg1;
+};
+
+/* Mailbox message request format to configure reassembly timeout. */
+struct cpt_rxc_time_cfg_req {
+ struct mbox_msghdr hdr;
+ int blkaddr;
+ u32 step;
+ u16 zombie_thres;
+ u16 zombie_limit;
+ u16 active_thres;
+ u16 active_limit;
+};
+
+/* Mailbox message request format to request for CPT_INST_S lmtst. */
+struct cpt_inst_lmtst_req {
+ struct mbox_msghdr hdr;
+ u64 inst[8];
+ u64 rsvd;
+};
+
+struct sdp_node_info {
+ /* Node to which this PF belongs */
+ u8 node_id;
+ u8 max_vfs;
+ u8 num_pf_rings;
+ u8 pf_srn;
+#define SDP_MAX_VFS 128
+ u8 vf_rings[SDP_MAX_VFS];
+};
+
+struct sdp_chan_info_msg {
+ struct mbox_msghdr hdr;
+ struct sdp_node_info info;
+};
+
+struct sdp_get_chan_info_msg {
+ struct mbox_msghdr hdr;
+ u16 chan_base;
+ u16 num_chan;
+};
+
+/* CGX mailbox error codes
+ * Range 1101 - 1200.
+ */
+enum cgx_af_status {
+ LMAC_AF_ERR_INVALID_PARAM = -1101,
+ LMAC_AF_ERR_PF_NOT_MAPPED = -1102,
+ LMAC_AF_ERR_PERM_DENIED = -1103,
+ LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED = -1104,
+ LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED = -1105,
+ LMAC_AF_ERR_CMD_TIMEOUT = -1106,
+ LMAC_AF_ERR_FIRMWARE_DATA_NOT_MAPPED = -1107,
+ LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED = -1108,
+ LMAC_AF_ERR_EXACT_MATCH_TBL_DEL_FAILED = -1109,
+ LMAC_AF_ERR_EXACT_MATCH_TBL_LOOK_UP_FAILED = -1110,
+};
+
+enum mcs_direction {
+ MCS_RX,
+ MCS_TX,
+};
+
+enum mcs_rsrc_type {
+ MCS_RSRC_TYPE_FLOWID,
+ MCS_RSRC_TYPE_SECY,
+ MCS_RSRC_TYPE_SC,
+ MCS_RSRC_TYPE_SA,
+};
+
+struct mcs_alloc_rsrc_req {
+ struct mbox_msghdr hdr;
+ u8 rsrc_type;
+ u8 rsrc_cnt; /* Resources count */
+ u8 mcs_id; /* MCS block ID */
+ u8 dir; /* Macsec ingress or egress side */
+ u8 all; /* Allocate one of each resource type */
+ u64 rsvd;
+};
+
+struct mcs_alloc_rsrc_rsp {
+ struct mbox_msghdr hdr;
+ u8 flow_ids[128]; /* Indices of reserved entries */
+ u8 secy_ids[128];
+ u8 sc_ids[128];
+ u8 sa_ids[256];
+ u8 rsrc_type;
+ u8 rsrc_cnt; /* No of entries reserved */
+ u8 mcs_id;
+ u8 dir;
+ u8 all;
+ u8 rsvd[256]; /* reserved fields for future expansion */
+};
+
+struct mcs_free_rsrc_req {
+ struct mbox_msghdr hdr;
+ u8 rsrc_id; /* Index of the entry to be freed */
+ u8 rsrc_type;
+ u8 mcs_id;
+ u8 dir;
+ u8 all; /* Free all the cam resources */
+ u64 rsvd;
+};
+
+struct mcs_flowid_entry_write_req {
+ struct mbox_msghdr hdr;
+ u64 data[4];
+ u64 mask[4];
+ u64 sci; /* CNF10K-B for tx_secy_mem_map */
+ u8 flow_id;
+ u8 secy_id; /* secyid for which flowid is mapped */
+ u8 sc_id; /* Valid if dir = MCS_TX, SC_CAM id mapped to flowid */
+ u8 ena; /* Enable tcam entry */
+ u8 ctrl_pkt;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_secy_plcy_write_req {
+ struct mbox_msghdr hdr;
+ u64 plcy;
+ u8 secy_id;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+/* RX SC_CAM mapping */
+struct mcs_rx_sc_cam_write_req {
+ struct mbox_msghdr hdr;
+ u64 sci; /* SCI */
+ u64 secy_id; /* secy index mapped to SC */
+ u8 sc_id; /* SC CAM entry index */
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_sa_plcy_write_req {
+ struct mbox_msghdr hdr;
+ u64 plcy[2][9]; /* Support 2 SA policy */
+ u8 sa_index[2];
+ u8 sa_cnt;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_tx_sc_sa_map {
+ struct mbox_msghdr hdr;
+ u8 sa_index0;
+ u8 sa_index1;
+ u8 rekey_ena;
+ u8 sa_index0_vld;
+ u8 sa_index1_vld;
+ u8 tx_sa_active;
+ u64 sectag_sci;
+ u8 sc_id; /* used as index for SA_MEM_MAP */
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_rx_sc_sa_map {
+ struct mbox_msghdr hdr;
+ u8 sa_index;
+ u8 sa_in_use;
+ u8 sc_id;
+ u8 an; /* value range 0-3; sc_id + an used as index into SA_MEM_MAP */
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_flowid_ena_dis_entry {
+ struct mbox_msghdr hdr;
+ u8 flow_id;
+ u8 ena;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_pn_table_write_req {
+ struct mbox_msghdr hdr;
+ u64 next_pn;
+ u8 pn_id;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_hw_info {
+ struct mbox_msghdr hdr;
+ u8 num_mcs_blks; /* Number of MCS blocks */
+ u8 tcam_entries; /* RX/TX Tcam entries per mcs block */
+ u8 secy_entries; /* RX/TX SECY entries per mcs block */
+ u8 sc_entries; /* RX/TX SC CAM entries per mcs block */
+ u16 sa_entries; /* PN table entries = SA entries */
+ u64 rsvd[16];
+};
+
+struct mcs_set_active_lmac {
+ struct mbox_msghdr hdr;
+ u32 lmac_bmap; /* bitmap of active lmac per mcs block */
+ u8 mcs_id;
+ u16 chan_base; /* MCS channel base */
+ u64 rsvd;
+};
+
+struct mcs_set_lmac_mode {
+ struct mbox_msghdr hdr;
+ u8 mode; /* 1:Bypass 0:Operational */
+ u8 lmac_id;
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_port_reset_req {
+ struct mbox_msghdr hdr;
+ u8 reset;
+ u8 mcs_id;
+ u8 port_id;
+ u64 rsvd;
+};
+
+struct mcs_port_cfg_set_req {
+ struct mbox_msghdr hdr;
+ u8 cstm_tag_rel_mode_sel;
+ u8 custom_hdr_enb;
+ u8 fifo_skid;
+ u8 port_mode;
+ u8 port_id;
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_port_cfg_get_req {
+ struct mbox_msghdr hdr;
+ u8 port_id;
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_port_cfg_get_rsp {
+ struct mbox_msghdr hdr;
+ u8 cstm_tag_rel_mode_sel;
+ u8 custom_hdr_enb;
+ u8 fifo_skid;
+ u8 port_mode;
+ u8 port_id;
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_custom_tag_cfg_get_req {
+ struct mbox_msghdr hdr;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_custom_tag_cfg_get_rsp {
+ struct mbox_msghdr hdr;
+ u16 cstm_etype[8];
+ u8 cstm_indx[8];
+ u8 cstm_etype_en;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+/* MCS mailbox error codes
+ * Range 1201 - 1300.
+ */
+enum mcs_af_status {
+ MCS_AF_ERR_INVALID_MCSID = -1201,
+ MCS_AF_ERR_NOT_MAPPED = -1202,
+};
+
+struct mcs_set_pn_threshold {
+ struct mbox_msghdr hdr;
+ u64 threshold;
+ u8 xpn; /* '1' for setting xpn threshold */
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+enum mcs_ctrl_pkt_rulew_type {
+ MCS_CTRL_PKT_RULE_TYPE_ETH,
+ MCS_CTRL_PKT_RULE_TYPE_DA,
+ MCS_CTRL_PKT_RULE_TYPE_RANGE,
+ MCS_CTRL_PKT_RULE_TYPE_COMBO,
+ MCS_CTRL_PKT_RULE_TYPE_MAC,
+};
+
+struct mcs_alloc_ctrl_pkt_rule_req {
+ struct mbox_msghdr hdr;
+ u8 rule_type;
+ u8 mcs_id; /* MCS block ID */
+ u8 dir; /* Macsec ingress or egress side */
+ u64 rsvd;
+};
+
+struct mcs_alloc_ctrl_pkt_rule_rsp {
+ struct mbox_msghdr hdr;
+ u8 rule_idx;
+ u8 rule_type;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_free_ctrl_pkt_rule_req {
+ struct mbox_msghdr hdr;
+ u8 rule_idx;
+ u8 rule_type;
+ u8 mcs_id;
+ u8 dir;
+ u8 all;
+ u64 rsvd;
+};
+
+struct mcs_ctrl_pkt_rule_write_req {
+ struct mbox_msghdr hdr;
+ u64 data0;
+ u64 data1;
+ u64 data2;
+ u8 rule_idx;
+ u8 rule_type;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_stats_req {
+ struct mbox_msghdr hdr;
+ u8 id;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_flowid_stats {
+ struct mbox_msghdr hdr;
+ u64 tcam_hit_cnt;
+ u64 rsvd;
+};
+
+struct mcs_secy_stats {
+ struct mbox_msghdr hdr;
+ u64 ctl_pkt_bcast_cnt;
+ u64 ctl_pkt_mcast_cnt;
+ u64 ctl_pkt_ucast_cnt;
+ u64 ctl_octet_cnt;
+ u64 unctl_pkt_bcast_cnt;
+ u64 unctl_pkt_mcast_cnt;
+ u64 unctl_pkt_ucast_cnt;
+ u64 unctl_octet_cnt;
+ /* Valid only for RX */
+ u64 octet_decrypted_cnt;
+ u64 octet_validated_cnt;
+ u64 pkt_port_disabled_cnt;
+ u64 pkt_badtag_cnt;
+ u64 pkt_nosa_cnt;
+ u64 pkt_nosaerror_cnt;
+ u64 pkt_tagged_ctl_cnt;
+ u64 pkt_untaged_cnt;
+ u64 pkt_ctl_cnt; /* CN10K-B */
+ u64 pkt_notag_cnt; /* CNF10K-B */
+ /* Valid only for TX */
+ u64 octet_encrypted_cnt;
+ u64 octet_protected_cnt;
+ u64 pkt_noactivesa_cnt;
+ u64 pkt_toolong_cnt;
+ u64 pkt_untagged_cnt;
+ u64 rsvd[4];
+};
+
+struct mcs_port_stats {
+ struct mbox_msghdr hdr;
+ u64 tcam_miss_cnt;
+ u64 parser_err_cnt;
+ u64 preempt_err_cnt; /* CNF10K-B */
+ u64 sectag_insert_err_cnt;
+ u64 rsvd[4];
+};
+
+/* Only for CN10K-B */
+struct mcs_sa_stats {
+ struct mbox_msghdr hdr;
+ /* RX */
+ u64 pkt_invalid_cnt;
+ u64 pkt_nosaerror_cnt;
+ u64 pkt_notvalid_cnt;
+ u64 pkt_ok_cnt;
+ u64 pkt_nosa_cnt;
+ /* TX */
+ u64 pkt_encrypt_cnt;
+ u64 pkt_protected_cnt;
+ u64 rsvd[4];
+};
+
+struct mcs_sc_stats {
+ struct mbox_msghdr hdr;
+ /* RX */
+ u64 hit_cnt;
+ u64 pkt_invalid_cnt;
+ u64 pkt_late_cnt;
+ u64 pkt_notvalid_cnt;
+ u64 pkt_unchecked_cnt;
+ u64 pkt_delay_cnt; /* CNF10K-B */
+ u64 pkt_ok_cnt; /* CNF10K-B */
+ u64 octet_decrypt_cnt; /* CN10K-B */
+ u64 octet_validate_cnt; /* CN10K-B */
+ /* TX */
+ u64 pkt_encrypt_cnt;
+ u64 pkt_protected_cnt;
+ u64 octet_encrypt_cnt; /* CN10K-B */
+ u64 octet_protected_cnt; /* CN10K-B */
+ u64 rsvd[4];
+};
+
+struct mcs_clear_stats {
+ struct mbox_msghdr hdr;
+#define MCS_FLOWID_STATS 0
+#define MCS_SECY_STATS 1
+#define MCS_SC_STATS 2
+#define MCS_SA_STATS 3
+#define MCS_PORT_STATS 4
+ u8 type; /* FLOWID, SECY, SC, SA, PORT */
+ u8 id; /* type = PORT; if id = 0xFF (invalid), port number is derived from pcifunc */
+ u8 mcs_id;
+ u8 dir;
+ u8 all; /* Clear stats of all resources mapped to the PF */
+};
+
+struct mcs_intr_cfg {
+ struct mbox_msghdr hdr;
+#define MCS_CPM_RX_SECTAG_V_EQ1_INT BIT_ULL(0)
+#define MCS_CPM_RX_SECTAG_E_EQ0_C_EQ1_INT BIT_ULL(1)
+#define MCS_CPM_RX_SECTAG_SL_GTE48_INT BIT_ULL(2)
+#define MCS_CPM_RX_SECTAG_ES_EQ1_SC_EQ1_INT BIT_ULL(3)
+#define MCS_CPM_RX_SECTAG_SC_EQ1_SCB_EQ1_INT BIT_ULL(4)
+#define MCS_CPM_RX_PACKET_XPN_EQ0_INT BIT_ULL(5)
+#define MCS_CPM_RX_PN_THRESH_REACHED_INT BIT_ULL(6)
+#define MCS_CPM_TX_PACKET_XPN_EQ0_INT BIT_ULL(7)
+#define MCS_CPM_TX_PN_THRESH_REACHED_INT BIT_ULL(8)
+#define MCS_CPM_TX_SA_NOT_VALID_INT BIT_ULL(9)
+#define MCS_BBE_RX_DFIFO_OVERFLOW_INT BIT_ULL(10)
+#define MCS_BBE_RX_PLFIFO_OVERFLOW_INT BIT_ULL(11)
+#define MCS_BBE_TX_DFIFO_OVERFLOW_INT BIT_ULL(12)
+#define MCS_BBE_TX_PLFIFO_OVERFLOW_INT BIT_ULL(13)
+#define MCS_PAB_RX_CHAN_OVERFLOW_INT BIT_ULL(14)
+#define MCS_PAB_TX_CHAN_OVERFLOW_INT BIT_ULL(15)
+ u64 intr_mask; /* Interrupt enable mask */
+ u8 mcs_id;
+ u8 lmac_id;
+ u64 rsvd;
+};
+
+struct mcs_intr_info {
+ struct mbox_msghdr hdr;
+ u64 intr_mask;
+ int sa_id;
+ u8 mcs_id;
+ u8 lmac_id;
+ u64 rsvd;
+};
+
+#endif /* MBOX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
new file mode 100644
index 000000000..c1775bd01
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
@@ -0,0 +1,1617 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mcs.h"
+#include "mcs_reg.h"
+
+#define DRV_NAME "Marvell MCS Driver"
+
+#define PCI_CFG_REG_BAR_NUM 0
+
+static const struct pci_device_id mcs_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_MCS) },
+ { 0, } /* end of table */
+};
+
+static LIST_HEAD(mcs_list);
+
+void mcs_get_tx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id)
+{
+ u64 reg;
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLBCPKTSX(id);
+ stats->ctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLMCPKTSX(id);
+ stats->ctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLOCTETSX(id);
+ stats->ctl_octet_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLUCPKTSX(id);
+ stats->ctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLBCPKTSX(id);
+ stats->unctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLMCPKTSX(id);
+ stats->unctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLOCTETSX(id);
+ stats->unctl_octet_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLUCPKTSX(id);
+ stats->unctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYENCRYPTEDX(id);
+ stats->octet_encrypted_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYPROTECTEDX(id);
+ stats->octet_protected_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYNOACTIVESAX(id);
+ stats->pkt_noactivesa_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYTOOLONGX(id);
+ stats->pkt_toolong_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYUNTAGGEDX(id);
+ stats->pkt_untagged_cnt = mcs_reg_read(mcs, reg);
+}
+
+void mcs_get_rx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id)
+{
+ u64 reg;
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLBCPKTSX(id);
+ stats->ctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLMCPKTSX(id);
+ stats->ctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLOCTETSX(id);
+ stats->ctl_octet_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLUCPKTSX(id);
+ stats->ctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLBCPKTSX(id);
+ stats->unctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLMCPKTSX(id);
+ stats->unctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLOCTETSX(id);
+ stats->unctl_octet_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLUCPKTSX(id);
+ stats->unctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYDECRYPTEDX(id);
+ stats->octet_decrypted_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYVALIDATEX(id);
+ stats->octet_validated_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSCTRLPORTDISABLEDX(id);
+ stats->pkt_port_disabled_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYBADTAGX(id);
+ stats->pkt_badtag_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAX(id);
+ stats->pkt_nosa_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAERRORX(id);
+ stats->pkt_nosaerror_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYTAGGEDCTLX(id);
+ stats->pkt_tagged_ctl_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDX(id);
+ stats->pkt_untaged_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(id);
+ stats->pkt_ctl_cnt = mcs_reg_read(mcs, reg);
+
+ if (mcs->hw->mcs_blks > 1) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOTAGX(id);
+ stats->pkt_notag_cnt = mcs_reg_read(mcs, reg);
+ }
+}
+
+void mcs_get_flowid_stats(struct mcs *mcs, struct mcs_flowid_stats *stats,
+ int id, int dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX)
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMHITX(id);
+ else
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMHITX(id);
+
+ stats->tcam_hit_cnt = mcs_reg_read(mcs, reg);
+}
+
+void mcs_get_port_stats(struct mcs *mcs, struct mcs_port_stats *stats,
+ int id, int dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMMISSX(id);
+ stats->tcam_miss_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSPARSEERRX(id);
+ stats->parser_err_cnt = mcs_reg_read(mcs, reg);
+ if (mcs->hw->mcs_blks > 1) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSEARLYPREEMPTERRX(id);
+ stats->preempt_err_cnt = mcs_reg_read(mcs, reg);
+ }
+ } else {
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMMISSX(id);
+ stats->tcam_miss_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSPARSEERRX(id);
+ stats->parser_err_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECTAGINSERTIONERRX(id);
+ stats->sectag_insert_err_cnt = mcs_reg_read(mcs, reg);
+ }
+}
+
+void mcs_get_sa_stats(struct mcs *mcs, struct mcs_sa_stats *stats, int id, int dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAINVALIDX(id);
+ stats->pkt_invalid_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTUSINGSAERRORX(id);
+ stats->pkt_nosaerror_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTVALIDX(id);
+ stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAOKX(id);
+ stats->pkt_ok_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAUNUSEDSAX(id);
+ stats->pkt_nosa_cnt = mcs_reg_read(mcs, reg);
+ } else {
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAENCRYPTEDX(id);
+ stats->pkt_encrypt_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAPROTECTEDX(id);
+ stats->pkt_protected_cnt = mcs_reg_read(mcs, reg);
+ }
+}
+
+void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats,
+ int id, int dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCCAMHITX(id);
+ stats->hit_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCINVALIDX(id);
+ stats->pkt_invalid_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(id);
+ stats->pkt_late_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCNOTVALIDX(id);
+ stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDX(id);
+ stats->pkt_unchecked_cnt = mcs_reg_read(mcs, reg);
+
+ if (mcs->hw->mcs_blks > 1) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCDELAYEDX(id);
+ stats->pkt_delay_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCOKX(id);
+ stats->pkt_ok_cnt = mcs_reg_read(mcs, reg);
+ }
+ if (mcs->hw->mcs_blks == 1) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCDECRYPTEDX(id);
+ stats->octet_decrypt_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCVALIDATEX(id);
+ stats->octet_validate_cnt = mcs_reg_read(mcs, reg);
+ }
+ } else {
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCENCRYPTEDX(id);
+ stats->pkt_encrypt_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCPROTECTEDX(id);
+ stats->pkt_protected_cnt = mcs_reg_read(mcs, reg);
+
+ if (mcs->hw->mcs_blks == 1) {
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCENCRYPTEDX(id);
+ stats->octet_encrypt_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCPROTECTEDX(id);
+ stats->octet_protected_cnt = mcs_reg_read(mcs, reg);
+ }
+ }
+}
+
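+/* Clear one stats entry: temporarily set the CSE control bit (assumed to
+ * be a clear-on-read enable), read the counters back to zero them, then
+ * restore normal mode.
+ */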
+void mcs_clear_stats(struct mcs *mcs, u8 type, u8 id, int dir)
+{
+ struct mcs_flowid_stats flowid_st;
+ struct mcs_port_stats port_st;
+ struct mcs_secy_stats secy_st;
+ struct mcs_sc_stats sc_st;
+ struct mcs_sa_stats sa_st;
+ u64 reg;
+
+ if (dir == MCS_RX)
+ reg = MCSX_CSE_RX_SLAVE_CTRL;
+ else
+ reg = MCSX_CSE_TX_SLAVE_CTRL;
+
+ mcs_reg_write(mcs, reg, BIT_ULL(0));
+
+ switch (type) {
+ case MCS_FLOWID_STATS:
+ mcs_get_flowid_stats(mcs, &flowid_st, id, dir);
+ break;
+ case MCS_SECY_STATS:
+ if (dir == MCS_RX)
+ mcs_get_rx_secy_stats(mcs, &secy_st, id);
+ else
+ mcs_get_tx_secy_stats(mcs, &secy_st, id);
+ break;
+ case MCS_SC_STATS:
+ mcs_get_sc_stats(mcs, &sc_st, id, dir);
+ break;
+ case MCS_SA_STATS:
+ mcs_get_sa_stats(mcs, &sa_st, id, dir);
+ break;
+ case MCS_PORT_STATS:
+ mcs_get_port_stats(mcs, &port_st, id, dir);
+ break;
+ }
+
+ mcs_reg_write(mcs, reg, 0x0);
+}
+
+int mcs_clear_all_stats(struct mcs *mcs, u16 pcifunc, int dir)
+{
+ struct mcs_rsrc_map *map;
+ int id;
+
+ if (dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ /* Clear FLOWID stats */
+ for (id = 0; id < map->flow_ids.max; id++) {
+ if (map->flowid2pf_map[id] != pcifunc)
+ continue;
+ mcs_clear_stats(mcs, MCS_FLOWID_STATS, id, dir);
+ }
+
+ /* Clear SECY stats */
+ for (id = 0; id < map->secy.max; id++) {
+ if (map->secy2pf_map[id] != pcifunc)
+ continue;
+ mcs_clear_stats(mcs, MCS_SECY_STATS, id, dir);
+ }
+
+ /* Clear SC stats */
+ for (id = 0; id < map->secy.max; id++) {
+ if (map->sc2pf_map[id] != pcifunc)
+ continue;
+ mcs_clear_stats(mcs, MCS_SC_STATS, id, dir);
+ }
+
+ /* Clear SA stats */
+ for (id = 0; id < map->sa.max; id++) {
+ if (map->sa2pf_map[id] != pcifunc)
+ continue;
+ mcs_clear_stats(mcs, MCS_SA_STATS, id, dir);
+ }
+ return 0;
+}
+
+void mcs_pn_table_write(struct mcs *mcs, u8 pn_id, u64 next_pn, u8 dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX)
+ reg = MCSX_CPM_RX_SLAVE_SA_PN_TABLE_MEMX(pn_id);
+ else
+ reg = MCSX_CPM_TX_SLAVE_SA_PN_TABLE_MEMX(pn_id);
+ mcs_reg_write(mcs, reg, next_pn);
+}
+
+void cn10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map)
+{
+ u64 reg, val;
+
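+ /* SA_MAP_MEM_0X packing, as built below: [7:0] sa_index0,
+ * [16:9] sa_index1, [18] rekey_ena, [19] sa_index0_vld,
+ * [20] sa_index1_vld, [21] tx_sa_active, [63:22] low bits of
+ * sectag_sci; the remaining SCI bits land in SA_MAP_MEM_1X.
+ */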
+ val = (map->sa_index0 & 0xFF) |
+ (map->sa_index1 & 0xFF) << 9 |
+ (map->rekey_ena & 0x1) << 18 |
+ (map->sa_index0_vld & 0x1) << 19 |
+ (map->sa_index1_vld & 0x1) << 20 |
+ (map->tx_sa_active & 0x1) << 21 |
+ map->sectag_sci << 22;
+ reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(map->sc_id);
+ mcs_reg_write(mcs, reg, val);
+
+ val = map->sectag_sci >> 42;
+ reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_1X(map->sc_id);
+ mcs_reg_write(mcs, reg, val);
+}
+
+void cn10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map)
+{
+ u64 val, reg;
+
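+ /* RX SA map entries are indexed per-AN: entry (4 * sc_id) + an holds
+ * [7:0] sa_index and [9] sa_in_use, packed below.
+ */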
+ val = (map->sa_index & 0xFF) | map->sa_in_use << 9;
+
+ reg = MCSX_CPM_RX_SLAVE_SA_MAP_MEMX((4 * map->sc_id) + map->an);
+ mcs_reg_write(mcs, reg, val);
+}
+
+void mcs_sa_plcy_write(struct mcs *mcs, u64 *plcy, int sa_id, int dir)
+{
+ int reg_id;
+ u64 reg;
+
+ if (dir == MCS_RX) {
+ for (reg_id = 0; reg_id < 8; reg_id++) {
+ reg = MCSX_CPM_RX_SLAVE_SA_PLCY_MEMX(reg_id, sa_id);
+ mcs_reg_write(mcs, reg, plcy[reg_id]);
+ }
+ } else {
+ for (reg_id = 0; reg_id < 9; reg_id++) {
+ reg = MCSX_CPM_TX_SLAVE_SA_PLCY_MEMX(reg_id, sa_id);
+ mcs_reg_write(mcs, reg, plcy[reg_id]);
+ }
+ }
+}
+
+void mcs_ena_dis_sc_cam_entry(struct mcs *mcs, int sc_id, int ena)
+{
+ u64 reg, val;
+
+ reg = MCSX_CPM_RX_SLAVE_SC_CAM_ENA(0);
+ if (sc_id > 63)
+ reg = MCSX_CPM_RX_SLAVE_SC_CAM_ENA(1);
+
+ if (ena)
+ val = mcs_reg_read(mcs, reg) | BIT_ULL(sc_id);
+ else
+ val = mcs_reg_read(mcs, reg) & ~BIT_ULL(sc_id);
+
+ mcs_reg_write(mcs, reg, val);
+}
+
+void mcs_rx_sc_cam_write(struct mcs *mcs, u64 sci, u64 secy, int sc_id)
+{
+ mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SC_CAMX(0, sc_id), sci);
+ mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SC_CAMX(1, sc_id), secy);
+ /* Enable SC CAM */
+ mcs_ena_dis_sc_cam_entry(mcs, sc_id, true);
+}
+
+void mcs_secy_plcy_write(struct mcs *mcs, u64 plcy, int secy_id, int dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX)
+ reg = MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_0X(secy_id);
+ else
+ reg = MCSX_CPM_TX_SLAVE_SECY_PLCY_MEMX(secy_id);
+
+ mcs_reg_write(mcs, reg, plcy);
+
+ if (mcs->hw->mcs_blks == 1 && dir == MCS_RX)
+ mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_1X(secy_id), 0x0ull);
+}
+
+void cn10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir)
+{
+ u64 reg, val;
+
+ val = (map->secy & 0x7F) | (map->ctrl_pkt & 0x1) << 8;
+ if (dir == MCS_RX) {
+ reg = MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(map->flow_id);
+ } else {
+ val |= (map->sc & 0x7F) << 9;
+ reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(map->flow_id);
+ }
+
+ mcs_reg_write(mcs, reg, val);
+}
+
+void mcs_ena_dis_flowid_entry(struct mcs *mcs, int flow_id, int dir, int ena)
+{
+ u64 reg, val;
+
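+ /* Flow id TCAM enable bits are split across two registers, covering
+ * 64 flow ids each.
+ */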
+ if (dir == MCS_RX) {
+ reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_0;
+ if (flow_id > 63)
+ reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_1;
+ } else {
+ reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_0;
+ if (flow_id > 63)
+ reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_1;
+ }
+
+ /* Enable/Disable the tcam entry */
+ if (ena)
+ val = mcs_reg_read(mcs, reg) | BIT_ULL(flow_id);
+ else
+ val = mcs_reg_read(mcs, reg) & ~BIT_ULL(flow_id);
+
+ mcs_reg_write(mcs, reg, val);
+}
+
+void mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int flow_id, int dir)
+{
+ int reg_id;
+ u64 reg;
+
+ if (dir == MCS_RX) {
+ for (reg_id = 0; reg_id < 4; reg_id++) {
+ reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, data[reg_id]);
+ }
+ for (reg_id = 0; reg_id < 4; reg_id++) {
+ reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, mask[reg_id]);
+ }
+ } else {
+ for (reg_id = 0; reg_id < 4; reg_id++) {
+ reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, data[reg_id]);
+ }
+ for (reg_id = 0; reg_id < 4; reg_id++) {
+ reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, mask[reg_id]);
+ }
+ }
+}
+
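+/* Reserve the last flow id and SecY entries and program them as a default
+ * rule: a match-all TCAM entry mapped to a SecY policy with frame
+ * validation disabled, so traffic that hits no other rule bypasses MACsec
+ * processing.
+ */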
+int mcs_install_flowid_bypass_entry(struct mcs *mcs)
+{
+ int flow_id, secy_id, reg_id;
+ struct secy_mem_map map;
+ u64 reg, plcy = 0;
+
+ /* Flow entry */
+ flow_id = mcs->hw->tcam_entries - MCS_RSRC_RSVD_CNT;
+ __set_bit(flow_id, mcs->rx.flow_ids.bmap);
+ __set_bit(flow_id, mcs->tx.flow_ids.bmap);
+
+ for (reg_id = 0; reg_id < 4; reg_id++) {
+ reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0));
+ }
+ for (reg_id = 0; reg_id < 4; reg_id++) {
+ reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0));
+ }
+ /* secy */
+ secy_id = mcs->hw->secy_entries - MCS_RSRC_RSVD_CNT;
+ __set_bit(secy_id, mcs->rx.secy.bmap);
+ __set_bit(secy_id, mcs->tx.secy.bmap);
+
+ /* Set validate frames to NULL and enable control port */
+ plcy = 0x7ull;
+ if (mcs->hw->mcs_blks > 1)
+ plcy = BIT_ULL(0) | 0x3ull << 4;
+ mcs_secy_plcy_write(mcs, plcy, secy_id, MCS_RX);
+
+ /* Enable control port and set mtu to max */
+ plcy = BIT_ULL(0) | GENMASK_ULL(43, 28);
+ if (mcs->hw->mcs_blks > 1)
+ plcy = BIT_ULL(0) | GENMASK_ULL(63, 48);
+ mcs_secy_plcy_write(mcs, plcy, secy_id, MCS_TX);
+
+ /* Map flowid to secy */
+ map.secy = secy_id;
+ map.ctrl_pkt = 0;
+ map.flow_id = flow_id;
+ mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, MCS_RX);
+ map.sc = secy_id;
+ mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, MCS_TX);
+
+ /* Enable Flowid entry */
+ mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_RX, true);
+ mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_TX, true);
+
+ return 0;
+}
+
+void mcs_clear_secy_plcy(struct mcs *mcs, int secy_id, int dir)
+{
+ struct mcs_rsrc_map *map;
+ int flow_id;
+
+ if (dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ /* Clear secy memory to zero */
+ mcs_secy_plcy_write(mcs, 0, secy_id, dir);
+
+ /* Disable the tcam entry using this secy */
+ for (flow_id = 0; flow_id < map->flow_ids.max; flow_id++) {
+ if (map->flowid2secy_map[flow_id] != secy_id)
+ continue;
+ mcs_ena_dis_flowid_entry(mcs, flow_id, dir, false);
+ }
+}
+
+int mcs_alloc_ctrlpktrule(struct rsrc_bmap *rsrc, u16 *pf_map, u16 offset, u16 pcifunc)
+{
+ int rsrc_id;
+
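+ /* 'offset' is the start of the bitmap region reserved for the requested
+ * rule type, presumably one of the MCS_CTRLPKT_*_RULE_OFFSET values used
+ * below in mcs_ctrlpktrule_write().
+ */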
+ if (!rsrc->bmap)
+ return -EINVAL;
+
+ rsrc_id = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, offset, 1, 0);
+ if (rsrc_id >= rsrc->max)
+ return -ENOSPC;
+
+ bitmap_set(rsrc->bmap, rsrc_id, 1);
+ pf_map[rsrc_id] = pcifunc;
+
+ return rsrc_id;
+}
+
+int mcs_free_ctrlpktrule(struct mcs *mcs, struct mcs_free_ctrl_pkt_rule_req *req)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs_rsrc_map *map;
+ u64 dis, reg;
+ int id, rc;
+
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ENABLE : MCSX_PEX_TX_SLAVE_RULE_ENABLE;
+ map = (req->dir == MCS_RX) ? &mcs->rx : &mcs->tx;
+
+ if (req->all) {
+ for (id = 0; id < map->ctrlpktrule.max; id++) {
+ if (map->ctrlpktrule2pf_map[id] != pcifunc)
+ continue;
+ mcs_free_rsrc(&map->ctrlpktrule, map->ctrlpktrule2pf_map, id, pcifunc);
+ dis = mcs_reg_read(mcs, reg);
+ dis &= ~BIT_ULL(id);
+ mcs_reg_write(mcs, reg, dis);
+ }
+ return 0;
+ }
+
+ rc = mcs_free_rsrc(&map->ctrlpktrule, map->ctrlpktrule2pf_map, req->rule_idx, pcifunc);
+ dis = mcs_reg_read(mcs, reg);
+ dis &= ~BIT_ULL(req->rule_idx);
+ mcs_reg_write(mcs, reg, dis);
+
+ return rc;
+}
+
+int mcs_ctrlpktrule_write(struct mcs *mcs, struct mcs_ctrl_pkt_rule_write_req *req)
+{
+ u64 reg, enb;
+ u64 idx;
+
+ switch (req->rule_type) {
+ case MCS_CTRL_PKT_RULE_TYPE_ETH:
+ req->data0 &= GENMASK(15, 0);
+ if (req->data0 != ETH_P_PAE)
+ return -EINVAL;
+
+ idx = req->rule_idx - MCS_CTRLPKT_ETYPE_RULE_OFFSET;
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ETYPE_CFGX(idx) :
+ MCSX_PEX_TX_SLAVE_RULE_ETYPE_CFGX(idx);
+
+ mcs_reg_write(mcs, reg, req->data0);
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_DA:
+ if (!(req->data0 & BIT_ULL(40)))
+ return -EINVAL;
+
+ idx = req->rule_idx - MCS_CTRLPKT_DA_RULE_OFFSET;
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_DAX(idx) :
+ MCSX_PEX_TX_SLAVE_RULE_DAX(idx);
+
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_RANGE:
+ if (!(req->data0 & BIT_ULL(40)) || !(req->data1 & BIT_ULL(40)))
+ return -EINVAL;
+
+ idx = req->rule_idx - MCS_CTRLPKT_DA_RANGE_RULE_OFFSET;
+ if (req->dir == MCS_RX) {
+ reg = MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MINX(idx);
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MAXX(idx);
+ mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
+ } else {
+ reg = MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MINX(idx);
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MAXX(idx);
+ mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
+ }
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_COMBO:
+ req->data2 &= GENMASK(15, 0);
+ if (req->data2 != ETH_P_PAE || !(req->data0 & BIT_ULL(40)) ||
+ !(req->data1 & BIT_ULL(40)))
+ return -EINVAL;
+
+ idx = req->rule_idx - MCS_CTRLPKT_COMBO_RULE_OFFSET;
+ if (req->dir == MCS_RX) {
+ reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_MINX(idx);
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_MAXX(idx);
+ mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_ETX(idx);
+ mcs_reg_write(mcs, reg, req->data2);
+ } else {
+ reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_MINX(idx);
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_MAXX(idx);
+ mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_ETX(idx);
+ mcs_reg_write(mcs, reg, req->data2);
+ }
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_MAC:
+ if (!(req->data0 & BIT_ULL(40)))
+ return -EINVAL;
+
+ idx = req->rule_idx - MCS_CTRLPKT_MAC_EN_RULE_OFFSET;
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_MAC :
+ MCSX_PEX_TX_SLAVE_RULE_MAC;
+
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ break;
+ }
+
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ENABLE : MCSX_PEX_TX_SLAVE_RULE_ENABLE;
+
+ enb = mcs_reg_read(mcs, reg);
+ enb |= BIT_ULL(req->rule_idx);
+ mcs_reg_write(mcs, reg, enb);
+
+ return 0;
+}
+
+int mcs_free_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, int rsrc_id, u16 pcifunc)
+{
+ /* Check if the rsrc_id is mapped to PF/VF */
+ if (pf_map[rsrc_id] != pcifunc)
+ return -EINVAL;
+
+ rvu_free_rsrc(rsrc, rsrc_id);
+ pf_map[rsrc_id] = 0;
+ return 0;
+}
+
+/* Free all the cam resources mapped to pf */
+int mcs_free_all_rsrc(struct mcs *mcs, int dir, u16 pcifunc)
+{
+ struct mcs_rsrc_map *map;
+ int id;
+
+ if (dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ /* free tcam entries */
+ for (id = 0; id < map->flow_ids.max; id++) {
+ if (map->flowid2pf_map[id] != pcifunc)
+ continue;
+ mcs_free_rsrc(&map->flow_ids, map->flowid2pf_map,
+ id, pcifunc);
+ mcs_ena_dis_flowid_entry(mcs, id, dir, false);
+ }
+
+ /* free secy entries */
+ for (id = 0; id < map->secy.max; id++) {
+ if (map->secy2pf_map[id] != pcifunc)
+ continue;
+ mcs_free_rsrc(&map->secy, map->secy2pf_map,
+ id, pcifunc);
+ mcs_clear_secy_plcy(mcs, id, dir);
+ }
+
+ /* free sc entries */
+ for (id = 0; id < map->secy.max; id++) {
+ if (map->sc2pf_map[id] != pcifunc)
+ continue;
+ mcs_free_rsrc(&map->sc, map->sc2pf_map, id, pcifunc);
+
+ /* Disable SC CAM only on RX side */
+ if (dir == MCS_RX)
+ mcs_ena_dis_sc_cam_entry(mcs, id, false);
+ }
+
+ /* free sa entries */
+ for (id = 0; id < map->sa.max; id++) {
+ if (map->sa2pf_map[id] != pcifunc)
+ continue;
+ mcs_free_rsrc(&map->sa, map->sa2pf_map, id, pcifunc);
+ }
+ return 0;
+}
+
+int mcs_alloc_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, u16 pcifunc)
+{
+ int rsrc_id;
+
+ rsrc_id = rvu_alloc_rsrc(rsrc);
+ if (rsrc_id < 0)
+ return -ENOMEM;
+ pf_map[rsrc_id] = pcifunc;
+ return rsrc_id;
+}
+
+int mcs_alloc_all_rsrc(struct mcs *mcs, u8 *flow_id, u8 *secy_id,
+ u8 *sc_id, u8 *sa1_id, u8 *sa2_id, u16 pcifunc, int dir)
+{
+ struct mcs_rsrc_map *map;
+ int id;
+
+ if (dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ id = mcs_alloc_rsrc(&map->flow_ids, map->flowid2pf_map, pcifunc);
+ if (id < 0)
+ return -ENOMEM;
+ *flow_id = id;
+
+ id = mcs_alloc_rsrc(&map->secy, map->secy2pf_map, pcifunc);
+ if (id < 0)
+ return -ENOMEM;
+ *secy_id = id;
+
+ id = mcs_alloc_rsrc(&map->sc, map->sc2pf_map, pcifunc);
+ if (id < 0)
+ return -ENOMEM;
+ *sc_id = id;
+
+ id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
+ if (id < 0)
+ return -ENOMEM;
+ *sa1_id = id;
+
+ id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
+ if (id < 0)
+ return -ENOMEM;
+ *sa2_id = id;
+
+ return 0;
+}
+
+static void cn10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs)
+{
+ struct mcs_intr_event event = { 0 };
+ struct rsrc_bmap *sc_bmap;
+ u64 val;
+ int sc;
+
+ sc_bmap = &mcs->tx.sc;
+
+ event.mcs_id = mcs->mcs_id;
+ event.intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;
+
+ for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
+ val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
+
+ if (mcs->tx_sa_active[sc])
+ /* SA_index1 was used and got expired */
+ event.sa_id = (val >> 9) & 0xFF;
+ else
+ /* SA_index0 was used and got expired */
+ event.sa_id = val & 0xFF;
+
+ event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
+
+static void cn10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs)
+{
+ struct mcs_intr_event event = { 0 };
+ struct rsrc_bmap *sc_bmap;
+ u64 val, status;
+ int sc;
+
+ sc_bmap = &mcs->tx.sc;
+
+ event.mcs_id = mcs->mcs_id;
+ event.intr_mask = MCS_CPM_TX_PN_THRESH_REACHED_INT;
+
+ /* TX SA interrupt is raised only if autorekey is enabled.
+ * MCS_CPM_TX_SLAVE_SA_MAP_MEM_0X[sc].tx_sa_active bit gets toggled if
+ * one of the two SAs mapped to the SC expires. tx_sa_active=0 implies
+ * the SA in SA_index1 expired; otherwise the SA in SA_index0 expired.
+ */
+ for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
+ val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
+ /* Skip SCs with auto rekey disabled */
+ if (!((val >> 18) & 0x1))
+ continue;
+
+ status = (val >> 21) & 0x1;
+
+ /* Check if tx_sa_active status had changed */
+ if (status == mcs->tx_sa_active[sc])
+ continue;
+ /* SA_index0 is expired */
+ if (status)
+ event.sa_id = val & 0xFF;
+ else
+ event.sa_id = (val >> 9) & 0xFF;
+
+ event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
+
+static void mcs_rx_pn_thresh_reached_handler(struct mcs *mcs)
+{
+ struct mcs_intr_event event = { 0 };
+ int sa, reg;
+ u64 intr;
+
+ /* Check expired SAs */
+ for (reg = 0; reg < (mcs->hw->sa_entries / 64); reg++) {
+		/* A set bit in *PN_THRESH_REACHEDX implies the
+		 * corresponding SA has expired.
+		 */
+ intr = mcs_reg_read(mcs, MCSX_CPM_RX_SLAVE_PN_THRESH_REACHEDX(reg));
+ for (sa = 0; sa < 64; sa++) {
+ if (!(intr & BIT_ULL(sa)))
+ continue;
+
+ event.mcs_id = mcs->mcs_id;
+ event.intr_mask = MCS_CPM_RX_PN_THRESH_REACHED_INT;
+ event.sa_id = sa + (reg * 64);
+ event.pcifunc = mcs->rx.sa2pf_map[event.sa_id];
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+ }
+}
+
+static void mcs_rx_misc_intr_handler(struct mcs *mcs, u64 intr)
+{
+ struct mcs_intr_event event = { 0 };
+
+ event.mcs_id = mcs->mcs_id;
+ event.pcifunc = mcs->pf_map[0];
+
+ if (intr & MCS_CPM_RX_INT_SECTAG_V_EQ1)
+ event.intr_mask = MCS_CPM_RX_SECTAG_V_EQ1_INT;
+ if (intr & MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1)
+ event.intr_mask |= MCS_CPM_RX_SECTAG_E_EQ0_C_EQ1_INT;
+ if (intr & MCS_CPM_RX_INT_SL_GTE48)
+ event.intr_mask |= MCS_CPM_RX_SECTAG_SL_GTE48_INT;
+ if (intr & MCS_CPM_RX_INT_ES_EQ1_SC_EQ1)
+ event.intr_mask |= MCS_CPM_RX_SECTAG_ES_EQ1_SC_EQ1_INT;
+ if (intr & MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1)
+ event.intr_mask |= MCS_CPM_RX_SECTAG_SC_EQ1_SCB_EQ1_INT;
+ if (intr & MCS_CPM_RX_INT_PACKET_XPN_EQ0)
+ event.intr_mask |= MCS_CPM_RX_PACKET_XPN_EQ0_INT;
+
+ mcs_add_intr_wq_entry(mcs, &event);
+}
+
+static void mcs_tx_misc_intr_handler(struct mcs *mcs, u64 intr)
+{
+ struct mcs_intr_event event = { 0 };
+
+ if (!(intr & MCS_CPM_TX_INT_SA_NOT_VALID))
+ return;
+
+ event.mcs_id = mcs->mcs_id;
+ event.pcifunc = mcs->pf_map[0];
+
+ event.intr_mask = MCS_CPM_TX_SA_NOT_VALID_INT;
+
+ mcs_add_intr_wq_entry(mcs, &event);
+}
+
+void cn10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr,
+ enum mcs_direction dir)
+{
+ u64 val, reg;
+ int lmac;
+
+ if (!(intr & 0x6ULL))
+ return;
+
+ if (intr & BIT_ULL(1))
+ reg = (dir == MCS_RX) ? MCSX_BBE_RX_SLAVE_DFIFO_OVERFLOW_0 :
+ MCSX_BBE_TX_SLAVE_DFIFO_OVERFLOW_0;
+ else
+ reg = (dir == MCS_RX) ? MCSX_BBE_RX_SLAVE_PLFIFO_OVERFLOW_0 :
+ MCSX_BBE_TX_SLAVE_PLFIFO_OVERFLOW_0;
+ val = mcs_reg_read(mcs, reg);
+
+	/* Policy or data FIFO overflow occurred */
+ for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
+ if (!(val & BIT_ULL(lmac)))
+ continue;
+		dev_warn(mcs->dev, "BBE: Policy or data overflow occurred on lmac:%d\n", lmac);
+ }
+}
+
+void cn10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr,
+ enum mcs_direction dir)
+{
+ int lmac;
+
+ if (!(intr & 0xFFFFFULL))
+ return;
+
+ for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
+ if (intr & BIT_ULL(lmac))
+ dev_warn(mcs->dev, "PAB: overflow occurred on lmac:%d\n", lmac);
+ }
+}
+
+static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
+{
+ struct mcs *mcs = (struct mcs *)mcs_irq;
+ u64 intr, cpm_intr, bbe_intr, pab_intr;
+
+ /* Disable the interrupt */
+ mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1C, BIT_ULL(0));
+
+	/* Check which block has an interrupt pending */
+ intr = mcs_reg_read(mcs, MCSX_TOP_SLAVE_INT_SUM);
+
+ /* CPM RX */
+ if (intr & MCS_CPM_RX_INT_ENA) {
+ /* Check for PN thresh interrupt bit */
+ cpm_intr = mcs_reg_read(mcs, MCSX_CPM_RX_SLAVE_RX_INT);
+
+ if (cpm_intr & MCS_CPM_RX_INT_PN_THRESH_REACHED)
+ mcs_rx_pn_thresh_reached_handler(mcs);
+
+ if (cpm_intr & MCS_CPM_RX_INT_ALL)
+ mcs_rx_misc_intr_handler(mcs, cpm_intr);
+
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT, cpm_intr);
+ }
+
+ /* CPM TX */
+ if (intr & MCS_CPM_TX_INT_ENA) {
+ cpm_intr = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_TX_INT);
+
+ if (cpm_intr & MCS_CPM_TX_INT_PN_THRESH_REACHED) {
+ if (mcs->hw->mcs_blks > 1)
+ cnf10kb_mcs_tx_pn_thresh_reached_handler(mcs);
+ else
+ cn10kb_mcs_tx_pn_thresh_reached_handler(mcs);
+ }
+
+ if (cpm_intr & MCS_CPM_TX_INT_SA_NOT_VALID)
+ mcs_tx_misc_intr_handler(mcs, cpm_intr);
+
+ if (cpm_intr & MCS_CPM_TX_INT_PACKET_XPN_EQ0) {
+ if (mcs->hw->mcs_blks > 1)
+ cnf10kb_mcs_tx_pn_wrapped_handler(mcs);
+ else
+ cn10kb_mcs_tx_pn_wrapped_handler(mcs);
+ }
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT, cpm_intr);
+ }
+
+ /* BBE RX */
+ if (intr & MCS_BBE_RX_INT_ENA) {
+ bbe_intr = mcs_reg_read(mcs, MCSX_BBE_RX_SLAVE_BBE_INT);
+ mcs->mcs_ops->mcs_bbe_intr_handler(mcs, bbe_intr, MCS_RX);
+
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_INTR_RW, 0);
+ mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT, bbe_intr);
+ }
+
+ /* BBE TX */
+ if (intr & MCS_BBE_TX_INT_ENA) {
+ bbe_intr = mcs_reg_read(mcs, MCSX_BBE_TX_SLAVE_BBE_INT);
+ mcs->mcs_ops->mcs_bbe_intr_handler(mcs, bbe_intr, MCS_TX);
+
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_INTR_RW, 0);
+ mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT, bbe_intr);
+ }
+
+ /* PAB RX */
+ if (intr & MCS_PAB_RX_INT_ENA) {
+ pab_intr = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PAB_INT);
+ mcs->mcs_ops->mcs_pab_intr_handler(mcs, pab_intr, MCS_RX);
+
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_INTR_RW, 0);
+ mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT, pab_intr);
+ }
+
+ /* PAB TX */
+ if (intr & MCS_PAB_TX_INT_ENA) {
+ pab_intr = mcs_reg_read(mcs, MCSX_PAB_TX_SLAVE_PAB_INT);
+ mcs->mcs_ops->mcs_pab_intr_handler(mcs, pab_intr, MCS_TX);
+
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_INTR_RW, 0);
+ mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT, pab_intr);
+ }
+
+ /* Clear and enable the interrupt */
+ mcs_reg_write(mcs, MCSX_IP_INT, BIT_ULL(0));
+ mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0));
+
+ return IRQ_HANDLED;
+}
+
+static void *alloc_mem(struct mcs *mcs, int n)
+{
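+	/* devm-managed, zero-initialized array of n u16 entries */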
+ return devm_kcalloc(mcs->dev, n, sizeof(u16), GFP_KERNEL);
+}
+
+static int mcs_alloc_struct_mem(struct mcs *mcs, struct mcs_rsrc_map *res)
+{
+ struct hwinfo *hw = mcs->hw;
+ int err;
+
+ res->flowid2pf_map = alloc_mem(mcs, hw->tcam_entries);
+ if (!res->flowid2pf_map)
+ return -ENOMEM;
+
+ res->secy2pf_map = alloc_mem(mcs, hw->secy_entries);
+ if (!res->secy2pf_map)
+ return -ENOMEM;
+
+ res->sc2pf_map = alloc_mem(mcs, hw->sc_entries);
+ if (!res->sc2pf_map)
+ return -ENOMEM;
+
+ res->sa2pf_map = alloc_mem(mcs, hw->sa_entries);
+ if (!res->sa2pf_map)
+ return -ENOMEM;
+
+ res->flowid2secy_map = alloc_mem(mcs, hw->tcam_entries);
+ if (!res->flowid2secy_map)
+ return -ENOMEM;
+
+ res->ctrlpktrule2pf_map = alloc_mem(mcs, MCS_MAX_CTRLPKT_RULES);
+ if (!res->ctrlpktrule2pf_map)
+ return -ENOMEM;
+
+ res->flow_ids.max = hw->tcam_entries - MCS_RSRC_RSVD_CNT;
+ err = rvu_alloc_bitmap(&res->flow_ids);
+ if (err)
+ return err;
+
+ res->secy.max = hw->secy_entries - MCS_RSRC_RSVD_CNT;
+ err = rvu_alloc_bitmap(&res->secy);
+ if (err)
+ return err;
+
+ res->sc.max = hw->sc_entries;
+ err = rvu_alloc_bitmap(&res->sc);
+ if (err)
+ return err;
+
+ res->sa.max = hw->sa_entries;
+ err = rvu_alloc_bitmap(&res->sa);
+ if (err)
+ return err;
+
+ res->ctrlpktrule.max = MCS_MAX_CTRLPKT_RULES;
+ err = rvu_alloc_bitmap(&res->ctrlpktrule);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int mcs_register_interrupts(struct mcs *mcs)
+{
+ int ret = 0;
+
+ mcs->num_vec = pci_msix_vec_count(mcs->pdev);
+
+ ret = pci_alloc_irq_vectors(mcs->pdev, mcs->num_vec,
+ mcs->num_vec, PCI_IRQ_MSIX);
+ if (ret < 0) {
+		dev_err(mcs->dev, "MCS request for %d MSI-X vectors failed, err:%d\n",
+ mcs->num_vec, ret);
+ return ret;
+ }
+
+ ret = request_irq(pci_irq_vector(mcs->pdev, mcs->hw->ip_vec),
+ mcs_ip_intr_handler, 0, "MCS_IP", mcs);
+ if (ret) {
+ dev_err(mcs->dev, "MCS IP irq registration failed\n");
+ goto exit;
+ }
+
+	/* Enable the MCS IP interrupt */
+ mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0));
+
+	/* Enable CPM, BBE and PAB Rx/Tx summary interrupts */
+ mcs_reg_write(mcs, MCSX_TOP_SLAVE_INT_SUM_ENB,
+ MCS_CPM_RX_INT_ENA | MCS_CPM_TX_INT_ENA |
+ MCS_BBE_RX_INT_ENA | MCS_BBE_TX_INT_ENA |
+ MCS_PAB_RX_INT_ENA | MCS_PAB_TX_INT_ENA);
+
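+	/* Enable all three CPM Tx and all seven CPM Rx interrupt sources */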
+ mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT_ENB, 0x7ULL);
+ mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT_ENB, 0x7FULL);
+
+ mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_ENB, 0xFFULL);
+ mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_ENB, 0xFFULL);
+
+ mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_ENB, 0xFFFFFULL);
+ mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_ENB, 0xFFFFFULL);
+
+ mcs->tx_sa_active = alloc_mem(mcs, mcs->hw->sc_entries);
+ if (!mcs->tx_sa_active) {
+ ret = -ENOMEM;
+ goto free_irq;
+ }
+
+ return ret;
+
+free_irq:
+ free_irq(pci_irq_vector(mcs->pdev, mcs->hw->ip_vec), mcs);
+exit:
+ pci_free_irq_vectors(mcs->pdev);
+ mcs->num_vec = 0;
+ return ret;
+}
+
+int mcs_get_blkcnt(void)
+{
+ struct mcs *mcs;
+ int idmax = -ENODEV;
+
+	/* Check if an MCS block is present in hardware */
+ if (!pci_dev_present(mcs_id_table))
+ return 0;
+
+ list_for_each_entry(mcs, &mcs_list, mcs_list)
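+	/* Block count is the highest probed MCS id plus one */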
+ if (mcs->mcs_id > idmax)
+ idmax = mcs->mcs_id;
+
+ if (idmax < 0)
+ return 0;
+
+ return idmax + 1;
+}
+
+struct mcs *mcs_get_pdata(int mcs_id)
+{
+ struct mcs *mcs_dev;
+
+ list_for_each_entry(mcs_dev, &mcs_list, mcs_list) {
+ if (mcs_dev->mcs_id == mcs_id)
+ return mcs_dev;
+ }
+ return NULL;
+}
+
+bool is_mcs_bypass(int mcs_id)
+{
+ struct mcs *mcs_dev;
+
+ list_for_each_entry(mcs_dev, &mcs_list, mcs_list) {
+ if (mcs_dev->mcs_id == mcs_id)
+ return mcs_dev->bypass;
+ }
+ return true;
+}
+
+void mcs_set_port_cfg(struct mcs *mcs, struct mcs_port_cfg_set_req *req)
+{
+ u64 val = 0;
+
+ mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PORT_CFGX(req->port_id),
+ req->port_mode & MCS_PORT_MODE_MASK);
+
+ req->cstm_tag_rel_mode_sel &= 0x3;
+
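+	/* mcs_blks > 1 implies CNF10K-B, which also needs FIFO skid and custom header config */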
+ if (mcs->hw->mcs_blks > 1) {
+ req->fifo_skid &= MCS_PORT_FIFO_SKID_MASK;
+ val = (u32)req->fifo_skid << 0x10;
+ val |= req->fifo_skid;
+ mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(req->port_id), val);
+ mcs_reg_write(mcs, MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(req->port_id),
+ req->cstm_tag_rel_mode_sel);
+ val = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION);
+
+ if (req->custom_hdr_enb)
+ val |= BIT_ULL(req->port_id);
+ else
+ val &= ~BIT_ULL(req->port_id);
+
+ mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION, val);
+ } else {
+ val = mcs_reg_read(mcs, MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id));
+ val |= (req->cstm_tag_rel_mode_sel << 2);
+ mcs_reg_write(mcs, MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id), val);
+ }
+}
+
+void mcs_get_port_cfg(struct mcs *mcs, struct mcs_port_cfg_get_req *req,
+ struct mcs_port_cfg_get_rsp *rsp)
+{
+ u64 reg = 0;
+
+ rsp->port_mode = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PORT_CFGX(req->port_id)) &
+ MCS_PORT_MODE_MASK;
+
+ if (mcs->hw->mcs_blks > 1) {
+ reg = MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(req->port_id);
+ rsp->fifo_skid = mcs_reg_read(mcs, reg) & MCS_PORT_FIFO_SKID_MASK;
+ reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(req->port_id);
+ rsp->cstm_tag_rel_mode_sel = mcs_reg_read(mcs, reg) & 0x3;
+ if (mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION) & BIT_ULL(req->port_id))
+ rsp->custom_hdr_enb = 1;
+ } else {
+ reg = MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id);
+ rsp->cstm_tag_rel_mode_sel = mcs_reg_read(mcs, reg) >> 2;
+ }
+
+ rsp->port_id = req->port_id;
+ rsp->mcs_id = req->mcs_id;
+}
+
+void mcs_get_custom_tag_cfg(struct mcs *mcs, struct mcs_custom_tag_cfg_get_req *req,
+ struct mcs_custom_tag_cfg_get_rsp *rsp)
+{
+ u64 reg = 0, val = 0;
+ u8 idx;
+
+ for (idx = 0; idx < MCS_MAX_CUSTOM_TAGS; idx++) {
+ if (mcs->hw->mcs_blks > 1)
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(idx) :
+ MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(idx);
+ else
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_VLAN_CFGX(idx) :
+ MCSX_PEX_TX_SLAVE_VLAN_CFGX(idx);
+
+ val = mcs_reg_read(mcs, reg);
+ if (mcs->hw->mcs_blks > 1) {
+ rsp->cstm_etype[idx] = val & GENMASK(15, 0);
+ rsp->cstm_indx[idx] = (val >> 0x16) & 0x3;
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_ETYPE_ENABLE :
+ MCSX_PEX_TX_SLAVE_ETYPE_ENABLE;
+ rsp->cstm_etype_en = mcs_reg_read(mcs, reg) & 0xFF;
+ } else {
+ rsp->cstm_etype[idx] = (val >> 0x1) & GENMASK(15, 0);
+ rsp->cstm_indx[idx] = (val >> 0x11) & 0x3;
+ rsp->cstm_etype_en |= (val & 0x1) << idx;
+ }
+ }
+
+ rsp->mcs_id = req->mcs_id;
+ rsp->dir = req->dir;
+}
+
+void mcs_reset_port(struct mcs *mcs, u8 port_id, u8 reset)
+{
+ u64 reg = MCSX_MCS_TOP_SLAVE_PORT_RESET(port_id);
+
+ mcs_reg_write(mcs, reg, reset & 0x1);
+}
+
+/* Set lmac to bypass/operational mode */
+void mcs_set_lmac_mode(struct mcs *mcs, int lmac_id, u8 mode)
+{
+ u64 reg;
+ int id = lmac_id * 2;
+
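+	/* Each lmac maps to two consecutive channel config entries */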
+ reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(id);
+ mcs_reg_write(mcs, reg, (u64)mode);
+ reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG((id + 1));
+ mcs_reg_write(mcs, reg, (u64)mode);
+}
+
+void mcs_pn_threshold_set(struct mcs *mcs, struct mcs_set_pn_threshold *pn)
+{
+ u64 reg;
+
+ if (pn->dir == MCS_RX)
+ reg = pn->xpn ? MCSX_CPM_RX_SLAVE_XPN_THRESHOLD : MCSX_CPM_RX_SLAVE_PN_THRESHOLD;
+ else
+ reg = pn->xpn ? MCSX_CPM_TX_SLAVE_XPN_THRESHOLD : MCSX_CPM_TX_SLAVE_PN_THRESHOLD;
+
+ mcs_reg_write(mcs, reg, pn->threshold);
+}
+
+void cn10kb_mcs_parser_cfg(struct mcs *mcs)
+{
+ u64 reg, val;
+
+ /* VLAN CTag */
+ val = BIT_ULL(0) | (0x8100ull & 0xFFFF) << 1 | BIT_ULL(17);
+ /* RX */
+ reg = MCSX_PEX_RX_SLAVE_VLAN_CFGX(0);
+ mcs_reg_write(mcs, reg, val);
+
+ /* TX */
+ reg = MCSX_PEX_TX_SLAVE_VLAN_CFGX(0);
+ mcs_reg_write(mcs, reg, val);
+
+ /* VLAN STag */
+ val = BIT_ULL(0) | (0x88a8ull & 0xFFFF) << 1 | BIT_ULL(18);
+ /* RX */
+ reg = MCSX_PEX_RX_SLAVE_VLAN_CFGX(1);
+ mcs_reg_write(mcs, reg, val);
+
+ /* TX */
+ reg = MCSX_PEX_TX_SLAVE_VLAN_CFGX(1);
+ mcs_reg_write(mcs, reg, val);
+}
+
+static void mcs_lmac_init(struct mcs *mcs, int lmac_id)
+{
+ u64 reg;
+
+ /* Port mode 25GB */
+ reg = MCSX_PAB_RX_SLAVE_PORT_CFGX(lmac_id);
+ mcs_reg_write(mcs, reg, 0);
+
+ if (mcs->hw->mcs_blks > 1) {
+ reg = MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(lmac_id);
+ mcs_reg_write(mcs, reg, 0xe000e);
+ return;
+ }
+
+ reg = MCSX_PAB_TX_SLAVE_PORT_CFGX(lmac_id);
+ mcs_reg_write(mcs, reg, 0);
+}
+
+int mcs_set_lmac_channels(int mcs_id, u16 base)
+{
+ struct mcs *mcs;
+ int lmac;
+ u64 cfg;
+
+ mcs = mcs_get_pdata(mcs_id);
+ if (!mcs)
+ return -ENODEV;
+ for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
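+		/* Map a contiguous block of 16 channels, starting at 'base', to each lmac */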
+ cfg = mcs_reg_read(mcs, MCSX_LINK_LMACX_CFG(lmac));
+ cfg &= ~(MCSX_LINK_LMAC_BASE_MASK | MCSX_LINK_LMAC_RANGE_MASK);
+ cfg |= FIELD_PREP(MCSX_LINK_LMAC_RANGE_MASK, ilog2(16));
+ cfg |= FIELD_PREP(MCSX_LINK_LMAC_BASE_MASK, base);
+ mcs_reg_write(mcs, MCSX_LINK_LMACX_CFG(lmac), cfg);
+ base += 16;
+ }
+ return 0;
+}
+
+static int mcs_x2p_calibration(struct mcs *mcs)
+{
+ unsigned long timeout = jiffies + usecs_to_jiffies(20000);
+ int i, err = 0;
+ u64 val;
+
+ /* set X2P calibration */
+ val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
+ val |= BIT_ULL(5);
+ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
+
+ /* Wait for calibration to complete */
+ while (!(mcs_reg_read(mcs, MCSX_MIL_RX_GBL_STATUS) & BIT_ULL(0))) {
+ if (time_before(jiffies, timeout)) {
+ usleep_range(80, 100);
+ continue;
+ } else {
+ err = -EBUSY;
+			dev_err(mcs->dev, "MCS X2P calibration failed\n");
+ return err;
+ }
+ }
+
+ val = mcs_reg_read(mcs, MCSX_MIL_RX_GBL_STATUS);
+ for (i = 0; i < mcs->hw->mcs_x2p_intf; i++) {
+ if (val & BIT_ULL(1 + i))
+ continue;
+ err = -EBUSY;
+ dev_err(mcs->dev, "MCS:%d didn't respond to X2P calibration\n", i);
+ }
+ /* Clear X2P calibrate */
+ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, mcs_reg_read(mcs, MCSX_MIL_GLOBAL) & ~BIT_ULL(5));
+
+ return err;
+}
+
+static void mcs_set_external_bypass(struct mcs *mcs, bool bypass)
+{
+ u64 val;
+
+ /* Set MCS to external bypass */
+ val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
+ if (bypass)
+ val |= BIT_ULL(6);
+ else
+ val &= ~BIT_ULL(6);
+ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
+ mcs->bypass = bypass;
+}
+
+static void mcs_global_cfg(struct mcs *mcs)
+{
+ /* Disable external bypass */
+ mcs_set_external_bypass(mcs, false);
+
+ /* Reset TX/RX stats memory */
+ mcs_reg_write(mcs, MCSX_CSE_RX_SLAVE_STATS_CLEAR, 0x1F);
+ mcs_reg_write(mcs, MCSX_CSE_TX_SLAVE_STATS_CLEAR, 0x1F);
+
+	/* Set MCS to perform standard IEEE 802.1AE MACsec processing */
+ if (mcs->hw->mcs_blks == 1) {
+ mcs_reg_write(mcs, MCSX_IP_MODE, BIT_ULL(3));
+ return;
+ }
+
+ mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_CAL_ENTRY, 0xe4);
+ mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_CAL_LEN, 4);
+}
+
+void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs)
+{
+ struct hwinfo *hw = mcs->hw;
+
+ hw->tcam_entries = 128; /* TCAM entries */
+ hw->secy_entries = 128; /* SecY entries */
+ hw->sc_entries = 128; /* SC CAM entries */
+ hw->sa_entries = 256; /* SA entries */
+ hw->lmac_cnt = 20; /* lmacs/ports per mcs block */
+	hw->mcs_x2p_intf = 5;		/* X2P calibration interfaces */
+ hw->mcs_blks = 1; /* MCS blocks */
+ hw->ip_vec = MCS_CN10KB_INT_VEC_IP; /* IP vector */
+}
+
+static struct mcs_ops cn10kb_mcs_ops = {
+ .mcs_set_hw_capabilities = cn10kb_mcs_set_hw_capabilities,
+ .mcs_parser_cfg = cn10kb_mcs_parser_cfg,
+ .mcs_tx_sa_mem_map_write = cn10kb_mcs_tx_sa_mem_map_write,
+ .mcs_rx_sa_mem_map_write = cn10kb_mcs_rx_sa_mem_map_write,
+ .mcs_flowid_secy_map = cn10kb_mcs_flowid_secy_map,
+ .mcs_bbe_intr_handler = cn10kb_mcs_bbe_intr_handler,
+ .mcs_pab_intr_handler = cn10kb_mcs_pab_intr_handler,
+};
+
+static int mcs_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ int lmac, err = 0;
+ struct mcs *mcs;
+
+ mcs = devm_kzalloc(dev, sizeof(*mcs), GFP_KERNEL);
+ if (!mcs)
+ return -ENOMEM;
+
+ mcs->hw = devm_kzalloc(dev, sizeof(struct hwinfo), GFP_KERNEL);
+ if (!mcs->hw)
+ return -ENOMEM;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ pci_set_drvdata(pdev, NULL);
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto exit;
+ }
+
+ mcs->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!mcs->reg_base) {
+ dev_err(dev, "mcs: Cannot map CSR memory space, aborting\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ pci_set_drvdata(pdev, mcs);
+ mcs->pdev = pdev;
+ mcs->dev = &pdev->dev;
+
+ if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B)
+ mcs->mcs_ops = &cn10kb_mcs_ops;
+ else
+ mcs->mcs_ops = cnf10kb_get_mac_ops();
+
+ /* Set hardware capabilities */
+ mcs->mcs_ops->mcs_set_hw_capabilities(mcs);
+
+ mcs_global_cfg(mcs);
+
+	/* Perform X2P calibration */
+ err = mcs_x2p_calibration(mcs);
+ if (err)
+ goto err_x2p;
+
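+	/* Derive the MCS block id from the CSR BAR physical address */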
+ mcs->mcs_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
+ & MCS_ID_MASK;
+
+	/* Allocate MCS Tx side resources */
+ err = mcs_alloc_struct_mem(mcs, &mcs->tx);
+ if (err)
+ goto err_x2p;
+
+	/* Allocate MCS Rx side resources */
+ err = mcs_alloc_struct_mem(mcs, &mcs->rx);
+ if (err)
+ goto err_x2p;
+
+ /* per port config */
+ for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++)
+ mcs_lmac_init(mcs, lmac);
+
+ /* Parser configuration */
+ mcs->mcs_ops->mcs_parser_cfg(mcs);
+
+ err = mcs_register_interrupts(mcs);
+ if (err)
+ goto exit;
+
+ list_add(&mcs->mcs_list, &mcs_list);
+ mutex_init(&mcs->stats_lock);
+
+ return 0;
+
+err_x2p:
+ /* Enable external bypass */
+ mcs_set_external_bypass(mcs, true);
+exit:
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void mcs_remove(struct pci_dev *pdev)
+{
+ struct mcs *mcs = pci_get_drvdata(pdev);
+
+ /* Set MCS to external bypass */
+ mcs_set_external_bypass(mcs, true);
+ free_irq(pci_irq_vector(pdev, mcs->hw->ip_vec), mcs);
+ pci_free_irq_vectors(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+struct pci_driver mcs_driver = {
+ .name = DRV_NAME,
+ .id_table = mcs_id_table,
+ .probe = mcs_probe,
+ .remove = mcs_remove,
+};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.h b/drivers/net/ethernet/marvell/octeontx2/af/mcs.h
new file mode 100644
index 000000000..f927cc61d
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.h
@@ -0,0 +1,246 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell CN10K MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#ifndef MCS_H
+#define MCS_H
+
+#include <linux/bits.h>
+#include "rvu.h"
+
+#define PCI_DEVID_CN10K_MCS 0xA096
+
+#define MCSX_LINK_LMAC_RANGE_MASK GENMASK_ULL(19, 16)
+#define MCSX_LINK_LMAC_BASE_MASK GENMASK_ULL(11, 0)
+
+#define MCS_ID_MASK 0x7
+#define MCS_MAX_PFS 128
+
+#define MCS_PORT_MODE_MASK 0x3
+#define MCS_PORT_FIFO_SKID_MASK 0x3F
+#define MCS_MAX_CUSTOM_TAGS 0x8
+
+#define MCS_CTRLPKT_ETYPE_RULE_MAX 8
+#define MCS_CTRLPKT_DA_RULE_MAX 8
+#define MCS_CTRLPKT_DA_RANGE_RULE_MAX 4
+#define MCS_CTRLPKT_COMBO_RULE_MAX 4
+#define MCS_CTRLPKT_MAC_RULE_MAX 1
+
+#define MCS_MAX_CTRLPKT_RULES (MCS_CTRLPKT_ETYPE_RULE_MAX + \
+ MCS_CTRLPKT_DA_RULE_MAX + \
+ MCS_CTRLPKT_DA_RANGE_RULE_MAX + \
+ MCS_CTRLPKT_COMBO_RULE_MAX + \
+ MCS_CTRLPKT_MAC_RULE_MAX)
+
+#define MCS_CTRLPKT_ETYPE_RULE_OFFSET 0
+#define MCS_CTRLPKT_DA_RULE_OFFSET 8
+#define MCS_CTRLPKT_DA_RANGE_RULE_OFFSET 16
+#define MCS_CTRLPKT_COMBO_RULE_OFFSET 20
+#define MCS_CTRLPKT_MAC_EN_RULE_OFFSET 24
+
+/* Reserved resources for default bypass entry */
+#define MCS_RSRC_RSVD_CNT 1
+
+/* MCS Interrupt Vector */
+#define MCS_CNF10KB_INT_VEC_IP 0x13
+#define MCS_CN10KB_INT_VEC_IP 0x53
+
+#define MCS_MAX_BBE_INT 8ULL
+#define MCS_BBE_INT_MASK 0xFFULL
+
+#define MCS_MAX_PAB_INT 8ULL
+#define MCS_PAB_INT_MASK 0xFULL
+
+#define MCS_BBE_RX_INT_ENA BIT_ULL(0)
+#define MCS_BBE_TX_INT_ENA BIT_ULL(1)
+#define MCS_CPM_RX_INT_ENA BIT_ULL(2)
+#define MCS_CPM_TX_INT_ENA BIT_ULL(3)
+#define MCS_PAB_RX_INT_ENA BIT_ULL(4)
+#define MCS_PAB_TX_INT_ENA BIT_ULL(5)
+
+#define MCS_CPM_TX_INT_PACKET_XPN_EQ0 BIT_ULL(0)
+#define MCS_CPM_TX_INT_PN_THRESH_REACHED BIT_ULL(1)
+#define MCS_CPM_TX_INT_SA_NOT_VALID BIT_ULL(2)
+
+#define MCS_CPM_RX_INT_SECTAG_V_EQ1 BIT_ULL(0)
+#define MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1 BIT_ULL(1)
+#define MCS_CPM_RX_INT_SL_GTE48 BIT_ULL(2)
+#define MCS_CPM_RX_INT_ES_EQ1_SC_EQ1 BIT_ULL(3)
+#define MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1 BIT_ULL(4)
+#define MCS_CPM_RX_INT_PACKET_XPN_EQ0 BIT_ULL(5)
+#define MCS_CPM_RX_INT_PN_THRESH_REACHED BIT_ULL(6)
+
+#define MCS_CPM_RX_INT_ALL (MCS_CPM_RX_INT_SECTAG_V_EQ1 | \
+ MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1 | \
+ MCS_CPM_RX_INT_SL_GTE48 | \
+ MCS_CPM_RX_INT_ES_EQ1_SC_EQ1 | \
+ MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1 | \
+ MCS_CPM_RX_INT_PACKET_XPN_EQ0 | \
+ MCS_CPM_RX_INT_PN_THRESH_REACHED)
+
+struct mcs_pfvf {
+ u64 intr_mask; /* Enabled Interrupt mask */
+};
+
+struct mcs_intr_event {
+ u16 pcifunc;
+ u64 intr_mask;
+ u64 sa_id;
+ u8 mcs_id;
+ u8 lmac_id;
+};
+
+struct mcs_intrq_entry {
+ struct list_head node;
+ struct mcs_intr_event intr_event;
+};
+
+struct secy_mem_map {
+ u8 flow_id;
+ u8 secy;
+ u8 ctrl_pkt;
+ u8 sc;
+ u64 sci;
+};
+
+struct mcs_rsrc_map {
+ u16 *flowid2pf_map;
+ u16 *secy2pf_map;
+ u16 *sc2pf_map;
+ u16 *sa2pf_map;
+	u16 *flowid2secy_map;	/* flow id to secy mapping */
+ u16 *ctrlpktrule2pf_map;
+ struct rsrc_bmap flow_ids;
+ struct rsrc_bmap secy;
+ struct rsrc_bmap sc;
+ struct rsrc_bmap sa;
+ struct rsrc_bmap ctrlpktrule;
+};
+
+struct hwinfo {
+ u8 tcam_entries;
+ u8 secy_entries;
+ u8 sc_entries;
+ u16 sa_entries;
+ u8 mcs_x2p_intf;
+ u8 lmac_cnt;
+ u8 mcs_blks;
+ unsigned long lmac_bmap; /* bitmap of enabled mcs lmac */
+ u16 ip_vec;
+};
+
+struct mcs {
+ void __iomem *reg_base;
+ struct pci_dev *pdev;
+ struct device *dev;
+ struct hwinfo *hw;
+ struct mcs_rsrc_map tx;
+ struct mcs_rsrc_map rx;
+ u16 pf_map[MCS_MAX_PFS]; /* List of PCIFUNC mapped to MCS */
+ u8 mcs_id;
+ struct mcs_ops *mcs_ops;
+ struct list_head mcs_list;
+ /* Lock for mcs stats */
+ struct mutex stats_lock;
+ struct mcs_pfvf *pf;
+ struct mcs_pfvf *vf;
+ u16 num_vec;
+ void *rvu;
+ u16 *tx_sa_active;
+ bool bypass;
+};
+
+struct mcs_ops {
+ void (*mcs_set_hw_capabilities)(struct mcs *mcs);
+ void (*mcs_parser_cfg)(struct mcs *mcs);
+ void (*mcs_tx_sa_mem_map_write)(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
+ void (*mcs_rx_sa_mem_map_write)(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
+ void (*mcs_flowid_secy_map)(struct mcs *mcs, struct secy_mem_map *map, int dir);
+ void (*mcs_bbe_intr_handler)(struct mcs *mcs, u64 intr, enum mcs_direction dir);
+ void (*mcs_pab_intr_handler)(struct mcs *mcs, u64 intr, enum mcs_direction dir);
+};
+
+extern struct pci_driver mcs_driver;
+
+static inline void mcs_reg_write(struct mcs *mcs, u64 offset, u64 val)
+{
+ writeq(val, mcs->reg_base + offset);
+}
+
+static inline u64 mcs_reg_read(struct mcs *mcs, u64 offset)
+{
+ return readq(mcs->reg_base + offset);
+}
+
+/* MCS APIs */
+struct mcs *mcs_get_pdata(int mcs_id);
+int mcs_get_blkcnt(void);
+int mcs_set_lmac_channels(int mcs_id, u16 base);
+int mcs_alloc_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, u16 pcifunc);
+int mcs_free_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, int rsrc_id, u16 pcifunc);
+int mcs_alloc_all_rsrc(struct mcs *mcs, u8 *flowid, u8 *secy_id,
+ u8 *sc_id, u8 *sa1_id, u8 *sa2_id, u16 pcifunc, int dir);
+int mcs_free_all_rsrc(struct mcs *mcs, int dir, u16 pcifunc);
+void mcs_clear_secy_plcy(struct mcs *mcs, int secy_id, int dir);
+void mcs_ena_dis_flowid_entry(struct mcs *mcs, int id, int dir, int ena);
+void mcs_ena_dis_sc_cam_entry(struct mcs *mcs, int id, int ena);
+void mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int id, int dir);
+void mcs_secy_plcy_write(struct mcs *mcs, u64 plcy, int id, int dir);
+void mcs_rx_sc_cam_write(struct mcs *mcs, u64 sci, u64 secy, int sc_id);
+void mcs_sa_plcy_write(struct mcs *mcs, u64 *plcy, int sa, int dir);
+void mcs_map_sc_to_sa(struct mcs *mcs, u64 *sa_map, int sc, int dir);
+void mcs_pn_table_write(struct mcs *mcs, u8 pn_id, u64 next_pn, u8 dir);
+void mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
+void mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
+void mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
+void mcs_pn_threshold_set(struct mcs *mcs, struct mcs_set_pn_threshold *pn);
+int mcs_install_flowid_bypass_entry(struct mcs *mcs);
+void mcs_set_lmac_mode(struct mcs *mcs, int lmac_id, u8 mode);
+void mcs_reset_port(struct mcs *mcs, u8 port_id, u8 reset);
+void mcs_set_port_cfg(struct mcs *mcs, struct mcs_port_cfg_set_req *req);
+void mcs_get_port_cfg(struct mcs *mcs, struct mcs_port_cfg_get_req *req,
+ struct mcs_port_cfg_get_rsp *rsp);
+void mcs_get_custom_tag_cfg(struct mcs *mcs, struct mcs_custom_tag_cfg_get_req *req,
+ struct mcs_custom_tag_cfg_get_rsp *rsp);
+int mcs_alloc_ctrlpktrule(struct rsrc_bmap *rsrc, u16 *pf_map, u16 offset, u16 pcifunc);
+int mcs_free_ctrlpktrule(struct mcs *mcs, struct mcs_free_ctrl_pkt_rule_req *req);
+int mcs_ctrlpktrule_write(struct mcs *mcs, struct mcs_ctrl_pkt_rule_write_req *req);
+bool is_mcs_bypass(int mcs_id);
+
+/* CN10K-B APIs */
+void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs);
+void cn10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
+void cn10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
+void cn10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
+void cn10kb_mcs_parser_cfg(struct mcs *mcs);
+void cn10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
+void cn10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
+
+/* CNF10K-B APIs */
+struct mcs_ops *cnf10kb_get_mac_ops(void);
+void cnf10kb_mcs_set_hw_capabilities(struct mcs *mcs);
+void cnf10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
+void cnf10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
+void cnf10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
+void cnf10kb_mcs_parser_cfg(struct mcs *mcs);
+void cnf10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs);
+void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs);
+void cnf10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
+void cnf10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
+
+/* Stats APIs */
+void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats, int id, int dir);
+void mcs_get_sa_stats(struct mcs *mcs, struct mcs_sa_stats *stats, int id, int dir);
+void mcs_get_port_stats(struct mcs *mcs, struct mcs_port_stats *stats, int id, int dir);
+void mcs_get_flowid_stats(struct mcs *mcs, struct mcs_flowid_stats *stats, int id, int dir);
+void mcs_get_rx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id);
+void mcs_get_tx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id);
+void mcs_clear_stats(struct mcs *mcs, u8 type, u8 id, int dir);
+int mcs_clear_all_stats(struct mcs *mcs, u16 pcifunc, int dir);
+int mcs_set_force_clk_en(struct mcs *mcs, bool set);
+
+int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event);
+
+#endif /* MCS_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
new file mode 100644
index 000000000..9f9b904ab
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#include "mcs.h"
+#include "mcs_reg.h"
+
+static struct mcs_ops cnf10kb_mcs_ops = {
+ .mcs_set_hw_capabilities = cnf10kb_mcs_set_hw_capabilities,
+ .mcs_parser_cfg = cnf10kb_mcs_parser_cfg,
+ .mcs_tx_sa_mem_map_write = cnf10kb_mcs_tx_sa_mem_map_write,
+ .mcs_rx_sa_mem_map_write = cnf10kb_mcs_rx_sa_mem_map_write,
+ .mcs_flowid_secy_map = cnf10kb_mcs_flowid_secy_map,
+ .mcs_bbe_intr_handler = cnf10kb_mcs_bbe_intr_handler,
+ .mcs_pab_intr_handler = cnf10kb_mcs_pab_intr_handler,
+};
+
+struct mcs_ops *cnf10kb_get_mac_ops(void)
+{
+ return &cnf10kb_mcs_ops;
+}
+
+void cnf10kb_mcs_set_hw_capabilities(struct mcs *mcs)
+{
+ struct hwinfo *hw = mcs->hw;
+
+ hw->tcam_entries = 64; /* TCAM entries */
+ hw->secy_entries = 64; /* SecY entries */
+ hw->sc_entries = 64; /* SC CAM entries */
+ hw->sa_entries = 128; /* SA entries */
+ hw->lmac_cnt = 4; /* lmacs/ports per mcs block */
+	hw->mcs_x2p_intf = 1;		/* X2P calibration interfaces */
+ hw->mcs_blks = 7; /* MCS blocks */
+ hw->ip_vec = MCS_CNF10KB_INT_VEC_IP; /* IP vector */
+}
+
+void cnf10kb_mcs_parser_cfg(struct mcs *mcs)
+{
+ u64 reg, val;
+
+ /* VLAN Ctag */
+ val = (0x8100ull & 0xFFFF) | BIT_ULL(20) | BIT_ULL(22);
+
+ reg = MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(0);
+ mcs_reg_write(mcs, reg, val);
+
+ reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(0);
+ mcs_reg_write(mcs, reg, val);
+
+ /* VLAN STag */
+ val = (0x88a8ull & 0xFFFF) | BIT_ULL(20) | BIT_ULL(23);
+
+ /* RX */
+ reg = MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(1);
+ mcs_reg_write(mcs, reg, val);
+
+ /* TX */
+ reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(1);
+ mcs_reg_write(mcs, reg, val);
+
+	/* Enable custom tags 0 and 1 and SecTAG */
+ val = BIT_ULL(0) | BIT_ULL(1) | BIT_ULL(12);
+
+ reg = MCSX_PEX_RX_SLAVE_ETYPE_ENABLE;
+ mcs_reg_write(mcs, reg, val);
+
+ reg = MCSX_PEX_TX_SLAVE_ETYPE_ENABLE;
+ mcs_reg_write(mcs, reg, val);
+}
+
+void cnf10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir)
+{
+ u64 reg, val;
+
+ val = (map->secy & 0x3F) | (map->ctrl_pkt & 0x1) << 6;
+ if (dir == MCS_RX) {
+ reg = MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(map->flow_id);
+ } else {
+ reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(map->flow_id);
+ mcs_reg_write(mcs, reg, map->sci);
+ val |= (map->sc & 0x3F) << 7;
+ reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_1X(map->flow_id);
+ }
+
+ mcs_reg_write(mcs, reg, val);
+}
+
+void cnf10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map)
+{
+ u64 reg, val;
+
+ val = (map->sa_index0 & 0x7F) | (map->sa_index1 & 0x7F) << 7;
+
+ reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(map->sc_id);
+ mcs_reg_write(mcs, reg, val);
+
+ reg = MCSX_CPM_TX_SLAVE_AUTO_REKEY_ENABLE_0;
+ val = mcs_reg_read(mcs, reg);
+
+ if (map->rekey_ena)
+ val |= BIT_ULL(map->sc_id);
+ else
+ val &= ~BIT_ULL(map->sc_id);
+
+ mcs_reg_write(mcs, reg, val);
+
+ mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_SA_INDEX0_VLDX(map->sc_id), map->sa_index0_vld);
+ mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_SA_INDEX1_VLDX(map->sc_id), map->sa_index1_vld);
+
+ mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_SA_ACTIVEX(map->sc_id), map->tx_sa_active);
+}
+
+void cnf10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map)
+{
+ u64 val, reg;
+
+ val = (map->sa_index & 0x7F) | (map->sa_in_use << 7);
+
+ reg = MCSX_CPM_RX_SLAVE_SA_MAP_MEMX((4 * map->sc_id) + map->an);
+ mcs_reg_write(mcs, reg, val);
+}
+
+int mcs_set_force_clk_en(struct mcs *mcs, bool set)
+{
+ unsigned long timeout = jiffies + usecs_to_jiffies(2000);
+ u64 val;
+
+ val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
+
+ if (set) {
+ val |= BIT_ULL(4);
+ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
+
+		/* Poll until mcsx_mil_ip_gbl_status.mcs_ip_stats_ready is 1 */
+ while (!(mcs_reg_read(mcs, MCSX_MIL_IP_GBL_STATUS) & BIT_ULL(0))) {
+ if (time_after(jiffies, timeout)) {
+ dev_err(mcs->dev, "MCS set force clk enable failed\n");
+ break;
+ }
+ }
+ } else {
+ val &= ~BIT_ULL(4);
+ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
+ }
+
+ return 0;
+}
+
+/* The TX SA interrupt is raised only if autorekey is enabled.
+ * The MCS_CPM_TX_SLAVE_SA_MAP_MEM_0X[sc].tx_sa_active bit is toggled
+ * when one of the two SAs mapped to the SC expires. tx_sa_active=0
+ * implies the SA in SA_index1 expired; otherwise SA_index0 expired.
+ */
+void cnf10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs)
+{
+	struct mcs_intr_event event = { 0 };
+ struct rsrc_bmap *sc_bmap;
+ unsigned long rekey_ena;
+ u64 val, sa_status;
+ int sc;
+
+ sc_bmap = &mcs->tx.sc;
+
+ event.mcs_id = mcs->mcs_id;
+ event.intr_mask = MCS_CPM_TX_PN_THRESH_REACHED_INT;
+
+ rekey_ena = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_AUTO_REKEY_ENABLE_0);
+
+ for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
+		/* Skip if auto rekey is not enabled for this SC */
+ if (!test_bit(sc, &rekey_ena))
+ continue;
+ sa_status = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_TX_SA_ACTIVEX(sc));
+ /* Check if tx_sa_active status had changed */
+ if (sa_status == mcs->tx_sa_active[sc])
+ continue;
+
+		/* sa_status set implies SA_index0 expired, else SA_index1 */
+ val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
+ if (sa_status)
+ event.sa_id = val & 0x7F;
+ else
+ event.sa_id = (val >> 7) & 0x7F;
+
+ event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
+
+void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs)
+{
+ struct mcs_intr_event event = { 0 };
+ struct rsrc_bmap *sc_bmap;
+ u64 val;
+ int sc;
+
+ sc_bmap = &mcs->tx.sc;
+
+ event.mcs_id = mcs->mcs_id;
+ event.intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;
+
+ for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
+ val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
+
+ if (mcs->tx_sa_active[sc])
+ /* SA_index1 was used and got expired */
+ event.sa_id = (val >> 7) & 0x7F;
+ else
+ /* SA_index0 was used and got expired */
+ event.sa_id = val & 0x7F;
+
+ event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
+
+void cnf10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr,
+ enum mcs_direction dir)
+{
+ struct mcs_intr_event event = { 0 };
+ int i;
+
+ if (!(intr & MCS_BBE_INT_MASK))
+ return;
+
+ event.mcs_id = mcs->mcs_id;
+ event.pcifunc = mcs->pf_map[0];
+
+ for (i = 0; i < MCS_MAX_BBE_INT; i++) {
+ if (!(intr & BIT_ULL(i)))
+ continue;
+
+ /* Lower nibble denotes data fifo overflow interrupts and
+ * upper nibble indicates policy fifo overflow interrupts.
+ */
+ if (intr & 0xFULL)
+ event.intr_mask = (dir == MCS_RX) ?
+ MCS_BBE_RX_DFIFO_OVERFLOW_INT :
+ MCS_BBE_TX_DFIFO_OVERFLOW_INT;
+ else
+ event.intr_mask = (dir == MCS_RX) ?
+ MCS_BBE_RX_PLFIFO_OVERFLOW_INT :
+ MCS_BBE_TX_PLFIFO_OVERFLOW_INT;
+
+		/* Report the lmac_id that hit the BBE fatal error */
+ event.lmac_id = i & 0x3ULL;
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
+
+void cnf10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr,
+ enum mcs_direction dir)
+{
+ struct mcs_intr_event event = { 0 };
+ int i;
+
+ if (!(intr & MCS_PAB_INT_MASK))
+ return;
+
+ event.mcs_id = mcs->mcs_id;
+ event.pcifunc = mcs->pf_map[0];
+
+ for (i = 0; i < MCS_MAX_PAB_INT; i++) {
+ if (!(intr & BIT_ULL(i)))
+ continue;
+
+ event.intr_mask = (dir == MCS_RX) ?
+ MCS_PAB_RX_CHAN_OVERFLOW_INT :
+ MCS_PAB_TX_CHAN_OVERFLOW_INT;
+
+		/* Report the lmac_id that hit the PAB fatal error */
+ event.lmac_id = i;
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
new file mode 100644
index 000000000..f4c6de890
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
@@ -0,0 +1,1129 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#ifndef MCS_REG_H
+#define MCS_REG_H
+
+#include <linux/bits.h>
+
+/* Registers */
+#define MCSX_IP_MODE 0x900c8ull
+#define MCSX_MCS_TOP_SLAVE_PORT_RESET(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x408ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa28ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+
+#define MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x808ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa68ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_MIL_GLOBAL ({ \
+ u64 offset; \
+ \
+ offset = 0x80000ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x60000ull; \
+ offset; })
+
+#define MCSX_MIL_RX_LMACX_CFG(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x900a8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x700a8ull; \
+ offset += (a) * 0x800ull; \
+ offset; })
+
+#define MCSX_HIL_GLOBAL ({ \
+ u64 offset; \
+ \
+ offset = 0xc0000ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa0000ull; \
+ offset; })
+
+#define MCSX_LINK_LMACX_CFG(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x90000ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x70000ull; \
+ offset += (a) * 0x800ull; \
+ offset; })
+
+#define MCSX_MIL_RX_GBL_STATUS ({ \
+ u64 offset; \
+ \
+ offset = 0x800c8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x600c8ull; \
+ offset; })
+
+#define MCSX_MIL_IP_GBL_STATUS ({ \
+ u64 offset; \
+ \
+ offset = 0x800d0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x600d0ull; \
+ offset; })
+
+/* PAB */
+#define MCSX_PAB_RX_SLAVE_PORT_CFGX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1718ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x280ull; \
+ offset += (a) * 0x40ull; \
+ offset; })
+
+#define MCSX_PAB_TX_SLAVE_PORT_CFGX(a) (0x2930ull + (a) * 0x40ull)
+
+/* PEX registers */
+#define MCSX_PEX_RX_SLAVE_VLAN_CFGX(a) (0x3b58ull + (a) * 0x8ull)
+#define MCSX_PEX_TX_SLAVE_VLAN_CFGX(a) (0x46f8ull + (a) * 0x8ull)
+#define MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(a) (0x788ull + (a) * 0x8ull)
+#define MCSX_PEX_TX_SLAVE_PORT_CONFIG(a) (0x4738ull + (a) * 0x8ull)
+#define MCSX_PEX_RX_SLAVE_PORT_CFGX(a) (0x3b98ull + (a) * 0x8ull)
+#define MCSX_PEX_RX_SLAVE_RULE_ETYPE_CFGX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x3fc0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x558ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_DAX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4000ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x598ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MINX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4040ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5d8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MAXX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4048ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5e0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_COMBO_MINX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4080ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x648ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_COMBO_MAXX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4088ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x650ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_COMBO_ETX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4090ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x658ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_MAC ({ \
+ u64 offset; \
+ \
+ offset = 0x40e0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x6d8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_ENABLE ({ \
+ u64 offset; \
+ \
+ offset = 0x40e8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x6e0ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_ETYPE_CFGX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4b60ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x7d8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_DAX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4ba0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x818ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MINX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4be0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x858ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MAXX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4be8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x860ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_COMBO_MINX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4c20ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x8c8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_COMBO_MAXX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4c28ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x8d0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_COMBO_ETX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4c30ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x8d8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_MAC ({ \
+ u64 offset; \
+ \
+ offset = 0x4c80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x958ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_ENABLE ({ \
+ u64 offset; \
+ \
+ offset = 0x4c88ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x960ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION ({ \
+ u64 offset; \
+ \
+ offset = 0x3b50ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x4c0ull; \
+ offset; })
+
+/* CNF10K-B */
+#define MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(a) (0x4c8ull + (a) * 0x8ull)
+#define MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(a) (0x748ull + (a) * 0x8ull)
+#define MCSX_PEX_RX_SLAVE_ETYPE_ENABLE 0x6e8ull
+#define MCSX_PEX_TX_SLAVE_ETYPE_ENABLE 0x968ull
+
+/* BBE */
+#define MCSX_BBE_RX_SLAVE_PADDING_CTL 0xe08ull
+#define MCSX_BBE_TX_SLAVE_PADDING_CTL 0x12f8ull
+#define MCSX_BBE_RX_SLAVE_CAL_ENTRY 0x180ull
+#define MCSX_BBE_RX_SLAVE_CAL_LEN 0x188ull
+#define MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(a) (0x290ull + (a) * 0x40ull)
+#define MCSX_BBE_RX_SLAVE_DFIFO_OVERFLOW_0 0xe20
+#define MCSX_BBE_TX_SLAVE_DFIFO_OVERFLOW_0 0x1298
+#define MCSX_BBE_RX_SLAVE_PLFIFO_OVERFLOW_0 0xe40
+#define MCSX_BBE_TX_SLAVE_PLFIFO_OVERFLOW_0 0x12b8
+#define MCSX_BBE_RX_SLAVE_BBE_INT ({ \
+ u64 offset; \
+ \
+ offset = 0xe00ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x160ull; \
+ offset; })
+
+#define MCSX_BBE_RX_SLAVE_BBE_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0xe08ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x168ull; \
+ offset; })
+
+#define MCSX_BBE_RX_SLAVE_BBE_INT_INTR_RW ({ \
+ u64 offset; \
+ \
+ offset = 0xe08ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x178ull; \
+ offset; })
+
+#define MCSX_BBE_TX_SLAVE_BBE_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x1278ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x1e0ull; \
+ offset; })
+
+#define MCSX_BBE_TX_SLAVE_BBE_INT_INTR_RW ({ \
+ u64 offset; \
+ \
+ offset = 0x1278ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x1f8ull; \
+ offset; })
+
+#define MCSX_BBE_TX_SLAVE_BBE_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0x1280ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x1e8ull; \
+ offset; })
+
+#define MCSX_PAB_RX_SLAVE_PAB_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x16f0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x260ull; \
+ offset; })
+
+#define MCSX_PAB_RX_SLAVE_PAB_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0x16f8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x268ull; \
+ offset; })
+
+#define MCSX_PAB_RX_SLAVE_PAB_INT_INTR_RW ({ \
+ u64 offset; \
+ \
+ offset = 0x16f8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x278ull; \
+ offset; })
+
+#define MCSX_PAB_TX_SLAVE_PAB_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x2908ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x380ull; \
+ offset; })
+
+#define MCSX_PAB_TX_SLAVE_PAB_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0x2910ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x388ull; \
+ offset; })
+
+#define MCSX_PAB_TX_SLAVE_PAB_INT_INTR_RW ({ \
+ u64 offset; \
+ \
+ offset = 0x16f8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x398ull; \
+ offset; })
+
+/* CPM registers */
+#define MCSX_CPM_RX_SLAVE_FLOWID_TCAM_DATAX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x30740ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x3bf8ull; \
+ offset += (a) * 0x8ull + (b) * 0x20ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x34740ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x43f8ull; \
+ offset += (a) * 0x8ull + (b) * 0x20ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_0 ({ \
+ u64 offset; \
+ \
+ offset = 0x30700ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x3bd8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SC_CAMX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x38780ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x4c08ull; \
+ offset += (a) * 0x8ull + (b) * 0x10ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SC_CAM_ENA(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x38740ull + (a) * 0x8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x4bf8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x23ee0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xbd0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_0X(a) ({ \
+ u64 offset; \
+ \
+ offset = (0x246e0ull + (a) * 0x10ull); \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = (0xdd0ull + (a) * 0x8ull); \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SA_KEY_LOCKOUTX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x23E90ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xbb0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SA_MAP_MEMX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x256e0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xfd0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SA_PLCY_MEMX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x27700ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x17d8ull; \
+ offset += (a) * 0x8ull + (b) * 0x40ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SA_PN_TABLE_MEMX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x2f700ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x37d8; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_XPN_THRESHOLD ({ \
+ u64 offset; \
+ \
+ offset = 0x23e40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xb90ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_PN_THRESHOLD ({ \
+ u64 offset; \
+ \
+ offset = 0x23e48ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xb98ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_PN_THRESH_REACHEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x23e50ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xba0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_1 0x30708ull
+#define MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_1X(a) (0x246e8ull + (a) * 0x10ull)
+
+/* TX registers */
+#define MCSX_CPM_TX_SLAVE_FLOWID_TCAM_DATAX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x51d50ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa7c0ull; \
+ offset += (a) * 0x8ull + (b) * 0x20ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x55d50ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xafc0ull; \
+ offset += (a) * 0x8ull + (b) * 0x20ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_0 ({ \
+ u64 offset; \
+ \
+ offset = 0x51d10ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa7a0ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x3e508ull + (a) * 0x8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5550ull + (a) * 0x10ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SECY_PLCY_MEMX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x3ed08ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5950ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SA_KEY_LOCKOUTX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x3e4c0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5538ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x3fd10ull + (a) * 0x10ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x6150ull + (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SA_PLCY_MEMX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x40d10ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x63a0ull; \
+ offset += (a) * 0x8ull + (b) * 0x80ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SA_PN_TABLE_MEMX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x50d10ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa3a0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_XPN_THRESHOLD ({ \
+ u64 offset; \
+ \
+ offset = 0x3e4b0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5528ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_PN_THRESHOLD ({ \
+ u64 offset; \
+ \
+ offset = 0x3e4b8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5530ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SA_MAP_MEM_1X(a) (0x3fd18ull + (a) * 0x10ull)
+#define MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_1X(a) (0x5558ull + (a) * 0x10ull)
+#define MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_1 0x51d18ull
+#define MCSX_CPM_TX_SLAVE_TX_SA_ACTIVEX(a) (0x5b50 + (a) * 0x8ull)
+#define MCSX_CPM_TX_SLAVE_SA_INDEX0_VLDX(a) (0x5d50 + (a) * 0x8ull)
+#define MCSX_CPM_TX_SLAVE_SA_INDEX1_VLDX(a) (0x5f50 + (a) * 0x8ull)
+#define MCSX_CPM_TX_SLAVE_AUTO_REKEY_ENABLE_0 0x5500ull
+
+/* CSE */
+#define MCSX_CSE_RX_MEM_SLAVE_IFINCTLBCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x9e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xc218ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINCTLMCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x9680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xc018ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINCTLOCTETSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x6e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xbc18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINCTLUCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x8e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xbe18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLBCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x8680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xca18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLMCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x7e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xc818ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLOCTETSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x6680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xc418ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLUCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x7680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xc618ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYDECRYPTEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x5e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xdc18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYVALIDATEX(a)({ \
+ u64 offset; \
+ \
+ offset = 0x5680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xda18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSCTRLPORTDISABLEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xd680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xce18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMHITX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x16a80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xec78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMMISSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x16680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xec38ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSPARSEERRX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x16880ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xec18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCCAMHITX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xfe80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xde18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCINVALIDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x10680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xe418ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCNOTVALIDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x10e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xe218ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYBADTAGX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xae80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xd418ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xc680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xd618ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAERRORX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xce80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xd818ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYTAGGEDCTLX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xbe80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xcc18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_SLAVE_CTRL ({ \
+ u64 offset; \
+ \
+ offset = 0x52a0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x9c0ull; \
+ offset; })
+
+#define MCSX_CSE_RX_SLAVE_STATS_CLEAR ({ \
+ u64 offset; \
+ \
+ offset = 0x52b8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x9d8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xee80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xe818ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xa680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xd018ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xf680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xe018ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCDECRYPTEDX(a) (0xe680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCVALIDATEX(a) (0xde80ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOTAGX(a) (0xd218 + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(a) (0xb680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSAINVALIDX(a) (0x12680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTUSINGSAERRORX(a) (0x15680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTVALIDX(a) (0x13680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSAOKX(a) (0x11680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSAUNUSEDSAX(a) (0x14680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSEARLYPREEMPTERRX(a) (0xec58ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCOKX(a) (0xea18ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCDELAYEDX(a) (0xe618ull + (a) * 0x8ull)
+
+/* CSE TX */
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCOMMONOCTETSX(a) (0x18440ull + (a) * 0x8ull)
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLBCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1c440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xf478ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLMCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1bc40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xf278ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLOCTETSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x19440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xee78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLUCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1b440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xf078ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLBCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1ac40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xfc78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLMCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1a440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xfa78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLOCTETSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x18c40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xf678ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLUCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x19c40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xf878ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYENCRYPTEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x17c40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10878ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYPROTECTEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x17440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10678ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSCTRLPORTDISABLEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1e440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xfe78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMHITX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x23240ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10ed8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMMISSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x22c40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10e98ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSPARSEERRX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x22e40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10e78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCENCRYPTEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x20440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10c78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCPROTECTEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1fc40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10a78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECTAGINSERTIONERRX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x23040ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x110d8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYNOACTIVESAX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1dc40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10278ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYTOOLONGX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1d440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10478ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYUNTAGGEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1cc40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10078ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_SLAVE_CTRL ({ \
+ u64 offset; \
+ \
+ offset = 0x54a0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa00ull; \
+ offset; })
+
+#define MCSX_CSE_TX_SLAVE_STATS_CLEAR ({ \
+ u64 offset; \
+ \
+ offset = 0x54b8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa18ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCENCRYPTEDX(a) (0x1f440ull + (a) * 0x8ull)
+#define MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCPROTECTEDX(a) (0x1ec40ull + (a) * 0x8ull)
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSEARLYPREEMPTERRX(a) (0x10eb8ull + (a) * 0x8ull)
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAENCRYPTEDX(a) (0x21c40ull + (a) * 0x8ull)
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAPROTECTEDX(a) (0x20c40ull + (a) * 0x8ull)
+
+#define MCSX_IP_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x80028ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x60028ull; \
+ offset; })
+
+#define MCSX_IP_INT_ENA_W1S ({ \
+ u64 offset; \
+ \
+ offset = 0x80040ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x60040ull; \
+ offset; })
+
+#define MCSX_IP_INT_ENA_W1C ({ \
+ u64 offset; \
+ \
+ offset = 0x80038ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x60038ull; \
+ offset; })
+
+#define MCSX_TOP_SLAVE_INT_SUM ({ \
+ u64 offset; \
+ \
+ offset = 0xc20ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xab8ull; \
+ offset; })
+
+#define MCSX_TOP_SLAVE_INT_SUM_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0xc28ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xac0ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_RX_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x23c00ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x0ad8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_RX_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0x23c08ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xae0ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_TX_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x3d490ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x54a0ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_TX_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0x3d498ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x54a8ull; \
+ offset; })
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
new file mode 100644
index 000000000..dfd23580e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
@@ -0,0 +1,926 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell CN10K MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mcs.h"
+#include "rvu.h"
+#include "mcs_reg.h"
+#include "lmac_common.h"
+
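+/* Generate a static otx2_mbox_alloc_msg_<name>() helper for each AF-to-PF
+ * (up direction) MCS mailbox message listed in MBOX_UP_MCS_MESSAGES.
+ */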
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
+static struct _req_type __maybe_unused \
+*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid) \
+{ \
+ struct _req_type *req; \
+ \
+ req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
+ &rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
+ sizeof(struct _rsp_type)); \
+ if (!req) \
+ return NULL; \
+ req->hdr.sig = OTX2_MBOX_REQ_SIG; \
+ req->hdr.id = _id; \
+ return req; \
+}
+
+MBOX_UP_MCS_MESSAGES
+#undef M
+
+void rvu_mcs_ptp_cfg(struct rvu *rvu, u8 rpm_id, u8 lmac_id, bool ena)
+{
+ struct mcs *mcs;
+ u64 cfg;
+ u8 port;
+
+ if (!rvu->mcs_blk_cnt)
+ return;
+
+	/* When PTP is enabled, RPM appends an 8B header to all
+	 * RX packets. MCS PEX needs to be configured to skip these
+	 * 8B during packet parsing.
+	 */
+
+ /* CNF10K-B */
+ if (rvu->mcs_blk_cnt > 1) {
+ mcs = mcs_get_pdata(rpm_id);
+ cfg = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION);
+ if (ena)
+ cfg |= BIT_ULL(lmac_id);
+ else
+ cfg &= ~BIT_ULL(lmac_id);
+ mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION, cfg);
+ return;
+ }
+ /* CN10KB */
+ mcs = mcs_get_pdata(0);
+ port = (rpm_id * rvu->hw->lmac_per_cgx) + lmac_id;
+ cfg = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PORT_CFGX(port));
+ if (ena)
+ cfg |= BIT_ULL(0);
+ else
+ cfg &= ~BIT_ULL(0);
+ mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PORT_CFGX(port), cfg);
+}
+
+int rvu_mbox_handler_mcs_set_lmac_mode(struct rvu *rvu,
+ struct mcs_set_lmac_mode *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (BIT_ULL(req->lmac_id) & mcs->hw->lmac_bmap)
+ mcs_set_lmac_mode(mcs, req->lmac_id, req->mode);
+
+ return 0;
+}
+
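+/* Queue an MCS interrupt event and schedule the workqueue so that the
+ * owning PF is notified from process context via mcs_intr_handler_task().
+ */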
+int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event)
+{
+ struct mcs_intrq_entry *qentry;
+ u16 pcifunc = event->pcifunc;
+ struct rvu *rvu = mcs->rvu;
+ struct mcs_pfvf *pfvf;
+
+ /* Check if it is PF or VF */
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)];
+ else
+ pfvf = &mcs->pf[rvu_get_pf(pcifunc)];
+
+ event->intr_mask &= pfvf->intr_mask;
+
+	/* Check if PF/VF interrupt notification is enabled */
+ if (!(pfvf->intr_mask && event->intr_mask))
+ return 0;
+
+ qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
+ if (!qentry)
+ return -ENOMEM;
+
+ qentry->intr_event = *event;
+ spin_lock(&rvu->mcs_intrq_lock);
+ list_add_tail(&qentry->node, &rvu->mcs_intrq_head);
+ spin_unlock(&rvu->mcs_intrq_lock);
+ queue_work(rvu->mcs_intr_wq, &rvu->mcs_intr_work);
+
+ return 0;
+}
+
+static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
+{
+ struct mcs_intr_info *req;
+ int err, pf;
+
+ pf = rvu_get_pf(event->pcifunc);
+
+ req = otx2_mbox_alloc_msg_mcs_intr_notify(rvu, pf);
+ if (!req)
+ return -ENOMEM;
+
+ req->mcs_id = event->mcs_id;
+ req->intr_mask = event->intr_mask;
+ req->sa_id = event->sa_id;
+ req->hdr.pcifunc = event->pcifunc;
+ req->lmac_id = event->lmac_id;
+
+ otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pf);
+ err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf);
+ if (err)
+ dev_warn(rvu->dev, "MCS notification to pf %d failed\n", pf);
+
+ return 0;
+}
+
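+/* Work handler: drain the MCS interrupt event queue and send a
+ * notification to the corresponding PF for each queued event.
+ */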
+static void mcs_intr_handler_task(struct work_struct *work)
+{
+ struct rvu *rvu = container_of(work, struct rvu, mcs_intr_work);
+ struct mcs_intrq_entry *qentry;
+ struct mcs_intr_event *event;
+ unsigned long flags;
+
+ do {
+ spin_lock_irqsave(&rvu->mcs_intrq_lock, flags);
+ qentry = list_first_entry_or_null(&rvu->mcs_intrq_head,
+ struct mcs_intrq_entry,
+ node);
+ if (qentry)
+ list_del(&qentry->node);
+
+ spin_unlock_irqrestore(&rvu->mcs_intrq_lock, flags);
+ if (!qentry)
+ break; /* nothing more to process */
+
+ event = &qentry->intr_event;
+
+ mcs_notify_pfvf(event, rvu);
+ kfree(qentry);
+ } while (1);
+}
+
+int rvu_mbox_handler_mcs_intr_cfg(struct rvu *rvu,
+ struct mcs_intr_cfg *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs_pfvf *pfvf;
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ /* Check if it is PF or VF */
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)];
+ else
+ pfvf = &mcs->pf[rvu_get_pf(pcifunc)];
+
+ mcs->pf_map[0] = pcifunc;
+ pfvf->intr_mask = req->intr_mask;
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_hw_info(struct rvu *rvu,
+ struct msg_req *req,
+ struct mcs_hw_info *rsp)
+{
+ struct mcs *mcs;
+
+ if (!rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_NOT_MAPPED;
+
+	/* MCS resources are the same across all blocks */
+ mcs = mcs_get_pdata(0);
+ rsp->num_mcs_blks = rvu->mcs_blk_cnt;
+ rsp->tcam_entries = mcs->hw->tcam_entries;
+ rsp->secy_entries = mcs->hw->secy_entries;
+ rsp->sc_entries = mcs->hw->sc_entries;
+ rsp->sa_entries = mcs->hw->sa_entries;
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_port_reset(struct rvu *rvu, struct mcs_port_reset_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mcs_reset_port(mcs, req->port_id, req->reset);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_clear_stats(struct rvu *rvu,
+ struct mcs_clear_stats *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mutex_lock(&mcs->stats_lock);
+ if (req->all)
+ mcs_clear_all_stats(mcs, pcifunc, req->dir);
+ else
+ mcs_clear_stats(mcs, req->type, req->id, req->dir);
+
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_flowid_stats(struct rvu *rvu,
+ struct mcs_stats_req *req,
+ struct mcs_flowid_stats *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ /* In CNF10K-B, before reading the statistics,
+ * MCSX_MIL_GLOBAL.FORCE_CLK_EN_IP needs to be set
+ * to get accurate statistics
+ */
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, true);
+
+ mutex_lock(&mcs->stats_lock);
+ mcs_get_flowid_stats(mcs, rsp, req->id, req->dir);
+ mutex_unlock(&mcs->stats_lock);
+
+ /* Clear MCSX_MIL_GLOBAL.FORCE_CLK_EN_IP after reading
+ * the statistics
+ */
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_secy_stats(struct rvu *rvu,
+ struct mcs_stats_req *req,
+ struct mcs_secy_stats *rsp)
+{
+	struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, true);
+
+ mutex_lock(&mcs->stats_lock);
+
+ if (req->dir == MCS_RX)
+ mcs_get_rx_secy_stats(mcs, rsp, req->id);
+ else
+ mcs_get_tx_secy_stats(mcs, rsp, req->id);
+
+ mutex_unlock(&mcs->stats_lock);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_sc_stats(struct rvu *rvu,
+ struct mcs_stats_req *req,
+ struct mcs_sc_stats *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, true);
+
+ mutex_lock(&mcs->stats_lock);
+ mcs_get_sc_stats(mcs, rsp, req->id, req->dir);
+ mutex_unlock(&mcs->stats_lock);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_sa_stats(struct rvu *rvu,
+ struct mcs_stats_req *req,
+ struct mcs_sa_stats *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, true);
+
+ mutex_lock(&mcs->stats_lock);
+ mcs_get_sa_stats(mcs, rsp, req->id, req->dir);
+ mutex_unlock(&mcs->stats_lock);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_port_stats(struct rvu *rvu,
+ struct mcs_stats_req *req,
+ struct mcs_port_stats *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, true);
+
+ mutex_lock(&mcs->stats_lock);
+ mcs_get_port_stats(mcs, rsp, req->id, req->dir);
+ mutex_unlock(&mcs->stats_lock);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_set_active_lmac(struct rvu *rvu,
+ struct mcs_set_active_lmac *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ if (!mcs)
+ return MCS_AF_ERR_NOT_MAPPED;
+
+ mcs->hw->lmac_bmap = req->lmac_bmap;
+ mcs_set_lmac_channels(req->mcs_id, req->chan_base);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_port_cfg_set(struct rvu *rvu, struct mcs_port_cfg_set_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->lmac_cnt <= req->port_id || !(mcs->hw->lmac_bmap & BIT_ULL(req->port_id)))
+ return -EINVAL;
+
+ mcs_set_port_cfg(mcs, req);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_port_cfg_get(struct rvu *rvu, struct mcs_port_cfg_get_req *req,
+ struct mcs_port_cfg_get_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->lmac_cnt <= req->port_id || !(mcs->hw->lmac_bmap & BIT_ULL(req->port_id)))
+ return -EINVAL;
+
+ mcs_get_port_cfg(mcs, req, rsp);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_custom_tag_cfg_get(struct rvu *rvu, struct mcs_custom_tag_cfg_get_req *req,
+ struct mcs_custom_tag_cfg_get_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mcs_get_custom_tag_cfg(mcs, req, rsp);
+
+ return 0;
+}
+
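+/* FLR handler: free all RX and TX MCS resources held by the given
+ * PF/VF (pcifunc) on every MCS block.
+ */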
+int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc)
+{
+ struct mcs *mcs;
+ int mcs_id;
+
+	/* CNF10K-B: mcs0-6 are mapped to RPM2-8 */
+ if (rvu->mcs_blk_cnt > 1) {
+ for (mcs_id = 0; mcs_id < rvu->mcs_blk_cnt; mcs_id++) {
+ mcs = mcs_get_pdata(mcs_id);
+ mcs_free_all_rsrc(mcs, MCS_RX, pcifunc);
+ mcs_free_all_rsrc(mcs, MCS_TX, pcifunc);
+ }
+ } else {
+ /* CN10K-B has only one mcs block */
+ mcs = mcs_get_pdata(0);
+ mcs_free_all_rsrc(mcs, MCS_RX, pcifunc);
+ mcs_free_all_rsrc(mcs, MCS_TX, pcifunc);
+ }
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_flowid_ena_entry(struct rvu *rvu,
+ struct mcs_flowid_ena_dis_entry *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ mcs_ena_dis_flowid_entry(mcs, req->flow_id, req->dir, req->ena);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_pn_table_write(struct rvu *rvu,
+ struct mcs_pn_table_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ mcs_pn_table_write(mcs, req->pn_id, req->next_pn, req->dir);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_set_pn_threshold(struct rvu *rvu,
+ struct mcs_set_pn_threshold *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mcs_pn_threshold_set(mcs, req);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_rx_sc_sa_map_write(struct rvu *rvu,
+ struct mcs_rx_sc_sa_map *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ mcs->mcs_ops->mcs_rx_sa_mem_map_write(mcs, req);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_tx_sc_sa_map_write(struct rvu *rvu,
+ struct mcs_tx_sc_sa_map *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ mcs->mcs_ops->mcs_tx_sa_mem_map_write(mcs, req);
+ mcs->tx_sa_active[req->sc_id] = req->tx_sa_active;
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_sa_plcy_write(struct rvu *rvu,
+ struct mcs_sa_plcy_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+ int i;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ for (i = 0; i < req->sa_cnt; i++)
+ mcs_sa_plcy_write(mcs, &req->plcy[i][0],
+ req->sa_index[i], req->dir);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_rx_sc_cam_write(struct rvu *rvu,
+ struct mcs_rx_sc_cam_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ mcs_rx_sc_cam_write(mcs, req->sci, req->secy_id, req->sc_id);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_secy_plcy_write(struct rvu *rvu,
+ struct mcs_secy_plcy_write_req *req,
+ struct msg_rsp *rsp)
+{
+	struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mcs_secy_plcy_write(mcs, req->plcy,
+ req->secy_id, req->dir);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_flowid_entry_write(struct rvu *rvu,
+ struct mcs_flowid_entry_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct secy_mem_map map;
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ /* TODO validate the flowid */
+ mcs_flowid_entry_write(mcs, req->data, req->mask,
+ req->flow_id, req->dir);
+ map.secy = req->secy_id;
+ map.sc = req->sc_id;
+ map.ctrl_pkt = req->ctrl_pkt;
+ map.flow_id = req->flow_id;
+ map.sci = req->sci;
+ mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, req->dir);
+ if (req->ena)
+ mcs_ena_dis_flowid_entry(mcs, req->flow_id,
+ req->dir, true);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_free_resources(struct rvu *rvu,
+ struct mcs_free_rsrc_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs_rsrc_map *map;
+ struct mcs *mcs;
+ int rc = 0;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (req->dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ mutex_lock(&rvu->rsrc_lock);
+ /* Free all the cam resources mapped to PF/VF */
+ if (req->all) {
+ rc = mcs_free_all_rsrc(mcs, req->dir, pcifunc);
+ goto exit;
+ }
+
+ switch (req->rsrc_type) {
+ case MCS_RSRC_TYPE_FLOWID:
+ rc = mcs_free_rsrc(&map->flow_ids, map->flowid2pf_map, req->rsrc_id, pcifunc);
+ mcs_ena_dis_flowid_entry(mcs, req->rsrc_id, req->dir, false);
+ break;
+ case MCS_RSRC_TYPE_SECY:
+ rc = mcs_free_rsrc(&map->secy, map->secy2pf_map, req->rsrc_id, pcifunc);
+ mcs_clear_secy_plcy(mcs, req->rsrc_id, req->dir);
+ break;
+ case MCS_RSRC_TYPE_SC:
+ rc = mcs_free_rsrc(&map->sc, map->sc2pf_map, req->rsrc_id, pcifunc);
+ /* Disable SC CAM only on RX side */
+ if (req->dir == MCS_RX)
+ mcs_ena_dis_sc_cam_entry(mcs, req->rsrc_id, false);
+ break;
+ case MCS_RSRC_TYPE_SA:
+ rc = mcs_free_rsrc(&map->sa, map->sa2pf_map, req->rsrc_id, pcifunc);
+ break;
+ }
+exit:
+ mutex_unlock(&rvu->rsrc_lock);
+ return rc;
+}
+
+int rvu_mbox_handler_mcs_alloc_resources(struct rvu *rvu,
+ struct mcs_alloc_rsrc_req *req,
+ struct mcs_alloc_rsrc_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs_rsrc_map *map;
+ struct mcs *mcs;
+ int rsrc_id, i;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (req->dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ if (req->all) {
+ rsrc_id = mcs_alloc_all_rsrc(mcs, &rsp->flow_ids[0],
+ &rsp->secy_ids[0],
+ &rsp->sc_ids[0],
+ &rsp->sa_ids[0],
+ &rsp->sa_ids[1],
+ pcifunc, req->dir);
+ goto exit;
+ }
+
+ switch (req->rsrc_type) {
+ case MCS_RSRC_TYPE_FLOWID:
+ for (i = 0; i < req->rsrc_cnt; i++) {
+ rsrc_id = mcs_alloc_rsrc(&map->flow_ids, map->flowid2pf_map, pcifunc);
+ if (rsrc_id < 0)
+ goto exit;
+ rsp->flow_ids[i] = rsrc_id;
+ rsp->rsrc_cnt++;
+ }
+ break;
+ case MCS_RSRC_TYPE_SECY:
+ for (i = 0; i < req->rsrc_cnt; i++) {
+ rsrc_id = mcs_alloc_rsrc(&map->secy, map->secy2pf_map, pcifunc);
+ if (rsrc_id < 0)
+ goto exit;
+ rsp->secy_ids[i] = rsrc_id;
+ rsp->rsrc_cnt++;
+ }
+ break;
+ case MCS_RSRC_TYPE_SC:
+ for (i = 0; i < req->rsrc_cnt; i++) {
+ rsrc_id = mcs_alloc_rsrc(&map->sc, map->sc2pf_map, pcifunc);
+ if (rsrc_id < 0)
+ goto exit;
+ rsp->sc_ids[i] = rsrc_id;
+ rsp->rsrc_cnt++;
+ }
+ break;
+ case MCS_RSRC_TYPE_SA:
+ for (i = 0; i < req->rsrc_cnt; i++) {
+ rsrc_id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
+ if (rsrc_id < 0)
+ goto exit;
+ rsp->sa_ids[i] = rsrc_id;
+ rsp->rsrc_cnt++;
+ }
+ break;
+ }
+
+ rsp->rsrc_type = req->rsrc_type;
+ rsp->dir = req->dir;
+ rsp->mcs_id = req->mcs_id;
+ rsp->all = req->all;
+
+exit:
+ if (rsrc_id < 0)
+ dev_err(rvu->dev, "Failed to allocate the mcs resources for PCIFUNC:%d\n", pcifunc);
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_alloc_ctrl_pkt_rule(struct rvu *rvu,
+ struct mcs_alloc_ctrl_pkt_rule_req *req,
+ struct mcs_alloc_ctrl_pkt_rule_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs_rsrc_map *map;
+ struct mcs *mcs;
+ int rsrc_id;
+ u16 offset;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ map = (req->dir == MCS_RX) ? &mcs->rx : &mcs->tx;
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ switch (req->rule_type) {
+ case MCS_CTRL_PKT_RULE_TYPE_ETH:
+ offset = MCS_CTRLPKT_ETYPE_RULE_OFFSET;
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_DA:
+ offset = MCS_CTRLPKT_DA_RULE_OFFSET;
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_RANGE:
+ offset = MCS_CTRLPKT_DA_RANGE_RULE_OFFSET;
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_COMBO:
+ offset = MCS_CTRLPKT_COMBO_RULE_OFFSET;
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_MAC:
+ offset = MCS_CTRLPKT_MAC_EN_RULE_OFFSET;
+ break;
+ }
+
+ rsrc_id = mcs_alloc_ctrlpktrule(&map->ctrlpktrule, map->ctrlpktrule2pf_map, offset,
+ pcifunc);
+ if (rsrc_id < 0)
+ goto exit;
+
+ rsp->rule_idx = rsrc_id;
+ rsp->rule_type = req->rule_type;
+ rsp->dir = req->dir;
+ rsp->mcs_id = req->mcs_id;
+
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+exit:
+ if (rsrc_id < 0)
+ dev_err(rvu->dev, "Failed to allocate the mcs ctrl pkt rule for PCIFUNC:%d\n",
+ pcifunc);
+ mutex_unlock(&rvu->rsrc_lock);
+ return rsrc_id;
+}
+
+int rvu_mbox_handler_mcs_free_ctrl_pkt_rule(struct rvu *rvu,
+ struct mcs_free_ctrl_pkt_rule_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+ int rc;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ rc = mcs_free_ctrlpktrule(mcs, req);
+
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return rc;
+}
+
+int rvu_mbox_handler_mcs_ctrl_pkt_rule_write(struct rvu *rvu,
+ struct mcs_ctrl_pkt_rule_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+ int rc;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ rc = mcs_ctrlpktrule_write(mcs, req);
+
+ return rc;
+}
+
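+/* Build the bitmap of LMACs that are valid on the CGX/RPM interfaces and
+ * mark them as active in the MCS LMAC bitmap (single MCS block case).
+ */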
+static void rvu_mcs_set_lmac_bmap(struct rvu *rvu)
+{
+ struct mcs *mcs = mcs_get_pdata(0);
+	unsigned long lmac_bmap = 0;
+ int cgx, lmac, port;
+
+ for (port = 0; port < mcs->hw->lmac_cnt; port++) {
+ cgx = port / rvu->hw->lmac_per_cgx;
+ lmac = port % rvu->hw->lmac_per_cgx;
+ if (!is_lmac_valid(rvu_cgx_pdata(cgx, rvu), lmac))
+ continue;
+ set_bit(port, &lmac_bmap);
+ }
+ mcs->hw->lmac_bmap = lmac_bmap;
+}
+
+int rvu_mcs_init(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int lmac, err = 0, mcs_id;
+ struct mcs *mcs;
+
+ rvu->mcs_blk_cnt = mcs_get_blkcnt();
+
+ if (!rvu->mcs_blk_cnt)
+ return 0;
+
+ /* Needed only for CN10K-B */
+ if (rvu->mcs_blk_cnt == 1) {
+ err = mcs_set_lmac_channels(0, hw->cgx_chan_base);
+ if (err)
+ return err;
+ /* Set active lmacs */
+ rvu_mcs_set_lmac_bmap(rvu);
+ }
+
+ /* Install default tcam bypass entry and set port to operational mode */
+ for (mcs_id = 0; mcs_id < rvu->mcs_blk_cnt; mcs_id++) {
+ mcs = mcs_get_pdata(mcs_id);
+ mcs_install_flowid_bypass_entry(mcs);
+ for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++)
+ mcs_set_lmac_mode(mcs, lmac, 0);
+
+ mcs->rvu = rvu;
+
+		/* Allocate memory for PFVF data */
+ mcs->pf = devm_kcalloc(mcs->dev, hw->total_pfs,
+ sizeof(struct mcs_pfvf), GFP_KERNEL);
+ if (!mcs->pf)
+ return -ENOMEM;
+
+ mcs->vf = devm_kcalloc(mcs->dev, hw->total_vfs,
+ sizeof(struct mcs_pfvf), GFP_KERNEL);
+ if (!mcs->vf)
+ return -ENOMEM;
+ }
+
+ /* Initialize the wq for handling mcs interrupts */
+ INIT_LIST_HEAD(&rvu->mcs_intrq_head);
+ INIT_WORK(&rvu->mcs_intr_work, mcs_intr_handler_task);
+ rvu->mcs_intr_wq = alloc_workqueue("mcs_intr_wq", 0, 0);
+ if (!rvu->mcs_intr_wq) {
+ dev_err(rvu->dev, "mcs alloc workqueue failed\n");
+ return -ENOMEM;
+ }
+
+ return err;
+}
+
+void rvu_mcs_exit(struct rvu *rvu)
+{
+ if (!rvu->mcs_intr_wq)
+ return;
+
+ flush_workqueue(rvu->mcs_intr_wq);
+ destroy_workqueue(rvu->mcs_intr_wq);
+ rvu->mcs_intr_wq = NULL;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
new file mode 100644
index 000000000..aaff91bc7
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -0,0 +1,626 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#ifndef NPC_H
+#define NPC_H
+
+#define NPC_KEX_CHAN_MASK 0xFFFULL
+
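+/* Helpers to program the per-interface MCAM key extraction (KEX) CSRs:
+ * SET_KEX_LD() writes the layer/LDATA extraction config and
+ * SET_KEX_LDFLAGS() writes the LDATA flags-based extraction config.
+ */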
+#define SET_KEX_LD(intf, lid, ltype, ld, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, ltype, ld), cfg)
+
+#define SET_KEX_LDFLAGS(intf, ld, flags, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg)
+
+enum NPC_LID_E {
+ NPC_LID_LA = 0,
+ NPC_LID_LB,
+ NPC_LID_LC,
+ NPC_LID_LD,
+ NPC_LID_LE,
+ NPC_LID_LF,
+ NPC_LID_LG,
+ NPC_LID_LH,
+};
+
+#define NPC_LT_NA 0
+
+enum npc_kpu_la_ltype {
+ NPC_LT_LA_8023 = 1,
+ NPC_LT_LA_ETHER,
+ NPC_LT_LA_IH_NIX_ETHER,
+ NPC_LT_LA_HIGIG2_ETHER = 7,
+ NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_LT_LA_CPT_HDR,
+ NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ NPC_LT_LA_CUSTOM_PRE_L2_ETHER,
+ NPC_LT_LA_CUSTOM0 = 0xE,
+ NPC_LT_LA_CUSTOM1 = 0xF,
+};
+
+enum npc_kpu_lb_ltype {
+ NPC_LT_LB_ETAG = 1,
+ NPC_LT_LB_CTAG,
+ NPC_LT_LB_STAG_QINQ,
+ NPC_LT_LB_BTAG,
+ NPC_LT_LB_PPPOE,
+ NPC_LT_LB_DSA,
+ NPC_LT_LB_DSA_VLAN,
+ NPC_LT_LB_EDSA,
+ NPC_LT_LB_EDSA_VLAN,
+ NPC_LT_LB_EXDSA,
+ NPC_LT_LB_EXDSA_VLAN,
+ NPC_LT_LB_FDSA,
+ NPC_LT_LB_VLAN_EXDSA,
+ NPC_LT_LB_CUSTOM0 = 0xE,
+ NPC_LT_LB_CUSTOM1 = 0xF,
+};
+
+enum npc_kpu_lc_ltype {
+ NPC_LT_LC_IP = 1,
+ NPC_LT_LC_IP_OPT,
+ NPC_LT_LC_IP6,
+ NPC_LT_LC_IP6_EXT,
+ NPC_LT_LC_ARP,
+ NPC_LT_LC_RARP,
+ NPC_LT_LC_MPLS,
+ NPC_LT_LC_NSH,
+ NPC_LT_LC_PTP,
+ NPC_LT_LC_FCOE,
+ NPC_LT_LC_NGIO,
+ NPC_LT_LC_CUSTOM0 = 0xE,
+ NPC_LT_LC_CUSTOM1 = 0xF,
+};
+
+/* Don't modify Ltypes up to SCTP, otherwise it will
+ * affect flow tag calculation and thus RSS.
+ */
+enum npc_kpu_ld_ltype {
+ NPC_LT_LD_TCP = 1,
+ NPC_LT_LD_UDP,
+ NPC_LT_LD_ICMP,
+ NPC_LT_LD_SCTP,
+ NPC_LT_LD_ICMP6,
+ NPC_LT_LD_CUSTOM0,
+ NPC_LT_LD_CUSTOM1,
+ NPC_LT_LD_IGMP = 8,
+ NPC_LT_LD_AH,
+ NPC_LT_LD_GRE,
+ NPC_LT_LD_NVGRE,
+ NPC_LT_LD_NSH,
+ NPC_LT_LD_TU_MPLS_IN_NSH,
+ NPC_LT_LD_TU_MPLS_IN_IP,
+};
+
+enum npc_kpu_le_ltype {
+ NPC_LT_LE_VXLAN = 1,
+ NPC_LT_LE_GENEVE,
+ NPC_LT_LE_ESP,
+ NPC_LT_LE_GTPU = 4,
+ NPC_LT_LE_VXLANGPE,
+ NPC_LT_LE_GTPC,
+ NPC_LT_LE_NSH,
+ NPC_LT_LE_TU_MPLS_IN_GRE,
+ NPC_LT_LE_TU_NSH_IN_GRE,
+ NPC_LT_LE_TU_MPLS_IN_UDP,
+ NPC_LT_LE_CUSTOM0 = 0xE,
+ NPC_LT_LE_CUSTOM1 = 0xF,
+};
+
+enum npc_kpu_lf_ltype {
+ NPC_LT_LF_TU_ETHER = 1,
+ NPC_LT_LF_TU_PPP,
+ NPC_LT_LF_TU_MPLS_IN_VXLANGPE,
+ NPC_LT_LF_TU_NSH_IN_VXLANGPE,
+ NPC_LT_LF_TU_MPLS_IN_NSH,
+ NPC_LT_LF_TU_3RD_NSH,
+ NPC_LT_LF_CUSTOM0 = 0xE,
+ NPC_LT_LF_CUSTOM1 = 0xF,
+};
+
+enum npc_kpu_lg_ltype {
+ NPC_LT_LG_TU_IP = 1,
+ NPC_LT_LG_TU_IP6,
+ NPC_LT_LG_TU_ARP,
+ NPC_LT_LG_TU_ETHER_IN_NSH,
+ NPC_LT_LG_CUSTOM0 = 0xE,
+ NPC_LT_LG_CUSTOM1 = 0xF,
+};
+
+/* Don't modify Ltypes up to SCTP, otherwise it will
+ * affect flow tag calculation and thus RSS.
+ */
+enum npc_kpu_lh_ltype {
+ NPC_LT_LH_TU_TCP = 1,
+ NPC_LT_LH_TU_UDP,
+ NPC_LT_LH_TU_ICMP,
+ NPC_LT_LH_TU_SCTP,
+ NPC_LT_LH_TU_ICMP6,
+ NPC_LT_LH_TU_IGMP = 8,
+ NPC_LT_LH_TU_ESP,
+ NPC_LT_LH_TU_AH,
+ NPC_LT_LH_CUSTOM0 = 0xE,
+ NPC_LT_LH_CUSTOM1 = 0xF,
+};
+
+/* NPC port kind defines how the incoming or outgoing packets
+ * are processed. NPC accepts packets from up to 64 pkinds.
+ * Software assigns pkind for each incoming port such as CGX
+ * Ethernet interfaces, LBK interfaces, etc.
+ */
+#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_CUSTOM_PRE_L2_PKIND
+
+enum npc_pkind_type {
+ NPC_RX_LBK_PKIND = 0ULL,
+ NPC_RX_CUSTOM_PRE_L2_PKIND = 55ULL,
+ NPC_RX_VLAN_EXDSA_PKIND = 56ULL,
+ NPC_RX_CHLEN24B_PKIND = 57ULL,
+ NPC_RX_CPT_HDR_PKIND,
+ NPC_RX_CHLEN90B_PKIND,
+ NPC_TX_HIGIG_PKIND,
+ NPC_RX_HIGIG_PKIND,
+ NPC_RX_EDSA_PKIND,
+ NPC_TX_DEF_PKIND, /* NIX-TX PKIND */
+};
+
+enum npc_interface_type {
+ NPC_INTF_MODE_DEF,
+};
+
+/* List of known and supported fields in the packet header and
+ * fields present in the key structure.
+ */
+enum key_fields {
+ NPC_DMAC,
+ NPC_SMAC,
+ NPC_ETYPE,
+ NPC_VLAN_ETYPE_CTAG, /* 0x8100 */
+ NPC_VLAN_ETYPE_STAG, /* 0x88A8 */
+ NPC_OUTER_VID,
+ NPC_TOS,
+ NPC_SIP_IPV4,
+ NPC_DIP_IPV4,
+ NPC_SIP_IPV6,
+ NPC_DIP_IPV6,
+ NPC_IPPROTO_TCP,
+ NPC_IPPROTO_UDP,
+ NPC_IPPROTO_SCTP,
+ NPC_IPPROTO_AH,
+ NPC_IPPROTO_ESP,
+ NPC_IPPROTO_ICMP,
+ NPC_IPPROTO_ICMP6,
+ NPC_SPORT_TCP,
+ NPC_DPORT_TCP,
+ NPC_SPORT_UDP,
+ NPC_DPORT_UDP,
+ NPC_SPORT_SCTP,
+ NPC_DPORT_SCTP,
+ NPC_HEADER_FIELDS_MAX,
+ NPC_CHAN = NPC_HEADER_FIELDS_MAX, /* Valid when Rx */
+ NPC_PF_FUNC, /* Valid when Tx */
+ NPC_ERRLEV,
+ NPC_ERRCODE,
+ NPC_LXMB,
+ NPC_EXACT_RESULT,
+ NPC_LA,
+ NPC_LB,
+ NPC_LC,
+ NPC_LD,
+ NPC_LE,
+ NPC_LF,
+ NPC_LG,
+ NPC_LH,
+ /* Ethertype for untagged frame */
+ NPC_ETYPE_ETHER,
+ /* Ethertype for single tagged frame */
+ NPC_ETYPE_TAG1,
+ /* Ethertype for double tagged frame */
+ NPC_ETYPE_TAG2,
+ /* outer vlan tci for single tagged frame */
+ NPC_VLAN_TAG1,
+ /* outer vlan tci for double tagged frame */
+ NPC_VLAN_TAG2,
+	/* other header fields programmed to extract but not of interest to us */
+ NPC_UNKNOWN,
+ NPC_KEY_FIELDS_MAX,
+};
+
+struct npc_kpu_profile_cam {
+ u8 state;
+ u8 state_mask;
+ u16 dp0;
+ u16 dp0_mask;
+ u16 dp1;
+ u16 dp1_mask;
+ u16 dp2;
+ u16 dp2_mask;
+} __packed;
+
+struct npc_kpu_profile_action {
+ u8 errlev;
+ u8 errcode;
+ u8 dp0_offset;
+ u8 dp1_offset;
+ u8 dp2_offset;
+ u8 bypass_count;
+ u8 parse_done;
+ u8 next_state;
+ u8 ptr_advance;
+ u8 cap_ena;
+ u8 lid;
+ u8 ltype;
+ u8 flags;
+ u8 offset;
+ u8 mask;
+ u8 right;
+ u8 shift;
+} __packed;
+
+struct npc_kpu_profile {
+ int cam_entries;
+ int action_entries;
+ struct npc_kpu_profile_cam *cam;
+ struct npc_kpu_profile_action *action;
+};
+
+/* NPC KPU register formats */
+struct npc_kpu_cam {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd_63_56 : 8;
+ u64 state : 8;
+ u64 dp2_data : 16;
+ u64 dp1_data : 16;
+ u64 dp0_data : 16;
+#else
+ u64 dp0_data : 16;
+ u64 dp1_data : 16;
+ u64 dp2_data : 16;
+ u64 state : 8;
+ u64 rsvd_63_56 : 8;
+#endif
+};
+
+struct npc_kpu_action0 {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd_63_57 : 7;
+ u64 byp_count : 3;
+ u64 capture_ena : 1;
+ u64 parse_done : 1;
+ u64 next_state : 8;
+ u64 rsvd_43 : 1;
+ u64 capture_lid : 3;
+ u64 capture_ltype : 4;
+ u64 capture_flags : 8;
+ u64 ptr_advance : 8;
+ u64 var_len_offset : 8;
+ u64 var_len_mask : 8;
+ u64 var_len_right : 1;
+ u64 var_len_shift : 3;
+#else
+ u64 var_len_shift : 3;
+ u64 var_len_right : 1;
+ u64 var_len_mask : 8;
+ u64 var_len_offset : 8;
+ u64 ptr_advance : 8;
+ u64 capture_flags : 8;
+ u64 capture_ltype : 4;
+ u64 capture_lid : 3;
+ u64 rsvd_43 : 1;
+ u64 next_state : 8;
+ u64 parse_done : 1;
+ u64 capture_ena : 1;
+ u64 byp_count : 3;
+ u64 rsvd_63_57 : 7;
+#endif
+};
+
+struct npc_kpu_action1 {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd_63_36 : 28;
+ u64 errlev : 4;
+ u64 errcode : 8;
+ u64 dp2_offset : 8;
+ u64 dp1_offset : 8;
+ u64 dp0_offset : 8;
+#else
+ u64 dp0_offset : 8;
+ u64 dp1_offset : 8;
+ u64 dp2_offset : 8;
+ u64 errcode : 8;
+ u64 errlev : 4;
+ u64 rsvd_63_36 : 28;
+#endif
+};
+
+struct npc_kpu_pkind_cpi_def {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 ena : 1;
+ u64 rsvd_62_59 : 4;
+ u64 lid : 3;
+ u64 ltype_match : 4;
+ u64 ltype_mask : 4;
+ u64 flags_match : 8;
+ u64 flags_mask : 8;
+ u64 add_offset : 8;
+ u64 add_mask : 8;
+ u64 rsvd_15 : 1;
+ u64 add_shift : 3;
+ u64 rsvd_11_10 : 2;
+ u64 cpi_base : 10;
+#else
+ u64 cpi_base : 10;
+ u64 rsvd_11_10 : 2;
+ u64 add_shift : 3;
+ u64 rsvd_15 : 1;
+ u64 add_mask : 8;
+ u64 add_offset : 8;
+ u64 flags_mask : 8;
+ u64 flags_match : 8;
+ u64 ltype_mask : 4;
+ u64 ltype_match : 4;
+ u64 lid : 3;
+ u64 rsvd_62_59 : 4;
+ u64 ena : 1;
+#endif
+};
+
+struct nix_rx_action {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd_63_61 :3;
+ u64 flow_key_alg :5;
+ u64 match_id :16;
+ u64 index :20;
+ u64 pf_func :16;
+ u64 op :4;
+#else
+ u64 op :4;
+ u64 pf_func :16;
+ u64 index :20;
+ u64 match_id :16;
+ u64 flow_key_alg :5;
+ u64 rsvd_63_61 :3;
+#endif
+};
+
+/* NPC_AF_INTFX_KEX_CFG field masks */
+#define NPC_EXACT_NIBBLE_START 40
+#define NPC_EXACT_NIBBLE_END 43
+#define NPC_EXACT_NIBBLE GENMASK_ULL(43, 40)
+
+/* NPC_EXACT_KEX_S nibble definitions for each field */
+#define NPC_EXACT_NIBBLE_HIT BIT_ULL(40)
+#define NPC_EXACT_NIBBLE_OPC BIT_ULL(40)
+#define NPC_EXACT_NIBBLE_WAY BIT_ULL(40)
+#define NPC_EXACT_NIBBLE_INDEX GENMASK_ULL(43, 41)
+
+#define NPC_EXACT_RESULT_HIT BIT_ULL(0)
+#define NPC_EXACT_RESULT_OPC GENMASK_ULL(2, 1)
+#define NPC_EXACT_RESULT_WAY GENMASK_ULL(4, 3)
+#define NPC_EXACT_RESULT_IDX GENMASK_ULL(15, 5)
+
+/* NPC_AF_INTFX_KEX_CFG field masks */
+#define NPC_PARSE_NIBBLE GENMASK_ULL(30, 0)
+
+/* NPC_PARSE_KEX_S nibble definitions for each field */
+#define NPC_PARSE_NIBBLE_CHAN GENMASK_ULL(2, 0)
+#define NPC_PARSE_NIBBLE_ERRLEV BIT_ULL(3)
+#define NPC_PARSE_NIBBLE_ERRCODE GENMASK_ULL(5, 4)
+#define NPC_PARSE_NIBBLE_L2L3_BCAST BIT_ULL(6)
+#define NPC_PARSE_NIBBLE_LA_FLAGS GENMASK_ULL(8, 7)
+#define NPC_PARSE_NIBBLE_LA_LTYPE BIT_ULL(9)
+#define NPC_PARSE_NIBBLE_LB_FLAGS GENMASK_ULL(11, 10)
+#define NPC_PARSE_NIBBLE_LB_LTYPE BIT_ULL(12)
+#define NPC_PARSE_NIBBLE_LC_FLAGS GENMASK_ULL(14, 13)
+#define NPC_PARSE_NIBBLE_LC_LTYPE BIT_ULL(15)
+#define NPC_PARSE_NIBBLE_LD_FLAGS GENMASK_ULL(17, 16)
+#define NPC_PARSE_NIBBLE_LD_LTYPE BIT_ULL(18)
+#define NPC_PARSE_NIBBLE_LE_FLAGS GENMASK_ULL(20, 19)
+#define NPC_PARSE_NIBBLE_LE_LTYPE BIT_ULL(21)
+#define NPC_PARSE_NIBBLE_LF_FLAGS GENMASK_ULL(23, 22)
+#define NPC_PARSE_NIBBLE_LF_LTYPE BIT_ULL(24)
+#define NPC_PARSE_NIBBLE_LG_FLAGS GENMASK_ULL(26, 25)
+#define NPC_PARSE_NIBBLE_LG_LTYPE BIT_ULL(27)
+#define NPC_PARSE_NIBBLE_LH_FLAGS GENMASK_ULL(29, 28)
+#define NPC_PARSE_NIBBLE_LH_LTYPE BIT_ULL(30)
+
+struct nix_tx_action {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd_63_48 :16;
+ u64 match_id :16;
+ u64 index :20;
+ u64 rsvd_11_8 :8;
+ u64 op :4;
+#else
+ u64 op :4;
+ u64 rsvd_11_8 :8;
+ u64 index :20;
+ u64 match_id :16;
+ u64 rsvd_63_48 :16;
+#endif
+};
+
+/* NIX Receive Vtag Action Structure */
+#define RX_VTAG0_VALID_BIT BIT_ULL(15)
+#define RX_VTAG0_TYPE_MASK GENMASK_ULL(14, 12)
+#define RX_VTAG0_LID_MASK GENMASK_ULL(10, 8)
+#define RX_VTAG0_RELPTR_MASK GENMASK_ULL(7, 0)
+#define RX_VTAG1_VALID_BIT BIT_ULL(47)
+#define RX_VTAG1_TYPE_MASK GENMASK_ULL(46, 44)
+#define RX_VTAG1_LID_MASK GENMASK_ULL(42, 40)
+#define RX_VTAG1_RELPTR_MASK GENMASK_ULL(39, 32)
+
+/* NIX Transmit Vtag Action Structure */
+#define TX_VTAG0_DEF_MASK GENMASK_ULL(25, 16)
+#define TX_VTAG0_OP_MASK GENMASK_ULL(13, 12)
+#define TX_VTAG0_LID_MASK GENMASK_ULL(10, 8)
+#define TX_VTAG0_RELPTR_MASK GENMASK_ULL(7, 0)
+#define TX_VTAG1_DEF_MASK GENMASK_ULL(57, 48)
+#define TX_VTAG1_OP_MASK GENMASK_ULL(45, 44)
+#define TX_VTAG1_LID_MASK GENMASK_ULL(42, 40)
+#define TX_VTAG1_RELPTR_MASK GENMASK_ULL(39, 32)
+
+/* NPC MCAM reserved entry index per nixlf */
+#define NIXLF_UCAST_ENTRY 0
+#define NIXLF_BCAST_ENTRY 1
+#define NIXLF_ALLMULTI_ENTRY 2
+#define NIXLF_PROMISC_ENTRY 3
+
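+/* Header describing a coalesced set of KPU profiles: num_prfl profiles
+ * follow the header, with their individual sizes recorded in prfl_sz[].
+ */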
+struct npc_coalesced_kpu_prfl {
+#define NPC_SIGN 0x00666f727063706e
+#define NPC_PRFL_NAME "npc_prfls_array"
+#define NPC_NAME_LEN 32
+ __le64 signature; /* "npcprof\0" (8 bytes/ASCII characters) */
+ u8 name[NPC_NAME_LEN]; /* KPU Profile name */
+ u64 version; /* KPU firmware/profile version */
+ u8 num_prfl; /* No of NPC profiles. */
+ u16 prfl_sz[];
+};
+
+struct npc_mcam_kex {
+	/* MKEX Profile Header */
+ u64 mkex_sign; /* "mcam-kex-profile" (8 bytes/ASCII characters) */
+ u8 name[MKEX_NAME_LEN]; /* MKEX Profile name */
+ u64 cpu_model; /* Format as profiled by CPU hardware */
+ u64 kpu_version; /* KPU firmware/profile version */
+ u64 reserved; /* Reserved for extension */
+
+	/* MKEX Profile Data */
+ u64 keyx_cfg[NPC_MAX_INTF]; /* NPC_AF_INTF(0..1)_KEX_CFG */
+ /* NPC_AF_KEX_LDATA(0..1)_FLAGS_CFG */
+ u64 kex_ld_flags[NPC_MAX_LD];
+ /* NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG */
+ u64 intf_lid_lt_ld[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD];
+ /* NPC_AF_INTF(0..1)_LDATA(0..1)_FLAGS(0..15)_CFG */
+ u64 intf_ld_flags[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL];
+} __packed;
+
+struct npc_kpu_fwdata {
+ int entries;
+ /* What follows is:
+ * struct npc_kpu_profile_cam[entries];
+ * struct npc_kpu_profile_action[entries];
+ */
+ u8 data[];
+} __packed;
+
+struct npc_lt_def {
+ u8 ltype_mask;
+ u8 ltype_match;
+ u8 lid;
+} __packed;
+
+struct npc_lt_def_ipsec {
+ u8 ltype_mask;
+ u8 ltype_match;
+ u8 lid;
+ u8 spi_offset;
+ u8 spi_nz;
+} __packed;
+
+struct npc_lt_def_apad {
+ u8 ltype_mask;
+ u8 ltype_match;
+ u8 lid;
+ u8 valid;
+} __packed;
+
+struct npc_lt_def_color {
+ u8 ltype_mask;
+ u8 ltype_match;
+ u8 lid;
+ u8 noffset;
+ u8 offset;
+} __packed;
+
+struct npc_lt_def_et {
+ u8 ltype_mask;
+ u8 ltype_match;
+ u8 lid;
+ u8 valid;
+ u8 offset;
+} __packed;
+
+struct npc_lt_def_cfg {
+ struct npc_lt_def rx_ol2;
+ struct npc_lt_def rx_oip4;
+ struct npc_lt_def rx_iip4;
+ struct npc_lt_def rx_oip6;
+ struct npc_lt_def rx_iip6;
+ struct npc_lt_def rx_otcp;
+ struct npc_lt_def rx_itcp;
+ struct npc_lt_def rx_oudp;
+ struct npc_lt_def rx_iudp;
+ struct npc_lt_def rx_osctp;
+ struct npc_lt_def rx_isctp;
+ struct npc_lt_def_ipsec rx_ipsec[2];
+ struct npc_lt_def pck_ol2;
+ struct npc_lt_def pck_oip4;
+ struct npc_lt_def pck_oip6;
+ struct npc_lt_def pck_iip4;
+ struct npc_lt_def_apad rx_apad0;
+ struct npc_lt_def_apad rx_apad1;
+ struct npc_lt_def_color ovlan;
+ struct npc_lt_def_color ivlan;
+ struct npc_lt_def_color rx_gen0_color;
+ struct npc_lt_def_color rx_gen1_color;
+ struct npc_lt_def_et rx_et[2];
+} __packed;
+
+/* Loadable KPU profile firmware data */
+struct npc_kpu_profile_fwdata {
+#define KPU_SIGN 0x00666f727075706b
+#define KPU_NAME_LEN 32
+/** Maximum number of custom KPU entries supported by the built-in profile. */
+#define KPU_MAX_CST_ENT 6
+	/* KPU Profile Header */
+ __le64 signature; /* "kpuprof\0" (8 bytes/ASCII characters) */
+ u8 name[KPU_NAME_LEN]; /* KPU Profile name */
+ __le64 version; /* KPU profile version */
+ u8 kpus;
+ u8 reserved[7];
+
+	/* Default MKEX profile to be used with this KPU profile. May be
+	 * overridden with the mkex_profile module parameter. Format is the
+	 * same as for the MKEX profile to streamline processing.
+	 */
+ struct npc_mcam_kex mkex;
+ /* LTYPE values for specific HW offloaded protocols. */
+ struct npc_lt_def_cfg lt_def;
+ /* Dynamically sized data:
+ * Custom KPU CAM and ACTION configuration entries.
+ * struct npc_kpu_fwdata kpu[kpus];
+ */
+ u8 data[];
+} __packed;
+
+struct rvu_npc_mcam_rule {
+ struct flow_msg packet;
+ struct flow_msg mask;
+ u8 intf;
+ union {
+ struct nix_tx_action tx_action;
+ struct nix_rx_action rx_action;
+ };
+ u64 vtag_action;
+ struct list_head list;
+ u64 features;
+ u16 owner;
+ u16 entry;
+ u16 cntr;
+ bool has_cntr;
+ u8 default_rule;
+ bool enable;
+ bool vfvlan_cfg;
+ u16 chan;
+ u16 chan_mask;
+ u8 lxmb;
+};
+
+#endif /* NPC_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
new file mode 100644
index 000000000..a820bad3a
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -0,0 +1,15266 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#ifndef NPC_PROFILE_H
+#define NPC_PROFILE_H
+
+#define NPC_KPU_PROFILE_VER 0x0000000100070000
+#define NPC_KPU_VER_MAJ(ver) ((u16)(((ver) >> 32) & 0xFFFF))
+#define NPC_KPU_VER_MIN(ver) ((u16)(((ver) >> 16) & 0xFFFF))
+#define NPC_KPU_VER_PATCH(ver) ((u16)((ver) & 0xFFFF))
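+/* NPC_KPU_PROFILE_VER 0x0000000100070000 encodes profile version 1.7.0:
+ * major in bits 47:32, minor in bits 31:16, patch in bits 15:0.
+ */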
+
+#define NPC_IH_W 0x8000
+#define NPC_IH_UTAG 0x2000
+
+#define NPC_ETYPE_IP 0x0800
+#define NPC_ETYPE_IP6 0x86dd
+#define NPC_ETYPE_ARP 0x0806
+#define NPC_ETYPE_RARP 0x8035
+#define NPC_ETYPE_NGIO 0x8842
+#define NPC_ETYPE_MPLSU 0x8847
+#define NPC_ETYPE_MPLSM 0x8848
+#define NPC_ETYPE_ETAG 0x893f
+#define NPC_ETYPE_CTAG 0x8100
+#define NPC_ETYPE_SBTAG 0x88a8
+#define NPC_ETYPE_ITAG 0x88e7
+#define NPC_ETYPE_PTP 0x88f7
+#define NPC_ETYPE_FCOE 0x8906
+#define NPC_ETYPE_QINQ 0x9100
+#define NPC_ETYPE_TRANS_ETH_BR 0x6558
+#define NPC_ETYPE_PPP 0x880b
+#define NPC_ETYPE_NSH 0x894f
+#define NPC_ETYPE_DSA 0xdada
+#define NPC_ETYPE_PPPOE 0x8864
+
+#define NPC_PPP_IP 0x0021
+#define NPC_PPP_IP6 0x0057
+
+#define NPC_IPNH_HOP 0
+#define NPC_IPNH_ICMP 1
+#define NPC_IPNH_IGMP 2
+#define NPC_IPNH_IP 4
+#define NPC_IPNH_TCP 6
+#define NPC_IPNH_UDP 17
+#define NPC_IPNH_IP6 41
+#define NPC_IPNH_ROUT 43
+#define NPC_IPNH_FRAG 44
+#define NPC_IPNH_GRE 47
+#define NPC_IPNH_ESP 50
+#define NPC_IPNH_AH 51
+#define NPC_IPNH_ICMP6 58
+#define NPC_IPNH_NONH 59
+#define NPC_IPNH_DEST 60
+#define NPC_IPNH_SCTP 132
+#define NPC_IPNH_MOBILITY 135
+#define NPC_IPNH_MPLS 137
+#define NPC_IPNH_HOSTID 139
+#define NPC_IPNH_SHIM6 140
+
+#define NPC_UDP_PORT_PTP_E 319
+#define NPC_UDP_PORT_PTP_G 320
+#define NPC_UDP_PORT_GTPC 2123
+#define NPC_UDP_PORT_GTPU 2152
+#define NPC_UDP_PORT_VXLAN 4789
+#define NPC_UDP_PORT_VXLANGPE 4790
+#define NPC_UDP_PORT_GENEVE 6081
+#define NPC_UDP_PORT_MPLS 6635
+#define NPC_UDP_PORT_ESP 4500
+
+#define NPC_VXLANGPE_NP_IP 0x1
+#define NPC_VXLANGPE_NP_IP6 0x2
+#define NPC_VXLANGPE_NP_ETH 0x3
+#define NPC_VXLANGPE_NP_NSH 0x4
+#define NPC_VXLANGPE_NP_MPLS 0x5
+#define NPC_VXLANGPE_NP_GBP 0x6
+#define NPC_VXLANGPE_NP_VBNG 0x7
+
+#define NPC_NSH_NP_IP 0x1
+#define NPC_NSH_NP_IP6 0x2
+#define NPC_NSH_NP_ETH 0x3
+#define NPC_NSH_NP_NSH 0x4
+#define NPC_NSH_NP_MPLS 0x5
+
+#define NPC_TCP_PORT_HTTP 80
+#define NPC_TCP_PORT_HTTPS 443
+#define NPC_TCP_PORT_PPTP 1723
+
+#define NPC_MPLS_S 0x0100
+
+#define NPC_IP_TTL_MASK 0xff00
+#define NPC_IP_VER_4 0x4000
+#define NPC_IP_VER_6 0x6000
+#define NPC_IP_VER_MASK 0xf000
+#define NPC_IP_HDR_LEN_5 0x0500
+#define NPC_IP_HDR_LEN_MASK 0x0f00
+#define NPC_IP_HDR_MF 0x2000
+#define NPC_IP_HDR_FRAGOFF 0x1fff
+
+#define NPC_IP6_HOP_MASK 0x00ff
+#define NPC_IP6_FRAG_FRAGOFF 0xfff8
+
+#define NPC_GRE_F_CSUM (0x1 << 15)
+#define NPC_GRE_F_ROUTE (0x1 << 14)
+#define NPC_GRE_F_KEY (0x1 << 13)
+#define NPC_GRE_F_SEQ (0x1 << 12)
+#define NPC_GRE_F_ACK (0x1 << 7)
+#define NPC_GRE_FLAG_MASK (NPC_GRE_F_CSUM | NPC_GRE_F_ROUTE | \
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_F_ACK)
+#define NPC_GRE_VER_MASK 0x0003
+#define NPC_GRE_VER_1 0x0001
+
+#define NPC_VXLAN_I 0x0800
+
+#define NPC_VXLANGPE_VER (0x3 << 12)
+#define NPC_VXLANGPE_I (0x1 << 11)
+#define NPC_VXLANGPE_P (0x1 << 10)
+#define NPC_VXLANGPE_B (0x1 << 9)
+#define NPC_VXLANGPE_NP_MASK 0x00ff
+
+#define NPC_NSH_NP_MASK 0x00ff
+
+#define NPC_GENEVE_F_OAM (0x1 << 7)
+#define NPC_GENEVE_F_CRI_OPT (0x1 << 6)
+
+#define NPC_GTP_PT_GTP (0x1 << 12)
+#define NPC_GTP_PT_MASK (0x1 << 12)
+#define NPC_GTP_VER1 (0x1 << 13)
+#define NPC_GTP_VER_MASK (0x7 << 13)
+#define NPC_GTP_MT_G_PDU 0xff
+#define NPC_GTP_MT_MASK 0xff
+
+#define NPC_TCP_FLAGS_FIN 0x0001
+#define NPC_TCP_FLAGS_SYN 0x0002
+#define NPC_TCP_FLAGS_RST 0x0004
+#define NPC_TCP_FLAGS_PSH 0x0008
+#define NPC_TCP_FLAGS_ACK 0x0010
+#define NPC_TCP_FLAGS_URG 0x0020
+#define NPC_TCP_FLAGS_MASK 0x003f
+
+#define NPC_TCP_DATA_OFFSET_5 0x5000
+#define NPC_TCP_DATA_OFFSET_MASK 0xf000
+
+#define NPC_DSA_EXTEND 0x1000
+#define NPC_DSA_EDSA 0x8000
+#define NPC_DSA_FDSA 0xc000
+
+#define NPC_KEXOF_DMAC 9
+#define MKEX_SIGN 0x19bbfdbd15f
+#define KEX_LD_CFG(bytesm1, hdr_ofs, ena, flags_ena, key_ofs) \
+ (((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) | \
+ ((flags_ena) << 6) | ((key_ofs) & 0x3F))
+
+/* Rx parse key extract nibble enable */
+#define NPC_PARSE_NIBBLE_INTF_RX (NPC_PARSE_NIBBLE_CHAN | \
+ NPC_PARSE_NIBBLE_L2L3_BCAST | \
+ NPC_PARSE_NIBBLE_LA_LTYPE | \
+ NPC_PARSE_NIBBLE_LB_LTYPE | \
+ NPC_PARSE_NIBBLE_LC_LTYPE | \
+ NPC_PARSE_NIBBLE_LD_LTYPE | \
+ NPC_PARSE_NIBBLE_LE_LTYPE)
+/* Tx parse key extract nibble enable */
+#define NPC_PARSE_NIBBLE_INTF_TX (NPC_PARSE_NIBBLE_LA_LTYPE | \
+ NPC_PARSE_NIBBLE_LB_LTYPE | \
+ NPC_PARSE_NIBBLE_LC_LTYPE | \
+ NPC_PARSE_NIBBLE_LD_LTYPE | \
+ NPC_PARSE_NIBBLE_LE_LTYPE)
+
+enum npc_kpu_parser_state {
+ NPC_S_NA = 0,
+ NPC_S_KPU1_ETHER,
+ NPC_S_KPU1_IH_NIX,
+ NPC_S_KPU1_IH,
+ NPC_S_KPU1_EXDSA,
+ NPC_S_KPU1_HIGIG2,
+ NPC_S_KPU1_IH_NIX_HIGIG2,
+ NPC_S_KPU1_CUSTOM_PRE_L2,
+ NPC_S_KPU1_CPT_HDR,
+ NPC_S_KPU1_VLAN_EXDSA,
+ NPC_S_KPU2_CTAG,
+ NPC_S_KPU2_CTAG2,
+ NPC_S_KPU2_SBTAG,
+ NPC_S_KPU2_QINQ,
+ NPC_S_KPU2_ETAG,
+ NPC_S_KPU2_EXDSA,
+ NPC_S_KPU2_CPT_CTAG,
+ NPC_S_KPU2_CPT_QINQ,
+ NPC_S_KPU3_CTAG,
+ NPC_S_KPU3_STAG,
+ NPC_S_KPU3_QINQ,
+ NPC_S_KPU3_CTAG_C,
+ NPC_S_KPU3_STAG_C,
+ NPC_S_KPU3_QINQ_C,
+ NPC_S_KPU3_DSA,
+ NPC_S_KPU3_VLAN_EXDSA,
+ NPC_S_KPU4_MPLS,
+ NPC_S_KPU4_NSH,
+ NPC_S_KPU4_FDSA,
+ NPC_S_KPU4_VLAN_EXDSA,
+ NPC_S_KPU4_PPPOE,
+ NPC_S_KPU5_IP,
+ NPC_S_KPU5_IP6,
+ NPC_S_KPU5_ARP,
+ NPC_S_KPU5_RARP,
+ NPC_S_KPU5_PTP,
+ NPC_S_KPU5_FCOE,
+ NPC_S_KPU5_MPLS,
+ NPC_S_KPU5_MPLS_PL,
+ NPC_S_KPU5_NSH,
+ NPC_S_KPU5_CPT_IP,
+ NPC_S_KPU5_CPT_IP6,
+ NPC_S_KPU5_NGIO,
+ NPC_S_KPU6_IP6_EXT,
+ NPC_S_KPU6_IP6_HOP_DEST,
+ NPC_S_KPU6_IP6_ROUT,
+ NPC_S_KPU6_IP6_FRAG,
+ NPC_S_KPU6_IP6_CPT_FRAG,
+ NPC_S_KPU6_IP6_CPT_HOP_DEST,
+ NPC_S_KPU6_IP6_CPT_ROUT,
+ NPC_S_KPU7_IP6_EXT,
+ NPC_S_KPU7_IP6_ROUT,
+ NPC_S_KPU7_IP6_FRAG,
+ NPC_S_KPU7_CPT_IP6_FRAG,
+ NPC_S_KPU8_TCP,
+ NPC_S_KPU8_UDP,
+ NPC_S_KPU8_SCTP,
+ NPC_S_KPU8_ICMP,
+ NPC_S_KPU8_IGMP,
+ NPC_S_KPU8_ICMP6,
+ NPC_S_KPU8_GRE,
+ NPC_S_KPU8_AH,
+ NPC_S_KPU9_TU_MPLS_IN_GRE,
+ NPC_S_KPU9_TU_MPLS_IN_NSH,
+ NPC_S_KPU9_TU_MPLS_IN_IP,
+ NPC_S_KPU9_TU_MPLS_IN_UDP,
+ NPC_S_KPU9_TU_NSH_IN_GRE,
+ NPC_S_KPU9_VXLAN,
+ NPC_S_KPU9_VXLANGPE,
+ NPC_S_KPU9_GENEVE,
+ NPC_S_KPU9_GTPC,
+ NPC_S_KPU9_GTPU,
+ NPC_S_KPU9_ESP,
+ NPC_S_KPU10_TU_MPLS_IN_VXLANGPE,
+ NPC_S_KPU10_TU_MPLS_PL,
+ NPC_S_KPU10_TU_MPLS,
+ NPC_S_KPU10_TU_NSH_IN_VXLANGPE,
+ NPC_S_KPU11_TU_ETHER,
+ NPC_S_KPU11_TU_PPP,
+ NPC_S_KPU11_TU_MPLS_IN_NSH,
+ NPC_S_KPU11_TU_MPLS_PL,
+ NPC_S_KPU11_TU_MPLS,
+ NPC_S_KPU11_TU_ETHER_IN_NSH,
+ NPC_S_KPU12_TU_IP,
+ NPC_S_KPU12_TU_IP6,
+ NPC_S_KPU12_TU_ARP,
+ NPC_S_KPU13_TU_IP6_EXT,
+ NPC_S_KPU14_TU_IP6_EXT,
+ NPC_S_KPU15_TU_TCP,
+ NPC_S_KPU15_TU_UDP,
+ NPC_S_KPU15_TU_SCTP,
+ NPC_S_KPU15_TU_ICMP,
+ NPC_S_KPU15_TU_IGMP,
+ NPC_S_KPU15_TU_ICMP6,
+ NPC_S_KPU15_TU_ESP,
+ NPC_S_KPU15_TU_AH,
+ NPC_S_KPU16_HTTP_DATA,
+ NPC_S_KPU16_HTTPS_DATA,
+ NPC_S_KPU16_PPTP_DATA,
+ NPC_S_KPU16_TCP_DATA,
+ NPC_S_KPU16_UDP_DATA,
+ NPC_S_KPU16_UDP_PTP,
+ NPC_S_LAST /* has to be the last item */
+};
+
+enum npc_kpu_la_uflag {
+ NPC_F_LA_U_HAS_TAG = 0x10,
+ NPC_F_LA_U_HAS_IH_NIX = 0x20,
+ NPC_F_LA_U_HAS_HIGIG2 = 0x40,
+};
+enum npc_kpu_la_lflag {
+ NPC_F_LA_L_UNK_ETYPE = 1,
+ NPC_F_LA_L_WITH_VLAN,
+ NPC_F_LA_L_WITH_ETAG,
+ NPC_F_LA_L_WITH_MPLS,
+ NPC_F_LA_L_WITH_NSH,
+};
+
+enum npc_kpu_lb_uflag {
+ NPC_F_LB_U_UNK_ETYPE = 0x80,
+ NPC_F_LB_U_MORE_TAG = 0x40,
+};
+enum npc_kpu_lb_lflag {
+ NPC_F_LB_L_WITH_CTAG = 1,
+ NPC_F_LB_L_WITH_CTAG_UNK,
+ NPC_F_LB_L_WITH_STAG_CTAG,
+ NPC_F_LB_L_WITH_STAG_STAG,
+ NPC_F_LB_L_WITH_QINQ_CTAG,
+ NPC_F_LB_L_WITH_QINQ_QINQ,
+ NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_L_WITH_ITAG_STAG,
+ NPC_F_LB_L_WITH_ITAG_CTAG,
+ NPC_F_LB_L_WITH_ITAG_UNK,
+ NPC_F_LB_L_WITH_BTAG_ITAG,
+ NPC_F_LB_L_WITH_STAG,
+ NPC_F_LB_L_WITH_QINQ,
+ NPC_F_LB_L_DSA,
+ NPC_F_LB_L_DSA_VLAN,
+ NPC_F_LB_L_EDSA,
+ NPC_F_LB_L_EDSA_VLAN,
+ NPC_F_LB_L_EXDSA,
+ NPC_F_LB_L_EXDSA_VLAN,
+ NPC_F_LB_L_FDSA,
+};
+
+enum npc_kpu_lc_uflag {
+ NPC_F_LC_U_UNK_PROTO = 0x10,
+ NPC_F_LC_U_IP_FRAG = 0x20,
+ NPC_F_LC_U_IP6_FRAG = 0x40,
+};
+enum npc_kpu_lc_lflag {
+ NPC_F_LC_L_IP_IN_IP = 1,
+ NPC_F_LC_L_6TO4,
+ NPC_F_LC_L_MPLS_IN_IP,
+ NPC_F_LC_L_IP6_TUN_IP6,
+ NPC_F_LC_L_IP6_MPLS_IN_IP,
+ NPC_F_LC_L_MPLS_4_LABELS,
+ NPC_F_LC_L_MPLS_3_LABELS,
+ NPC_F_LC_L_MPLS_2_LABELS,
+ NPC_F_LC_L_EXT_HOP,
+ NPC_F_LC_L_EXT_DEST,
+ NPC_F_LC_L_EXT_ROUT,
+ NPC_F_LC_L_EXT_MOBILITY,
+ NPC_F_LC_L_EXT_HOSTID,
+ NPC_F_LC_L_EXT_SHIM6,
+};
+
+enum npc_kpu_ld_lflag {
+ NPC_F_LD_L_TCP_UNK_PORT = 1,
+ NPC_F_LD_L_TCP_HAS_OPTIONS,
+ NPC_F_LD_L_TCP_UNK_PORT_HAS_OPTIONS,
+ NPC_F_LD_L_UDP_UNK_PORT,
+ NPC_F_LD_L_GRE_NVGRE,
+ NPC_F_LD_L_GRE_HAS_SRE,
+ NPC_F_LD_L_GRE_HAS_CSUM,
+ NPC_F_LD_L_GRE_HAS_KEY,
+ NPC_F_LD_L_GRE_HAS_SEQ,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY,
+ NPC_F_LD_L_GRE_HAS_CSUM_SEQ,
+ NPC_F_LD_L_GRE_HAS_KEY_SEQ,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ,
+ NPC_F_LD_L_GRE_HAS_ROUTE,
+ NPC_F_LD_L_GRE_UNK_PROTO,
+ NPC_F_LD_L_GRE_VER1,
+ NPC_F_LD_L_GRE_VER1_HAS_SEQ,
+ NPC_F_LD_L_GRE_VER1_HAS_ACK,
+ NPC_F_LD_L_GRE_VER1_HAS_SEQ_ACK,
+ NPC_F_LD_L_GRE_VER1_UNK_PROTO,
+ NPC_F_LD_L_MPLS_4_LABELS,
+ NPC_F_LD_L_MPLS_3_LABELS,
+ NPC_F_LD_L_MPLS_2_LABELS,
+};
+
+enum npc_kpu_le_lflag {
+ NPC_F_LE_L_VXLAN_NOVNI,
+ NPC_F_LE_L_VXLANGPE_NOVNI,
+ NPC_F_LE_L_VXLANGPE_UNK,
+ NPC_F_LE_L_VXLANGPE_NONP,
+ NPC_F_LE_L_GENEVE_OAM,
+ NPC_F_LE_L_GENEVE_CRI_OPT,
+ NPC_F_LE_L_GENEVE_OAM_CRI_OPT,
+ NPC_F_LE_L_GTPU_G_PDU,
+ NPC_F_LE_L_GTPU_UNK,
+};
+
+enum npc_kpu_lf_uflag {
+ NPC_F_LF_U_UNK_ETYPE = 0x10,
+ NPC_F_LF_U_HAS_TAG = 0x20,
+};
+
+enum npc_kpu_lf_lflag {
+ NPC_F_LF_L_WITH_CTAG = 1,
+ NPC_F_LF_L_WITH_STAG_CTAG,
+ NPC_F_LF_L_WITH_STAG,
+ NPC_F_LF_L_WITH_QINQ_CTAG,
+ NPC_F_LF_L_WITH_QINQ,
+};
+
+enum npc_kpu_lg_uflag {
+ NPC_F_LG_U_UNK_IP_PROTO = 0x10,
+ NPC_F_LG_U_IP_HAS_OPTIONS = 0x20,
+ NPC_F_LG_U_IP6_HAS_EXT = 0x40,
+};
+
+enum npc_kpu_lh_uflag {
+ NPC_F_LH_U_TCP_HAS_OPTIONS = 0x80,
+};
+
+enum npc_kpu_lh_lflag {
+ NPC_F_LH_L_TCP_HTTP = 1,
+ NPC_F_LH_L_TCP_HTTPS,
+ NPC_F_LH_L_TCP_PPTP,
+ NPC_F_LH_L_TCP_UNK_PORT,
+ NPC_F_LH_L_UDP_UNK_PORT,
+};
+
+enum npc_kpu_err_code {
+ NPC_EC_NOERR = 0, /* has to be zero */
+ NPC_EC_UNK,
+ NPC_EC_IH_LENGTH,
+ NPC_EC_EDSA_UNK,
+ NPC_EC_L2_K1,
+ NPC_EC_L2_K2,
+ NPC_EC_L2_K3,
+ NPC_EC_L2_K3_ETYPE_UNK,
+ NPC_EC_L2_K4,
+ NPC_EC_MPLS_2MANY,
+ NPC_EC_MPLS_UNK,
+ NPC_EC_NSH_UNK,
+ NPC_EC_IP_TTL_0,
+ NPC_EC_IP_FRAG_OFFSET_1,
+ NPC_EC_IP_VER,
+ NPC_EC_IP6_HOP_0,
+ NPC_EC_IP6_VER,
+ NPC_EC_TCP_FLAGS_FIN_ONLY,
+ NPC_EC_TCP_FLAGS_ZERO,
+ NPC_EC_TCP_FLAGS_RST_FIN,
+ NPC_EC_TCP_FLAGS_URG_SYN,
+ NPC_EC_TCP_FLAGS_RST_SYN,
+ NPC_EC_TCP_FLAGS_SYN_FIN,
+ NPC_EC_VXLAN,
+ NPC_EC_NVGRE,
+ NPC_EC_GRE,
+ NPC_EC_GRE_VER1,
+ NPC_EC_L4,
+ NPC_EC_OIP4_CSUM,
+ NPC_EC_IIP4_CSUM,
+ NPC_EC_LAST /* has to be the last item */
+};
+
+enum NPC_ERRLEV_E {
+ NPC_ERRLEV_RE = 0,
+ NPC_ERRLEV_LA = 1,
+ NPC_ERRLEV_LB = 2,
+ NPC_ERRLEV_LC = 3,
+ NPC_ERRLEV_LD = 4,
+ NPC_ERRLEV_LE = 5,
+ NPC_ERRLEV_LF = 6,
+ NPC_ERRLEV_LG = 7,
+ NPC_ERRLEV_LH = 8,
+ NPC_ERRLEV_R9 = 9,
+ NPC_ERRLEV_R10 = 10,
+ NPC_ERRLEV_R11 = 11,
+ NPC_ERRLEV_R12 = 12,
+ NPC_ERRLEV_R13 = 13,
+ NPC_ERRLEV_R14 = 14,
+ NPC_ERRLEV_NIX = 15,
+ NPC_ERRLEV_ENUM_LAST = 16,
+};
+
+#define NPC_KPU_NOP_CAM \
+ { \
+ NPC_S_NA, 0xff, \
+ 0x0000, \
+ 0x0000, \
+ 0x0000, \
+ 0x0000, \
+ 0x0000, \
+ 0x0000, \
+ }
+
+#define NPC_KPU_NOP_ACTION \
+ { \
+ NPC_ERRLEV_RE, NPC_EC_NOERR, \
+ 0, 0, 0, 0, 0, \
+ NPC_S_NA, 0, 0, \
+ NPC_LID_LA, NPC_LT_NA, \
+ 0, \
+ 0, 0, 0, 0, \
+ }
+
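+/* Initial KPU (IKPU) action table, indexed by port kind (pkind); each
+ * entry selects the parser state and data pointer offsets with which the
+ * first KPU starts parsing packets received on that pkind.
+ */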
+static struct npc_kpu_profile_action ikpu_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_PRE_L2_ETHER,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_VLAN_EXDSA, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 24, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_CPT_HDR, 40, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 7, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 90, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 36, 40, 44, 0, 0,
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 28, 32, 36, 0, 0,
+ NPC_S_KPU1_HIGIG2, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 14, 20, 0, 0,
+ NPC_S_KPU1_EXDSA, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 20, 24, 28, 0, 0,
+ NPC_S_KPU1_IH_NIX, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
+ },
+};
+
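+/* KPU1 CAM table.  Each entry is a { state, state_mask } pair followed
+ * by three { data, mask } pairs (dp0/dp1/dp2) compared against the
+ * 16-bit words the previous stage pointed at, e.g. the EtherType and
+ * inner tags for the NPC_S_KPU1_ETHER state.  The first six slots are
+ * reserved no-op entries.
+ */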
+static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_ETAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_DSA,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_PPPOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ 0x0000,
+ 0xfc00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ 0x0400,
+ 0xfe00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_ETAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_EXDSA, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_DSA_EXTEND,
+ NPC_DSA_EXTEND,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_EXDSA, 0xff,
+ NPC_DSA_FDSA,
+ NPC_DSA_FDSA,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EXTEND,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_ETAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_ETAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_ETAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+};
+
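+/* KPU2 CAM table: matches the first and second VLAN/service tags
+ * (CTAG, SBTAG, QINQ, ETAG), EDSA/exDSA tags and the CPT tag states
+ * reached from KPU1.
+ */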
+static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_NGIO,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_PPPOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_PPP_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_PPPOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_PPP_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CPT_CTAG, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CPT_CTAG, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CPT_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CPT_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+};
+
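+/* KPU3 CAM table: resolves deeper VLAN stacks (CTAG/STAG/QINQ and
+ * their _C variants), DSA tags and VLAN-over-exDSA states.
+ */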
+static struct npc_kpu_profile_cam kpu3_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_VLAN_EXDSA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+};
+
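+/* KPU4 CAM table: MPLS label stacks, NSH, forward-DSA (FDSA),
+ * VLAN-over-exDSA and PPPoE states.
+ */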
+static struct npc_kpu_profile_cam kpu4_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ {
+ NPC_S_KPU4_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_MPLS, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_MPLS, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU4_MPLS, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff,
+ NPC_NSH_NP_IP,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff,
+ NPC_NSH_NP_IP6,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff,
+ NPC_NSH_NP_ETH,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff,
+ NPC_NSH_NP_MPLS,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_PPPOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_PPP_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_PPPOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_PPP_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ 0x0000,
+ NPC_DSA_FDSA,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_PPPOE, 0xff,
+ NPC_PPP_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_PPPOE, 0xff,
+ NPC_PPP_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+};
+
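+/* KPU5 CAM table: layer-3 parsing.  IPv4 entries key on protocol,
+ * version/header length and fragment bits; IPv6 entries key on the
+ * next-header field.  Separate NPC_S_KPU5_CPT_IP/IP6 states handle the
+ * CPT second-pass path, alongside ARP/RARP/PTP/FCoE/MPLS payload
+ * states.
+ */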
+static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ {
+ NPC_S_KPU5_IP, 0xff,
+ 0x0000,
+ NPC_IP_TTL_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0001,
+ NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_TCP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_UDP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_SCTP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_ICMP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_IGMP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_ESP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_AH,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_GRE,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_IP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_IP6,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_MPLS,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_TCP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_UDP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_SCTP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_ICMP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_IGMP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_ESP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_AH,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_GRE,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_IP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_IP6,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_MPLS,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_ARP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_RARP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_PTP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_FCOE, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ 0x0000,
+ NPC_IP6_HOP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_HOP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_DEST << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_ROUT << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_FRAG << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_MOBILITY << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_HOSTID << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_SHIM6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS_PL, 0xff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS_PL, 0xff,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS_PL, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS_PL, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ 0x0000,
+ NPC_IP_TTL_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0001,
+ NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_TCP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_UDP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_SCTP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_ICMP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IGMP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_ESP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_AH,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_GRE,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IP6,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_MPLS,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_TCP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_UDP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_SCTP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_ICMP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IGMP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_ESP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_AH,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_GRE,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IP6,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_MPLS,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ 0x0000,
+ NPC_IP6_HOP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_HOP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_DEST << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_ROUT << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_FRAG << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_MOBILITY << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_HOSTID << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_SHIM6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_NGIO, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+};
+
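+/* KPU6 CAM table: IPv6 extension headers (hop-by-hop/destination,
+ * routing and fragment headers), keyed on the next-header byte.
+ */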
+static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ {
+ NPC_S_KPU6_IP6_EXT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_ROUT << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_FRAG << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_FRAG << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_ROUT << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_FRAG << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_FRAG << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ {
+ NPC_S_KPU7_IP6_EXT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_FIN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_URG | NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_URG | NPC_TCP_FLAGS_SYN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_SYN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_SYN | NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_SYN | NPC_TCP_FLAGS_FIN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ NPC_TCP_PORT_HTTP,
+ 0xffff,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ NPC_TCP_PORT_HTTPS,
+ 0xffff,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ NPC_TCP_PORT_PPTP,
+ 0xffff,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ NPC_TCP_PORT_HTTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ NPC_TCP_PORT_HTTPS,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ NPC_TCP_PORT_PPTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_VXLAN,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_VXLANGPE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_GENEVE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_GTPC,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_GTPU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_PTP_E,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_PTP_G,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_MPLS,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_ESP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_UDP_PORT_ESP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_SCTP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_ICMP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_IGMP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_ICMP6, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_AH, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_TRANS_ETH_BR,
+ 0xffff,
+ NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_TRANS_ETH_BR,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ NPC_GRE_F_CSUM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ NPC_GRE_F_CSUM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ NPC_GRE_F_CSUM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ NPC_GRE_F_CSUM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ NPC_GRE_F_CSUM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ 0x0000,
+ 0xffff,
+ NPC_GRE_F_ROUTE,
+ 0x4fff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x4fff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0003,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_PPP,
+ 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_VER_1,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_PPP,
+ 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_VER_1,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_PPP,
+ 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_ACK | NPC_GRE_VER_1,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_PPP,
+ 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_F_ACK | NPC_GRE_VER_1,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x2001,
+ 0xef7f,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x0001,
+ 0x0003,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ {
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_NSH, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_NSH, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_NSH, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_NSH, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_IP, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_IP, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_IP, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_IP, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_NSH_IN_GRE, 0xff,
+ NPC_NSH_NP_IP,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_NSH_IN_GRE, 0xff,
+ NPC_NSH_NP_IP6,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_NSH_IN_GRE, 0xff,
+ NPC_NSH_NP_ETH,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_NSH_IN_GRE, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_VXLAN, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLAN_I,
+ NPC_VXLAN_I,
+ 0x0000,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU9_VXLAN, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU9_VXLAN, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_IP,
+ NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_IP6,
+ NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_ETH,
+ NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_NSH,
+ NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_MPLS,
+ NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_IP,
+ NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_IP6,
+ NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_ETH,
+ NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_NSH,
+ NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_MPLS,
+ NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P,
+ NPC_VXLANGPE_P,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_TRANS_ETH_BR,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_TRANS_ETH_BR,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_TRANS_ETH_BR,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_TRANS_ETH_BR,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU9_GTPC, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_GTPU, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GTP_PT_GTP | NPC_GTP_VER1,
+ NPC_GTP_PT_MASK | NPC_GTP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_UDP, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_UDP, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_UDP, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_UDP, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_ESP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+};
+
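+/*
+ * Editorial note, not part of the upstream profile: in the
+ * NPC_S_KPU9_TU_MPLS_* rows above, the three data words appear to sample
+ * consecutive MPLS label half-words, with the NPC_MPLS_S bottom-of-stack
+ * bit distinguishing stacks of one, two, three, or four-plus labels. An
+ * illustrative sketch under that assumption only:
+ *
+ *	static int mpls_labels_hint(u16 dw0, u16 dw1, u16 dw2)
+ *	{
+ *		if (dw0 & NPC_MPLS_S)
+ *			return 1;
+ *		if (dw1 & NPC_MPLS_S)
+ *			return 2;
+ *		if (dw2 & NPC_MPLS_S)
+ *			return 3;
+ *		return 4;	// bottom of stack not yet seen
+ *	}
+ */
+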
+static struct npc_kpu_profile_cam kpu10_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ {
+ NPC_S_KPU10_TU_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_PL, 0xff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_PL, 0xff,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_PL, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_PL, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 0xff,
+ NPC_NSH_NP_IP,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 0xff,
+ NPC_NSH_NP_IP6,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 0xff,
+ NPC_NSH_NP_ETH,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu11_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_PPP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS_PL, 0xff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS_PL, 0xff,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS_PL, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER_IN_NSH, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu12_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_TCP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_UDP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_SCTP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_ICMP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_IGMP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_ESP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_AH,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_TCP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_UDP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_SCTP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_ICMP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_IGMP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_ESP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_AH,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_ARP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu13_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ {
+ NPC_S_KPU13_TU_IP6_EXT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu14_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ {
+ NPC_S_KPU14_TU_IP6_EXT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu15_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_FIN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_URG | NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_URG | NPC_TCP_FLAGS_SYN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_SYN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_SYN | NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_SYN | NPC_TCP_FLAGS_FIN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ NPC_TCP_PORT_HTTP,
+ 0xffff,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ NPC_TCP_PORT_HTTPS,
+ 0xffff,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ NPC_TCP_PORT_PPTP,
+ 0xffff,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ NPC_TCP_PORT_HTTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ NPC_TCP_PORT_HTTPS,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ NPC_TCP_PORT_PPTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_UDP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_SCTP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_ICMP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_IGMP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_ICMP6, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_ESP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_AH, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu16_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ {
+ NPC_S_KPU16_TCP_DATA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU16_HTTP_DATA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU16_HTTPS_DATA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU16_PPTP_DATA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU16_UDP_DATA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU16_UDP_PTP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+};
+
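+/*
+ * Editorial note, not part of the upstream profile: each row in the
+ * kpu*_action_entries[] tables below is one struct npc_kpu_profile_action
+ * initializer. Assuming the field order declared in npc.h within this same
+ * commit, the first non-NOP KPU1 row reads roughly as the sketch below;
+ * treat the names and the byte-offset interpretation as an assumption for
+ * readability rather than a definitive decode:
+ *
+ *	{
+ *		.errlev = NPC_ERRLEV_RE, .errcode = NPC_EC_NOERR,
+ *		.dp0_offset = 8, .dp1_offset = 0, .dp2_offset = 6,
+ *					// half-words fed to the next KPU's CAM
+ *		.bypass_count = 3,	// skip KPU2..KPU4, resume at a KPU5 state
+ *		.parse_done = 0,
+ *		.next_state = NPC_S_KPU5_IP,
+ *		.ptr_advance = 14,	// Ethernet header length
+ *		.cap_ena = 1,
+ *		.lid = NPC_LID_LA, .ltype = NPC_LT_LA_ETHER,
+ *		.flags = 0,
+ *		.offset = 0, .mask = 0, .right = 0, .shift = 0,
+ *	}
+ */
+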
+static struct npc_kpu_profile_action kpu1_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_IP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_IP6, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_ARP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_RARP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_PTP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_FCOE, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 0, 0,
+ NPC_S_KPU2_CTAG2, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 12, 0, 0,
+ NPC_S_KPU2_CTAG, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 22, 0, 0,
+ NPC_S_KPU2_SBTAG, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 26, 0, 0,
+ NPC_S_KPU2_ETAG, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU4_NSH, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_L_WITH_NSH,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 1, 0,
+ NPC_S_KPU3_DSA, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 2, 0,
+ NPC_S_KPU4_PPPOE, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_8023,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_8023,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_L_UNK_ETYPE,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_IP, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_IP6, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_ARP, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_RARP, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_PTP, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_FCOE, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 0, 0,
+ NPC_S_KPU2_CTAG2, 20, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CTAG, 20, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 22, 0, 0,
+ NPC_S_KPU2_SBTAG, 20, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 20, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 26, 0, 0,
+ NPC_S_KPU2_ETAG, 20, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_ETAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU4_NSH, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_L_WITH_NSH,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_L_UNK_ETYPE,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LA, NPC_EC_IH_LENGTH,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_L_UNK_ETYPE,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 16, 0, 0,
+ NPC_S_KPU2_EXDSA, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 12, 2, 0,
+ NPC_S_KPU4_FDSA, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LA, NPC_EC_EDSA_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_IP, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_IP6, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_ARP, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_RARP, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_PTP, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_FCOE, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 0, 0,
+ NPC_S_KPU2_CTAG2, 28, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CTAG, 28, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 22, 0, 0,
+ NPC_S_KPU2_SBTAG, 28, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 28, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 26, 0, 0,
+ NPC_S_KPU2_ETAG, 28, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_ETAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU4_NSH, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_L_WITH_NSH,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_L_UNK_ETYPE,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_IP, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_IP6, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_ARP, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_RARP, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_PTP, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_FCOE, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 0, 0,
+ NPC_S_KPU2_CTAG2, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CTAG, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 22, 0, 0,
+ NPC_S_KPU2_SBTAG, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 26, 0, 0,
+ NPC_S_KPU2_ETAG, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU4_NSH, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_L_WITH_NSH,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_L_UNK_ETYPE,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_IP, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_IP6, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_ARP, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_RARP, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_PTP, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_FCOE, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 0, 0,
+ NPC_S_KPU2_CTAG2, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CTAG, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 22, 0, 0,
+ NPC_S_KPU2_SBTAG, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 26, 0, 0,
+ NPC_S_KPU2_ETAG, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU4_NSH, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_CPT_IP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_CPT_IP6, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CPT_CTAG, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CPT_QINQ, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 0, 0, 1, 0,
+ NPC_S_KPU3_VLAN_EXDSA, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LA, NPC_EC_L2_K1,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu2_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 1, 0,
+ NPC_S_KPU4_NSH, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_NGIO, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_F_LB_U_UNK_ETYPE,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 1, 0,
+ NPC_S_KPU4_NSH, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG_UNK,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_STAG_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_STAG, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_STAG_STAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 1, 0,
+ NPC_S_KPU4_NSH, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU3_STAG, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG_STAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG_UNK,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_UNK_ETYPE,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 1, 0,
+ NPC_S_KPU4_NSH, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG_UNK,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_QINQ_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_QINQ, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_QINQ_QINQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_UNK_ETYPE,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 1,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_NSH, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_STAG, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_STAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_QINQ, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_QINQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 28, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 28, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 28, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU3_STAG, 28, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG_STAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 28, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG_UNK,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_UNK_ETYPE,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 1, 0,
+ NPC_S_KPU4_NSH, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_QINQ, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_QINQ_QINQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_UNK_ETYPE,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA,
+ NPC_F_LB_L_EDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA,
+ NPC_F_LB_L_EDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA,
+ NPC_F_LB_L_EDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_RARP, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA,
+ NPC_F_LB_L_EDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA,
+ NPC_F_LB_L_EDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA,
+ NPC_F_LB_L_EDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 16, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA_VLAN,
+ NPC_F_LB_L_EDSA_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA,
+ NPC_F_LB_U_UNK_ETYPE | NPC_F_LB_L_EDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA,
+ NPC_F_LB_L_EXDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA,
+ NPC_F_LB_L_EXDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA,
+ NPC_F_LB_L_EXDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_RARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA,
+ NPC_F_LB_L_EXDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA,
+ NPC_F_LB_L_EXDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA,
+ NPC_F_LB_L_EXDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA_VLAN,
+ NPC_F_LB_L_EXDSA_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA,
+ NPC_F_LB_U_UNK_ETYPE | NPC_F_LB_L_EXDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_CPT_IP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_CPT_IP6, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_CPT_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_CPT_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu3_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 6, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 6, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 6, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 6, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 6, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 6, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 6, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 6, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 6, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA,
+ NPC_F_LB_L_DSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA,
+ NPC_F_LB_L_DSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA,
+ NPC_F_LB_L_DSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA,
+ NPC_F_LB_L_DSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA,
+ NPC_F_LB_L_DSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA,
+ NPC_F_LB_L_DSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
+ NPC_F_LB_L_DSA_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
+ NPC_F_LB_L_DSA_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
+ NPC_F_LB_L_DSA_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
+ NPC_F_LB_L_DSA_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
+ NPC_F_LB_L_DSA_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
+ NPC_F_LB_L_DSA_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
+ NPC_F_LB_U_UNK_ETYPE | NPC_F_LB_L_DSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA,
+ NPC_F_LB_U_UNK_ETYPE | NPC_F_LB_L_DSA_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU4_VLAN_EXDSA, 12, 1,
+ NPC_LID_LB, NPC_LT_LB_VLAN_EXDSA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu4_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_MPLS_PL, 4, 1,
+ NPC_LID_LC, NPC_LT_LC_MPLS,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_MPLS_PL, 8, 1,
+ NPC_LID_LC, NPC_LT_LC_MPLS,
+ NPC_F_LC_L_MPLS_2_LABELS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_MPLS_PL, 12, 1,
+ NPC_LID_LC, NPC_LT_LC_MPLS,
+ NPC_F_LC_L_MPLS_3_LABELS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 4, 0, 0, 0,
+ NPC_S_KPU5_MPLS, 12, 1,
+ NPC_LID_LC, NPC_LT_LC_MPLS,
+ NPC_F_LC_L_MPLS_4_LABELS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 7, 0,
+ NPC_S_KPU12_TU_IP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH,
+ 0,
+ 1, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 7, 0,
+ NPC_S_KPU12_TU_IP6, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH,
+ 0,
+ 1, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 6, 0,
+ NPC_S_KPU11_TU_ETHER, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH,
+ 0,
+ 1, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 4, 0,
+ NPC_S_KPU9_TU_MPLS_IN_NSH, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH,
+ 0,
+ 1, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_NSH_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_IP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_IP6, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_ARP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_RARP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_PTP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_FCOE, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_ARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_RARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_PTP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_FCOE, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_IP, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_IP6, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_U_UNK_ETYPE | NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_IP, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_IP6, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_ARP, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_RARP, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_PTP, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_FCOE, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_IP, 10, 0,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_IP6, 10, 0,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K4,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu5_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP_TTL_0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP_FRAG_OFFSET_1,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_U_IP_FRAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU8_TCP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_UDP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_SCTP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_IGMP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU9_ESP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_AH, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_GRE, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 6, 0,
+ NPC_S_KPU12_TU_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_L_IP_IN_IP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_L_6TO4,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 3, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_L_MPLS_IN_IP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_U_UNK_PROTO,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_U_IP_FRAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU8_TCP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 2, 0,
+ NPC_S_KPU8_UDP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_SCTP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_IGMP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU9_ESP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_AH, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_GRE, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 6, 0,
+ NPC_S_KPU12_TU_IP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_L_IP_IN_IP,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_L_6TO4,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 3, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_L_MPLS_IN_IP,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_U_UNK_PROTO,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_U_IP_FRAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP_VER,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_ARP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_RARP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_PTP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_FCOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP6_HOP_0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU8_TCP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_UDP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_SCTP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP6, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_GRE, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ NPC_F_LC_L_IP6_TUN_IP6,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 3, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ NPC_F_LC_L_IP6_MPLS_IN_IP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_HOP_DEST, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_HOP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_HOP_DEST, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_DEST,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_ROUT, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_ROUT,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 2, 0, 0, 0,
+ NPC_S_KPU6_IP6_FRAG, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_U_IP6_FRAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU9_ESP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_AH, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_MOBILITY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_HOSTID,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_SHIM6,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ NPC_F_LC_U_UNK_PROTO,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP6_VER,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 6, 0,
+ NPC_S_KPU12_TU_IP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 5, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 5, 0,
+ NPC_S_KPU11_TU_ETHER, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_MPLS_2MANY,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 6, 0,
+ NPC_S_KPU12_TU_IP, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 5, 0,
+ NPC_S_KPU11_TU_ETHER, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 5, 0,
+ NPC_S_KPU11_TU_ETHER, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP_TTL_0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP_FRAG_OFFSET_1,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_U_IP_FRAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU8_TCP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_UDP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_SCTP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_IGMP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU9_ESP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_AH, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_GRE, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 6, 0,
+ NPC_S_KPU12_TU_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_L_IP_IN_IP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_L_6TO4,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 3, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_L_MPLS_IN_IP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_U_UNK_PROTO,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU8_TCP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 2, 0,
+ NPC_S_KPU8_UDP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_SCTP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_IGMP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU9_ESP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_AH, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_GRE, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 6, 0,
+ NPC_S_KPU12_TU_IP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_L_IP_IN_IP,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_L_6TO4,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 3, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_L_MPLS_IN_IP,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_U_UNK_PROTO,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP_VER,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP6_HOP_0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU8_TCP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_UDP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_SCTP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP6, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_GRE, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ NPC_F_LC_L_IP6_TUN_IP6,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 3, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ NPC_F_LC_L_IP6_MPLS_IN_IP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_HOP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_DEST,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_CPT_ROUT, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_ROUT,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 2, 0, 0, 0,
+ NPC_S_KPU6_IP6_CPT_FRAG, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_U_IP6_FRAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU9_ESP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_AH, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_MOBILITY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_HOSTID,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_SHIM6,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ NPC_F_LC_U_UNK_PROTO,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP6_VER,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NGIO,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu6_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 1, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 1, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU9_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 5, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU7_IP6_ROUT, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 2, 0, 0, 0,
+ NPC_S_KPU7_IP6_FRAG, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 1, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 1, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU9_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 5, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 2, 0, 0, 0,
+ NPC_S_KPU7_IP6_FRAG, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 1, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 1, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU9_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 5, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 1, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 1, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU9_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 5, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU7_IP6_ROUT, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 2, 0, 0, 0,
+ NPC_S_KPU7_CPT_IP6_FRAG, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 1, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 1, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU9_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 5, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 2, 0, 0, 0,
+ NPC_S_KPU7_CPT_IP6_FRAG, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu7_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 0, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 0, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU9_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 4, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 0, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 0, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU9_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 4, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu8_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ {
+ NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_FIN_ONLY,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_ZERO,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_RST_FIN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_URG_SYN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_RST_SYN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_SYN_FIN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_HTTP_DATA, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_HTTPS_DATA, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_PPTP_DATA, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_TCP_DATA, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ NPC_F_LD_L_TCP_UNK_PORT,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_HTTP_DATA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ NPC_F_LD_L_TCP_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_HTTPS_DATA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ NPC_F_LD_L_TCP_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_PPTP_DATA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ NPC_F_LD_L_TCP_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_TCP_DATA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ NPC_F_LD_L_TCP_UNK_PORT_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 2, 0, 0,
+ NPC_S_KPU9_VXLAN, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 2, 0, 0,
+ NPC_S_KPU9_VXLANGPE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 2, 0, 0,
+ NPC_S_KPU9_GENEVE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 2, 0, 0,
+ NPC_S_KPU9_GTPC, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 2, 0, 0,
+ NPC_S_KPU9_GTPU, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_KPU16_UDP_PTP, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_KPU16_UDP_PTP, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_UDP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU9_ESP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU9_ESP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_UDP_DATA, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_SCTP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_ICMP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_IGMP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_ICMP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_AH,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_NVGRE,
+ NPC_F_LD_L_GRE_NVGRE,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LD, NPC_EC_NVGRE,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_ROUTE,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_UNK_PROTO,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LD, NPC_EC_GRE,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU11_TU_PPP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_VER1,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU11_TU_PPP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_VER1_HAS_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU11_TU_PPP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_VER1_HAS_ACK,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU11_TU_PPP, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_VER1_HAS_SEQ_ACK,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_VER1_UNK_PROTO,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LD, NPC_EC_GRE_VER1,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LD, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu9_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 4, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_GRE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_GRE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 12, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_GRE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 4, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS, 12, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_GRE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_NSH,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_NSH,
+ NPC_F_LD_L_MPLS_2_LABELS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_NSH,
+ NPC_F_LD_L_MPLS_3_LABELS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 4, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_NSH,
+ NPC_F_LD_L_MPLS_4_LABELS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_IP,
+ NPC_F_LD_L_MPLS_2_LABELS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_IP,
+ NPC_F_LD_L_MPLS_3_LABELS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 4, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_IP,
+ NPC_F_LD_L_MPLS_4_LABELS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_NSH_IN_GRE,
+ 0,
+ 1, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_NSH_IN_GRE,
+ 0,
+ 1, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_NSH_IN_GRE,
+ 0,
+ 1, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_LE, NPC_EC_NSH_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_NSH_IN_GRE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLAN,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLAN,
+ NPC_F_LE_L_VXLAN_NOVNI,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LE, NPC_EC_VXLAN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LE, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ NPC_F_LE_L_VXLANGPE_NOVNI,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ NPC_F_LE_L_VXLANGPE_NOVNI,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ NPC_F_LE_L_VXLANGPE_NOVNI,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ NPC_F_LE_L_VXLANGPE_NOVNI,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ NPC_F_LE_L_VXLANGPE_NOVNI,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ NPC_F_LE_L_VXLANGPE_UNK,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ NPC_F_LE_L_VXLANGPE_NONP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ 0,
+ 0, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_OAM,
+ 0, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_CRI_OPT,
+ 0, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_OAM_CRI_OPT,
+ 0, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ 0,
+ 0, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_OAM,
+ 0, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_CRI_OPT,
+ 0, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_OAM_CRI_OPT,
+ 0, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ 0,
+ 0, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_OAM,
+ 0, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_CRI_OPT,
+ 0, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_OAM_CRI_OPT,
+ 0, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_GTPC,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_GTPU,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 4, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 12, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 4, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS, 12, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_ESP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LE, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LE, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu10_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU12_TU_IP, 4, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU12_TU_IP6, 4, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU11_TU_ETHER, 4, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LE, NPC_EC_MPLS_2MANY,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU12_TU_IP, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU12_TU_IP6, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU11_TU_ETHER, 4, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU11_TU_ETHER, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU11_TU_MPLS_PL, 4, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_MPLS_IN_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU11_TU_MPLS_PL, 8, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_MPLS_IN_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU11_TU_MPLS_PL, 12, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_MPLS_IN_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 4, 0, 0, 0,
+ NPC_S_KPU11_TU_MPLS, 12, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_MPLS_IN_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU12_TU_IP, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_NSH_IN_VXLANGPE,
+ 0,
+ 1, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU12_TU_IP6, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_NSH_IN_VXLANGPE,
+ 0,
+ 1, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU11_TU_ETHER_IN_NSH, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_NSH_IN_VXLANGPE,
+ 0,
+ 1, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_LF, NPC_EC_NSH_UNK,
+ 6, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_NSH_IN_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LE, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu11_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 14, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 14, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_ARP, 14, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_ARP, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_U_UNK_ETYPE | NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 22, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_STAG_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 22, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_STAG_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_ARP, 22, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_STAG_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_U_UNK_ETYPE | NPC_F_LF_L_WITH_STAG_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_ARP, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_U_UNK_ETYPE | NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 22, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_QINQ_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 22, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_QINQ_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_ARP, 22, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_QINQ_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_U_UNK_ETYPE | NPC_F_LF_L_WITH_QINQ_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_QINQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_QINQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_ARP, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_QINQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_U_UNK_ETYPE | NPC_F_LF_L_WITH_QINQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_U_UNK_ETYPE,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_PPP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 4, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 4, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LF, NPC_EC_MPLS_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LF, NPC_EC_MPLS_2MANY,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LF, NPC_EC_MPLS_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_ETHER_IN_NSH,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LF, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu12_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU15_TU_TCP, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_UDP, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_SCTP, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_ICMP, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_IGMP, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_ESP, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_AH, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_UNK_IP_PROTO,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU15_TU_TCP, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_UDP, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_SCTP, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_ICMP, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_IGMP, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_ESP, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_AH, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS | NPC_F_LG_U_UNK_IP_PROTO,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LF, NPC_EC_IP_VER,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_ARP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU15_TU_TCP, 40, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_UDP, 40, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_SCTP, 40, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_ICMP, 40, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_ICMP6, 40, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_ESP, 40, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_AH, 40, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU13_TU_IP6_EXT, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ NPC_F_LG_U_IP6_HAS_EXT,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LF, NPC_EC_IP6_VER,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LF, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LG, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu13_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu14_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu15_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ {
+ NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_FIN_ONLY,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_ZERO,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_RST_FIN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_URG_SYN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_RST_SYN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_SYN_FIN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_HTTP_DATA, 20, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_L_TCP_HTTP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_HTTPS_DATA, 20, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_L_TCP_HTTP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_PPTP_DATA, 20, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_L_TCP_PPTP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_TCP_DATA, 20, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_L_TCP_UNK_PORT,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_HTTP_DATA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_U_TCP_HAS_OPTIONS | NPC_F_LH_L_TCP_HTTP,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_HTTPS_DATA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_U_TCP_HAS_OPTIONS | NPC_F_LH_L_TCP_HTTPS,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_PPTP_DATA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_U_TCP_HAS_OPTIONS | NPC_F_LH_L_TCP_PPTP,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_TCP_DATA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_U_TCP_HAS_OPTIONS | NPC_F_LH_L_TCP_UNK_PORT,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_UDP_DATA, 8, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_UDP,
+ NPC_F_LH_L_UDP_UNK_PORT,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_SCTP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_ICMP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_IGMP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_ICMP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_ESP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_AH,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LG, NPC_EC_L4,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LH, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu16_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LH, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LH, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LH, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LH, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LH, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LH, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+};
+
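+/* One entry per parser stage (KPU1..KPU16): the number of CAM match entries
+ * and action entries for that KPU plus pointers to the tables above.  The
+ * two tables are index-aligned, i.e. the action at index N is applied when
+ * CAM entry N matches.
+ */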
+static struct npc_kpu_profile npc_kpu_profiles[] = {
+ {
+ ARRAY_SIZE(kpu1_cam_entries),
+ ARRAY_SIZE(kpu1_action_entries),
+ &kpu1_cam_entries[0],
+ &kpu1_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu2_cam_entries),
+ ARRAY_SIZE(kpu2_action_entries),
+ &kpu2_cam_entries[0],
+ &kpu2_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu3_cam_entries),
+ ARRAY_SIZE(kpu3_action_entries),
+ &kpu3_cam_entries[0],
+ &kpu3_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu4_cam_entries),
+ ARRAY_SIZE(kpu4_action_entries),
+ &kpu4_cam_entries[0],
+ &kpu4_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu5_cam_entries),
+ ARRAY_SIZE(kpu5_action_entries),
+ &kpu5_cam_entries[0],
+ &kpu5_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu6_cam_entries),
+ ARRAY_SIZE(kpu6_action_entries),
+ &kpu6_cam_entries[0],
+ &kpu6_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu7_cam_entries),
+ ARRAY_SIZE(kpu7_action_entries),
+ &kpu7_cam_entries[0],
+ &kpu7_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu8_cam_entries),
+ ARRAY_SIZE(kpu8_action_entries),
+ &kpu8_cam_entries[0],
+ &kpu8_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu9_cam_entries),
+ ARRAY_SIZE(kpu9_action_entries),
+ &kpu9_cam_entries[0],
+ &kpu9_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu10_cam_entries),
+ ARRAY_SIZE(kpu10_action_entries),
+ &kpu10_cam_entries[0],
+ &kpu10_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu11_cam_entries),
+ ARRAY_SIZE(kpu11_action_entries),
+ &kpu11_cam_entries[0],
+ &kpu11_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu12_cam_entries),
+ ARRAY_SIZE(kpu12_action_entries),
+ &kpu12_cam_entries[0],
+ &kpu12_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu13_cam_entries),
+ ARRAY_SIZE(kpu13_action_entries),
+ &kpu13_cam_entries[0],
+ &kpu13_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu14_cam_entries),
+ ARRAY_SIZE(kpu14_action_entries),
+ &kpu14_cam_entries[0],
+ &kpu14_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu15_cam_entries),
+ ARRAY_SIZE(kpu15_action_entries),
+ &kpu15_cam_entries[0],
+ &kpu15_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu16_cam_entries),
+ ARRAY_SIZE(kpu16_action_entries),
+ &kpu16_cam_entries[0],
+ &kpu16_action_entries[0],
+ },
+};
+
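+/* Default layer-type definitions: for each protocol of interest, the layer
+ * ID/layer type pair (as assigned by the KPU profile above) that identifies
+ * it, e.g. outer IPv4 is LC/NPC_LT_LC_IP.  The 0x0E masks presumably let the
+ * "with options/extension header" layer-type variants match as well.
+ */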
+static struct npc_lt_def_cfg npc_lt_defaults = {
+ .rx_ol2 = {
+ .lid = NPC_LID_LA,
+ .ltype_match = NPC_LT_LA_ETHER,
+ .ltype_mask = 0x0F,
+ },
+ .ovlan = {
+ .lid = NPC_LID_LB,
+ .ltype_match = NPC_LT_LB_CTAG,
+ .ltype_mask = 0x0F,
+ },
+ .ivlan = {
+ .lid = NPC_LID_LB,
+ .ltype_match = NPC_LT_LB_STAG_QINQ,
+ .ltype_mask = 0x0F,
+ },
+ .rx_oip4 = {
+ .lid = NPC_LID_LC,
+ .ltype_match = NPC_LT_LC_IP,
+ .ltype_mask = 0x0E,
+ },
+ .rx_iip4 = {
+ .lid = NPC_LID_LG,
+ .ltype_match = NPC_LT_LG_TU_IP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_oip6 = {
+ .lid = NPC_LID_LC,
+ .ltype_match = NPC_LT_LC_IP6,
+ .ltype_mask = 0x0E,
+ },
+ .rx_iip6 = {
+ .lid = NPC_LID_LG,
+ .ltype_match = NPC_LT_LG_TU_IP6,
+ .ltype_mask = 0x0F,
+ },
+ .rx_otcp = {
+ .lid = NPC_LID_LD,
+ .ltype_match = NPC_LT_LD_TCP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_itcp = {
+ .lid = NPC_LID_LH,
+ .ltype_match = NPC_LT_LH_TU_TCP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_oudp = {
+ .lid = NPC_LID_LD,
+ .ltype_match = NPC_LT_LD_UDP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_iudp = {
+ .lid = NPC_LID_LH,
+ .ltype_match = NPC_LT_LH_TU_UDP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_osctp = {
+ .lid = NPC_LID_LD,
+ .ltype_match = NPC_LT_LD_SCTP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_isctp = {
+ .lid = NPC_LID_LH,
+ .ltype_match = NPC_LT_LH_TU_SCTP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_ipsec = {
+ {
+ .lid = NPC_LID_LE,
+ .ltype_match = NPC_LT_LE_ESP,
+ .ltype_mask = 0x0F,
+ },
+ {
+ .spi_offset = 8,
+ .lid = NPC_LID_LH,
+ .ltype_match = NPC_LT_LH_TU_ESP,
+ .ltype_mask = 0x0F,
+ },
+ },
+ .pck_ol2 = {
+ .lid = NPC_LID_LA,
+ .ltype_match = NPC_LT_LA_ETHER,
+ .ltype_mask = 0x0F,
+ },
+ .pck_oip4 = {
+ .lid = NPC_LID_LC,
+ .ltype_match = NPC_LT_LC_IP,
+ .ltype_mask = 0x0E,
+ },
+ .pck_iip4 = {
+ .lid = NPC_LID_LG,
+ .ltype_match = NPC_LT_LG_TU_IP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_apad0 = {
+ .valid = 0,
+ .lid = NPC_LID_LC,
+ .ltype_match = NPC_LT_LC_IP6,
+ .ltype_mask = 0x0F,
+ },
+ .rx_apad1 = {
+ .valid = 0,
+ .lid = NPC_LID_LC,
+ .ltype_match = NPC_LT_LC_IP6,
+ .ltype_mask = 0x0F,
+ },
+ .rx_et = {
+ {
+ .lid = NPC_LID_LB,
+ .ltype_match = NPC_LT_NA,
+ .ltype_mask = 0x0,
+ },
+ {
+ .lid = NPC_LID_LB,
+ .ltype_match = NPC_LT_NA,
+ .ltype_mask = 0x0,
+ },
+ },
+};
+
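+/* Default MCAM key extraction (MKEX) profile.  Each KEX_LD_CFG() entry
+ * selects a run of packet-header bytes to copy into the MCAM search key;
+ * the per-field comments note the extracted field and its destination
+ * key-word bit range, e.g. "DMAC: 6 bytes, KW1[55:8]".
+ */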
+static struct npc_mcam_kex npc_mkex_default = {
+ .mkex_sign = MKEX_SIGN,
+ .name = "default",
+ .kpu_version = NPC_KPU_PROFILE_VER,
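+	/* Assumed semantics: the value shifted to bits 32 and above selects
+	 * the MCAM key size (X2 = double width), while the
+	 * NPC_PARSE_NIBBLE_* masks select which parse-result nibbles
+	 * (layer types, error code, channel) are folded into the key.
+	 */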
+ .keyx_cfg = {
+ /* nibble: LA..LE (ltype only) + Error code + Channel */
+ [NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_RX |
+ (u64)NPC_EXACT_NIBBLE_HIT,
+ /* nibble: LA..LE (ltype only) */
+ [NIX_INTF_TX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_TX,
+ },
+ .intf_lid_lt_ld = {
+ /* Default RX MCAM KEX profile */
+ [NIX_INTF_RX] = {
+ [NPC_LID_LA] = {
+ /* Layer A: Ethernet: */
+ [NPC_LT_LA_ETHER] = {
+ /* DMAC: 6 bytes, KW1[55:8] */
+ KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, NPC_KEXOF_DMAC),
+ /* Ethertype: 2 bytes, KW0[55:40] */
+ KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x5),
+ },
+ /* Layer A: HiGig2: */
+ [NPC_LT_LA_HIGIG2_ETHER] = {
+ /* Classification: 2 bytes, KW1[23:8] */
+ KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, NPC_KEXOF_DMAC),
+ /* VID: 2 bytes, KW1[39:24] */
+ KEX_LD_CFG(0x01, 0xc, 0x1, 0x0,
+ NPC_KEXOF_DMAC + 2),
+ },
+ },
+ [NPC_LID_LB] = {
+ /* Layer B: Single VLAN (CTAG) */
+ [NPC_LT_LB_CTAG] = {
+ /* CTAG VLAN: 2 bytes, KW1[7:0], KW0[63:56] */
+ KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x7),
+ /* Ethertype: 2 bytes, KW0[55:40] */
+ KEX_LD_CFG(0x01, 0x4, 0x1, 0x0, 0x5),
+ },
+ /* Layer B: Stacked VLAN (STAG|QinQ) */
+ [NPC_LT_LB_STAG_QINQ] = {
+ /* Outer VLAN: 2 bytes, KW1[7:0], KW0[63:56] */
+ KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x7),
+ /* Ethertype: 2 bytes, KW0[55:40] */
+ KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, 0x5),
+ },
+ [NPC_LT_LB_FDSA] = {
+ /* SWITCH PORT: 1 byte, KW0[63:56] */
+ KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0x7),
+ /* Ethertype: 2 bytes, KW0[55:40] */
+ KEX_LD_CFG(0x01, 0x4, 0x1, 0x0, 0x5),
+ },
+ },
+ [NPC_LID_LC] = {
+ /* Layer C: IPv4 */
+ [NPC_LT_LC_IP] = {
+ /* SIP+DIP: 8 bytes, KW2[63:0] */
+ KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10),
+ /* TOS: 1 byte, KW1[63:56] */
+ KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0xf),
+ },
+ /* Layer C: IPv6 */
+ [NPC_LT_LC_IP6] = {
+ /* Everything up to SADDR: 8 bytes, KW2[63:0] */
+ KEX_LD_CFG(0x07, 0x0, 0x1, 0x0, 0x10),
+ },
+ },
+ [NPC_LID_LD] = {
+				/* Layer D: UDP */
+ [NPC_LT_LD_UDP] = {
+ /* SPORT+DPORT: 4 bytes, KW3[31:0] */
+ KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18),
+ },
+				/* Layer D: TCP */
+ [NPC_LT_LD_TCP] = {
+ /* SPORT+DPORT: 4 bytes, KW3[31:0] */
+ KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18),
+ },
+ },
+ },
+
+ /* Default TX MCAM KEX profile */
+ [NIX_INTF_TX] = {
+ [NPC_LID_LA] = {
+ /* Layer A: NIX_INST_HDR_S + Ethernet */
+				/* NIX appends 8 bytes of NIX_INST_HDR_S at the
+				 * start of each TX packet supplied to NPC, so
+				 * the fields below are extracted 8 bytes deeper
+				 * than on the RX side (e.g. DMAC at offset 0x8).
+				 */
+ [NPC_LT_LA_IH_NIX_ETHER] = {
+					/* PF_FUNC: 2 bytes, KW0[47:32] */
+ KEX_LD_CFG(0x01, 0x0, 0x1, 0x0, 0x4),
+ /* DMAC: 6 bytes, KW1[63:16] */
+ KEX_LD_CFG(0x05, 0x8, 0x1, 0x0, 0xa),
+ },
+ /* Layer A: HiGig2: */
+ [NPC_LT_LA_IH_NIX_HIGIG2_ETHER] = {
+					/* PF_FUNC: 2 bytes, KW0[47:32] */
+ KEX_LD_CFG(0x01, 0x0, 0x1, 0x0, 0x4),
+ /* VID: 2 bytes, KW1[31:16] */
+ KEX_LD_CFG(0x01, 0x10, 0x1, 0x0, 0xa),
+ },
+ },
+ [NPC_LID_LB] = {
+ /* Layer B: Single VLAN (CTAG) */
+ [NPC_LT_LB_CTAG] = {
+ /* CTAG VLAN[2..3] KW0[63:48] */
+ KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6),
+ /* CTAG VLAN[2..3] KW1[15:0] */
+ KEX_LD_CFG(0x01, 0x4, 0x1, 0x0, 0x8),
+ },
+ /* Layer B: Stacked VLAN (STAG|QinQ) */
+ [NPC_LT_LB_STAG_QINQ] = {
+ /* Outer VLAN: 2 bytes, KW0[63:48] */
+ KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6),
+ /* Outer VLAN: 2 Bytes, KW1[15:0] */
+ KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, 0x8),
+ },
+ },
+ [NPC_LID_LC] = {
+ /* Layer C: IPv4 */
+ [NPC_LT_LC_IP] = {
+ /* SIP+DIP: 8 bytes, KW2[63:0] */
+ KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10),
+ },
+ /* Layer C: IPv6 */
+ [NPC_LT_LC_IP6] = {
+ /* Everything up to SADDR: 8 bytes, KW2[63:0] */
+ KEX_LD_CFG(0x07, 0x0, 0x1, 0x0, 0x10),
+ },
+ },
+ [NPC_LID_LD] = {
+				/* Layer D: UDP */
+ [NPC_LT_LD_UDP] = {
+ /* SPORT+DPORT: 4 bytes, KW3[31:0] */
+ KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18),
+ },
+				/* Layer D: TCP */
+ [NPC_LT_LD_TCP] = {
+ /* SPORT+DPORT: 4 bytes, KW3[31:0] */
+ KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18),
+ },
+ },
+ },
+ },
+};
+
+#endif /* NPC_PROFILE_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
new file mode 100644
index 000000000..0ee420a48
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -0,0 +1,530 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell PTP driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+
+#include "ptp.h"
+#include "mbox.h"
+#include "rvu.h"
+
+#define DRV_NAME "Marvell PTP Driver"
+
+#define PCI_DEVID_OCTEONTX2_PTP 0xA00C
+#define PCI_SUBSYS_DEVID_OCTX2_98xx_PTP 0xB100
+#define PCI_SUBSYS_DEVID_OCTX2_96XX_PTP 0xB200
+#define PCI_SUBSYS_DEVID_OCTX2_95XX_PTP 0xB300
+#define PCI_SUBSYS_DEVID_OCTX2_95XXN_PTP 0xB400
+#define PCI_SUBSYS_DEVID_OCTX2_95MM_PTP 0xB500
+#define PCI_SUBSYS_DEVID_OCTX2_95XXO_PTP 0xB600
+#define PCI_DEVID_OCTEONTX2_RST 0xA085
+#define PCI_DEVID_CN10K_PTP 0xA09E
+#define PCI_SUBSYS_DEVID_CN10K_A_PTP 0xB900
+#define PCI_SUBSYS_DEVID_CNF10K_A_PTP 0xBA00
+#define PCI_SUBSYS_DEVID_CNF10K_B_PTP 0xBC00
+
+#define PCI_PTP_BAR_NO 0
+
+#define PTP_CLOCK_CFG 0xF00ULL
+#define PTP_CLOCK_CFG_PTP_EN BIT_ULL(0)
+#define PTP_CLOCK_CFG_EXT_CLK_EN BIT_ULL(1)
+#define PTP_CLOCK_CFG_EXT_CLK_IN_MASK GENMASK_ULL(7, 2)
+#define PTP_CLOCK_CFG_TSTMP_EDGE BIT_ULL(9)
+#define PTP_CLOCK_CFG_TSTMP_EN BIT_ULL(8)
+#define PTP_CLOCK_CFG_TSTMP_IN_MASK GENMASK_ULL(15, 10)
+#define PTP_CLOCK_CFG_PPS_EN BIT_ULL(30)
+#define PTP_CLOCK_CFG_PPS_INV BIT_ULL(31)
+
+#define PTP_PPS_HI_INCR 0xF60ULL
+#define PTP_PPS_LO_INCR 0xF68ULL
+#define PTP_PPS_THRESH_HI 0xF58ULL
+
+#define PTP_CLOCK_LO 0xF08ULL
+#define PTP_CLOCK_HI 0xF10ULL
+#define PTP_CLOCK_COMP 0xF18ULL
+#define PTP_TIMESTAMP 0xF20ULL
+#define PTP_CLOCK_SEC 0xFD0ULL
+#define PTP_SEC_ROLLOVER 0xFD8ULL
+
+#define CYCLE_MULT 1000
+
+static struct ptp *first_ptp_block;
+static const struct pci_device_id ptp_id_table[];
+
+static bool is_ptp_dev_cnf10kb(struct ptp *ptp)
+{
+	return ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_B_PTP;
+}
+
+static bool is_ptp_dev_cn10k(struct ptp *ptp)
+{
+	return ptp->pdev->device == PCI_DEVID_CN10K_PTP;
+}
+
+static bool cn10k_ptp_errata(struct ptp *ptp)
+{
+ if (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP ||
+ ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_PTP)
+ return true;
+ return false;
+}
+
+static bool is_ptp_tsfmt_sec_nsec(struct ptp *ptp)
+{
+ if (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP ||
+ ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_PTP)
+ return true;
+ return false;
+}
+
+static enum hrtimer_restart ptp_reset_thresh(struct hrtimer *hrtimer)
+{
+ struct ptp *ptp = container_of(hrtimer, struct ptp, hrtimer);
+ ktime_t curr_ts = ktime_get();
+ ktime_t delta_ns, period_ns;
+ u64 ptp_clock_hi;
+
+ /* calculate the elapsed time since last restart */
+ delta_ns = ktime_to_ns(ktime_sub(curr_ts, ptp->last_ts));
+
+	/* If the PTP clock value has already crossed 0.5 seconds,
+	 * it is too late to update the PPS threshold value, so
+	 * update the threshold after 1 second instead.
+	 */
+ ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI);
+ if (ptp_clock_hi > 500000000) {
+ period_ns = ktime_set(0, (NSEC_PER_SEC + 100 - ptp_clock_hi));
+ } else {
+ writeq(500000000, ptp->reg_base + PTP_PPS_THRESH_HI);
+ period_ns = ktime_set(0, (NSEC_PER_SEC + 100 - delta_ns));
+ }
+
+ hrtimer_forward_now(hrtimer, period_ns);
+ ptp->last_ts = curr_ts;
+
+ return HRTIMER_RESTART;
+}
+
+static void ptp_hrtimer_start(struct ptp *ptp, ktime_t start_ns)
+{
+ ktime_t period_ns;
+
+ period_ns = ktime_set(0, (NSEC_PER_SEC + 100 - start_ns));
+ hrtimer_start(&ptp->hrtimer, period_ns, HRTIMER_MODE_REL);
+ ptp->last_ts = ktime_get();
+}
+
+static u64 read_ptp_tstmp_sec_nsec(struct ptp *ptp)
+{
+ u64 sec, sec1, nsec;
+ unsigned long flags;
+
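+	/* PTP_CLOCK_SEC and PTP_CLOCK_HI are read back to back; reading the
+	 * seconds register once more afterwards detects a nanosecond counter
+	 * rollover that may have happened in between, in which case the
+	 * nanoseconds value is re-read.
+	 */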
+ spin_lock_irqsave(&ptp->ptp_lock, flags);
+ sec = readq(ptp->reg_base + PTP_CLOCK_SEC) & 0xFFFFFFFFUL;
+ nsec = readq(ptp->reg_base + PTP_CLOCK_HI);
+ sec1 = readq(ptp->reg_base + PTP_CLOCK_SEC) & 0xFFFFFFFFUL;
+ /* check nsec rollover */
+ if (sec1 > sec) {
+ nsec = readq(ptp->reg_base + PTP_CLOCK_HI);
+ sec = sec1;
+ }
+ spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+
+ return sec * NSEC_PER_SEC + nsec;
+}
+
+static u64 read_ptp_tstmp_nsec(struct ptp *ptp)
+{
+ return readq(ptp->reg_base + PTP_CLOCK_HI);
+}
+
+static u64 ptp_calc_adjusted_comp(u64 ptp_clock_freq)
+{
+ u64 comp, adj = 0, cycles_per_sec, ns_drift = 0;
+ u32 ptp_clock_nsec, cycle_time;
+ int cycle;
+
+ /* Errata:
+ * Issue #1: At the time of 1 sec rollover of the nano-second counter,
+ * the nano-second counter is set to 0. However, it should be set to
+ * (existing counter_value - 10^9).
+ *
+ * Issue #2: The nano-second counter rolls over at 0x3B9A_C9FF.
+ * It should roll over at 0x3B9A_CA00.
+ */
+
+ /* calculate ptp_clock_comp value */
+ comp = ((u64)1000000000ULL << 32) / ptp_clock_freq;
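+	/* comp is ns per coprocessor cycle in 32.32 fixed point; e.g. a
+	 * 900 MHz clock would give comp = (10^9 << 32) / (9 * 10^8),
+	 * i.e. roughly 1.111 ns per cycle.
+	 */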
+ /* use CYCLE_MULT to avoid accuracy loss due to integer arithmetic */
+ cycle_time = NSEC_PER_SEC * CYCLE_MULT / ptp_clock_freq;
+ /* cycles per sec */
+ cycles_per_sec = ptp_clock_freq;
+
+ /* check whether ptp nanosecond counter rolls over early */
+ cycle = cycles_per_sec - 1;
+ ptp_clock_nsec = (cycle * comp) >> 32;
+ while (ptp_clock_nsec < NSEC_PER_SEC) {
+ if (ptp_clock_nsec == 0x3B9AC9FF)
+ goto calc_adj_comp;
+ cycle++;
+ ptp_clock_nsec = (cycle * comp) >> 32;
+ }
+ /* compute nanoseconds lost per second when nsec counter rolls over */
+ ns_drift = ptp_clock_nsec - NSEC_PER_SEC;
+ /* calculate ptp_clock_comp adjustment */
+ if (ns_drift > 0) {
+ adj = comp * ns_drift;
+ adj = adj / 1000000000ULL;
+ }
+ /* speed up the ptp clock to account for nanoseconds lost */
+ comp += adj;
+ return comp;
+
+calc_adj_comp:
+ /* slow down the ptp clock to not rollover early */
+ adj = comp * cycle_time;
+ adj = adj / 1000000000ULL;
+ adj = adj / CYCLE_MULT;
+ comp -= adj;
+
+ return comp;
+}
+
+struct ptp *ptp_get(void)
+{
+ struct ptp *ptp = first_ptp_block;
+
+ /* Check PTP block is present in hardware */
+ if (!pci_dev_present(ptp_id_table))
+ return ERR_PTR(-ENODEV);
+ /* Check driver is bound to PTP block */
+ if (!ptp)
+ ptp = ERR_PTR(-EPROBE_DEFER);
+ else if (!IS_ERR(ptp))
+ pci_dev_get(ptp->pdev);
+
+ return ptp;
+}
+
+void ptp_put(struct ptp *ptp)
+{
+ if (!ptp)
+ return;
+
+ pci_dev_put(ptp->pdev);
+}
+
+static int ptp_adjfine(struct ptp *ptp, long scaled_ppm)
+{
+ bool neg_adj = false;
+ u32 freq, freq_adj;
+ u64 comp, adj;
+ s64 ppb;
+
+ if (scaled_ppm < 0) {
+ neg_adj = true;
+ scaled_ppm = -scaled_ppm;
+ }
+
+	/* The hardware adds the clock compensation value to the PTP clock
+	 * on every coprocessor clock cycle. Typical convention is that it
+	 * represents the number of nanoseconds between each cycle. In this
+	 * convention the compensation value is in 64-bit fixed-point
+	 * representation where the upper 32 bits are the number of
+	 * nanoseconds and the lower 32 bits are fractions of a nanosecond.
+	 * The scaled_ppm represents the ratio in "parts per million" by which
+	 * the compensation value should be corrected.
+	 * To calculate the new compensation value we use 64-bit fixed-point
+	 * arithmetic on the following formula
+	 *	comp = tbase + tbase * scaled_ppm / (1M * 2^16)
+	 * where tbase is the basic compensation value calculated
+	 * initially in the probe function.
+	 */
+ /* convert scaled_ppm to ppb */
+ ppb = 1 + scaled_ppm;
+ ppb *= 125;
+ ppb >>= 13;
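+	/* scaled_ppm is ppm in 16.16 fixed point, so the above is roughly
+	 * ppb = scaled_ppm * 1000 / 2^16 = scaled_ppm * 125 / 2^13;
+	 * e.g. scaled_ppm = 65536 (1 ppm) yields ppb = 1000.
+	 */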
+
+ if (cn10k_ptp_errata(ptp)) {
+ /* calculate the new frequency based on ppb */
+ freq_adj = (ptp->clock_rate * ppb) / 1000000000ULL;
+ freq = neg_adj ? ptp->clock_rate + freq_adj : ptp->clock_rate - freq_adj;
+ comp = ptp_calc_adjusted_comp(freq);
+ } else {
+ comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
+ adj = comp * ppb;
+ adj = div_u64(adj, 1000000000ull);
+ comp = neg_adj ? comp - adj : comp + adj;
+ }
+ writeq(comp, ptp->reg_base + PTP_CLOCK_COMP);
+
+ return 0;
+}
+
+static int ptp_get_clock(struct ptp *ptp, u64 *clk)
+{
+ /* Return the current PTP clock */
+ *clk = ptp->read_ptp_tstmp(ptp);
+
+ return 0;
+}
+
+void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts)
+{
+ struct pci_dev *pdev;
+ u64 clock_comp;
+ u64 clock_cfg;
+
+ if (!ptp)
+ return;
+
+ pdev = ptp->pdev;
+
+ if (!sclk) {
+ dev_err(&pdev->dev, "PTP input clock cannot be zero\n");
+ return;
+ }
+
+ /* sclk is in MHz */
+ ptp->clock_rate = sclk * 1000000;
+
+ /* Program the seconds rollover value to 1 second */
+ if (is_ptp_dev_cnf10kb(ptp))
+ writeq(0x3b9aca00, ptp->reg_base + PTP_SEC_ROLLOVER);
+
+ /* Enable PTP clock */
+ clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
+
+ if (ext_clk_freq) {
+ ptp->clock_rate = ext_clk_freq;
+ /* Set GPIO as PTP clock source */
+ clock_cfg &= ~PTP_CLOCK_CFG_EXT_CLK_IN_MASK;
+ clock_cfg |= PTP_CLOCK_CFG_EXT_CLK_EN;
+ }
+
+ if (extts) {
+ clock_cfg |= PTP_CLOCK_CFG_TSTMP_EDGE;
+ /* Set GPIO as timestamping source */
+ clock_cfg &= ~PTP_CLOCK_CFG_TSTMP_IN_MASK;
+ clock_cfg |= PTP_CLOCK_CFG_TSTMP_EN;
+ }
+
+ clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
+ clock_cfg |= PTP_CLOCK_CFG_PPS_EN | PTP_CLOCK_CFG_PPS_INV;
+ writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
+
+ /* Set 50% duty cycle for 1Hz output */
+ writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_HI_INCR);
+ writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_LO_INCR);
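+	/* 0x1dcd6500 in the upper 32 bits is 500000000, i.e. 0.5 sec in
+	 * nanoseconds, so the high and low phases each last half a second.
+	 */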
+ if (cn10k_ptp_errata(ptp)) {
+		/* The ptp_clock_hi rolls over to zero one clock cycle before
+		 * it reaches the one second boundary. So, program the
+		 * pps_lo_incr in such a way that the pps threshold value
+		 * comparison at the one second boundary will succeed and the
+		 * pps edge changes. After each one second boundary, the
+		 * hrtimer handler is invoked and reprograms the pps threshold
+		 * value.
+		 */
+ ptp->clock_period = NSEC_PER_SEC / ptp->clock_rate;
+ writeq((0x1dcd6500ULL - ptp->clock_period) << 32,
+ ptp->reg_base + PTP_PPS_LO_INCR);
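+		/* e.g. a 1 GHz clock_rate gives clock_period = 1 ns, so the
+		 * low-phase increment is programmed one cycle short of 0.5 sec.
+		 */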
+ }
+
+ if (cn10k_ptp_errata(ptp))
+ clock_comp = ptp_calc_adjusted_comp(ptp->clock_rate);
+ else
+ clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
+
+ /* Initial compensation value to start the nanosecs counter */
+ writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP);
+}
+
+static int ptp_get_tstmp(struct ptp *ptp, u64 *clk)
+{
+ u64 timestamp;
+
+ if (is_ptp_dev_cn10k(ptp)) {
+ timestamp = readq(ptp->reg_base + PTP_TIMESTAMP);
+ *clk = (timestamp >> 32) * NSEC_PER_SEC + (timestamp & 0xFFFFFFFF);
+ } else {
+ *clk = readq(ptp->reg_base + PTP_TIMESTAMP);
+ }
+
+ return 0;
+}
+
+static int ptp_set_thresh(struct ptp *ptp, u64 thresh)
+{
+ if (!cn10k_ptp_errata(ptp))
+ writeq(thresh, ptp->reg_base + PTP_PPS_THRESH_HI);
+
+ return 0;
+}
+
+static int ptp_extts_on(struct ptp *ptp, int on)
+{
+ u64 ptp_clock_hi;
+
+ if (cn10k_ptp_errata(ptp)) {
+ if (on) {
+ ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI);
+ ptp_hrtimer_start(ptp, (ktime_t)ptp_clock_hi);
+ } else {
+ if (hrtimer_active(&ptp->hrtimer))
+ hrtimer_cancel(&ptp->hrtimer);
+ }
+ }
+
+ return 0;
+}
+
+static int ptp_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct ptp *ptp;
+ int err;
+
+ ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
+ if (!ptp) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ ptp->pdev = pdev;
+
+ err = pcim_enable_device(pdev);
+ if (err)
+ goto error_free;
+
+ err = pcim_iomap_regions(pdev, 1 << PCI_PTP_BAR_NO, pci_name(pdev));
+ if (err)
+ goto error_free;
+
+ ptp->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO];
+
+ pci_set_drvdata(pdev, ptp);
+ if (!first_ptp_block)
+ first_ptp_block = ptp;
+
+ spin_lock_init(&ptp->ptp_lock);
+ if (is_ptp_tsfmt_sec_nsec(ptp))
+ ptp->read_ptp_tstmp = &read_ptp_tstmp_sec_nsec;
+ else
+ ptp->read_ptp_tstmp = &read_ptp_tstmp_nsec;
+
+ if (cn10k_ptp_errata(ptp)) {
+ hrtimer_init(&ptp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ptp->hrtimer.function = ptp_reset_thresh;
+ }
+
+ return 0;
+
+error_free:
+ kfree(ptp);
+
+error:
+ /* For `ptp_get()` we need to differentiate between the case
+ * when the core has not tried to probe this device and the case when
+	 * the probe failed. In the latter case we keep the error in
+ * `dev->driver_data`.
+ */
+ pci_set_drvdata(pdev, ERR_PTR(err));
+ if (!first_ptp_block)
+ first_ptp_block = ERR_PTR(err);
+
+ return err;
+}
+
+static void ptp_remove(struct pci_dev *pdev)
+{
+ struct ptp *ptp = pci_get_drvdata(pdev);
+ u64 clock_cfg;
+
+ if (IS_ERR_OR_NULL(ptp))
+ return;
+
+ if (cn10k_ptp_errata(ptp) && hrtimer_active(&ptp->hrtimer))
+ hrtimer_cancel(&ptp->hrtimer);
+
+ /* Disable PTP clock */
+ clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
+ clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
+ writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
+ kfree(ptp);
+}
+
+static const struct pci_device_id ptp_id_table[] = {
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_98xx_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_96XX_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_95XX_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_95XXN_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_95MM_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_95XXO_PTP) },
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_PTP) },
+ { 0, }
+};
+
+struct pci_driver ptp_driver = {
+ .name = DRV_NAME,
+ .id_table = ptp_id_table,
+ .probe = ptp_probe,
+ .remove = ptp_remove,
+};
+
+int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req,
+ struct ptp_rsp *rsp)
+{
+ int err = 0;
+
+	/* This function is the PTP mailbox handler invoked by AF
+	 * consumers/netdev drivers via the mailbox mechanism. It is used by
+	 * the netdev driver to get the PTP clock and to set frequency
+	 * adjustments. Since the mailbox can be invoked without knowing
+	 * whether the driver is bound to the PTP device, the validation
+	 * below is needed as a first step.
+	 */
+ if (!rvu->ptp)
+ return -ENODEV;
+
+ switch (req->op) {
+ case PTP_OP_ADJFINE:
+ err = ptp_adjfine(rvu->ptp, req->scaled_ppm);
+ break;
+ case PTP_OP_GET_CLOCK:
+ err = ptp_get_clock(rvu->ptp, &rsp->clk);
+ break;
+ case PTP_OP_GET_TSTMP:
+ err = ptp_get_tstmp(rvu->ptp, &rsp->clk);
+ break;
+ case PTP_OP_SET_THRESH:
+ err = ptp_set_thresh(rvu->ptp, req->thresh);
+ break;
+ case PTP_OP_EXTTS_ON:
+ err = ptp_extts_on(rvu->ptp, req->extts_on);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
new file mode 100644
index 000000000..b9d92abc3
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell PTP driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef PTP_H
+#define PTP_H
+
+#include <linux/timecounter.h>
+#include <linux/time64.h>
+#include <linux/spinlock.h>
+
+struct ptp {
+ struct pci_dev *pdev;
+ void __iomem *reg_base;
+ u64 (*read_ptp_tstmp)(struct ptp *ptp);
+ spinlock_t ptp_lock; /* lock */
+ struct hrtimer hrtimer;
+ ktime_t last_ts;
+ u32 clock_rate;
+ u32 clock_period;
+};
+
+struct ptp *ptp_get(void);
+void ptp_put(struct ptp *ptp);
+void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts);
+
+extern struct pci_driver ptp_driver;
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
new file mode 100644
index 000000000..6b4792a94
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -0,0 +1,502 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell CN10K RPM driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include "cgx.h"
+#include "lmac_common.h"
+
+static struct mac_ops rpm_mac_ops = {
+ .name = "rpm",
+ .csr_offset = 0x4e00,
+ .lmac_offset = 20,
+ .int_register = RPMX_CMRX_SW_INT,
+ .int_set_reg = RPMX_CMRX_SW_INT_ENA_W1S,
+ .irq_offset = 1,
+ .int_ena_bit = BIT_ULL(0),
+ .lmac_fwi = RPM_LMAC_FWI,
+ .non_contiguous_serdes_lane = true,
+ .rx_stats_cnt = 43,
+ .tx_stats_cnt = 34,
+ .get_nr_lmacs = rpm_get_nr_lmacs,
+ .get_lmac_type = rpm_get_lmac_type,
+ .lmac_fifo_len = rpm_get_lmac_fifo_len,
+ .mac_lmac_intl_lbk = rpm_lmac_internal_loopback,
+ .mac_get_rx_stats = rpm_get_rx_stats,
+ .mac_get_tx_stats = rpm_get_tx_stats,
+ .mac_enadis_rx_pause_fwding = rpm_lmac_enadis_rx_pause_fwding,
+ .mac_get_pause_frm_status = rpm_lmac_get_pause_frm_status,
+ .mac_enadis_pause_frm = rpm_lmac_enadis_pause_frm,
+ .mac_pause_frm_config = rpm_lmac_pause_frm_config,
+ .mac_enadis_ptp_config = rpm_lmac_ptp_config,
+ .mac_rx_tx_enable = rpm_lmac_rx_tx_enable,
+ .mac_tx_enable = rpm_lmac_tx_enable,
+ .pfc_config = rpm_lmac_pfc_config,
+ .mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg,
+};
+
+struct mac_ops *rpm_get_mac_ops(void)
+{
+ return &rpm_mac_ops;
+}
+
+static void rpm_write(rpm_t *rpm, u64 lmac, u64 offset, u64 val)
+{
+ cgx_write(rpm, lmac, offset, val);
+}
+
+static u64 rpm_read(rpm_t *rpm, u64 lmac, u64 offset)
+{
+ return cgx_read(rpm, lmac, offset);
+}
+
+int rpm_get_nr_lmacs(void *rpmd)
+{
+ rpm_t *rpm = rpmd;
+
+ return hweight8(rpm_read(rpm, 0, CGXX_CMRX_RX_LMACS) & 0xFULL);
+}
+
+int rpm_lmac_tx_enable(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg, last;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ last = cfg;
+ if (enable)
+ cfg |= RPM_TX_EN;
+ else
+ cfg &= ~(RPM_TX_EN);
+
+ if (cfg != last)
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ return !!(last & RPM_TX_EN);
+}
+
+int rpm_lmac_rx_tx_enable(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ if (enable)
+ cfg |= RPM_RX_EN | RPM_TX_EN;
+ else
+ cfg &= ~(RPM_RX_EN | RPM_TX_EN);
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ return 0;
+}
+
+void rpm_lmac_enadis_rx_pause_fwding(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ struct lmac *lmac;
+ u64 cfg;
+
+ if (!rpm)
+ return;
+
+ lmac = lmac_pdata(lmac_id, rpm);
+ if (!lmac)
+ return;
+
+	/* Pause frames are not enabled, just return */
+ if (!bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max))
+ return;
+
+ if (enable) {
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ } else {
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ }
+}
+
+int rpm_lmac_get_pause_frm_status(void *rpmd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ if (!(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE)) {
+ *rx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE);
+ *tx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE);
+ }
+
+ return 0;
+}
+
+static void rpm_cfg_pfc_quanta_thresh(rpm_t *rpm, int lmac_id,
+ unsigned long pfc_en,
+ bool enable)
+{
+ u64 quanta_offset = 0, quanta_thresh = 0, cfg;
+ int i, shift;
+
+ /* Set pause time and interval */
+ for_each_set_bit(i, &pfc_en, 16) {
+ switch (i) {
+ case 0:
+ case 1:
+ quanta_offset = RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL01_QUANTA_THRESH;
+ break;
+ case 2:
+ case 3:
+ quanta_offset = RPMX_MTI_MAC100X_CL23_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL23_QUANTA_THRESH;
+ break;
+ case 4:
+ case 5:
+ quanta_offset = RPMX_MTI_MAC100X_CL45_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL45_QUANTA_THRESH;
+ break;
+ case 6:
+ case 7:
+ quanta_offset = RPMX_MTI_MAC100X_CL67_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL67_QUANTA_THRESH;
+ break;
+ case 8:
+ case 9:
+ quanta_offset = RPMX_MTI_MAC100X_CL89_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL89_QUANTA_THRESH;
+ break;
+ case 10:
+ case 11:
+ quanta_offset = RPMX_MTI_MAC100X_CL1011_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL1011_QUANTA_THRESH;
+ break;
+ case 12:
+ case 13:
+ quanta_offset = RPMX_MTI_MAC100X_CL1213_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL1213_QUANTA_THRESH;
+ break;
+ case 14:
+ case 15:
+ quanta_offset = RPMX_MTI_MAC100X_CL1415_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL1415_QUANTA_THRESH;
+ break;
+ }
+
+ if (!quanta_offset || !quanta_thresh)
+ continue;
+
+ shift = (i % 2) ? 1 : 0;
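+		/* Each quanta/threshold register holds two classes: even
+		 * classes use bits [15:0] and odd classes bits [31:16].
+		 */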
+ cfg = rpm_read(rpm, lmac_id, quanta_offset);
+ if (enable) {
+ cfg |= ((u64)RPM_DEFAULT_PAUSE_TIME << shift * 16);
+ } else {
+ if (!shift)
+ cfg &= ~GENMASK_ULL(15, 0);
+ else
+ cfg &= ~GENMASK_ULL(31, 16);
+ }
+ rpm_write(rpm, lmac_id, quanta_offset, cfg);
+
+ cfg = rpm_read(rpm, lmac_id, quanta_thresh);
+ if (enable) {
+ cfg |= ((u64)(RPM_DEFAULT_PAUSE_TIME / 2) << shift * 16);
+ } else {
+ if (!shift)
+ cfg &= ~GENMASK_ULL(15, 0);
+ else
+ cfg &= ~GENMASK_ULL(31, 16);
+ }
+ rpm_write(rpm, lmac_id, quanta_thresh, cfg);
+ }
+}
+
+int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
+ u8 rx_pause)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE;
+ cfg |= rx_pause ? 0x0 : RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE;
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
+ cfg |= rx_pause ? 0x0 : RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ cfg |= tx_pause ? 0x0 : RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ cfg = rpm_read(rpm, 0, RPMX_CMR_RX_OVR_BP);
+ if (tx_pause) {
+ /* Configure CL0 Pause Quanta & threshold for 802.3X frames */
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 1, true);
+ cfg &= ~RPMX_CMR_RX_OVR_BP_EN(lmac_id);
+ } else {
+ /* Disable all Pause Quanta & threshold values */
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xffff, false);
+ cfg |= RPMX_CMR_RX_OVR_BP_EN(lmac_id);
+ cfg &= ~RPMX_CMR_RX_OVR_BP_BP(lmac_id);
+ }
+ rpm_write(rpm, 0, RPMX_CMR_RX_OVR_BP, cfg);
+ return 0;
+}
+
+void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ /* ALL pause frames received are completely ignored */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ /* Disable forward pause to TX block */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ /* Disable pause frames transmission */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ /* Disable all PFC classes */
+ cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL);
+ cfg = FIELD_SET(RPM_PFC_CLASS_MASK, 0, cfg);
+ rpm_write(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL, cfg);
+
+ /* Disable forward pause to driver */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+	/* Enable channel mask for all LMACs */
+ rpm_write(rpm, 0, RPMX_CMR_CHAN_MSK_OR, ~0ULL);
+}
+
+int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat)
+{
+ rpm_t *rpm = rpmd;
+ u64 val_lo, val_hi;
+
+ if (!rpm || lmac_id >= rpm->lmac_count)
+ return -ENODEV;
+
+ mutex_lock(&rpm->lock);
+
+	/* Update idx to point to the per-LMAC Rx statistics page */
+ idx += lmac_id * rpm->mac_ops->rx_stats_cnt;
+
+ /* Read lower 32 bits of counter */
+ val_lo = rpm_read(rpm, 0, RPMX_MTI_STAT_RX_STAT_PAGES_COUNTERX +
+ (idx * 8));
+
+	/* Upon a read of the lower 32 bits, the higher 32 bits are written
+	 * into RPMX_MTI_STAT_DATA_HI_CDC
+	 */
+ val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC);
+
+ *rx_stat = (val_hi << 32 | val_lo);
+
+ mutex_unlock(&rpm->lock);
+ return 0;
+}
+
+int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat)
+{
+ rpm_t *rpm = rpmd;
+ u64 val_lo, val_hi;
+
+ if (!rpm || lmac_id >= rpm->lmac_count)
+ return -ENODEV;
+
+ mutex_lock(&rpm->lock);
+
+	/* Update idx to point to the per-LMAC Tx statistics page */
+ idx += lmac_id * rpm->mac_ops->tx_stats_cnt;
+
+ val_lo = rpm_read(rpm, 0, RPMX_MTI_STAT_TX_STAT_PAGES_COUNTERX +
+ (idx * 8));
+ val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC);
+
+ *tx_stat = (val_hi << 32 | val_lo);
+
+ mutex_unlock(&rpm->lock);
+ return 0;
+}
+
+u8 rpm_get_lmac_type(void *rpmd, int lmac_id)
+{
+ rpm_t *rpm = rpmd;
+ u64 req = 0, resp;
+ int err;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_LINK_STS, req);
+ err = cgx_fwi_cmd_generic(req, &resp, rpm, 0);
+ if (!err)
+ return FIELD_GET(RESP_LINKSTAT_LMAC_TYPE, resp);
+ return err;
+}
+
+u32 rpm_get_lmac_fifo_len(void *rpmd, int lmac_id)
+{
+ rpm_t *rpm = rpmd;
+ u64 hi_perf_lmac;
+ u8 num_lmacs;
+ u32 fifo_len;
+
+ fifo_len = rpm->mac_ops->fifo_len;
+ num_lmacs = rpm->mac_ops->get_nr_lmacs(rpm);
+
+ switch (num_lmacs) {
+ case 1:
+ return fifo_len;
+ case 2:
+ return fifo_len / 2;
+ case 3:
+		/* The LMAC marked as hi_perf gets half of the FIFO; the rest get 1/4th each */
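+		/* For example, if fifo_len were 65536, the hi_perf LMAC would
+		 * get 32768 and the other two LMACs 16384 each.
+		 */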
+ hi_perf_lmac = rpm_read(rpm, 0, CGXX_CMRX_RX_LMACS);
+ hi_perf_lmac = (hi_perf_lmac >> 4) & 0x3ULL;
+ if (lmac_id == hi_perf_lmac)
+ return fifo_len / 2;
+ return fifo_len / 4;
+ case 4:
+ default:
+ return fifo_len / 4;
+ }
+ return 0;
+}
+
+int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u8 lmac_type;
+ u64 cfg;
+
+ if (!rpm || lmac_id >= rpm->lmac_count)
+ return -ENODEV;
+ lmac_type = rpm->mac_ops->get_lmac_type(rpm, lmac_id);
+
+ if (lmac_type == LMAC_MODE_QSGMII || lmac_type == LMAC_MODE_SGMII) {
+ dev_err(&rpm->pdev->dev, "loopback not supported for LPC mode\n");
+ return 0;
+ }
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1);
+
+ if (enable)
+ cfg |= RPMX_MTI_PCS_LBK;
+ else
+ cfg &= ~RPMX_MTI_PCS_LBK;
+ rpm_write(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1, cfg);
+
+ return 0;
+}
+
+void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_CFG);
+ if (enable) {
+ cfg |= RPMX_RX_TS_PREPEND;
+ cfg |= RPMX_TX_PTP_1S_SUPPORT;
+ } else {
+ cfg &= ~RPMX_RX_TS_PREPEND;
+ cfg &= ~RPMX_TX_PTP_1S_SUPPORT;
+ }
+
+ rpm_write(rpm, lmac_id, RPMX_CMRX_CFG, cfg);
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_XIF_MODE);
+
+ if (enable) {
+ cfg |= RPMX_ONESTEP_ENABLE;
+ cfg &= ~RPMX_TS_BINARY_MODE;
+ } else {
+ cfg &= ~RPMX_ONESTEP_ENABLE;
+ }
+
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_XIF_MODE, cfg);
+}
+
+int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 pfc_en)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg, class_en;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ class_en = rpm_read(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL);
+ pfc_en |= FIELD_GET(RPM_PFC_CLASS_MASK, class_en);
+
+ if (rx_pause) {
+ cfg &= ~(RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE |
+ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE);
+ } else {
+ cfg |= (RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE |
+ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE);
+ }
+
+ if (tx_pause) {
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, pfc_en, true);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ class_en = FIELD_SET(RPM_PFC_CLASS_MASK, pfc_en, class_en);
+ } else {
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xfff, false);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ class_en = FIELD_SET(RPM_PFC_CLASS_MASK, 0, class_en);
+ }
+
+ if (!rx_pause && !tx_pause)
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE;
+ else
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE;
+
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ rpm_write(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL, class_en);
+
+ return 0;
+}
+
+int rpm_lmac_get_pfc_frm_cfg(void *rpmd, int lmac_id, u8 *tx_pause, u8 *rx_pause)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ if (cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE) {
+ *rx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE);
+ *tx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE);
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
new file mode 100644
index 000000000..77f2ef9e1
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell CN10K RPM driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef RPM_H
+#define RPM_H
+
+#include <linux/bits.h>
+
+/* PCI device IDs */
+#define PCI_DEVID_CN10K_RPM 0xA060
+
+/* Registers */
+#define RPMX_CMRX_CFG 0x00
+#define RPMX_RX_TS_PREPEND BIT_ULL(22)
+#define RPMX_TX_PTP_1S_SUPPORT BIT_ULL(17)
+#define RPMX_CMRX_SW_INT 0x180
+#define RPMX_CMRX_SW_INT_W1S 0x188
+#define RPMX_CMRX_SW_INT_ENA_W1S 0x198
+#define RPMX_CMRX_LINK_CFG 0x1070
+#define RPMX_MTI_PCS100X_CONTROL1 0x20000
+#define RPMX_MTI_LPCSX_CONTROL1 0x30000
+#define RPMX_MTI_PCS_LBK BIT_ULL(14)
+#define RPMX_MTI_LPCSX_CONTROL(id) (0x30000 | ((id) * 0x100))
+
+#define RPMX_CMRX_LINK_RANGE_MASK GENMASK_ULL(19, 16)
+#define RPMX_CMRX_LINK_BASE_MASK GENMASK_ULL(11, 0)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG 0x8010
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE BIT_ULL(29)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE BIT_ULL(28)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE BIT_ULL(8)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE BIT_ULL(19)
+#define RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA 0x80A8
+#define RPMX_MTI_MAC100X_CL23_PAUSE_QUANTA 0x80B0
+#define RPMX_MTI_MAC100X_CL45_PAUSE_QUANTA 0x80B8
+#define RPMX_MTI_MAC100X_CL67_PAUSE_QUANTA 0x80C0
+#define RPMX_MTI_MAC100X_CL01_QUANTA_THRESH 0x80C8
+#define RPMX_MTI_MAC100X_CL23_QUANTA_THRESH 0x80D0
+#define RPMX_MTI_MAC100X_CL45_QUANTA_THRESH 0x80D8
+#define RPMX_MTI_MAC100X_CL67_QUANTA_THRESH 0x80E0
+#define RPMX_MTI_MAC100X_CL89_PAUSE_QUANTA 0x8108
+#define RPMX_MTI_MAC100X_CL1011_PAUSE_QUANTA 0x8110
+#define RPMX_MTI_MAC100X_CL1213_PAUSE_QUANTA 0x8118
+#define RPMX_MTI_MAC100X_CL1415_PAUSE_QUANTA 0x8120
+#define RPMX_MTI_MAC100X_CL89_QUANTA_THRESH 0x8128
+#define RPMX_MTI_MAC100X_CL1011_QUANTA_THRESH 0x8130
+#define RPMX_MTI_MAC100X_CL1213_QUANTA_THRESH 0x8138
+#define RPMX_MTI_MAC100X_CL1415_QUANTA_THRESH 0x8140
+#define RPMX_CMR_RX_OVR_BP 0x4120
+#define RPMX_CMR_RX_OVR_BP_EN(x) BIT_ULL((x) + 8)
+#define RPMX_CMR_RX_OVR_BP_BP(x) BIT_ULL((x) + 4)
+#define RPMX_CMR_CHAN_MSK_OR 0x4118
+#define RPMX_MTI_STAT_RX_STAT_PAGES_COUNTERX 0x12000
+#define RPMX_MTI_STAT_TX_STAT_PAGES_COUNTERX 0x13000
+#define RPMX_MTI_STAT_DATA_HI_CDC 0x10038
+
+#define RPM_LMAC_FWI 0xa
+#define RPM_TX_EN BIT_ULL(0)
+#define RPM_RX_EN BIT_ULL(1)
+#define RPMX_CMRX_PRT_CBFC_CTL 0x5B08
+#define RPMX_CMRX_PRT_CBFC_CTL_LOGL_EN_RX_SHIFT 33
+#define RPMX_CMRX_PRT_CBFC_CTL_PHYS_BP_SHIFT 16
+#define RPMX_CMRX_PRT_CBFC_CTL_LOGL_EN_TX_SHIFT 0
+#define RPM_PFC_CLASS_MASK GENMASK_ULL(48, 33)
+#define RPMX_MTI_MAC100X_CL89_QUANTA_THRESH 0x8128
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_PAD_EN BIT_ULL(11)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE BIT_ULL(8)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD BIT_ULL(7)
+#define RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA 0x80A8
+#define RPMX_MTI_MAC100X_CL89_PAUSE_QUANTA 0x8108
+#define RPM_DEFAULT_PAUSE_TIME 0x7FF
+
+#define RPMX_MTI_MAC100X_XIF_MODE 0x8100
+#define RPMX_ONESTEP_ENABLE BIT_ULL(5)
+#define RPMX_TS_BINARY_MODE BIT_ULL(11)
+
+/* Function Declarations */
+int rpm_get_nr_lmacs(void *rpmd);
+u8 rpm_get_lmac_type(void *rpmd, int lmac_id);
+u32 rpm_get_lmac_fifo_len(void *rpmd, int lmac_id);
+int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable);
+void rpm_lmac_enadis_rx_pause_fwding(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_get_pause_frm_status(void *cgxd, int lmac_id, u8 *tx_pause,
+ u8 *rx_pause);
+void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
+ u8 rx_pause);
+int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat);
+int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat);
+void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_rx_tx_enable(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_tx_enable(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause,
+ u16 pfc_en);
+int rpm_lmac_get_pfc_frm_cfg(void *rpmd, int lmac_id, u8 *tx_pause,
+ u8 *rx_pause);
+#endif /* RPM_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
new file mode 100644
index 000000000..d88d86bf0
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -0,0 +1,3426 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <linux/sysfs.h>
+
+#include "cgx.h"
+#include "rvu.h"
+#include "rvu_reg.h"
+#include "ptp.h"
+#include "mcs.h"
+
+#include "rvu_trace.h"
+#include "rvu_npc_hash.h"
+
+#define DRV_NAME "rvu_af"
+#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver"
+
+static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct rvu_block *block, int lf);
+static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct rvu_block *block, int lf);
+static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc);
+
+static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
+ int type, int num,
+ void (mbox_handler)(struct work_struct *),
+ void (mbox_up_handler)(struct work_struct *));
+enum {
+ TYPE_AFVF,
+ TYPE_AFPF,
+};
+
+/* Supported devices */
+static const struct pci_device_id rvu_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) },
+ { 0, } /* end of table */
+};
+
+MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
+MODULE_DESCRIPTION(DRV_STRING);
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, rvu_id_table);
+
+static char *mkex_profile; /* MKEX profile name */
+module_param(mkex_profile, charp, 0000);
+MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");
+
+static char *kpu_profile; /* KPU profile name */
+module_param(kpu_profile, charp, 0000);
+MODULE_PARM_DESC(kpu_profile, "KPU profile name string");
+
+static void rvu_setup_hw_capabilities(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ hw->cap.nix_tx_aggr_lvl = NIX_TXSCH_LVL_TL1;
+ hw->cap.nix_fixed_txschq_mapping = false;
+ hw->cap.nix_shaping = true;
+ hw->cap.nix_tx_link_bp = true;
+ hw->cap.nix_rx_multicast = true;
+ hw->cap.nix_shaper_toggle_wait = false;
+ hw->cap.npc_hash_extract = false;
+ hw->cap.npc_exact_match_enabled = false;
+ hw->rvu = rvu;
+
+ if (is_rvu_pre_96xx_C0(rvu)) {
+ hw->cap.nix_fixed_txschq_mapping = true;
+ hw->cap.nix_txsch_per_cgx_lmac = 4;
+ hw->cap.nix_txsch_per_lbk_lmac = 132;
+ hw->cap.nix_txsch_per_sdp_lmac = 76;
+ hw->cap.nix_shaping = false;
+ hw->cap.nix_tx_link_bp = false;
+ if (is_rvu_96xx_A0(rvu) || is_rvu_95xx_A0(rvu))
+ hw->cap.nix_rx_multicast = false;
+ }
+ if (!is_rvu_pre_96xx_C0(rvu))
+ hw->cap.nix_shaper_toggle_wait = true;
+
+ if (!is_rvu_otx2(rvu))
+ hw->cap.per_pf_mbox_regs = true;
+
+ if (is_rvu_npc_hash_extract_en(rvu))
+ hw->cap.npc_hash_extract = true;
+}
+
+/* Poll an RVU block's register 'offset' for a 'zero'
+ * or 'nonzero' value at the bits specified by 'mask'
+ */
+int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
+{
+ unsigned long timeout = jiffies + usecs_to_jiffies(20000);
+ bool twice = false;
+ void __iomem *reg;
+ u64 reg_val;
+
+ reg = rvu->afreg_base + ((block << 28) | offset);
+again:
+ reg_val = readq(reg);
+ if (zero && !(reg_val & mask))
+ return 0;
+ if (!zero && (reg_val & mask))
+ return 0;
+ if (time_before(jiffies, timeout)) {
+ usleep_range(1, 5);
+ goto again;
+ }
+	/* If the CPU was scheduled out before the 'time_before' check
+	 * above and scheduled back in only after jiffies crossed the
+	 * timeout value, check once more whether HW has completed the
+	 * operation in the meantime.
+	 */
+ if (!twice) {
+ twice = true;
+ goto again;
+ }
+ return -EBUSY;
+}
+
+int rvu_alloc_rsrc(struct rsrc_bmap *rsrc)
+{
+ int id;
+
+ if (!rsrc->bmap)
+ return -EINVAL;
+
+ id = find_first_zero_bit(rsrc->bmap, rsrc->max);
+ if (id >= rsrc->max)
+ return -ENOSPC;
+
+ __set_bit(id, rsrc->bmap);
+
+ return id;
+}
+
+int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc)
+{
+ int start;
+
+ if (!rsrc->bmap)
+ return -EINVAL;
+
+ start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
+ if (start >= rsrc->max)
+ return -ENOSPC;
+
+ bitmap_set(rsrc->bmap, start, nrsrc);
+ return start;
+}
+
+static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
+{
+ if (!rsrc->bmap)
+ return;
+ if (start >= rsrc->max)
+ return;
+
+ bitmap_clear(rsrc->bmap, start, nrsrc);
+}
+
+bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc)
+{
+ int start;
+
+ if (!rsrc->bmap)
+ return false;
+
+ start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
+ if (start >= rsrc->max)
+ return false;
+
+ return true;
+}
+
+void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id)
+{
+ if (!rsrc->bmap)
+ return;
+
+ __clear_bit(id, rsrc->bmap);
+}
+
+int rvu_rsrc_free_count(struct rsrc_bmap *rsrc)
+{
+ int used;
+
+ if (!rsrc->bmap)
+ return 0;
+
+ used = bitmap_weight(rsrc->bmap, rsrc->max);
+ return (rsrc->max - used);
+}
+
+bool is_rsrc_free(struct rsrc_bmap *rsrc, int id)
+{
+ if (!rsrc->bmap)
+ return false;
+
+ return !test_bit(id, rsrc->bmap);
+}
+
+int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
+{
+ rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max),
+ sizeof(long), GFP_KERNEL);
+ if (!rsrc->bmap)
+ return -ENOMEM;
+ return 0;
+}
+
+void rvu_free_bitmap(struct rsrc_bmap *rsrc)
+{
+ kfree(rsrc->bmap);
+}
+
+/* Get block LF's HW index from a PF_FUNC's block slot number */
+int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
+{
+ u16 match = 0;
+ int lf;
+
+ mutex_lock(&rvu->rsrc_lock);
+ for (lf = 0; lf < block->lf.max; lf++) {
+ if (block->fn_map[lf] == pcifunc) {
+ if (slot == match) {
+ mutex_unlock(&rvu->rsrc_lock);
+ return lf;
+ }
+ match++;
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ return -ENODEV;
+}
+
+/* Convert BLOCK_TYPE_E to a BLOCK_ADDR_E.
+ * Some silicon variants of OcteonTX2 support
+ * multiple blocks of the same type.
+ *
+ * @pcifunc has to be zero when no LF is yet attached.
+ *
+ * If a pcifunc has LFs attached from multiple blocks of the same type, then
+ * the blkaddr of the first encountered block is returned.
+ */
+int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
+{
+ int devnum, blkaddr = -ENODEV;
+ u64 cfg, reg;
+ bool is_pf;
+
+ switch (blktype) {
+ case BLKTYPE_NPC:
+ blkaddr = BLKADDR_NPC;
+ goto exit;
+ case BLKTYPE_NPA:
+ blkaddr = BLKADDR_NPA;
+ goto exit;
+ case BLKTYPE_NIX:
+ /* For now assume NIX0 */
+ if (!pcifunc) {
+ blkaddr = BLKADDR_NIX0;
+ goto exit;
+ }
+ break;
+ case BLKTYPE_SSO:
+ blkaddr = BLKADDR_SSO;
+ goto exit;
+ case BLKTYPE_SSOW:
+ blkaddr = BLKADDR_SSOW;
+ goto exit;
+ case BLKTYPE_TIM:
+ blkaddr = BLKADDR_TIM;
+ goto exit;
+ case BLKTYPE_CPT:
+ /* For now assume CPT0 */
+ if (!pcifunc) {
+ blkaddr = BLKADDR_CPT0;
+ goto exit;
+ }
+ break;
+ }
+
+ /* Check if this is a RVU PF or VF */
+ if (pcifunc & RVU_PFVF_FUNC_MASK) {
+ is_pf = false;
+ devnum = rvu_get_hwvf(rvu, pcifunc);
+ } else {
+ is_pf = true;
+ devnum = rvu_get_pf(pcifunc);
+ }
+
+ /* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' or
+ * 'BLKADDR_NIX1'.
+ */
+ if (blktype == BLKTYPE_NIX) {
+ reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(0) :
+ RVU_PRIV_HWVFX_NIXX_CFG(0);
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
+ if (cfg) {
+ blkaddr = BLKADDR_NIX0;
+ goto exit;
+ }
+
+ reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(1) :
+ RVU_PRIV_HWVFX_NIXX_CFG(1);
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
+ if (cfg)
+ blkaddr = BLKADDR_NIX1;
+ }
+
+ if (blktype == BLKTYPE_CPT) {
+ reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(0) :
+ RVU_PRIV_HWVFX_CPTX_CFG(0);
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
+ if (cfg) {
+ blkaddr = BLKADDR_CPT0;
+ goto exit;
+ }
+
+ reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(1) :
+ RVU_PRIV_HWVFX_CPTX_CFG(1);
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
+ if (cfg)
+ blkaddr = BLKADDR_CPT1;
+ }
+
+exit:
+ if (is_block_implemented(rvu->hw, blkaddr))
+ return blkaddr;
+ return -ENODEV;
+}
+
+static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct rvu_block *block, u16 pcifunc,
+ u16 lf, bool attach)
+{
+ int devnum, num_lfs = 0;
+ bool is_pf;
+ u64 reg;
+
+ if (lf >= block->lf.max) {
+ dev_err(&rvu->pdev->dev,
+ "%s: FATAL: LF %d is >= %s's max lfs i.e %d\n",
+ __func__, lf, block->name, block->lf.max);
+ return;
+ }
+
+ /* Check if this is for a RVU PF or VF */
+ if (pcifunc & RVU_PFVF_FUNC_MASK) {
+ is_pf = false;
+ devnum = rvu_get_hwvf(rvu, pcifunc);
+ } else {
+ is_pf = true;
+ devnum = rvu_get_pf(pcifunc);
+ }
+
+ block->fn_map[lf] = attach ? pcifunc : 0;
+
+ switch (block->addr) {
+ case BLKADDR_NPA:
+ pfvf->npalf = attach ? true : false;
+ num_lfs = pfvf->npalf;
+ break;
+ case BLKADDR_NIX0:
+ case BLKADDR_NIX1:
+ pfvf->nixlf = attach ? true : false;
+ num_lfs = pfvf->nixlf;
+ break;
+ case BLKADDR_SSO:
+ attach ? pfvf->sso++ : pfvf->sso--;
+ num_lfs = pfvf->sso;
+ break;
+ case BLKADDR_SSOW:
+ attach ? pfvf->ssow++ : pfvf->ssow--;
+ num_lfs = pfvf->ssow;
+ break;
+ case BLKADDR_TIM:
+ attach ? pfvf->timlfs++ : pfvf->timlfs--;
+ num_lfs = pfvf->timlfs;
+ break;
+ case BLKADDR_CPT0:
+ attach ? pfvf->cptlfs++ : pfvf->cptlfs--;
+ num_lfs = pfvf->cptlfs;
+ break;
+ case BLKADDR_CPT1:
+ attach ? pfvf->cpt1_lfs++ : pfvf->cpt1_lfs--;
+ num_lfs = pfvf->cpt1_lfs;
+ break;
+ }
+
+ reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg;
+ rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs);
+}
+
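+/* A pcifunc packs the PF number in its upper bits (RVU_PFVF_PF_SHIFT,
+ * RVU_PFVF_PF_MASK) and the function number in its lower bits, where
+ * function 0 denotes the PF itself and VF 'n' is encoded as function n + 1.
+ */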
+inline int rvu_get_pf(u16 pcifunc)
+{
+ return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
+}
+
+void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
+{
+ u64 cfg;
+
+ /* Get numVFs attached to this PF and first HWVF */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+ if (numvfs)
+ *numvfs = (cfg >> 12) & 0xFF;
+ if (hwvf)
+ *hwvf = cfg & 0xFFF;
+}
+
+int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
+{
+ int pf, func;
+ u64 cfg;
+
+ pf = rvu_get_pf(pcifunc);
+ func = pcifunc & RVU_PFVF_FUNC_MASK;
+
+ /* Get first HWVF attached to this PF */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
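+	/* e.g. if this PF's first HWVF is 64, its VF0 (func == 1) maps to
+	 * HWVF 64, VF1 to HWVF 65, and so on.
+	 */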
+
+ return ((cfg & 0xFFF) + func - 1);
+}
+
+struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
+{
+ /* Check if it is a PF or VF */
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)];
+ else
+ return &rvu->pf[rvu_get_pf(pcifunc)];
+}
+
+static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc)
+{
+ int pf, vf, nvfs;
+ u64 cfg;
+
+ pf = rvu_get_pf(pcifunc);
+ if (pf >= rvu->hw->total_pfs)
+ return false;
+
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ return true;
+
+ /* Check if VF is within number of VFs attached to this PF */
+ vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+ nvfs = (cfg >> 12) & 0xFF;
+ if (vf >= nvfs)
+ return false;
+
+ return true;
+}
+
+bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr)
+{
+ struct rvu_block *block;
+
+ if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT)
+ return false;
+
+ block = &hw->block[blkaddr];
+ return block->implemented;
+}
+
+static void rvu_check_block_implemented(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkid;
+ u64 cfg;
+
+ /* For each block check if 'implemented' bit is set */
+ for (blkid = 0; blkid < BLK_COUNT; blkid++) {
+ block = &hw->block[blkid];
+ cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid));
+ if (cfg & BIT_ULL(11))
+ block->implemented = true;
+ }
+}
+
+static void rvu_setup_rvum_blk_revid(struct rvu *rvu)
+{
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM),
+ RVU_BLK_RVUM_REVID);
+}
+
+static void rvu_clear_rvum_blk_revid(struct rvu *rvu)
+{
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM), 0x00);
+}
+
+int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
+{
+ int err;
+
+ if (!block->implemented)
+ return 0;
+
+ rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12));
+ err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12),
+ true);
+ return err;
+}
+
+static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
+{
+ struct rvu_block *block = &rvu->hw->block[blkaddr];
+ int err;
+
+ if (!block->implemented)
+ return;
+
+ rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
+ err = rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
+ if (err) {
+ dev_err(rvu->dev, "HW block:%d reset timeout retrying again\n", blkaddr);
+ while (rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true) == -EBUSY)
+ ;
+ }
+}
+
+static void rvu_reset_all_blocks(struct rvu *rvu)
+{
+ /* Do a HW reset of all RVU blocks */
+ rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NIX1, NIX_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_CPT1, CPT_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, NDC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, NDC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC_NIX1_RX, NDC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC_NIX1_TX, NDC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST);
+}
+
+static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
+{
+ struct rvu_pfvf *pfvf;
+ u64 cfg;
+ int lf;
+
+ for (lf = 0; lf < block->lf.max; lf++) {
+ cfg = rvu_read64(rvu, block->addr,
+ block->lfcfg_reg | (lf << block->lfshift));
+ if (!(cfg & BIT_ULL(63)))
+ continue;
+
+ /* Set this resource as being used */
+ __set_bit(lf, block->lf.bmap);
+
+		/* Find out to whom this LF is attached */
+ pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF);
+ rvu_update_rsrc_map(rvu, pfvf, block,
+ (cfg >> 8) & 0xFFFF, lf, true);
+
+ /* Set start MSIX vector for this LF within this PF/VF */
+ rvu_set_msix_offset(rvu, pfvf, block, lf);
+ }
+}
+
+static void rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf)
+{
+ int min_vecs;
+
+ if (!vf)
+ goto check_pf;
+
+ if (!nvecs) {
+ dev_warn(rvu->dev,
+ "PF%d:VF%d is configured with zero msix vectors, %d\n",
+ pf, vf - 1, nvecs);
+ }
+ return;
+
+check_pf:
+ if (pf == 0)
+ min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT;
+ else
+ min_vecs = RVU_PF_INT_VEC_CNT;
+
+ if (!(nvecs < min_vecs))
+ return;
+ dev_warn(rvu->dev,
+ "PF%d is configured with too few vectors, %d, min is %d\n",
+ pf, nvecs, min_vecs);
+}
+
+static int rvu_setup_msix_resources(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int pf, vf, numvfs, hwvf, err;
+ int nvecs, offset, max_msix;
+ struct rvu_pfvf *pfvf;
+ u64 cfg, phy_addr;
+ dma_addr_t iova;
+
+ for (pf = 0; pf < hw->total_pfs; pf++) {
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+ /* If PF is not enabled, nothing to do */
+ if (!((cfg >> 20) & 0x01))
+ continue;
+
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
+
+ pfvf = &rvu->pf[pf];
+ /* Get num of MSIX vectors attached to this PF */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf));
+ pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1;
+ rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0);
+
+ /* Alloc msix bitmap for this PF */
+ err = rvu_alloc_bitmap(&pfvf->msix);
+ if (err)
+ return err;
+
+ /* Allocate memory for MSIX vector to RVU block LF mapping */
+ pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!pfvf->msix_lfmap)
+ return -ENOMEM;
+
+ /* For PF0 (AF) firmware will set msix vector offsets for
+ * AF, block AF and PF0_INT vectors, so jump to VFs.
+ */
+ if (!pf)
+ goto setup_vfmsix;
+
+ /* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors.
+ * These are allocated on driver init and never freed,
+ * so no need to set 'msix_lfmap' for these.
+ */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf));
+ nvecs = (cfg >> 12) & 0xFF;
+ cfg &= ~0x7FFULL;
+ offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_PFX_INT_CFG(pf), cfg | offset);
+setup_vfmsix:
+ /* Alloc msix bitmap for VFs */
+ for (vf = 0; vf < numvfs; vf++) {
+ pfvf = &rvu->hwvf[hwvf + vf];
+ /* Get num of MSIX vectors attached to this VF */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_PFX_MSIX_CFG(pf));
+ pfvf->msix.max = (cfg & 0xFFF) + 1;
+ rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1);
+
+ /* Alloc msix bitmap for this VF */
+ err = rvu_alloc_bitmap(&pfvf->msix);
+ if (err)
+ return err;
+
+ pfvf->msix_lfmap =
+ devm_kcalloc(rvu->dev, pfvf->msix.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!pfvf->msix_lfmap)
+ return -ENOMEM;
+
+ /* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors.
+ * These are allocated on driver init and never freed,
+ * so no need to set 'msix_lfmap' for these.
+ */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_HWVFX_INT_CFG(hwvf + vf));
+ nvecs = (cfg >> 12) & 0xFF;
+ cfg &= ~0x7FFULL;
+ offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_HWVFX_INT_CFG(hwvf + vf),
+ cfg | offset);
+ }
+ }
+
+ /* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence
+ * create an IOMMU mapping for the physical address configured by
+ * firmware and reconfig RVU_AF_MSIXTR_BASE with IOVA.
+ */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
+ max_msix = cfg & 0xFFFFF;
+ if (rvu->fwdata && rvu->fwdata->msixtr_base)
+ phy_addr = rvu->fwdata->msixtr_base;
+ else
+ phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);
+
+ iova = dma_map_resource(rvu->dev, phy_addr,
+ max_msix * PCI_MSIX_ENTRY_SIZE,
+ DMA_BIDIRECTIONAL, 0);
+
+ if (dma_mapping_error(rvu->dev, iova))
+ return -ENOMEM;
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
+ rvu->msix_base_iova = iova;
+ rvu->msixtr_base_phy = phy_addr;
+
+ return 0;
+}
+
+static void rvu_reset_msix(struct rvu *rvu)
+{
+ /* Restore msixtr base register */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE,
+ rvu->msixtr_base_phy);
+}
+
+static void rvu_free_hw_resources(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ struct rvu_pfvf *pfvf;
+ int id, max_msix;
+ u64 cfg;
+
+ rvu_npa_freemem(rvu);
+ rvu_npc_freemem(rvu);
+ rvu_nix_freemem(rvu);
+
+ /* Free block LF bitmaps */
+ for (id = 0; id < BLK_COUNT; id++) {
+ block = &hw->block[id];
+ kfree(block->lf.bmap);
+ }
+
+ /* Free MSIX bitmaps */
+ for (id = 0; id < hw->total_pfs; id++) {
+ pfvf = &rvu->pf[id];
+ kfree(pfvf->msix.bmap);
+ }
+
+ for (id = 0; id < hw->total_vfs; id++) {
+ pfvf = &rvu->hwvf[id];
+ kfree(pfvf->msix.bmap);
+ }
+
+ /* Unmap MSIX vector base IOVA mapping */
+ if (!rvu->msix_base_iova)
+ return;
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
+ max_msix = cfg & 0xFFFFF;
+ dma_unmap_resource(rvu->dev, rvu->msix_base_iova,
+ max_msix * PCI_MSIX_ENTRY_SIZE,
+ DMA_BIDIRECTIONAL, 0);
+
+ rvu_reset_msix(rvu);
+ mutex_destroy(&rvu->rsrc_lock);
+}
+
+static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int pf, vf, numvfs, hwvf;
+ struct rvu_pfvf *pfvf;
+ u64 *mac;
+
+ for (pf = 0; pf < hw->total_pfs; pf++) {
+		/* For PF0 (AF), assign MAC addresses only to VFs (LBK VFs) */
+ if (!pf)
+ goto lbkvf;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ continue;
+ /* Assign MAC address to PF */
+ pfvf = &rvu->pf[pf];
+ if (rvu->fwdata && pf < PF_MACNUM_MAX) {
+ mac = &rvu->fwdata->pf_macs[pf];
+ if (*mac)
+ u64_to_ether_addr(*mac, pfvf->mac_addr);
+ else
+ eth_random_addr(pfvf->mac_addr);
+ } else {
+ eth_random_addr(pfvf->mac_addr);
+ }
+ ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
+
+lbkvf:
+		/* Assign MAC address to VFs */
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
+ for (vf = 0; vf < numvfs; vf++, hwvf++) {
+ pfvf = &rvu->hwvf[hwvf];
+ if (rvu->fwdata && hwvf < VF_MACNUM_MAX) {
+ mac = &rvu->fwdata->vf_macs[hwvf];
+ if (*mac)
+ u64_to_ether_addr(*mac, pfvf->mac_addr);
+ else
+ eth_random_addr(pfvf->mac_addr);
+ } else {
+ eth_random_addr(pfvf->mac_addr);
+ }
+ ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
+ }
+ }
+}
+
+static int rvu_fwdata_init(struct rvu *rvu)
+{
+ u64 fwdbase;
+ int err;
+
+ /* Get firmware data base address */
+ err = cgx_get_fwdata_base(&fwdbase);
+ if (err)
+ goto fail;
+ rvu->fwdata = ioremap_wc(fwdbase, sizeof(struct rvu_fwdata));
+ if (!rvu->fwdata)
+ goto fail;
+ if (!is_rvu_fwdata_valid(rvu)) {
+ dev_err(rvu->dev,
+ "Mismatch in 'fwdata' struct btw kernel and firmware\n");
+ iounmap(rvu->fwdata);
+ rvu->fwdata = NULL;
+ return -EINVAL;
+ }
+ return 0;
+fail:
+ dev_info(rvu->dev, "Unable to fetch 'fwdata' from firmware\n");
+ return -EIO;
+}
+
+static void rvu_fwdata_exit(struct rvu *rvu)
+{
+ if (rvu->fwdata)
+ iounmap(rvu->fwdata);
+}
+
+static int rvu_setup_nix_hw_resource(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkid;
+ u64 cfg;
+
+ /* Init NIX LF's bitmap */
+ block = &hw->block[blkaddr];
+ if (!block->implemented)
+ return 0;
+ blkid = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
+ block->lf.max = cfg & 0xFFF;
+ block->addr = blkaddr;
+ block->type = BLKTYPE_NIX;
+ block->lfshift = 8;
+ block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_NIXX_CFG(blkid);
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIXX_CFG(blkid);
+ block->lfcfg_reg = NIX_PRIV_LFX_CFG;
+ block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
+ block->lfreset_reg = NIX_AF_LF_RST;
+ block->rvu = rvu;
+ sprintf(block->name, "NIX%d", blkid);
+ rvu->nix_blkaddr[blkid] = blkaddr;
+ return rvu_alloc_bitmap(&block->lf);
+}
+
+static int rvu_setup_cpt_hw_resource(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkid;
+ u64 cfg;
+
+ /* Init CPT LF's bitmap */
+ block = &hw->block[blkaddr];
+ if (!block->implemented)
+ return 0;
+ blkid = (blkaddr == BLKADDR_CPT0) ? 0 : 1;
+ cfg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0);
+ block->lf.max = cfg & 0xFF;
+ block->addr = blkaddr;
+ block->type = BLKTYPE_CPT;
+ block->multislot = true;
+ block->lfshift = 3;
+ block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_CPTX_CFG(blkid);
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPTX_CFG(blkid);
+ block->lfcfg_reg = CPT_PRIV_LFX_CFG;
+ block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
+ block->lfreset_reg = CPT_AF_LF_RST;
+ block->rvu = rvu;
+ sprintf(block->name, "CPT%d", blkid);
+ return rvu_alloc_bitmap(&block->lf);
+}
+
+static void rvu_get_lbk_bufsize(struct rvu *rvu)
+{
+ struct pci_dev *pdev = NULL;
+ void __iomem *base;
+ u64 lbk_const;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_LBK, pdev);
+ if (!pdev)
+ return;
+
+ base = pci_ioremap_bar(pdev, 0);
+ if (!base)
+ goto err_put;
+
+ lbk_const = readq(base + LBK_CONST);
+
+ /* cache fifo size */
+ rvu->hw->lbk_bufsize = FIELD_GET(LBK_CONST_BUF_SIZE, lbk_const);
+
+ iounmap(base);
+err_put:
+ pci_dev_put(pdev);
+}
+
+static int rvu_setup_hw_resources(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkid, err;
+ u64 cfg;
+
+ /* Get HW supported max RVU PF & VF count */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
+ hw->total_pfs = (cfg >> 32) & 0xFF;
+ hw->total_vfs = (cfg >> 20) & 0xFFF;
+ hw->max_vfs_per_pf = (cfg >> 40) & 0xFF;
+
+ /* Init NPA LF's bitmap */
+ block = &hw->block[BLKADDR_NPA];
+ if (!block->implemented)
+ goto nix;
+ cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST);
+ block->lf.max = (cfg >> 16) & 0xFFF;
+ block->addr = BLKADDR_NPA;
+ block->type = BLKTYPE_NPA;
+ block->lfshift = 8;
+ block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG;
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG;
+ block->lfcfg_reg = NPA_PRIV_LFX_CFG;
+ block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG;
+ block->lfreset_reg = NPA_AF_LF_RST;
+ block->rvu = rvu;
+ sprintf(block->name, "NPA");
+ err = rvu_alloc_bitmap(&block->lf);
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate NPA LF bitmap\n", __func__);
+ return err;
+ }
+
+nix:
+ err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX0);
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate NIX0 LFs bitmap\n", __func__);
+ return err;
+ }
+
+ err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX1);
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate NIX1 LFs bitmap\n", __func__);
+ return err;
+ }
+
+ /* Init SSO group's bitmap */
+ block = &hw->block[BLKADDR_SSO];
+ if (!block->implemented)
+ goto ssow;
+ cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST);
+ block->lf.max = cfg & 0xFFFF;
+ block->addr = BLKADDR_SSO;
+ block->type = BLKTYPE_SSO;
+ block->multislot = true;
+ block->lfshift = 3;
+ block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG;
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG;
+ block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG;
+ block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG;
+ block->lfreset_reg = SSO_AF_LF_HWGRP_RST;
+ block->rvu = rvu;
+ sprintf(block->name, "SSO GROUP");
+ err = rvu_alloc_bitmap(&block->lf);
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate SSO LF bitmap\n", __func__);
+ return err;
+ }
+
+ssow:
+ /* Init SSO workslot's bitmap */
+ block = &hw->block[BLKADDR_SSOW];
+ if (!block->implemented)
+ goto tim;
+ block->lf.max = (cfg >> 56) & 0xFF;
+ block->addr = BLKADDR_SSOW;
+ block->type = BLKTYPE_SSOW;
+ block->multislot = true;
+ block->lfshift = 3;
+ block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG;
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG;
+ block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG;
+ block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG;
+ block->lfreset_reg = SSOW_AF_LF_HWS_RST;
+ block->rvu = rvu;
+ sprintf(block->name, "SSOWS");
+ err = rvu_alloc_bitmap(&block->lf);
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate SSOW LF bitmap\n", __func__);
+ return err;
+ }
+
+tim:
+ /* Init TIM LF's bitmap */
+ block = &hw->block[BLKADDR_TIM];
+ if (!block->implemented)
+ goto cpt;
+ cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST);
+ block->lf.max = cfg & 0xFFFF;
+ block->addr = BLKADDR_TIM;
+ block->type = BLKTYPE_TIM;
+ block->multislot = true;
+ block->lfshift = 3;
+ block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG;
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG;
+ block->lfcfg_reg = TIM_PRIV_LFX_CFG;
+ block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG;
+ block->lfreset_reg = TIM_AF_LF_RST;
+ block->rvu = rvu;
+ sprintf(block->name, "TIM");
+ err = rvu_alloc_bitmap(&block->lf);
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate TIM LF bitmap\n", __func__);
+ return err;
+ }
+
+cpt:
+ err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT0);
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate CPT0 LF bitmap\n", __func__);
+ return err;
+ }
+ err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT1);
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate CPT1 LF bitmap\n", __func__);
+ return err;
+ }
+
+ /* Allocate memory for PFVF data */
+ rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs,
+ sizeof(struct rvu_pfvf), GFP_KERNEL);
+ if (!rvu->pf) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate memory for PF's rvu_pfvf struct\n", __func__);
+ return -ENOMEM;
+ }
+
+ rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs,
+ sizeof(struct rvu_pfvf), GFP_KERNEL);
+ if (!rvu->hwvf) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate memory for VF's rvu_pfvf struct\n", __func__);
+ return -ENOMEM;
+ }
+
+ mutex_init(&rvu->rsrc_lock);
+
+ rvu_fwdata_init(rvu);
+
+ err = rvu_setup_msix_resources(rvu);
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to setup MSIX resources\n", __func__);
+ return err;
+ }
+
+ for (blkid = 0; blkid < BLK_COUNT; blkid++) {
+ block = &hw->block[blkid];
+ if (!block->lf.bmap)
+ continue;
+
+ /* Allocate memory for block LF/slot to pcifunc mapping info */
+ block->fn_map = devm_kcalloc(rvu->dev, block->lf.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!block->fn_map) {
+ err = -ENOMEM;
+ goto msix_err;
+ }
+
+ /* Scan all blocks to check if low level firmware has
+ * already provisioned any of the resources to a PF/VF.
+ */
+ rvu_scan_block(rvu, block);
+ }
+
+ err = rvu_set_channels_base(rvu);
+ if (err)
+ goto msix_err;
+
+ err = rvu_npc_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize npc\n", __func__);
+ goto npc_err;
+ }
+
+ err = rvu_cgx_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize cgx\n", __func__);
+ goto cgx_err;
+ }
+
+ err = rvu_npc_exact_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "failed to initialize exact match table\n");
+ return err;
+ }
+
+ /* Assign MACs for CGX mapped functions */
+ rvu_setup_pfvf_macaddress(rvu);
+
+ err = rvu_npa_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize npa\n", __func__);
+ goto npa_err;
+ }
+
+ rvu_get_lbk_bufsize(rvu);
+
+ err = rvu_nix_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize nix\n", __func__);
+ goto nix_err;
+ }
+
+ err = rvu_sdp_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize sdp\n", __func__);
+ goto nix_err;
+ }
+
+ rvu_program_channels(rvu);
+
+ err = rvu_mcs_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize mcs\n", __func__);
+ goto nix_err;
+ }
+
+ return 0;
+
+nix_err:
+ rvu_nix_freemem(rvu);
+npa_err:
+ rvu_npa_freemem(rvu);
+cgx_err:
+ rvu_cgx_exit(rvu);
+npc_err:
+ rvu_npc_freemem(rvu);
+ rvu_fwdata_exit(rvu);
+msix_err:
+ rvu_reset_msix(rvu);
+ return err;
+}
+
+/* NPA and NIX admin queue APIs */
+void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq)
+{
+ if (!aq)
+ return;
+
+ qmem_free(rvu->dev, aq->inst);
+ qmem_free(rvu->dev, aq->res);
+ devm_kfree(rvu->dev, aq);
+}
+
+int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
+ int qsize, int inst_size, int res_size)
+{
+ struct admin_queue *aq;
+ int err;
+
+ *ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL);
+ if (!*ad_queue)
+ return -ENOMEM;
+ aq = *ad_queue;
+
+	/* Alloc memory for instructions, i.e. the AQ itself */
+ err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size);
+ if (err) {
+ devm_kfree(rvu->dev, aq);
+ return err;
+ }
+
+ /* Alloc memory for results */
+ err = qmem_alloc(rvu->dev, &aq->res, qsize, res_size);
+ if (err) {
+ rvu_aq_free(rvu, aq);
+ return err;
+ }
+
+ spin_lock_init(&aq->lock);
+ return 0;
+}
+
+int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
+ struct ready_msg_rsp *rsp)
+{
+ if (rvu->fwdata) {
+ rsp->rclk_freq = rvu->fwdata->rclk;
+ rsp->sclk_freq = rvu->fwdata->sclk;
+ }
+ return 0;
+}
+
+/* Get current count of a RVU block's LF/slots
+ * provisioned to a given RVU func.
+ */
+u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr)
+{
+ switch (blkaddr) {
+ case BLKADDR_NPA:
+ return pfvf->npalf ? 1 : 0;
+ case BLKADDR_NIX0:
+ case BLKADDR_NIX1:
+ return pfvf->nixlf ? 1 : 0;
+ case BLKADDR_SSO:
+ return pfvf->sso;
+ case BLKADDR_SSOW:
+ return pfvf->ssow;
+ case BLKADDR_TIM:
+ return pfvf->timlfs;
+ case BLKADDR_CPT0:
+ return pfvf->cptlfs;
+ case BLKADDR_CPT1:
+ return pfvf->cpt1_lfs;
+ }
+ return 0;
+}
+
+/* Return true if LFs of block type are attached to pcifunc */
+static bool is_blktype_attached(struct rvu_pfvf *pfvf, int blktype)
+{
+ switch (blktype) {
+ case BLKTYPE_NPA:
+ return pfvf->npalf ? 1 : 0;
+ case BLKTYPE_NIX:
+ return pfvf->nixlf ? 1 : 0;
+ case BLKTYPE_SSO:
+ return !!pfvf->sso;
+ case BLKTYPE_SSOW:
+ return !!pfvf->ssow;
+ case BLKTYPE_TIM:
+ return !!pfvf->timlfs;
+ case BLKTYPE_CPT:
+ return pfvf->cptlfs || pfvf->cpt1_lfs;
+ }
+
+ return false;
+}
+
+bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
+{
+ struct rvu_pfvf *pfvf;
+
+ if (!is_pf_func_valid(rvu, pcifunc))
+ return false;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ /* Check if this PFFUNC has a LF of type blktype attached */
+ if (!is_blktype_attached(pfvf, blktype))
+ return false;
+
+ return true;
+}
+
+static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
+ int pcifunc, int slot)
+{
+ u64 val;
+
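+	/* Encode pcifunc and slot; bit 13 kicks off the lookup and is
+	 * polled below until the hardware clears it.
+	 */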
+ val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13);
+ rvu_write64(rvu, block->addr, block->lookup_reg, val);
+	/* Wait for the lookup to finish.
+	 * TODO: add a timeout here instead of polling indefinitely.
+	 */
+ while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13))
+ ;
+
+ val = rvu_read64(rvu, block->addr, block->lookup_reg);
+
+ /* Check LF valid bit */
+ if (!(val & (1ULL << 12)))
+ return -1;
+
+ return (val & 0xFFF);
+}
+
+int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc,
+ u16 global_slot, u16 *slot_in_block)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int numlfs, total_lfs = 0, nr_blocks = 0;
+ int i, num_blkaddr[BLK_COUNT] = { 0 };
+ struct rvu_block *block;
+ int blkaddr;
+ u16 start_slot;
+
+ if (!is_blktype_attached(pfvf, blktype))
+ return -ENODEV;
+
+ /* Get all the block addresses from which LFs are attached to
+ * the given pcifunc in num_blkaddr[].
+ */
+ for (blkaddr = BLKADDR_RVUM; blkaddr < BLK_COUNT; blkaddr++) {
+ block = &rvu->hw->block[blkaddr];
+ if (block->type != blktype)
+ continue;
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ continue;
+
+ numlfs = rvu_get_rsrc_mapcount(pfvf, blkaddr);
+ if (numlfs) {
+ total_lfs += numlfs;
+ num_blkaddr[nr_blocks] = blkaddr;
+ nr_blocks++;
+ }
+ }
+
+ if (global_slot >= total_lfs)
+ return -ENODEV;
+
+	/* Based on the given global slot number, retrieve the correct
+	 * block address out of all attached block addresses, and the
+	 * slot number within that block.
+	 */
+ total_lfs = 0;
+ blkaddr = -ENODEV;
+ for (i = 0; i < nr_blocks; i++) {
+ numlfs = rvu_get_rsrc_mapcount(pfvf, num_blkaddr[i]);
+ total_lfs += numlfs;
+ if (global_slot < total_lfs) {
+ blkaddr = num_blkaddr[i];
+ start_slot = total_lfs - numlfs;
+ *slot_in_block = global_slot - start_slot;
+ break;
+ }
+ }
+
+ return blkaddr;
+}
+
+static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int slot, lf, num_lfs;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc);
+ if (blkaddr < 0)
+ return;
+
+ if (blktype == BLKTYPE_NIX)
+ rvu_nix_reset_mac(pfvf, pcifunc);
+
+ block = &hw->block[blkaddr];
+
+ num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
+ if (!num_lfs)
+ return;
+
+ for (slot = 0; slot < num_lfs; slot++) {
+ lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot);
+ if (lf < 0) /* This should never happen */
+ continue;
+
+ /* Disable the LF */
+ rvu_write64(rvu, blkaddr, block->lfcfg_reg |
+ (lf << block->lfshift), 0x00ULL);
+
+ /* Update SW maintained mapping info as well */
+ rvu_update_rsrc_map(rvu, pfvf, block,
+ pcifunc, lf, false);
+
+ /* Free the resource */
+ rvu_free_rsrc(&block->lf, lf);
+
+ /* Clear MSIX vector offset for this LF */
+ rvu_clear_msix_offset(rvu, pfvf, block, lf);
+ }
+}
+
+static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
+ u16 pcifunc)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ bool detach_all = true;
+ struct rvu_block *block;
+ int blkid;
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ /* Check for partial resource detach */
+ if (detach && detach->partial)
+ detach_all = false;
+
+ /* Check for RVU block's LFs attached to this func,
+ * if so, detach them.
+ */
+ for (blkid = 0; blkid < BLK_COUNT; blkid++) {
+ block = &hw->block[blkid];
+ if (!block->lf.bmap)
+ continue;
+ if (!detach_all && detach) {
+ if (blkid == BLKADDR_NPA && !detach->npalf)
+ continue;
+ else if ((blkid == BLKADDR_NIX0) && !detach->nixlf)
+ continue;
+ else if ((blkid == BLKADDR_NIX1) && !detach->nixlf)
+ continue;
+ else if ((blkid == BLKADDR_SSO) && !detach->sso)
+ continue;
+ else if ((blkid == BLKADDR_SSOW) && !detach->ssow)
+ continue;
+ else if ((blkid == BLKADDR_TIM) && !detach->timlfs)
+ continue;
+ else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs)
+ continue;
+ else if ((blkid == BLKADDR_CPT1) && !detach->cptlfs)
+ continue;
+ }
+ rvu_detach_block(rvu, pcifunc, block->type);
+ }
+
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+}
+
+int rvu_mbox_handler_detach_resources(struct rvu *rvu,
+ struct rsrc_detach *detach,
+ struct msg_rsp *rsp)
+{
+ return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
+}
+
+int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int blkaddr = BLKADDR_NIX0, vf;
+ struct rvu_pfvf *pf;
+
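+	/* Get the parent PF's context by masking off the VF function bits */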
+ pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
+
+	/* All CGX mapped PFs have their NIX block assigned during init */
+ if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
+ blkaddr = pf->nix_blkaddr;
+ } else if (is_afvf(pcifunc)) {
+ vf = pcifunc - 1;
+		/* Assign NIX based on VF number. All even numbered VFs get
+		 * NIX0 and odd numbered VFs get NIX1.
+		 */
+ blkaddr = (vf & 1) ? BLKADDR_NIX1 : BLKADDR_NIX0;
+ /* NIX1 is not present on all silicons */
+ if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
+ blkaddr = BLKADDR_NIX0;
+ }
+
+ /* if SDP1 then the blkaddr is NIX1 */
+ if (is_sdp_pfvf(pcifunc) && pf->sdp_info->node_id == 1)
+ blkaddr = BLKADDR_NIX1;
+
+ switch (blkaddr) {
+ case BLKADDR_NIX1:
+ pfvf->nix_blkaddr = BLKADDR_NIX1;
+ pfvf->nix_rx_intf = NIX_INTFX_RX(1);
+ pfvf->nix_tx_intf = NIX_INTFX_TX(1);
+ break;
+ case BLKADDR_NIX0:
+ default:
+ pfvf->nix_blkaddr = BLKADDR_NIX0;
+ pfvf->nix_rx_intf = NIX_INTFX_RX(0);
+ pfvf->nix_tx_intf = NIX_INTFX_TX(0);
+ break;
+ }
+
+ return pfvf->nix_blkaddr;
+}
+
+static int rvu_get_attach_blkaddr(struct rvu *rvu, int blktype,
+ u16 pcifunc, struct rsrc_attach *attach)
+{
+ int blkaddr;
+
+ switch (blktype) {
+ case BLKTYPE_NIX:
+ blkaddr = rvu_get_nix_blkaddr(rvu, pcifunc);
+ break;
+ case BLKTYPE_CPT:
+ if (attach->hdr.ver < RVU_MULTI_BLK_VER)
+ return rvu_get_blkaddr(rvu, blktype, 0);
+ blkaddr = attach->cpt_blkaddr ? attach->cpt_blkaddr :
+ BLKADDR_CPT0;
+ if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
+ return -ENODEV;
+ break;
+ default:
+ return rvu_get_blkaddr(rvu, blktype, 0);
+ }
+
+ if (is_block_implemented(rvu->hw, blkaddr))
+ return blkaddr;
+
+ return -ENODEV;
+}
+
+static void rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype,
+ int num_lfs, struct rsrc_attach *attach)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int slot, lf;
+ int blkaddr;
+ u64 cfg;
+
+ if (!num_lfs)
+ return;
+
+ blkaddr = rvu_get_attach_blkaddr(rvu, blktype, pcifunc, attach);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+ if (!block->lf.bmap)
+ return;
+
+ for (slot = 0; slot < num_lfs; slot++) {
+ /* Allocate the resource */
+ lf = rvu_alloc_rsrc(&block->lf);
+ if (lf < 0)
+ return;
+
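+		/* Attach the LF: set the enable bit (63) and record the
+		 * owning pcifunc and slot in the LF config register.
+		 */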
+ cfg = (1ULL << 63) | (pcifunc << 8) | slot;
+ rvu_write64(rvu, blkaddr, block->lfcfg_reg |
+ (lf << block->lfshift), cfg);
+ rvu_update_rsrc_map(rvu, pfvf, block,
+ pcifunc, lf, true);
+
+ /* Set start MSIX vector for this LF within this PF/VF */
+ rvu_set_msix_offset(rvu, pfvf, block, lf);
+ }
+}
+
+static int rvu_check_rsrc_availability(struct rvu *rvu,
+ struct rsrc_attach *req, u16 pcifunc)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int free_lfs, mappedlfs, blkaddr;
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+
+ /* Only one NPA LF can be attached */
+ if (req->npalf && !is_blktype_attached(pfvf, BLKTYPE_NPA)) {
+ block = &hw->block[BLKADDR_NPA];
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ if (!free_lfs)
+ goto fail;
+ } else if (req->npalf) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid req, already has NPA\n",
+ pcifunc);
+ return -EINVAL;
+ }
+
+ /* Only one NIX LF can be attached */
+ if (req->nixlf && !is_blktype_attached(pfvf, BLKTYPE_NIX)) {
+ blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_NIX,
+ pcifunc, req);
+ if (blkaddr < 0)
+ return blkaddr;
+ block = &hw->block[blkaddr];
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ if (!free_lfs)
+ goto fail;
+ } else if (req->nixlf) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid req, already has NIX\n",
+ pcifunc);
+ return -EINVAL;
+ }
+
+ if (req->sso) {
+ block = &hw->block[BLKADDR_SSO];
+ /* Is request within limits ? */
+ if (req->sso > block->lf.max) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid SSO req, %d > max %d\n",
+ pcifunc, req->sso, block->lf.max);
+ return -EINVAL;
+ }
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ /* Check if additional resources are available */
+ if (req->sso > mappedlfs &&
+ ((req->sso - mappedlfs) > free_lfs))
+ goto fail;
+ }
+
+ if (req->ssow) {
+ block = &hw->block[BLKADDR_SSOW];
+ if (req->ssow > block->lf.max) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid SSOW req, %d > max %d\n",
+				 pcifunc, req->ssow, block->lf.max);
+ return -EINVAL;
+ }
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ if (req->ssow > mappedlfs &&
+ ((req->ssow - mappedlfs) > free_lfs))
+ goto fail;
+ }
+
+ if (req->timlfs) {
+ block = &hw->block[BLKADDR_TIM];
+ if (req->timlfs > block->lf.max) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid TIMLF req, %d > max %d\n",
+ pcifunc, req->timlfs, block->lf.max);
+ return -EINVAL;
+ }
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ if (req->timlfs > mappedlfs &&
+ ((req->timlfs - mappedlfs) > free_lfs))
+ goto fail;
+ }
+
+ if (req->cptlfs) {
+ blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_CPT,
+ pcifunc, req);
+ if (blkaddr < 0)
+ return blkaddr;
+ block = &hw->block[blkaddr];
+ if (req->cptlfs > block->lf.max) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid CPTLF req, %d > max %d\n",
+ pcifunc, req->cptlfs, block->lf.max);
+ return -EINVAL;
+ }
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ if (req->cptlfs > mappedlfs &&
+ ((req->cptlfs - mappedlfs) > free_lfs))
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ dev_info(rvu->dev, "Request for %s failed\n", block->name);
+ return -ENOSPC;
+}
+
+static bool rvu_attach_from_same_block(struct rvu *rvu, int blktype,
+ struct rsrc_attach *attach)
+{
+ int blkaddr, num_lfs;
+
+ blkaddr = rvu_get_attach_blkaddr(rvu, blktype,
+ attach->hdr.pcifunc, attach);
+ if (blkaddr < 0)
+ return false;
+
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, attach->hdr.pcifunc),
+ blkaddr);
+ /* Requester already has LFs from given block ? */
+ return !!num_lfs;
+}
+
+int rvu_mbox_handler_attach_resources(struct rvu *rvu,
+ struct rsrc_attach *attach,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = attach->hdr.pcifunc;
+ int err;
+
+ /* If first request, detach all existing attached resources */
+ if (!attach->modify)
+ rvu_detach_rsrcs(rvu, NULL, pcifunc);
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ /* Check if the request can be accommodated */
+ err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
+ if (err)
+ goto exit;
+
+ /* Now attach the requested resources */
+ if (attach->npalf)
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1, attach);
+
+ if (attach->nixlf)
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1, attach);
+
+ if (attach->sso) {
+		/* An RVU func doesn't know which exact LF or slot is attached
+		 * to it, it always sees them as slots 0,1,2. So for a 'modify'
+		 * request, simply detach all existing attached LFs/slots
+		 * and attach afresh.
+		 */
+ if (attach->modify)
+ rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO,
+ attach->sso, attach);
+ }
+
+ if (attach->ssow) {
+ if (attach->modify)
+ rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW,
+ attach->ssow, attach);
+ }
+
+ if (attach->timlfs) {
+ if (attach->modify)
+ rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM,
+ attach->timlfs, attach);
+ }
+
+ if (attach->cptlfs) {
+ if (attach->modify &&
+ rvu_attach_from_same_block(rvu, BLKTYPE_CPT, attach))
+ rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT,
+ attach->cptlfs, attach);
+ }
+
+exit:
+ mutex_unlock(&rvu->rsrc_lock);
+ return err;
+}
+
+static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ int blkaddr, int lf)
+{
+ u16 vec;
+
+ if (lf < 0)
+ return MSIX_VECTOR_INVALID;
+
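+	/* Scan the PF/VF's MSIX vector map for the vector assigned
+	 * to this block's LF.
+	 */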
+ for (vec = 0; vec < pfvf->msix.max; vec++) {
+ if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf))
+ return vec;
+ }
+ return MSIX_VECTOR_INVALID;
+}
+
+static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct rvu_block *block, int lf)
+{
+ u16 nvecs, vec, offset;
+ u64 cfg;
+
+ cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
+ (lf << block->lfshift));
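+	/* Number of MSIX vectors needed by this LF is in bits [19:12] */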
+ nvecs = (cfg >> 12) & 0xFF;
+
+ /* Check and alloc MSIX vectors, must be contiguous */
+ if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs))
+ return;
+
+ offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
+
+ /* Config MSIX offset in LF */
+ rvu_write64(rvu, block->addr, block->msixcfg_reg |
+ (lf << block->lfshift), (cfg & ~0x7FFULL) | offset);
+
+ /* Update the bitmap as well */
+ for (vec = 0; vec < nvecs; vec++)
+ pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf);
+}
+
+static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct rvu_block *block, int lf)
+{
+ u16 nvecs, vec, offset;
+ u64 cfg;
+
+ cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
+ (lf << block->lfshift));
+ nvecs = (cfg >> 12) & 0xFF;
+
+ /* Clear MSIX offset in LF */
+ rvu_write64(rvu, block->addr, block->msixcfg_reg |
+ (lf << block->lfshift), cfg & ~0x7FFULL);
+
+ offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf);
+
+ /* Update the mapping */
+ for (vec = 0; vec < nvecs; vec++)
+ pfvf->msix_lfmap[offset + vec] = 0;
+
+ /* Free the same in MSIX bitmap */
+ rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
+}
+
+int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
+ struct msix_offset_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ int lf, slot, blkaddr;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (!pfvf->msix.bmap)
+ return 0;
+
+ /* Set MSIX offsets for each block's LFs attached to this PF/VF */
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0);
+ rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf);
+
+ /* Get BLKADDR from which LFs are attached to pcifunc */
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0) {
+ rsp->nix_msixoff = MSIX_VECTOR_INVALID;
+ } else {
+ lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, blkaddr, lf);
+ }
+
+ rsp->sso = pfvf->sso;
+ for (slot = 0; slot < rsp->sso; slot++) {
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot);
+ rsp->sso_msixoff[slot] =
+ rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf);
+ }
+
+ rsp->ssow = pfvf->ssow;
+ for (slot = 0; slot < rsp->ssow; slot++) {
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot);
+ rsp->ssow_msixoff[slot] =
+ rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf);
+ }
+
+ rsp->timlfs = pfvf->timlfs;
+ for (slot = 0; slot < rsp->timlfs; slot++) {
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_TIM], pcifunc, slot);
+ rsp->timlf_msixoff[slot] =
+ rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf);
+ }
+
+ rsp->cptlfs = pfvf->cptlfs;
+ for (slot = 0; slot < rsp->cptlfs; slot++) {
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot);
+ rsp->cptlf_msixoff[slot] =
+ rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf);
+ }
+
+ rsp->cpt1_lfs = pfvf->cpt1_lfs;
+ for (slot = 0; slot < rsp->cpt1_lfs; slot++) {
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT1], pcifunc, slot);
+ rsp->cpt1_lf_msixoff[slot] =
+ rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT1, lf);
+ }
+
+ return 0;
+}
+
+int rvu_mbox_handler_free_rsrc_cnt(struct rvu *rvu, struct msg_req *req,
+ struct free_rsrcs_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ block = &hw->block[BLKADDR_NPA];
+ rsp->npa = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_NIX0];
+ rsp->nix = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_NIX1];
+ rsp->nix1 = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_SSO];
+ rsp->sso = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_SSOW];
+ rsp->ssow = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_TIM];
+ rsp->tim = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_CPT0];
+ rsp->cpt = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_CPT1];
+ rsp->cpt1 = rvu_rsrc_free_count(&block->lf);
+
+ if (rvu->hw->cap.nix_fixed_txschq_mapping) {
+ rsp->schq[NIX_TXSCH_LVL_SMQ] = 1;
+ rsp->schq[NIX_TXSCH_LVL_TL4] = 1;
+ rsp->schq[NIX_TXSCH_LVL_TL3] = 1;
+ rsp->schq[NIX_TXSCH_LVL_TL2] = 1;
+ /* NIX1 */
+ if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
+ goto out;
+ rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] = 1;
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL4] = 1;
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL3] = 1;
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL2] = 1;
+ } else {
+ nix_hw = get_nix_hw(hw, BLKADDR_NIX0);
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
+ rsp->schq[NIX_TXSCH_LVL_SMQ] =
+ rvu_rsrc_free_count(&txsch->schq);
+
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4];
+ rsp->schq[NIX_TXSCH_LVL_TL4] =
+ rvu_rsrc_free_count(&txsch->schq);
+
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3];
+ rsp->schq[NIX_TXSCH_LVL_TL3] =
+ rvu_rsrc_free_count(&txsch->schq);
+
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
+ rsp->schq[NIX_TXSCH_LVL_TL2] =
+ rvu_rsrc_free_count(&txsch->schq);
+
+ if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
+ goto out;
+
+ nix_hw = get_nix_hw(hw, BLKADDR_NIX1);
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
+ rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] =
+ rvu_rsrc_free_count(&txsch->schq);
+
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4];
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL4] =
+ rvu_rsrc_free_count(&txsch->schq);
+
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3];
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL3] =
+ rvu_rsrc_free_count(&txsch->schq);
+
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL2] =
+ rvu_rsrc_free_count(&txsch->schq);
+ }
+
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL1] = 1;
+out:
+ rsp->schq[NIX_TXSCH_LVL_TL1] = 1;
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return 0;
+}
+
+int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ u16 vf, numvfs;
+ u64 cfg;
+
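+	/* Number of VFs enabled under this PF is read from
+	 * bits [19:12] of RVU_PRIV_PFX_CFG.
+	 */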
+ vf = pcifunc & RVU_PFVF_FUNC_MASK;
+ cfg = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc)));
+ numvfs = (cfg >> 12) & 0xFF;
+
+ if (vf && vf <= numvfs)
+ __rvu_flr_handler(rvu, pcifunc);
+ else
+ return RVU_INVALID_VF_ID;
+
+ return 0;
+}
+
+int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
+ struct get_hw_cap_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping;
+ rsp->nix_shaping = hw->cap.nix_shaping;
+ rsp->npc_hash_extract = hw->cap.npc_hash_extract;
+
+ return 0;
+}
+
+int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ int blkaddr, nixlf;
+ u16 target;
+
+ /* Only PF can add VF permissions */
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_afvf(pcifunc))
+ return -EOPNOTSUPP;
+
+ target = (pcifunc & ~RVU_PFVF_FUNC_MASK) | (req->vf + 1);
+ pfvf = rvu_get_pfvf(rvu, target);
+
+ if (req->flags & RESET_VF_PERM) {
+ pfvf->flags &= RVU_CLEAR_VF_PERM;
+ } else if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) ^
+ (req->flags & VF_TRUSTED)) {
+ change_bit(PF_SET_VF_TRUSTED, &pfvf->flags);
+ /* disable multicast and promisc entries */
+ if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags)) {
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, target);
+ if (blkaddr < 0)
+ return 0;
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
+ target, 0);
+ if (nixlf < 0)
+ return 0;
+ npc_enadis_default_mce_entry(rvu, target, nixlf,
+ NIXLF_ALLMULTI_ENTRY,
+ false);
+ npc_enadis_default_mce_entry(rvu, target, nixlf,
+ NIXLF_PROMISC_ENTRY,
+ false);
+ }
+ }
+
+ return 0;
+}
+
+static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
+ struct mbox_msghdr *req)
+{
+ struct rvu *rvu = pci_get_drvdata(mbox->pdev);
+
+	/* Check if valid, if not reply with an invalid msg */
+ if (req->sig != OTX2_MBOX_REQ_SIG)
+ goto bad_message;
+
+ switch (req->id) {
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
+ case _id: { \
+ struct _rsp_type *rsp; \
+ int err; \
+ \
+ rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \
+ mbox, devid, \
+ sizeof(struct _rsp_type)); \
+ /* some handlers should complete even if reply */ \
+ /* could not be allocated */ \
+ if (!rsp && \
+ _id != MBOX_MSG_DETACH_RESOURCES && \
+ _id != MBOX_MSG_NIX_TXSCH_FREE && \
+ _id != MBOX_MSG_VF_FLR) \
+ return -ENOMEM; \
+ if (rsp) { \
+ rsp->hdr.id = _id; \
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \
+ rsp->hdr.pcifunc = req->pcifunc; \
+ rsp->hdr.rc = 0; \
+ } \
+ \
+ err = rvu_mbox_handler_ ## _fn_name(rvu, \
+ (struct _req_type *)req, \
+ rsp); \
+ if (rsp && err) \
+ rsp->hdr.rc = err; \
+ \
+ trace_otx2_msg_process(mbox->pdev, _id, err); \
+ return rsp ? err : -ENOMEM; \
+ }
+MBOX_MESSAGES
+#undef M
+
+bad_message:
+ default:
+ otx2_reply_invalid_msg(mbox, devid, req->pcifunc, req->id);
+ return -ENODEV;
+ }
+}
+
+static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
+{
+ struct rvu *rvu = mwork->rvu;
+ int offset, err, id, devid;
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *req_hdr;
+ struct mbox_msghdr *msg;
+ struct mbox_wq_info *mw;
+ struct otx2_mbox *mbox;
+
+ switch (type) {
+ case TYPE_AFPF:
+ mw = &rvu->afpf_wq_info;
+ break;
+ case TYPE_AFVF:
+ mw = &rvu->afvf_wq_info;
+ break;
+ default:
+ return;
+ }
+
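+	/* Index of this work item in the array identifies the sending PF/VF */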
+ devid = mwork - mw->mbox_wrk;
+ mbox = &mw->mbox;
+ mdev = &mbox->dev[devid];
+
+ /* Process received mbox messages */
+ req_hdr = mdev->mbase + mbox->rx_start;
+ if (mw->mbox_wrk[devid].num_msgs == 0)
+ return;
+
+ offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+
+ for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) {
+ msg = mdev->mbase + offset;
+
+ /* Set which PF/VF sent this message based on mbox IRQ */
+ switch (type) {
+ case TYPE_AFPF:
+ msg->pcifunc &=
+ ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
+ msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT);
+ break;
+ case TYPE_AFVF:
+ msg->pcifunc &=
+ ~(RVU_PFVF_FUNC_MASK << RVU_PFVF_FUNC_SHIFT);
+ msg->pcifunc |= (devid << RVU_PFVF_FUNC_SHIFT) + 1;
+ break;
+ }
+
+ err = rvu_process_mbox_msg(mbox, devid, msg);
+ if (!err) {
+ offset = mbox->rx_start + msg->next_msgoff;
+ continue;
+ }
+
+ if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
+ dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
+ err, otx2_mbox_id2name(msg->id),
+ msg->id, rvu_get_pf(msg->pcifunc),
+ (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
+ else
+ dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
+ err, otx2_mbox_id2name(msg->id),
+ msg->id, devid);
+ }
+ mw->mbox_wrk[devid].num_msgs = 0;
+
+ /* Send mbox responses to VF/PF */
+ otx2_mbox_msg_send(mbox, devid);
+}
+
+static inline void rvu_afpf_mbox_handler(struct work_struct *work)
+{
+ struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+
+ __rvu_mbox_handler(mwork, TYPE_AFPF);
+}
+
+static inline void rvu_afvf_mbox_handler(struct work_struct *work)
+{
+ struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+
+ __rvu_mbox_handler(mwork, TYPE_AFVF);
+}
+
+static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
+{
+ struct rvu *rvu = mwork->rvu;
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
+ struct mbox_wq_info *mw;
+ struct otx2_mbox *mbox;
+ int offset, id, devid;
+
+ switch (type) {
+ case TYPE_AFPF:
+ mw = &rvu->afpf_wq_info;
+ break;
+ case TYPE_AFVF:
+ mw = &rvu->afvf_wq_info;
+ break;
+ default:
+ return;
+ }
+
+ devid = mwork - mw->mbox_wrk_up;
+ mbox = &mw->mbox_up;
+ mdev = &mbox->dev[devid];
+
+ rsp_hdr = mdev->mbase + mbox->rx_start;
+ if (mw->mbox_wrk_up[devid].up_num_msgs == 0) {
+ dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n");
+ return;
+ }
+
+ offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+
+ for (id = 0; id < mw->mbox_wrk_up[devid].up_num_msgs; id++) {
+ msg = mdev->mbase + offset;
+
+ if (msg->id >= MBOX_MSG_MAX) {
+ dev_err(rvu->dev,
+ "Mbox msg with unknown ID 0x%x\n", msg->id);
+ goto end;
+ }
+
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(rvu->dev,
+ "Mbox msg with wrong signature %x, ID 0x%x\n",
+ msg->sig, msg->id);
+ goto end;
+ }
+
+ switch (msg->id) {
+ case MBOX_MSG_CGX_LINK_EVENT:
+ break;
+ default:
+ if (msg->rc)
+ dev_err(rvu->dev,
+ "Mbox msg response has err %d, ID 0x%x\n",
+ msg->rc, msg->id);
+ break;
+ }
+end:
+ offset = mbox->rx_start + msg->next_msgoff;
+ mdev->msgs_acked++;
+ }
+ mw->mbox_wrk_up[devid].up_num_msgs = 0;
+
+ otx2_mbox_reset(mbox, devid);
+}
+
+static inline void rvu_afpf_mbox_up_handler(struct work_struct *work)
+{
+ struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+
+ __rvu_mbox_up_handler(mwork, TYPE_AFPF);
+}
+
+static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
+{
+ struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+
+ __rvu_mbox_up_handler(mwork, TYPE_AFVF);
+}
+
+static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
+ int num, int type, unsigned long *pf_bmap)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int region;
+ u64 bar4;
+
+	/* On the cn10k platform the VF mailbox regions of a PF follow the
+	 * PF <-> AF mailbox region, whereas on Octeontx2 the base is read
+	 * from the RVU_PF_VF_BAR4_ADDR register.
+	 */
+ if (type == TYPE_AFVF) {
+ for (region = 0; region < num; region++) {
+ if (!test_bit(region, pf_bmap))
+ continue;
+
+ if (hw->cap.per_pf_mbox_regs) {
+ bar4 = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFX_BAR4_ADDR(0)) +
+ MBOX_SIZE;
+ bar4 += region * MBOX_SIZE;
+ } else {
+ bar4 = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
+ bar4 += region * MBOX_SIZE;
+ }
+ mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
+ if (!mbox_addr[region])
+ goto error;
+ }
+ return 0;
+ }
+
+	/* On the cn10k platform the AF <-> PF mailbox region of a PF is read
+	 * from per-PF registers, whereas on Octeontx2 it is read from the
+	 * RVU_AF_PF_BAR4_ADDR register.
+	 */
+ for (region = 0; region < num; region++) {
+ if (!test_bit(region, pf_bmap))
+ continue;
+
+ if (hw->cap.per_pf_mbox_regs) {
+ bar4 = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFX_BAR4_ADDR(region));
+ } else {
+ bar4 = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_AF_PF_BAR4_ADDR);
+ bar4 += region * MBOX_SIZE;
+ }
+ mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
+ if (!mbox_addr[region])
+ goto error;
+ }
+ return 0;
+
+error:
+ while (region--)
+ iounmap((void __iomem *)mbox_addr[region]);
+ return -ENOMEM;
+}
+
+static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
+ int type, int num,
+ void (mbox_handler)(struct work_struct *),
+ void (mbox_up_handler)(struct work_struct *))
+{
+ int err = -EINVAL, i, dir, dir_up;
+ void __iomem *reg_base;
+ struct rvu_work *mwork;
+ unsigned long *pf_bmap;
+ void **mbox_regions;
+ const char *name;
+ u64 cfg;
+
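+	/* Bitmap of enabled PFs/VFs whose mailboxes need to be set up */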
+ pf_bmap = bitmap_zalloc(num, GFP_KERNEL);
+ if (!pf_bmap)
+ return -ENOMEM;
+
+ /* RVU VFs */
+ if (type == TYPE_AFVF)
+ bitmap_set(pf_bmap, 0, num);
+
+ if (type == TYPE_AFPF) {
+ /* Mark enabled PFs in bitmap */
+ for (i = 0; i < num; i++) {
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(i));
+ if (cfg & BIT_ULL(20))
+ set_bit(i, pf_bmap);
+ }
+ }
+
+ mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
+ if (!mbox_regions) {
+ err = -ENOMEM;
+ goto free_bitmap;
+ }
+
+ switch (type) {
+ case TYPE_AFPF:
+ name = "rvu_afpf_mailbox";
+ dir = MBOX_DIR_AFPF;
+ dir_up = MBOX_DIR_AFPF_UP;
+ reg_base = rvu->afreg_base;
+ err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF, pf_bmap);
+ if (err)
+ goto free_regions;
+ break;
+ case TYPE_AFVF:
+ name = "rvu_afvf_mailbox";
+ dir = MBOX_DIR_PFVF;
+ dir_up = MBOX_DIR_PFVF_UP;
+ reg_base = rvu->pfreg_base;
+ err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF, pf_bmap);
+ if (err)
+ goto free_regions;
+ break;
+ default:
+ goto free_regions;
+ }
+
+ mw->mbox_wq = alloc_workqueue(name,
+ WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
+ num);
+ if (!mw->mbox_wq) {
+ err = -ENOMEM;
+ goto unmap_regions;
+ }
+
+ mw->mbox_wrk = devm_kcalloc(rvu->dev, num,
+ sizeof(struct rvu_work), GFP_KERNEL);
+ if (!mw->mbox_wrk) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ mw->mbox_wrk_up = devm_kcalloc(rvu->dev, num,
+ sizeof(struct rvu_work), GFP_KERNEL);
+ if (!mw->mbox_wrk_up) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ err = otx2_mbox_regions_init(&mw->mbox, mbox_regions, rvu->pdev,
+ reg_base, dir, num, pf_bmap);
+ if (err)
+ goto exit;
+
+ err = otx2_mbox_regions_init(&mw->mbox_up, mbox_regions, rvu->pdev,
+ reg_base, dir_up, num, pf_bmap);
+ if (err)
+ goto exit;
+
+ for (i = 0; i < num; i++) {
+ if (!test_bit(i, pf_bmap))
+ continue;
+
+ mwork = &mw->mbox_wrk[i];
+ mwork->rvu = rvu;
+ INIT_WORK(&mwork->work, mbox_handler);
+
+ mwork = &mw->mbox_wrk_up[i];
+ mwork->rvu = rvu;
+ INIT_WORK(&mwork->work, mbox_up_handler);
+ }
+ goto free_regions;
+
+exit:
+ destroy_workqueue(mw->mbox_wq);
+unmap_regions:
+ while (num--)
+ iounmap((void __iomem *)mbox_regions[num]);
+free_regions:
+ kfree(mbox_regions);
+free_bitmap:
+ bitmap_free(pf_bmap);
+ return err;
+}
+
+static void rvu_mbox_destroy(struct mbox_wq_info *mw)
+{
+ struct otx2_mbox *mbox = &mw->mbox;
+ struct otx2_mbox_dev *mdev;
+ int devid;
+
+ if (mw->mbox_wq) {
+ destroy_workqueue(mw->mbox_wq);
+ mw->mbox_wq = NULL;
+ }
+
+ for (devid = 0; devid < mbox->ndevs; devid++) {
+ mdev = &mbox->dev[devid];
+ if (mdev->hwbase)
+ iounmap((void __iomem *)mdev->hwbase);
+ }
+
+ otx2_mbox_destroy(&mw->mbox);
+ otx2_mbox_destroy(&mw->mbox_up);
+}
+
+static void rvu_queue_work(struct mbox_wq_info *mw, int first,
+ int mdevs, u64 intr)
+{
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *mbox;
+ struct mbox_hdr *hdr;
+ int i;
+
+ for (i = first; i < mdevs; i++) {
+		/* Interrupt bits are indexed relative to 'first', i.e. start from 0 */
+ if (!(intr & BIT_ULL(i - first)))
+ continue;
+
+ mbox = &mw->mbox;
+ mdev = &mbox->dev[i];
+ hdr = mdev->mbase + mbox->rx_start;
+
+		/* The hdr->num_msgs is set to zero immediately in the interrupt
+		 * handler so that it holds a correct value the next time the
+		 * interrupt handler is called.
+		 * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler.
+		 * pf->mbox.up_num_msgs holds the data for use in
+		 * pfaf_mbox_up_handler.
+		 */
+
+ if (hdr->num_msgs) {
+ mw->mbox_wrk[i].num_msgs = hdr->num_msgs;
+ hdr->num_msgs = 0;
+ queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
+ }
+ mbox = &mw->mbox_up;
+ mdev = &mbox->dev[i];
+ hdr = mdev->mbase + mbox->rx_start;
+ if (hdr->num_msgs) {
+ mw->mbox_wrk_up[i].up_num_msgs = hdr->num_msgs;
+ hdr->num_msgs = 0;
+ queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work);
+ }
+ }
+}
+
+static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ int vfs = rvu->vfs;
+ u64 intr;
+
+ intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
+ /* Clear interrupts */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);
+ if (intr)
+ trace_otx2_msg_interrupt(rvu->pdev, "PF(s) to AF", intr);
+
+ /* Sync with mbox memory region */
+ rmb();
+
+ rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr);
+
+ /* Handle VF interrupts */
+ if (vfs > 64) {
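+		/* VFs 64..127 are reported in VFPF_MBOX_INTX(1) */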
+ intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1));
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr);
+
+ rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr);
+ vfs -= 64;
+ }
+
+ intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0));
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr);
+ if (intr)
+ trace_otx2_msg_interrupt(rvu->pdev, "VF(s) to AF", intr);
+
+ rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_enable_mbox_intr(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ /* Clear spurious irqs, if any */
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs));
+
+	/* Enable mailbox interrupt for all PFs except PF0, i.e. the AF itself */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S,
+ INTR_MASK(hw->total_pfs) & ~1ULL);
+}
+
+static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
+{
+ struct rvu_block *block;
+ int slot, lf, num_lfs;
+ int err;
+
+ block = &rvu->hw->block[blkaddr];
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
+ block->addr);
+ if (!num_lfs)
+ return;
+ for (slot = 0; slot < num_lfs; slot++) {
+ lf = rvu_get_lf(rvu, block, pcifunc, slot);
+ if (lf < 0)
+ continue;
+
+ /* Cleanup LF and reset it */
+ if (block->addr == BLKADDR_NIX0 || block->addr == BLKADDR_NIX1)
+ rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf);
+ else if (block->addr == BLKADDR_NPA)
+ rvu_npa_lf_teardown(rvu, pcifunc, lf);
+ else if ((block->addr == BLKADDR_CPT0) ||
+ (block->addr == BLKADDR_CPT1))
+ rvu_cpt_lf_teardown(rvu, pcifunc, block->addr, lf,
+ slot);
+
+ err = rvu_lf_reset(rvu, block, lf);
+ if (err) {
+ dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
+ block->addr, lf);
+ }
+ }
+}
+
+static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
+{
+ if (rvu_npc_exact_has_match_table(rvu))
+ rvu_npc_exact_reset(rvu, pcifunc);
+
+ mutex_lock(&rvu->flr_lock);
+ /* Reset order should reflect inter-block dependencies:
+ * 1. Reset any packet/work sources (NIX, CPT, TIM)
+ * 2. Flush and reset SSO/SSOW
+ * 3. Cleanup pools (NPA)
+ */
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX1);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT1);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
+ rvu_reset_lmt_map_tbl(rvu, pcifunc);
+ rvu_detach_rsrcs(rvu, NULL, pcifunc);
+ /* In scenarios where PF/VF drivers detach NIXLF without freeing MCAM
+ * entries, check and free the MCAM entries explicitly to avoid leak.
+ * Since LF is detached use LF number as -1.
+ */
+ rvu_npc_free_mcam_entries(rvu, pcifunc, -1);
+
+ if (rvu->mcs_blk_cnt)
+ rvu_mcs_flr_handler(rvu, pcifunc);
+
+ mutex_unlock(&rvu->flr_lock);
+}
+
+static void rvu_afvf_flr_handler(struct rvu *rvu, int vf)
+{
+ int reg = 0;
+
+ /* pcifunc = 0(PF0) | (vf + 1) */
+ __rvu_flr_handler(rvu, vf + 1);
+
+ if (vf >= 64) {
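+		/* VFs 64..127 are tracked in the second (index 1) registers */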
+ reg = 1;
+ vf = vf - 64;
+ }
+
+ /* Signal FLR finish and enable IRQ */
+ rvupf_write64(rvu, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
+}
+
+static void rvu_flr_handler(struct work_struct *work)
+{
+ struct rvu_work *flrwork = container_of(work, struct rvu_work, work);
+ struct rvu *rvu = flrwork->rvu;
+ u16 pcifunc, numvfs, vf;
+ u64 cfg;
+ int pf;
+
+ pf = flrwork - rvu->flr_wrk;
+ if (pf >= rvu->hw->total_pfs) {
+ rvu_afvf_flr_handler(rvu, pf - rvu->hw->total_pfs);
+ return;
+ }
+
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+ numvfs = (cfg >> 12) & 0xFF;
+ pcifunc = pf << RVU_PFVF_PF_SHIFT;
+
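+	/* Handle FLR for this PF's VFs first, then for the PF itself */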
+ for (vf = 0; vf < numvfs; vf++)
+ __rvu_flr_handler(rvu, (pcifunc | (vf + 1)));
+
+ __rvu_flr_handler(rvu, pcifunc);
+
+ /* Signal FLR finish */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf));
+
+ /* Enable interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S, BIT_ULL(pf));
+}
+
+static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs)
+{
+ int dev, vf, reg = 0;
+ u64 intr;
+
+ if (start_vf >= 64)
+ reg = 1;
+
+ intr = rvupf_read64(rvu, RVU_PF_VFFLR_INTX(reg));
+ if (!intr)
+ return;
+
+ for (vf = 0; vf < numvfs; vf++) {
+ if (!(intr & BIT_ULL(vf)))
+ continue;
+ /* Clear and disable the interrupt */
+ rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf));
+
+ dev = vf + start_vf + rvu->hw->total_pfs;
+ queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work);
+ }
+}
+
+static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ u64 intr;
+ u8 pf;
+
+ intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT);
+ if (!intr)
+ goto afvf_flr;
+
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ if (intr & (1ULL << pf)) {
+ /* clear interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT,
+ BIT_ULL(pf));
+ /* Disable the interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
+ BIT_ULL(pf));
+			/* PF is already dead, do only AF related operations */
+ queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work);
+ }
+ }
+
+afvf_flr:
+ rvu_afvf_queue_flr_work(rvu, 0, 64);
+ if (rvu->vfs > 64)
+ rvu_afvf_queue_flr_work(rvu, 64, rvu->vfs - 64);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_me_handle_vfset(struct rvu *rvu, int idx, u64 intr)
+{
+ int vf;
+
+ /* Nothing to be done here other than clearing the
+ * TRPEND bit.
+ */
+ for (vf = 0; vf < 64; vf++) {
+ if (intr & (1ULL << vf)) {
+ /* clear the trpend due to ME(master enable) */
+ rvupf_write64(rvu, RVU_PF_VFTRPENDX(idx), BIT_ULL(vf));
+ /* clear interrupt */
+ rvupf_write64(rvu, RVU_PF_VFME_INTX(idx), BIT_ULL(vf));
+ }
+ }
+}
+
+/* Handles ME interrupts from VFs of AF */
+static irqreturn_t rvu_me_vf_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ int vfset;
+ u64 intr;
+
+ intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
+
+ for (vfset = 0; vfset <= 1; vfset++) {
+ intr = rvupf_read64(rvu, RVU_PF_VFME_INTX(vfset));
+ if (intr)
+ rvu_me_handle_vfset(rvu, vfset, intr);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Handles ME interrupts from PFs */
+static irqreturn_t rvu_me_pf_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ u64 intr;
+ u8 pf;
+
+ intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
+
+ /* Nothing to be done here other than clearing the
+ * TRPEND bit.
+ */
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ if (intr & (1ULL << pf)) {
+ /* clear the trpend due to ME(master enable) */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND,
+ BIT_ULL(pf));
+ /* clear interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT,
+ BIT_ULL(pf));
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_unregister_interrupts(struct rvu *rvu)
+{
+ int irq;
+
+ rvu_cpt_unregister_interrupts(rvu);
+
+ /* Disable the Mbox interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+ /* Disable the PF FLR interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+ /* Disable the PF ME interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1C,
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+ for (irq = 0; irq < rvu->num_vec; irq++) {
+ if (rvu->irq_allocated[irq]) {
+ free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
+ rvu->irq_allocated[irq] = false;
+ }
+ }
+
+ pci_free_irq_vectors(rvu->pdev);
+ rvu->num_vec = 0;
+}
+
+static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu)
+{
+ struct rvu_pfvf *pfvf = &rvu->pf[0];
+ int offset;
+
+ offset = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
+
+	/* Make sure there are enough MSIX vectors configured so that
+	 * VF interrupts can be handled. An offset equal to zero means
+	 * that PF vectors are not configured and would overlap AF vectors.
+	 */
+ return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) &&
+ offset;
+}
+
+static int rvu_register_interrupts(struct rvu *rvu)
+{
+ int ret, offset, pf_vec_start;
+
+ rvu->num_vec = pci_msix_vec_count(rvu->pdev);
+
+ rvu->irq_name = devm_kmalloc_array(rvu->dev, rvu->num_vec,
+ NAME_SIZE, GFP_KERNEL);
+ if (!rvu->irq_name)
+ return -ENOMEM;
+
+ rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec,
+ sizeof(bool), GFP_KERNEL);
+ if (!rvu->irq_allocated)
+ return -ENOMEM;
+
+ /* Enable MSI-X */
+ ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec,
+ rvu->num_vec, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dev_err(rvu->dev,
+ "RVUAF: Request for %d msix vectors failed, ret %d\n",
+ rvu->num_vec, ret);
+ return ret;
+ }
+
+ /* Register mailbox interrupt handler */
+ sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
+ ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
+ rvu_mbox_intr_handler, 0,
+ &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for mbox irq\n");
+ goto fail;
+ }
+
+ rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;
+
+ /* Enable mailbox interrupts from all PFs */
+ rvu_enable_mbox_intr(rvu);
+
+ /* Register FLR interrupt handler */
+ sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
+ "RVUAF FLR");
+ ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFFLR),
+ rvu_flr_intr_handler, 0,
+ &rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
+ rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for FLR\n");
+ goto fail;
+ }
+ rvu->irq_allocated[RVU_AF_INT_VEC_PFFLR] = true;
+
+	/* Enable FLR interrupt for all PFs */
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFFLR_INT, INTR_MASK(rvu->hw->total_pfs));
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+ /* Register ME interrupt handler */
+ sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
+ "RVUAF ME");
+ ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFME),
+ rvu_me_pf_intr_handler, 0,
+ &rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
+ rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for ME\n");
+ }
+ rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true;
+
+ /* Clear TRPEND bit for all PF */
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFTRPEND, INTR_MASK(rvu->hw->total_pfs));
+	/* Enable ME interrupt for all PFs */
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs));
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1S,
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+ if (!rvu_afvf_msix_vectors_num_ok(rvu))
+ return 0;
+
+ /* Get PF MSIX vectors offset. */
+ pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
+
+ /* Register MBOX0 interrupt. */
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu_mbox_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE],
+ rvu);
+ if (ret)
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for Mbox0\n");
+
+ rvu->irq_allocated[offset] = true;
+
+ /* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so
+ * simply increment current offset by 1.
+ */
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu_mbox_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE],
+ rvu);
+ if (ret)
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for Mbox1\n");
+
+ rvu->irq_allocated[offset] = true;
+
+ /* Register FLR interrupt handler for AF's VFs */
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR0");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu_flr_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE], rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for RVUAFVF FLR0\n");
+ goto fail;
+ }
+ rvu->irq_allocated[offset] = true;
+
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR1;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR1");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu_flr_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE], rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for RVUAFVF FLR1\n");
+ goto fail;
+ }
+ rvu->irq_allocated[offset] = true;
+
+ /* Register ME interrupt handler for AF's VFs */
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFME0;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME0");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu_me_vf_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE], rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for RVUAFVF ME0\n");
+ goto fail;
+ }
+ rvu->irq_allocated[offset] = true;
+
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFME1;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME1");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu_me_vf_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE], rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for RVUAFVF ME1\n");
+ goto fail;
+ }
+ rvu->irq_allocated[offset] = true;
+
+ ret = rvu_cpt_register_interrupts(rvu);
+ if (ret)
+ goto fail;
+
+ return 0;
+
+fail:
+ rvu_unregister_interrupts(rvu);
+ return ret;
+}
+
+static void rvu_flr_wq_destroy(struct rvu *rvu)
+{
+ if (rvu->flr_wq) {
+ destroy_workqueue(rvu->flr_wq);
+ rvu->flr_wq = NULL;
+ }
+}
+
+static int rvu_flr_init(struct rvu *rvu)
+{
+ int dev, num_devs;
+ u64 cfg;
+ int pf;
+
+	/* Enable FLR for all PFs */
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf),
+ cfg | BIT_ULL(22));
+ }
+
+ rvu->flr_wq = alloc_workqueue("rvu_afpf_flr",
+ WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
+ 1);
+ if (!rvu->flr_wq)
+ return -ENOMEM;
+
+ num_devs = rvu->hw->total_pfs + pci_sriov_get_totalvfs(rvu->pdev);
+ rvu->flr_wrk = devm_kcalloc(rvu->dev, num_devs,
+ sizeof(struct rvu_work), GFP_KERNEL);
+ if (!rvu->flr_wrk) {
+ destroy_workqueue(rvu->flr_wq);
+ return -ENOMEM;
+ }
+
+ for (dev = 0; dev < num_devs; dev++) {
+ rvu->flr_wrk[dev].rvu = rvu;
+ INIT_WORK(&rvu->flr_wrk[dev].work, rvu_flr_handler);
+ }
+
+ mutex_init(&rvu->flr_lock);
+
+ return 0;
+}
+
+static void rvu_disable_afvf_intr(struct rvu *rvu)
+{
+ int vfs = rvu->vfs;
+
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
+ if (vfs <= 64)
+ return;
+
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1),
+ INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+}
+
+static void rvu_enable_afvf_intr(struct rvu *rvu)
+{
+ int vfs = rvu->vfs;
+
+ /* Clear any pending interrupts and enable AF VF interrupts for
+ * the first 64 VFs.
+ */
+ /* Mbox */
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(vfs));
+
+ /* FLR */
+ rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(vfs));
+
+ /* Same for remaining VFs, if any. */
+ if (vfs <= 64)
+ return;
+
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
+ INTR_MASK(vfs - 64));
+
+ rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
+}
+
+int rvu_get_num_lbk_chans(void)
+{
+ struct pci_dev *pdev;
+ void __iomem *base;
+ int ret = -EIO;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_LBK,
+ NULL);
+ if (!pdev)
+ goto err;
+
+ base = pci_ioremap_bar(pdev, 0);
+ if (!base)
+ goto err_put;
+
+ /* Read number of available LBK channels from LBK(0)_CONST register. */
+ ret = (readq(base + 0x10) >> 32) & 0xffff;
+ iounmap(base);
+err_put:
+ pci_dev_put(pdev);
+err:
+ return ret;
+}
+
+static int rvu_enable_sriov(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+ int err, chans, vfs;
+
+ if (!rvu_afvf_msix_vectors_num_ok(rvu)) {
+ dev_warn(&pdev->dev,
+ "Skipping SRIOV enablement since not enough IRQs are available\n");
+ return 0;
+ }
+
+ chans = rvu_get_num_lbk_chans();
+ if (chans < 0)
+ return chans;
+
+ vfs = pci_sriov_get_totalvfs(pdev);
+
+ /* Limit VFs in case we have more VFs than LBK channels available. */
+ if (vfs > chans)
+ vfs = chans;
+
+ if (!vfs)
+ return 0;
+
+	/* LBK channel number 63 is used for switching packets between
+	 * CGX mapped VFs. Hence limit LBK pairs to 62 only.
+	 */
+ if (vfs > 62)
+ vfs = 62;
+
+	/* Save the number of VFs for reference in the VF interrupt handlers.
+	 * Since interrupts might start arriving during SRIOV enablement,
+	 * the ordinary API cannot be used to get the number of enabled VFs.
+	 */
+ rvu->vfs = vfs;
+
+ err = rvu_mbox_init(rvu, &rvu->afvf_wq_info, TYPE_AFVF, vfs,
+ rvu_afvf_mbox_handler, rvu_afvf_mbox_up_handler);
+ if (err)
+ return err;
+
+ rvu_enable_afvf_intr(rvu);
+ /* Make sure IRQs are enabled before SRIOV. */
+ mb();
+
+ err = pci_enable_sriov(pdev, vfs);
+ if (err) {
+ rvu_disable_afvf_intr(rvu);
+ rvu_mbox_destroy(&rvu->afvf_wq_info);
+ return err;
+ }
+
+ return 0;
+}
+
+static void rvu_disable_sriov(struct rvu *rvu)
+{
+ rvu_disable_afvf_intr(rvu);
+ rvu_mbox_destroy(&rvu->afvf_wq_info);
+ pci_disable_sriov(rvu->pdev);
+}
+
+static void rvu_update_module_params(struct rvu *rvu)
+{
+ const char *default_pfl_name = "default";
+
+ strscpy(rvu->mkex_pfl_name,
+ mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN);
+ strscpy(rvu->kpu_pfl_name,
+ kpu_profile ? kpu_profile : default_pfl_name, KPU_NAME_LEN);
+}
+
+static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct rvu *rvu;
+ int err;
+
+ rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL);
+ if (!rvu)
+ return -ENOMEM;
+
+ rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL);
+ if (!rvu->hw) {
+ devm_kfree(dev, rvu);
+ return -ENOMEM;
+ }
+
+ pci_set_drvdata(pdev, rvu);
+ rvu->pdev = pdev;
+ rvu->dev = &pdev->dev;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ goto err_freemem;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto err_disable_device;
+ }
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "DMA mask config failed, abort\n");
+ goto err_release_regions;
+ }
+
+ pci_set_master(pdev);
+
+ rvu->ptp = ptp_get();
+ if (IS_ERR(rvu->ptp)) {
+ err = PTR_ERR(rvu->ptp);
+ if (err)
+ goto err_release_regions;
+ rvu->ptp = NULL;
+ }
+
+ /* Map Admin function CSRs */
+ rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
+ rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
+ if (!rvu->afreg_base || !rvu->pfreg_base) {
+ dev_err(dev, "Unable to map admin function CSRs, aborting\n");
+ err = -ENOMEM;
+ goto err_put_ptp;
+ }
+
+ /* Store module params in rvu structure */
+ rvu_update_module_params(rvu);
+
+ /* Check which blocks the HW supports */
+ rvu_check_block_implemented(rvu);
+
+ rvu_reset_all_blocks(rvu);
+
+ rvu_setup_hw_capabilities(rvu);
+
+ err = rvu_setup_hw_resources(rvu);
+ if (err)
+ goto err_put_ptp;
+
+ /* Init mailbox btw AF and PFs */
+ err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
+ rvu->hw->total_pfs, rvu_afpf_mbox_handler,
+ rvu_afpf_mbox_up_handler);
+ if (err) {
+ dev_err(dev, "%s: Failed to initialize mbox\n", __func__);
+ goto err_hwsetup;
+ }
+
+ err = rvu_flr_init(rvu);
+ if (err) {
+ dev_err(dev, "%s: Failed to initialize flr\n", __func__);
+ goto err_mbox;
+ }
+
+ err = rvu_register_interrupts(rvu);
+ if (err) {
+ dev_err(dev, "%s: Failed to register interrupts\n", __func__);
+ goto err_flr;
+ }
+
+ err = rvu_register_dl(rvu);
+ if (err) {
+ dev_err(dev, "%s: Failed to register devlink\n", __func__);
+ goto err_irq;
+ }
+
+ rvu_setup_rvum_blk_revid(rvu);
+
+ /* Enable AF's VFs (if any) */
+ err = rvu_enable_sriov(rvu);
+ if (err) {
+ dev_err(dev, "%s: Failed to enable sriov\n", __func__);
+ goto err_dl;
+ }
+
+ /* Initialize debugfs */
+ rvu_dbg_init(rvu);
+
+ mutex_init(&rvu->rswitch.switch_lock);
+
+ if (rvu->fwdata)
+ ptp_start(rvu->ptp, rvu->fwdata->sclk, rvu->fwdata->ptp_ext_clk_rate,
+ rvu->fwdata->ptp_ext_tstamp);
+
+ return 0;
+err_dl:
+ rvu_unregister_dl(rvu);
+err_irq:
+ rvu_unregister_interrupts(rvu);
+err_flr:
+ rvu_flr_wq_destroy(rvu);
+err_mbox:
+ rvu_mbox_destroy(&rvu->afpf_wq_info);
+err_hwsetup:
+ rvu_cgx_exit(rvu);
+ rvu_fwdata_exit(rvu);
+ rvu_mcs_exit(rvu);
+ rvu_reset_all_blocks(rvu);
+ rvu_free_hw_resources(rvu);
+ rvu_clear_rvum_blk_revid(rvu);
+err_put_ptp:
+ ptp_put(rvu->ptp);
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+err_freemem:
+ pci_set_drvdata(pdev, NULL);
+ devm_kfree(&pdev->dev, rvu->hw);
+ devm_kfree(dev, rvu);
+ return err;
+}
+
+static void rvu_remove(struct pci_dev *pdev)
+{
+ struct rvu *rvu = pci_get_drvdata(pdev);
+
+ rvu_dbg_exit(rvu);
+ rvu_unregister_dl(rvu);
+ rvu_unregister_interrupts(rvu);
+ rvu_flr_wq_destroy(rvu);
+ rvu_cgx_exit(rvu);
+ rvu_fwdata_exit(rvu);
+ rvu_mcs_exit(rvu);
+ rvu_mbox_destroy(&rvu->afpf_wq_info);
+ rvu_disable_sriov(rvu);
+ rvu_reset_all_blocks(rvu);
+ rvu_free_hw_resources(rvu);
+ rvu_clear_rvum_blk_revid(rvu);
+ ptp_put(rvu->ptp);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ devm_kfree(&pdev->dev, rvu->hw);
+ devm_kfree(&pdev->dev, rvu);
+}
+
+static struct pci_driver rvu_driver = {
+ .name = DRV_NAME,
+ .id_table = rvu_id_table,
+ .probe = rvu_probe,
+ .remove = rvu_remove,
+};
+
+static int __init rvu_init_module(void)
+{
+ int err;
+
+ pr_info("%s: %s\n", DRV_NAME, DRV_STRING);
+
+ err = pci_register_driver(&cgx_driver);
+ if (err < 0)
+ return err;
+
+ err = pci_register_driver(&ptp_driver);
+ if (err < 0)
+ goto ptp_err;
+
+ err = pci_register_driver(&mcs_driver);
+ if (err < 0)
+ goto mcs_err;
+
+ err = pci_register_driver(&rvu_driver);
+ if (err < 0)
+ goto rvu_err;
+
+ return 0;
+rvu_err:
+ pci_unregister_driver(&mcs_driver);
+mcs_err:
+ pci_unregister_driver(&ptp_driver);
+ptp_err:
+ pci_unregister_driver(&cgx_driver);
+
+ return err;
+}
+
+static void __exit rvu_cleanup_module(void)
+{
+ pci_unregister_driver(&rvu_driver);
+ pci_unregister_driver(&mcs_driver);
+ pci_unregister_driver(&ptp_driver);
+ pci_unregister_driver(&cgx_driver);
+}
+
+module_init(rvu_init_module);
+module_exit(rvu_cleanup_module);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
new file mode 100644
index 000000000..0b76dfa97
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -0,0 +1,912 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#ifndef RVU_H
+#define RVU_H
+
+#include <linux/pci.h>
+#include <net/devlink.h>
+
+#include "rvu_struct.h"
+#include "rvu_devlink.h"
+#include "common.h"
+#include "mbox.h"
+#include "npc.h"
+#include "rvu_reg.h"
+
+/* PCI device IDs */
+#define PCI_DEVID_OCTEONTX2_RVU_AF 0xA065
+#define PCI_DEVID_OCTEONTX2_LBK 0xA061
+
+/* Subsystem Device ID */
+#define PCI_SUBSYS_DEVID_98XX 0xB100
+#define PCI_SUBSYS_DEVID_96XX 0xB200
+#define PCI_SUBSYS_DEVID_CN10K_A 0xB900
+#define PCI_SUBSYS_DEVID_CNF10K_B 0xBC00
+#define PCI_SUBSYS_DEVID_CN10K_B 0xBD00
+
+/* PCI BAR nos */
+#define PCI_AF_REG_BAR_NUM 0
+#define PCI_PF_REG_BAR_NUM 2
+#define PCI_MBOX_BAR_NUM 4
+
+#define NAME_SIZE 32
+#define MAX_NIX_BLKS 2
+#define MAX_CPT_BLKS 2
+
+/* PF_FUNC */
+#define RVU_PFVF_PF_SHIFT 10
+#define RVU_PFVF_PF_MASK 0x3F
+#define RVU_PFVF_FUNC_SHIFT 0
+#define RVU_PFVF_FUNC_MASK 0x3FF
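+/* A pcifunc is composed of a 6-bit PF number (bits 15:10) and a 10-bit
+ * function number (bits 9:0); FUNC 0 is the PF itself, VFs use FUNC = VF + 1.
+ */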
+
+#ifdef CONFIG_DEBUG_FS
+struct dump_ctx {
+ int lf;
+ int id;
+ bool all;
+};
+
+struct cpt_ctx {
+ int blkaddr;
+ struct rvu *rvu;
+};
+
+struct rvu_debugfs {
+ struct dentry *root;
+ struct dentry *cgx_root;
+ struct dentry *cgx;
+ struct dentry *lmac;
+ struct dentry *npa;
+ struct dentry *nix;
+ struct dentry *npc;
+ struct dentry *cpt;
+ struct dentry *mcs_root;
+ struct dentry *mcs;
+ struct dentry *mcs_rx;
+ struct dentry *mcs_tx;
+ struct dump_ctx npa_aura_ctx;
+ struct dump_ctx npa_pool_ctx;
+ struct dump_ctx nix_cq_ctx;
+ struct dump_ctx nix_rq_ctx;
+ struct dump_ctx nix_sq_ctx;
+ struct cpt_ctx cpt_ctx[MAX_CPT_BLKS];
+ int npa_qsize_id;
+ int nix_qsize_id;
+};
+#endif
+
+struct rvu_work {
+ struct work_struct work;
+ struct rvu *rvu;
+ int num_msgs;
+ int up_num_msgs;
+};
+
+struct rsrc_bmap {
+ unsigned long *bmap; /* Pointer to resource bitmap */
+ u16 max; /* Max resource id or count */
+};
+
+struct rvu_block {
+ struct rsrc_bmap lf;
+ struct admin_queue *aq; /* NIX/NPA AQ */
+ u16 *fn_map; /* LF to pcifunc mapping */
+ bool multislot;
+ bool implemented;
+ u8 addr; /* RVU_BLOCK_ADDR_E */
+ u8 type; /* RVU_BLOCK_TYPE_E */
+ u8 lfshift;
+ u64 lookup_reg;
+ u64 pf_lfcnt_reg;
+ u64 vf_lfcnt_reg;
+ u64 lfcfg_reg;
+ u64 msixcfg_reg;
+ u64 lfreset_reg;
+ unsigned char name[NAME_SIZE];
+ struct rvu *rvu;
+};
+
+struct nix_mcast {
+ struct qmem *mce_ctx;
+ struct qmem *mcast_buf;
+ int replay_pkind;
+ int next_free_mce;
+ struct mutex mce_lock; /* Serialize MCE updates */
+};
+
+struct nix_mce_list {
+ struct hlist_head head;
+ int count;
+ int max;
+};
+
+/* layer metadata to uniquely identify a packet header field */
+struct npc_layer_mdata {
+ u8 lid;
+ u8 ltype;
+ u8 hdr;
+ u8 key;
+ u8 len;
+};
+
+/* Structure to represent a field present in the
 * generated key. A key field may be present anywhere and can
 * be of any size in the generated key. Once this structure
 * is populated for the fields of interest, a field's presence
 * and location (if present) can be determined.
+ */
+struct npc_key_field {
+ /* Masks where all set bits indicate position
+ * of a field in the key
+ */
+ u64 kw_mask[NPC_MAX_KWS_IN_KEY];
+ /* Number of words in the key a field spans. If a field is
+ * of 16 bytes and key offset is 4 then the field will use
+ * 4 bytes in KW0, 8 bytes in KW1 and 4 bytes in KW2 and
+ * nr_kws will be 3(KW0, KW1 and KW2).
+ */
+ int nr_kws;
+ /* used by packet header fields */
+ struct npc_layer_mdata layer_mdata;
+};
+
+struct npc_mcam {
+ struct rsrc_bmap counters;
+ struct mutex lock; /* MCAM entries and counters update lock */
+ unsigned long *bmap; /* bitmap, 0 => bmap_entries */
+ unsigned long *bmap_reverse; /* Reverse bitmap, bmap_entries => 0 */
+ u16 bmap_entries; /* Number of unreserved MCAM entries */
+ u16 bmap_fcnt; /* MCAM entries free count */
+ u16 *entry2pfvf_map;
+ u16 *entry2cntr_map;
+ u16 *cntr2pfvf_map;
+ u16 *cntr_refcnt;
+ u16 *entry2target_pffunc;
+ u8 keysize; /* MCAM keysize 112/224/448 bits */
+ u8 banks; /* Number of MCAM banks */
+ u8 banks_per_entry;/* Number of keywords in key */
+ u16 banksize; /* Number of MCAM entries in each bank */
+ u16 total_entries; /* Total number of MCAM entries */
+ u16 nixlf_offset; /* Offset of nixlf rsvd uncast entries */
+ u16 pf_offset; /* Offset of PF's rsvd bcast, promisc entries */
+ u16 lprio_count;
+ u16 lprio_start;
+ u16 hprio_count;
+ u16 hprio_end;
+ u16 rx_miss_act_cntr; /* Counter for RX MISS action */
+ /* fields present in the generated key */
+ struct npc_key_field tx_key_fields[NPC_KEY_FIELDS_MAX];
+ struct npc_key_field rx_key_fields[NPC_KEY_FIELDS_MAX];
+ u64 tx_features;
+ u64 rx_features;
+ struct list_head mcam_rules;
+};
+
+/* Structure for per RVU func info ie PF/VF */
+struct rvu_pfvf {
+ bool npalf; /* Only one NPALF per RVU_FUNC */
+ bool nixlf; /* Only one NIXLF per RVU_FUNC */
+ u16 sso;
+ u16 ssow;
+ u16 cptlfs;
+ u16 timlfs;
+ u16 cpt1_lfs;
+ u8 cgx_lmac;
+
+ /* Block LF's MSIX vector info */
+ struct rsrc_bmap msix; /* Bitmap for MSIX vector alloc */
+#define MSIX_BLKLF(blkaddr, lf) (((blkaddr) << 8) | ((lf) & 0xFF))
+ u16 *msix_lfmap; /* Vector to block LF mapping */
+
+ /* NPA contexts */
+ struct qmem *aura_ctx;
+ struct qmem *pool_ctx;
+ struct qmem *npa_qints_ctx;
+ unsigned long *aura_bmap;
+ unsigned long *pool_bmap;
+
+ /* NIX contexts */
+ struct qmem *rq_ctx;
+ struct qmem *sq_ctx;
+ struct qmem *cq_ctx;
+ struct qmem *rss_ctx;
+ struct qmem *cq_ints_ctx;
+ struct qmem *nix_qints_ctx;
+ unsigned long *sq_bmap;
+ unsigned long *rq_bmap;
+ unsigned long *cq_bmap;
+
+ u16 rx_chan_base;
+ u16 tx_chan_base;
+ u8 rx_chan_cnt; /* total number of RX channels */
+ u8 tx_chan_cnt; /* total number of TX channels */
+ u16 maxlen;
+ u16 minlen;
+
+ bool hw_rx_tstamp_en; /* Is rx_tstamp enabled */
+ u8 mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */
+ u8 default_mac[ETH_ALEN]; /* MAC address from FWdata */
+
+ /* Broadcast/Multicast/Promisc pkt replication info */
+ u16 bcast_mce_idx;
+ u16 mcast_mce_idx;
+ u16 promisc_mce_idx;
+ struct nix_mce_list bcast_mce_list;
+ struct nix_mce_list mcast_mce_list;
+ struct nix_mce_list promisc_mce_list;
+ bool use_mce_list;
+
+ struct rvu_npc_mcam_rule *def_ucast_rule;
+
+ bool cgx_in_use; /* this PF/VF using CGX? */
+ int cgx_users; /* number of cgx users - used only by PFs */
+
+ int intf_mode;
+ u8 nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */
+ u8 nix_rx_intf; /* NIX0_RX/NIX1_RX interface to NPC */
+ u8 nix_tx_intf; /* NIX0_TX/NIX1_TX interface to NPC */
+ u8 lbkid; /* NIX0/1 lbk link ID */
+	u64 lmt_base_addr; /* Preserving the pcifunc's lmtst base addr */
+	u64 lmt_map_ent_w1; /* Preserving the word1 of lmtst map table entry */
+ unsigned long flags;
+ struct sdp_node_info *sdp_info;
+};
+
+enum rvu_pfvf_flags {
+ NIXLF_INITIALIZED = 0,
+ PF_SET_VF_MAC,
+ PF_SET_VF_CFG,
+ PF_SET_VF_TRUSTED,
+};
+
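+/* Mask that clears the PF_SET_VF_* permission bits while leaving
+ * NIXLF_INITIALIZED untouched.
+ */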
+#define RVU_CLEAR_VF_PERM ~GENMASK(PF_SET_VF_TRUSTED, PF_SET_VF_MAC)
+
+struct nix_txsch {
+ struct rsrc_bmap schq;
+ u8 lvl;
+#define NIX_TXSCHQ_FREE BIT_ULL(1)
+#define NIX_TXSCHQ_CFG_DONE BIT_ULL(0)
+#define TXSCH_MAP_FUNC(__pfvf_map) ((__pfvf_map) & 0xFFFF)
+#define TXSCH_MAP_FLAGS(__pfvf_map) ((__pfvf_map) >> 16)
+#define TXSCH_MAP(__func, __flags) (((__func) & 0xFFFF) | ((__flags) << 16))
+#define TXSCH_SET_FLAG(__pfvf_map, flag) ((__pfvf_map) | ((flag) << 16))
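+	/* Each pfvf_map entry: bits 15:0 = owning PF_FUNC, bits 31:16 = flags. */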
+ u32 *pfvf_map;
+};
+
+struct nix_mark_format {
+ u8 total;
+ u8 in_use;
+ u32 *cfg;
+};
+
+struct npc_pkind {
+ struct rsrc_bmap rsrc;
+ u32 *pfchan_map;
+};
+
+struct nix_flowkey {
+#define NIX_FLOW_KEY_ALG_MAX 32
+ u32 flowkey[NIX_FLOW_KEY_ALG_MAX];
+ int in_use;
+};
+
+struct nix_lso {
+ u8 total;
+ u8 in_use;
+};
+
+struct nix_txvlan {
+#define NIX_TX_VTAG_DEF_MAX 0x400
+ struct rsrc_bmap rsrc;
+ u16 *entry2pfvf_map;
+ struct mutex rsrc_lock; /* Serialize resource alloc/free */
+};
+
+struct nix_ipolicer {
+ struct rsrc_bmap band_prof;
+ u16 *pfvf_map;
+ u16 *match_id;
+ u16 *ref_count;
+};
+
+struct nix_hw {
+ int blkaddr;
+ struct rvu *rvu;
+ struct nix_txsch txsch[NIX_TXSCH_LVL_CNT]; /* Tx schedulers */
+ struct nix_mcast mcast;
+ struct nix_flowkey flowkey;
+ struct nix_mark_format mark_format;
+ struct nix_lso lso;
+ struct nix_txvlan txvlan;
+ struct nix_ipolicer *ipolicer;
+ u64 *tx_credits;
+ u8 cc_mcs_cnt;
+};
+
+/* RVU block's capabilities or functionality,
+ * which vary by silicon version/skew.
+ */
+struct hw_cap {
+ /* Transmit side supported functionality */
+ u8 nix_tx_aggr_lvl; /* Tx link's traffic aggregation level */
+ u16 nix_txsch_per_cgx_lmac; /* Max Q's transmitting to CGX LMAC */
+ u16 nix_txsch_per_lbk_lmac; /* Max Q's transmitting to LBK LMAC */
+ u16 nix_txsch_per_sdp_lmac; /* Max Q's transmitting to SDP LMAC */
+ bool nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */
+ bool nix_shaping; /* Is shaping and coloring supported */
+ bool nix_shaper_toggle_wait; /* Shaping toggle needs poll/wait */
+ bool nix_tx_link_bp; /* Can link backpressure TL queues ? */
+ bool nix_rx_multicast; /* Rx packet replication support */
+ bool nix_common_dwrr_mtu; /* Common DWRR MTU for quantum config */
+ bool per_pf_mbox_regs; /* PF mbox specified in per PF registers ? */
+ bool programmable_chans; /* Channels programmable ? */
+ bool ipolicer;
+ bool npc_hash_extract; /* Hash extract enabled ? */
+ bool npc_exact_match_enabled; /* Exact match supported ? */
+};
+
+struct rvu_hwinfo {
+ u8 total_pfs; /* MAX RVU PFs HW supports */
+ u16 total_vfs; /* Max RVU VFs HW supports */
+ u16 max_vfs_per_pf; /* Max VFs that can be attached to a PF */
+ u8 cgx;
+ u8 lmac_per_cgx;
+ u16 cgx_chan_base; /* CGX base channel number */
+ u16 lbk_chan_base; /* LBK base channel number */
+ u16 sdp_chan_base; /* SDP base channel number */
+ u16 cpt_chan_base; /* CPT base channel number */
+ u8 cgx_links;
+ u8 lbk_links;
+ u8 sdp_links;
+ u8 cpt_links; /* Number of CPT links */
+ u8 npc_kpus; /* No of parser units */
+ u8 npc_pkinds; /* No of port kinds */
+ u8 npc_intfs; /* No of interfaces */
+ u8 npc_kpu_entries; /* No of KPU entries */
+ u16 npc_counters; /* No of match stats counters */
+ u32 lbk_bufsize; /* FIFO size supported by LBK */
+ bool npc_ext_set; /* Extended register set */
+ u64 npc_stat_ena; /* Match stats enable bit */
+
+ struct hw_cap cap;
+ struct rvu_block block[BLK_COUNT]; /* Block info */
+ struct nix_hw *nix;
+ struct rvu *rvu;
+ struct npc_pkind pkind;
+ struct npc_mcam mcam;
+ struct npc_exact_table *table;
+};
+
+struct mbox_wq_info {
+ struct otx2_mbox mbox;
+ struct rvu_work *mbox_wrk;
+
+ struct otx2_mbox mbox_up;
+ struct rvu_work *mbox_wrk_up;
+
+ struct workqueue_struct *mbox_wq;
+};
+
+struct rvu_fwdata {
+#define RVU_FWDATA_HEADER_MAGIC 0xCFDA /* Custom Firmware Data*/
+#define RVU_FWDATA_VERSION 0x0001
+ u32 header_magic;
+ u32 version; /* version id */
+
+ /* MAC address */
+#define PF_MACNUM_MAX 32
+#define VF_MACNUM_MAX 256
+ u64 pf_macs[PF_MACNUM_MAX];
+ u64 vf_macs[VF_MACNUM_MAX];
+ u64 sclk;
+ u64 rclk;
+ u64 mcam_addr;
+ u64 mcam_sz;
+ u64 msixtr_base;
+ u32 ptp_ext_clk_rate;
+ u32 ptp_ext_tstamp;
+#define FWDATA_RESERVED_MEM 1022
+ u64 reserved[FWDATA_RESERVED_MEM];
+#define CGX_MAX 5
+#define CGX_LMACS_MAX 4
+ struct cgx_lmac_fwdata_s cgx_fw_data[CGX_MAX][CGX_LMACS_MAX];
+ /* Do not add new fields below this line */
+};
+
+struct ptp;
+
+/* KPU profile adapter structure gathering all KPU configuration data and abstracting out the
+ * source where it came from.
+ */
+struct npc_kpu_profile_adapter {
+ const char *name;
+ u64 version;
+ const struct npc_lt_def_cfg *lt_def;
+ const struct npc_kpu_profile_action *ikpu; /* array[pkinds] */
+ const struct npc_kpu_profile *kpu; /* array[kpus] */
+ struct npc_mcam_kex *mkex;
+ struct npc_mcam_kex_hash *mkex_hash;
+ bool custom;
+ size_t pkinds;
+ size_t kpus;
+};
+
+#define RVU_SWITCH_LBK_CHAN 63
+
+struct rvu_switch {
+ struct mutex switch_lock; /* Serialize flow installation */
+ u32 used_entries;
+ u16 *entry2pcifunc;
+ u16 mode;
+ u16 start_entry;
+};
+
+struct rvu {
+ void __iomem *afreg_base;
+ void __iomem *pfreg_base;
+ struct pci_dev *pdev;
+ struct device *dev;
+ struct rvu_hwinfo *hw;
+ struct rvu_pfvf *pf;
+ struct rvu_pfvf *hwvf;
+ struct mutex rsrc_lock; /* Serialize resource alloc/free */
+ int vfs; /* Number of VFs attached to RVU */
+ int nix_blkaddr[MAX_NIX_BLKS];
+
+ /* Mbox */
+ struct mbox_wq_info afpf_wq_info;
+ struct mbox_wq_info afvf_wq_info;
+
+ /* PF FLR */
+ struct rvu_work *flr_wrk;
+ struct workqueue_struct *flr_wq;
+ struct mutex flr_lock; /* Serialize FLRs */
+
+ /* MSI-X */
+ u16 num_vec;
+ char *irq_name;
+ bool *irq_allocated;
+ dma_addr_t msix_base_iova;
+ u64 msixtr_base_phy; /* Register reset value */
+
+ /* CGX */
+#define PF_CGXMAP_BASE 1 /* PF 0 is reserved for RVU PF */
+ u16 cgx_mapped_vfs; /* maximum CGX mapped VFs */
+ u8 cgx_mapped_pfs;
+ u8 cgx_cnt_max; /* CGX port count max */
+ u8 *pf2cgxlmac_map; /* pf to cgx_lmac map */
+ u64 *cgxlmac2pf_map; /* bitmap of mapped pfs for
+ * every cgx lmac port
+ */
+ unsigned long pf_notify_bmap; /* Flags for PF notification */
+ void **cgx_idmap; /* cgx id to cgx data map table */
+ struct work_struct cgx_evh_work;
+ struct workqueue_struct *cgx_evh_wq;
+ spinlock_t cgx_evq_lock; /* cgx event queue lock */
+ struct list_head cgx_evq_head; /* cgx event queue head */
+ struct mutex cgx_cfg_lock; /* serialize cgx configuration */
+
+ char mkex_pfl_name[MKEX_NAME_LEN]; /* Configured MKEX profile name */
+ char kpu_pfl_name[KPU_NAME_LEN]; /* Configured KPU profile name */
+
+ /* Firmware data */
+ struct rvu_fwdata *fwdata;
+ void *kpu_fwdata;
+ size_t kpu_fwdata_sz;
+ void __iomem *kpu_prfl_addr;
+
+ /* NPC KPU data */
+ struct npc_kpu_profile_adapter kpu;
+
+ struct ptp *ptp;
+
+ int mcs_blk_cnt;
+
+#ifdef CONFIG_DEBUG_FS
+ struct rvu_debugfs rvu_dbg;
+#endif
+ struct rvu_devlink *rvu_dl;
+
+ /* RVU switch implementation over NPC with DMAC rules */
+ struct rvu_switch rswitch;
+
+ struct work_struct mcs_intr_work;
+ struct workqueue_struct *mcs_intr_wq;
+ struct list_head mcs_intrq_head;
+ /* mcs interrupt queue lock */
+ spinlock_t mcs_intrq_lock;
+};
+
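+/* AF register accessors: each RVU block's CSRs sit at a 256MB (1 << 28)
+ * stride within the AF BAR, so 'block' selects the region and 'offset'
+ * the register within it.
+ */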
+static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
+{
+ writeq(val, rvu->afreg_base + ((block << 28) | offset));
+}
+
+static inline u64 rvu_read64(struct rvu *rvu, u64 block, u64 offset)
+{
+ return readq(rvu->afreg_base + ((block << 28) | offset));
+}
+
+static inline void rvupf_write64(struct rvu *rvu, u64 offset, u64 val)
+{
+ writeq(val, rvu->pfreg_base + offset);
+}
+
+static inline u64 rvupf_read64(struct rvu *rvu, u64 offset)
+{
+ return readq(rvu->pfreg_base + offset);
+}
+
+/* Silicon revisions */
+static inline bool is_rvu_pre_96xx_C0(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+ /* 96XX A0/B0, 95XX A0/A1/B0 chips */
+ return ((pdev->revision == 0x00) || (pdev->revision == 0x01) ||
+ (pdev->revision == 0x10) || (pdev->revision == 0x11) ||
+ (pdev->revision == 0x14));
+}
+
+static inline bool is_rvu_96xx_A0(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ return (pdev->revision == 0x00);
+}
+
+static inline bool is_rvu_96xx_B0(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ return (pdev->revision == 0x00) || (pdev->revision == 0x01);
+}
+
+static inline bool is_rvu_95xx_A0(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ return (pdev->revision == 0x10) || (pdev->revision == 0x11);
+}
+
+/* REVID for PCIe devices.
 * Bits 0..1: minor pass, bits 3..2: major pass,
+ * bits 7..4: midr id
+ */
+#define PCI_REVISION_ID_96XX 0x00
+#define PCI_REVISION_ID_95XX 0x10
+#define PCI_REVISION_ID_95XXN 0x20
+#define PCI_REVISION_ID_98XX 0x30
+#define PCI_REVISION_ID_95XXMM 0x40
+#define PCI_REVISION_ID_95XXO 0xE0
+
+static inline bool is_rvu_otx2(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ u8 midr = pdev->revision & 0xF0;
+
+ return (midr == PCI_REVISION_ID_96XX || midr == PCI_REVISION_ID_95XX ||
+ midr == PCI_REVISION_ID_95XXN || midr == PCI_REVISION_ID_98XX ||
+ midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO);
+}
+
+static inline bool is_rvu_npc_hash_extract_en(struct rvu *rvu)
+{
+ u64 npc_const3;
+
+ npc_const3 = rvu_read64(rvu, BLKADDR_NPC, NPC_AF_CONST3);
+ if (!(npc_const3 & BIT_ULL(62)))
+ return false;
+
+ return true;
+}
+
+static inline u16 rvu_nix_chan_cgx(struct rvu *rvu, u8 cgxid,
+ u8 lmacid, u8 chan)
+{
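+	/* NIX_AF_CONST[7:0] holds the number of channels per CGX LMAC. */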
+ u64 nix_const = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST);
+ u16 cgx_chans = nix_const & 0xFFULL;
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ if (!hw->cap.programmable_chans)
+ return NIX_CHAN_CGX_LMAC_CHX(cgxid, lmacid, chan);
+
+ return rvu->hw->cgx_chan_base +
+ (cgxid * hw->lmac_per_cgx + lmacid) * cgx_chans + chan;
+}
+
+static inline u16 rvu_nix_chan_lbk(struct rvu *rvu, u8 lbkid,
+ u8 chan)
+{
+ u64 nix_const = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST);
+ u16 lbk_chans = (nix_const >> 16) & 0xFFULL;
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ if (!hw->cap.programmable_chans)
+ return NIX_CHAN_LBK_CHX(lbkid, chan);
+
+ return rvu->hw->lbk_chan_base + lbkid * lbk_chans + chan;
+}
+
+static inline u16 rvu_nix_chan_sdp(struct rvu *rvu, u8 chan)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ if (!hw->cap.programmable_chans)
+ return NIX_CHAN_SDP_CHX(chan);
+
+ return hw->sdp_chan_base + chan;
+}
+
+static inline u16 rvu_nix_chan_cpt(struct rvu *rvu, u8 chan)
+{
+ return rvu->hw->cpt_chan_base + chan;
+}
+
+static inline bool is_rvu_supports_nix1(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ if (pdev->subsystem_device == PCI_SUBSYS_DEVID_98XX)
+ return true;
+
+ return false;
+}
+
+/* Function Prototypes
+ * RVU
+ */
+static inline bool is_afvf(u16 pcifunc)
+{
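+	/* AF's own VFs have PF number 0, so only the FUNC bits can be set. */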
+ return !(pcifunc & ~RVU_PFVF_FUNC_MASK);
+}
+
+static inline bool is_vf(u16 pcifunc)
+{
+ return !!(pcifunc & RVU_PFVF_FUNC_MASK);
+}
+
+/* check if PF_FUNC is AF */
+static inline bool is_pffunc_af(u16 pcifunc)
+{
+ return !pcifunc;
+}
+
+static inline bool is_rvu_fwdata_valid(struct rvu *rvu)
+{
+ return (rvu->fwdata->header_magic == RVU_FWDATA_HEADER_MAGIC) &&
+ (rvu->fwdata->version == RVU_FWDATA_VERSION);
+}
+
+int rvu_alloc_bitmap(struct rsrc_bmap *rsrc);
+void rvu_free_bitmap(struct rsrc_bmap *rsrc);
+int rvu_alloc_rsrc(struct rsrc_bmap *rsrc);
+void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
+bool is_rsrc_free(struct rsrc_bmap *rsrc, int id);
+int rvu_rsrc_free_count(struct rsrc_bmap *rsrc);
+int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc);
+bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc);
+u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr);
+int rvu_get_pf(u16 pcifunc);
+struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc);
+void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf);
+bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr);
+bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype);
+int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot);
+int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf);
+int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
+int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero);
+int rvu_get_num_lbk_chans(void);
+int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc,
+ u16 global_slot, u16 *slot_in_block);
+
+/* RVU HW reg validation */
+enum regmap_block {
+ TXSCHQ_HWREGMAP = 0,
+ MAX_HWREGMAP,
+};
+
+bool rvu_check_valid_reg(int regmap, int regblk, u64 reg);
+
+/* NPA/NIX AQ APIs */
+int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
+ int qsize, int inst_size, int res_size);
+void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq);
+
+/* SDP APIs */
+int rvu_sdp_init(struct rvu *rvu);
+bool is_sdp_pfvf(u16 pcifunc);
+bool is_sdp_pf(u16 pcifunc);
+bool is_sdp_vf(u16 pcifunc);
+
+/* CGX APIs */
+static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf)
+{
+ return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs) &&
+ !is_sdp_pf(pf << RVU_PFVF_PF_SHIFT);
+}
+
+static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
+{
+ *cgx_id = (map >> 4) & 0xF;
+ *lmac_id = (map & 0xF);
+}
+
+static inline bool is_cgx_vf(struct rvu *rvu, u16 pcifunc)
+{
+ return ((pcifunc & RVU_PFVF_FUNC_MASK) &&
+ is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)));
+}
+
+#define M(_name, _id, fn_name, req, rsp) \
+int rvu_mbox_handler_ ## fn_name(struct rvu *, struct req *, struct rsp *);
+MBOX_MESSAGES
+#undef M
+
+int rvu_cgx_init(struct rvu *rvu);
+int rvu_cgx_exit(struct rvu *rvu);
+void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu);
+int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start);
+void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable);
+int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start);
+int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id, int index,
+ int rxtxflag, u64 *stat);
+void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc);
+
+/* NPA APIs */
+int rvu_npa_init(struct rvu *rvu);
+void rvu_npa_freemem(struct rvu *rvu);
+void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf);
+int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
+ struct npa_aq_enq_rsp *rsp);
+
+/* NIX APIs */
+bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc);
+int rvu_nix_init(struct rvu *rvu);
+int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
+ int blkaddr, u32 cfg);
+void rvu_nix_freemem(struct rvu *rvu);
+int rvu_get_nixlf_count(struct rvu *rvu);
+void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf);
+int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr);
+int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
+ struct nix_mce_list *mce_list,
+ int mce_idx, int mcam_index, bool add);
+void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
+ struct nix_mce_list **mce_list, int *mce_idx);
+struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr);
+int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr);
+void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc);
+int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
+ struct nix_hw **nix_hw, int *blkaddr);
+int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
+ u16 rq_idx, u16 match_id);
+int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
+ struct nix_cn10k_aq_enq_req *aq_req,
+ struct nix_cn10k_aq_enq_rsp *aq_rsp,
+ u16 pcifunc, u8 ctype, u32 qidx);
+int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc);
+u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu);
+u32 convert_bytes_to_dwrr_mtu(u32 bytes);
+
+/* NPC APIs */
+void rvu_npc_freemem(struct rvu *rvu);
+int rvu_npc_get_pkind(struct rvu *rvu, u16 pf);
+void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf);
+int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool en);
+void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, u64 chan, u8 *mac_addr);
+void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, u64 chan, u8 chan_cnt);
+void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ bool enable);
+void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, u64 chan);
+void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ bool enable);
+void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ u64 chan);
+void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ bool enable);
+
+void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, int type, bool enable);
+void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
+bool rvu_npc_enable_mcam_by_entry_index(struct rvu *rvu, int entry, int intf, bool enable);
+void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
+void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
+void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
+void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
+ int group, int alg_idx, int mcam_index);
+
+void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc,
+ int blkaddr, int *alloc_cnt,
+ int *enable_cnt);
+void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc,
+ int blkaddr, int *alloc_cnt,
+ int *enable_cnt);
+bool is_npc_intf_tx(u8 intf);
+bool is_npc_intf_rx(u8 intf);
+bool is_npc_interface_valid(struct rvu *rvu, u8 intf);
+int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena);
+int npc_flow_steering_init(struct rvu *rvu, int blkaddr);
+const char *npc_get_field_name(u8 hdr);
+int npc_get_bank(struct npc_mcam *mcam, int index);
+void npc_mcam_enable_flows(struct rvu *rvu, u16 target);
+void npc_mcam_disable_flows(struct rvu *rvu, u16 target);
+void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index, bool enable);
+void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 src, struct mcam_entry *entry,
+ u8 *intf, u8 *ena);
+bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc);
+bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature);
+u32 rvu_cgx_get_fifolen(struct rvu *rvu);
+void *rvu_first_cgx_pdata(struct rvu *rvu);
+int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id);
+int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable);
+int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable);
+int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause,
+ u16 pfc_en);
+int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause);
+u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac);
+int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf,
+ int type);
+bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr,
+ int index);
+int rvu_npc_init(struct rvu *rvu);
+int npc_install_mcam_drop_rule(struct rvu *rvu, int mcam_idx, u16 *counter_idx,
+ u64 chan_val, u64 chan_mask, u64 exact_val, u64 exact_mask,
+ u64 bcast_mcast_val, u64 bcast_mcast_mask);
+void npc_mcam_rsrcs_reserve(struct rvu *rvu, int blkaddr, int entry_idx);
+bool npc_is_feature_supported(struct rvu *rvu, u64 features, u8 intf);
+
+/* CPT APIs */
+int rvu_cpt_register_interrupts(struct rvu *rvu);
+void rvu_cpt_unregister_interrupts(struct rvu *rvu);
+int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf,
+ int slot);
+int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc);
+
+#define NDC_AF_BANK_MASK GENMASK_ULL(7, 0)
+#define NDC_AF_BANK_LINE_MASK GENMASK_ULL(31, 16)
+
+/* CN10K RVU */
+int rvu_set_channels_base(struct rvu *rvu);
+void rvu_program_channels(struct rvu *rvu);
+
+/* CN10K RVU - LMT*/
+void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc);
+
+#ifdef CONFIG_DEBUG_FS
+void rvu_dbg_init(struct rvu *rvu);
+void rvu_dbg_exit(struct rvu *rvu);
+#else
+static inline void rvu_dbg_init(struct rvu *rvu) {}
+static inline void rvu_dbg_exit(struct rvu *rvu) {}
+#endif
+
+int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr);
+
+/* RVU Switch */
+void rvu_switch_enable(struct rvu *rvu);
+void rvu_switch_disable(struct rvu *rvu);
+void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc);
+
+int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir,
+ u64 pkind, u8 var_len_off, u8 var_len_off_mask,
+ u8 shift_dir);
+int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
+
+/* CN10K MCS */
+int rvu_mcs_init(struct rvu *rvu);
+int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc);
+void rvu_mcs_ptp_cfg(struct rvu *rvu, u8 rpm_id, u8 lmac_id, bool ena);
+void rvu_mcs_exit(struct rvu *rvu);
+
+#endif /* RVU_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
new file mode 100644
index 000000000..bcb4385d0
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -0,0 +1,1254 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu.h"
+#include "cgx.h"
+#include "lmac_common.h"
+#include "rvu_reg.h"
+#include "rvu_trace.h"
+#include "rvu_npc_hash.h"
+
+struct cgx_evq_entry {
+ struct list_head evq_node;
+ struct cgx_link_event link_event;
+};
+
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
+static struct _req_type __maybe_unused \
+*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid) \
+{ \
+ struct _req_type *req; \
+ \
+ req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
+ &rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
+ sizeof(struct _rsp_type)); \
+ if (!req) \
+ return NULL; \
+ req->hdr.sig = OTX2_MBOX_REQ_SIG; \
+ req->hdr.id = _id; \
+ trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req)); \
+ return req; \
+}
+
+MBOX_UP_CGX_MESSAGES
+#undef M
+
+bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature)
+{
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return 0;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+
+ return (cgx_features_get(cgxd) & feature);
+}
+
+#define CGX_OFFSET(x) ((x) * rvu->hw->lmac_per_cgx)
+/* Returns bitmap of mapped PFs */
+static u64 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
+{
+ return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
+}
+
+int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
+{
+ unsigned long pfmap;
+
+ pfmap = cgxlmac_to_pfmap(rvu, cgx_id, lmac_id);
+
+ /* Assumes only one pf mapped to a cgx lmac port */
+ if (!pfmap)
+ return -ENODEV;
+ else
+ return find_first_bit(&pfmap,
+ rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
+}
+
+static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
+{
+ return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
+}
+
+void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
+{
+ if (cgx_id >= rvu->cgx_cnt_max)
+ return NULL;
+
+ return rvu->cgx_idmap[cgx_id];
+}
+
+/* Return the first enabled CGX instance; if none are enabled, return NULL */
+void *rvu_first_cgx_pdata(struct rvu *rvu)
+{
+ int first_enabled_cgx = 0;
+ void *cgxd = NULL;
+
+ for (; first_enabled_cgx < rvu->cgx_cnt_max; first_enabled_cgx++) {
+ cgxd = rvu_cgx_pdata(first_enabled_cgx, rvu);
+ if (cgxd)
+ break;
+ }
+
+ return cgxd;
+}
+
+/* Based on P2X connectivity find mapped NIX block for a PF */
+static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
+ int cgx_id, int lmac_id)
+{
+ struct rvu_pfvf *pfvf = &rvu->pf[pf];
+ u8 p2x;
+
+ p2x = cgx_lmac_get_p2x(cgx_id, lmac_id);
+ /* Firmware sets P2X_SELECT as either NIX0 or NIX1 */
+ pfvf->nix_blkaddr = BLKADDR_NIX0;
+ if (is_rvu_supports_nix1(rvu) && p2x == CMR_P2X_SEL_NIX1)
+ pfvf->nix_blkaddr = BLKADDR_NIX1;
+}
+
+static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
+{
+ struct npc_pkind *pkind = &rvu->hw->pkind;
+ int cgx_cnt_max = rvu->cgx_cnt_max;
+ int pf = PF_CGXMAP_BASE;
+ unsigned long lmac_bmap;
+ int size, free_pkind;
+ int cgx, lmac, iter;
+ int numvfs, hwvfs;
+
+ if (!cgx_cnt_max)
+ return 0;
+
+ if (cgx_cnt_max > 0xF || rvu->hw->lmac_per_cgx > 0xF)
+ return -EINVAL;
+
+ /* Alloc map table
+	 * An additional entry is required since PF ids start from 1,
+	 * hence the entry at offset 0 is invalid.
+ */
+ size = (cgx_cnt_max * rvu->hw->lmac_per_cgx + 1) * sizeof(u8);
+ rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
+ if (!rvu->pf2cgxlmac_map)
+ return -ENOMEM;
+
+ /* Initialize all entries with an invalid cgx and lmac id */
+ memset(rvu->pf2cgxlmac_map, 0xFF, size);
+
+ /* Reverse map table */
+ rvu->cgxlmac2pf_map =
+ devm_kzalloc(rvu->dev,
+ cgx_cnt_max * rvu->hw->lmac_per_cgx * sizeof(u64),
+ GFP_KERNEL);
+ if (!rvu->cgxlmac2pf_map)
+ return -ENOMEM;
+
+ rvu->cgx_mapped_pfs = 0;
+ for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
+ if (!rvu_cgx_pdata(cgx, rvu))
+ continue;
+ lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
+ for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
+ lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
+ iter);
+ rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
+ rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
+ free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
+ pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
+ rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
+ rvu->cgx_mapped_pfs++;
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
+ rvu->cgx_mapped_vfs += numvfs;
+ pf++;
+ }
+ }
+ return 0;
+}
+
+static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu)
+{
+ struct cgx_evq_entry *qentry;
+ unsigned long flags;
+ int err;
+
+ qentry = kmalloc(sizeof(*qentry), GFP_KERNEL);
+ if (!qentry)
+ return -ENOMEM;
+
+ /* Lock the event queue before we read the local link status */
+ spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
+ err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
+ &qentry->link_event.link_uinfo);
+ qentry->link_event.cgx_id = cgx_id;
+ qentry->link_event.lmac_id = lmac_id;
+ if (err) {
+ kfree(qentry);
+ goto skip_add;
+ }
+ list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
+skip_add:
+ spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
+
+ /* start worker to process the events */
+ queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);
+
+ return 0;
+}
+
+/* This is called from interrupt context and is expected to be atomic */
+static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
+{
+ struct cgx_evq_entry *qentry;
+ struct rvu *rvu = data;
+
+ /* post event to the event queue */
+ qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
+ if (!qentry)
+ return -ENOMEM;
+ qentry->link_event = *event;
+ spin_lock(&rvu->cgx_evq_lock);
+ list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
+ spin_unlock(&rvu->cgx_evq_lock);
+
+ /* start worker to process the events */
+ queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);
+
+ return 0;
+}
+
+static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
+{
+ struct cgx_link_user_info *linfo;
+ struct cgx_link_info_msg *msg;
+ unsigned long pfmap;
+ int err, pfid;
+
+ linfo = &event->link_uinfo;
+ pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
+
+ do {
+ pfid = find_first_bit(&pfmap,
+ rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
+ clear_bit(pfid, &pfmap);
+
+ /* check if notification is enabled */
+ if (!test_bit(pfid, &rvu->pf_notify_bmap)) {
+ dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n",
+ event->cgx_id, event->lmac_id,
+ linfo->link_up ? "UP" : "DOWN");
+ continue;
+ }
+
+ /* Send mbox message to PF */
+ msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
+ if (!msg)
+ continue;
+ msg->link_info = *linfo;
+ otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid);
+ err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
+ if (err)
+ dev_warn(rvu->dev, "notification to pf %d failed\n",
+ pfid);
+ } while (pfmap);
+}
+
+static void cgx_evhandler_task(struct work_struct *work)
+{
+ struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
+ struct cgx_evq_entry *qentry;
+ struct cgx_link_event *event;
+ unsigned long flags;
+
+ do {
+ /* Dequeue an event */
+ spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
+ qentry = list_first_entry_or_null(&rvu->cgx_evq_head,
+ struct cgx_evq_entry,
+ evq_node);
+ if (qentry)
+ list_del(&qentry->evq_node);
+ spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
+ if (!qentry)
+ break; /* nothing more to process */
+
+ event = &qentry->link_event;
+
+ /* process event */
+ cgx_notify_pfs(event, rvu);
+ kfree(qentry);
+ } while (1);
+}
+
+static int cgx_lmac_event_handler_init(struct rvu *rvu)
+{
+ unsigned long lmac_bmap;
+ struct cgx_event_cb cb;
+ int cgx, lmac, err;
+ void *cgxd;
+
+ spin_lock_init(&rvu->cgx_evq_lock);
+ INIT_LIST_HEAD(&rvu->cgx_evq_head);
+ INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
+ rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
+ if (!rvu->cgx_evh_wq) {
+ dev_err(rvu->dev, "alloc workqueue failed");
+ return -ENOMEM;
+ }
+
+ cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */
+ cb.data = rvu;
+
+ for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
+ cgxd = rvu_cgx_pdata(cgx, rvu);
+ if (!cgxd)
+ continue;
+ lmac_bmap = cgx_get_lmac_bmap(cgxd);
+ for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx) {
+ err = cgx_lmac_evh_register(&cb, cgxd, lmac);
+ if (err)
+ dev_err(rvu->dev,
+ "%d:%d handler register failed\n",
+ cgx, lmac);
+ }
+ }
+
+ return 0;
+}
+
+static void rvu_cgx_wq_destroy(struct rvu *rvu)
+{
+ if (rvu->cgx_evh_wq) {
+ destroy_workqueue(rvu->cgx_evh_wq);
+ rvu->cgx_evh_wq = NULL;
+ }
+}
+
+int rvu_cgx_init(struct rvu *rvu)
+{
+ int cgx, err;
+ void *cgxd;
+
+	/* CGX port ids start from 0 and are not necessarily contiguous.
+ * Hence we allocate resources based on the maximum port id value.
+ */
+ rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
+ if (!rvu->cgx_cnt_max) {
+ dev_info(rvu->dev, "No CGX devices found!\n");
+ return -ENODEV;
+ }
+
+ rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
+ sizeof(void *), GFP_KERNEL);
+ if (!rvu->cgx_idmap)
+ return -ENOMEM;
+
+ /* Initialize the cgxdata table */
+ for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++)
+ rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx);
+
+ /* Map CGX LMAC interfaces to RVU PFs */
+ err = rvu_map_cgx_lmac_pf(rvu);
+ if (err)
+ return err;
+
+ /* Register for CGX events */
+ err = cgx_lmac_event_handler_init(rvu);
+ if (err)
+ return err;
+
+ mutex_init(&rvu->cgx_cfg_lock);
+
+	/* Ensure event handler registration is completed before
+	 * we turn on the links.
+ */
+ mb();
+
+ /* Do link up for all CGX ports */
+ for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
+ cgxd = rvu_cgx_pdata(cgx, rvu);
+ if (!cgxd)
+ continue;
+ err = cgx_lmac_linkup_start(cgxd);
+ if (err)
+ dev_err(rvu->dev,
+ "Link up process failed to start on cgx %d\n",
+ cgx);
+ }
+
+ return 0;
+}
+
+int rvu_cgx_exit(struct rvu *rvu)
+{
+ unsigned long lmac_bmap;
+ int cgx, lmac;
+ void *cgxd;
+
+ for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
+ cgxd = rvu_cgx_pdata(cgx, rvu);
+ if (!cgxd)
+ continue;
+ lmac_bmap = cgx_get_lmac_bmap(cgxd);
+ for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx)
+ cgx_lmac_evh_unregister(cgxd, lmac);
+ }
+
+ /* Ensure event handler unregister is completed */
+ mb();
+
+ rvu_cgx_wq_destroy(rvu);
+ return 0;
+}
+
+/* Most of the CGX configuration is restricted to the mapped PF only,
 * VFs of the mapped PF and other PFs are not allowed. This function checks
 * whether a PF_FUNC is permitted to do the config or not.
+ */
+inline bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
+{
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
+ !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
+ return false;
+ return true;
+}
+
+void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
+{
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+
+ mac_ops = get_mac_ops(cgxd);
+ /* Set / clear CTL_BCK to control pause frame forwarding to NIX */
+ if (enable)
+ mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, true);
+ else
+ mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, false);
+}
+
+int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
+{
+ int pf = rvu_get_pf(pcifunc);
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return LMAC_AF_ERR_PERM_DENIED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ return mac_ops->mac_rx_tx_enable(cgxd, lmac_id, start);
+}
+
+int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable)
+{
+ int pf = rvu_get_pf(pcifunc);
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return LMAC_AF_ERR_PERM_DENIED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ return mac_ops->mac_tx_enable(cgxd, lmac_id, enable);
+}
+
+int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable)
+{
+ struct mac_ops *mac_ops;
+
+ mac_ops = get_mac_ops(cgxd);
+ return mac_ops->mac_tx_enable(cgxd, lmac_id, enable);
+}
+
+void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
+{
+ int pf = rvu_get_pf(pcifunc);
+ int i = 0, lmac_count = 0;
+ u8 max_dmac_filters;
+ u8 cgx_id, lmac_id;
+ void *cgx_dev;
+
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return;
+
+ if (rvu_npc_exact_has_match_table(rvu)) {
+ rvu_npc_exact_reset(rvu, pcifunc);
+ return;
+ }
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgx_dev = cgx_get_pdata(cgx_id);
+ lmac_count = cgx_get_lmac_cnt(cgx_dev);
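+	/* DMAC filter entries on a CGX are shared equally among its LMACs. */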
+ max_dmac_filters = MAX_DMAC_ENTRIES_PER_CGX / lmac_count;
+
+ for (i = 0; i < max_dmac_filters; i++)
+ cgx_lmac_addr_del(cgx_id, lmac_id, i);
+
+	/* cgx_lmac_addr_del() does not clear the entry at index 0,
+	 * so it needs to be done explicitly.
+ */
+ cgx_lmac_addr_reset(cgx_id, lmac_id);
+}
+
+int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
+ return 0;
+}
+
+static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req,
+ void *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct mac_ops *mac_ops;
+ int stat = 0, err = 0;
+ u64 tx_stat, rx_stat;
+ u8 cgx_idx, lmac;
+ void *cgxd;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return LMAC_AF_ERR_PERM_DENIED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
+ cgxd = rvu_cgx_pdata(cgx_idx, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ /* Rx stats */
+ while (stat < mac_ops->rx_stats_cnt) {
+ err = mac_ops->mac_get_rx_stats(cgxd, lmac, stat, &rx_stat);
+ if (err)
+ return err;
+ if (mac_ops->rx_stats_cnt == RPM_RX_STATS_COUNT)
+ ((struct rpm_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
+ else
+ ((struct cgx_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
+ stat++;
+ }
+
+ /* Tx stats */
+ stat = 0;
+ while (stat < mac_ops->tx_stats_cnt) {
+ err = mac_ops->mac_get_tx_stats(cgxd, lmac, stat, &tx_stat);
+ if (err)
+ return err;
+ if (mac_ops->tx_stats_cnt == RPM_TX_STATS_COUNT)
+ ((struct rpm_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
+ else
+ ((struct cgx_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
+ stat++;
+ }
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
+ struct cgx_stats_rsp *rsp)
+{
+ return rvu_lmac_get_stats(rvu, req, (void *)rsp);
+}
+
+int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req,
+ struct rpm_stats_rsp *rsp)
+{
+ return rvu_lmac_get_stats(rvu, req, (void *)rsp);
+}
+
+int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
+ struct msg_req *req,
+ struct cgx_fec_stats_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_idx, lmac;
+ void *cgxd;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return LMAC_AF_ERR_PERM_DENIED;
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
+
+ cgxd = rvu_cgx_pdata(cgx_idx, rvu);
+ return cgx_get_fec_stats(cgxd, lmac, rsp);
+}
+
+int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
+ struct cgx_mac_addr_set_or_get *req,
+ struct cgx_mac_addr_set_or_get *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_mac_addr_set(rvu, req, rsp);
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);
+
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu,
+ struct cgx_mac_addr_add_req *req,
+ struct cgx_mac_addr_add_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+ int rc = 0;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_mac_addr_add(rvu, req, rsp);
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr);
+ if (rc >= 0) {
+ rsp->index = rc;
+ return 0;
+ }
+
+ return rc;
+}
+
+int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu,
+ struct cgx_mac_addr_del_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_mac_addr_del(rvu, req, rsp);
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return cgx_lmac_addr_del(cgx_id, lmac_id, req->index);
+}
+
+int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu,
+ struct msg_req *req,
+ struct cgx_max_dmac_entries_get_rsp
+ *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+	/* If the msg is received from PFs that are not mapped to CGX LMACs,
+	 * or from a VF, then no entries are allocated for DMAC filters at the
+	 * CGX level. So return zero.
+ */
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) {
+ rsp->max_dmac_filters = 0;
+ return 0;
+ }
+
+ if (rvu_npc_exact_has_match_table(rvu)) {
+ rsp->max_dmac_filters = rvu_npc_exact_get_max_entries(rvu);
+ return 0;
+ }
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rsp->max_dmac_filters = cgx_lmac_addr_max_entries_get(cgx_id, lmac_id);
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
+ struct cgx_mac_addr_set_or_get *req,
+ struct cgx_mac_addr_set_or_get *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+ int rc = 0, i;
+ u64 cfg;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ rsp->hdr.rc = rc;
+ cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
+	/* copy 48 bit mac address to rsp->mac_addr */
+ for (i = 0; i < ETH_ALEN; i++)
+ rsp->mac_addr[i] = cfg >> (ETH_ALEN - 1 - i) * 8;
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ /* Disable drop on non hit rule */
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_promisc_enable(rvu, req->hdr.pcifunc);
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ cgx_lmac_promisc_config(cgx_id, lmac_id, true);
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ /* Disable drop on non hit rule */
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_promisc_disable(rvu, req->hdr.pcifunc);
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ cgx_lmac_promisc_config(cgx_id, lmac_id, false);
+ return 0;
+}
+
+static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int pf = rvu_get_pf(pcifunc);
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
+ return 0;
+
+	/* This msg is expected only from PFs that are mapped to CGX LMACs;
+	 * if received from another PF/VF, simply ACK, there is nothing to do.
+ */
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
+ !is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+
+ mac_ops = get_mac_ops(cgxd);
+ mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, enable);
+ /* If PTP is enabled then inform NPC that packets to be
+ * parsed by this PF will have their data shifted by 8 bytes
+ * and if PTP is disabled then no shift is required
+ */
+ if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable))
+ return -EINVAL;
+ /* This flag is required to clean up CGX conf if app gets killed */
+ pfvf->hw_rx_tstamp_en = enable;
+
+ /* Inform MCS about 8B RX header */
+ rvu_mcs_ptp_cfg(rvu, cgx_id, lmac_id, enable);
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ if (!is_pf_cgxmapped(rvu, rvu_get_pf(req->hdr.pcifunc)))
+ return -EPERM;
+
+ return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true);
+}
+
+int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, false);
+}
+
+static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ if (en) {
+ set_bit(pf, &rvu->pf_notify_bmap);
+ /* Send the current link status to PF */
+ rvu_cgx_send_link_info(cgx_id, lmac_id, rvu);
+ } else {
+ clear_bit(pf, &rvu->pf_notify_bmap);
+ }
+
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
+ struct cgx_link_info_msg *rsp)
+{
+ u8 cgx_id, lmac_id;
+ int pf, err;
+
+ pf = rvu_get_pf(req->hdr.pcifunc);
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
+ &rsp->link_info);
+ return err;
+}
+
+int rvu_mbox_handler_cgx_features_get(struct rvu *rvu,
+ struct msg_req *req,
+ struct cgx_features_info_msg *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_idx, lmac;
+ void *cgxd;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return 0;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
+ cgxd = rvu_cgx_pdata(cgx_idx, rvu);
+ rsp->lmac_features = cgx_features_get(cgxd);
+
+ return 0;
+}
+
+u32 rvu_cgx_get_fifolen(struct rvu *rvu)
+{
+ struct mac_ops *mac_ops;
+ u32 fifo_len;
+
+ mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
+ fifo_len = mac_ops ? mac_ops->fifo_len : 0;
+
+ return fifo_len;
+}
+
+u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac)
+{
+ struct mac_ops *mac_ops;
+ void *cgxd;
+
+ cgxd = rvu_cgx_pdata(cgx, rvu);
+ if (!cgxd)
+ return 0;
+
+ mac_ops = get_mac_ops(cgxd);
+ if (!mac_ops->lmac_fifo_len)
+ return 0;
+
+ return mac_ops->lmac_fifo_len(cgxd, lmac);
+}
+
+static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
+{
+ int pf = rvu_get_pf(pcifunc);
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));
+
+ return mac_ops->mac_lmac_intl_lbk(rvu_cgx_pdata(cgx_id, rvu),
+ lmac_id, en);
+}
+
+int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true);
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
+ return 0;
+}
+
+int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 rx_pfc = 0, tx_pfc = 0;
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC))
+ return 0;
+
+	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
+	 * if received from any other PF/VF, simply ACK, nothing to do.
+ */
+ if (!is_pf_cgxmapped(rvu, pf))
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &tx_pfc, &rx_pfc);
+ if (tx_pfc || rx_pfc) {
+ dev_warn(rvu->dev,
+			 "Cannot configure 802.3X flow control as PFC frames are enabled");
+ return LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
+ pcifunc & RVU_PFVF_FUNC_MASK)) {
+ mutex_unlock(&rvu->rsrc_lock);
+ return LMAC_AF_ERR_PERM_DENIED;
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return mac_ops->mac_enadis_pause_frm(cgxd, lmac_id, tx_pause, rx_pause);
+}
+
+int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
+ struct cgx_pause_frm_cfg *req,
+ struct cgx_pause_frm_cfg *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ int err = 0;
+ void *cgxd;
+
+	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
+	 * if received from any other PF/VF, simply ACK, nothing to do.
+ */
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ if (req->set)
+ err = rvu_cgx_cfg_pause_frm(rvu, req->hdr.pcifunc, req->tx_pause, req->rx_pause);
+ else
+ mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);
+
+ return err;
+}
+
+int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return cgx_get_phy_fec_stats(rvu_cgx_pdata(cgx_id, rvu), lmac_id);
+}
+
+/* Finds the cumulative sum of NIX rx/tx counters from the LFs of a PF and
+ * those of its VFs as well, i.e. the NIX rx/tx counters at the CGX port level.
+ */
+int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id,
+ int index, int rxtxflag, u64 *stat)
+{
+ struct rvu_block *block;
+ int blkaddr;
+ u16 pcifunc;
+ int pf, lf;
+
+ *stat = 0;
+
+ if (!cgxd || !rvu)
+ return -EINVAL;
+
+ pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
+ if (pf < 0)
+ return pf;
+
+	/* Assumes the LFs of a PF and all of its VFs belong to the same
+	 * NIX block.
+	 */
+ pcifunc = pf << RVU_PFVF_PF_SHIFT;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return 0;
+ block = &rvu->hw->block[blkaddr];
+
+ for (lf = 0; lf < block->lf.max; lf++) {
+ /* Check if a lf is attached to this PF or one of its VFs */
+ if (!((block->fn_map[lf] & ~RVU_PFVF_FUNC_MASK) == (pcifunc &
+ ~RVU_PFVF_FUNC_MASK)))
+ continue;
+ if (rxtxflag == NIX_STATS_RX)
+ *stat += rvu_read64(rvu, blkaddr,
+ NIX_AF_LFX_RX_STATX(lf, index));
+ else
+ *stat += rvu_read64(rvu, blkaddr,
+ NIX_AF_LFX_TX_STATX(lf, index));
+ }
+
+ return 0;
+}
+
+int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start)
+{
+ struct rvu_pfvf *parent_pf, *pfvf;
+ int cgx_users, err = 0;
+
+ if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
+ return 0;
+
+ parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ mutex_lock(&rvu->cgx_cfg_lock);
+
+ if (start && pfvf->cgx_in_use)
+ goto exit; /* CGX is already started hence nothing to do */
+ if (!start && !pfvf->cgx_in_use)
+ goto exit; /* CGX is already stopped hence nothing to do */
+
+ if (start) {
+ cgx_users = parent_pf->cgx_users;
+ parent_pf->cgx_users++;
+ } else {
+ parent_pf->cgx_users--;
+ cgx_users = parent_pf->cgx_users;
+ }
+
+	/* Start CGX when the first of all the NIXLFs is started.
+	 * Stop CGX when the last of all the NIXLFs is stopped.
+ */
+ if (!cgx_users) {
+ err = rvu_cgx_config_rxtx(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK,
+ start);
+ if (err) {
+ dev_err(rvu->dev, "Unable to %s CGX\n",
+ start ? "start" : "stop");
+ /* Revert the usage count in case of error */
+ parent_pf->cgx_users = start ? parent_pf->cgx_users - 1
+ : parent_pf->cgx_users + 1;
+ goto exit;
+ }
+ }
+ pfvf->cgx_in_use = start;
+exit:
+ mutex_unlock(&rvu->cgx_cfg_lock);
+ return err;
+}
+
+int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu,
+ struct fec_mode *req,
+ struct fec_mode *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -EPERM;
+
+ if (req->fec == OTX2_FEC_OFF)
+ req->fec = OTX2_FEC_NONE;
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rsp->fec = cgx_set_fec(req->fec, cgx_id, lmac_id);
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req,
+ struct cgx_fw_data *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!rvu->fwdata)
+ return LMAC_AF_ERR_FIRMWARE_DATA_NOT_MAPPED;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ memcpy(&rsp->fwdata, &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id],
+ sizeof(struct cgx_lmac_fwdata_s));
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
+ struct cgx_set_link_mode_req *req,
+ struct cgx_set_link_mode_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_idx, lmac;
+ void *cgxd;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
+ cgxd = rvu_cgx_pdata(cgx_idx, rvu);
+ rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac);
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return LMAC_AF_ERR_PERM_DENIED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_mac_addr_reset(rvu, req, rsp);
+
+ return cgx_lmac_addr_reset(cgx_id, lmac_id);
+}
+
+int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
+ struct cgx_mac_addr_update_req *req,
+ struct cgx_mac_addr_update_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return LMAC_AF_ERR_PERM_DENIED;
+
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_mac_addr_update(rvu, req, rsp);
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index);
+}
+
+int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause,
+ u8 rx_pause, u16 pfc_en)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 rx_8023 = 0, tx_8023 = 0;
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
+	 * if received from any other PF/VF, simply ACK, nothing to do.
+ */
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &tx_8023, &rx_8023);
+ if (tx_8023 || rx_8023) {
+ dev_warn(rvu->dev,
+			 "Cannot configure PFC as 802.3X pause frames are enabled");
+ return LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
+ pcifunc & RVU_PFVF_FUNC_MASK)) {
+ mutex_unlock(&rvu->rsrc_lock);
+ return LMAC_AF_ERR_PERM_DENIED;
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return mac_ops->pfc_config(cgxd, lmac_id, tx_pause, rx_pause, pfc_en);
+}
+
+int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu,
+ struct cgx_pfc_cfg *req,
+ struct cgx_pfc_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+ int err;
+
+	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
+	 * if received from any other PF/VF, simply ACK, nothing to do.
+ */
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ err = rvu_cgx_prio_flow_ctrl_cfg(rvu, req->hdr.pcifunc, req->tx_pause,
+ req->rx_pause, req->pfc_en);
+
+ mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);
+ return err;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
new file mode 100644
index 000000000..f9faa5b23
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
@@ -0,0 +1,543 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RPM CN10K driver
+ *
+ * Copyright (C) 2020 Marvell.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/pci.h>
+#include "rvu.h"
+#include "cgx.h"
+#include "rvu_reg.h"
+
+/* RVU LMTST */
+#define LMT_TBL_OP_READ 0
+#define LMT_TBL_OP_WRITE 1
+#define LMT_MAP_TABLE_SIZE (128 * 1024)
+#define LMT_MAPTBL_ENTRY_SIZE 16
+
+/* Function to perform operations (read/write) on lmtst map table */
+static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
+ int lmt_tbl_op)
+{
+ void __iomem *lmt_map_base;
+ u64 tbl_base;
+
+ tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
+
+ lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE);
+ if (!lmt_map_base) {
+ dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
+ return -ENOMEM;
+ }
+
+ if (lmt_tbl_op == LMT_TBL_OP_READ) {
+ *val = readq(lmt_map_base + index);
+ } else {
+ writeq((*val), (lmt_map_base + index));
+ /* Flushing the AP interceptor cache to make APR_LMT_MAP_ENTRY_S
+ * changes effective. Write 1 for flush and read is being used as a
+ * barrier and sets up a data dependency. Write to 0 after a write
+ * to 1 to complete the flush.
+ */
+ rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, BIT_ULL(0));
+ rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CTL);
+ rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, 0x00);
+ }
+
+ iounmap(lmt_map_base);
+ return 0;
+}
+
+#define LMT_MAP_TBL_W1_OFF 8
+static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc)
+{
+ return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) +
+ (pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE;
+}
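+
+/* Illustrative example (hypothetical values): with hw->total_vfs = 128, a
+ * request from PF2/VF3 (pf = 2, FUNC field = 4) maps to byte offset
+ * ((2 * 128) + 4) * LMT_MAPTBL_ENTRY_SIZE = 260 * 16 = 4160 in the map table.
+ */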
+
+static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
+ u64 iova, u64 *lmt_addr)
+{
+ u64 pa, val, pf;
+ int err = 0;
+
+ if (!iova) {
+		dev_err(rvu->dev, "%s Requested Null address for translation\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova);
+ pf = rvu_get_pf(pcifunc) & 0x1F;
+ val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 |
+ ((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF);
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val);
+
+ err = rvu_poll_reg(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS, BIT_ULL(0), false);
+ if (err) {
+		dev_err(rvu->dev, "%s LMTLINE iova translation failed\n", __func__);
+ goto exit;
+ }
+ val = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS);
+ if (val & ~0x1ULL) {
+		dev_err(rvu->dev, "%s LMTLINE iova translation failed err:%llx\n", __func__, val);
+ err = -EIO;
+ goto exit;
+ }
+ /* PA[51:12] = RVU_AF_SMMU_TLN_FLIT0[57:18]
+ * PA[11:0] = IOVA[11:0]
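+	 * e.g. (hypothetical values) if FLIT0[57:18] holds 0x123456 and the
+	 * IOVA is 0xABC, the resulting lmt_addr is (0x123456 << 12) | 0xABC
+	 * = 0x123456ABC.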
+ */
+ pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT0) >> 18;
+ pa &= GENMASK_ULL(39, 0);
+ *lmt_addr = (pa << 12) | (iova & 0xFFF);
+exit:
+ mutex_unlock(&rvu->rsrc_lock);
+ return err;
+}
+
+static int rvu_update_lmtaddr(struct rvu *rvu, u16 pcifunc, u64 lmt_addr)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ u32 tbl_idx;
+ int err = 0;
+ u64 val;
+
+ /* Read the current lmt addr of pcifunc */
+ tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
+ err = lmtst_map_table_ops(rvu, tbl_idx, &val, LMT_TBL_OP_READ);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to read LMT map table: index 0x%x err %d\n",
+ tbl_idx, err);
+ return err;
+ }
+
+	/* Storing the secondary's lmt base address as this needs to be
+ * reverted in FLR. Also making sure this default value doesn't
+ * get overwritten on multiple calls to this mailbox.
+ */
+ if (!pfvf->lmt_base_addr)
+ pfvf->lmt_base_addr = val;
+
+ /* Update the LMT table with new addr */
+ err = lmtst_map_table_ops(rvu, tbl_idx, &lmt_addr, LMT_TBL_OP_WRITE);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to update LMT map table: index 0x%x err %d\n",
+ tbl_idx, err);
+ return err;
+ }
+ return 0;
+}
+
+int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu,
+ struct lmtst_tbl_setup_req *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ u32 pri_tbl_idx, tbl_idx;
+ u64 lmt_addr;
+ int err = 0;
+ u64 val;
+
+	/* Check if PF_FUNC wants to use its own local memory as LMTLINE
+	 * region; if so, convert that IOVA to a physical address and
+	 * populate the LMT table with that address.
+ */
+ if (req->use_local_lmt_region) {
+ err = rvu_get_lmtaddr(rvu, req->hdr.pcifunc,
+ req->lmt_iova, &lmt_addr);
+ if (err < 0)
+ return err;
+
+ /* Update the lmt addr for this PFFUNC in the LMT table */
+ err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, lmt_addr);
+ if (err)
+ return err;
+ }
+
+	/* Reconfigure the lmtst map table in lmt region shared mode, i.e. make
+	 * multiple PF_FUNCs share an LMTLINE region. The primary/base
+	 * pcifunc (which is passed as an argument to the mailbox) is the one
+	 * whose lmt base address will be shared with the secondary
+	 * pcifunc (the one calling this mailbox).
+ */
+ if (req->base_pcifunc) {
+ /* Calculating the LMT table index equivalent to primary
+ * pcifunc.
+ */
+ pri_tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->base_pcifunc);
+
+ /* Read the base lmt addr of the primary pcifunc */
+ err = lmtst_map_table_ops(rvu, pri_tbl_idx, &val,
+ LMT_TBL_OP_READ);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to read LMT map table: index 0x%x err %d\n",
+ pri_tbl_idx, err);
+ goto error;
+ }
+
+ /* Update the base lmt addr of secondary with primary's base
+ * lmt addr.
+ */
+ err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, val);
+ if (err)
+ return err;
+ }
+
+	/* This mailbox can also be used to update word1 of APR_LMT_MAP_ENTRY_S,
+	 * e.g. enabling scheduled LMTST, disabling LMTLINE prefetch, or disabling
+	 * early completion for ordered LMTST.
+ */
+ if (req->sch_ena || req->dis_sched_early_comp || req->dis_line_pref) {
+ tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->hdr.pcifunc);
+ err = lmtst_map_table_ops(rvu, tbl_idx + LMT_MAP_TBL_W1_OFF,
+ &val, LMT_TBL_OP_READ);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to read LMT map table: index 0x%x err %d\n",
+ tbl_idx + LMT_MAP_TBL_W1_OFF, err);
+ goto error;
+ }
+
+ /* Storing lmt map table entry word1 default value as this needs
+ * to be reverted in FLR. Also making sure this default value
+ * doesn't get overwritten on multiple calls to this mailbox.
+ */
+ if (!pfvf->lmt_map_ent_w1)
+ pfvf->lmt_map_ent_w1 = val;
+
+ /* Disable early completion for Ordered LMTSTs. */
+ if (req->dis_sched_early_comp)
+ val |= (req->dis_sched_early_comp <<
+ APR_LMT_MAP_ENT_DIS_SCH_CMP_SHIFT);
+ /* Enable scheduled LMTST */
+ if (req->sch_ena)
+ val |= (req->sch_ena << APR_LMT_MAP_ENT_SCH_ENA_SHIFT) |
+ req->ssow_pf_func;
+ /* Disables LMTLINE prefetch before receiving store data. */
+ if (req->dis_line_pref)
+ val |= (req->dis_line_pref <<
+ APR_LMT_MAP_ENT_DIS_LINE_PREF_SHIFT);
+
+ err = lmtst_map_table_ops(rvu, tbl_idx + LMT_MAP_TBL_W1_OFF,
+ &val, LMT_TBL_OP_WRITE);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to update LMT map table: index 0x%x err %d\n",
+ tbl_idx + LMT_MAP_TBL_W1_OFF, err);
+ goto error;
+ }
+ }
+
+error:
+ return err;
+}
+
+/* Resetting the lmtst map table to original base addresses */
+void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ u32 tbl_idx;
+ int err;
+
+ if (is_rvu_otx2(rvu))
+ return;
+
+ if (pfvf->lmt_base_addr || pfvf->lmt_map_ent_w1) {
+ /* This corresponds to lmt map table index */
+ tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
+		/* Revert to the original lmt base addr for the respective
+		 * pcifunc.
+ */
+ if (pfvf->lmt_base_addr) {
+ err = lmtst_map_table_ops(rvu, tbl_idx,
+ &pfvf->lmt_base_addr,
+ LMT_TBL_OP_WRITE);
+ if (err)
+ dev_err(rvu->dev,
+ "Failed to update LMT map table: index 0x%x err %d\n",
+ tbl_idx, err);
+ pfvf->lmt_base_addr = 0;
+ }
+		/* Revert to the original word1 val of the lmtst map table entry
+ * which underwent changes.
+ */
+ if (pfvf->lmt_map_ent_w1) {
+ err = lmtst_map_table_ops(rvu,
+ tbl_idx + LMT_MAP_TBL_W1_OFF,
+ &pfvf->lmt_map_ent_w1,
+ LMT_TBL_OP_WRITE);
+ if (err)
+ dev_err(rvu->dev,
+ "Failed to update LMT map table: index 0x%x err %d\n",
+ tbl_idx + LMT_MAP_TBL_W1_OFF, err);
+ pfvf->lmt_map_ent_w1 = 0;
+ }
+ }
+}
+
+int rvu_set_channels_base(struct rvu *rvu)
+{
+ u16 nr_lbk_chans, nr_sdp_chans, nr_cgx_chans, nr_cpt_chans;
+ u16 sdp_chan_base, cgx_chan_base, cpt_chan_base;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 nix_const, nix_const1;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
+ nix_const1 = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+
+ hw->cgx = (nix_const >> 12) & 0xFULL;
+ hw->lmac_per_cgx = (nix_const >> 8) & 0xFULL;
+ hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
+ hw->lbk_links = (nix_const >> 24) & 0xFULL;
+ hw->cpt_links = (nix_const >> 44) & 0xFULL;
+ hw->sdp_links = 1;
+
+ hw->cgx_chan_base = NIX_CHAN_CGX_LMAC_CHX(0, 0, 0);
+ hw->lbk_chan_base = NIX_CHAN_LBK_CHX(0, 0);
+ hw->sdp_chan_base = NIX_CHAN_SDP_CH_START;
+
+ /* No Programmable channels */
+ if (!(nix_const & BIT_ULL(60)))
+ return 0;
+
+ hw->cap.programmable_chans = true;
+
+ /* If programmable channels are present then configure
+ * channels such that all channel numbers are contiguous
+ * leaving no holes. This way the new CPT channels can be
+	 * accommodated. The order of channel numbers assigned is
+	 * LBK, SDP, CGX and CPT. Also the base channel number
+	 * of a block must be a multiple of the number of channels
+	 * of the block.
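+	 *
+	 * Illustrative example (hypothetical counts): with lbk_chan_base 0,
+	 * 4 LBK links of 64 channels each and 256 SDP channels, sdp_chan_base
+	 * becomes ALIGN(256, 256) = 256; the CGX and CPT bases follow in the
+	 * same way, and CPT is finally pushed up to 2048 so that the MSB of
+	 * every CPT channel number is set.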
+ */
+ nr_lbk_chans = (nix_const >> 16) & 0xFFULL;
+ nr_sdp_chans = nix_const1 & 0xFFFULL;
+ nr_cgx_chans = nix_const & 0xFFULL;
+ nr_cpt_chans = (nix_const >> 32) & 0xFFFULL;
+
+ sdp_chan_base = hw->lbk_chan_base + hw->lbk_links * nr_lbk_chans;
+ /* Round up base channel to multiple of number of channels */
+ hw->sdp_chan_base = ALIGN(sdp_chan_base, nr_sdp_chans);
+
+ cgx_chan_base = hw->sdp_chan_base + hw->sdp_links * nr_sdp_chans;
+ hw->cgx_chan_base = ALIGN(cgx_chan_base, nr_cgx_chans);
+
+ cpt_chan_base = hw->cgx_chan_base + hw->cgx_links * nr_cgx_chans;
+ hw->cpt_chan_base = ALIGN(cpt_chan_base, nr_cpt_chans);
+
+	/* Out of 4096 channels, start CPT from 2048 so
+	 * that the MSB of CPT channel numbers is always set.
+	 */
+ if (cpt_chan_base <= NIX_CHAN_CPT_CH_START) {
+ hw->cpt_chan_base = NIX_CHAN_CPT_CH_START;
+ } else {
+ dev_err(rvu->dev,
+ "CPT channels could not fit in the range 2048-4095\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define LBK_CONNECT_NIXX(a) (0x0 + (a))
+
+static void __rvu_lbk_set_chans(struct rvu *rvu, void __iomem *base,
+ u64 offset, int lbkid, u16 chans)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 cfg;
+
+ cfg = readq(base + offset);
+ cfg &= ~(LBK_LINK_CFG_RANGE_MASK |
+ LBK_LINK_CFG_ID_MASK | LBK_LINK_CFG_BASE_MASK);
+ cfg |= FIELD_PREP(LBK_LINK_CFG_RANGE_MASK, ilog2(chans));
+ cfg |= FIELD_PREP(LBK_LINK_CFG_ID_MASK, lbkid);
+ cfg |= FIELD_PREP(LBK_LINK_CFG_BASE_MASK, hw->lbk_chan_base);
+
+ writeq(cfg, base + offset);
+}
+
+static void rvu_lbk_set_channels(struct rvu *rvu)
+{
+ struct pci_dev *pdev = NULL;
+ void __iomem *base;
+ u64 lbk_const;
+ u8 src, dst;
+ u16 chans;
+
+	/* To loopback packets between multiple NIX blocks
+	 * multiple LBK blocks are needed. With two NIX blocks,
+	 * four LBK blocks are needed and each LBK block's
+	 * source and destination are as follows:
+	 * LBK0 - source NIX0 and destination NIX0
+	 * LBK1 - source NIX0 and destination NIX1
+	 * LBK2 - source NIX1 and destination NIX0
+	 * LBK3 - source NIX1 and destination NIX1
+	 * As per the HRM, channel numbers should be programmed as:
+	 * P2X and X2P of LBK0 the same
+	 * P2X and X2P of LBK3 the same
+	 * P2X of LBK1 and X2P of LBK2 the same
+	 * P2X of LBK2 and X2P of LBK1 the same
+ */
+ while (true) {
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_LBK, pdev);
+ if (!pdev)
+ return;
+
+ base = pci_ioremap_bar(pdev, 0);
+ if (!base)
+ goto err_put;
+
+ lbk_const = readq(base + LBK_CONST);
+ chans = FIELD_GET(LBK_CONST_CHANS, lbk_const);
+ dst = FIELD_GET(LBK_CONST_DST, lbk_const);
+ src = FIELD_GET(LBK_CONST_SRC, lbk_const);
+
+ if (src == dst) {
+ if (src == LBK_CONNECT_NIXX(0)) { /* LBK0 */
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
+ 0, chans);
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
+ 0, chans);
+ } else if (src == LBK_CONNECT_NIXX(1)) { /* LBK3 */
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
+ 1, chans);
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
+ 1, chans);
+ }
+ } else {
+ if (src == LBK_CONNECT_NIXX(0)) { /* LBK1 */
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
+ 0, chans);
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
+ 1, chans);
+ } else if (src == LBK_CONNECT_NIXX(1)) { /* LBK2 */
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
+ 1, chans);
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
+ 0, chans);
+ }
+ }
+ iounmap(base);
+ }
+err_put:
+ pci_dev_put(pdev);
+}
+
+static void __rvu_nix_set_channels(struct rvu *rvu, int blkaddr)
+{
+ u64 nix_const1 = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+ u64 nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
+ u16 cgx_chans, lbk_chans, sdp_chans, cpt_chans;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int link, nix_link = 0;
+ u16 start;
+ u64 cfg;
+
+ cgx_chans = nix_const & 0xFFULL;
+ lbk_chans = (nix_const >> 16) & 0xFFULL;
+ sdp_chans = nix_const1 & 0xFFFULL;
+ cpt_chans = (nix_const >> 32) & 0xFFFULL;
+
+ start = hw->cgx_chan_base;
+ for (link = 0; link < hw->cgx_links; link++, nix_link++) {
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
+ cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
+ cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cgx_chans));
+ cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
+ rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
+ start += cgx_chans;
+ }
+
+ start = hw->lbk_chan_base;
+ for (link = 0; link < hw->lbk_links; link++, nix_link++) {
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
+ cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
+ cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(lbk_chans));
+ cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
+ rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
+ start += lbk_chans;
+ }
+
+ start = hw->sdp_chan_base;
+ for (link = 0; link < hw->sdp_links; link++, nix_link++) {
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
+ cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
+ cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(sdp_chans));
+ cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
+ rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
+ start += sdp_chans;
+ }
+
+ start = hw->cpt_chan_base;
+ for (link = 0; link < hw->cpt_links; link++, nix_link++) {
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
+ cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
+ cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cpt_chans));
+ cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
+ rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
+ start += cpt_chans;
+ }
+}
+
+static void rvu_nix_set_channels(struct rvu *rvu)
+{
+ int blkaddr = 0;
+
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ while (blkaddr) {
+ __rvu_nix_set_channels(rvu, blkaddr);
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ }
+}
+
+static void __rvu_rpm_set_channels(int cgxid, int lmacid, u16 base)
+{
+ u64 cfg;
+
+ cfg = cgx_lmac_read(cgxid, lmacid, RPMX_CMRX_LINK_CFG);
+ cfg &= ~(RPMX_CMRX_LINK_BASE_MASK | RPMX_CMRX_LINK_RANGE_MASK);
+
+ /* There is no read-only constant register to read
+	 * the number of channels per LMAC; it is always 16.
+ */
+ cfg |= FIELD_PREP(RPMX_CMRX_LINK_RANGE_MASK, ilog2(16));
+ cfg |= FIELD_PREP(RPMX_CMRX_LINK_BASE_MASK, base);
+ cgx_lmac_write(cgxid, lmacid, RPMX_CMRX_LINK_CFG, cfg);
+}
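+
+/* Illustrative example (hypothetical base): with hw->cgx_chan_base = 0x800,
+ * CGX0/LMAC0 gets channels 0x800-0x80F, CGX0/LMAC1 gets 0x810-0x81F, and so
+ * on, since each LMAC is always assigned 16 channels.
+ */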
+
+static void rvu_rpm_set_channels(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 base = hw->cgx_chan_base;
+ int cgx, lmac;
+
+ for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) {
+ for (lmac = 0; lmac < hw->lmac_per_cgx; lmac++) {
+ __rvu_rpm_set_channels(cgx, lmac, base);
+ base += 16;
+ }
+ }
+}
+
+void rvu_program_channels(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ if (!hw->cap.programmable_chans)
+ return;
+
+ rvu_nix_set_channels(rvu);
+ rvu_lbk_set_channels(rvu);
+ rvu_rpm_set_channels(rvu);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
new file mode 100644
index 000000000..38bbae5d9
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -0,0 +1,1086 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/pci.h>
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "mbox.h"
+#include "rvu.h"
+
+/* CPT PF device id */
+#define PCI_DEVID_OTX2_CPT_PF 0xA0FD
+#define PCI_DEVID_OTX2_CPT10K_PF 0xA0F2
+
+/* Length of initial context fetch in 128 byte words */
+#define CPT_CTX_ILEN 2ULL
+
+#define cpt_get_eng_sts(e_min, e_max, rsp, etype) \
+({ \
+ u64 free_sts = 0, busy_sts = 0; \
+ typeof(rsp) _rsp = rsp; \
+ u32 e, i; \
+ \
+ for (e = (e_min), i = 0; e < (e_max); e++, i++) { \
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e)); \
+ if (reg & 0x1) \
+ busy_sts |= 1ULL << i; \
+ \
+ if (reg & 0x2) \
+ free_sts |= 1ULL << i; \
+ } \
+ (_rsp)->busy_sts_##etype = busy_sts; \
+ (_rsp)->free_sts_##etype = free_sts; \
+})
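+
+/* Note: the macro above expects 'rvu', 'blkaddr' and 'reg' to be declared at
+ * the expansion site; each engine in [e_min, e_max) contributes one bit to
+ * the busy/free bitmaps in the response (bit 0 of CPT_AF_EXEX_STS reads as
+ * busy, bit 1 as free, as implied by the masks used above).
+ */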
+
+static irqreturn_t rvu_cpt_af_flt_intr_handler(int irq, void *ptr)
+{
+ struct rvu_block *block = ptr;
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ u64 reg0, reg1, reg2;
+
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
+ reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
+ if (!is_rvu_otx2(rvu)) {
+ reg2 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(2));
+ dev_err_ratelimited(rvu->dev,
+ "Received CPTAF FLT irq : 0x%llx, 0x%llx, 0x%llx",
+ reg0, reg1, reg2);
+ } else {
+ dev_err_ratelimited(rvu->dev,
+ "Received CPTAF FLT irq : 0x%llx, 0x%llx",
+ reg0, reg1);
+ }
+
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(0), reg0);
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(1), reg1);
+ if (!is_rvu_otx2(rvu))
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(2), reg2);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_cpt_af_rvu_intr_handler(int irq, void *ptr)
+{
+ struct rvu_block *block = ptr;
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ u64 reg;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
+ dev_err_ratelimited(rvu->dev, "Received CPTAF RVU irq : 0x%llx", reg);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT, reg);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_cpt_af_ras_intr_handler(int irq, void *ptr)
+{
+ struct rvu_block *block = ptr;
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ u64 reg;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
+ dev_err_ratelimited(rvu->dev, "Received CPTAF RAS irq : 0x%llx", reg);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT, reg);
+ return IRQ_HANDLED;
+}
+
+static int rvu_cpt_do_register_interrupt(struct rvu_block *block, int irq_offs,
+ irq_handler_t handler,
+ const char *name)
+{
+ struct rvu *rvu = block->rvu;
+ int ret;
+
+ ret = request_irq(pci_irq_vector(rvu->pdev, irq_offs), handler, 0,
+ name, block);
+ if (ret) {
+ dev_err(rvu->dev, "RVUAF: %s irq registration failed", name);
+ return ret;
+ }
+
+ WARN_ON(rvu->irq_allocated[irq_offs]);
+ rvu->irq_allocated[irq_offs] = true;
+ return 0;
+}
+
+static void cpt_10k_unregister_interrupts(struct rvu_block *block, int off)
+{
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ int i;
+
+ /* Disable all CPT AF interrupts */
+ for (i = 0; i < CPT_10K_AF_INT_VEC_RVU; i++)
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);
+
+ for (i = 0; i < CPT_10K_AF_INT_VEC_CNT; i++)
+ if (rvu->irq_allocated[off + i]) {
+ free_irq(pci_irq_vector(rvu->pdev, off + i), block);
+ rvu->irq_allocated[off + i] = false;
+ }
+}
+
+static void cpt_unregister_interrupts(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int i, offs;
+
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return;
+ offs = rvu_read64(rvu, blkaddr, CPT_PRIV_AF_INT_CFG) & 0x7FF;
+ if (!offs) {
+ dev_warn(rvu->dev,
+ "Failed to get CPT_AF_INT vector offsets\n");
+ return;
+ }
+ block = &hw->block[blkaddr];
+ if (!is_rvu_otx2(rvu))
+ return cpt_10k_unregister_interrupts(block, offs);
+
+ /* Disable all CPT AF interrupts */
+ for (i = 0; i < CPT_AF_INT_VEC_RVU; i++)
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);
+
+ for (i = 0; i < CPT_AF_INT_VEC_CNT; i++)
+ if (rvu->irq_allocated[offs + i]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + i), block);
+ rvu->irq_allocated[offs + i] = false;
+ }
+}
+
+void rvu_cpt_unregister_interrupts(struct rvu *rvu)
+{
+ cpt_unregister_interrupts(rvu, BLKADDR_CPT0);
+ cpt_unregister_interrupts(rvu, BLKADDR_CPT1);
+}
+
+static int cpt_10k_register_interrupts(struct rvu_block *block, int off)
+{
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ int i, ret;
+
+ for (i = CPT_10K_AF_INT_VEC_FLT0; i < CPT_10K_AF_INT_VEC_RVU; i++) {
+ sprintf(&rvu->irq_name[(off + i) * NAME_SIZE], "CPTAF FLT%d", i);
+ ret = rvu_cpt_do_register_interrupt(block, off + i,
+ rvu_cpt_af_flt_intr_handler,
+ &rvu->irq_name[(off + i) * NAME_SIZE]);
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1);
+ }
+
+ ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RVU,
+ rvu_cpt_af_rvu_intr_handler,
+ "CPTAF RVU");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1S, 0x1);
+
+ ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RAS,
+ rvu_cpt_af_ras_intr_handler,
+ "CPTAF RAS");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1S, 0x1);
+
+ return 0;
+err:
+ rvu_cpt_unregister_interrupts(rvu);
+ return ret;
+}
+
+static int cpt_register_interrupts(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int i, offs, ret = 0;
+ char irq_name[16];
+
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return 0;
+
+ block = &hw->block[blkaddr];
+ offs = rvu_read64(rvu, blkaddr, CPT_PRIV_AF_INT_CFG) & 0x7FF;
+ if (!offs) {
+ dev_warn(rvu->dev,
+ "Failed to get CPT_AF_INT vector offsets\n");
+ return 0;
+ }
+
+ if (!is_rvu_otx2(rvu))
+ return cpt_10k_register_interrupts(block, offs);
+
+ for (i = CPT_AF_INT_VEC_FLT0; i < CPT_AF_INT_VEC_RVU; i++) {
+ snprintf(irq_name, sizeof(irq_name), "CPTAF FLT%d", i);
+ ret = rvu_cpt_do_register_interrupt(block, offs + i,
+ rvu_cpt_af_flt_intr_handler,
+ irq_name);
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1);
+ }
+
+ ret = rvu_cpt_do_register_interrupt(block, offs + CPT_AF_INT_VEC_RVU,
+ rvu_cpt_af_rvu_intr_handler,
+ "CPTAF RVU");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1S, 0x1);
+
+ ret = rvu_cpt_do_register_interrupt(block, offs + CPT_AF_INT_VEC_RAS,
+ rvu_cpt_af_ras_intr_handler,
+ "CPTAF RAS");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1S, 0x1);
+
+ return 0;
+err:
+ rvu_cpt_unregister_interrupts(rvu);
+ return ret;
+}
+
+int rvu_cpt_register_interrupts(struct rvu *rvu)
+{
+ int ret;
+
+ ret = cpt_register_interrupts(rvu, BLKADDR_CPT0);
+ if (ret)
+ return ret;
+
+ return cpt_register_interrupts(rvu, BLKADDR_CPT1);
+}
+
+static int get_cpt_pf_num(struct rvu *rvu)
+{
+ int i, domain_nr, cpt_pf_num = -1;
+ struct pci_dev *pdev;
+
+ domain_nr = pci_domain_nr(rvu->pdev->bus);
+ for (i = 0; i < rvu->hw->total_pfs; i++) {
+ pdev = pci_get_domain_bus_and_slot(domain_nr, i + 1, 0);
+ if (!pdev)
+ continue;
+
+ if (pdev->device == PCI_DEVID_OTX2_CPT_PF ||
+ pdev->device == PCI_DEVID_OTX2_CPT10K_PF) {
+ cpt_pf_num = i;
+ put_device(&pdev->dev);
+ break;
+ }
+ put_device(&pdev->dev);
+ }
+ return cpt_pf_num;
+}
+
+static bool is_cpt_pf(struct rvu *rvu, u16 pcifunc)
+{
+ int cpt_pf_num = get_cpt_pf_num(rvu);
+
+ if (rvu_get_pf(pcifunc) != cpt_pf_num)
+ return false;
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ return false;
+
+ return true;
+}
+
+static bool is_cpt_vf(struct rvu *rvu, u16 pcifunc)
+{
+ int cpt_pf_num = get_cpt_pf_num(rvu);
+
+ if (rvu_get_pf(pcifunc) != cpt_pf_num)
+ return false;
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ return false;
+
+ return true;
+}
+
+static int validate_and_get_cpt_blkaddr(int req_blkaddr)
+{
+ int blkaddr;
+
+ blkaddr = req_blkaddr ? req_blkaddr : BLKADDR_CPT0;
+ if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
+ return -EINVAL;
+
+ return blkaddr;
+}
+
+int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu,
+ struct cpt_lf_alloc_req_msg *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ int cptlf, blkaddr;
+ int num_lfs, slot;
+ u64 val;
+
+ blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (req->eng_grpmsk == 0x0)
+ return CPT_AF_ERR_GRP_INVALID;
+
+ block = &rvu->hw->block[blkaddr];
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
+ block->addr);
+ if (!num_lfs)
+ return CPT_AF_ERR_LF_INVALID;
+
+ /* Check if requested 'CPTLF <=> NIXLF' mapping is valid */
+ if (req->nix_pf_func) {
+ /* If default, use 'this' CPTLF's PFFUNC */
+ if (req->nix_pf_func == RVU_DEFAULT_PF_FUNC)
+ req->nix_pf_func = pcifunc;
+ if (!is_pffunc_map_valid(rvu, req->nix_pf_func, BLKTYPE_NIX))
+ return CPT_AF_ERR_NIX_PF_FUNC_INVALID;
+ }
+
+ /* Check if requested 'CPTLF <=> SSOLF' mapping is valid */
+ if (req->sso_pf_func) {
+ /* If default, use 'this' CPTLF's PFFUNC */
+ if (req->sso_pf_func == RVU_DEFAULT_PF_FUNC)
+ req->sso_pf_func = pcifunc;
+ if (!is_pffunc_map_valid(rvu, req->sso_pf_func, BLKTYPE_SSO))
+ return CPT_AF_ERR_SSO_PF_FUNC_INVALID;
+ }
+
+ for (slot = 0; slot < num_lfs; slot++) {
+ cptlf = rvu_get_lf(rvu, block, pcifunc, slot);
+ if (cptlf < 0)
+ return CPT_AF_ERR_LF_INVALID;
+
+ /* Set CPT LF group and priority */
+ val = (u64)req->eng_grpmsk << 48 | 1;
+ if (!is_rvu_otx2(rvu))
+ val |= (CPT_CTX_ILEN << 17);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
+
+ /* Set CPT LF NIX_PF_FUNC and SSO_PF_FUNC. EXE_LDWB is set
+ * on reset.
+ */
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
+ val &= ~(GENMASK_ULL(63, 48) | GENMASK_ULL(47, 32));
+ val |= ((u64)req->nix_pf_func << 48 |
+ (u64)req->sso_pf_func << 32);
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
+ }
+
+ return 0;
+}
+
+static int cpt_lf_free(struct rvu *rvu, struct msg_req *req, int blkaddr)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int num_lfs, cptlf, slot, err;
+ struct rvu_block *block;
+
+ block = &rvu->hw->block[blkaddr];
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
+ block->addr);
+ if (!num_lfs)
+ return 0;
+
+ for (slot = 0; slot < num_lfs; slot++) {
+ cptlf = rvu_get_lf(rvu, block, pcifunc, slot);
+ if (cptlf < 0)
+ return CPT_AF_ERR_LF_INVALID;
+
+ /* Perform teardown */
+ rvu_cpt_lf_teardown(rvu, pcifunc, blkaddr, cptlf, slot);
+
+ /* Reset LF */
+ err = rvu_lf_reset(rvu, block, cptlf);
+ if (err) {
+ dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
+ block->addr, cptlf);
+ }
+ }
+
+ return 0;
+}
+
+int rvu_mbox_handler_cpt_lf_free(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ int ret;
+
+ ret = cpt_lf_free(rvu, req, BLKADDR_CPT0);
+ if (ret)
+ return ret;
+
+ if (is_block_implemented(rvu->hw, BLKADDR_CPT1))
+ ret = cpt_lf_free(rvu, req, BLKADDR_CPT1);
+
+ return ret;
+}
+
+static int cpt_inline_ipsec_cfg_inbound(struct rvu *rvu, int blkaddr, u8 cptlf,
+ struct cpt_inline_ipsec_cfg_msg *req)
+{
+ u16 sso_pf_func = req->sso_pf_func;
+ u8 nix_sel;
+ u64 val;
+
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
+ if (req->enable && (val & BIT_ULL(16))) {
+		/* The IPsec inline outbound path is already enabled for this
+		 * CPT LF; the HRM states that the inline inbound & outbound
+		 * paths must not be enabled at the same time for a given CPT LF.
+ */
+ return CPT_AF_ERR_INLINE_IPSEC_INB_ENA;
+ }
+ /* Check if requested 'CPTLF <=> SSOLF' mapping is valid */
+ if (sso_pf_func && !is_pffunc_map_valid(rvu, sso_pf_func, BLKTYPE_SSO))
+ return CPT_AF_ERR_SSO_PF_FUNC_INVALID;
+
+ nix_sel = (blkaddr == BLKADDR_CPT1) ? 1 : 0;
+ /* Enable CPT LF for IPsec inline inbound operations */
+ if (req->enable)
+ val |= BIT_ULL(9);
+ else
+ val &= ~BIT_ULL(9);
+
+ val |= (u64)nix_sel << 8;
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
+
+ if (sso_pf_func) {
+ /* Set SSO_PF_FUNC */
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
+ val |= (u64)sso_pf_func << 32;
+ val |= (u64)req->nix_pf_func << 48;
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
+ }
+ if (req->sso_pf_func_ovrd)
+ /* Set SSO_PF_FUNC_OVRD for inline IPSec */
+ rvu_write64(rvu, blkaddr, CPT_AF_ECO, 0x1);
+
+ /* Configure the X2P Link register with the cpt base channel number and
+ * range of channels it should propagate to X2P
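+	 * (the value written below packs log2(number of channels) at bit 16
+	 * and the CPT base channel number in the low bits).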
+ */
+ if (!is_rvu_otx2(rvu)) {
+ val = (ilog2(NIX_CHAN_CPT_X2P_MASK + 1) << 16);
+ val |= (u64)rvu->hw->cpt_chan_base;
+
+ rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0), val);
+ rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1), val);
+ }
+
+ return 0;
+}
+
+static int cpt_inline_ipsec_cfg_outbound(struct rvu *rvu, int blkaddr, u8 cptlf,
+ struct cpt_inline_ipsec_cfg_msg *req)
+{
+ u16 nix_pf_func = req->nix_pf_func;
+ int nix_blkaddr;
+ u8 nix_sel;
+ u64 val;
+
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
+ if (req->enable && (val & BIT_ULL(9))) {
+		/* The IPsec inline inbound path is already enabled for this
+		 * CPT LF; the HRM states that the inline inbound & outbound
+		 * paths must not be enabled at the same time for a given CPT LF.
+ */
+ return CPT_AF_ERR_INLINE_IPSEC_OUT_ENA;
+ }
+
+ /* Check if requested 'CPTLF <=> NIXLF' mapping is valid */
+ if (nix_pf_func && !is_pffunc_map_valid(rvu, nix_pf_func, BLKTYPE_NIX))
+ return CPT_AF_ERR_NIX_PF_FUNC_INVALID;
+
+ /* Enable CPT LF for IPsec inline outbound operations */
+ if (req->enable)
+ val |= BIT_ULL(16);
+ else
+ val &= ~BIT_ULL(16);
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
+
+ if (nix_pf_func) {
+ /* Set NIX_PF_FUNC */
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
+ val |= (u64)nix_pf_func << 48;
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
+
+ nix_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, nix_pf_func);
+ nix_sel = (nix_blkaddr == BLKADDR_NIX0) ? 0 : 1;
+
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
+ val |= (u64)nix_sel << 8;
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
+ }
+
+ return 0;
+}
+
+int rvu_mbox_handler_cpt_inline_ipsec_cfg(struct rvu *rvu,
+ struct cpt_inline_ipsec_cfg_msg *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ int cptlf, blkaddr, ret;
+ u16 actual_slot;
+
+ blkaddr = rvu_get_blkaddr_from_slot(rvu, BLKTYPE_CPT, pcifunc,
+ req->slot, &actual_slot);
+ if (blkaddr < 0)
+ return CPT_AF_ERR_LF_INVALID;
+
+ block = &rvu->hw->block[blkaddr];
+
+ cptlf = rvu_get_lf(rvu, block, pcifunc, actual_slot);
+ if (cptlf < 0)
+ return CPT_AF_ERR_LF_INVALID;
+
+ switch (req->dir) {
+ case CPT_INLINE_INBOUND:
+ ret = cpt_inline_ipsec_cfg_inbound(rvu, blkaddr, cptlf, req);
+ break;
+
+ case CPT_INLINE_OUTBOUND:
+ ret = cpt_inline_ipsec_cfg_outbound(rvu, blkaddr, cptlf, req);
+ break;
+
+ default:
+ return CPT_AF_ERR_PARAM;
+ }
+
+ return ret;
+}
+
+static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
+{
+ u64 offset = req->reg_offset;
+ int blkaddr, num_lfs, lf;
+ struct rvu_block *block;
+ struct rvu_pfvf *pfvf;
+
+ blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
+ if (blkaddr < 0)
+ return false;
+
+ /* Registers that can be accessed from PF/VF */
+ if ((offset & 0xFF000) == CPT_AF_LFX_CTL(0) ||
+ (offset & 0xFF000) == CPT_AF_LFX_CTL2(0)) {
+ if (offset & 7)
+ return false;
+
+ lf = (offset & 0xFFF) >> 3;
+ block = &rvu->hw->block[blkaddr];
+ pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
+ if (lf >= num_lfs)
+ /* Slot is not valid for that PF/VF */
+ return false;
+
+ /* Translate local LF used by VFs to global CPT LF */
+ lf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr],
+ req->hdr.pcifunc, lf);
+ if (lf < 0)
+ return false;
+
+ return true;
+ } else if (!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK)) {
+ /* Registers that can be accessed from PF */
+ switch (offset) {
+ case CPT_AF_DIAG:
+ case CPT_AF_CTL:
+ case CPT_AF_PF_FUNC:
+ case CPT_AF_BLK_RST:
+ case CPT_AF_CONSTANTS1:
+ case CPT_AF_CTX_FLUSH_TIMER:
+ return true;
+ }
+
+ switch (offset & 0xFF000) {
+ case CPT_AF_EXEX_STS(0):
+ case CPT_AF_EXEX_CTL(0):
+ case CPT_AF_EXEX_CTL2(0):
+ case CPT_AF_EXEX_UCODE_BASE(0):
+ if (offset & 7)
+ return false;
+ break;
+ default:
+ return false;
+ }
+ return true;
+ }
+ return false;
+}
+
+int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu,
+ struct cpt_rd_wr_reg_msg *req,
+ struct cpt_rd_wr_reg_msg *rsp)
+{
+ int blkaddr;
+
+ blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ /* This message is accepted only if sent from CPT PF/VF */
+ if (!is_cpt_pf(rvu, req->hdr.pcifunc) &&
+ !is_cpt_vf(rvu, req->hdr.pcifunc))
+ return CPT_AF_ERR_ACCESS_DENIED;
+
+ rsp->reg_offset = req->reg_offset;
+ rsp->ret_val = req->ret_val;
+ rsp->is_write = req->is_write;
+
+ if (!is_valid_offset(rvu, req))
+ return CPT_AF_ERR_ACCESS_DENIED;
+
+ if (req->is_write)
+ rvu_write64(rvu, blkaddr, req->reg_offset, req->val);
+ else
+ rsp->val = rvu_read64(rvu, blkaddr, req->reg_offset);
+
+ return 0;
+}
+
+static void get_ctx_pc(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr)
+{
+ if (is_rvu_otx2(rvu))
+ return;
+
+ rsp->ctx_mis_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_MIS_PC);
+ rsp->ctx_hit_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_HIT_PC);
+ rsp->ctx_aop_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_AOP_PC);
+ rsp->ctx_aop_lat_pc = rvu_read64(rvu, blkaddr,
+ CPT_AF_CTX_AOP_LATENCY_PC);
+ rsp->ctx_ifetch_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_IFETCH_PC);
+ rsp->ctx_ifetch_lat_pc = rvu_read64(rvu, blkaddr,
+ CPT_AF_CTX_IFETCH_LATENCY_PC);
+ rsp->ctx_ffetch_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FFETCH_PC);
+ rsp->ctx_ffetch_lat_pc = rvu_read64(rvu, blkaddr,
+ CPT_AF_CTX_FFETCH_LATENCY_PC);
+ rsp->ctx_wback_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FFETCH_PC);
+ rsp->ctx_wback_lat_pc = rvu_read64(rvu, blkaddr,
+ CPT_AF_CTX_FFETCH_LATENCY_PC);
+ rsp->ctx_psh_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FFETCH_PC);
+ rsp->ctx_psh_lat_pc = rvu_read64(rvu, blkaddr,
+ CPT_AF_CTX_FFETCH_LATENCY_PC);
+ rsp->ctx_err = rvu_read64(rvu, blkaddr, CPT_AF_CTX_ERR);
+ rsp->ctx_enc_id = rvu_read64(rvu, blkaddr, CPT_AF_CTX_ENC_ID);
+ rsp->ctx_flush_timer = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FLUSH_TIMER);
+
+ rsp->rxc_time = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME);
+ rsp->rxc_time_cfg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG);
+ rsp->rxc_active_sts = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS);
+ rsp->rxc_zombie_sts = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ZOMBIE_STS);
+ rsp->rxc_dfrg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_DFRG);
+ rsp->x2p_link_cfg0 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0));
+ rsp->x2p_link_cfg1 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1));
+}
+
+static void get_eng_sts(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr)
+{
+ u16 max_ses, max_ies, max_aes;
+ u32 e_min = 0, e_max = 0;
+ u64 reg;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
+ max_ses = reg & 0xffff;
+ max_ies = (reg >> 16) & 0xffff;
+ max_aes = (reg >> 32) & 0xffff;
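+	/* Engines are laid out back to back: SEs occupy [0, max_ses), IEs
+	 * [max_ses, max_ses + max_ies) and AEs follow the IEs, as implied by
+	 * the ranges passed to cpt_get_eng_sts() below. For example, with 16
+	 * SEs, 8 IEs and 4 AEs (hypothetical counts) the AE range would be
+	 * [24, 28).
+	 */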
+
+ /* Get AE status */
+ e_min = max_ses + max_ies;
+ e_max = max_ses + max_ies + max_aes;
+ cpt_get_eng_sts(e_min, e_max, rsp, ae);
+ /* Get SE status */
+ e_min = 0;
+ e_max = max_ses;
+ cpt_get_eng_sts(e_min, e_max, rsp, se);
+ /* Get IE status */
+ e_min = max_ses;
+ e_max = max_ses + max_ies;
+ cpt_get_eng_sts(e_min, e_max, rsp, ie);
+}
+
+int rvu_mbox_handler_cpt_sts(struct rvu *rvu, struct cpt_sts_req *req,
+ struct cpt_sts_rsp *rsp)
+{
+ int blkaddr;
+
+ blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ /* This message is accepted only if sent from CPT PF/VF */
+ if (!is_cpt_pf(rvu, req->hdr.pcifunc) &&
+ !is_cpt_vf(rvu, req->hdr.pcifunc))
+ return CPT_AF_ERR_ACCESS_DENIED;
+
+ get_ctx_pc(rvu, rsp, blkaddr);
+
+ /* Get CPT engines status */
+ get_eng_sts(rvu, rsp, blkaddr);
+
+ /* Read CPT instruction PC registers */
+ rsp->inst_req_pc = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
+ rsp->inst_lat_pc = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
+ rsp->rd_req_pc = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
+ rsp->rd_lat_pc = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
+ rsp->rd_uc_pc = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
+ rsp->active_cycles_pc = rvu_read64(rvu, blkaddr,
+ CPT_AF_ACTIVE_CYCLES_PC);
+ rsp->exe_err_info = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
+ rsp->cptclk_cnt = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
+ rsp->diag = rvu_read64(rvu, blkaddr, CPT_AF_DIAG);
+
+ return 0;
+}
+
+#define RXC_ZOMBIE_THRES GENMASK_ULL(59, 48)
+#define RXC_ZOMBIE_LIMIT GENMASK_ULL(43, 32)
+#define RXC_ACTIVE_THRES GENMASK_ULL(27, 16)
+#define RXC_ACTIVE_LIMIT GENMASK_ULL(11, 0)
+#define RXC_ACTIVE_COUNT GENMASK_ULL(60, 48)
+#define RXC_ZOMBIE_COUNT GENMASK_ULL(60, 48)
+
+static void cpt_rxc_time_cfg(struct rvu *rvu, struct cpt_rxc_time_cfg_req *req,
+ int blkaddr)
+{
+ u64 dfrg_reg;
+
+ dfrg_reg = FIELD_PREP(RXC_ZOMBIE_THRES, req->zombie_thres);
+ dfrg_reg |= FIELD_PREP(RXC_ZOMBIE_LIMIT, req->zombie_limit);
+ dfrg_reg |= FIELD_PREP(RXC_ACTIVE_THRES, req->active_thres);
+ dfrg_reg |= FIELD_PREP(RXC_ACTIVE_LIMIT, req->active_limit);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG, req->step);
+ rvu_write64(rvu, blkaddr, CPT_AF_RXC_DFRG, dfrg_reg);
+}
+
+int rvu_mbox_handler_cpt_rxc_time_cfg(struct rvu *rvu,
+ struct cpt_rxc_time_cfg_req *req,
+ struct msg_rsp *rsp)
+{
+ int blkaddr;
+
+ blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ /* This message is accepted only if sent from CPT PF/VF */
+ if (!is_cpt_pf(rvu, req->hdr.pcifunc) &&
+ !is_cpt_vf(rvu, req->hdr.pcifunc))
+ return CPT_AF_ERR_ACCESS_DENIED;
+
+ cpt_rxc_time_cfg(rvu, req, blkaddr);
+
+ return 0;
+}
+
+int rvu_mbox_handler_cpt_ctx_cache_sync(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_cpt_ctx_flush(rvu, req->hdr.pcifunc);
+}
+
+static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
+{
+ struct cpt_rxc_time_cfg_req req;
+ int timeout = 2000;
+ u64 reg;
+
+ if (is_rvu_otx2(rvu))
+ return;
+
+ /* Set time limit to minimum values, so that rxc entries will be
+ * flushed out quickly.
+ */
+ req.step = 1;
+ req.zombie_thres = 1;
+ req.zombie_limit = 1;
+ req.active_thres = 1;
+ req.active_limit = 1;
+
+ cpt_rxc_time_cfg(rvu, &req, blkaddr);
+
+ do {
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS);
+ udelay(1);
+ if (FIELD_GET(RXC_ACTIVE_COUNT, reg))
+ timeout--;
+ else
+ break;
+ } while (timeout);
+
+ if (timeout == 0)
+ dev_warn(rvu->dev, "Poll for RXC active count hits hard loop counter\n");
+
+ timeout = 2000;
+ do {
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ZOMBIE_STS);
+ udelay(1);
+ if (FIELD_GET(RXC_ZOMBIE_COUNT, reg))
+ timeout--;
+ else
+ break;
+ } while (timeout);
+
+ if (timeout == 0)
+ dev_warn(rvu->dev, "Poll for RXC zombie count hits hard loop counter\n");
+}
+
+#define INPROG_INFLIGHT(reg) ((reg) & 0x1FF)
+#define INPROG_GRB_PARTIAL(reg) ((reg) & BIT_ULL(31))
+#define INPROG_GRB(reg) (((reg) >> 32) & 0xFF)
+#define INPROG_GWB(reg) (((reg) >> 40) & 0xFF)
+
+static void cpt_lf_disable_iqueue(struct rvu *rvu, int blkaddr, int slot)
+{
+ int i = 0, hard_lp_ctr = 100000;
+ u64 inprog, grp_ptr;
+ u16 nq_ptr, dq_ptr;
+
+ /* Disable instructions enqueuing */
+ rvu_write64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_CTL), 0x0);
+
+ /* Disable executions in the LF's queue */
+ inprog = rvu_read64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
+ inprog &= ~BIT_ULL(16);
+ rvu_write64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG), inprog);
+
+ /* Wait for CPT queue to become execution-quiescent */
+ do {
+ inprog = rvu_read64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
+ if (INPROG_GRB_PARTIAL(inprog)) {
+ i = 0;
+ hard_lp_ctr--;
+ } else {
+ i++;
+ }
+
+ grp_ptr = rvu_read64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot,
+ CPT_LF_Q_GRP_PTR));
+ nq_ptr = (grp_ptr >> 32) & 0x7FFF;
+ dq_ptr = grp_ptr & 0x7FFF;
+
+ } while (hard_lp_ctr && (i < 10) && (nq_ptr != dq_ptr));
+
+ if (hard_lp_ctr == 0)
+ dev_warn(rvu->dev, "CPT FLR hits hard loop counter\n");
+
+ i = 0;
+ hard_lp_ctr = 100000;
+ do {
+ inprog = rvu_read64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
+
+ if ((INPROG_INFLIGHT(inprog) == 0) &&
+ (INPROG_GWB(inprog) < 40) &&
+ ((INPROG_GRB(inprog) == 0) ||
+ (INPROG_GRB((inprog)) == 40))) {
+ i++;
+ } else {
+ i = 0;
+ hard_lp_ctr--;
+ }
+ } while (hard_lp_ctr && (i < 10));
+
+ if (hard_lp_ctr == 0)
+ dev_warn(rvu->dev, "CPT FLR hits hard loop counter\n");
+}
+
+int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int slot)
+{
+ u64 reg;
+
+ if (is_cpt_pf(rvu, pcifunc) || is_cpt_vf(rvu, pcifunc))
+ cpt_rxc_teardown(rvu, blkaddr);
+
+ /* Enable BAR2 ALIAS for this pcifunc. */
+ reg = BIT_ULL(16) | pcifunc;
+ rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
+
+ cpt_lf_disable_iqueue(rvu, blkaddr, slot);
+
+ /* Set group drop to help clear out hardware */
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
+ reg |= BIT_ULL(17);
+ rvu_write64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG), reg);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
+
+ return 0;
+}
+
+#define CPT_RES_LEN 16
+#define CPT_SE_IE_EGRP 1ULL
+
+static int cpt_inline_inb_lf_cmd_send(struct rvu *rvu, int blkaddr,
+ int nix_blkaddr)
+{
+ int cpt_pf_num = get_cpt_pf_num(rvu);
+ struct cpt_inst_lmtst_req *req;
+ dma_addr_t res_daddr;
+ int timeout = 3000;
+ u8 cpt_idx;
+ u64 *inst;
+ u16 *res;
+ int rc;
+
+ res = kzalloc(CPT_RES_LEN, GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ res_daddr = dma_map_single(rvu->dev, res, CPT_RES_LEN,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(rvu->dev, res_daddr)) {
+ dev_err(rvu->dev, "DMA mapping failed for CPT result\n");
+ rc = -EFAULT;
+ goto res_free;
+ }
+ *res = 0xFFFF;
+
+ /* Send mbox message to CPT PF */
+ req = (struct cpt_inst_lmtst_req *)
+ otx2_mbox_alloc_msg_rsp(&rvu->afpf_wq_info.mbox_up,
+ cpt_pf_num, sizeof(*req),
+ sizeof(struct msg_rsp));
+ if (!req) {
+ rc = -ENOMEM;
+ goto res_daddr_unmap;
+ }
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->hdr.id = MBOX_MSG_CPT_INST_LMTST;
+
+ inst = req->inst;
+ /* Prepare CPT_INST_S */
+ inst[0] = 0;
+ inst[1] = res_daddr;
+ /* AF PF FUNC */
+ inst[2] = 0;
+ /* Set QORD */
+ inst[3] = 1;
+ inst[4] = 0;
+ inst[5] = 0;
+ inst[6] = 0;
+ /* Set EGRP */
+ inst[7] = CPT_SE_IE_EGRP << 61;
+
+ /* Subtract 1 from the NIX-CPT credit count to preserve
+ * credit counts.
+ */
+ cpt_idx = (blkaddr == BLKADDR_CPT0) ? 0 : 1;
+ rvu_write64(rvu, nix_blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
+ BIT_ULL(22) - 1);
+
+ otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, cpt_pf_num);
+ rc = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, cpt_pf_num);
+ if (rc)
+ dev_warn(rvu->dev, "notification to pf %d failed\n",
+ cpt_pf_num);
+ /* Wait for CPT instruction to be completed */
+ do {
+ mdelay(1);
+ if (*res == 0xFFFF)
+ timeout--;
+ else
+ break;
+ } while (timeout);
+
+ if (timeout == 0)
+ dev_warn(rvu->dev, "Poll for result hits hard loop counter\n");
+
+res_daddr_unmap:
+ dma_unmap_single(rvu->dev, res_daddr, CPT_RES_LEN, DMA_BIDIRECTIONAL);
+res_free:
+ kfree(res);
+
+ return 0;
+}
+
+#define CTX_CAM_PF_FUNC GENMASK_ULL(61, 46)
+#define CTX_CAM_CPTR GENMASK_ULL(45, 0)
+
+int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc)
+{
+ int nix_blkaddr, blkaddr;
+ u16 max_ctx_entries, i;
+ int slot = 0, num_lfs;
+ u64 reg, cam_data;
+ int rc;
+
+ nix_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (nix_blkaddr < 0)
+ return -EINVAL;
+
+ if (is_rvu_otx2(rvu))
+ return 0;
+
+ blkaddr = (nix_blkaddr == BLKADDR_NIX1) ? BLKADDR_CPT1 : BLKADDR_CPT0;
+
+ /* Submit CPT_INST_S to track when all packets have been
+ * flushed through for the NIX PF FUNC in inline inbound case.
+ */
+ rc = cpt_inline_inb_lf_cmd_send(rvu, blkaddr, nix_blkaddr);
+ if (rc)
+ return rc;
+
+ /* Wait for rxc entries to be flushed out */
+ cpt_rxc_teardown(rvu, blkaddr);
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0);
+ max_ctx_entries = (reg >> 48) & 0xFFF;
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
+ blkaddr);
+ if (num_lfs == 0) {
+ dev_warn(rvu->dev, "CPT LF is not configured\n");
+ goto unlock;
+ }
+
+ /* Enable BAR2 ALIAS for this pcifunc. */
+ reg = BIT_ULL(16) | pcifunc;
+ rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
+
+ for (i = 0; i < max_ctx_entries; i++) {
+ cam_data = rvu_read64(rvu, blkaddr, CPT_AF_CTX_CAM_DATA(i));
+
+ if ((FIELD_GET(CTX_CAM_PF_FUNC, cam_data) == pcifunc) &&
+ FIELD_GET(CTX_CAM_CPTR, cam_data)) {
+ reg = BIT_ULL(46) | FIELD_GET(CTX_CAM_CPTR, cam_data);
+ rvu_write64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_CTX_FLUSH),
+ reg);
+ }
+ }
+ rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
+
+unlock:
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
new file mode 100644
index 000000000..cc5d342e0
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -0,0 +1,3423 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2019 Marvell.
+ *
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+#include "cgx.h"
+#include "lmac_common.h"
+#include "npc.h"
+#include "rvu_npc_hash.h"
+#include "mcs.h"
+
+#define DEBUGFS_DIR_NAME "octeontx2"
+
+enum {
+ CGX_STAT0,
+ CGX_STAT1,
+ CGX_STAT2,
+ CGX_STAT3,
+ CGX_STAT4,
+ CGX_STAT5,
+ CGX_STAT6,
+ CGX_STAT7,
+ CGX_STAT8,
+ CGX_STAT9,
+ CGX_STAT10,
+ CGX_STAT11,
+ CGX_STAT12,
+ CGX_STAT13,
+ CGX_STAT14,
+ CGX_STAT15,
+ CGX_STAT16,
+ CGX_STAT17,
+ CGX_STAT18,
+};
+
+/* NIX TX stats */
+enum nix_stat_lf_tx {
+ TX_UCAST = 0x0,
+ TX_BCAST = 0x1,
+ TX_MCAST = 0x2,
+ TX_DROP = 0x3,
+ TX_OCTS = 0x4,
+ TX_STATS_ENUM_LAST,
+};
+
+/* NIX RX stats */
+enum nix_stat_lf_rx {
+ RX_OCTS = 0x0,
+ RX_UCAST = 0x1,
+ RX_BCAST = 0x2,
+ RX_MCAST = 0x3,
+ RX_DROP = 0x4,
+ RX_DROP_OCTS = 0x5,
+ RX_FCS = 0x6,
+ RX_ERR = 0x7,
+ RX_DRP_BCAST = 0x8,
+ RX_DRP_MCAST = 0x9,
+ RX_DRP_L3BCAST = 0xa,
+ RX_DRP_L3MCAST = 0xb,
+ RX_STATS_ENUM_LAST,
+};
+
+static char *cgx_rx_stats_fields[] = {
+ [CGX_STAT0] = "Received packets",
+ [CGX_STAT1] = "Octets of received packets",
+ [CGX_STAT2] = "Received PAUSE packets",
+ [CGX_STAT3] = "Received PAUSE and control packets",
+ [CGX_STAT4] = "Filtered DMAC0 (NIX-bound) packets",
+ [CGX_STAT5] = "Filtered DMAC0 (NIX-bound) octets",
+ [CGX_STAT6] = "Packets dropped due to RX FIFO full",
+ [CGX_STAT7] = "Octets dropped due to RX FIFO full",
+ [CGX_STAT8] = "Error packets",
+ [CGX_STAT9] = "Filtered DMAC1 (NCSI-bound) packets",
+ [CGX_STAT10] = "Filtered DMAC1 (NCSI-bound) octets",
+ [CGX_STAT11] = "NCSI-bound packets dropped",
+ [CGX_STAT12] = "NCSI-bound octets dropped",
+};
+
+static char *cgx_tx_stats_fields[] = {
+ [CGX_STAT0] = "Packets dropped due to excessive collisions",
+ [CGX_STAT1] = "Packets dropped due to excessive deferral",
+ [CGX_STAT2] = "Multiple collisions before successful transmission",
+ [CGX_STAT3] = "Single collisions before successful transmission",
+ [CGX_STAT4] = "Total octets sent on the interface",
+ [CGX_STAT5] = "Total frames sent on the interface",
+ [CGX_STAT6] = "Packets sent with an octet count < 64",
+ [CGX_STAT7] = "Packets sent with an octet count == 64",
+ [CGX_STAT8] = "Packets sent with an octet count of 65-127",
+ [CGX_STAT9] = "Packets sent with an octet count of 128-255",
+ [CGX_STAT10] = "Packets sent with an octet count of 256-511",
+ [CGX_STAT11] = "Packets sent with an octet count of 512-1023",
+ [CGX_STAT12] = "Packets sent with an octet count of 1024-1518",
+ [CGX_STAT13] = "Packets sent with an octet count of > 1518",
+ [CGX_STAT14] = "Packets sent to a broadcast DMAC",
+ [CGX_STAT15] = "Packets sent to the multicast DMAC",
+	[CGX_STAT16] = "Packets truncated due to transmit underflow",
+ [CGX_STAT17] = "Control/PAUSE packets sent",
+};
+
+static char *rpm_rx_stats_fields[] = {
+ "Octets of received packets",
+	"Octets of received packets without error",
+ "Received packets with alignment errors",
+ "Control/PAUSE packets received",
+ "Packets received with Frame too long Errors",
+	"Packets received with in-range length Errors",
+ "Received packets",
+ "Packets received with FrameCheckSequenceErrors",
+ "Packets received with VLAN header",
+ "Error packets",
+ "Packets received with unicast DMAC",
+ "Packets received with multicast DMAC",
+ "Packets received with broadcast DMAC",
+ "Dropped packets",
+ "Total frames received on interface",
+ "Packets received with an octet count < 64",
+ "Packets received with an octet count == 64",
+ "Packets received with an octet count of 65-127",
+ "Packets received with an octet count of 128-255",
+ "Packets received with an octet count of 256-511",
+ "Packets received with an octet count of 512-1023",
+ "Packets received with an octet count of 1024-1518",
+ "Packets received with an octet count of > 1518",
+ "Oversized Packets",
+ "Jabber Packets",
+ "Fragmented Packets",
+ "CBFC(class based flow control) pause frames received for class 0",
+ "CBFC pause frames received for class 1",
+ "CBFC pause frames received for class 2",
+ "CBFC pause frames received for class 3",
+ "CBFC pause frames received for class 4",
+ "CBFC pause frames received for class 5",
+ "CBFC pause frames received for class 6",
+ "CBFC pause frames received for class 7",
+ "CBFC pause frames received for class 8",
+ "CBFC pause frames received for class 9",
+ "CBFC pause frames received for class 10",
+ "CBFC pause frames received for class 11",
+ "CBFC pause frames received for class 12",
+ "CBFC pause frames received for class 13",
+ "CBFC pause frames received for class 14",
+ "CBFC pause frames received for class 15",
+ "MAC control packets received",
+};
+
+static char *rpm_tx_stats_fields[] = {
+ "Total octets sent on the interface",
+ "Total octets transmitted OK",
+ "Control/Pause frames sent",
+ "Total frames transmitted OK",
+ "Total frames sent with VLAN header",
+ "Error Packets",
+ "Packets sent to unicast DMAC",
+ "Packets sent to the multicast DMAC",
+ "Packets sent to a broadcast DMAC",
+ "Packets sent with an octet count == 64",
+ "Packets sent with an octet count of 65-127",
+ "Packets sent with an octet count of 128-255",
+ "Packets sent with an octet count of 256-511",
+ "Packets sent with an octet count of 512-1023",
+ "Packets sent with an octet count of 1024-1518",
+ "Packets sent with an octet count of > 1518",
+ "CBFC(class based flow control) pause frames transmitted for class 0",
+ "CBFC pause frames transmitted for class 1",
+ "CBFC pause frames transmitted for class 2",
+ "CBFC pause frames transmitted for class 3",
+ "CBFC pause frames transmitted for class 4",
+ "CBFC pause frames transmitted for class 5",
+ "CBFC pause frames transmitted for class 6",
+ "CBFC pause frames transmitted for class 7",
+ "CBFC pause frames transmitted for class 8",
+ "CBFC pause frames transmitted for class 9",
+ "CBFC pause frames transmitted for class 10",
+ "CBFC pause frames transmitted for class 11",
+ "CBFC pause frames transmitted for class 12",
+ "CBFC pause frames transmitted for class 13",
+ "CBFC pause frames transmitted for class 14",
+ "CBFC pause frames transmitted for class 15",
+ "MAC control packets sent",
+ "Total frames sent on the interface"
+};
+
+enum cpt_eng_type {
+ CPT_AE_TYPE = 1,
+ CPT_SE_TYPE = 2,
+ CPT_IE_TYPE = 3,
+};
+
+#define rvu_dbg_NULL NULL
+#define rvu_dbg_open_NULL NULL
+
+#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op) \
+static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, rvu_dbg_##read_op, inode->i_private); \
+} \
+static const struct file_operations rvu_dbg_##name##_fops = { \
+ .owner = THIS_MODULE, \
+ .open = rvu_dbg_open_##name, \
+ .read = seq_read, \
+ .write = rvu_dbg_##write_op, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
+#define RVU_DEBUG_FOPS(name, read_op, write_op) \
+static const struct file_operations rvu_dbg_##name##_fops = { \
+ .owner = THIS_MODULE, \
+ .open = simple_open, \
+ .read = rvu_dbg_##read_op, \
+ .write = rvu_dbg_##write_op \
+}
+
+static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
+
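+/* Show per-port MCS TCAM miss and parser error counters; used by both the
+ * rx_stats and tx_stats "port" debugfs files.
+ */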
+static int rvu_dbg_mcs_port_stats_display(struct seq_file *filp, void *unused, int dir)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_port_stats stats;
+ int lmac;
+
+ seq_puts(filp, "\n port stats\n");
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(lmac, &mcs->hw->lmac_bmap, mcs->hw->lmac_cnt) {
+ mcs_get_port_stats(mcs, &stats, lmac, dir);
+ seq_printf(filp, "port%d: Tcam Miss: %lld\n", lmac, stats.tcam_miss_cnt);
+ seq_printf(filp, "port%d: Parser errors: %lld\n", lmac, stats.parser_err_cnt);
+
+ if (dir == MCS_RX && mcs->hw->mcs_blks > 1)
+ seq_printf(filp, "port%d: Preempt error: %lld\n", lmac,
+ stats.preempt_err_cnt);
+ if (dir == MCS_TX)
+ seq_printf(filp, "port%d: Sectag insert error: %lld\n", lmac,
+ stats.sectag_insert_err_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+static int rvu_dbg_mcs_rx_port_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_RX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_rx_port_stats, mcs_rx_port_stats_display, NULL);
+
+static int rvu_dbg_mcs_tx_port_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_TX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_tx_port_stats, mcs_tx_port_stats_display, NULL);
+
+static int rvu_dbg_mcs_sa_stats_display(struct seq_file *filp, void *unused, int dir)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_sa_stats stats;
+ struct rsrc_bmap *map;
+ int sa_id;
+
+ if (dir == MCS_TX) {
+ map = &mcs->tx.sa;
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
+ seq_puts(filp, "\n TX SA stats\n");
+ mcs_get_sa_stats(mcs, &stats, sa_id, MCS_TX);
+ seq_printf(filp, "sa%d: Pkts encrypted: %lld\n", sa_id,
+ stats.pkt_encrypt_cnt);
+
+ seq_printf(filp, "sa%d: Pkts protected: %lld\n", sa_id,
+ stats.pkt_protected_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+ }
+
+ /* RX stats */
+ map = &mcs->rx.sa;
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
+ seq_puts(filp, "\n RX SA stats\n");
+ mcs_get_sa_stats(mcs, &stats, sa_id, MCS_RX);
+ seq_printf(filp, "sa%d: Invalid pkts: %lld\n", sa_id, stats.pkt_invalid_cnt);
+ seq_printf(filp, "sa%d: Pkts no sa error: %lld\n", sa_id, stats.pkt_nosaerror_cnt);
+ seq_printf(filp, "sa%d: Pkts not valid: %lld\n", sa_id, stats.pkt_notvalid_cnt);
+ seq_printf(filp, "sa%d: Pkts ok: %lld\n", sa_id, stats.pkt_ok_cnt);
+ seq_printf(filp, "sa%d: Pkts no sa: %lld\n", sa_id, stats.pkt_nosa_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+static int rvu_dbg_mcs_rx_sa_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_RX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_rx_sa_stats, mcs_rx_sa_stats_display, NULL);
+
+static int rvu_dbg_mcs_tx_sa_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_TX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_tx_sa_stats, mcs_tx_sa_stats_display, NULL);
+
+static int rvu_dbg_mcs_tx_sc_stats_display(struct seq_file *filp, void *unused)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_sc_stats stats;
+ struct rsrc_bmap *map;
+ int sc_id;
+
+ map = &mcs->tx.sc;
+ seq_puts(filp, "\n SC stats\n");
+
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
+ mcs_get_sc_stats(mcs, &stats, sc_id, MCS_TX);
+ seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
+ seq_printf(filp, "sc%d: Pkts encrypted: %lld\n", sc_id, stats.pkt_encrypt_cnt);
+ seq_printf(filp, "sc%d: Pkts protected: %lld\n", sc_id, stats.pkt_protected_cnt);
+
+ if (mcs->hw->mcs_blks == 1) {
+ seq_printf(filp, "sc%d: Octets encrypted: %lld\n", sc_id,
+ stats.octet_encrypt_cnt);
+ seq_printf(filp, "sc%d: Octets protected: %lld\n", sc_id,
+ stats.octet_protected_cnt);
+ }
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_tx_sc_stats, mcs_tx_sc_stats_display, NULL);
+
+static int rvu_dbg_mcs_rx_sc_stats_display(struct seq_file *filp, void *unused)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_sc_stats stats;
+ struct rsrc_bmap *map;
+ int sc_id;
+
+ map = &mcs->rx.sc;
+ seq_puts(filp, "\n SC stats\n");
+
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
+ mcs_get_sc_stats(mcs, &stats, sc_id, MCS_RX);
+ seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
+ seq_printf(filp, "sc%d: Cam hits: %lld\n", sc_id, stats.hit_cnt);
+ seq_printf(filp, "sc%d: Invalid pkts: %lld\n", sc_id, stats.pkt_invalid_cnt);
+ seq_printf(filp, "sc%d: Late pkts: %lld\n", sc_id, stats.pkt_late_cnt);
+ seq_printf(filp, "sc%d: Notvalid pkts: %lld\n", sc_id, stats.pkt_notvalid_cnt);
+ seq_printf(filp, "sc%d: Unchecked pkts: %lld\n", sc_id, stats.pkt_unchecked_cnt);
+
+ if (mcs->hw->mcs_blks > 1) {
+ seq_printf(filp, "sc%d: Delay pkts: %lld\n", sc_id, stats.pkt_delay_cnt);
+ seq_printf(filp, "sc%d: Pkts ok: %lld\n", sc_id, stats.pkt_ok_cnt);
+ }
+ if (mcs->hw->mcs_blks == 1) {
+ seq_printf(filp, "sc%d: Octets decrypted: %lld\n", sc_id,
+ stats.octet_decrypt_cnt);
+ seq_printf(filp, "sc%d: Octets validated: %lld\n", sc_id,
+ stats.octet_validate_cnt);
+ }
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_rx_sc_stats, mcs_rx_sc_stats_display, NULL);
+
+static int rvu_dbg_mcs_flowid_stats_display(struct seq_file *filp, void *unused, int dir)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_flowid_stats stats;
+ struct rsrc_bmap *map;
+ int flow_id;
+
+ seq_puts(filp, "\n Flowid stats\n");
+
+ if (dir == MCS_RX)
+ map = &mcs->rx.flow_ids;
+ else
+ map = &mcs->tx.flow_ids;
+
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(flow_id, map->bmap, mcs->hw->tcam_entries) {
+ mcs_get_flowid_stats(mcs, &stats, flow_id, dir);
+ seq_printf(filp, "Flowid%d: Hit:%lld\n", flow_id, stats.tcam_hit_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+static int rvu_dbg_mcs_tx_flowid_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_TX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_tx_flowid_stats, mcs_tx_flowid_stats_display, NULL);
+
+static int rvu_dbg_mcs_rx_flowid_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_RX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_rx_flowid_stats, mcs_rx_flowid_stats_display, NULL);
+
+static int rvu_dbg_mcs_tx_secy_stats_display(struct seq_file *filp, void *unused)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_secy_stats stats;
+ struct rsrc_bmap *map;
+ int secy_id;
+
+ map = &mcs->tx.secy;
+ seq_puts(filp, "\n MCS TX secy stats\n");
+
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
+ mcs_get_tx_secy_stats(mcs, &stats, secy_id);
+ seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
+ seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_bcast_cnt);
+ seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_mcast_cnt);
+ seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_ucast_cnt);
+ seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
+ seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
+ stats.unctl_pkt_bcast_cnt);
+ seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
+ stats.unctl_pkt_mcast_cnt);
+ seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
+ stats.unctl_pkt_ucast_cnt);
+ seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
+ seq_printf(filp, "secy%d: Octet encrypted: %lld\n", secy_id,
+ stats.octet_encrypted_cnt);
+ seq_printf(filp, "secy%d: octet protected: %lld\n", secy_id,
+ stats.octet_protected_cnt);
+		seq_printf(filp, "secy%d: Pkts with no active sa: %lld\n", secy_id,
+ stats.pkt_noactivesa_cnt);
+ seq_printf(filp, "secy%d: Pkts too long: %lld\n", secy_id, stats.pkt_toolong_cnt);
+ seq_printf(filp, "secy%d: Pkts untagged: %lld\n", secy_id, stats.pkt_untagged_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_tx_secy_stats, mcs_tx_secy_stats_display, NULL);
+
+static int rvu_dbg_mcs_rx_secy_stats_display(struct seq_file *filp, void *unused)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_secy_stats stats;
+ struct rsrc_bmap *map;
+ int secy_id;
+
+ map = &mcs->rx.secy;
+	seq_puts(filp, "\n MCS RX secy stats\n");
+
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
+ mcs_get_rx_secy_stats(mcs, &stats, secy_id);
+ seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
+ seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_bcast_cnt);
+ seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_mcast_cnt);
+ seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_ucast_cnt);
+ seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
+ seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
+ stats.unctl_pkt_bcast_cnt);
+ seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
+ stats.unctl_pkt_mcast_cnt);
+ seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
+ stats.unctl_pkt_ucast_cnt);
+ seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
+ seq_printf(filp, "secy%d: Octet decrypted: %lld\n", secy_id,
+ stats.octet_decrypted_cnt);
+ seq_printf(filp, "secy%d: octet validated: %lld\n", secy_id,
+ stats.octet_validated_cnt);
+		seq_printf(filp, "secy%d: Pkts on disabled port: %lld\n", secy_id,
+ stats.pkt_port_disabled_cnt);
+ seq_printf(filp, "secy%d: Pkts with badtag: %lld\n", secy_id, stats.pkt_badtag_cnt);
+ seq_printf(filp, "secy%d: Pkts with no SA(sectag.tci.c=0): %lld\n", secy_id,
+ stats.pkt_nosa_cnt);
+ seq_printf(filp, "secy%d: Pkts with nosaerror: %lld\n", secy_id,
+ stats.pkt_nosaerror_cnt);
+ seq_printf(filp, "secy%d: Tagged ctrl pkts: %lld\n", secy_id,
+ stats.pkt_tagged_ctl_cnt);
+		seq_printf(filp, "secy%d: Untagged pkts: %lld\n", secy_id, stats.pkt_untaged_cnt);
+ seq_printf(filp, "secy%d: Ctrl pkts: %lld\n", secy_id, stats.pkt_ctl_cnt);
+ if (mcs->hw->mcs_blks > 1)
+ seq_printf(filp, "secy%d: pkts notag: %lld\n", secy_id,
+ stats.pkt_notag_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_rx_secy_stats, mcs_rx_secy_stats_display, NULL);
+
+static void rvu_dbg_mcs_init(struct rvu *rvu)
+{
+ struct mcs *mcs;
+ char dname[10];
+ int i;
+
+ if (!rvu->mcs_blk_cnt)
+ return;
+
+ rvu->rvu_dbg.mcs_root = debugfs_create_dir("mcs", rvu->rvu_dbg.root);
+
+ for (i = 0; i < rvu->mcs_blk_cnt; i++) {
+ mcs = mcs_get_pdata(i);
+
+ sprintf(dname, "mcs%d", i);
+ rvu->rvu_dbg.mcs = debugfs_create_dir(dname,
+ rvu->rvu_dbg.mcs_root);
+
+ rvu->rvu_dbg.mcs_rx = debugfs_create_dir("rx_stats", rvu->rvu_dbg.mcs);
+
+ debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_rx, mcs,
+ &rvu_dbg_mcs_rx_flowid_stats_fops);
+
+ debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_rx, mcs,
+ &rvu_dbg_mcs_rx_secy_stats_fops);
+
+ debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_rx, mcs,
+ &rvu_dbg_mcs_rx_sc_stats_fops);
+
+ debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_rx, mcs,
+ &rvu_dbg_mcs_rx_sa_stats_fops);
+
+ debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_rx, mcs,
+ &rvu_dbg_mcs_rx_port_stats_fops);
+
+ rvu->rvu_dbg.mcs_tx = debugfs_create_dir("tx_stats", rvu->rvu_dbg.mcs);
+
+ debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_tx, mcs,
+ &rvu_dbg_mcs_tx_flowid_stats_fops);
+
+ debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_tx, mcs,
+ &rvu_dbg_mcs_tx_secy_stats_fops);
+
+ debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_tx, mcs,
+ &rvu_dbg_mcs_tx_sc_stats_fops);
+
+ debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_tx, mcs,
+ &rvu_dbg_mcs_tx_sa_stats_fops);
+
+ debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_tx, mcs,
+ &rvu_dbg_mcs_tx_port_stats_fops);
+ }
+}
+
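+/* Each LMTST map table entry is LMT_MAPTBL_ENTRY_SIZE (16) bytes: word 0
+ * holds the LMT line base address and word 1 the map entry attributes,
+ * both of which are dumped below.
+ */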
+#define LMT_MAPTBL_ENTRY_SIZE 16
+/* Dump LMTST map table */
+static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct rvu *rvu = filp->private_data;
+ u64 lmt_addr, val, tbl_base;
+ int pf, vf, num_vfs, hw_vfs;
+ void __iomem *lmt_map_base;
+ int buf_size = 10240;
+ size_t off = 0;
+ int index = 0;
+ char *buf;
+ int ret;
+
+ /* don't allow partial reads */
+ if (*ppos != 0)
+ return 0;
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
+
+ lmt_map_base = ioremap_wc(tbl_base, 128 * 1024);
+ if (!lmt_map_base) {
+ dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
+ kfree(buf);
+		return -ENOMEM;
+ }
+
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "\n\t\t\t\t\tLmtst Map Table Entries");
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "\n\t\t\t\t\t=======================");
+ off += scnprintf(&buf[off], buf_size - 1 - off, "\nPcifunc\t\t\t");
+ off += scnprintf(&buf[off], buf_size - 1 - off, "Table Index\t\t");
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "Lmtline Base (word 0)\t\t");
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "Lmt Map Entry (word 1)");
+ off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t",
+ pf);
+
+ index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE;
+ off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
+ (tbl_base + index));
+ lmt_addr = readq(lmt_map_base + index);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ " 0x%016llx\t\t", lmt_addr);
+ index += 8;
+ val = readq(lmt_map_base + index);
+ off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\n",
+ val);
+ /* Reading num of VFs per PF */
+ rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
+ for (vf = 0; vf < num_vfs; vf++) {
+			index = (pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE) +
+ ((vf + 1) * LMT_MAPTBL_ENTRY_SIZE);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "PF%d:VF%d \t\t", pf, vf);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ " 0x%llx\t\t", (tbl_base + index));
+ lmt_addr = readq(lmt_map_base + index);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ " 0x%016llx\t\t", lmt_addr);
+ index += 8;
+ val = readq(lmt_map_base + index);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ " 0x%016llx\n", val);
+ }
+ }
+ off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
+
+ ret = min(off, count);
+ if (copy_to_user(buffer, buf, ret))
+ ret = -EFAULT;
+ kfree(buf);
+
+ iounmap(lmt_map_base);
+ if (ret < 0)
+ return ret;
+
+ *ppos = ret;
+ return ret;
+}
+
+RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);
+
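+/* Build a comma separated list of the LFs in 'block' that are attached to
+ * 'pcifunc', collapsing consecutive LFs into ranges (e.g. "0-3,8").
+ */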
+static void get_lf_str_list(struct rvu_block block, int pcifunc,
+ char *lfs)
+{
+ int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max;
+
+ for_each_set_bit(lf, block.lf.bmap, block.lf.max) {
+ if (lf >= block.lf.max)
+ break;
+
+ if (block.fn_map[lf] != pcifunc)
+ continue;
+
+ if (lf == prev_lf + 1) {
+ prev_lf = lf;
+ seq = 1;
+ continue;
+ }
+
+ if (seq)
+ len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
+ else
+ len += (len ? sprintf(lfs + len, ",%d", lf) :
+ sprintf(lfs + len, "%d", lf));
+
+ prev_lf = lf;
+ seq = 0;
+ }
+
+ if (seq)
+ len += sprintf(lfs + len, "-%d", prev_lf);
+
+ lfs[len] = '\0';
+}
+
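+/* Find the longest LF list string across all PF/VF functions and blocks so
+ * that the provisioning table can be printed with aligned columns.
+ */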
+static int get_max_column_width(struct rvu *rvu)
+{
+ int index, pf, vf, lf_str_size = 12, buf_size = 256;
+ struct rvu_block block;
+ u16 pcifunc;
+ char *buf;
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
+ pcifunc = pf << 10 | vf;
+ if (!pcifunc)
+ continue;
+
+ for (index = 0; index < BLK_COUNT; index++) {
+ block = rvu->hw->block[index];
+ if (!strlen(block.name))
+ continue;
+
+ get_lf_str_list(block, pcifunc, buf);
+ if (lf_str_size <= strlen(buf))
+ lf_str_size = strlen(buf) + 1;
+ }
+ }
+ }
+
+ kfree(buf);
+ return lf_str_size;
+}
+
+/* Dumps current provisioning status of all RVU block LFs */
+static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ int index, off = 0, flag = 0, len = 0, i = 0;
+ struct rvu *rvu = filp->private_data;
+ int bytes_not_copied = 0;
+ struct rvu_block block;
+ int pf, vf, pcifunc;
+ int buf_size = 2048;
+ int lf_str_size;
+ char *lfs;
+ char *buf;
+
+ /* don't allow partial reads */
+ if (*ppos != 0)
+ return 0;
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Get the maximum width of a column */
+ lf_str_size = get_max_column_width(rvu);
+
+ lfs = kzalloc(lf_str_size, GFP_KERNEL);
+ if (!lfs) {
+ kfree(buf);
+ return -ENOMEM;
+ }
+ off += scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
+ "pcifunc");
+ for (index = 0; index < BLK_COUNT; index++)
+ if (strlen(rvu->hw->block[index].name)) {
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "%-*s", lf_str_size,
+ rvu->hw->block[index].name);
+ }
+
+ off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
+ bytes_not_copied = copy_to_user(buffer + (i * off), buf, off);
+ if (bytes_not_copied)
+ goto out;
+
+ i++;
+ *ppos += off;
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
+ off = 0;
+ flag = 0;
+ pcifunc = pf << 10 | vf;
+ if (!pcifunc)
+ continue;
+
+ if (vf) {
+ sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
+ off = scnprintf(&buf[off],
+ buf_size - 1 - off,
+ "%-*s", lf_str_size, lfs);
+ } else {
+ sprintf(lfs, "PF%d", pf);
+ off = scnprintf(&buf[off],
+ buf_size - 1 - off,
+ "%-*s", lf_str_size, lfs);
+ }
+
+ for (index = 0; index < BLK_COUNT; index++) {
+ block = rvu->hw->block[index];
+ if (!strlen(block.name))
+ continue;
+ len = 0;
+ lfs[len] = '\0';
+ get_lf_str_list(block, pcifunc, lfs);
+ if (strlen(lfs))
+ flag = 1;
+
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "%-*s", lf_str_size, lfs);
+ }
+ if (flag) {
+ off += scnprintf(&buf[off],
+ buf_size - 1 - off, "\n");
+ bytes_not_copied = copy_to_user(buffer +
+ (i * off),
+ buf, off);
+ if (bytes_not_copied)
+ goto out;
+
+ i++;
+ *ppos += off;
+ }
+ }
+ }
+
+out:
+ kfree(lfs);
+ kfree(buf);
+ if (bytes_not_copied)
+ return -EFAULT;
+
+ return *ppos;
+}
+
+RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
+
+static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
+{
+ struct rvu *rvu = filp->private;
+ struct pci_dev *pdev = NULL;
+ struct mac_ops *mac_ops;
+ char cgx[10], lmac[10];
+ struct rvu_pfvf *pfvf;
+ int pf, domain, blkid;
+ u8 cgx_id, lmac_id;
+ u16 pcifunc;
+
+ domain = 2;
+ mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
+ /* There can be no CGX devices at all */
+ if (!mac_ops)
+ return 0;
+ seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
+ mac_ops->name);
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ if (!is_pf_cgxmapped(rvu, pf))
+ continue;
+
+ pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
+ if (!pdev)
+ continue;
+
+ cgx[0] = 0;
+ lmac[0] = 0;
+ pcifunc = pf << 10;
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ if (pfvf->nix_blkaddr == BLKADDR_NIX0)
+ blkid = 0;
+ else
+ blkid = 1;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
+ &lmac_id);
+ sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
+ sprintf(lmac, "LMAC%d", lmac_id);
+ seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
+ dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
+
+ pci_dev_put(pdev);
+ }
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
+
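+/* Validate a user supplied LF index for the given block and return the
+ * PF_FUNC it is attached to; returns false if the LF is out of range or
+ * not attached to any PF/VF.
+ */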
+static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
+ u16 *pcifunc)
+{
+ struct rvu_block *block;
+ struct rvu_hwinfo *hw;
+
+ hw = rvu->hw;
+ block = &hw->block[blkaddr];
+
+ if (lf < 0 || lf >= block->lf.max) {
+ dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
+ block->lf.max - 1);
+ return false;
+ }
+
+ *pcifunc = block->fn_map[lf];
+ if (!*pcifunc) {
+ dev_warn(rvu->dev,
+ "This LF is not attached to any RVU PFFUNC\n");
+ return false;
+ }
+ return true;
+}
+
+static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
+{
+ char *buf;
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ if (!pfvf->aura_ctx) {
+ seq_puts(m, "Aura context is not initialized\n");
+ } else {
+ bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
+ pfvf->aura_ctx->qsize);
+ seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
+ seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
+ }
+
+ if (!pfvf->pool_ctx) {
+ seq_puts(m, "Pool context is not initialized\n");
+ } else {
+ bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
+ pfvf->pool_ctx->qsize);
+ seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
+ seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
+ }
+ kfree(buf);
+}
+
+/* The 'qsize' entry dumps current Aura/Pool context Qsize
+ * and each context's current enable/disable status in a bitmap.
+ */
+static int rvu_dbg_qsize_display(struct seq_file *filp, void *unused,
+ int blktype)
+{
+ void (*print_qsize)(struct seq_file *filp,
+ struct rvu_pfvf *pfvf) = NULL;
+ struct dentry *current_dir;
+ struct rvu_pfvf *pfvf;
+ struct rvu *rvu;
+ int qsize_id;
+ u16 pcifunc;
+ int blkaddr;
+
+ rvu = filp->private;
+ switch (blktype) {
+ case BLKTYPE_NPA:
+ qsize_id = rvu->rvu_dbg.npa_qsize_id;
+ print_qsize = print_npa_qsize;
+ break;
+
+ case BLKTYPE_NIX:
+ qsize_id = rvu->rvu_dbg.nix_qsize_id;
+ print_qsize = print_nix_qsize;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (blktype == BLKTYPE_NPA) {
+ blkaddr = BLKADDR_NPA;
+ } else {
+ current_dir = filp->file->f_path.dentry->d_parent;
+ blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
+ BLKADDR_NIX1 : BLKADDR_NIX0);
+ }
+
+ if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
+ return -EINVAL;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ print_qsize(filp, pfvf);
+
+ return 0;
+}
+
+static ssize_t rvu_dbg_qsize_write(struct file *filp,
+ const char __user *buffer, size_t count,
+ loff_t *ppos, int blktype)
+{
+ char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
+ struct seq_file *seqfile = filp->private_data;
+ char *cmd_buf, *cmd_buf_tmp, *subtoken;
+ struct rvu *rvu = seqfile->private;
+ struct dentry *current_dir;
+ int blkaddr;
+ u16 pcifunc;
+ int ret, lf;
+
+	cmd_buf = memdup_user_nul(buffer, count);
+	if (IS_ERR(cmd_buf))
+		return PTR_ERR(cmd_buf);
+
+ cmd_buf_tmp = strchr(cmd_buf, '\n');
+ if (cmd_buf_tmp) {
+ *cmd_buf_tmp = '\0';
+ count = cmd_buf_tmp - cmd_buf + 1;
+ }
+
+ cmd_buf_tmp = cmd_buf;
+ subtoken = strsep(&cmd_buf, " ");
+ ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
+ if (cmd_buf)
+ ret = -EINVAL;
+
+ if (ret < 0 || !strncmp(subtoken, "help", 4)) {
+ dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
+ goto qsize_write_done;
+ }
+
+ if (blktype == BLKTYPE_NPA) {
+ blkaddr = BLKADDR_NPA;
+ } else {
+ current_dir = filp->f_path.dentry->d_parent;
+ blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
+ BLKADDR_NIX1 : BLKADDR_NIX0);
+ }
+
+ if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
+ ret = -EINVAL;
+ goto qsize_write_done;
+ }
+ if (blktype == BLKTYPE_NPA)
+ rvu->rvu_dbg.npa_qsize_id = lf;
+ else
+ rvu->rvu_dbg.nix_qsize_id = lf;
+
+qsize_write_done:
+ kfree(cmd_buf_tmp);
+ return ret ? ret : count;
+}
+
+static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_qsize_write(filp, buffer, count, ppos,
+ BLKTYPE_NPA);
+}
+
+static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
+}
+
+RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
+
+/* Dumps given NPA Aura's context */
+static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
+{
+ struct npa_aura_s *aura = &rsp->aura;
+ struct rvu *rvu = m->private;
+
+ seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);
+
+ seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
+ aura->ena, aura->pool_caching);
+ seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
+ aura->pool_way_mask, aura->avg_con);
+ seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
+ aura->pool_drop_ena, aura->aura_drop_ena);
+ seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
+ aura->bp_ena, aura->aura_drop);
+ seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
+ aura->shift, aura->avg_level);
+
+ seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
+ (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);
+
+ seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
+ (u64)aura->limit, aura->bp, aura->fc_ena);
+
+ if (!is_rvu_otx2(rvu))
+ seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
+ seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
+ aura->fc_up_crossing, aura->fc_stype);
+ seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);
+
+ seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);
+
+ seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
+ aura->pool_drop, aura->update_time);
+ seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
+ aura->err_int, aura->err_int_ena);
+ seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
+ aura->thresh_int, aura->thresh_int_ena);
+ seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
+ aura->thresh_up, aura->thresh_qint_idx);
+ seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);
+
+ seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
+ if (!is_rvu_otx2(rvu))
+ seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
+}
+
+/* Dumps given NPA Pool's context */
+static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
+{
+ struct npa_pool_s *pool = &rsp->pool;
+ struct rvu *rvu = m->private;
+
+ seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);
+
+ seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
+ pool->ena, pool->nat_align);
+ seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
+ pool->stack_caching, pool->stack_way_mask);
+ seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
+ pool->buf_offset, pool->buf_size);
+
+ seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
+ pool->stack_max_pages, pool->stack_pages);
+
+ seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);
+
+ seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
+ pool->stack_offset, pool->shift, pool->avg_level);
+ seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
+ pool->avg_con, pool->fc_ena, pool->fc_stype);
+ seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
+ pool->fc_hyst_bits, pool->fc_up_crossing);
+ if (!is_rvu_otx2(rvu))
+ seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
+ seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);
+
+ seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);
+
+ seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);
+
+ seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);
+
+ seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
+ pool->err_int, pool->err_int_ena);
+ seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
+ seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
+ pool->thresh_int_ena, pool->thresh_up);
+ seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
+ pool->thresh_qint_idx, pool->err_qint_idx);
+ if (!is_rvu_otx2(rvu))
+ seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
+}
+
+/* Reads aura/pool's ctx from admin queue */
+static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
+{
+ void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
+ struct npa_aq_enq_req aq_req;
+ struct npa_aq_enq_rsp rsp;
+ struct rvu_pfvf *pfvf;
+ int aura, rc, max_id;
+ int npalf, id, all;
+ struct rvu *rvu;
+ u16 pcifunc;
+
+ rvu = m->private;
+
+ switch (ctype) {
+ case NPA_AQ_CTYPE_AURA:
+ npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
+ id = rvu->rvu_dbg.npa_aura_ctx.id;
+ all = rvu->rvu_dbg.npa_aura_ctx.all;
+ break;
+
+ case NPA_AQ_CTYPE_POOL:
+ npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
+ id = rvu->rvu_dbg.npa_pool_ctx.id;
+ all = rvu->rvu_dbg.npa_pool_ctx.all;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
+ return -EINVAL;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
+ seq_puts(m, "Aura context is not initialized\n");
+ return -EINVAL;
+ } else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
+ seq_puts(m, "Pool context is not initialized\n");
+ return -EINVAL;
+ }
+
+ memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
+ aq_req.hdr.pcifunc = pcifunc;
+ aq_req.ctype = ctype;
+ aq_req.op = NPA_AQ_INSTOP_READ;
+ if (ctype == NPA_AQ_CTYPE_AURA) {
+ max_id = pfvf->aura_ctx->qsize;
+ print_npa_ctx = print_npa_aura_ctx;
+ } else {
+ max_id = pfvf->pool_ctx->qsize;
+ print_npa_ctx = print_npa_pool_ctx;
+ }
+
+ if (id < 0 || id >= max_id) {
+ seq_printf(m, "Invalid %s, valid range is 0-%d\n",
+ (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
+ max_id - 1);
+ return -EINVAL;
+ }
+
+ if (all)
+ id = 0;
+ else
+ max_id = id + 1;
+
+ for (aura = id; aura < max_id; aura++) {
+ aq_req.aura_id = aura;
+
+ /* Skip if queue is uninitialized */
+ if (ctype == NPA_AQ_CTYPE_POOL && !test_bit(aura, pfvf->pool_bmap))
+ continue;
+
+ seq_printf(m, "======%s : %d=======\n",
+ (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
+ aq_req.aura_id);
+ rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
+ if (rc) {
+ seq_puts(m, "Failed to read context\n");
+ return -EINVAL;
+ }
+ print_npa_ctx(m, &rsp);
+ }
+ return 0;
+}
+
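+/* Record which NPA LF and aura/pool id (or "all") subsequent reads of the
+ * aura_ctx/pool_ctx debugfs files should dump.
+ */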
+static int write_npa_ctx(struct rvu *rvu, bool all,
+ int npalf, int id, int ctype)
+{
+ struct rvu_pfvf *pfvf;
+ int max_id = 0;
+ u16 pcifunc;
+
+ if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
+ return -EINVAL;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ if (ctype == NPA_AQ_CTYPE_AURA) {
+ if (!pfvf->aura_ctx) {
+ dev_warn(rvu->dev, "Aura context is not initialized\n");
+ return -EINVAL;
+ }
+ max_id = pfvf->aura_ctx->qsize;
+ } else if (ctype == NPA_AQ_CTYPE_POOL) {
+ if (!pfvf->pool_ctx) {
+ dev_warn(rvu->dev, "Pool context is not initialized\n");
+ return -EINVAL;
+ }
+ max_id = pfvf->pool_ctx->qsize;
+ }
+
+ if (id < 0 || id >= max_id) {
+ dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
+ (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
+ max_id - 1);
+ return -EINVAL;
+ }
+
+ switch (ctype) {
+ case NPA_AQ_CTYPE_AURA:
+ rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
+ rvu->rvu_dbg.npa_aura_ctx.id = id;
+ rvu->rvu_dbg.npa_aura_ctx.all = all;
+ break;
+
+ case NPA_AQ_CTYPE_POOL:
+ rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
+ rvu->rvu_dbg.npa_pool_ctx.id = id;
+ rvu->rvu_dbg.npa_pool_ctx.all = all;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
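+/* Parse "<npalf> [<id>|all]" written to the aura_ctx/pool_ctx debugfs
+ * files.
+ */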
+static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
+ const char __user *buffer, int *npalf,
+ int *id, bool *all)
+{
+ int bytes_not_copied;
+ char *cmd_buf_tmp;
+ char *subtoken;
+ int ret;
+
+ bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
+ if (bytes_not_copied)
+ return -EFAULT;
+
+ cmd_buf[*count] = '\0';
+ cmd_buf_tmp = strchr(cmd_buf, '\n');
+
+ if (cmd_buf_tmp) {
+ *cmd_buf_tmp = '\0';
+ *count = cmd_buf_tmp - cmd_buf + 1;
+ }
+
+ subtoken = strsep(&cmd_buf, " ");
+ ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL;
+ if (ret < 0)
+ return ret;
+ subtoken = strsep(&cmd_buf, " ");
+ if (subtoken && strcmp(subtoken, "all") == 0) {
+ *all = true;
+ } else {
+ ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL;
+ if (ret < 0)
+ return ret;
+ }
+ if (cmd_buf)
+ return -EINVAL;
+ return ret;
+}
+
+static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos, int ctype)
+{
+ char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
+ "aura" : "pool";
+ struct seq_file *seqfp = filp->private_data;
+ struct rvu *rvu = seqfp->private;
+ int npalf, id = 0, ret;
+ bool all = false;
+
+ if ((*ppos != 0) || !count)
+ return -EINVAL;
+
+ cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+ if (!cmd_buf)
+		return -ENOMEM;
+ ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
+ &npalf, &id, &all);
+ if (ret < 0) {
+ dev_info(rvu->dev,
+ "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
+ ctype_string, ctype_string);
+ goto done;
+ } else {
+ ret = write_npa_ctx(rvu, all, npalf, id, ctype);
+ }
+done:
+ kfree(cmd_buf);
+ return ret ? ret : count;
+}
+
+static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
+ NPA_AQ_CTYPE_AURA);
+}
+
+static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
+}
+
+RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
+
+static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
+ NPA_AQ_CTYPE_POOL);
+}
+
+static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
+}
+
+RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
+
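+/* Dump per-port NDC request, latency and outstanding-request counters for
+ * the given block, cache mode (CACHING/BYPASS) and transaction type.
+ */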
+static void ndc_cache_stats(struct seq_file *s, int blk_addr,
+ int ctype, int transaction)
+{
+ u64 req, out_req, lat, cant_alloc;
+ struct nix_hw *nix_hw;
+ struct rvu *rvu;
+ int port;
+
+ if (blk_addr == BLKADDR_NDC_NPA0) {
+ rvu = s->private;
+ } else {
+ nix_hw = s->private;
+ rvu = nix_hw->rvu;
+ }
+
+ for (port = 0; port < NDC_MAX_PORT; port++) {
+ req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
+ (port, ctype, transaction));
+ lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
+ (port, ctype, transaction));
+ out_req = rvu_read64(rvu, blk_addr,
+ NDC_AF_PORTX_RTX_RWX_OSTDN_PC
+ (port, ctype, transaction));
+ cant_alloc = rvu_read64(rvu, blk_addr,
+ NDC_AF_PORTX_RTX_CANT_ALLOC_PC
+ (port, transaction));
+ seq_printf(s, "\nPort:%d\n", port);
+ seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
+ seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
+		seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", req ? lat / req : 0);
+ seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
+ seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
+ }
+}
+
+static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
+{
+ seq_puts(s, "\n***** CACHE mode read stats *****\n");
+ ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
+ seq_puts(s, "\n***** CACHE mode write stats *****\n");
+ ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
+ seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
+ ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
+ seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
+ ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
+ return 0;
+}
+
+static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
+{
+ return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
+}
+
+RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
+
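+/* Dump per-bank NDC cache hit/miss counters; the bank count is read from
+ * NDC_AF_CONST.
+ */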
+static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
+{
+ struct nix_hw *nix_hw;
+ struct rvu *rvu;
+ int bank, max_bank;
+ u64 ndc_af_const;
+
+ if (blk_addr == BLKADDR_NDC_NPA0) {
+ rvu = s->private;
+ } else {
+ nix_hw = s->private;
+ rvu = nix_hw->rvu;
+ }
+
+ ndc_af_const = rvu_read64(rvu, blk_addr, NDC_AF_CONST);
+ max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const);
+ for (bank = 0; bank < max_bank; bank++) {
+ seq_printf(s, "BANK:%d\n", bank);
+ seq_printf(s, "\tHits:\t%lld\n",
+ (u64)rvu_read64(rvu, blk_addr,
+ NDC_AF_BANKX_HIT_PC(bank)));
+ seq_printf(s, "\tMiss:\t%lld\n",
+ (u64)rvu_read64(rvu, blk_addr,
+ NDC_AF_BANKX_MISS_PC(bank)));
+ }
+ return 0;
+}
+
+static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
+{
+ struct nix_hw *nix_hw = filp->private;
+ int blkaddr = 0;
+ int ndc_idx = 0;
+
+ blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
+ BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
+ ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);
+
+ return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
+
+static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
+{
+ struct nix_hw *nix_hw = filp->private;
+ int blkaddr = 0;
+ int ndc_idx = 0;
+
+ blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
+ BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
+ ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);
+
+ return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
+
+static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
+ void *unused)
+{
+ return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
+}
+
+RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
+
+static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
+ void *unused)
+{
+ struct nix_hw *nix_hw = filp->private;
+ int ndc_idx = NPA0_U;
+ int blkaddr = 0;
+
+ blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
+ BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
+
+ return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
+
+static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
+ void *unused)
+{
+ struct nix_hw *nix_hw = filp->private;
+ int ndc_idx = NPA0_U;
+ int blkaddr = 0;
+
+ blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
+ BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
+
+ return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
+
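+/* Dumps given CN10K nix_sq's context */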
+static void print_nix_cn10k_sq_ctx(struct seq_file *m,
+ struct nix_cn10k_sq_ctx_s *sq_ctx)
+{
+ seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
+ sq_ctx->ena, sq_ctx->qint_idx);
+ seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
+ sq_ctx->substream, sq_ctx->sdp_mcast);
+ seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
+ sq_ctx->cq, sq_ctx->sqe_way_mask);
+
+ seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
+ sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
+ seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
+ sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
+ seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
+ sq_ctx->default_chan, sq_ctx->sqb_count);
+
+ seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
+ seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
+ seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
+ sq_ctx->sqb_aura, sq_ctx->sq_int);
+ seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
+ sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
+
+ seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
+ sq_ctx->max_sqe_size, sq_ctx->cq_limit);
+ seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
+		   sq_ctx->lmt_dis, sq_ctx->mnq_dis);
+ seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
+ sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
+ seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
+ sq_ctx->tail_offset, sq_ctx->smenq_offset);
+ seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
+ sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
+
+ seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
+ sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
+ seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
+ seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
+ seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
+ seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
+ sq_ctx->smenq_next_sqb);
+
+ seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
+
+ seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
+ seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
+ sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
+ seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
+ sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
+ seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
+		   sq_ctx->vfi_lso_vlan1_ins_ena, sq_ctx->vfi_lso_vld);
+
+ seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
+ (u64)sq_ctx->scm_lso_rem);
+ seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
+ seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
+ seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
+ (u64)sq_ctx->dropped_octs);
+ seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
+ (u64)sq_ctx->dropped_pkts);
+}
+
+/* Dumps given nix_sq's context */
+static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
+{
+ struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
+
+ if (!is_rvu_otx2(rvu)) {
+ print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
+ return;
+ }
+ seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
+ sq_ctx->sqe_way_mask, sq_ctx->cq);
+ seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
+ sq_ctx->sdp_mcast, sq_ctx->substream);
+ seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
+ sq_ctx->qint_idx, sq_ctx->ena);
+
+ seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
+ sq_ctx->sqb_count, sq_ctx->default_chan);
+ seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
+ sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
+ seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
+ sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);
+
+ seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
+ sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
+ seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
+ sq_ctx->sq_int, sq_ctx->sqb_aura);
+ seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);
+
+ seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
+ sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
+ seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
+ sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
+ seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
+ sq_ctx->smenq_offset, sq_ctx->tail_offset);
+ seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
+ sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
+ seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
+ sq_ctx->mnq_dis, sq_ctx->lmt_dis);
+ seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
+ sq_ctx->cq_limit, sq_ctx->max_sqe_size);
+
+ seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
+ seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
+ seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
+ seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
+ sq_ctx->smenq_next_sqb);
+
+ seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
+
+ seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
+ sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
+ seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
+ sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
+ seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
+ sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
+ seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);
+
+ seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
+ (u64)sq_ctx->scm_lso_rem);
+ seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
+ seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
+ seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
+ (u64)sq_ctx->dropped_octs);
+ seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
+ (u64)sq_ctx->dropped_pkts);
+}
+
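+/* Dumps given CN10K nix_rq's context */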
+static void print_nix_cn10k_rq_ctx(struct seq_file *m,
+ struct nix_cn10k_rq_ctx_s *rq_ctx)
+{
+ seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
+ rq_ctx->ena, rq_ctx->sso_ena);
+ seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
+ rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
+ seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
+ rq_ctx->cq, rq_ctx->lenerr_dis);
+ seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
+ rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
+ seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
+ rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
+ seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
+ rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
+ seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);
+
+ seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
+ rq_ctx->spb_aura, rq_ctx->lpb_aura);
+ seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
+ rq_ctx->sso_grp, rq_ctx->sso_tt);
+ seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
+ rq_ctx->pb_caching, rq_ctx->wqe_caching);
+ seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
+ rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
+ seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
+ rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
+ seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
+ rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
+
+ seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
+ seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
+ seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
+	seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: spb_ena \t\t\t%d\n",
+ rq_ctx->wqe_skip, rq_ctx->spb_ena);
+	seq_printf(m, "W2: lpb_sizem1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
+ rq_ctx->lpb_sizem1, rq_ctx->first_skip);
+ seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
+ rq_ctx->later_skip, rq_ctx->xqe_imm_size);
+ seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
+ rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);
+
+ seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
+ rq_ctx->xqe_drop, rq_ctx->xqe_pass);
+ seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
+ rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
+ seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
+ rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
+ seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n",
+		   rq_ctx->spb_aura_drop, rq_ctx->spb_aura_pass);
+
+	seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
+		   rq_ctx->lpb_aura_drop, rq_ctx->lpb_aura_pass);
+	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_pool_pass \t\t%d\n",
+		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
+ seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
+ rq_ctx->rq_int, rq_ctx->rq_int_ena);
+ seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);
+
+ seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
+ rq_ctx->ltag, rq_ctx->good_utag);
+ seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
+ rq_ctx->bad_utag, rq_ctx->flow_tagw);
+ seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
+ rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
+ seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
+ rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
+ seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);
+
+ seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
+ seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
+ seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
+ seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
+ seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
+}
+
+/* Dumps given nix_rq's context */
+static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
+{
+ struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
+
+ if (!is_rvu_otx2(rvu)) {
+ print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx);
+ return;
+ }
+
+ seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
+ rq_ctx->wqe_aura, rq_ctx->substream);
+ seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
+ rq_ctx->cq, rq_ctx->ena_wqwd);
+ seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
+ rq_ctx->ipsech_ena, rq_ctx->sso_ena);
+ seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);
+
+ seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
+ rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
+ seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
+ rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
+ seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
+ rq_ctx->pb_caching, rq_ctx->sso_tt);
+ seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
+ rq_ctx->sso_grp, rq_ctx->lpb_aura);
+ seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);
+
+ seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
+ rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
+ seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
+ rq_ctx->xqe_imm_size, rq_ctx->later_skip);
+ seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
+ rq_ctx->first_skip, rq_ctx->lpb_sizem1);
+ seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
+ rq_ctx->spb_ena, rq_ctx->wqe_skip);
+ seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);
+
+ seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
+ rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
+ seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
+ rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
+ seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
+ rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
+ seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
+ rq_ctx->xqe_pass, rq_ctx->xqe_drop);
+
+ seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
+ rq_ctx->qint_idx, rq_ctx->rq_int_ena);
+ seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
+ rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
+ seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
+ rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
+ seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);
+
+ seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
+ rq_ctx->flow_tagw, rq_ctx->bad_utag);
+ seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
+ rq_ctx->good_utag, rq_ctx->ltag);
+
+ seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
+ seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
+ seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
+ seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
+ seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
+}
+
+/* Dumps given nix_cq's context */
+static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
+{
+ struct nix_cq_ctx_s *cq_ctx = &rsp->cq;
+
+ seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);
+
+ seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
+ seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
+ cq_ctx->avg_con, cq_ctx->cint_idx);
+ seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
+ cq_ctx->cq_err, cq_ctx->qint_idx);
+ seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
+ cq_ctx->bpid, cq_ctx->bp_ena);
+
+ seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
+ cq_ctx->update_time, cq_ctx->avg_level);
+ seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
+ cq_ctx->head, cq_ctx->tail);
+
+ seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n",
+ cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
+ seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n",
+ cq_ctx->qsize, cq_ctx->caching);
+ seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
+ cq_ctx->substream, cq_ctx->ena);
+ seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
+ cq_ctx->drop_ena, cq_ctx->drop);
+ seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
+}
+
+static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
+ void *unused, int ctype)
+{
+ void (*print_nix_ctx)(struct seq_file *filp,
+ struct nix_aq_enq_rsp *rsp) = NULL;
+ struct nix_hw *nix_hw = filp->private;
+ struct rvu *rvu = nix_hw->rvu;
+ struct nix_aq_enq_req aq_req;
+ struct nix_aq_enq_rsp rsp;
+ char *ctype_string = NULL;
+ int qidx, rc, max_id = 0;
+ struct rvu_pfvf *pfvf;
+ int nixlf, id, all;
+ u16 pcifunc;
+
+ switch (ctype) {
+ case NIX_AQ_CTYPE_CQ:
+ nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
+ id = rvu->rvu_dbg.nix_cq_ctx.id;
+ all = rvu->rvu_dbg.nix_cq_ctx.all;
+ break;
+
+ case NIX_AQ_CTYPE_SQ:
+ nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
+ id = rvu->rvu_dbg.nix_sq_ctx.id;
+ all = rvu->rvu_dbg.nix_sq_ctx.all;
+ break;
+
+ case NIX_AQ_CTYPE_RQ:
+ nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
+ id = rvu->rvu_dbg.nix_rq_ctx.id;
+ all = rvu->rvu_dbg.nix_rq_ctx.all;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
+ return -EINVAL;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
+ seq_puts(filp, "SQ context is not initialized\n");
+ return -EINVAL;
+ } else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
+ seq_puts(filp, "RQ context is not initialized\n");
+ return -EINVAL;
+ } else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
+ seq_puts(filp, "CQ context is not initialized\n");
+ return -EINVAL;
+ }
+
+ if (ctype == NIX_AQ_CTYPE_SQ) {
+ max_id = pfvf->sq_ctx->qsize;
+ ctype_string = "sq";
+ print_nix_ctx = print_nix_sq_ctx;
+ } else if (ctype == NIX_AQ_CTYPE_RQ) {
+ max_id = pfvf->rq_ctx->qsize;
+ ctype_string = "rq";
+ print_nix_ctx = print_nix_rq_ctx;
+ } else if (ctype == NIX_AQ_CTYPE_CQ) {
+ max_id = pfvf->cq_ctx->qsize;
+ ctype_string = "cq";
+ print_nix_ctx = print_nix_cq_ctx;
+ }
+
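+ /* Build a NIX AQ READ request; the response carries the raw queue
+ * context words which are dumped by the ctype specific printer
+ * chosen above.
+ */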
+ memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
+ aq_req.hdr.pcifunc = pcifunc;
+ aq_req.ctype = ctype;
+ aq_req.op = NIX_AQ_INSTOP_READ;
+ if (all)
+ id = 0;
+ else
+ max_id = id + 1;
+ for (qidx = id; qidx < max_id; qidx++) {
+ aq_req.qidx = qidx;
+ seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
+ ctype_string, nixlf, aq_req.qidx);
+ rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
+ if (rc) {
+ seq_puts(filp, "Failed to read the context\n");
+ return -EINVAL;
+ }
+ print_nix_ctx(filp, &rsp);
+ }
+ return 0;
+}
+
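+/* Validate the user-selected NIX LF and queue index and cache the selection
+ * in rvu_dbg so a subsequent read of the sq/rq/cq_ctx file knows which
+ * context(s) to dump.
+ */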
+static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
+ int id, int ctype, char *ctype_string,
+ struct seq_file *m)
+{
+ struct nix_hw *nix_hw = m->private;
+ struct rvu_pfvf *pfvf;
+ int max_id = 0;
+ u16 pcifunc;
+
+ if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
+ return -EINVAL;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ if (ctype == NIX_AQ_CTYPE_SQ) {
+ if (!pfvf->sq_ctx) {
+ dev_warn(rvu->dev, "SQ context is not initialized\n");
+ return -EINVAL;
+ }
+ max_id = pfvf->sq_ctx->qsize;
+ } else if (ctype == NIX_AQ_CTYPE_RQ) {
+ if (!pfvf->rq_ctx) {
+ dev_warn(rvu->dev, "RQ context is not initialized\n");
+ return -EINVAL;
+ }
+ max_id = pfvf->rq_ctx->qsize;
+ } else if (ctype == NIX_AQ_CTYPE_CQ) {
+ if (!pfvf->cq_ctx) {
+ dev_warn(rvu->dev, "CQ context is not initialized\n");
+ return -EINVAL;
+ }
+ max_id = pfvf->cq_ctx->qsize;
+ }
+
+ if (id < 0 || id >= max_id) {
+ dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n",
+ ctype_string, max_id - 1);
+ return -EINVAL;
+ }
+ switch (ctype) {
+ case NIX_AQ_CTYPE_CQ:
+ rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
+ rvu->rvu_dbg.nix_cq_ctx.id = id;
+ rvu->rvu_dbg.nix_cq_ctx.all = all;
+ break;
+
+ case NIX_AQ_CTYPE_SQ:
+ rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
+ rvu->rvu_dbg.nix_sq_ctx.id = id;
+ rvu->rvu_dbg.nix_sq_ctx.all = all;
+ break;
+
+ case NIX_AQ_CTYPE_RQ:
+ rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
+ rvu->rvu_dbg.nix_rq_ctx.id = id;
+ rvu->rvu_dbg.nix_rq_ctx.all = all;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
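+/* Write handler for the sq/rq/cq_ctx debugfs files. Accepts input of the
+ * form "<nixlf> [<queue number>/all]" (see the usage message below), e.g.
+ * "echo 0 all > rq_ctx" selects every RQ of NIX LF 0 for dumping.
+ */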
+static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos,
+ int ctype)
+{
+ struct seq_file *m = filp->private_data;
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
+ char *cmd_buf, *ctype_string;
+ int nixlf, id = 0, ret;
+ bool all = false;
+
+ if ((*ppos != 0) || !count)
+ return -EINVAL;
+
+ switch (ctype) {
+ case NIX_AQ_CTYPE_SQ:
+ ctype_string = "sq";
+ break;
+ case NIX_AQ_CTYPE_RQ:
+ ctype_string = "rq";
+ break;
+ case NIX_AQ_CTYPE_CQ:
+ ctype_string = "cq";
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+ if (!cmd_buf)
+ return -ENOMEM;
+
+ ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
+ &nixlf, &id, &all);
+ if (ret < 0) {
+ dev_info(rvu->dev,
+ "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
+ ctype_string, ctype_string);
+ goto done;
+ } else {
+ ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
+ ctype_string, m);
+ }
+done:
+ kfree(cmd_buf);
+ return ret ? ret : count;
+}
+
+static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
+ NIX_AQ_CTYPE_SQ);
+}
+
+static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
+
+static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
+ NIX_AQ_CTYPE_RQ);
+}
+
+static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_RQ);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
+
+static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
+ NIX_AQ_CTYPE_CQ);
+}
+
+static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
+
+static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
+ unsigned long *bmap, char *qtype)
+{
+ char *buf;
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ bitmap_print_to_pagebuf(false, buf, bmap, qsize);
+ seq_printf(filp, "%s context count : %d\n", qtype, qsize);
+ seq_printf(filp, "%s context ena/dis bitmap : %s\n",
+ qtype, buf);
+ kfree(buf);
+}
+
+static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
+{
+ if (!pfvf->cq_ctx)
+ seq_puts(filp, "cq context is not initialized\n");
+ else
+ print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
+ "cq");
+
+ if (!pfvf->rq_ctx)
+ seq_puts(filp, "rq context is not initialized\n");
+ else
+ print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
+ "rq");
+
+ if (!pfvf->sq_ctx)
+ seq_puts(filp, "sq context is not initialized\n");
+ else
+ print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
+ "sq");
+}
+
+static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_qsize_write(filp, buffer, count, ppos,
+ BLKTYPE_NIX);
+}
+
+static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
+
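+/* Decode a NIX bandwidth profile (ingress policer) context: policing mode,
+ * rate/burst exponents and mantissas, per-color actions and the
+ * green/yellow/red pass/drop statistics.
+ */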
+static void print_band_prof_ctx(struct seq_file *m,
+ struct nix_bandprof_s *prof)
+{
+ char *str;
+
+ switch (prof->pc_mode) {
+ case NIX_RX_PC_MODE_VLAN:
+ str = "VLAN";
+ break;
+ case NIX_RX_PC_MODE_DSCP:
+ str = "DSCP";
+ break;
+ case NIX_RX_PC_MODE_GEN:
+ str = "Generic";
+ break;
+ case NIX_RX_PC_MODE_RSVD:
+ str = "Reserved";
+ break;
+ }
+ seq_printf(m, "W0: pc_mode\t\t%s\n", str);
+ str = (prof->icolor == 3) ? "Color blind" :
+ (prof->icolor == 0) ? "Green" :
+ (prof->icolor == 1) ? "Yellow" : "Red";
+ seq_printf(m, "W0: icolor\t\t%s\n", str);
+ seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
+ seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
+ seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
+ seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
+ seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
+ seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
+ seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
+ seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);
+
+ seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
+ str = (prof->lmode == 0) ? "byte" : "packet";
+ seq_printf(m, "W1: lmode\t\t%s\n", str);
+ seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
+ seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
+ seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
+ seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
+ str = (prof->gc_action == 0) ? "PASS" :
+ (prof->gc_action == 1) ? "DROP" : "RED";
+ seq_printf(m, "W1: gc_action\t\t%s\n", str);
+ str = (prof->yc_action == 0) ? "PASS" :
+ (prof->yc_action == 1) ? "DROP" : "RED";
+ seq_printf(m, "W1: yc_action\t\t%s\n", str);
+ str = (prof->rc_action == 0) ? "PASS" :
+ (prof->rc_action == 1) ? "DROP" : "RED";
+ seq_printf(m, "W1: rc_action\t\t%s\n", str);
+ seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
+ seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
+ seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);
+
+ seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
+ seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
+ seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
+ seq_printf(m, "W4: green_pkt_pass\t%lld\n",
+ (u64)prof->green_pkt_pass);
+ seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
+ (u64)prof->yellow_pkt_pass);
+ seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
+ seq_printf(m, "W7: green_octs_pass\t%lld\n",
+ (u64)prof->green_octs_pass);
+ seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
+ (u64)prof->yellow_octs_pass);
+ seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
+ seq_printf(m, "W10: green_pkt_drop\t%lld\n",
+ (u64)prof->green_pkt_drop);
+ seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
+ (u64)prof->yellow_pkt_drop);
+ seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
+ seq_printf(m, "W13: green_octs_drop\t%lld\n",
+ (u64)prof->green_octs_drop);
+ seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
+ (u64)prof->yellow_octs_drop);
+ seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
+ seq_puts(m, "==============================\n");
+}
+
+static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
+{
+ struct nix_hw *nix_hw = m->private;
+ struct nix_cn10k_aq_enq_req aq_req;
+ struct nix_cn10k_aq_enq_rsp aq_rsp;
+ struct rvu *rvu = nix_hw->rvu;
+ struct nix_ipolicer *ipolicer;
+ int layer, prof_idx, idx, rc;
+ u16 pcifunc;
+ char *str;
+
+ /* Ingress policers do not exist on all platforms */
+ if (!nix_hw->ipolicer)
+ return 0;
+
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
+ (layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";
+
+ seq_printf(m, "\n%s bandwidth profiles\n", str);
+ seq_puts(m, "=======================\n");
+
+ ipolicer = &nix_hw->ipolicer[layer];
+
+ for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
+ if (is_rsrc_free(&ipolicer->band_prof, idx))
+ continue;
+
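+ /* AQ bandwidth profile index: bits [13:0] select the
+ * profile within the layer, the layer itself is encoded
+ * in the bits above.
+ */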
+ prof_idx = (idx & 0x3FFF) | (layer << 14);
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
+ 0x00, NIX_AQ_CTYPE_BANDPROF,
+ prof_idx);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch context of %s profile %d, err %d\n",
+ __func__, str, idx, rc);
+ return 0;
+ }
+ seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
+ pcifunc = ipolicer->pfvf_map[idx];
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ seq_printf(m, "Allocated to :: PF %d\n",
+ rvu_get_pf(pcifunc));
+ else
+ seq_printf(m, "Allocated to :: PF %d VF %d\n",
+ rvu_get_pf(pcifunc),
+ (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
+ print_band_prof_ctx(m, &aq_rsp.prof);
+ }
+ }
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
+
+static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
+{
+ struct nix_hw *nix_hw = m->private;
+ struct nix_ipolicer *ipolicer;
+ int layer;
+ char *str;
+
+ /* Ingress policers do not exist on all platforms */
+ if (!nix_hw->ipolicer)
+ return 0;
+
+ seq_puts(m, "\nBandwidth profile resource free count\n");
+ seq_puts(m, "=====================================\n");
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
+ (layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ seq_printf(m, "%s :: Max: %4d Free: %4d\n", str,
+ ipolicer->band_prof.max,
+ rvu_rsrc_free_count(&ipolicer->band_prof));
+ }
+ seq_puts(m, "=====================================\n");
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
+
+static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
+{
+ struct nix_hw *nix_hw;
+
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return;
+
+ if (blkaddr == BLKADDR_NIX0) {
+ rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
+ nix_hw = &rvu->hw->nix[0];
+ } else {
+ rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
+ rvu->rvu_dbg.root);
+ nix_hw = &rvu->hw->nix[1];
+ }
+
+ debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_sq_ctx_fops);
+ debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_rq_ctx_fops);
+ debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_cq_ctx_fops);
+ debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_ndc_tx_cache_fops);
+ debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_ndc_rx_cache_fops);
+ debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_ndc_tx_hits_miss_fops);
+ debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_ndc_rx_hits_miss_fops);
+ debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
+ &rvu_dbg_nix_qsize_fops);
+ debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_band_prof_ctx_fops);
+ debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_band_prof_rsrc_fops);
+}
+
+static void rvu_dbg_npa_init(struct rvu *rvu)
+{
+ rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
+
+ debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_qsize_fops);
+ debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_aura_ctx_fops);
+ debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_pool_ctx_fops);
+ debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_ndc_cache_fops);
+ debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_ndc_hits_miss_fops);
+}
+
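+/* Statement-expression helpers: fetch one cumulative NIX Rx/Tx stat for this
+ * LMAC, print it when the read succeeds, update 'err' in the caller's scope
+ * and evaluate to the fetched counter so callers can sum ucast/mcast/bcast.
+ */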
+#define PRINT_CGX_CUML_NIXRX_STATUS(idx, name) \
+ ({ \
+ u64 cnt; \
+ err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \
+ NIX_STATS_RX, &(cnt)); \
+ if (!err) \
+ seq_printf(s, "%s: %llu\n", name, cnt); \
+ cnt; \
+ })
+
+#define PRINT_CGX_CUML_NIXTX_STATUS(idx, name) \
+ ({ \
+ u64 cnt; \
+ err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \
+ NIX_STATS_TX, &(cnt)); \
+ if (!err) \
+ seq_printf(s, "%s: %llu\n", name, cnt); \
+ cnt; \
+ })
+
+static int cgx_print_stats(struct seq_file *s, int lmac_id)
+{
+ struct cgx_link_user_info linfo;
+ struct mac_ops *mac_ops;
+ void *cgxd = s->private;
+ u64 ucast, mcast, bcast;
+ int stat = 0, err = 0;
+ u64 tx_stat, rx_stat;
+ struct rvu *rvu;
+
+ rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
+ if (!rvu)
+ return -ENODEV;
+
+ mac_ops = get_mac_ops(cgxd);
+ /* There can be no CGX devices at all */
+ if (!mac_ops)
+ return 0;
+
+ /* Link status */
+ seq_puts(s, "\n=======Link Status======\n\n");
+ err = cgx_get_link_info(cgxd, lmac_id, &linfo);
+ if (err)
+ seq_puts(s, "Failed to read link status\n");
+ seq_printf(s, "\nLink is %s %d Mbps\n\n",
+ linfo.link_up ? "UP" : "DOWN", linfo.speed);
+
+ /* Rx stats */
+ seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
+ mac_ops->name);
+ ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
+ if (err)
+ return err;
+ mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
+ if (err)
+ return err;
+ bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
+ if (err)
+ return err;
+ seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
+ PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
+ if (err)
+ return err;
+ PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
+ if (err)
+ return err;
+ PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
+ if (err)
+ return err;
+
+ /* Tx stats */
+ seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
+ mac_ops->name);
+ ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
+ if (err)
+ return err;
+ mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
+ if (err)
+ return err;
+ bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
+ if (err)
+ return err;
+ seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
+ PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
+ if (err)
+ return err;
+ PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
+ if (err)
+ return err;
+
+ /* Rx stats */
+ seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
+ while (stat < mac_ops->rx_stats_cnt) {
+ err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
+ if (err)
+ return err;
+ if (is_rvu_otx2(rvu))
+ seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
+ rx_stat);
+ else
+ seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
+ rx_stat);
+ stat++;
+ }
+
+ /* Tx stats */
+ stat = 0;
+ seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
+ while (stat < mac_ops->tx_stats_cnt) {
+ err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
+ if (err)
+ return err;
+
+ if (is_rvu_otx2(rvu))
+ seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
+ tx_stat);
+ else
+ seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
+ tx_stat);
+ stat++;
+ }
+
+ return err;
+}
+
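+/* The per-LMAC debugfs files live in a directory named "lmac<N>"; derive the
+ * LMAC id by parsing the digits after the last 'c' in that directory name.
+ */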
+static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
+{
+ struct dentry *current_dir;
+ char *buf;
+
+ current_dir = filp->file->f_path.dentry->d_parent;
+ buf = strrchr(current_dir->d_name.name, 'c');
+ if (!buf)
+ return -EINVAL;
+
+ return kstrtoint(buf + 1, 10, lmac_id);
+}
+
+static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
+{
+ int lmac_id, err;
+
+ err = rvu_dbg_derive_lmacid(filp, &lmac_id);
+ if (!err)
+ return cgx_print_stats(filp, lmac_id);
+
+ return err;
+}
+
+RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
+
+static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
+{
+ struct pci_dev *pdev = NULL;
+ void *cgxd = s->private;
+ char *bcast, *mcast;
+ u16 index, domain;
+ u8 dmac[ETH_ALEN];
+ struct rvu *rvu;
+ u64 cfg, mac;
+ int pf;
+
+ rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
+ if (!rvu)
+ return -ENODEV;
+
+ pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
+ domain = 2;
+
+ pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
+ if (!pdev)
+ return 0;
+
+ cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
+ bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
+ mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";
+
+ seq_puts(s,
+ "PCI dev RVUPF BROADCAST MULTICAST FILTER-MODE\n");
+ seq_printf(s, "%s PF%d %9s %9s",
+ dev_name(&pdev->dev), pf, bcast, mcast);
+ if (cfg & CGX_DMAC_CAM_ACCEPT)
+ seq_printf(s, "%12s\n\n", "UNICAST");
+ else
+ seq_printf(s, "%16s\n\n", "PROMISCUOUS");
+
+ seq_puts(s, "\nDMAC-INDEX ADDRESS\n");
+
+ for (index = 0; index < 32; index++) {
+ cfg = cgx_read_dmac_entry(cgxd, index);
+ /* Display enabled dmac entries associated with current lmac */
+ if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
+ FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
+ mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
+ u64_to_ether_addr(mac, dmac);
+ seq_printf(s, "%7d %pM\n", index, dmac);
+ }
+ }
+
+ pci_dev_put(pdev);
+ return 0;
+}
+
+static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
+{
+ int err, lmac_id;
+
+ err = rvu_dbg_derive_lmacid(filp, &lmac_id);
+ if (!err)
+ return cgx_print_dmac_flt(filp, lmac_id);
+
+ return err;
+}
+
+RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
+
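+/* Create the CGX/RPM debugfs hierarchy: one directory per MAC block
+ * ("cgx<N>" or "rpm<N>") with a sub-directory per LMAC holding the
+ * "stats" and "mac_filter" files.
+ */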
+static void rvu_dbg_cgx_init(struct rvu *rvu)
+{
+ struct mac_ops *mac_ops;
+ unsigned long lmac_bmap;
+ int i, lmac_id;
+ char dname[20];
+ void *cgx;
+
+ if (!cgx_get_cgxcnt_max())
+ return;
+
+ mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
+ if (!mac_ops)
+ return;
+
+ rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
+ rvu->rvu_dbg.root);
+
+ for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
+ cgx = rvu_cgx_pdata(i, rvu);
+ if (!cgx)
+ continue;
+ lmac_bmap = cgx_get_lmac_bmap(cgx);
+ /* cgx debugfs dir */
+ sprintf(dname, "%s%d", mac_ops->name, i);
+ rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
+ rvu->rvu_dbg.cgx_root);
+
+ for_each_set_bit(lmac_id, &lmac_bmap, rvu->hw->lmac_per_cgx) {
+ /* lmac debugfs dir */
+ sprintf(dname, "lmac%d", lmac_id);
+ rvu->rvu_dbg.lmac =
+ debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
+
+ debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
+ cgx, &rvu_dbg_cgx_stat_fops);
+ debugfs_create_file("mac_filter", 0600,
+ rvu->rvu_dbg.lmac, cgx,
+ &rvu_dbg_cgx_dmac_flt_fops);
+ }
+ }
+}
+
+/* NPC debugfs APIs */
+static void rvu_print_npc_mcam_info(struct seq_file *s,
+ u16 pcifunc, int blkaddr)
+{
+ struct rvu *rvu = s->private;
+ int entry_acnt, entry_ecnt;
+ int cntr_acnt, cntr_ecnt;
+
+ rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
+ &entry_acnt, &entry_ecnt);
+ rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
+ &cntr_acnt, &cntr_ecnt);
+ if (!entry_acnt && !cntr_acnt)
+ return;
+
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
+ rvu_get_pf(pcifunc));
+ else
+ seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
+ rvu_get_pf(pcifunc),
+ (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
+
+ if (entry_acnt) {
+ seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
+ seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
+ }
+ if (cntr_acnt) {
+ seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
+ seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
+ }
+}
+
+static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unused)
+{
+ struct rvu *rvu = filp->private;
+ int pf, vf, numvfs, blkaddr;
+ struct npc_mcam *mcam;
+ u16 pcifunc, counters;
+ u64 cfg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return -ENODEV;
+
+ mcam = &rvu->hw->mcam;
+ counters = rvu->hw->npc_counters;
+
+ seq_puts(filp, "\nNPC MCAM info:\n");
+ /* MCAM keywidth on receive and transmit sides */
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
+ cfg = (cfg >> 32) & 0x07;
+ seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
+ "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
+ "224bits" : "448bits"));
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
+ cfg = (cfg >> 32) & 0x07;
+ seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
+ "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
+ "224bits" : "448bits"));
+
+ mutex_lock(&mcam->lock);
+ /* MCAM entries */
+ seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
+ seq_printf(filp, "\t\t Reserved \t: %d\n",
+ mcam->total_entries - mcam->bmap_entries);
+ seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);
+
+ /* MCAM counters */
+ seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
+ seq_printf(filp, "\t\t Reserved \t: %d\n",
+ counters - mcam->counters.max);
+ seq_printf(filp, "\t\t Available \t: %d\n",
+ rvu_rsrc_free_count(&mcam->counters));
+
+ if (mcam->bmap_entries == mcam->bmap_fcnt) {
+ mutex_unlock(&mcam->lock);
+ return 0;
+ }
+
+ seq_puts(filp, "\n\t\t Current allocation\n");
+ seq_puts(filp, "\t\t====================\n");
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ pcifunc = (pf << RVU_PFVF_PF_SHIFT);
+ rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
+
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+ numvfs = (cfg >> 12) & 0xFF;
+ for (vf = 0; vf < numvfs; vf++) {
+ pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
+ rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
+ }
+ }
+
+ mutex_unlock(&mcam->lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
+
+static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
+ void *unused)
+{
+ struct rvu *rvu = filp->private;
+ struct npc_mcam *mcam;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return -ENODEV;
+
+ mcam = &rvu->hw->mcam;
+
+ seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
+ seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
+ rvu_read64(rvu, blkaddr,
+ NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
+
+static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
+ struct rvu_npc_mcam_rule *rule)
+{
+ u8 bit;
+
+ for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
+ seq_printf(s, "\t%s ", npc_get_field_name(bit));
+ switch (bit) {
+ case NPC_LXMB:
+ if (rule->lxmb == 1)
+ seq_puts(s, "\tL2M nibble is set\n");
+ else
+ seq_puts(s, "\tL2B nibble is set\n");
+ break;
+ case NPC_DMAC:
+ seq_printf(s, "%pM ", rule->packet.dmac);
+ seq_printf(s, "mask %pM\n", rule->mask.dmac);
+ break;
+ case NPC_SMAC:
+ seq_printf(s, "%pM ", rule->packet.smac);
+ seq_printf(s, "mask %pM\n", rule->mask.smac);
+ break;
+ case NPC_ETYPE:
+ seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
+ seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
+ break;
+ case NPC_OUTER_VID:
+ seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci));
+ seq_printf(s, "mask 0x%x\n",
+ ntohs(rule->mask.vlan_tci));
+ break;
+ case NPC_TOS:
+ seq_printf(s, "%d ", rule->packet.tos);
+ seq_printf(s, "mask 0x%x\n", rule->mask.tos);
+ break;
+ case NPC_SIP_IPV4:
+ seq_printf(s, "%pI4 ", &rule->packet.ip4src);
+ seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
+ break;
+ case NPC_DIP_IPV4:
+ seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
+ seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
+ break;
+ case NPC_SIP_IPV6:
+ seq_printf(s, "%pI6 ", rule->packet.ip6src);
+ seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
+ break;
+ case NPC_DIP_IPV6:
+ seq_printf(s, "%pI6 ", rule->packet.ip6dst);
+ seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
+ break;
+ case NPC_SPORT_TCP:
+ case NPC_SPORT_UDP:
+ case NPC_SPORT_SCTP:
+ seq_printf(s, "%d ", ntohs(rule->packet.sport));
+ seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
+ break;
+ case NPC_DPORT_TCP:
+ case NPC_DPORT_UDP:
+ case NPC_DPORT_SCTP:
+ seq_printf(s, "%d ", ntohs(rule->packet.dport));
+ seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
+ break;
+ default:
+ seq_puts(s, "\n");
+ break;
+ }
+ }
+}
+
+static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
+ struct rvu_npc_mcam_rule *rule)
+{
+ if (is_npc_intf_tx(rule->intf)) {
+ switch (rule->tx_action.op) {
+ case NIX_TX_ACTIONOP_DROP:
+ seq_puts(s, "\taction: Drop\n");
+ break;
+ case NIX_TX_ACTIONOP_UCAST_DEFAULT:
+ seq_puts(s, "\taction: Unicast to default channel\n");
+ break;
+ case NIX_TX_ACTIONOP_UCAST_CHAN:
+ seq_printf(s, "\taction: Unicast to channel %d\n",
+ rule->tx_action.index);
+ break;
+ case NIX_TX_ACTIONOP_MCAST:
+ seq_puts(s, "\taction: Multicast\n");
+ break;
+ case NIX_TX_ACTIONOP_DROP_VIOL:
+ seq_puts(s, "\taction: Lockdown Violation Drop\n");
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (rule->rx_action.op) {
+ case NIX_RX_ACTIONOP_DROP:
+ seq_puts(s, "\taction: Drop\n");
+ break;
+ case NIX_RX_ACTIONOP_UCAST:
+ seq_printf(s, "\taction: Direct to queue %d\n",
+ rule->rx_action.index);
+ break;
+ case NIX_RX_ACTIONOP_RSS:
+ seq_puts(s, "\taction: RSS\n");
+ break;
+ case NIX_RX_ACTIONOP_UCAST_IPSEC:
+ seq_puts(s, "\taction: Unicast ipsec\n");
+ break;
+ case NIX_RX_ACTIONOP_MCAST:
+ seq_puts(s, "\taction: Multicast\n");
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static const char *rvu_dbg_get_intf_name(int intf)
+{
+ switch (intf) {
+ case NIX_INTFX_RX(0):
+ return "NIX0_RX";
+ case NIX_INTFX_RX(1):
+ return "NIX1_RX";
+ case NIX_INTFX_TX(0):
+ return "NIX0_TX";
+ case NIX_INTFX_TX(1):
+ return "NIX1_TX";
+ default:
+ break;
+ }
+
+ return "unknown";
+}
+
+static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
+{
+ struct rvu_npc_mcam_rule *iter;
+ struct rvu *rvu = s->private;
+ struct npc_mcam *mcam;
+ int pf, vf = -1;
+ bool enabled;
+ int blkaddr;
+ u16 target;
+ u64 hits;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return 0;
+
+ mcam = &rvu->hw->mcam;
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry(iter, &mcam->mcam_rules, list) {
+ pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
+ seq_printf(s, "\n\tInstalled by: PF%d ", pf);
+
+ if (iter->owner & RVU_PFVF_FUNC_MASK) {
+ vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
+ seq_printf(s, "VF%d", vf);
+ }
+ seq_puts(s, "\n");
+
+ seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
+ "RX" : "TX");
+ seq_printf(s, "\tinterface: %s\n",
+ rvu_dbg_get_intf_name(iter->intf));
+ seq_printf(s, "\tmcam entry: %d\n", iter->entry);
+
+ rvu_dbg_npc_mcam_show_flows(s, iter);
+ if (is_npc_intf_rx(iter->intf)) {
+ target = iter->rx_action.pf_func;
+ pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
+ seq_printf(s, "\tForward to: PF%d ", pf);
+
+ if (target & RVU_PFVF_FUNC_MASK) {
+ vf = (target & RVU_PFVF_FUNC_MASK) - 1;
+ seq_printf(s, "VF%d", vf);
+ }
+ seq_puts(s, "\n");
+ seq_printf(s, "\tchannel: 0x%x\n", iter->chan);
+ seq_printf(s, "\tchannel_mask: 0x%x\n", iter->chan_mask);
+ }
+
+ rvu_dbg_npc_mcam_show_action(s, iter);
+
+ enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry);
+ seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no");
+
+ if (!iter->has_cntr)
+ continue;
+ seq_printf(s, "\tcounter: %d\n", iter->cntr);
+
+ hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
+ seq_printf(s, "\thits: %lld\n", hits);
+ }
+ mutex_unlock(&mcam->lock);
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
+
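+/* Dump the exact-match MEM table way by way (channel + MAC per way),
+ * followed by the overflow CAM table entries.
+ */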
+static int rvu_dbg_npc_exact_show_entries(struct seq_file *s, void *unused)
+{
+ struct npc_exact_table_entry *mem_entry[NPC_EXACT_TBL_MAX_WAYS] = { 0 };
+ struct npc_exact_table_entry *cam_entry;
+ struct npc_exact_table *table;
+ struct rvu *rvu = s->private;
+ int i, j;
+ u8 bitmap = 0;
+
+ table = rvu->hw->table;
+
+ mutex_lock(&table->lock);
+
+ /* Check if there is at least one entry in mem table */
+ if (!table->mem_tbl_entry_cnt)
+ goto dump_cam_table;
+
+ /* Print table headers */
+ seq_puts(s, "\n\tExact Match MEM Table\n");
+ seq_puts(s, "Index\t");
+
+ for (i = 0; i < table->mem_table.ways; i++) {
+ mem_entry[i] = list_first_entry_or_null(&table->lhead_mem_tbl_entry[i],
+ struct npc_exact_table_entry, list);
+
+ seq_printf(s, "Way-%d\t\t\t\t\t", i);
+ }
+
+ seq_puts(s, "\n");
+ for (i = 0; i < table->mem_table.ways; i++)
+ seq_puts(s, "\tChan MAC \t");
+
+ seq_puts(s, "\n\n");
+
+ /* Print mem table entries */
+ for (i = 0; i < table->mem_table.depth; i++) {
+ bitmap = 0;
+ for (j = 0; j < table->mem_table.ways; j++) {
+ if (!mem_entry[j])
+ continue;
+
+ if (mem_entry[j]->index != i)
+ continue;
+
+ bitmap |= BIT(j);
+ }
+
+ /* No valid entries */
+ if (!bitmap)
+ continue;
+
+ seq_printf(s, "%d\t", i);
+ for (j = 0; j < table->mem_table.ways; j++) {
+ if (!(bitmap & BIT(j))) {
+ seq_puts(s, "nil\t\t\t\t\t");
+ continue;
+ }
+
+ seq_printf(s, "0x%x %pM\t\t\t", mem_entry[j]->chan,
+ mem_entry[j]->mac);
+ mem_entry[j] = list_next_entry(mem_entry[j], list);
+ }
+ seq_puts(s, "\n");
+ }
+
+dump_cam_table:
+
+ if (!table->cam_tbl_entry_cnt)
+ goto done;
+
+ seq_puts(s, "\n\tExact Match CAM Table\n");
+ seq_puts(s, "index\tchan\tMAC\n");
+
+ /* Traverse cam table entries */
+ list_for_each_entry(cam_entry, &table->lhead_cam_tbl_entry, list) {
+ seq_printf(s, "%d\t0x%x\t%pM\n", cam_entry->index, cam_entry->chan,
+ cam_entry->mac);
+ }
+
+done:
+ mutex_unlock(&table->lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(npc_exact_entries, npc_exact_show_entries, NULL);
+
+static int rvu_dbg_npc_exact_show_info(struct seq_file *s, void *unused)
+{
+ struct npc_exact_table *table;
+ struct rvu *rvu = s->private;
+ int i;
+
+ table = rvu->hw->table;
+
+ seq_puts(s, "\n\tExact Table Info\n");
+ seq_printf(s, "Exact Match Feature : %s\n",
+ rvu->hw->cap.npc_exact_match_enabled ? "enabled" : "disabled");
+ if (!rvu->hw->cap.npc_exact_match_enabled)
+ return 0;
+
+ seq_puts(s, "\nMCAM Index\tMAC Filter Rules Count\n");
+ for (i = 0; i < table->num_drop_rules; i++)
+ seq_printf(s, "%d\t\t%d\n", i, table->cnt_cmd_rules[i]);
+
+ seq_puts(s, "\nMcam Index\tPromisc Mode Status\n");
+ for (i = 0; i < table->num_drop_rules; i++)
+ seq_printf(s, "%d\t\t%s\n", i, table->promisc_mode[i] ? "on" : "off");
+
+ seq_puts(s, "\n\tMEM Table Info\n");
+ seq_printf(s, "Ways : %d\n", table->mem_table.ways);
+ seq_printf(s, "Depth : %d\n", table->mem_table.depth);
+ seq_printf(s, "Mask : 0x%llx\n", table->mem_table.mask);
+ seq_printf(s, "Hash Mask : 0x%x\n", table->mem_table.hash_mask);
+ seq_printf(s, "Hash Offset : 0x%x\n", table->mem_table.hash_offset);
+
+ seq_puts(s, "\n\tCAM Table Info\n");
+ seq_printf(s, "Depth : %d\n", table->cam_table.depth);
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(npc_exact_info, npc_exact_show_info, NULL);
+
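+/* For every exact-match drop rule show the owning PF_FUNC, the hit counter,
+ * the channel extracted from the MCAM CAM1 word and whether the rule is
+ * enabled.
+ */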
+static int rvu_dbg_npc_exact_drop_cnt(struct seq_file *s, void *unused)
+{
+ struct npc_exact_table *table;
+ struct rvu *rvu = s->private;
+ struct npc_key_field *field;
+ u16 chan, pcifunc;
+ int blkaddr, i;
+ u64 cfg, cam1;
+ char *str;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ table = rvu->hw->table;
+
+ field = &rvu->hw->mcam.rx_key_fields[NPC_CHAN];
+
+ seq_puts(s, "\n\t Exact Hit on drop status\n");
+ seq_puts(s, "\npcifunc\tmcam_idx\tHits\tchan\tstatus\n");
+
+ for (i = 0; i < table->num_drop_rules; i++) {
+ pcifunc = rvu_npc_exact_drop_rule_to_pcifunc(rvu, i);
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(i, 0));
+
+ /* channel will be always in keyword 0 */
+ cam1 = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(i, 0, 1));
+ chan = field->kw_mask[0] & cam1;
+
+ str = (cfg & 1) ? "enabled" : "disabled";
+
+ seq_printf(s, "0x%x\t%d\t\t%llu\t0x%x\t%s\n", pcifunc, i,
+ rvu_read64(rvu, blkaddr,
+ NPC_AF_MATCH_STATX(table->counter_idx[i])),
+ chan, str);
+ }
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(npc_exact_drop_cnt, npc_exact_drop_cnt, NULL);
+
+static void rvu_dbg_npc_init(struct rvu *rvu)
+{
+ rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
+
+ debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu,
+ &rvu_dbg_npc_mcam_info_fops);
+ debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
+ &rvu_dbg_npc_mcam_rules_fops);
+
+ debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
+ &rvu_dbg_npc_rx_miss_act_fops);
+
+ if (!rvu->hw->cap.npc_exact_match_enabled)
+ return;
+
+ debugfs_create_file("exact_entries", 0444, rvu->rvu_dbg.npc, rvu,
+ &rvu_dbg_npc_exact_entries_fops);
+
+ debugfs_create_file("exact_info", 0444, rvu->rvu_dbg.npc, rvu,
+ &rvu_dbg_npc_exact_info_fops);
+
+ debugfs_create_file("exact_drop_cnt", 0444, rvu->rvu_dbg.npc, rvu,
+ &rvu_dbg_npc_exact_drop_cnt_fops);
+}
+
+static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
+{
+ struct cpt_ctx *ctx = filp->private;
+ u64 busy_sts = 0, free_sts = 0;
+ u32 e_min = 0, e_max = 0, e, i;
+ u16 max_ses, max_ies, max_aes;
+ struct rvu *rvu = ctx->rvu;
+ int blkaddr = ctx->blkaddr;
+ u64 reg;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
+ max_ses = reg & 0xffff;
+ max_ies = (reg >> 16) & 0xffff;
+ max_aes = (reg >> 32) & 0xffff;
+
+ switch (eng_type) {
+ case CPT_AE_TYPE:
+ e_min = max_ses + max_ies;
+ e_max = max_ses + max_ies + max_aes;
+ break;
+ case CPT_SE_TYPE:
+ e_min = 0;
+ e_max = max_ses;
+ break;
+ case CPT_IE_TYPE:
+ e_min = max_ses;
+ e_max = max_ses + max_ies;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (e = e_min, i = 0; e < e_max; e++, i++) {
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
+ if (reg & 0x1)
+ busy_sts |= 1ULL << i;
+
+ if (reg & 0x2)
+ free_sts |= 1ULL << i;
+ }
+ seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts);
+ seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts);
+
+ return 0;
+}
+
+static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused)
+{
+ return cpt_eng_sts_display(filp, CPT_AE_TYPE);
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL);
+
+static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused)
+{
+ return cpt_eng_sts_display(filp, CPT_SE_TYPE);
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL);
+
+static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused)
+{
+ return cpt_eng_sts_display(filp, CPT_IE_TYPE);
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);
+
+static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
+{
+ struct cpt_ctx *ctx = filp->private;
+ u16 max_ses, max_ies, max_aes;
+ struct rvu *rvu = ctx->rvu;
+ int blkaddr = ctx->blkaddr;
+ u32 e_max, e;
+ u64 reg;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
+ max_ses = reg & 0xffff;
+ max_ies = (reg >> 16) & 0xffff;
+ max_aes = (reg >> 32) & 0xffff;
+
+ e_max = max_ses + max_ies + max_aes;
+
+ seq_puts(filp, "===========================================\n");
+ for (e = 0; e < e_max; e++) {
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
+ seq_printf(filp, "CPT Engine[%u] Group Enable 0x%02llx\n", e,
+ reg & 0xff);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
+ seq_printf(filp, "CPT Engine[%u] Active Info 0x%llx\n", e,
+ reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
+ seq_printf(filp, "CPT Engine[%u] Control 0x%llx\n", e,
+ reg);
+ seq_puts(filp, "===========================================\n");
+ }
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);
+
+static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
+{
+ struct cpt_ctx *ctx = filp->private;
+ int blkaddr = ctx->blkaddr;
+ struct rvu *rvu = ctx->rvu;
+ struct rvu_block *block;
+ struct rvu_hwinfo *hw;
+ u64 reg;
+ u32 lf;
+
+ hw = rvu->hw;
+ block = &hw->block[blkaddr];
+ if (!block->lf.bmap)
+ return -ENODEV;
+
+ seq_puts(filp, "===========================================\n");
+ for (lf = 0; lf < block->lf.max; lf++) {
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
+ seq_printf(filp, "CPT Lf[%u] CTL 0x%llx\n", lf, reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
+ seq_printf(filp, "CPT Lf[%u] CTL2 0x%llx\n", lf, reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
+ seq_printf(filp, "CPT Lf[%u] PTR_CTL 0x%llx\n", lf, reg);
+ reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
+ (lf << block->lfshift));
+ seq_printf(filp, "CPT Lf[%u] CFG 0x%llx\n", lf, reg);
+ seq_puts(filp, "===========================================\n");
+ }
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
+
+static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
+{
+ struct cpt_ctx *ctx = filp->private;
+ struct rvu *rvu = ctx->rvu;
+ int blkaddr = ctx->blkaddr;
+ u64 reg0, reg1;
+
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
+ reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
+ seq_printf(filp, "CPT_AF_FLTX_INT: 0x%llx 0x%llx\n", reg0, reg1);
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
+ reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
+ seq_printf(filp, "CPT_AF_PSNX_EXE: 0x%llx 0x%llx\n", reg0, reg1);
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
+ seq_printf(filp, "CPT_AF_PSNX_LF: 0x%llx\n", reg0);
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
+ seq_printf(filp, "CPT_AF_RVU_INT: 0x%llx\n", reg0);
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
+ seq_printf(filp, "CPT_AF_RAS_INT: 0x%llx\n", reg0);
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
+ seq_printf(filp, "CPT_AF_EXE_ERR_INFO: 0x%llx\n", reg0);
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
+
+static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
+{
+ struct cpt_ctx *ctx = filp->private;
+ struct rvu *rvu = ctx->rvu;
+ int blkaddr = ctx->blkaddr;
+ u64 reg;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
+ seq_printf(filp, "CPT instruction requests %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
+ seq_printf(filp, "CPT instruction latency %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
+ seq_printf(filp, "CPT NCB read requests %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
+ seq_printf(filp, "CPT NCB read latency %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
+ seq_printf(filp, "CPT read requests caused by UC fills %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
+ seq_printf(filp, "CPT active cycles pc %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
+ seq_printf(filp, "CPT clock count pc %llu\n", reg);
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
+
+static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
+{
+ struct cpt_ctx *ctx;
+
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return;
+
+ if (blkaddr == BLKADDR_CPT0) {
+ rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
+ ctx = &rvu->rvu_dbg.cpt_ctx[0];
+ ctx->blkaddr = BLKADDR_CPT0;
+ ctx->rvu = rvu;
+ } else {
+ rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
+ rvu->rvu_dbg.root);
+ ctx = &rvu->rvu_dbg.cpt_ctx[1];
+ ctx->blkaddr = BLKADDR_CPT1;
+ ctx->rvu = rvu;
+ }
+
+ debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx,
+ &rvu_dbg_cpt_pc_fops);
+ debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx,
+ &rvu_dbg_cpt_ae_sts_fops);
+ debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx,
+ &rvu_dbg_cpt_se_sts_fops);
+ debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx,
+ &rvu_dbg_cpt_ie_sts_fops);
+ debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx,
+ &rvu_dbg_cpt_engines_info_fops);
+ debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx,
+ &rvu_dbg_cpt_lfs_info_fops);
+ debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx,
+ &rvu_dbg_cpt_err_info_fops);
+}
+
+static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
+{
+ if (!is_rvu_otx2(rvu))
+ return "cn10k";
+ else
+ return "octeontx2";
+}
+
+void rvu_dbg_init(struct rvu *rvu)
+{
+ rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);
+
+ debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
+ &rvu_dbg_rsrc_status_fops);
+
+ if (!is_rvu_otx2(rvu))
+ debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root,
+ rvu, &rvu_dbg_lmtst_map_table_fops);
+
+ if (!cgx_get_cgxcnt_max())
+ goto create;
+
+ if (is_rvu_otx2(rvu))
+ debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
+ rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
+ else
+ debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
+ rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
+
+create:
+ rvu_dbg_npa_init(rvu);
+ rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
+
+ rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
+ rvu_dbg_cgx_init(rvu);
+ rvu_dbg_npc_init(rvu);
+ rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
+ rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
+ rvu_dbg_mcs_init(rvu);
+}
+
+void rvu_dbg_exit(struct rvu *rvu)
+{
+ debugfs_remove_recursive(rvu->rvu_dbg.root);
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
new file mode 100644
index 000000000..b9a4efb95
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -0,0 +1,1632 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function Devlink
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/bitfield.h>
+
+#include "rvu.h"
+#include "rvu_reg.h"
+#include "rvu_struct.h"
+#include "rvu_npc_hash.h"
+
+#define DRV_NAME "octeontx2-af"
+
+static int rvu_report_pair_start(struct devlink_fmsg *fmsg, const char *name)
+{
+ int err;
+
+ err = devlink_fmsg_pair_nest_start(fmsg, name);
+ if (err)
+ return err;
+
+ return devlink_fmsg_obj_nest_start(fmsg);
+}
+
+static int rvu_report_pair_end(struct devlink_fmsg *fmsg)
+{
+ int err;
+
+ err = devlink_fmsg_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return devlink_fmsg_pair_nest_end(fmsg);
+}
+
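+/* Request the MSI-X vector at 'offset', name it and record the allocation in
+ * rvu->irq_allocated[]; returns true when the IRQ was registered.
+ */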
+static bool rvu_common_request_irq(struct rvu *rvu, int offset,
+ const char *name, irq_handler_t fn)
+{
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ int rc;
+
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "%s", name);
+ rc = request_irq(pci_irq_vector(rvu->pdev, offset), fn, 0,
+ &rvu->irq_name[offset * NAME_SIZE], rvu_dl);
+ if (rc)
+ dev_warn(rvu->dev, "Failed to register %s irq\n", name);
+ else
+ rvu->irq_allocated[offset] = true;
+
+ return rvu->irq_allocated[offset];
+}
+
+static void rvu_nix_intr_work(struct work_struct *work)
+{
+ struct rvu_nix_health_reporters *rvu_nix_health_reporter;
+
+ rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, intr_work);
+ devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_intr_reporter,
+ "NIX_AF_RVU Error",
+ rvu_nix_health_reporter->nix_event_ctx);
+}
+
+static irqreturn_t rvu_nix_af_rvu_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_nix_event_ctx *nix_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NIX_AF_RVU_INT);
+ nix_event_context->nix_af_rvu_int = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT, intr);
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->intr_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_nix_gen_work(struct work_struct *work)
+{
+ struct rvu_nix_health_reporters *rvu_nix_health_reporter;
+
+ rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, gen_work);
+ devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_gen_reporter,
+ "NIX_AF_GEN Error",
+ rvu_nix_health_reporter->nix_event_ctx);
+}
+
+static irqreturn_t rvu_nix_af_rvu_gen_handler(int irq, void *rvu_irq)
+{
+ struct rvu_nix_event_ctx *nix_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NIX_AF_GEN_INT);
+ nix_event_context->nix_af_rvu_gen = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT, intr);
+ rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->gen_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_nix_err_work(struct work_struct *work)
+{
+ struct rvu_nix_health_reporters *rvu_nix_health_reporter;
+
+ rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, err_work);
+ devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_err_reporter,
+ "NIX_AF_ERR Error",
+ rvu_nix_health_reporter->nix_event_ctx);
+}
+
+static irqreturn_t rvu_nix_af_rvu_err_handler(int irq, void *rvu_irq)
+{
+ struct rvu_nix_event_ctx *nix_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NIX_AF_ERR_INT);
+ nix_event_context->nix_af_rvu_err = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT, intr);
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->err_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_nix_ras_work(struct work_struct *work)
+{
+ struct rvu_nix_health_reporters *rvu_nix_health_reporter;
+
+ rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, ras_work);
+ devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_ras_reporter,
+ "NIX_AF_RAS Error",
+ rvu_nix_health_reporter->nix_event_ctx);
+}
+
+static irqreturn_t rvu_nix_af_rvu_ras_handler(int irq, void *rvu_irq)
+{
+ struct rvu_nix_event_ctx *nix_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NIX_AF_RAS);
+ nix_event_context->nix_af_rvu_ras = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS, intr);
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->ras_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_nix_unregister_interrupts(struct rvu *rvu)
+{
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ int offs, i, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return;
+
+ offs = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
+ if (!offs)
+ return;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);
+
+ if (rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + NIX_AF_INT_VEC_RVU),
+ rvu_dl);
+ rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU] = false;
+ }
+
+ for (i = NIX_AF_INT_VEC_AF_ERR; i < NIX_AF_INT_VEC_CNT; i++)
+ if (rvu->irq_allocated[offs + i]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
+ rvu->irq_allocated[offs + i] = false;
+ }
+}
+
+static int rvu_nix_register_interrupts(struct rvu *rvu)
+{
+ int blkaddr, base;
+ bool rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ /* Get NIX AF MSIX vectors offset. */
+ base = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
+ if (!base) {
+ dev_warn(rvu->dev,
+ "Failed to get NIX%d NIX_AF_INT vector offsets\n",
+ blkaddr - BLKADDR_NIX0);
+ return 0;
+ }
+ /* Register and enable NIX_AF_RVU_INT interrupt */
+ rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_RVU,
+ "NIX_AF_RVU_INT",
+ rvu_nix_af_rvu_intr_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NIX_AF_GEN_INT interrupt */
+ rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_GEN,
+ "NIX_AF_GEN_INT",
+ rvu_nix_af_rvu_gen_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NIX_AF_ERR_INT interrupt */
+ rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_AF_ERR,
+ "NIX_AF_ERR_INT",
+ rvu_nix_af_rvu_err_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NIX_AF_RAS interrupt */
+ rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_POISON,
+ "NIX_AF_RAS",
+ rvu_nix_af_rvu_ras_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
+
+ return 0;
+err:
+ rvu_nix_unregister_interrupts(rvu);
+ return rc;
+}
+
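+/* Decode the latched interrupt cause word for the selected reporter
+ * (RVU/GEN/ERR/RAS) into devlink fmsg name/value pairs and strings.
+ */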
+static int rvu_nix_report_show(struct devlink_fmsg *fmsg, void *ctx,
+ enum nix_af_rvu_health health_reporter)
+{
+ struct rvu_nix_event_ctx *nix_event_context;
+ u64 intr_val;
+ int err;
+
+ nix_event_context = ctx;
+ switch (health_reporter) {
+ case NIX_AF_RVU_INTR:
+ intr_val = nix_event_context->nix_af_rvu_int;
+ err = rvu_report_pair_start(fmsg, "NIX_AF_RVU");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX RVU Interrupt Reg ",
+ nix_event_context->nix_af_rvu_int);
+ if (err)
+ return err;
+ if (intr_val & BIT_ULL(0)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ case NIX_AF_RVU_GEN:
+ intr_val = nix_event_context->nix_af_rvu_gen;
+ err = rvu_report_pair_start(fmsg, "NIX_AF_GENERAL");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX General Interrupt Reg ",
+ nix_event_context->nix_af_rvu_gen);
+ if (err)
+ return err;
+ if (intr_val & BIT_ULL(0)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tRx multicast pkt drop");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(1)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tRx mirror pkt drop");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(4)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tSMQ flush done");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ case NIX_AF_RVU_ERR:
+ intr_val = nix_event_context->nix_af_rvu_err;
+ err = rvu_report_pair_start(fmsg, "NIX_AF_ERR");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX Error Interrupt Reg ",
+ nix_event_context->nix_af_rvu_err);
+ if (err)
+ return err;
+ if (intr_val & BIT_ULL(14)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_INST_S read");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(13)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_RES_S write");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(12)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(6)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tRx on unmapped PF_FUNC");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(5)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tRx multicast replication error");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(4)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_RX_MCE_S read");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(3)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on multicast WQE read");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(2)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on mirror WQE read");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(1)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on mirror pkt write");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(0)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on multicast pkt write");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ case NIX_AF_RVU_RAS:
+ intr_val = nix_event_context->nix_af_rvu_err;
+ err = rvu_report_pair_start(fmsg, "NIX_AF_RAS");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX RAS Interrupt Reg ",
+ nix_event_context->nix_af_rvu_err);
+ if (err)
+ return err;
+ err = devlink_fmsg_string_put(fmsg, "\n\tPoison Data on:");
+ if (err)
+ return err;
+ if (intr_val & BIT_ULL(34)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_INST_S");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(33)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_RES_S");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(32)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tHW ctx");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(4)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tPacket from mirror buffer");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(3)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tPacket from multicast buffer");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(2)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tWQE read from mirror buffer");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(1)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tWQE read from multicast buffer");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(0)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX_RX_MCE_S read");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rvu_hw_nix_intr_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx,
+ struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_nix_event_ctx *nix_ctx;
+
+ nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+
+ return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_INTR) :
+ rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_INTR);
+}
+
+static int rvu_hw_nix_intr_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_nix_event_ctx *nix_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (nix_event_ctx->nix_af_rvu_int)
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+static int rvu_hw_nix_gen_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx,
+ struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_nix_event_ctx *nix_ctx;
+
+ nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+
+ return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_GEN) :
+ rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_GEN);
+}
+
+static int rvu_hw_nix_gen_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_nix_event_ctx *nix_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (nix_event_ctx->nix_af_rvu_gen)
+ rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+static int rvu_hw_nix_err_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx,
+ struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_nix_event_ctx *nix_ctx;
+
+ nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+
+ return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_ERR) :
+ rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_ERR);
+}
+
+static int rvu_hw_nix_err_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_nix_event_ctx *nix_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (nix_event_ctx->nix_af_rvu_err)
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+static int rvu_hw_nix_ras_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx,
+ struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_nix_event_ctx *nix_ctx;
+
+ nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+
+ return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_RAS) :
+ rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_RAS);
+}
+
+static int rvu_hw_nix_ras_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_nix_event_ctx *nix_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (nix_event_ctx->nix_af_rvu_int)
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+RVU_REPORTERS(hw_nix_intr);
+RVU_REPORTERS(hw_nix_gen);
+RVU_REPORTERS(hw_nix_err);
+RVU_REPORTERS(hw_nix_ras);
+
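+/* Each RVU_REPORTERS() instance above ties the matching _dump and _recover
+ * callbacks into a devlink health reporter named after the macro argument.
+ * As an illustrative example only (the PCI address is hypothetical), the
+ * reporters can be exercised from user space with the standard devlink tool:
+ *
+ *   devlink health dump show pci/0002:01:00.0 reporter hw_nix_intr
+ *   devlink health recover pci/0002:01:00.0 reporter hw_nix_intr
+ */
+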
+static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl);
+
+static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
+{
+ struct rvu_nix_health_reporters *rvu_reporters;
+ struct rvu_nix_event_ctx *nix_event_context;
+ struct rvu *rvu = rvu_dl->rvu;
+
+ rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL);
+ if (!rvu_reporters)
+ return -ENOMEM;
+
+ rvu_dl->rvu_nix_health_reporter = rvu_reporters;
+ nix_event_context = kzalloc(sizeof(*nix_event_context), GFP_KERNEL);
+ if (!nix_event_context)
+ return -ENOMEM;
+
+ rvu_reporters->nix_event_ctx = nix_event_context;
+ rvu_reporters->rvu_hw_nix_intr_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_intr_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_nix_intr_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_nix_intr reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter);
+ }
+
+ rvu_reporters->rvu_hw_nix_gen_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_gen_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_nix_gen_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_nix_gen reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter);
+ }
+
+ rvu_reporters->rvu_hw_nix_err_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_err_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_nix_err_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_nix_err reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter);
+ }
+
+ rvu_reporters->rvu_hw_nix_ras_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_ras_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_nix_ras_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_nix_ras reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter);
+ }
+
+ rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
+ if (!rvu_dl->devlink_wq)
+ return -ENOMEM;
+
+ INIT_WORK(&rvu_reporters->intr_work, rvu_nix_intr_work);
+ INIT_WORK(&rvu_reporters->gen_work, rvu_nix_gen_work);
+ INIT_WORK(&rvu_reporters->err_work, rvu_nix_err_work);
+ INIT_WORK(&rvu_reporters->ras_work, rvu_nix_ras_work);
+
+ return 0;
+}
+
+static int rvu_nix_health_reporters_create(struct rvu_devlink *rvu_dl)
+{
+ struct rvu *rvu = rvu_dl->rvu;
+ int err;
+
+ err = rvu_nix_register_reporters(rvu_dl);
+ if (err) {
+ dev_warn(rvu->dev, "Failed to create nix reporter, err=%d\n",
+ err);
+ return err;
+ }
+ rvu_nix_register_interrupts(rvu);
+
+ return 0;
+}
+
+static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl)
+{
+ struct rvu_nix_health_reporters *nix_reporters;
+ struct rvu *rvu = rvu_dl->rvu;
+
+ nix_reporters = rvu_dl->rvu_nix_health_reporter;
+
+ if (!nix_reporters->rvu_hw_nix_ras_reporter)
+ return;
+ if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_intr_reporter))
+ devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_intr_reporter);
+
+ if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_gen_reporter))
+ devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_gen_reporter);
+
+ if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_err_reporter))
+ devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_err_reporter);
+
+ if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_ras_reporter))
+ devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_ras_reporter);
+
+ rvu_nix_unregister_interrupts(rvu);
+ kfree(rvu_dl->rvu_nix_health_reporter->nix_event_ctx);
+ kfree(rvu_dl->rvu_nix_health_reporter);
+}
+
+static void rvu_npa_intr_work(struct work_struct *work)
+{
+ struct rvu_npa_health_reporters *rvu_npa_health_reporter;
+
+ rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, intr_work);
+ devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_intr_reporter,
+ "NPA_AF_RVU Error",
+ rvu_npa_health_reporter->npa_event_ctx);
+}
+
+static irqreturn_t rvu_npa_af_rvu_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_npa_event_ctx *npa_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NPA_AF_RVU_INT);
+ npa_event_context->npa_af_rvu_int = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT, intr);
+ rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->intr_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_npa_gen_work(struct work_struct *work)
+{
+ struct rvu_npa_health_reporters *rvu_npa_health_reporter;
+
+ rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, gen_work);
+ devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_gen_reporter,
+ "NPA_AF_GEN Error",
+ rvu_npa_health_reporter->npa_event_ctx);
+}
+
+static irqreturn_t rvu_npa_af_gen_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_npa_event_ctx *npa_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NPA_AF_GEN_INT);
+ npa_event_context->npa_af_rvu_gen = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT, intr);
+ rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->gen_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_npa_err_work(struct work_struct *work)
+{
+ struct rvu_npa_health_reporters *rvu_npa_health_reporter;
+
+ rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, err_work);
+ devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_err_reporter,
+ "NPA_AF_ERR Error",
+ rvu_npa_health_reporter->npa_event_ctx);
+}
+
+static irqreturn_t rvu_npa_af_err_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_npa_event_ctx *npa_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+ npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NPA_AF_ERR_INT);
+ npa_event_context->npa_af_rvu_err = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT, intr);
+ rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->err_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_npa_ras_work(struct work_struct *work)
+{
+ struct rvu_npa_health_reporters *rvu_npa_health_reporter;
+
+ rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, ras_work);
+ devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_ras_reporter,
+ "HW NPA_AF_RAS Error reported",
+ rvu_npa_health_reporter->npa_event_ctx);
+}
+
+static irqreturn_t rvu_npa_af_ras_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_npa_event_ctx *npa_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NPA_AF_RAS);
+ npa_event_context->npa_af_rvu_ras = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NPA_AF_RAS, intr);
+ rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->ras_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_npa_unregister_interrupts(struct rvu *rvu)
+{
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ int i, offs, blkaddr;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return;
+
+ reg = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG);
+ offs = reg & 0x3FF;
+
+ rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL);
+
+ for (i = 0; i < NPA_AF_INT_VEC_CNT; i++)
+ if (rvu->irq_allocated[offs + i]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
+ rvu->irq_allocated[offs + i] = false;
+ }
+}
+
+static int rvu_npa_register_interrupts(struct rvu *rvu)
+{
+ int blkaddr, base;
+ bool rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ /* Get NPA AF MSIX vectors offset. */
+ base = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG) & 0x3ff;
+ if (!base) {
+ dev_warn(rvu->dev,
+ "Failed to get NPA_AF_INT vector offsets\n");
+ return 0;
+ }
+
+ /* Register and enable NPA_AF_RVU_INT interrupt */
+ rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_RVU,
+ "NPA_AF_RVU_INT",
+ rvu_npa_af_rvu_intr_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NPA_AF_GEN_INT interrupt */
+ rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_GEN,
+ "NPA_AF_RVU_GEN",
+ rvu_npa_af_gen_intr_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NPA_AF_ERR_INT interrupt */
+ rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_AF_ERR,
+ "NPA_AF_ERR_INT",
+ rvu_npa_af_err_intr_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NPA_AF_RAS interrupt */
+ rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_POISON,
+ "NPA_AF_RAS",
+ rvu_npa_af_ras_intr_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL);
+
+ return 0;
+err:
+ rvu_npa_unregister_interrupts(rvu);
+ return rc;
+}
+
+static int rvu_npa_report_show(struct devlink_fmsg *fmsg, void *ctx,
+ enum npa_af_rvu_health health_reporter)
+{
+ struct rvu_npa_event_ctx *npa_event_context;
+ unsigned int alloc_dis, free_dis;
+ u64 intr_val;
+ int err;
+
+ npa_event_context = ctx;
+ switch (health_reporter) {
+ case NPA_AF_RVU_GEN:
+ intr_val = npa_event_context->npa_af_rvu_gen;
+ err = rvu_report_pair_start(fmsg, "NPA_AF_GENERAL");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA General Interrupt Reg ",
+ npa_event_context->npa_af_rvu_gen);
+ if (err)
+ return err;
+ if (intr_val & BIT_ULL(32)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tUnmap PF Error");
+ if (err)
+ return err;
+ }
+
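+ /* Bits 15:0 of the latched register flag per-input-queue "free
+ * disabled" errors and bits 31:16 the matching "alloc disabled"
+ * errors; both bitmaps are decoded below.
+ */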
+ free_dis = FIELD_GET(GENMASK(15, 0), intr_val);
+ if (free_dis & BIT(NPA_INPQ_NIX0_RX)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX0: free disabled RX");
+ if (err)
+ return err;
+ }
+ if (free_dis & BIT(NPA_INPQ_NIX0_TX)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX0: free disabled TX");
+ if (err)
+ return err;
+ }
+ if (free_dis & BIT(NPA_INPQ_NIX1_RX)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX1: free disabled RX");
+ if (err)
+ return err;
+ }
+ if (free_dis & BIT(NPA_INPQ_NIX1_TX)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX1: free disabled TX");
+ if (err)
+ return err;
+ }
+ if (free_dis & BIT(NPA_INPQ_SSO)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for SSO");
+ if (err)
+ return err;
+ }
+ if (free_dis & BIT(NPA_INPQ_TIM)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for TIM");
+ if (err)
+ return err;
+ }
+ if (free_dis & BIT(NPA_INPQ_DPI)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for DPI");
+ if (err)
+ return err;
+ }
+ if (free_dis & BIT(NPA_INPQ_AURA_OP)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for AURA");
+ if (err)
+ return err;
+ }
+
+ alloc_dis = FIELD_GET(GENMASK(31, 16), intr_val);
+ if (alloc_dis & BIT(NPA_INPQ_NIX0_RX)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX0: alloc disabled RX");
+ if (err)
+ return err;
+ }
+ if (alloc_dis & BIT(NPA_INPQ_NIX0_TX)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX0: alloc disabled TX");
+ if (err)
+ return err;
+ }
+ if (alloc_dis & BIT(NPA_INPQ_NIX1_RX)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX1: alloc disabled RX");
+ if (err)
+ return err;
+ }
+ if (alloc_dis & BIT(NPA_INPQ_NIX1_TX)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX1: alloc disabled TX");
+ if (err)
+ return err;
+ }
+ if (alloc_dis & BIT(NPA_INPQ_SSO)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for SSO");
+ if (err)
+ return err;
+ }
+ if (alloc_dis & BIT(NPA_INPQ_TIM)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for TIM");
+ if (err)
+ return err;
+ }
+ if (alloc_dis & BIT(NPA_INPQ_DPI)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for DPI");
+ if (err)
+ return err;
+ }
+ if (alloc_dis & BIT(NPA_INPQ_AURA_OP)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for AURA");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ case NPA_AF_RVU_ERR:
+ err = rvu_report_pair_start(fmsg, "NPA_AF_ERR");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA Error Interrupt Reg ",
+ npa_event_context->npa_af_rvu_err);
+ if (err)
+ return err;
+
+ if (npa_event_context->npa_af_rvu_err & BIT_ULL(14)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_INST_S read");
+ if (err)
+ return err;
+ }
+ if (npa_event_context->npa_af_rvu_err & BIT_ULL(13)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_RES_S write");
+ if (err)
+ return err;
+ }
+ if (npa_event_context->npa_af_rvu_err & BIT_ULL(12)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ case NPA_AF_RVU_RAS:
+ err = rvu_report_pair_start(fmsg, "NPA_AF_RVU_RAS");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA RAS Interrupt Reg ",
+ npa_event_context->npa_af_rvu_ras);
+ if (err)
+ return err;
+ if (npa_event_context->npa_af_rvu_ras & BIT_ULL(34)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_INST_S");
+ if (err)
+ return err;
+ }
+ if (npa_event_context->npa_af_rvu_ras & BIT_ULL(33)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_RES_S");
+ if (err)
+ return err;
+ }
+ if (npa_event_context->npa_af_rvu_ras & BIT_ULL(32)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on HW context");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ case NPA_AF_RVU_INTR:
+ err = rvu_report_pair_start(fmsg, "NPA_AF_RVU");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA RVU Interrupt Reg ",
+ npa_event_context->npa_af_rvu_int);
+ if (err)
+ return err;
+ if (npa_event_context->npa_af_rvu_int & BIT_ULL(0)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
+ if (err)
+ return err;
+ }
+ return rvu_report_pair_end(fmsg);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rvu_hw_npa_intr_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx,
+ struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_npa_event_ctx *npa_ctx;
+
+ npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
+
+ return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_INTR) :
+ rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_INTR);
+}
+
+static int rvu_hw_npa_intr_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_npa_event_ctx *npa_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (npa_event_ctx->npa_af_rvu_int)
+ rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+static int rvu_hw_npa_gen_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx,
+ struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_npa_event_ctx *npa_ctx;
+
+ npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
+
+ return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_GEN) :
+ rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_GEN);
+}
+
+static int rvu_hw_npa_gen_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_npa_event_ctx *npa_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (npa_event_ctx->npa_af_rvu_gen)
+ rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+static int rvu_hw_npa_err_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx,
+ struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_npa_event_ctx *npa_ctx;
+
+ npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
+
+ return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_ERR) :
+ rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_ERR);
+}
+
+static int rvu_hw_npa_err_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_npa_event_ctx *npa_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (npa_event_ctx->npa_af_rvu_err)
+ rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+static int rvu_hw_npa_ras_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx,
+ struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_npa_event_ctx *npa_ctx;
+
+ npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
+
+ return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_RAS) :
+ rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_RAS);
+}
+
+static int rvu_hw_npa_ras_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_npa_event_ctx *npa_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (npa_event_ctx->npa_af_rvu_ras)
+ rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+RVU_REPORTERS(hw_npa_intr);
+RVU_REPORTERS(hw_npa_gen);
+RVU_REPORTERS(hw_npa_err);
+RVU_REPORTERS(hw_npa_ras);
+
+static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl);
+
+static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
+{
+ struct rvu_npa_health_reporters *rvu_reporters;
+ struct rvu_npa_event_ctx *npa_event_context;
+ struct rvu *rvu = rvu_dl->rvu;
+
+ rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL);
+ if (!rvu_reporters)
+ return -ENOMEM;
+
+ rvu_dl->rvu_npa_health_reporter = rvu_reporters;
+ npa_event_context = kzalloc(sizeof(*npa_event_context), GFP_KERNEL);
+ if (!npa_event_context)
+ return -ENOMEM;
+
+ rvu_reporters->npa_event_ctx = npa_event_context;
+ rvu_reporters->rvu_hw_npa_intr_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_intr_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_npa_intr_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_npa_intr reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter);
+ }
+
+ rvu_reporters->rvu_hw_npa_gen_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_gen_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_npa_gen_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_npa_gen reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter);
+ }
+
+ rvu_reporters->rvu_hw_npa_err_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_err_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_npa_err_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_npa_err reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter);
+ }
+
+ rvu_reporters->rvu_hw_npa_ras_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_ras_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_npa_ras_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_npa_ras reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter);
+ }
+
+ rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
+ if (!rvu_dl->devlink_wq)
+ return -ENOMEM;
+
+ INIT_WORK(&rvu_reporters->intr_work, rvu_npa_intr_work);
+ INIT_WORK(&rvu_reporters->err_work, rvu_npa_err_work);
+ INIT_WORK(&rvu_reporters->gen_work, rvu_npa_gen_work);
+ INIT_WORK(&rvu_reporters->ras_work, rvu_npa_ras_work);
+
+ return 0;
+}
+
+static int rvu_npa_health_reporters_create(struct rvu_devlink *rvu_dl)
+{
+ struct rvu *rvu = rvu_dl->rvu;
+ int err;
+
+ err = rvu_npa_register_reporters(rvu_dl);
+ if (err) {
+ dev_warn(rvu->dev, "Failed to create npa reporter, err=%d\n",
+ err);
+ return err;
+ }
+ rvu_npa_register_interrupts(rvu);
+
+ return 0;
+}
+
+static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl)
+{
+ struct rvu_npa_health_reporters *npa_reporters;
+ struct rvu *rvu = rvu_dl->rvu;
+
+ npa_reporters = rvu_dl->rvu_npa_health_reporter;
+
+ if (!npa_reporters->rvu_hw_npa_ras_reporter)
+ return;
+ if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_intr_reporter))
+ devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_intr_reporter);
+
+ if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_gen_reporter))
+ devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_gen_reporter);
+
+ if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_err_reporter))
+ devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_err_reporter);
+
+ if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_ras_reporter))
+ devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_ras_reporter);
+
+ rvu_npa_unregister_interrupts(rvu);
+ kfree(rvu_dl->rvu_npa_health_reporter->npa_event_ctx);
+ kfree(rvu_dl->rvu_npa_health_reporter);
+}
+
+static int rvu_health_reporters_create(struct rvu *rvu)
+{
+ struct rvu_devlink *rvu_dl;
+ int err;
+
+ rvu_dl = rvu->rvu_dl;
+ err = rvu_npa_health_reporters_create(rvu_dl);
+ if (err)
+ return err;
+
+ return rvu_nix_health_reporters_create(rvu_dl);
+}
+
+static void rvu_health_reporters_destroy(struct rvu *rvu)
+{
+ struct rvu_devlink *rvu_dl;
+
+ if (!rvu->rvu_dl)
+ return;
+
+ rvu_dl = rvu->rvu_dl;
+ rvu_npa_health_reporters_destroy(rvu_dl);
+ rvu_nix_health_reporters_destroy(rvu_dl);
+}
+
+/* Devlink Params APIs */
+static int rvu_af_dl_dwrr_mtu_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ int dwrr_mtu = val.vu32;
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+
+ if (!rvu->hw->cap.nix_common_dwrr_mtu) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Setting DWRR_MTU is not supported on this silicon");
+ return -EOPNOTSUPP;
+ }
+
+ if ((dwrr_mtu > 65536 || !is_power_of_2(dwrr_mtu)) &&
+ (dwrr_mtu != 9728 && dwrr_mtu != 10240)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid, supported MTUs are 0,2,4,8.16,32,64....4K,8K,32K,64K and 9728, 10240");
+ return -EINVAL;
+ }
+
+ nix_hw = get_nix_hw(rvu->hw, BLKADDR_NIX0);
+ if (!nix_hw)
+ return -ENODEV;
+
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
+ if (rvu_rsrc_free_count(&txsch->schq) != txsch->schq.max) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Changing DWRR MTU is not supported when there are active NIXLFs");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Make sure none of the PF/VF interfaces are initialized and retry");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int rvu_af_dl_dwrr_mtu_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ u64 dwrr_mtu;
+
+ dwrr_mtu = convert_bytes_to_dwrr_mtu(ctx->val.vu32);
+ rvu_write64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_RPM_MTU, dwrr_mtu);
+
+ return 0;
+}
+
+static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ u64 dwrr_mtu;
+
+ if (!rvu->hw->cap.nix_common_dwrr_mtu)
+ return -EOPNOTSUPP;
+
+ dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_RPM_MTU);
+ ctx->val.vu32 = convert_dwrr_mtu_to_bytes(dwrr_mtu);
+
+ return 0;
+}
+
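+/* Sketch of how the dwrr_mtu parameter defined below is typically driven
+ * from user space via the generic devlink param interface (the device
+ * address shown is only an example):
+ *
+ *   devlink dev param set pci/0002:01:00.0 name dwrr_mtu \
+ *           value 9728 cmode runtime
+ *   devlink dev param show pci/0002:01:00.0 name dwrr_mtu
+ */
+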
+enum rvu_af_dl_param_id {
+ RVU_AF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
+ RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
+};
+
+static int rvu_af_npc_exact_feature_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ bool enabled;
+
+ enabled = rvu_npc_exact_has_match_table(rvu);
+
+ snprintf(ctx->val.vstr, sizeof(ctx->val.vstr), "%s",
+ enabled ? "enabled" : "disabled");
+
+ return 0;
+}
+
+static int rvu_af_npc_exact_feature_disable(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+
+ rvu_npc_exact_disable_feature(rvu);
+
+ return 0;
+}
+
+static int rvu_af_npc_exact_feature_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ u64 enable;
+
+ if (kstrtoull(val.vstr, 10, &enable)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only 1 value is supported");
+ return -EINVAL;
+ }
+
+ if (enable != 1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only disabling exact match feature is supported");
+ return -EINVAL;
+ }
+
+ if (rvu_npc_exact_can_disable_feature(rvu))
+ return 0;
+
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't disable exact match feature; Please try before any configuration");
+ return -EFAULT;
+}
+
+static const struct devlink_param rvu_af_dl_params[] = {
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
+ "dwrr_mtu", DEVLINK_PARAM_TYPE_U32,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set,
+ rvu_af_dl_dwrr_mtu_validate),
+};
+
+static const struct devlink_param rvu_af_dl_param_exact_match[] = {
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
+ "npc_exact_feature_disable", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_npc_exact_feature_get,
+ rvu_af_npc_exact_feature_disable,
+ rvu_af_npc_exact_feature_validate),
+};
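+
+/* npc_exact_feature_disable is a string parameter that only accepts "1"
+ * (see the validate handler above); an illustrative invocation, with a
+ * hypothetical device address:
+ *
+ *   devlink dev param set pci/0002:01:00.0 name npc_exact_feature_disable \
+ *           value 1 cmode runtime
+ */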
+
+/* Devlink switch mode */
+static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ struct rvu_switch *rswitch;
+
+ rswitch = &rvu->rswitch;
+ *mode = rswitch->mode;
+
+ return 0;
+}
+
+static int rvu_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ struct rvu_switch *rswitch;
+
+ rswitch = &rvu->rswitch;
+ switch (mode) {
+ case DEVLINK_ESWITCH_MODE_LEGACY:
+ case DEVLINK_ESWITCH_MODE_SWITCHDEV:
+ if (rswitch->mode == mode)
+ return 0;
+ rswitch->mode = mode;
+ if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
+ rvu_switch_enable(rvu);
+ else
+ rvu_switch_disable(rvu);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
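+
+/* Illustrative user space flow for the eswitch ops above (device address is
+ * an example only):
+ *
+ *   devlink dev eswitch set pci/0002:01:00.0 mode switchdev
+ *   devlink dev eswitch show pci/0002:01:00.0
+ */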
+
+static int rvu_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ return devlink_info_driver_name_put(req, DRV_NAME);
+}
+
+static const struct devlink_ops rvu_devlink_ops = {
+ .info_get = rvu_devlink_info_get,
+ .eswitch_mode_get = rvu_devlink_eswitch_mode_get,
+ .eswitch_mode_set = rvu_devlink_eswitch_mode_set,
+};
+
+int rvu_register_dl(struct rvu *rvu)
+{
+ struct rvu_devlink *rvu_dl;
+ struct devlink *dl;
+ int err;
+
+ dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink),
+ rvu->dev);
+ if (!dl) {
+ dev_warn(rvu->dev, "devlink_alloc failed\n");
+ return -ENOMEM;
+ }
+
+ rvu_dl = devlink_priv(dl);
+ rvu_dl->dl = dl;
+ rvu_dl->rvu = rvu;
+ rvu->rvu_dl = rvu_dl;
+
+ err = rvu_health_reporters_create(rvu);
+ if (err) {
+ dev_err(rvu->dev,
+ "devlink health reporter creation failed with error %d\n", err);
+ goto err_dl_health;
+ }
+
+ err = devlink_params_register(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
+ if (err) {
+ dev_err(rvu->dev,
+ "devlink params register failed with error %d", err);
+ goto err_dl_health;
+ }
+
+ /* Register exact match devlink only for CN10K-B */
+ if (!rvu_npc_exact_has_match_table(rvu))
+ goto done;
+
+ err = devlink_params_register(dl, rvu_af_dl_param_exact_match,
+ ARRAY_SIZE(rvu_af_dl_param_exact_match));
+ if (err) {
+ dev_err(rvu->dev,
+ "devlink exact match params register failed with error %d", err);
+ goto err_dl_exact_match;
+ }
+
+done:
+ devlink_register(dl);
+ return 0;
+
+err_dl_exact_match:
+ devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
+
+err_dl_health:
+ rvu_health_reporters_destroy(rvu);
+ devlink_free(dl);
+ return err;
+}
+
+void rvu_unregister_dl(struct rvu *rvu)
+{
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct devlink *dl = rvu_dl->dl;
+
+ devlink_unregister(dl);
+
+ devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
+
+ /* Unregister exact match devlink only for CN10K-B */
+ if (rvu_npc_exact_has_match_table(rvu))
+ devlink_params_unregister(dl, rvu_af_dl_param_exact_match,
+ ARRAY_SIZE(rvu_af_dl_param_exact_match));
+
+ rvu_health_reporters_destroy(rvu);
+ devlink_free(dl);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h
new file mode 100644
index 000000000..51efe88dc
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function Devlink
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef RVU_DEVLINK_H
+#define RVU_DEVLINK_H
+
+#define RVU_REPORTERS(_name) \
+static const struct devlink_health_reporter_ops rvu_ ## _name ## _reporter_ops = { \
+ .name = #_name, \
+ .recover = rvu_ ## _name ## _recover, \
+ .dump = rvu_ ## _name ## _dump, \
+}
+
+enum npa_af_rvu_health {
+ NPA_AF_RVU_INTR,
+ NPA_AF_RVU_GEN,
+ NPA_AF_RVU_ERR,
+ NPA_AF_RVU_RAS,
+};
+
+struct rvu_npa_event_ctx {
+ u64 npa_af_rvu_int;
+ u64 npa_af_rvu_gen;
+ u64 npa_af_rvu_err;
+ u64 npa_af_rvu_ras;
+};
+
+struct rvu_npa_health_reporters {
+ struct rvu_npa_event_ctx *npa_event_ctx;
+ struct devlink_health_reporter *rvu_hw_npa_intr_reporter;
+ struct work_struct intr_work;
+ struct devlink_health_reporter *rvu_hw_npa_gen_reporter;
+ struct work_struct gen_work;
+ struct devlink_health_reporter *rvu_hw_npa_err_reporter;
+ struct work_struct err_work;
+ struct devlink_health_reporter *rvu_hw_npa_ras_reporter;
+ struct work_struct ras_work;
+};
+
+enum nix_af_rvu_health {
+ NIX_AF_RVU_INTR,
+ NIX_AF_RVU_GEN,
+ NIX_AF_RVU_ERR,
+ NIX_AF_RVU_RAS,
+};
+
+struct rvu_nix_event_ctx {
+ u64 nix_af_rvu_int;
+ u64 nix_af_rvu_gen;
+ u64 nix_af_rvu_err;
+ u64 nix_af_rvu_ras;
+};
+
+struct rvu_nix_health_reporters {
+ struct rvu_nix_event_ctx *nix_event_ctx;
+ struct devlink_health_reporter *rvu_hw_nix_intr_reporter;
+ struct work_struct intr_work;
+ struct devlink_health_reporter *rvu_hw_nix_gen_reporter;
+ struct work_struct gen_work;
+ struct devlink_health_reporter *rvu_hw_nix_err_reporter;
+ struct work_struct err_work;
+ struct devlink_health_reporter *rvu_hw_nix_ras_reporter;
+ struct work_struct ras_work;
+};
+
+struct rvu_devlink {
+ struct devlink *dl;
+ struct rvu *rvu;
+ struct workqueue_struct *devlink_wq;
+ struct rvu_npa_health_reporters *rvu_npa_health_reporter;
+ struct rvu_nix_health_reporters *rvu_nix_health_reporter;
+};
+
+/* Devlink APIs */
+int rvu_register_dl(struct rvu *rvu);
+void rvu_unregister_dl(struct rvu *rvu);
+
+#endif /* RVU_DEVLINK_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
new file mode 100644
index 000000000..bb99302ea
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -0,0 +1,5440 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+#include "npc.h"
+#include "mcs.h"
+#include "cgx.h"
+#include "lmac_common.h"
+#include "rvu_npc_hash.h"
+
+static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
+static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
+ int type, int chan_id);
+static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
+ int type, bool add);
+static int nix_setup_ipolicers(struct rvu *rvu,
+ struct nix_hw *nix_hw, int blkaddr);
+static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw);
+static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
+ struct nix_hw *nix_hw, u16 pcifunc);
+static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
+static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
+ u32 leaf_prof);
+static const char *nix_get_ctx_name(int ctype);
+
+enum mc_tbl_sz {
+ MC_TBL_SZ_256,
+ MC_TBL_SZ_512,
+ MC_TBL_SZ_1K,
+ MC_TBL_SZ_2K,
+ MC_TBL_SZ_4K,
+ MC_TBL_SZ_8K,
+ MC_TBL_SZ_16K,
+ MC_TBL_SZ_32K,
+ MC_TBL_SZ_64K,
+};
+
+enum mc_buf_cnt {
+ MC_BUF_CNT_8,
+ MC_BUF_CNT_16,
+ MC_BUF_CNT_32,
+ MC_BUF_CNT_64,
+ MC_BUF_CNT_128,
+ MC_BUF_CNT_256,
+ MC_BUF_CNT_512,
+ MC_BUF_CNT_1024,
+ MC_BUF_CNT_2048,
+};
+
+enum nix_makr_fmt_indexes {
+ NIX_MARK_CFG_IP_DSCP_RED,
+ NIX_MARK_CFG_IP_DSCP_YELLOW,
+ NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
+ NIX_MARK_CFG_IP_ECN_RED,
+ NIX_MARK_CFG_IP_ECN_YELLOW,
+ NIX_MARK_CFG_IP_ECN_YELLOW_RED,
+ NIX_MARK_CFG_VLAN_DEI_RED,
+ NIX_MARK_CFG_VLAN_DEI_YELLOW,
+ NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
+ NIX_MARK_CFG_MAX,
+};
+
+/* For now, only the MC resources needed for broadcast
+ * pkt replication are considered, i.e. 256 HWVFs + 12 PFs.
+ */
+#define MC_TBL_SIZE MC_TBL_SZ_512
+#define MC_BUF_CNT MC_BUF_CNT_128
+
+struct mce {
+ struct hlist_node node;
+ u16 pcifunc;
+};
+
+int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
+{
+ int i = 0;
+
+ /* If blkaddr is 0, return the first NIX block address */
+ if (blkaddr == 0)
+ return rvu->nix_blkaddr[blkaddr];
+
+ while (i + 1 < MAX_NIX_BLKS) {
+ if (rvu->nix_blkaddr[i] == blkaddr)
+ return rvu->nix_blkaddr[i + 1];
+ i++;
+ }
+
+ return 0;
+}
+
+bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return false;
+ return true;
+}
+
+int rvu_get_nixlf_count(struct rvu *rvu)
+{
+ int blkaddr = 0, max = 0;
+ struct rvu_block *block;
+
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ while (blkaddr) {
+ block = &rvu->hw->block[blkaddr];
+ max += block->lf.max;
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ }
+ return max;
+}
+
+int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct rvu_hwinfo *hw = rvu->hw;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (*nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ if (nix_blkaddr)
+ *nix_blkaddr = blkaddr;
+
+ return 0;
+}
+
+int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
+ struct nix_hw **nix_hw, int *blkaddr)
+{
+ struct rvu_pfvf *pfvf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ *blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || *blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ *nix_hw = get_nix_hw(rvu->hw, *blkaddr);
+ if (!*nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+ return 0;
+}
+
+static void nix_mce_list_init(struct nix_mce_list *list, int max)
+{
+ INIT_HLIST_HEAD(&list->head);
+ list->count = 0;
+ list->max = max;
+}
+
+static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
+{
+ int idx;
+
+ if (!mcast)
+ return 0;
+
+ idx = mcast->next_free_mce;
+ mcast->next_free_mce += count;
+ return idx;
+}
+
+struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
+{
+ int nix_blkaddr = 0, i = 0;
+ struct rvu *rvu = hw->rvu;
+
+ nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
+ while (nix_blkaddr) {
+ if (blkaddr == nix_blkaddr && hw->nix)
+ return &hw->nix[i];
+ nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
+ i++;
+ }
+ return NULL;
+}
+
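+/* Worked example of the DWRR MTU encoding handled below: a 5-bit register
+ * value n normally means 2^n bytes (e.g. 10 -> 1024, 16 -> 64K), while the
+ * reserved encodings 4 and 5 map to the 9728 and 10240 byte jumbo MTUs.
+ */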
+u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu)
+{
+ dwrr_mtu &= 0x1FULL;
+
+ /* The MTU used for DWRR calculation is a power of 2, up to 64K bytes.
+ * Value of 4 is reserved for MTU value of 9728 bytes.
+ * Value of 5 is reserved for MTU value of 10240 bytes.
+ */
+ switch (dwrr_mtu) {
+ case 4:
+ return 9728;
+ case 5:
+ return 10240;
+ default:
+ return BIT_ULL(dwrr_mtu);
+ }
+
+ return 0;
+}
+
+u32 convert_bytes_to_dwrr_mtu(u32 bytes)
+{
+ /* The MTU used for DWRR calculation is a power of 2, up to 64K bytes.
+ * Value of 4 is reserved for MTU value of 9728 bytes.
+ * Value of 5 is reserved for MTU value of 10240 bytes.
+ */
+ if (bytes > BIT_ULL(16))
+ return 0;
+
+ switch (bytes) {
+ case 9728:
+ return 4;
+ case 10240:
+ return 5;
+ default:
+ return ilog2(bytes);
+ }
+
+ return 0;
+}
+
+static void nix_rx_sync(struct rvu *rvu, int blkaddr)
+{
+ int err;
+
+ /* Sync all in flight RX packets to LLC/DRAM */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
+ err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
+ if (err)
+ dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n");
+
+ /* SW_SYNC ensures all existing transactions are finished and pkts
+ * are written to LLC/DRAM; queues should be torn down after a
+ * successful SW_SYNC. Due to a HW erratum, in some rare scenarios
+ * an existing transaction might end after SW_SYNC operation. To
+ * ensure operation is fully done, do the SW_SYNC twice.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
+ err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
+ if (err)
+ dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n");
+}
+
+static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
+ int lvl, u16 pcifunc, u16 schq)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+ u16 map_func;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return false;
+
+ txsch = &nix_hw->txsch[lvl];
+ /* Check out of bounds */
+ if (schq >= txsch->schq.max)
+ return false;
+
+ mutex_lock(&rvu->rsrc_lock);
+ map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
+ mutex_unlock(&rvu->rsrc_lock);
+
+ /* TLs aggregating traffic are shared across PF and VFs */
+ if (lvl >= hw->cap.nix_tx_aggr_lvl) {
+ if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
+ return false;
+ else
+ return true;
+ }
+
+ if (map_func != pcifunc)
+ return false;
+
+ return true;
+}
+
+static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
+ struct nix_lf_alloc_rsp *rsp, bool loop)
+{
+ struct rvu_pfvf *parent_pf, *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ u16 req_chan_base, req_chan_end, req_chan_cnt;
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct sdp_node_info *sdp_info;
+ int pkind, pf, vf, lbkid, vfid;
+ u8 cgx_id, lmac_id;
+ bool from_vf;
+ int err;
+
+ pf = rvu_get_pf(pcifunc);
+ if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
+ type != NIX_INTF_TYPE_SDP)
+ return 0;
+
+ switch (type) {
+ case NIX_INTF_TYPE_CGX:
+ pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
+ rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
+
+ pkind = rvu_npc_get_pkind(rvu, pf);
+ if (pkind < 0) {
+ dev_err(rvu->dev,
+ "PF_Func 0x%x: Invalid pkind\n", pcifunc);
+ return -EINVAL;
+ }
+ pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
+ pfvf->tx_chan_base = pfvf->rx_chan_base;
+ pfvf->rx_chan_cnt = 1;
+ pfvf->tx_chan_cnt = 1;
+ rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id;
+
+ cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
+ rvu_npc_set_pkind(rvu, pkind, pfvf);
+
+ break;
+ case NIX_INTF_TYPE_LBK:
+ vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
+
+ /* If NIX1 block is present on the silicon then NIXes are
+ * assigned alternately for lbk interfaces. NIX0 should
+ * send packets on lbk link 1 channels and NIX1 should send
+ * on lbk link 0 channels for the communication between
+ * NIX0 and NIX1.
+ */
+ lbkid = 0;
+ if (rvu->hw->lbk_links > 1)
+ lbkid = vf & 0x1 ? 0 : 1;
+
+ /* By default NIX0 is configured to send packet on lbk link 1
+ * (which corresponds to LBK1), same packet will receive on
+ * NIX1 over lbk link 0. If NIX1 sends packet on lbk link 0
+ * (which corresponds to LBK2) packet will receive on NIX0 lbk
+ * link 1.
+ * But if the lbk links for NIX0 and NIX1 are negated, i.e. NIX0
+ * transmits and receives on lbk link 0, which corresponds
+ * to the LBK1 block, back-to-back connectivity between NIX and
+ * LBK can be achieved (which is similar to 96xx)
+ *
+ * RX TX
+ * NIX0 lbk link 1 (LBK2) 1 (LBK1)
+ * NIX0 lbk link 0 (LBK0) 0 (LBK0)
+ * NIX1 lbk link 0 (LBK1) 0 (LBK2)
+ * NIX1 lbk link 1 (LBK3) 1 (LBK3)
+ */
+ if (loop)
+ lbkid = !lbkid;
+
+ /* Note that AF's VFs work in pairs and talk over consecutive
+ * loopback channels. Therefore, if an odd number of AF VFs are
+ * enabled, the last VF remains without a pair.
+ */
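+ /* e.g. VF0 and VF1 form one such pair: the vf & 0x1 selection below
+ * makes VF0 transmit on VF1's loopback channel and vice versa.
+ */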
+ pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
+ pfvf->tx_chan_base = vf & 0x1 ?
+ rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
+ rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
+ pfvf->rx_chan_cnt = 1;
+ pfvf->tx_chan_cnt = 1;
+ rsp->tx_link = hw->cgx_links + lbkid;
+ pfvf->lbkid = lbkid;
+ rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
+ rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
+ pfvf->rx_chan_base,
+ pfvf->rx_chan_cnt);
+
+ break;
+ case NIX_INTF_TYPE_SDP:
+ from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
+ parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
+ sdp_info = parent_pf->sdp_info;
+ if (!sdp_info) {
+ dev_err(rvu->dev, "Invalid sdp_info pointer\n");
+ return -EINVAL;
+ }
+ if (from_vf) {
+ req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn +
+ sdp_info->num_pf_rings;
+ vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
+ for (vfid = 0; vfid < vf; vfid++)
+ req_chan_base += sdp_info->vf_rings[vfid];
+ req_chan_cnt = sdp_info->vf_rings[vf];
+ req_chan_end = req_chan_base + req_chan_cnt - 1;
+ if (req_chan_base < rvu_nix_chan_sdp(rvu, 0) ||
+ req_chan_end > rvu_nix_chan_sdp(rvu, 255)) {
+ dev_err(rvu->dev,
+ "PF_Func 0x%x: Invalid channel base and count\n",
+ pcifunc);
+ return -EINVAL;
+ }
+ } else {
+ req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn;
+ req_chan_cnt = sdp_info->num_pf_rings;
+ }
+
+ pfvf->rx_chan_base = req_chan_base;
+ pfvf->rx_chan_cnt = req_chan_cnt;
+ pfvf->tx_chan_base = pfvf->rx_chan_base;
+ pfvf->tx_chan_cnt = pfvf->rx_chan_cnt;
+
+ rsp->tx_link = hw->cgx_links + hw->lbk_links;
+ rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
+ pfvf->rx_chan_base,
+ pfvf->rx_chan_cnt);
+ break;
+ }
+
+ /* Add a UCAST forwarding rule in MCAM matching the MAC address of
+ * the RVU PF/VF this NIXLF is attached to.
+ */
+ rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
+ pfvf->rx_chan_base, pfvf->mac_addr);
+
+ /* Add this PF_FUNC to bcast pkt replication list */
+ err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
+ if (err) {
+ dev_err(rvu->dev,
+ "Bcast list, failed to enable PF_FUNC 0x%x\n",
+ pcifunc);
+ return err;
+ }
+ /* Install MCAM rule matching Ethernet broadcast mac address */
+ rvu_npc_install_bcast_match_entry(rvu, pcifunc,
+ nixlf, pfvf->rx_chan_base);
+
+ pfvf->maxlen = NIC_HW_MIN_FRS;
+ pfvf->minlen = NIC_HW_MIN_FRS;
+
+ return 0;
+}
+
+static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int err;
+
+ pfvf->maxlen = 0;
+ pfvf->minlen = 0;
+
+ /* Remove this PF_FUNC from bcast pkt replication list */
+ err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
+ if (err) {
+ dev_err(rvu->dev,
+ "Bcast list, failed to disable PF_FUNC 0x%x\n",
+ pcifunc);
+ }
+
+ /* Free and disable any MCAM entries used by this NIX LF */
+ rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+
+ /* Disable DMAC filters used */
+ rvu_cgx_disable_dmac_entries(rvu, pcifunc);
+}
+
+int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ int blkaddr, pf, type;
+ u16 chan_base, chan;
+ u64 cfg;
+
+ pf = rvu_get_pf(pcifunc);
+ type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+ if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
+ return 0;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+
+ chan_base = pfvf->rx_chan_base + req->chan_base;
+ for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
+ cfg & ~BIT_ULL(16));
+ }
+ return 0;
+}
+
+static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
+ int type, int chan_id)
+{
+ int bpid, blkaddr, lmac_chan_cnt, sdp_chan_cnt;
+ u16 cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt;
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_pfvf *pfvf;
+ u8 cgx_id, lmac_id;
+ u64 cfg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
+ lmac_chan_cnt = cfg & 0xFF;
+
+ cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
+ lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+ sdp_chan_cnt = cfg & 0xFFF;
+ sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt;
+
+ pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+
+ /* Backpressure IDs range division
+ * CGX channels are mapped to (0 - 191) BPIDs
+ * LBK channels are mapped to (192 - 255) BPIDs
+ * SDP channels are mapped to (256 - 511) BPIDs
+ *
+ * LMAC channels and BPIDs are mapped as follows
+ * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
+ * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
+ * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
+ */
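+ /* For example, with 16 channels per LMAC, a request on cgx(1)/lmac(2)
+ * with chan_base 0 resolves below to
+ * bpid = (1 * lmac_per_cgx * 16) + (2 * 16) + 0.
+ */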
+ switch (type) {
+ case NIX_INTF_TYPE_CGX:
+ if ((req->chan_base + req->chan_cnt) > 16)
+ return -EINVAL;
+ rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
+ /* Assign bpid based on cgx, lmac and chan id */
+ bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
+ (lmac_id * lmac_chan_cnt) + req->chan_base;
+
+ if (req->bpid_per_chan)
+ bpid += chan_id;
+ if (bpid > cgx_bpid_cnt)
+ return -EINVAL;
+ break;
+
+ case NIX_INTF_TYPE_LBK:
+ if ((req->chan_base + req->chan_cnt) > 63)
+ return -EINVAL;
+ bpid = cgx_bpid_cnt + req->chan_base;
+ if (req->bpid_per_chan)
+ bpid += chan_id;
+ if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
+ return -EINVAL;
+ break;
+ case NIX_INTF_TYPE_SDP:
+ if ((req->chan_base + req->chan_cnt) > 255)
+ return -EINVAL;
+
+ bpid = sdp_bpid_cnt + req->chan_base;
+ if (req->bpid_per_chan)
+ bpid += chan_id;
+
+ if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt + sdp_bpid_cnt))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return bpid;
+}
+
+int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct nix_bp_cfg_rsp *rsp)
+{
+ int blkaddr, pf, type, chan_id = 0;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ u16 chan_base, chan;
+ s16 bpid, bpid_base;
+ u64 cfg;
+
+ pf = rvu_get_pf(pcifunc);
+ type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+ if (is_sdp_pfvf(pcifunc))
+ type = NIX_INTF_TYPE_SDP;
+
+ /* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */
+ if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
+ type != NIX_INTF_TYPE_SDP)
+ return 0;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+
+ bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
+ chan_base = pfvf->rx_chan_base + req->chan_base;
+ bpid = bpid_base;
+
+ for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
+ if (bpid < 0) {
+ dev_warn(rvu->dev, "Fail to enable backpressure\n");
+ return -EINVAL;
+ }
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
+ cfg &= ~GENMASK_ULL(8, 0);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
+ cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16));
+ chan_id++;
+ bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
+ }
+
+ for (chan = 0; chan < req->chan_cnt; chan++) {
+ /* Map channel and bpid assign to it */
+ rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
+ (bpid_base & 0x3FF);
+ if (req->bpid_per_chan)
+ bpid_base++;
+ }
+ rsp->chan_cnt = req->chan_cnt;
+
+ return 0;
+}
+
+static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
+ u64 format, bool v4, u64 *fidx)
+{
+ struct nix_lso_format field = {0};
+
+ /* IP's Length field */
+ field.layer = NIX_TXLAYER_OL3;
+ /* In IPv4 the length field is at offset 2 bytes; for IPv6 it's at offset 4 */
+ field.offset = v4 ? 2 : 4;
+ field.sizem1 = 1; /* i.e 2 bytes */
+ field.alg = NIX_LSOALG_ADD_PAYLEN;
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
+ *(u64 *)&field);
+
+ /* No ID field in IPv6 header */
+ if (!v4)
+ return;
+
+ /* IP's ID field */
+ field.layer = NIX_TXLAYER_OL3;
+ field.offset = 4;
+ field.sizem1 = 1; /* i.e. 2 bytes */
+ field.alg = NIX_LSOALG_ADD_SEGNUM;
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
+ *(u64 *)&field);
+}
+
+static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
+ u64 format, u64 *fidx)
+{
+ struct nix_lso_format field = {0};
+
+ /* TCP's sequence number field */
+ field.layer = NIX_TXLAYER_OL4;
+ field.offset = 4;
+ field.sizem1 = 3; /* i.e. 4 bytes */
+ field.alg = NIX_LSOALG_ADD_OFFSET;
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
+ *(u64 *)&field);
+
+ /* TCP's flags field */
+ field.layer = NIX_TXLAYER_OL4;
+ field.offset = 12;
+ field.sizem1 = 1; /* 2 bytes */
+ field.alg = NIX_LSOALG_TCP_FLAGS;
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
+ *(u64 *)&field);
+}
+
+static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
+{
+ u64 cfg, idx, fidx = 0;
+
+ /* Get max HW supported format indices */
+ cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
+ nix_hw->lso.total = cfg;
+
+ /* Enable LSO */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
+ /* For TSO, set first and middle segment flags to
+ * mask out PSH, RST & FIN flags in TCP packet
+ */
+ cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
+ cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
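+ /* 0xFFF2 clears the FIN (bit 0), RST (bit 2) and PSH (bit 3) flags and passes the rest through */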
+ rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
+
+ /* Setup default static LSO formats
+ *
+ * Configure format fields for TCPv4 segmentation offload
+ */
+ idx = NIX_LSO_FORMAT_IDX_TSOV4;
+ nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
+ nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
+
+ /* Set rest of the fields to NOP */
+ for (; fidx < 8; fidx++) {
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
+ }
+ nix_hw->lso.in_use++;
+
+ /* Configure format fields for TCPv6 segmentation offload */
+ idx = NIX_LSO_FORMAT_IDX_TSOV6;
+ fidx = 0;
+ nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
+ nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
+
+ /* Set rest of the fields to NOP */
+ for (; fidx < 8; fidx++) {
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
+ }
+ nix_hw->lso.in_use++;
+}
+
+static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
+{
+ kfree(pfvf->rq_bmap);
+ kfree(pfvf->sq_bmap);
+ kfree(pfvf->cq_bmap);
+ if (pfvf->rq_ctx)
+ qmem_free(rvu->dev, pfvf->rq_ctx);
+ if (pfvf->sq_ctx)
+ qmem_free(rvu->dev, pfvf->sq_ctx);
+ if (pfvf->cq_ctx)
+ qmem_free(rvu->dev, pfvf->cq_ctx);
+ if (pfvf->rss_ctx)
+ qmem_free(rvu->dev, pfvf->rss_ctx);
+ if (pfvf->nix_qints_ctx)
+ qmem_free(rvu->dev, pfvf->nix_qints_ctx);
+ if (pfvf->cq_ints_ctx)
+ qmem_free(rvu->dev, pfvf->cq_ints_ctx);
+
+ pfvf->rq_bmap = NULL;
+ pfvf->cq_bmap = NULL;
+ pfvf->sq_bmap = NULL;
+ pfvf->rq_ctx = NULL;
+ pfvf->sq_ctx = NULL;
+ pfvf->cq_ctx = NULL;
+ pfvf->rss_ctx = NULL;
+ pfvf->nix_qints_ctx = NULL;
+ pfvf->cq_ints_ctx = NULL;
+}
+
+static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
+ struct rvu_pfvf *pfvf, int nixlf,
+ int rss_sz, int rss_grps, int hwctx_size,
+ u64 way_mask, bool tag_lsb_as_adder)
+{
+ int err, grp, num_indices;
+ u64 val;
+
+ /* RSS is not requested for this NIXLF */
+ if (!rss_sz)
+ return 0;
+ num_indices = rss_sz * rss_grps;
+
+ /* Alloc NIX RSS HW context memory and config the base */
+ err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
+ if (err)
+ return err;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
+ (u64)pfvf->rss_ctx->iova);
+
+ /* Config full RSS table size, enable RSS and caching */
+ val = BIT_ULL(36) | BIT_ULL(4) | way_mask << 20 |
+ ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE);
+
+ if (tag_lsb_as_adder)
+ val |= BIT_ULL(5);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), val);
+ /* Config RSS group offset and sizes */
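+ /* Group size is encoded as ilog2(rss_sz) - 1 at bit 16, group offset as rss_sz * grp */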
+ for (grp = 0; grp < rss_grps; grp++)
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
+ ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
+ return 0;
+}
+
+static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
+ struct nix_aq_inst_s *inst)
+{
+ struct admin_queue *aq = block->aq;
+ struct nix_aq_res_s *result;
+ int timeout = 1000;
+ u64 reg, head;
+ int ret;
+
+ result = (struct nix_aq_res_s *)aq->res->base;
+
+ /* Get current head pointer where to append this instruction */
+ reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
+ head = (reg >> 4) & AQ_PTR_MASK;
+
+ memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
+ (void *)inst, aq->inst->entry_sz);
+ memset(result, 0, sizeof(*result));
+ /* sync into memory */
+ wmb();
+
+ /* Ring the doorbell and wait for result */
+ rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
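+ /* Busy-poll for up to ~1ms (1000 iterations x 1us) for the instruction to complete */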
+ while (result->compcode == NIX_AQ_COMP_NOTDONE) {
+ cpu_relax();
+ udelay(1);
+ timeout--;
+ if (!timeout)
+ return -EBUSY;
+ }
+
+ if (result->compcode != NIX_AQ_COMP_GOOD) {
+ /* TODO: Replace this with some error code */
+ if (result->compcode == NIX_AQ_COMP_CTX_FAULT ||
+ result->compcode == NIX_AQ_COMP_LOCKERR ||
+ result->compcode == NIX_AQ_COMP_CTX_POISON) {
+ ret = rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_RX);
+ ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_TX);
+ ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_RX);
+ ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_TX);
+ if (ret)
+ dev_err(rvu->dev,
+ "%s: Not able to unlock cachelines\n", __func__);
+ }
+
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void nix_get_aq_req_smq(struct rvu *rvu, struct nix_aq_enq_req *req,
+ u16 *smq, u16 *smq_mask)
+{
+ struct nix_cn10k_aq_enq_req *aq_req;
+
+ if (!is_rvu_otx2(rvu)) {
+ aq_req = (struct nix_cn10k_aq_enq_req *)req;
+ *smq = aq_req->sq.smq;
+ *smq_mask = aq_req->sq_mask.smq;
+ } else {
+ *smq = req->sq.smq;
+ *smq_mask = req->sq_mask.smq;
+ }
+}
+
+static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
+ struct nix_aq_enq_req *req,
+ struct nix_aq_enq_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int nixlf, blkaddr, rc = 0;
+ struct nix_aq_inst_s inst;
+ struct rvu_block *block;
+ struct admin_queue *aq;
+ struct rvu_pfvf *pfvf;
+ u16 smq, smq_mask;
+ void *ctx, *mask;
+ bool ena;
+ u64 cfg;
+
+ blkaddr = nix_hw->blkaddr;
+ block = &hw->block[blkaddr];
+ aq = block->aq;
+ if (!aq) {
+ dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
+ return NIX_AF_ERR_AQ_ENQUEUE;
+ }
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
+
+ /* Skip NIXLF check for broadcast MCE entry and bandwidth profile
+ * operations done by AF itself.
+ */
+ if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) ||
+ (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) {
+ if (!pfvf->nixlf || nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+ }
+
+ switch (req->ctype) {
+ case NIX_AQ_CTYPE_RQ:
+ /* Check if index exceeds max no of queues */
+ if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+ break;
+ case NIX_AQ_CTYPE_SQ:
+ if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+ break;
+ case NIX_AQ_CTYPE_CQ:
+ if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+ break;
+ case NIX_AQ_CTYPE_RSS:
+ /* Check if RSS is enabled and qidx is within range */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
+ if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
+ (req->qidx >= (256UL << (cfg & 0xF))))
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+ break;
+ case NIX_AQ_CTYPE_MCE:
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
+
+ /* Check if index exceeds MCE list length */
+ if (!nix_hw->mcast.mce_ctx ||
+ (req->qidx >= (256UL << (cfg & 0xF))))
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+
+ /* Adding multicast lists for requests from PF/VFs is not
+ * yet supported, so ignore this.
+ */
+ if (rsp)
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+ break;
+ case NIX_AQ_CTYPE_BANDPROF:
+ if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req,
+ nix_hw, pcifunc))
+ rc = NIX_AF_ERR_INVALID_BANDPROF;
+ break;
+ default:
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+ }
+
+ if (rc)
+ return rc;
+
+ nix_get_aq_req_smq(rvu, req, &smq, &smq_mask);
+ /* Check if the SMQ pointed to by the SQ belongs to this PF/VF */
+ if (req->ctype == NIX_AQ_CTYPE_SQ &&
+ ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
+ (req->op == NIX_AQ_INSTOP_WRITE &&
+ req->sq_mask.ena && req->sq.ena && smq_mask))) {
+ if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
+ pcifunc, smq))
+ return NIX_AF_ERR_AQ_ENQUEUE;
+ }
+
+ memset(&inst, 0, sizeof(struct nix_aq_inst_s));
+ inst.lf = nixlf;
+ inst.cindex = req->qidx;
+ inst.ctype = req->ctype;
+ inst.op = req->op;
+ /* Currently we are not supporting enqueuing multiple instructions,
+ * so always choose first entry in result memory.
+ */
+ inst.res_addr = (u64)aq->res->iova;
+
+ /* Hardware uses the same aq->res->base for updating the result of
+ * the previous instruction, hence wait here till it is done.
+ */
+ spin_lock(&aq->lock);
+
+ /* Clean result + context memory */
+ memset(aq->res->base, 0, aq->res->entry_sz);
+ /* Context needs to be written at RES_ADDR + 128 */
+ ctx = aq->res->base + 128;
+ /* Mask needs to be written at RES_ADDR + 256 */
+ mask = aq->res->base + 256;
+
+ switch (req->op) {
+ case NIX_AQ_INSTOP_WRITE:
+ if (req->ctype == NIX_AQ_CTYPE_RQ)
+ memcpy(mask, &req->rq_mask,
+ sizeof(struct nix_rq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_SQ)
+ memcpy(mask, &req->sq_mask,
+ sizeof(struct nix_sq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_CQ)
+ memcpy(mask, &req->cq_mask,
+ sizeof(struct nix_cq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_RSS)
+ memcpy(mask, &req->rss_mask,
+ sizeof(struct nix_rsse_s));
+ else if (req->ctype == NIX_AQ_CTYPE_MCE)
+ memcpy(mask, &req->mce_mask,
+ sizeof(struct nix_rx_mce_s));
+ else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
+ memcpy(mask, &req->prof_mask,
+ sizeof(struct nix_bandprof_s));
+ fallthrough;
+ case NIX_AQ_INSTOP_INIT:
+ if (req->ctype == NIX_AQ_CTYPE_RQ)
+ memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_SQ)
+ memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_CQ)
+ memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_RSS)
+ memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
+ else if (req->ctype == NIX_AQ_CTYPE_MCE)
+ memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
+ else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
+ memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s));
+ break;
+ case NIX_AQ_INSTOP_NOP:
+ case NIX_AQ_INSTOP_READ:
+ case NIX_AQ_INSTOP_LOCK:
+ case NIX_AQ_INSTOP_UNLOCK:
+ break;
+ default:
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+ spin_unlock(&aq->lock);
+ return rc;
+ }
+
+ /* Submit the instruction to AQ */
+ rc = nix_aq_enqueue_wait(rvu, block, &inst);
+ if (rc) {
+ spin_unlock(&aq->lock);
+ return rc;
+ }
+
+ /* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
+ if (req->op == NIX_AQ_INSTOP_INIT) {
+ if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
+ __set_bit(req->qidx, pfvf->rq_bmap);
+ if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
+ __set_bit(req->qidx, pfvf->sq_bmap);
+ if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
+ __set_bit(req->qidx, pfvf->cq_bmap);
+ }
+
+ if (req->op == NIX_AQ_INSTOP_WRITE) {
+ if (req->ctype == NIX_AQ_CTYPE_RQ) {
+ ena = (req->rq.ena & req->rq_mask.ena) |
+ (test_bit(req->qidx, pfvf->rq_bmap) &
+ ~req->rq_mask.ena);
+ if (ena)
+ __set_bit(req->qidx, pfvf->rq_bmap);
+ else
+ __clear_bit(req->qidx, pfvf->rq_bmap);
+ }
+ if (req->ctype == NIX_AQ_CTYPE_SQ) {
+ ena = (req->rq.ena & req->sq_mask.ena) |
+ (test_bit(req->qidx, pfvf->sq_bmap) &
+ ~req->sq_mask.ena);
+ if (ena)
+ __set_bit(req->qidx, pfvf->sq_bmap);
+ else
+ __clear_bit(req->qidx, pfvf->sq_bmap);
+ }
+ if (req->ctype == NIX_AQ_CTYPE_CQ) {
+ ena = (req->rq.ena & req->cq_mask.ena) |
+ (test_bit(req->qidx, pfvf->cq_bmap) &
+ ~req->cq_mask.ena);
+ if (ena)
+ __set_bit(req->qidx, pfvf->cq_bmap);
+ else
+ __clear_bit(req->qidx, pfvf->cq_bmap);
+ }
+ }
+
+ if (rsp) {
+ /* Copy read context into mailbox */
+ if (req->op == NIX_AQ_INSTOP_READ) {
+ if (req->ctype == NIX_AQ_CTYPE_RQ)
+ memcpy(&rsp->rq, ctx,
+ sizeof(struct nix_rq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_SQ)
+ memcpy(&rsp->sq, ctx,
+ sizeof(struct nix_sq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_CQ)
+ memcpy(&rsp->cq, ctx,
+ sizeof(struct nix_cq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_RSS)
+ memcpy(&rsp->rss, ctx,
+ sizeof(struct nix_rsse_s));
+ else if (req->ctype == NIX_AQ_CTYPE_MCE)
+ memcpy(&rsp->mce, ctx,
+ sizeof(struct nix_rx_mce_s));
+ else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
+ memcpy(&rsp->prof, ctx,
+ sizeof(struct nix_bandprof_s));
+ }
+ }
+
+ spin_unlock(&aq->lock);
+ return 0;
+}
+
+static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw,
+ struct nix_aq_enq_req *req, u8 ctype)
+{
+ struct nix_cn10k_aq_enq_req aq_req;
+ struct nix_cn10k_aq_enq_rsp aq_rsp;
+ int rc, word;
+
+ if (req->ctype != NIX_AQ_CTYPE_CQ)
+ return 0;
+
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
+ req->hdr.pcifunc, ctype, req->qidx);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch %s%d context of PFFUNC 0x%x\n",
+ __func__, nix_get_ctx_name(ctype), req->qidx,
+ req->hdr.pcifunc);
+ return rc;
+ }
+
+ /* Make copy of original context & mask which are required
+ * for resubmission
+ */
+ memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s));
+ memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s));
+
+ /* exclude fields which HW can update */
+ aq_req.cq_mask.cq_err = 0;
+ aq_req.cq_mask.wrptr = 0;
+ aq_req.cq_mask.tail = 0;
+ aq_req.cq_mask.head = 0;
+ aq_req.cq_mask.avg_level = 0;
+ aq_req.cq_mask.update_time = 0;
+ aq_req.cq_mask.substream = 0;
+
+ /* Context mask (cq_mask) holds mask value of fields which
+ * are changed in AQ WRITE operation.
+ * for example cq.drop = 0xa;
+ * cq_mask.drop = 0xff;
+ * Below logic performs '&' between cq and cq_mask so that non
+ * updated fields are masked out for request and response
+ * comparison
+ */
+ for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64);
+ word++) {
+ *(u64 *)((u8 *)&aq_rsp.cq + word * 8) &=
+ (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
+ *(u64 *)((u8 *)&aq_req.cq + word * 8) &=
+ (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
+ }
+
+ if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s)))
+ return NIX_AF_ERR_AQ_CTX_RETRY_WRITE;
+
+ return 0;
+}
+
+static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
+ struct nix_aq_enq_rsp *rsp)
+{
+ struct nix_hw *nix_hw;
+ int err, retries = 5;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+retry:
+ err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
+
+ /* HW errata 'AQ Modification to CQ could be discarded on heavy traffic'
+ * As a workaround, perform a CQ context read after each AQ write. If the
+ * read shows the AQ write did not take effect, perform the AQ write again.
+ */
+ if (!err && req->op == NIX_AQ_INSTOP_WRITE) {
+ err = rvu_nix_verify_aq_ctx(rvu, nix_hw, req, NIX_AQ_CTYPE_CQ);
+ if (err == NIX_AF_ERR_AQ_CTX_RETRY_WRITE) {
+ if (retries--)
+ goto retry;
+ else
+ return NIX_AF_ERR_CQ_CTX_WRITE_ERR;
+ }
+ }
+
+ return err;
+}
+
+static const char *nix_get_ctx_name(int ctype)
+{
+ switch (ctype) {
+ case NIX_AQ_CTYPE_CQ:
+ return "CQ";
+ case NIX_AQ_CTYPE_SQ:
+ return "SQ";
+ case NIX_AQ_CTYPE_RQ:
+ return "RQ";
+ case NIX_AQ_CTYPE_RSS:
+ return "RSS";
+ }
+ return "";
+}
+
+static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ struct nix_aq_enq_req aq_req;
+ unsigned long *bmap;
+ int qidx, q_cnt = 0;
+ int err = 0, rc;
+
+ if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
+ return NIX_AF_ERR_AQ_ENQUEUE;
+
+ memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
+ aq_req.hdr.pcifunc = req->hdr.pcifunc;
+
+ if (req->ctype == NIX_AQ_CTYPE_CQ) {
+ aq_req.cq.ena = 0;
+ aq_req.cq_mask.ena = 1;
+ aq_req.cq.bp_ena = 0;
+ aq_req.cq_mask.bp_ena = 1;
+ q_cnt = pfvf->cq_ctx->qsize;
+ bmap = pfvf->cq_bmap;
+ }
+ if (req->ctype == NIX_AQ_CTYPE_SQ) {
+ aq_req.sq.ena = 0;
+ aq_req.sq_mask.ena = 1;
+ q_cnt = pfvf->sq_ctx->qsize;
+ bmap = pfvf->sq_bmap;
+ }
+ if (req->ctype == NIX_AQ_CTYPE_RQ) {
+ aq_req.rq.ena = 0;
+ aq_req.rq_mask.ena = 1;
+ q_cnt = pfvf->rq_ctx->qsize;
+ bmap = pfvf->rq_bmap;
+ }
+
+ aq_req.ctype = req->ctype;
+ aq_req.op = NIX_AQ_INSTOP_WRITE;
+
+ for (qidx = 0; qidx < q_cnt; qidx++) {
+ if (!test_bit(qidx, bmap))
+ continue;
+ aq_req.qidx = qidx;
+ rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
+ if (rc) {
+ err = rc;
+ dev_err(rvu->dev, "Failed to disable %s:%d context\n",
+ nix_get_ctx_name(req->ctype), qidx);
+ }
+ }
+
+ return err;
+}
+
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
+static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
+{
+ struct nix_aq_enq_req lock_ctx_req;
+ int err;
+
+ if (req->op != NIX_AQ_INSTOP_INIT)
+ return 0;
+
+ if (req->ctype == NIX_AQ_CTYPE_MCE ||
+ req->ctype == NIX_AQ_CTYPE_DYNO)
+ return 0;
+
+ memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
+ lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
+ lock_ctx_req.ctype = req->ctype;
+ lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
+ lock_ctx_req.qidx = req->qidx;
+ err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
+ if (err)
+ dev_err(rvu->dev,
+ "PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
+ req->hdr.pcifunc,
+ nix_get_ctx_name(req->ctype), req->qidx);
+ return err;
+}
+
+int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
+ struct nix_aq_enq_req *req,
+ struct nix_aq_enq_rsp *rsp)
+{
+ int err;
+
+ err = rvu_nix_aq_enq_inst(rvu, req, rsp);
+ if (!err)
+ err = nix_lf_hwctx_lockdown(rvu, req);
+ return err;
+}
+#else
+
+int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
+ struct nix_aq_enq_req *req,
+ struct nix_aq_enq_rsp *rsp)
+{
+ return rvu_nix_aq_enq_inst(rvu, req, rsp);
+}
+#endif
+/* CN10K mbox handler */
+int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
+ struct nix_cn10k_aq_enq_req *req,
+ struct nix_cn10k_aq_enq_rsp *rsp)
+{
+ return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
+ (struct nix_aq_enq_rsp *)rsp);
+}
+
+int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
+ struct hwctx_disable_req *req,
+ struct msg_rsp *rsp)
+{
+ return nix_lf_hwctx_disable(rvu, req);
+}
+
+int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
+ struct nix_lf_alloc_req *req,
+ struct nix_lf_alloc_rsp *rsp)
+{
+ int nixlf, qints, hwctx_size, intf, err, rc = 0;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ struct rvu_pfvf *pfvf;
+ u64 cfg, ctx_cfg;
+ int blkaddr;
+
+ if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
+ return NIX_AF_ERR_PARAM;
+
+ if (req->way_mask)
+ req->way_mask &= 0xFFFF;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+ nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ /* Check if requested 'NIXLF <=> NPALF' mapping is valid */
+ if (req->npa_func) {
+ /* If default, use 'this' NIXLF's PFFUNC */
+ if (req->npa_func == RVU_DEFAULT_PF_FUNC)
+ req->npa_func = pcifunc;
+ if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
+ return NIX_AF_INVAL_NPA_PF_FUNC;
+ }
+
+ /* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
+ if (req->sso_func) {
+ /* If default, use 'this' NIXLF's PFFUNC */
+ if (req->sso_func == RVU_DEFAULT_PF_FUNC)
+ req->sso_func = pcifunc;
+ if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
+ return NIX_AF_INVAL_SSO_PF_FUNC;
+ }
+
+ /* If RSS is being enabled, check if requested config is valid.
+ * RSS table size should be a power of two, otherwise
+ * RSS_GRP::OFFSET + adder might go beyond that group or
+ * the entire table can't be used.
+ */
+ if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
+ !is_power_of_2(req->rss_sz)))
+ return NIX_AF_ERR_RSS_SIZE_INVALID;
+
+ if (req->rss_sz &&
+ (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
+ return NIX_AF_ERR_RSS_GRPS_INVALID;
+
+ /* Reset this NIX LF */
+ err = rvu_lf_reset(rvu, block, nixlf);
+ if (err) {
+ dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
+ block->addr - BLKADDR_NIX0, nixlf);
+ return NIX_AF_ERR_LF_RESET;
+ }
+
+ ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
+
+ /* Alloc NIX RQ HW context memory and config the base */
+ hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
+ if (!pfvf->rq_bmap)
+ goto free_mem;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
+ (u64)pfvf->rq_ctx->iova);
+
+ /* Set caching and queue count in HW */
+ cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
+
+ /* Alloc NIX SQ HW context memory and config the base */
+ hwctx_size = 1UL << (ctx_cfg & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
+ if (!pfvf->sq_bmap)
+ goto free_mem;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
+ (u64)pfvf->sq_ctx->iova);
+
+ cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
+
+ /* Alloc NIX CQ HW context memory and config the base */
+ hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
+ if (!pfvf->cq_bmap)
+ goto free_mem;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
+ (u64)pfvf->cq_ctx->iova);
+
+ cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
+
+ /* Initialize receive side scaling (RSS) */
+ hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
+ err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
+ req->rss_grps, hwctx_size, req->way_mask,
+ !!(req->flags & NIX_LF_RSS_TAG_LSB_AS_ADDER));
+ if (err)
+ goto free_mem;
+
+ /* Alloc memory for CQINT's HW contexts */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
+ qints = (cfg >> 24) & 0xFFF;
+ hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
+ (u64)pfvf->cq_ints_ctx->iova);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
+ BIT_ULL(36) | req->way_mask << 20);
+
+ /* Alloc memory for QINT's HW contexts */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
+ qints = (cfg >> 12) & 0xFFF;
+ hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
+ (u64)pfvf->nix_qints_ctx->iova);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
+ BIT_ULL(36) | req->way_mask << 20);
+
+ /* Setup VLANX TPIDs.
+ * Use VLAN1 for 802.1Q
+ * and VLAN0 for 802.1AD.
+ */
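+ /* 0x8100 is the 802.1Q TPID (VLAN1, bits [31:16]); 0x88A8 is the 802.1ad TPID (VLAN0, bits [15:0]) */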
+ cfg = (0x8100ULL << 16) | 0x88A8ULL;
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
+
+ /* Enable LMTST for this NIX LF */
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
+
+ /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
+ if (req->npa_func)
+ cfg = req->npa_func;
+ if (req->sso_func)
+ cfg |= (u64)req->sso_func << 16;
+
+ cfg |= (u64)req->xqe_sz << 33;
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
+
+ /* Config Rx pkt length, csum checks and apad enable / disable */
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
+
+ /* Configure pkind for TX parse config */
+ cfg = NPC_TX_DEF_PKIND;
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
+
+ intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+ if (is_sdp_pfvf(pcifunc))
+ intf = NIX_INTF_TYPE_SDP;
+
+ err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp,
+ !!(req->flags & NIX_LF_LBK_BLK_SEL));
+ if (err)
+ goto free_mem;
+
+ /* Disable NPC entries as NIXLF's contexts are not initialized yet */
+ rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
+
+ /* Configure RX VTAG Type 7 (strip) for vf vlan */
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
+ VTAGSIZE_T4 | VTAG_STRIP);
+
+ goto exit;
+
+free_mem:
+ nix_ctx_free(rvu, pfvf);
+ rc = -ENOMEM;
+
+exit:
+ /* Set macaddr of this PF/VF */
+ ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
+
+ /* set SQB size info */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
+ rsp->sqb_size = (cfg >> 34) & 0xFFFF;
+ rsp->rx_chan_base = pfvf->rx_chan_base;
+ rsp->tx_chan_base = pfvf->tx_chan_base;
+ rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
+ rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
+ rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
+ rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
+ /* Get HW supported stat count */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+ rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
+ rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
+ /* Get count of CQ IRQs and error IRQs supported per LF */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
+ rsp->qints = ((cfg >> 12) & 0xFFF);
+ rsp->cints = ((cfg >> 24) & 0xFFF);
+ rsp->cgx_links = hw->cgx_links;
+ rsp->lbk_links = hw->lbk_links;
+ rsp->sdp_links = hw->sdp_links;
+
+ return rc;
+}
+
+int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ int blkaddr, nixlf, err;
+ struct rvu_pfvf *pfvf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+ nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ if (req->flags & NIX_LF_DISABLE_FLOWS)
+ rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+ else
+ rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
+
+ /* Free any tx vtag def entries used by this NIX LF */
+ if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
+ nix_free_tx_vtag_entries(rvu, pcifunc);
+
+ nix_interface_deinit(rvu, pcifunc, nixlf);
+
+ /* Reset this NIX LF */
+ err = rvu_lf_reset(rvu, block, nixlf);
+ if (err) {
+ dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
+ block->addr - BLKADDR_NIX0, nixlf);
+ return NIX_AF_ERR_LF_RESET;
+ }
+
+ nix_ctx_free(rvu, pfvf);
+
+ return 0;
+}
+
+int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
+ struct nix_mark_format_cfg *req,
+ struct nix_mark_format_cfg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct nix_hw *nix_hw;
+ struct rvu_pfvf *pfvf;
+ int blkaddr, rc;
+ u32 cfg;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ cfg = (((u32)req->offset & 0x7) << 16) |
+ (((u32)req->y_mask & 0xF) << 12) |
+ (((u32)req->y_val & 0xF) << 8) |
+ (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
+
+ rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
+ if (rc < 0) {
+ dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
+ rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
+ return NIX_AF_ERR_MARK_CFG_FAIL;
+ }
+
+ rsp->mark_format_idx = rc;
+ return 0;
+}
+
+/* Handle shaper update specially for a few silicon revisions */
+static bool
+handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf,
+ int lvl, u64 reg, u64 regval)
+{
+ u64 regbase, oldval, sw_xoff = 0;
+ u64 dbgval, md_debug0 = 0;
+ unsigned long poll_tmo;
+ bool rate_reg = 0;
+ u32 schq;
+
+ regbase = reg & 0xFFFF;
+ schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
+
+ /* Check for rate register */
+ switch (lvl) {
+ case NIX_TXSCH_LVL_TL1:
+ md_debug0 = NIX_AF_TL1X_MD_DEBUG0(schq);
+ sw_xoff = NIX_AF_TL1X_SW_XOFF(schq);
+
+ rate_reg = !!(regbase == NIX_AF_TL1X_CIR(0));
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ md_debug0 = NIX_AF_TL2X_MD_DEBUG0(schq);
+ sw_xoff = NIX_AF_TL2X_SW_XOFF(schq);
+
+ rate_reg = (regbase == NIX_AF_TL2X_CIR(0) ||
+ regbase == NIX_AF_TL2X_PIR(0));
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ md_debug0 = NIX_AF_TL3X_MD_DEBUG0(schq);
+ sw_xoff = NIX_AF_TL3X_SW_XOFF(schq);
+
+ rate_reg = (regbase == NIX_AF_TL3X_CIR(0) ||
+ regbase == NIX_AF_TL3X_PIR(0));
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ md_debug0 = NIX_AF_TL4X_MD_DEBUG0(schq);
+ sw_xoff = NIX_AF_TL4X_SW_XOFF(schq);
+
+ rate_reg = (regbase == NIX_AF_TL4X_CIR(0) ||
+ regbase == NIX_AF_TL4X_PIR(0));
+ break;
+ case NIX_TXSCH_LVL_MDQ:
+ sw_xoff = NIX_AF_MDQX_SW_XOFF(schq);
+ rate_reg = (regbase == NIX_AF_MDQX_CIR(0) ||
+ regbase == NIX_AF_MDQX_PIR(0));
+ break;
+ }
+
+ if (!rate_reg)
+ return false;
+
+ /* Nothing special to do when state is not toggled */
+ oldval = rvu_read64(rvu, blkaddr, reg);
+ if ((oldval & 0x1) == (regval & 0x1)) {
+ rvu_write64(rvu, blkaddr, reg, regval);
+ return true;
+ }
+
+ /* PIR/CIR disable */
+ if (!(regval & 0x1)) {
+ rvu_write64(rvu, blkaddr, sw_xoff, 1);
+ rvu_write64(rvu, blkaddr, reg, 0);
+ udelay(4);
+ rvu_write64(rvu, blkaddr, sw_xoff, 0);
+ return true;
+ }
+
+ /* PIR/CIR enable */
+ rvu_write64(rvu, blkaddr, sw_xoff, 1);
+ if (md_debug0) {
+ poll_tmo = jiffies + usecs_to_jiffies(10000);
+ /* Wait until VLD(bit32) == 1 or C_CON(bit48) == 0 */
+ do {
+ if (time_after(jiffies, poll_tmo)) {
+ dev_err(rvu->dev,
+ "NIXLF%d: TLX%u(lvl %u) CIR/PIR enable failed\n",
+ nixlf, schq, lvl);
+ goto exit;
+ }
+ usleep_range(1, 5);
+ dbgval = rvu_read64(rvu, blkaddr, md_debug0);
+ } while (!(dbgval & BIT_ULL(32)) && (dbgval & BIT_ULL(48)));
+ }
+ rvu_write64(rvu, blkaddr, reg, regval);
+exit:
+ rvu_write64(rvu, blkaddr, sw_xoff, 0);
+ return true;
+}
+
+static void nix_reset_tx_schedule(struct rvu *rvu, int blkaddr,
+ int lvl, int schq)
+{
+ u64 tlx_parent = 0, tlx_schedule = 0;
+
+ switch (lvl) {
+ case NIX_TXSCH_LVL_TL2:
+ tlx_parent = NIX_AF_TL2X_PARENT(schq);
+ tlx_schedule = NIX_AF_TL2X_SCHEDULE(schq);
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ tlx_parent = NIX_AF_TL3X_PARENT(schq);
+ tlx_schedule = NIX_AF_TL3X_SCHEDULE(schq);
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ tlx_parent = NIX_AF_TL4X_PARENT(schq);
+ tlx_schedule = NIX_AF_TL4X_SCHEDULE(schq);
+ break;
+ case NIX_TXSCH_LVL_MDQ:
+ /* no need to reset SMQ_CFG as HW clears this CSR
+ * on SMQ flush
+ */
+ tlx_parent = NIX_AF_MDQX_PARENT(schq);
+ tlx_schedule = NIX_AF_MDQX_SCHEDULE(schq);
+ break;
+ default:
+ return;
+ }
+
+ if (tlx_parent)
+ rvu_write64(rvu, blkaddr, tlx_parent, 0x0);
+
+ if (tlx_schedule)
+ rvu_write64(rvu, blkaddr, tlx_schedule, 0x0);
+}
+
+/* Disable shaping of pkts by a scheduler queue
+ * at a given scheduler level.
+ */
+static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
+ int nixlf, int lvl, int schq)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 cir_reg = 0, pir_reg = 0;
+ u64 cfg;
+
+ switch (lvl) {
+ case NIX_TXSCH_LVL_TL1:
+ cir_reg = NIX_AF_TL1X_CIR(schq);
+ pir_reg = 0; /* PIR not available at TL1 */
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ cir_reg = NIX_AF_TL2X_CIR(schq);
+ pir_reg = NIX_AF_TL2X_PIR(schq);
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ cir_reg = NIX_AF_TL3X_CIR(schq);
+ pir_reg = NIX_AF_TL3X_PIR(schq);
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ cir_reg = NIX_AF_TL4X_CIR(schq);
+ pir_reg = NIX_AF_TL4X_PIR(schq);
+ break;
+ case NIX_TXSCH_LVL_MDQ:
+ cir_reg = NIX_AF_MDQX_CIR(schq);
+ pir_reg = NIX_AF_MDQX_PIR(schq);
+ break;
+ }
+
+ /* Shaper state toggle needs wait/poll */
+ if (hw->cap.nix_shaper_toggle_wait) {
+ if (cir_reg)
+ handle_txschq_shaper_update(rvu, blkaddr, nixlf,
+ lvl, cir_reg, 0);
+ if (pir_reg)
+ handle_txschq_shaper_update(rvu, blkaddr, nixlf,
+ lvl, pir_reg, 0);
+ return;
+ }
+
+ if (!cir_reg)
+ return;
+ cfg = rvu_read64(rvu, blkaddr, cir_reg);
+ rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
+
+ if (!pir_reg)
+ return;
+ cfg = rvu_read64(rvu, blkaddr, pir_reg);
+ rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
+}
+
+static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
+ int lvl, int schq)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int link_level;
+ int link;
+
+ if (lvl >= hw->cap.nix_tx_aggr_lvl)
+ return;
+
+ /* Reset TL4's SDP link config */
+ if (lvl == NIX_TXSCH_LVL_TL4)
+ rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
+
+ link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
+ NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
+ if (lvl != link_level)
+ return;
+
+ /* Reset TL2's CGX or LBK link config */
+ for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
+}
+
+static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr,
+ int lvl, int schq)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 reg;
+
+ /* Skip this if shaping is not supported */
+ if (!hw->cap.nix_shaping)
+ return;
+
+ /* Clear level specific SW_XOFF */
+ switch (lvl) {
+ case NIX_TXSCH_LVL_TL1:
+ reg = NIX_AF_TL1X_SW_XOFF(schq);
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ reg = NIX_AF_TL2X_SW_XOFF(schq);
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ reg = NIX_AF_TL3X_SW_XOFF(schq);
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ reg = NIX_AF_TL4X_SW_XOFF(schq);
+ break;
+ case NIX_TXSCH_LVL_MDQ:
+ reg = NIX_AF_MDQX_SW_XOFF(schq);
+ break;
+ default:
+ return;
+ }
+
+ rvu_write64(rvu, blkaddr, reg, 0x0);
+}
+
+static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id = 0, lmac_id = 0;
+
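+ /* TX links are numbered with CGX links first, then LBK links, then the SDP link */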
+ if (is_afvf(pcifunc)) {/* LBK links */
+ return hw->cgx_links;
+ } else if (is_pf_cgxmapped(rvu, pf)) {
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return (cgx_id * hw->lmac_per_cgx) + lmac_id;
+ }
+
+ /* SDP link */
+ return hw->cgx_links + hw->lbk_links;
+}
+
+static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
+ int link, int *start, int *end)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int pf = rvu_get_pf(pcifunc);
+
+ if (is_afvf(pcifunc)) { /* LBK links */
+ *start = hw->cap.nix_txsch_per_cgx_lmac * link;
+ *end = *start + hw->cap.nix_txsch_per_lbk_lmac;
+ } else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
+ *start = hw->cap.nix_txsch_per_cgx_lmac * link;
+ *end = *start + hw->cap.nix_txsch_per_cgx_lmac;
+ } else { /* SDP link */
+ *start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
+ (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
+ *end = *start + hw->cap.nix_txsch_per_sdp_lmac;
+ }
+}
+
+static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
+ struct nix_hw *nix_hw,
+ struct nix_txsch_alloc_req *req)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int schq, req_schq, free_cnt;
+ struct nix_txsch *txsch;
+ int link, start, end;
+
+ txsch = &nix_hw->txsch[lvl];
+ req_schq = req->schq_contig[lvl] + req->schq[lvl];
+
+ if (!req_schq)
+ return 0;
+
+ link = nix_get_tx_link(rvu, pcifunc);
+
+ /* For traffic aggregating scheduler level, one queue is enough */
+ if (lvl >= hw->cap.nix_tx_aggr_lvl) {
+ if (req_schq != 1)
+ return NIX_AF_ERR_TLX_ALLOC_FAIL;
+ return 0;
+ }
+
+ /* Get free SCHQ count and check if request can be accommodated */
+ if (hw->cap.nix_fixed_txschq_mapping) {
+ nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
+ schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
+ if (end <= txsch->schq.max && schq < end &&
+ !test_bit(schq, txsch->schq.bmap))
+ free_cnt = 1;
+ else
+ free_cnt = 0;
+ } else {
+ free_cnt = rvu_rsrc_free_count(&txsch->schq);
+ }
+
+ if (free_cnt < req_schq || req->schq[lvl] > MAX_TXSCHQ_PER_FUNC ||
+ req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC)
+ return NIX_AF_ERR_TLX_ALLOC_FAIL;
+
+ /* If contiguous queues are needed, check for availability */
+ if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
+ !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
+ return NIX_AF_ERR_TLX_ALLOC_FAIL;
+
+ return 0;
+}
+
+static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
+ struct nix_txsch_alloc_rsp *rsp,
+ int lvl, int start, int end)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = rsp->hdr.pcifunc;
+ int idx, schq;
+
+ /* For traffic aggregating levels, queue alloc is based
+ * on the transmit link to which the PF_FUNC is mapped.
+ */
+ if (lvl >= hw->cap.nix_tx_aggr_lvl) {
+ /* A single TL queue is allocated */
+ if (rsp->schq_contig[lvl]) {
+ rsp->schq_contig[lvl] = 1;
+ rsp->schq_contig_list[lvl][0] = start;
+ }
+
+ /* Both contig and non-contig reqs don't make sense here */
+ if (rsp->schq_contig[lvl])
+ rsp->schq[lvl] = 0;
+
+ if (rsp->schq[lvl]) {
+ rsp->schq[lvl] = 1;
+ rsp->schq_list[lvl][0] = start;
+ }
+ return;
+ }
+
+ /* Adjust the queue request count if HW supports
+ * only one queue per level configuration.
+ */
+ if (hw->cap.nix_fixed_txschq_mapping) {
+ idx = pcifunc & RVU_PFVF_FUNC_MASK;
+ schq = start + idx;
+ if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
+ rsp->schq_contig[lvl] = 0;
+ rsp->schq[lvl] = 0;
+ return;
+ }
+
+ if (rsp->schq_contig[lvl]) {
+ rsp->schq_contig[lvl] = 1;
+ set_bit(schq, txsch->schq.bmap);
+ rsp->schq_contig_list[lvl][0] = schq;
+ rsp->schq[lvl] = 0;
+ } else if (rsp->schq[lvl]) {
+ rsp->schq[lvl] = 1;
+ set_bit(schq, txsch->schq.bmap);
+ rsp->schq_list[lvl][0] = schq;
+ }
+ return;
+ }
+
+ /* Allocate the requested contiguous queue indices first */
+ if (rsp->schq_contig[lvl]) {
+ schq = bitmap_find_next_zero_area(txsch->schq.bmap,
+ txsch->schq.max, start,
+ rsp->schq_contig[lvl], 0);
+ if (schq >= end)
+ rsp->schq_contig[lvl] = 0;
+ for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
+ set_bit(schq, txsch->schq.bmap);
+ rsp->schq_contig_list[lvl][idx] = schq;
+ schq++;
+ }
+ }
+
+ /* Allocate non-contiguous queue indices */
+ if (rsp->schq[lvl]) {
+ idx = 0;
+ for (schq = start; schq < end; schq++) {
+ if (!test_bit(schq, txsch->schq.bmap)) {
+ set_bit(schq, txsch->schq.bmap);
+ rsp->schq_list[lvl][idx++] = schq;
+ }
+ if (idx == rsp->schq[lvl])
+ break;
+ }
+ /* Update how many were allocated */
+ rsp->schq[lvl] = idx;
+ }
+}
+
+int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
+ struct nix_txsch_alloc_req *req,
+ struct nix_txsch_alloc_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int link, blkaddr, rc = 0;
+ int lvl, idx, start, end;
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+ u32 *pfvf_map;
+ int nixlf;
+ u16 schq;
+
+ rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (rc)
+ return rc;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ /* Check if request is valid as per HW capabilities
+ * and can be accommodated.
+ */
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
+ if (rc)
+ goto err;
+ }
+
+ /* Allocate requested Tx scheduler queues */
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ txsch = &nix_hw->txsch[lvl];
+ pfvf_map = txsch->pfvf_map;
+
+ if (!req->schq[lvl] && !req->schq_contig[lvl])
+ continue;
+
+ rsp->schq[lvl] = req->schq[lvl];
+ rsp->schq_contig[lvl] = req->schq_contig[lvl];
+
+ link = nix_get_tx_link(rvu, pcifunc);
+
+ if (lvl >= hw->cap.nix_tx_aggr_lvl) {
+ start = link;
+ end = link;
+ } else if (hw->cap.nix_fixed_txschq_mapping) {
+ nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
+ } else {
+ start = 0;
+ end = txsch->schq.max;
+ }
+
+ nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);
+
+ /* Reset queue config */
+ for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
+ schq = rsp->schq_contig_list[lvl][idx];
+ if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
+ NIX_TXSCHQ_CFG_DONE))
+ pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
+ nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
+ nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
+ nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
+ }
+
+ for (idx = 0; idx < req->schq[lvl]; idx++) {
+ schq = rsp->schq_list[lvl][idx];
+ if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
+ NIX_TXSCHQ_CFG_DONE))
+ pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
+ nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
+ nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
+ nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
+ }
+ }
+
+ rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
+ rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
+ rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
+ NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
+ NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
+ goto exit;
+err:
+ rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
+exit:
+ mutex_unlock(&rvu->rsrc_lock);
+ return rc;
+}
+
+static int nix_smq_flush(struct rvu *rvu, int blkaddr,
+ int smq, u16 pcifunc, int nixlf)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id = 0, lmac_id = 0;
+ int err, restore_tx_en = 0;
+ u64 cfg;
+
+ /* enable cgx tx if disabled */
+ if (is_pf_cgxmapped(rvu, pf)) {
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
+ lmac_id, true);
+ }
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
+ /* Do SMQ flush and set enqueue xoff */
+ cfg |= BIT_ULL(50) | BIT_ULL(49);
+ rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
+
+ /* Disable backpressure from physical link,
+ * otherwise SMQ flush may stall.
+ */
+ rvu_cgx_enadis_rx_bp(rvu, pf, false);
+
+ /* Wait for flush to complete */
+ err = rvu_poll_reg(rvu, blkaddr,
+ NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
+ if (err)
+ dev_err(rvu->dev,
+ "NIXLF%d: SMQ%d flush failed\n", nixlf, smq);
+
+ rvu_cgx_enadis_rx_bp(rvu, pf, true);
+ /* restore cgx tx state */
+ if (restore_tx_en)
+ rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
+ return err;
+}
+
+static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
+{
+ int blkaddr, nixlf, lvl, schq, err;
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+ u16 map_func;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ /* Disable TL2/3 queue links and all XOFFs before SMQ flush */
+ mutex_lock(&rvu->rsrc_lock);
+ for (lvl = NIX_TXSCH_LVL_MDQ; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ txsch = &nix_hw->txsch[lvl];
+
+ if (lvl >= hw->cap.nix_tx_aggr_lvl)
+ continue;
+
+ for (schq = 0; schq < txsch->schq.max; schq++) {
+ if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
+ continue;
+ nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
+ nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
+ nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
+ }
+ }
+ nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1,
+ nix_get_tx_link(rvu, pcifunc));
+
+ /* On PF cleanup, clear cfg done flag as
+ * PF would have changed default config.
+ */
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1];
+ schq = nix_get_tx_link(rvu, pcifunc);
+ /* Do not clear pcifunc in txsch->pfvf_map[schq] because
+ * a VF might be using this TL1 queue
+ */
+ map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
+ txsch->pfvf_map[schq] = TXSCH_SET_FLAG(map_func, 0x0);
+ }
+
+ /* Flush SMQs */
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
+ for (schq = 0; schq < txsch->schq.max; schq++) {
+ if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
+ continue;
+ nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
+ }
+
+ /* Now free scheduler queues to free pool */
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ /* TLs above the aggregation level are shared across a PF
+ * and its VFs, hence skip freeing them.
+ */
+ if (lvl >= hw->cap.nix_tx_aggr_lvl)
+ continue;
+
+ txsch = &nix_hw->txsch[lvl];
+ for (schq = 0; schq < txsch->schq.max; schq++) {
+ if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
+ continue;
+ nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
+ rvu_free_rsrc(&txsch->schq, schq);
+ txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+
+ /* Sync cached info for this LF in NDC-TX to LLC/DRAM */
+ rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
+ err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
+ if (err)
+ dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
+
+ return 0;
+}
+
+static int nix_txschq_free_one(struct rvu *rvu,
+ struct nix_txsch_free_req *req)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int lvl, schq, nixlf, blkaddr;
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+ u32 *pfvf_map;
+ int rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ lvl = req->schq_lvl;
+ schq = req->schq;
+ txsch = &nix_hw->txsch[lvl];
+
+ if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
+ return 0;
+
+ pfvf_map = txsch->pfvf_map;
+ mutex_lock(&rvu->rsrc_lock);
+
+ if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
+ rc = NIX_AF_ERR_TLX_INVALID;
+ goto err;
+ }
+
+ /* Clear SW_XOFF of this resource only.
+ * For SMQ level, all XOFFs along the path
+ * need to be cleared by the user
+ */
+ nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
+
+ nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
+ nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
+
+ /* Flush if it is an SMQ. The onus of disabling
+ * TL2/3 queue links before SMQ flush is on the user
+ */
+ if (lvl == NIX_TXSCH_LVL_SMQ &&
+ nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf)) {
+ rc = NIX_AF_SMQ_FLUSH_FAILED;
+ goto err;
+ }
+
+ nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
+
+ /* Free the resource */
+ rvu_free_rsrc(&txsch->schq, schq);
+ txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+err:
+ mutex_unlock(&rvu->rsrc_lock);
+ return rc;
+}
+
+int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
+ struct nix_txsch_free_req *req,
+ struct msg_rsp *rsp)
+{
+ if (req->flags & TXSCHQ_FREE_ALL)
+ return nix_txschq_free(rvu, req->hdr.pcifunc);
+ else
+ return nix_txschq_free_one(rvu, req);
+}
+
+static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
+ int lvl, u64 reg, u64 regval)
+{
+ u64 regbase = reg & 0xFFFF;
+ u16 schq, parent;
+
+ if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
+ return false;
+
+ schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
+ /* Check if this schq belongs to this PF/VF or not */
+ if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
+ return false;
+
+ parent = (regval >> 16) & 0x1FF;
+ /* Validate MDQ's TL4 parent */
+ if (regbase == NIX_AF_MDQX_PARENT(0) &&
+ !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
+ return false;
+
+ /* Validate TL4's TL3 parent */
+ if (regbase == NIX_AF_TL4X_PARENT(0) &&
+ !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
+ return false;
+
+ /* Validate TL3's TL2 parent */
+ if (regbase == NIX_AF_TL3X_PARENT(0) &&
+ !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
+ return false;
+
+ /* Validate TL2's TL1 parent */
+ if (regbase == NIX_AF_TL2X_PARENT(0) &&
+ !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
+ return false;
+
+ return true;
+}
+
+static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
+{
+ u64 regbase;
+
+ if (hw->cap.nix_shaping)
+ return true;
+
+ /* If shaping and coloring are not supported, then
+ * *_CIR and *_PIR registers should not be configured.
+ */
+ regbase = reg & 0xFFFF;
+
+ switch (lvl) {
+ case NIX_TXSCH_LVL_TL1:
+ if (regbase == NIX_AF_TL1X_CIR(0))
+ return false;
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ if (regbase == NIX_AF_TL2X_CIR(0) ||
+ regbase == NIX_AF_TL2X_PIR(0))
+ return false;
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ if (regbase == NIX_AF_TL3X_CIR(0) ||
+ regbase == NIX_AF_TL3X_PIR(0))
+ return false;
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ if (regbase == NIX_AF_TL4X_CIR(0) ||
+ regbase == NIX_AF_TL4X_PIR(0))
+ return false;
+ break;
+ case NIX_TXSCH_LVL_MDQ:
+ if (regbase == NIX_AF_MDQX_CIR(0) ||
+ regbase == NIX_AF_MDQX_PIR(0))
+ return false;
+ break;
+ }
+ return true;
+}
+
+static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
+ u16 pcifunc, int blkaddr)
+{
+ u32 *pfvf_map;
+ int schq;
+
+ schq = nix_get_tx_link(rvu, pcifunc);
+ pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
+ /* Skip if PF has already done the config */
+ if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
+ return;
+ rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
+ (TXSCH_TL1_DFLT_RR_PRIO << 1));
+
+ /* On OcteonTx2 the config was in bytes; on newer silicons
+ * it's changed to a weight.
+ */
+ if (!rvu->hw->cap.nix_common_dwrr_mtu)
+ rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
+ TXSCH_TL1_DFLT_RR_QTM);
+ else
+ rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
+ CN10K_MAX_DWRR_WEIGHT);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
+ pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
+}
+
+/* Register offset - [15:0]
+ * Scheduler Queue number - [25:16]
+ */
+#define NIX_TX_SCHQ_MASK GENMASK_ULL(25, 0)
+
+static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw,
+ int blkaddr, struct nix_txschq_config *req,
+ struct nix_txschq_config *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int idx, schq;
+ u64 reg;
+
+ for (idx = 0; idx < req->num_regs; idx++) {
+ reg = req->reg[idx];
+ reg &= NIX_TX_SCHQ_MASK;
+ schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
+ if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, req->lvl, reg) ||
+ !is_valid_txschq(rvu, blkaddr, req->lvl, pcifunc, schq))
+ return NIX_AF_INVAL_TXSCHQ_CFG;
+ rsp->regval[idx] = rvu_read64(rvu, blkaddr, reg);
+ }
+ rsp->lvl = req->lvl;
+ rsp->num_regs = req->num_regs;
+ return 0;
+}
+
+static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr,
+ u16 pcifunc, struct nix_txsch *txsch)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int lbk_link_start, lbk_links;
+ u8 pf = rvu_get_pf(pcifunc);
+ int schq;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return;
+
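+ /* LBK links follow immediately after the CGX links */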
+ lbk_link_start = hw->cgx_links;
+
+ for (schq = 0; schq < txsch->schq.max; schq++) {
+ if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
+ continue;
+ /* Enable all LBK links with channel 63 by default so that
+ * packets can be sent to LBK with an NPC TX MCAM rule
+ */
+ lbk_links = hw->lbk_links;
+ while (lbk_links--)
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(schq,
+ lbk_link_start +
+ lbk_links),
+ BIT_ULL(12) | RVU_SWITCH_LBK_CHAN);
+ }
+}
+
+int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
+ struct nix_txschq_config *req,
+ struct nix_txschq_config *rsp)
+{
+ u64 reg, val, regval, schq_regbase, val_mask;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+ int blkaddr, idx, err;
+ int nixlf, schq;
+ u32 *pfvf_map;
+
+ if (req->lvl >= NIX_TXSCH_LVL_CNT ||
+ req->num_regs > MAX_REGS_PER_MBOX_MSG)
+ return NIX_AF_INVAL_TXSCHQ_CFG;
+
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return err;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ if (req->read)
+ return nix_txschq_cfg_read(rvu, nix_hw, blkaddr, req, rsp);
+
+ txsch = &nix_hw->txsch[req->lvl];
+ pfvf_map = txsch->pfvf_map;
+
+ if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
+ pcifunc & RVU_PFVF_FUNC_MASK) {
+ mutex_lock(&rvu->rsrc_lock);
+ if (req->lvl == NIX_TXSCH_LVL_TL1)
+ nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+ }
+
+ for (idx = 0; idx < req->num_regs; idx++) {
+ reg = req->reg[idx];
+ reg &= NIX_TX_SCHQ_MASK;
+ regval = req->regval[idx];
+ schq_regbase = reg & 0xFFFF;
+ val_mask = req->regval_mask[idx];
+
+ if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
+ txsch->lvl, reg, regval))
+ return NIX_AF_INVAL_TXSCHQ_CFG;
+
+ /* Check if shaping and coloring is supported */
+ if (!is_txschq_shaping_valid(hw, req->lvl, reg))
+ continue;
+
+ val = rvu_read64(rvu, blkaddr, reg);
+ regval = (val & val_mask) | (regval & ~val_mask);
+
+ /* Handle shaping state toggle specially */
+ if (hw->cap.nix_shaper_toggle_wait &&
+ handle_txschq_shaper_update(rvu, blkaddr, nixlf,
+ req->lvl, reg, regval))
+ continue;
+
+ /* Replace PF/VF visible NIXLF slot with HW NIXLF id */
+ if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
+ pcifunc, 0);
+ regval &= ~(0x7FULL << 24);
+ regval |= ((u64)nixlf << 24);
+ }
+
+ /* Clear 'BP_ENA' config, if it's not allowed */
+ if (!hw->cap.nix_tx_link_bp) {
+ if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
+ (schq_regbase & 0xFF00) ==
+ NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
+ regval &= ~BIT_ULL(13);
+ }
+
+ /* Mark config as done for TL1 by PF */
+ if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
+ schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
+ schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
+ mutex_lock(&rvu->rsrc_lock);
+ pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
+ NIX_TXSCHQ_CFG_DONE);
+ mutex_unlock(&rvu->rsrc_lock);
+ }
+
+ /* SMQ flush is special, hence split the register write such
+ * that the flush is done first and the rest of the bits are written later.
+ */
+ if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
+ (regval & BIT_ULL(49))) {
+ schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
+ nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
+ regval &= ~BIT_ULL(49);
+ }
+ rvu_write64(rvu, blkaddr, reg, regval);
+ }
+
+ rvu_nix_tx_tl2_cfg(rvu, blkaddr, pcifunc,
+ &nix_hw->txsch[NIX_TXSCH_LVL_TL2]);
+ return 0;
+}
+
+static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
+ struct nix_vtag_config *req)
+{
+ u64 regval = req->vtag_size;
+
+ if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
+ req->vtag_size > VTAGSIZE_T8)
+ return -EINVAL;
+
+ /* RX VTAG Type 7 reserved for vf vlan */
+ if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
+ return NIX_AF_ERR_RX_VTAG_INUSE;
+
+ if (req->rx.capture_vtag)
+ regval |= BIT_ULL(5);
+ if (req->rx.strip_vtag)
+ regval |= BIT_ULL(4);
+
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
+ return 0;
+}
+
+static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
+ u16 pcifunc, int index)
+{
+ struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ struct nix_txvlan *vlan;
+
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ vlan = &nix_hw->txvlan;
+ if (vlan->entry2pfvf_map[index] != pcifunc)
+ return NIX_AF_ERR_PARAM;
+
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);
+
+ vlan->entry2pfvf_map[index] = 0;
+ rvu_free_rsrc(&vlan->rsrc, index);
+
+ return 0;
+}
+
+static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
+{
+ struct nix_txvlan *vlan;
+ struct nix_hw *nix_hw;
+ int index, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return;
+
+ vlan = &nix_hw->txvlan;
+
+ mutex_lock(&vlan->rsrc_lock);
+ /* Scan all the entries and free the ones mapped to 'pcifunc' */
+ for (index = 0; index < vlan->rsrc.max; index++) {
+ if (vlan->entry2pfvf_map[index] == pcifunc)
+ nix_tx_vtag_free(rvu, blkaddr, pcifunc, index);
+ }
+ mutex_unlock(&vlan->rsrc_lock);
+}
+
+static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
+ u64 vtag, u8 size)
+{
+ struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ struct nix_txvlan *vlan;
+ u64 regval;
+ int index;
+
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ vlan = &nix_hw->txvlan;
+
+ mutex_lock(&vlan->rsrc_lock);
+
+ index = rvu_alloc_rsrc(&vlan->rsrc);
+ if (index < 0) {
+ mutex_unlock(&vlan->rsrc_lock);
+ return index;
+ }
+
+ mutex_unlock(&vlan->rsrc_lock);
+
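+ /* A 4-byte vtag (VTAGSIZE_T4) is placed in the upper 32 bits of DEFX_DATA */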
+ regval = size ? vtag : vtag << 32;
+
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_VTAG_DEFX_DATA(index), regval);
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_VTAG_DEFX_CTL(index), size);
+
+ return index;
+}
+
+static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
+ struct nix_vtag_config *req)
+{
+ struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ u16 pcifunc = req->hdr.pcifunc;
+ int idx0 = req->tx.vtag0_idx;
+ int idx1 = req->tx.vtag1_idx;
+ struct nix_txvlan *vlan;
+ int err = 0;
+
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ vlan = &nix_hw->txvlan;
+ if (req->tx.free_vtag0 && req->tx.free_vtag1)
+ if (vlan->entry2pfvf_map[idx0] != pcifunc ||
+ vlan->entry2pfvf_map[idx1] != pcifunc)
+ return NIX_AF_ERR_PARAM;
+
+ mutex_lock(&vlan->rsrc_lock);
+
+ if (req->tx.free_vtag0) {
+ err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0);
+ if (err)
+ goto exit;
+ }
+
+ if (req->tx.free_vtag1)
+ err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1);
+
+exit:
+ mutex_unlock(&vlan->rsrc_lock);
+ return err;
+}
+
+static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
+ struct nix_vtag_config *req,
+ struct nix_vtag_config_rsp *rsp)
+{
+ struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ struct nix_txvlan *vlan;
+ u16 pcifunc = req->hdr.pcifunc;
+
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ vlan = &nix_hw->txvlan;
+ if (req->tx.cfg_vtag0) {
+ rsp->vtag0_idx =
+ nix_tx_vtag_alloc(rvu, blkaddr,
+ req->tx.vtag0, req->vtag_size);
+
+ if (rsp->vtag0_idx < 0)
+ return NIX_AF_ERR_TX_VTAG_NOSPC;
+
+ vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc;
+ }
+
+ if (req->tx.cfg_vtag1) {
+ rsp->vtag1_idx =
+ nix_tx_vtag_alloc(rvu, blkaddr,
+ req->tx.vtag1, req->vtag_size);
+
+ if (rsp->vtag1_idx < 0)
+ goto err_free;
+
+ vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc;
+ }
+
+ return 0;
+
+err_free:
+ if (req->tx.cfg_vtag0)
+ nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx);
+
+ return NIX_AF_ERR_TX_VTAG_NOSPC;
+}
+
+int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
+ struct nix_vtag_config *req,
+ struct nix_vtag_config_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, nixlf, err;
+
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return err;
+
+ if (req->cfg_type) {
+ /* rx vtag configuration */
+ err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
+ if (err)
+ return NIX_AF_ERR_PARAM;
+ } else {
+ /* tx vtag configuration */
+ if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) &&
+ (req->tx.free_vtag0 || req->tx.free_vtag1))
+ return NIX_AF_ERR_PARAM;
+
+ if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1)
+ return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp);
+
+ if (req->tx.free_vtag0 || req->tx.free_vtag1)
+ return nix_tx_vtag_decfg(rvu, blkaddr, req);
+ }
+
+ return 0;
+}
+
+static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
+ int mce, u8 op, u16 pcifunc, int next, bool eol)
+{
+ struct nix_aq_enq_req aq_req;
+ int err;
+
+ aq_req.hdr.pcifunc = 0;
+ aq_req.ctype = NIX_AQ_CTYPE_MCE;
+ aq_req.op = op;
+ aq_req.qidx = mce;
+
+ /* Use RSS with RSS index 0 */
+ aq_req.mce.op = 1;
+ aq_req.mce.index = 0;
+ aq_req.mce.eol = eol;
+ aq_req.mce.pf_func = pcifunc;
+ aq_req.mce.next = next;
+
+ /* All fields valid */
+ *(u64 *)(&aq_req.mce_mask) = ~0ULL;
+
+ err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
+ if (err) {
+ dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
+ rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
+ return err;
+ }
+ return 0;
+}
+
+static int nix_update_mce_list_entry(struct nix_mce_list *mce_list,
+ u16 pcifunc, bool add)
+{
+ struct mce *mce, *tail = NULL;
+ bool delete = false;
+
+ /* Scan through the current list */
+ hlist_for_each_entry(mce, &mce_list->head, node) {
+ /* If already exists, then delete */
+ if (mce->pcifunc == pcifunc && !add) {
+ delete = true;
+ break;
+ } else if (mce->pcifunc == pcifunc && add) {
+ /* entry already exists */
+ return 0;
+ }
+ tail = mce;
+ }
+
+ if (delete) {
+ hlist_del(&mce->node);
+ kfree(mce);
+ mce_list->count--;
+ return 0;
+ }
+
+ if (!add)
+ return 0;
+
+ /* Add a new one to the list, at the tail */
+ mce = kzalloc(sizeof(*mce), GFP_KERNEL);
+ if (!mce)
+ return -ENOMEM;
+ mce->pcifunc = pcifunc;
+ if (!tail)
+ hlist_add_head(&mce->node, &mce_list->head);
+ else
+ hlist_add_behind(&mce->node, &tail->node);
+ mce_list->count++;
+ return 0;
+}
+
+int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
+ struct nix_mce_list *mce_list,
+ int mce_idx, int mcam_index, bool add)
+{
+ int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct nix_mcast *mcast;
+ struct nix_hw *nix_hw;
+ struct mce *mce;
+
+ if (!mce_list)
+ return -EINVAL;
+
+ /* Get this PF/VF func's MCE index */
+ idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
+
+ if (idx > (mce_idx + mce_list->max)) {
+ dev_err(rvu->dev,
+ "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
+ __func__, idx, mce_list->max,
+ pcifunc >> RVU_PFVF_PF_SHIFT);
+ return -EINVAL;
+ }
+
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mcast = &nix_hw->mcast;
+ mutex_lock(&mcast->mce_lock);
+
+ err = nix_update_mce_list_entry(mce_list, pcifunc, add);
+ if (err)
+ goto end;
+
+ /* Disable MCAM entry in NPC */
+ if (!mce_list->count) {
+ npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false);
+ goto end;
+ }
+
+ /* Dump the updated list to HW */
+ idx = mce_idx;
+ last_idx = idx + mce_list->count - 1;
+ hlist_for_each_entry(mce, &mce_list->head, node) {
+ if (idx > last_idx)
+ break;
+
+ next_idx = idx + 1;
+ /* EOL should be set in last MCE */
+ err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
+ mce->pcifunc, next_idx,
+ (next_idx > last_idx) ? true : false);
+ if (err)
+ goto end;
+ idx++;
+ }
+
+end:
+ mutex_unlock(&mcast->mce_lock);
+ return err;
+}
+
+void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
+ struct nix_mce_list **mce_list, int *mce_idx)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_pfvf *pfvf;
+
+ if (!hw->cap.nix_rx_multicast ||
+ !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) {
+ *mce_list = NULL;
+ *mce_idx = 0;
+ return;
+ }
+
+ /* Get this PF/VF func's MCE index */
+ pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
+
+ if (type == NIXLF_BCAST_ENTRY) {
+ *mce_list = &pfvf->bcast_mce_list;
+ *mce_idx = pfvf->bcast_mce_idx;
+ } else if (type == NIXLF_ALLMULTI_ENTRY) {
+ *mce_list = &pfvf->mcast_mce_list;
+ *mce_idx = pfvf->mcast_mce_idx;
+ } else if (type == NIXLF_PROMISC_ENTRY) {
+ *mce_list = &pfvf->promisc_mce_list;
+ *mce_idx = pfvf->promisc_mce_idx;
+ } else {
+ *mce_list = NULL;
+ *mce_idx = 0;
+ }
+}
+
+static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
+ int type, bool add)
+{
+ int err = 0, nixlf, blkaddr, mcam_index, mce_idx;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_mce_list *mce_list;
+ int pf;
+
+ /* skip multicast pkt replication for AF's VFs & SDP links */
+ if (is_afvf(pcifunc) || is_sdp_pfvf(pcifunc))
+ return 0;
+
+ if (!hw->cap.nix_rx_multicast)
+ return 0;
+
+ pf = rvu_get_pf(pcifunc);
+ if (!is_pf_cgxmapped(rvu, pf))
+ return 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return -EINVAL;
+
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return -EINVAL;
+
+ nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);
+
+ mcam_index = npc_get_nixlf_mcam_index(mcam,
+ pcifunc & ~RVU_PFVF_FUNC_MASK,
+ nixlf, type);
+ err = nix_update_mce_list(rvu, pcifunc, mce_list,
+ mce_idx, mcam_index, add);
+ return err;
+}
+
+static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
+{
+ struct nix_mcast *mcast = &nix_hw->mcast;
+ int err, pf, numvfs, idx;
+ struct rvu_pfvf *pfvf;
+ u16 pcifunc;
+ u64 cfg;
+
+	/* Skip PF0 (i.e. AF) */
+ for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+ /* If PF is not enabled, nothing to do */
+ if (!((cfg >> 20) & 0x01))
+ continue;
+ /* Get numVFs attached to this PF */
+ numvfs = (cfg >> 12) & 0xFF;
+
+ pfvf = &rvu->pf[pf];
+
+		/* Is this NIX0/1 block mapped to this PF? */
+ if (pfvf->nix_blkaddr != nix_hw->blkaddr)
+ continue;
+
+ /* save start idx of broadcast mce list */
+ pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
+ nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
+
+ /* save start idx of multicast mce list */
+ pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
+ nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1);
+
+ /* save the start idx of promisc mce list */
+ pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
+ nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1);
+
+ for (idx = 0; idx < (numvfs + 1); idx++) {
+ /* idx-0 is for PF, followed by VFs */
+ pcifunc = (pf << RVU_PFVF_PF_SHIFT);
+ pcifunc |= idx;
+ /* Add dummy entries now, so that we don't have to check
+			 * whether the AQ_OP should be INIT or WRITE later on.
+ * Will be updated when a NIXLF is attached/detached to
+ * these PF/VFs.
+ */
+ err = nix_blk_setup_mce(rvu, nix_hw,
+ pfvf->bcast_mce_idx + idx,
+ NIX_AQ_INSTOP_INIT,
+ pcifunc, 0, true);
+ if (err)
+ return err;
+
+ /* add dummy entries to multicast mce list */
+ err = nix_blk_setup_mce(rvu, nix_hw,
+ pfvf->mcast_mce_idx + idx,
+ NIX_AQ_INSTOP_INIT,
+ pcifunc, 0, true);
+ if (err)
+ return err;
+
+ /* add dummy entries to promisc mce list */
+ err = nix_blk_setup_mce(rvu, nix_hw,
+ pfvf->promisc_mce_idx + idx,
+ NIX_AQ_INSTOP_INIT,
+ pcifunc, 0, true);
+ if (err)
+ return err;
+ }
+ }
+ return 0;
+}
+
+static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
+{
+ struct nix_mcast *mcast = &nix_hw->mcast;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int err, size;
+
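+	/* MCE list entry size is advertised as log2(bytes) in
+	 * NIX_AF_CONST3[19:16].
+	 */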
+ size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
+ size = (1ULL << size);
+
+ /* Alloc memory for multicast/mirror replication entries */
+ err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
+ (256UL << MC_TBL_SIZE), size);
+ if (err)
+ return -ENOMEM;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
+ (u64)mcast->mce_ctx->iova);
+
+ /* Set max list length equal to max no of VFs per PF + PF itself */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
+ BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
+
+ /* Alloc memory for multicast replication buffers */
+ size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
+ err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
+ (8UL << MC_BUF_CNT), size);
+ if (err)
+ return -ENOMEM;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
+ (u64)mcast->mcast_buf->iova);
+
+ /* Alloc pkind for NIX internal RX multicast/mirror replay */
+ mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
+ BIT_ULL(63) | (mcast->replay_pkind << 24) |
+ BIT_ULL(20) | MC_BUF_CNT);
+
+ mutex_init(&mcast->mce_lock);
+
+ return nix_setup_mce_tables(rvu, nix_hw);
+}
+
+static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
+{
+ struct nix_txvlan *vlan = &nix_hw->txvlan;
+ int err;
+
+	/* Allocate resource bitmap for tx vtag def registers */
+ vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
+ err = rvu_alloc_bitmap(&vlan->rsrc);
+ if (err)
+ return -ENOMEM;
+
+ /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
+ vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!vlan->entry2pfvf_map)
+ goto free_mem;
+
+ mutex_init(&vlan->rsrc_lock);
+ return 0;
+
+free_mem:
+ kfree(vlan->rsrc.bmap);
+ return -ENOMEM;
+}
+
+static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
+{
+ struct nix_txsch *txsch;
+ int err, lvl, schq;
+ u64 cfg, reg;
+
+ /* Get scheduler queue count of each type and alloc
+ * bitmap for each for alloc/free/attach operations.
+ */
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ txsch = &nix_hw->txsch[lvl];
+ txsch->lvl = lvl;
+ switch (lvl) {
+ case NIX_TXSCH_LVL_SMQ:
+ reg = NIX_AF_MDQ_CONST;
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ reg = NIX_AF_TL4_CONST;
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ reg = NIX_AF_TL3_CONST;
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ reg = NIX_AF_TL2_CONST;
+ break;
+ case NIX_TXSCH_LVL_TL1:
+ reg = NIX_AF_TL1_CONST;
+ break;
+ }
+ cfg = rvu_read64(rvu, blkaddr, reg);
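+		/* Queue count for this level is in the lower 16 bits of
+		 * the per-level *_CONST register.
+		 */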
+ txsch->schq.max = cfg & 0xFFFF;
+ err = rvu_alloc_bitmap(&txsch->schq);
+ if (err)
+ return err;
+
+ /* Allocate memory for scheduler queues to
+ * PF/VF pcifunc mapping info.
+ */
+ txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
+ sizeof(u32), GFP_KERNEL);
+ if (!txsch->pfvf_map)
+ return -ENOMEM;
+ for (schq = 0; schq < txsch->schq.max; schq++)
+ txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
+ }
+
+ /* Setup a default value of 8192 as DWRR MTU */
+ if (rvu->hw->cap.nix_common_dwrr_mtu) {
+ rvu_write64(rvu, blkaddr, NIX_AF_DWRR_RPM_MTU,
+ convert_bytes_to_dwrr_mtu(8192));
+ rvu_write64(rvu, blkaddr, NIX_AF_DWRR_SDP_MTU,
+ convert_bytes_to_dwrr_mtu(8192));
+ }
+
+ return 0;
+}
+
+int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
+ int blkaddr, u32 cfg)
+{
+ int fmt_idx;
+
+ for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
+ if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
+ return fmt_idx;
+ }
+ if (fmt_idx >= nix_hw->mark_format.total)
+ return -ERANGE;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
+ nix_hw->mark_format.cfg[fmt_idx] = cfg;
+ nix_hw->mark_format.in_use++;
+ return fmt_idx;
+}
+
+static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
+ int blkaddr)
+{
+ u64 cfgs[] = {
+ [NIX_MARK_CFG_IP_DSCP_RED] = 0x10003,
+ [NIX_MARK_CFG_IP_DSCP_YELLOW] = 0x11200,
+ [NIX_MARK_CFG_IP_DSCP_YELLOW_RED] = 0x11203,
+ [NIX_MARK_CFG_IP_ECN_RED] = 0x6000c,
+ [NIX_MARK_CFG_IP_ECN_YELLOW] = 0x60c00,
+ [NIX_MARK_CFG_IP_ECN_YELLOW_RED] = 0x60c0c,
+ [NIX_MARK_CFG_VLAN_DEI_RED] = 0x30008,
+ [NIX_MARK_CFG_VLAN_DEI_YELLOW] = 0x30800,
+ [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
+ };
+ int i, rc;
+ u64 total;
+
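+	/* NIX_AF_PSE_CONST[15:8] advertises the number of mark format
+	 * entries supported by HW.
+	 */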
+ total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
+ nix_hw->mark_format.total = (u8)total;
+ nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
+ GFP_KERNEL);
+ if (!nix_hw->mark_format.cfg)
+ return -ENOMEM;
+ for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
+ rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
+ if (rc < 0)
+ dev_err(rvu->dev, "Err %d in setup mark format %d\n",
+ i, rc);
+ }
+
+ return 0;
+}
+
+static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu)
+{
+ /* CN10K supports LBK FIFO size 72 KB */
+ if (rvu->hw->lbk_bufsize == 0x12000)
+ *max_mtu = CN10K_LBK_LINK_MAX_FRS;
+ else
+ *max_mtu = NIC_HW_MAX_FRS;
+}
+
+static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
+{
+ /* RPM supports FIFO len 128 KB */
+ if (rvu_cgx_get_fifolen(rvu) == 0x20000)
+ *max_mtu = CN10K_LMAC_LINK_MAX_FRS;
+ else
+ *max_mtu = NIC_HW_MAX_FRS;
+}
+
+int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
+ struct nix_hw_info *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ u64 dwrr_mtu;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ if (is_afvf(pcifunc))
+ rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
+ else
+ rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
+
+ rsp->min_mtu = NIC_HW_MIN_FRS;
+
+ if (!rvu->hw->cap.nix_common_dwrr_mtu) {
+ /* Return '1' on OTx2 */
+ rsp->rpm_dwrr_mtu = 1;
+ rsp->sdp_dwrr_mtu = 1;
+ return 0;
+ }
+
+ dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_RPM_MTU);
+ rsp->rpm_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
+
+ dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_SDP_MTU);
+ rsp->sdp_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
+
+ return 0;
+}
+
+int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int i, nixlf, blkaddr, err;
+ u64 stats;
+
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return err;
+
+ /* Get stats count supported by HW */
+ stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+
+ /* Reset tx stats */
+ for (i = 0; i < ((stats >> 24) & 0xFF); i++)
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
+
+ /* Reset rx stats */
+ for (i = 0; i < ((stats >> 32) & 0xFF); i++)
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
+
+ return 0;
+}
+
+/* Returns the ALG index to be set into NPC_RX_ACTION */
+static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
+{
+ int i;
+
+	/* Scan over existing algo entries to find a match */
+ for (i = 0; i < nix_hw->flowkey.in_use; i++)
+ if (nix_hw->flowkey.flowkey[i] == flow_cfg)
+ return i;
+
+ return -ERANGE;
+}
+
+static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
+{
+ int idx, nr_field, key_off, field_marker, keyoff_marker;
+ int max_key_off, max_bit_pos, group_member;
+ struct nix_rx_flowkey_alg *field;
+ struct nix_rx_flowkey_alg tmp;
+ u32 key_type, valid_key;
+ int l4_key_offset = 0;
+
+ if (!alg)
+ return -EINVAL;
+
+#define FIELDS_PER_ALG 5
+#define MAX_KEY_OFF 40
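+	/* Each flow key algorithm is described by FIELDS_PER_ALG 64-bit field
+	 * extractors and builds a hash key of at most MAX_KEY_OFF bytes.
+	 */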
+ /* Clear all fields */
+ memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
+
+ /* Each of the 32 possible flow key algorithm definitions should
+	 * fall into the above incremental config (except ALG0). Otherwise a
+ * single NPC MCAM entry is not sufficient for supporting RSS.
+ *
+	 * If a different definition or combination is needed then the NPC MCAM
+	 * has to be programmed to filter such pkts and its action should
+	 * point to this definition to calculate the flowtag or hash.
+ *
+	 * The `for loop` goes over _all_ protocol fields and the following
+ * variables depicts the state machine forward progress logic.
+ *
+ * keyoff_marker - Enabled when hash byte length needs to be accounted
+ * in field->key_offset update.
+ * field_marker - Enabled when a new field needs to be selected.
+ * group_member - Enabled when protocol is part of a group.
+ */
+
+ keyoff_marker = 0; max_key_off = 0; group_member = 0;
+ nr_field = 0; key_off = 0; field_marker = 1;
+ field = &tmp; max_bit_pos = fls(flow_cfg);
+ for (idx = 0;
+ idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
+ key_off < MAX_KEY_OFF; idx++) {
+ key_type = BIT(idx);
+ valid_key = flow_cfg & key_type;
+ /* Found a field marker, reset the field values */
+ if (field_marker)
+ memset(&tmp, 0, sizeof(tmp));
+
+ field_marker = true;
+ keyoff_marker = true;
+ switch (key_type) {
+ case NIX_FLOW_KEY_TYPE_PORT:
+ field->sel_chan = true;
+ /* This should be set to 1, when SEL_CHAN is set */
+ field->bytesm1 = 1;
+ break;
+ case NIX_FLOW_KEY_TYPE_IPV4_PROTO:
+ field->lid = NPC_LID_LC;
+ field->hdr_offset = 9; /* offset */
+ field->bytesm1 = 0; /* 1 byte */
+ field->ltype_match = NPC_LT_LC_IP;
+ field->ltype_mask = 0xF;
+ break;
+ case NIX_FLOW_KEY_TYPE_IPV4:
+ case NIX_FLOW_KEY_TYPE_INNR_IPV4:
+ field->lid = NPC_LID_LC;
+ field->ltype_match = NPC_LT_LC_IP;
+ if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
+ field->lid = NPC_LID_LG;
+ field->ltype_match = NPC_LT_LG_TU_IP;
+ }
+ field->hdr_offset = 12; /* SIP offset */
+ field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
+ field->ltype_mask = 0xF; /* Match only IPv4 */
+ keyoff_marker = false;
+ break;
+ case NIX_FLOW_KEY_TYPE_IPV6:
+ case NIX_FLOW_KEY_TYPE_INNR_IPV6:
+ field->lid = NPC_LID_LC;
+ field->ltype_match = NPC_LT_LC_IP6;
+ if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
+ field->lid = NPC_LID_LG;
+ field->ltype_match = NPC_LT_LG_TU_IP6;
+ }
+ field->hdr_offset = 8; /* SIP offset */
+ field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
+ field->ltype_mask = 0xF; /* Match only IPv6 */
+ break;
+ case NIX_FLOW_KEY_TYPE_TCP:
+ case NIX_FLOW_KEY_TYPE_UDP:
+ case NIX_FLOW_KEY_TYPE_SCTP:
+ case NIX_FLOW_KEY_TYPE_INNR_TCP:
+ case NIX_FLOW_KEY_TYPE_INNR_UDP:
+ case NIX_FLOW_KEY_TYPE_INNR_SCTP:
+ field->lid = NPC_LID_LD;
+ if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
+ key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
+ key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
+ field->lid = NPC_LID_LH;
+ field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
+
+			/* The ltype enum values for the NPC_LID_LD and NPC_LID_LH
+			 * layers are the same (see the BUILD_BUG_ONs below), so no
+			 * need to change ltype_match, just change the lid for
+			 * inner protocols.
+			 */
+ BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
+ (int)NPC_LT_LH_TU_TCP);
+ BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
+ (int)NPC_LT_LH_TU_UDP);
+ BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
+ (int)NPC_LT_LH_TU_SCTP);
+
+ if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
+ key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
+ valid_key) {
+ field->ltype_match |= NPC_LT_LD_TCP;
+ group_member = true;
+ } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
+ key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
+ valid_key) {
+ field->ltype_match |= NPC_LT_LD_UDP;
+ group_member = true;
+ } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
+ key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
+ valid_key) {
+ field->ltype_match |= NPC_LT_LD_SCTP;
+ group_member = true;
+ }
+ field->ltype_mask = ~field->ltype_match;
+ if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
+ key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
+				/* Handle the case where some member of the
+				 * group is enabled but not the final one
+ */
+ if (group_member) {
+ valid_key = true;
+ group_member = false;
+ }
+ } else {
+ field_marker = false;
+ keyoff_marker = false;
+ }
+
+			/* TCP/UDP/SCTP and ESP/AH fall at the same offset, so
+			 * remember the TCP key offset within the 40-byte hash key.
+ */
+ if (key_type == NIX_FLOW_KEY_TYPE_TCP)
+ l4_key_offset = key_off;
+ break;
+ case NIX_FLOW_KEY_TYPE_NVGRE:
+ field->lid = NPC_LID_LD;
+ field->hdr_offset = 4; /* VSID offset */
+ field->bytesm1 = 2;
+ field->ltype_match = NPC_LT_LD_NVGRE;
+ field->ltype_mask = 0xF;
+ break;
+ case NIX_FLOW_KEY_TYPE_VXLAN:
+ case NIX_FLOW_KEY_TYPE_GENEVE:
+ field->lid = NPC_LID_LE;
+ field->bytesm1 = 2;
+ field->hdr_offset = 4;
+ field->ltype_mask = 0xF;
+ field_marker = false;
+ keyoff_marker = false;
+
+ if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
+ field->ltype_match |= NPC_LT_LE_VXLAN;
+ group_member = true;
+ }
+
+ if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
+ field->ltype_match |= NPC_LT_LE_GENEVE;
+ group_member = true;
+ }
+
+ if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
+ if (group_member) {
+ field->ltype_mask = ~field->ltype_match;
+ field_marker = true;
+ keyoff_marker = true;
+ valid_key = true;
+ group_member = false;
+ }
+ }
+ break;
+ case NIX_FLOW_KEY_TYPE_ETH_DMAC:
+ case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
+ field->lid = NPC_LID_LA;
+ field->ltype_match = NPC_LT_LA_ETHER;
+ if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
+ field->lid = NPC_LID_LF;
+ field->ltype_match = NPC_LT_LF_TU_ETHER;
+ }
+ field->hdr_offset = 0;
+ field->bytesm1 = 5; /* DMAC 6 Byte */
+ field->ltype_mask = 0xF;
+ break;
+ case NIX_FLOW_KEY_TYPE_IPV6_EXT:
+ field->lid = NPC_LID_LC;
+ field->hdr_offset = 40; /* IPV6 hdr */
+			field->bytesm1 = 0; /* 1 Byte ext hdr */
+ field->ltype_match = NPC_LT_LC_IP6_EXT;
+ field->ltype_mask = 0xF;
+ break;
+ case NIX_FLOW_KEY_TYPE_GTPU:
+ field->lid = NPC_LID_LE;
+ field->hdr_offset = 4;
+			field->bytesm1 = 3; /* 4 bytes TID */
+ field->ltype_match = NPC_LT_LE_GTPU;
+ field->ltype_mask = 0xF;
+ break;
+ case NIX_FLOW_KEY_TYPE_VLAN:
+ field->lid = NPC_LID_LB;
+ field->hdr_offset = 2; /* Skip TPID (2-bytes) */
+ field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */
+ field->ltype_match = NPC_LT_LB_CTAG;
+ field->ltype_mask = 0xF;
+ field->fn_mask = 1; /* Mask out the first nibble */
+ break;
+ case NIX_FLOW_KEY_TYPE_AH:
+ case NIX_FLOW_KEY_TYPE_ESP:
+ field->hdr_offset = 0;
+ field->bytesm1 = 7; /* SPI + sequence number */
+ field->ltype_mask = 0xF;
+ field->lid = NPC_LID_LE;
+ field->ltype_match = NPC_LT_LE_ESP;
+ if (key_type == NIX_FLOW_KEY_TYPE_AH) {
+ field->lid = NPC_LID_LD;
+ field->ltype_match = NPC_LT_LD_AH;
+ field->hdr_offset = 4;
+ keyoff_marker = false;
+ }
+ break;
+ }
+ field->ena = 1;
+
+ /* Found a valid flow key type */
+ if (valid_key) {
+ /* Use the key offset of TCP/UDP/SCTP fields
+ * for ESP/AH fields.
+ */
+ if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
+ key_type == NIX_FLOW_KEY_TYPE_AH)
+ key_off = l4_key_offset;
+ field->key_offset = key_off;
+ memcpy(&alg[nr_field], field, sizeof(*field));
+ max_key_off = max(max_key_off, field->bytesm1 + 1);
+
+ /* Found a field marker, get the next field */
+ if (field_marker)
+ nr_field++;
+ }
+
+ /* Found a keyoff marker, update the new key_off */
+ if (keyoff_marker) {
+ key_off += max_key_off;
+ max_key_off = 0;
+ }
+ }
+ /* Processed all the flow key types */
+ if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
+ return 0;
+ else
+ return NIX_AF_ERR_RSS_NOSPC_FIELD;
+}
+
+static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
+{
+ u64 field[FIELDS_PER_ALG];
+ struct nix_hw *hw;
+ int fid, rc;
+
+ hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+	/* No room to add a new flow hash algorithm */
+ if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
+ return NIX_AF_ERR_RSS_NOSPC_ALGO;
+
+ /* Generate algo fields for the given flow_cfg */
+ rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
+ if (rc)
+ return rc;
+
+ /* Update ALGX_FIELDX register with generated fields */
+ for (fid = 0; fid < FIELDS_PER_ALG; fid++)
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
+ fid), field[fid]);
+
+	/* Store the flow_cfg for further lookup */
+ rc = hw->flowkey.in_use;
+ hw->flowkey.flowkey[rc] = flow_cfg;
+ hw->flowkey.in_use++;
+
+ return rc;
+}
+
+int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
+ struct nix_rss_flowkey_cfg *req,
+ struct nix_rss_flowkey_cfg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int alg_idx, nixlf, blkaddr;
+ struct nix_hw *nix_hw;
+ int err;
+
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return err;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
+	/* Failed to get an algo index from the existing list, reserve a new one */
+ if (alg_idx < 0) {
+ alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
+ req->flowkey_cfg);
+ if (alg_idx < 0)
+ return alg_idx;
+ }
+ rsp->alg_idx = alg_idx;
+ rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
+ alg_idx, req->mcam_index);
+ return 0;
+}
+
+static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
+{
+ u32 flowkey_cfg, minkey_cfg;
+ int alg, fid, rc;
+
+ /* Disable all flow key algx fieldx */
+ for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
+ for (fid = 0; fid < FIELDS_PER_ALG; fid++)
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
+ 0);
+ }
+
+ /* IPv4/IPv6 SIP/DIPs */
+ flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
+ rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+ if (rc < 0)
+ return rc;
+
+ /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
+ minkey_cfg = flowkey_cfg;
+ flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
+ rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+ if (rc < 0)
+ return rc;
+
+ /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
+ flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
+ rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+ if (rc < 0)
+ return rc;
+
+ /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
+ flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
+ rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+ if (rc < 0)
+ return rc;
+
+ /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
+ flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
+ NIX_FLOW_KEY_TYPE_UDP;
+ rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+ if (rc < 0)
+ return rc;
+
+ /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
+ flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
+ NIX_FLOW_KEY_TYPE_SCTP;
+ rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+ if (rc < 0)
+ return rc;
+
+ /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
+ flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
+ NIX_FLOW_KEY_TYPE_SCTP;
+ rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+ if (rc < 0)
+ return rc;
+
+ /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
+ flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
+ NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
+ rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
+ struct nix_set_mac_addr *req,
+ struct msg_rsp *rsp)
+{
+ bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, nixlf, err;
+ struct rvu_pfvf *pfvf;
+
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return err;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ /* untrusted VF can't overwrite admin(PF) changes */
+ if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
+ (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) {
+ dev_warn(rvu->dev,
+ "MAC address set by admin(PF) cannot be overwritten by untrusted VF");
+ return -EPERM;
+ }
+
+ ether_addr_copy(pfvf->mac_addr, req->mac_addr);
+
+ rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
+ pfvf->rx_chan_base, req->mac_addr);
+
+ if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
+ ether_addr_copy(pfvf->default_mac, req->mac_addr);
+
+ rvu_switch_update_rules(rvu, pcifunc);
+
+ return 0;
+}
+
+int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
+ struct msg_req *req,
+ struct nix_get_mac_addr_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+
+ if (!is_nixlf_attached(rvu, pcifunc))
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
+
+ return 0;
+}
+
+int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
+ struct msg_rsp *rsp)
+{
+ bool allmulti, promisc, nix_rx_multicast;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ int nixlf, err;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false;
+ allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false;
+ pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? true : false;
+
+ nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list;
+
+ if (is_vf(pcifunc) && !nix_rx_multicast &&
+ (promisc || allmulti)) {
+ dev_warn_ratelimited(rvu->dev,
+ "VF promisc/multicast not supported\n");
+ return 0;
+ }
+
+ /* untrusted VF can't configure promisc/allmulti */
+ if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
+ (promisc || allmulti))
+ return 0;
+
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
+ if (err)
+ return err;
+
+ if (nix_rx_multicast) {
+ /* add/del this PF_FUNC to/from mcast pkt replication list */
+ err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY,
+ allmulti);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to update pcifunc 0x%x to multicast list\n",
+ pcifunc);
+ return err;
+ }
+
+ /* add/del this PF_FUNC to/from promisc pkt replication list */
+ err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY,
+ promisc);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to update pcifunc 0x%x to promisc list\n",
+ pcifunc);
+ return err;
+ }
+ }
+
+ /* install/uninstall allmulti entry */
+ if (allmulti) {
+ rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf,
+ pfvf->rx_chan_base);
+ } else {
+ if (!nix_rx_multicast)
+ rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false);
+ }
+
+ /* install/uninstall promisc entry */
+ if (promisc)
+ rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
+ pfvf->rx_chan_base,
+ pfvf->rx_chan_cnt);
+ else
+ if (!nix_rx_multicast)
+ rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
+
+ return 0;
+}
+
+static void nix_find_link_frs(struct rvu *rvu,
+ struct nix_frs_cfg *req, u16 pcifunc)
+{
+ int pf = rvu_get_pf(pcifunc);
+ struct rvu_pfvf *pfvf;
+ int maxlen, minlen;
+ int numvfs, hwvf;
+ int vf;
+
+ /* Update with requester's min/max lengths */
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ pfvf->maxlen = req->maxlen;
+ if (req->update_minlen)
+ pfvf->minlen = req->minlen;
+
+ maxlen = req->maxlen;
+ minlen = req->update_minlen ? req->minlen : 0;
+
+ /* Get this PF's numVFs and starting hwvf */
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
+
+ /* For each VF, compare requested max/minlen */
+ for (vf = 0; vf < numvfs; vf++) {
+ pfvf = &rvu->hwvf[hwvf + vf];
+ if (pfvf->maxlen > maxlen)
+ maxlen = pfvf->maxlen;
+ if (req->update_minlen &&
+ pfvf->minlen && pfvf->minlen < minlen)
+ minlen = pfvf->minlen;
+ }
+
+ /* Compare requested max/minlen with PF's max/minlen */
+ pfvf = &rvu->pf[pf];
+ if (pfvf->maxlen > maxlen)
+ maxlen = pfvf->maxlen;
+ if (req->update_minlen &&
+ pfvf->minlen && pfvf->minlen < minlen)
+ minlen = pfvf->minlen;
+
+	/* Update the request with the max/min of the PF and its VFs */
+ req->maxlen = maxlen;
+ if (req->update_minlen)
+ req->minlen = minlen;
+}
+
+int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int pf = rvu_get_pf(pcifunc);
+ int blkaddr, link = -1;
+ struct nix_hw *nix_hw;
+ struct rvu_pfvf *pfvf;
+ u8 cgx = 0, lmac = 0;
+ u16 max_mtu;
+ u64 cfg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ if (is_afvf(pcifunc))
+ rvu_get_lbk_link_max_frs(rvu, &max_mtu);
+ else
+ rvu_get_lmac_link_max_frs(rvu, &max_mtu);
+
+ if (!req->sdp_link && req->maxlen > max_mtu)
+ return NIX_AF_ERR_FRS_INVALID;
+
+ if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
+ return NIX_AF_ERR_FRS_INVALID;
+
+ /* Check if config is for SDP link */
+ if (req->sdp_link) {
+ if (!hw->sdp_links)
+ return NIX_AF_ERR_RX_LINK_INVALID;
+ link = hw->cgx_links + hw->lbk_links;
+ goto linkcfg;
+ }
+
+ /* Check if the request is from CGX mapped RVU PF */
+ if (is_pf_cgxmapped(rvu, pf)) {
+ /* Get CGX and LMAC to which this PF is mapped and find link */
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
+ link = (cgx * hw->lmac_per_cgx) + lmac;
+ } else if (pf == 0) {
+ /* For VFs of PF0 ingress is LBK port, so config LBK link */
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ link = hw->cgx_links + pfvf->lbkid;
+ }
+
+ if (link < 0)
+ return NIX_AF_ERR_RX_LINK_INVALID;
+
+linkcfg:
+ nix_find_link_frs(rvu, req, pcifunc);
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
+ cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
+ if (req->update_minlen)
+ cfg = (cfg & ~0xFFFFULL) | req->minlen;
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
+
+ return 0;
+}
+
+int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
+ struct msg_rsp *rsp)
+{
+ int nixlf, blkaddr, err;
+ u64 cfg;
+
+ err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return err;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
+ /* Set the interface configuration */
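+	/* len_verify bits 0/1 map to outer L3/L4 length verification
+	 * (bits 41/40) and csum_verify bit 0 to outer L4 checksum
+	 * verification (bit 37).
+	 */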
+ if (req->len_verify & BIT(0))
+ cfg |= BIT_ULL(41);
+ else
+ cfg &= ~BIT_ULL(41);
+
+ if (req->len_verify & BIT(1))
+ cfg |= BIT_ULL(40);
+ else
+ cfg &= ~BIT_ULL(40);
+
+ if (req->csum_verify & BIT(0))
+ cfg |= BIT_ULL(37);
+ else
+ cfg &= ~BIT_ULL(37);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
+
+ return 0;
+}
+
+static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
+{
+ return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
+}
+
+static void nix_link_config(struct rvu *rvu, int blkaddr,
+ struct nix_hw *nix_hw)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int cgx, lmac_cnt, slink, link;
+ u16 lbk_max_frs, lmac_max_frs;
+ unsigned long lmac_bmap;
+ u64 tx_credits, cfg;
+ u64 lmac_fifo_len;
+ int iter;
+
+ rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
+ rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
+
+ /* Set default min/max packet lengths allowed on NIX Rx links.
+ *
+	 * With the HW reset minlen value of 60 bytes, HW would treat ARP pkts
+	 * as undersize and report them to SW as error pkts, hence
+	 * set it to 40 bytes.
+ */
+ for (link = 0; link < hw->cgx_links; link++) {
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
+ ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS);
+ }
+
+ for (link = hw->cgx_links; link < hw->lbk_links; link++) {
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
+ ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
+ }
+ if (hw->sdp_links) {
+ link = hw->cgx_links + hw->lbk_links;
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
+ SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
+ }
+
+ /* Get MCS external bypass status for CN10K-B */
+ if (mcs_get_blkcnt() == 1) {
+ /* Adjust for 2 credits when external bypass is disabled */
+ nix_hw->cc_mcs_cnt = is_mcs_bypass(0) ? 0 : 2;
+ }
+
+ /* Set credits for Tx links assuming max packet length allowed.
+ * This will be reconfigured based on MTU set for PF/VF.
+ */
+ for (cgx = 0; cgx < hw->cgx; cgx++) {
+ lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
+ /* Skip when cgx is not available or lmac cnt is zero */
+ if (lmac_cnt <= 0)
+ continue;
+ slink = cgx * hw->lmac_per_cgx;
+
+ /* Get LMAC id's from bitmap */
+ lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
+ for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
+ lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter);
+ if (!lmac_fifo_len) {
+ dev_err(rvu->dev,
+ "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n",
+ __func__, cgx, iter);
+ continue;
+ }
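+			/* Credits are in units of 16 bytes; reserve room for
+			 * one max-sized frame in the LMAC FIFO.
+			 */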
+ tx_credits = (lmac_fifo_len - lmac_max_frs) / 16;
+ /* Enable credits and set credit pkt count to max allowed */
+ cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
+ cfg |= FIELD_PREP(NIX_AF_LINKX_MCS_CNT_MASK, nix_hw->cc_mcs_cnt);
+
+ link = iter + slink;
+ nix_hw->tx_credits[link] = tx_credits;
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
+ }
+ }
+
+ /* Set Tx credits for LBK link */
+ slink = hw->cgx_links;
+ for (link = slink; link < (slink + hw->lbk_links); link++) {
+ tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs);
+ nix_hw->tx_credits[link] = tx_credits;
+ /* Enable credits and set credit pkt count to max allowed */
+ tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
+ }
+}
+
+static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
+{
+ int idx, err;
+ u64 status;
+
+ /* Start X2P bus calibration */
+ rvu_write64(rvu, blkaddr, NIX_AF_CFG,
+ rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
+ /* Wait for calibration to complete */
+ err = rvu_poll_reg(rvu, blkaddr,
+ NIX_AF_STATUS, BIT_ULL(10), false);
+ if (err) {
+ dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
+ return err;
+ }
+
+ status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
+ /* Check if CGX devices are ready */
+ for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
+ /* Skip when cgx port is not available */
+ if (!rvu_cgx_pdata(idx, rvu) ||
+ (status & (BIT_ULL(16 + idx))))
+ continue;
+ dev_err(rvu->dev,
+ "CGX%d didn't respond to NIX X2P calibration\n", idx);
+ err = -EBUSY;
+ }
+
+ /* Check if LBK is ready */
+ if (!(status & BIT_ULL(19))) {
+ dev_err(rvu->dev,
+ "LBK didn't respond to NIX X2P calibration\n");
+ err = -EBUSY;
+ }
+
+ /* Clear 'calibrate_x2p' bit */
+ rvu_write64(rvu, blkaddr, NIX_AF_CFG,
+ rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
+ if (err || (status & 0x3FFULL))
+ dev_err(rvu->dev,
+ "NIX X2P calibration failed, status 0x%llx\n", status);
+ if (err)
+ return err;
+ return 0;
+}
+
+static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
+{
+ u64 cfg;
+ int err;
+
+ /* Set admin queue endianness */
+ cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
+#ifdef __BIG_ENDIAN
+ cfg |= BIT_ULL(8);
+ rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
+#else
+ cfg &= ~BIT_ULL(8);
+ rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
+#endif
+
+ /* Do not bypass NDC cache */
+ cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
+ cfg &= ~0x3FFEULL;
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
+ /* Disable caching of SQB aka SQEs */
+ cfg |= 0x04ULL;
+#endif
+ rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
+
+ /* Result structure can be followed by RQ/SQ/CQ context at
+	 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
+ * operation type. Alloc sufficient result memory for all operations.
+ */
+ err = rvu_aq_alloc(rvu, &block->aq,
+ Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
+ ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
+ if (err)
+ return err;
+
+ rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
+ rvu_write64(rvu, block->addr,
+ NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
+ return 0;
+}
+
+static void rvu_nix_setup_capabilities(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 hw_const;
+
+ hw_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+
+ /* On OcteonTx2 DWRR quantum is directly configured into each of
+	 * the transmit scheduler queues, and PF/VF drivers were free to
+	 * configure any value up to 2^24.
+ * On CN10K, HW is modified, the quantum configuration at scheduler
+ * queues is in terms of weight. And SW needs to setup a base DWRR MTU
+ * at NIX_AF_DWRR_RPM_MTU / NIX_AF_DWRR_SDP_MTU. HW will do
+ * 'DWRR MTU * weight' to get the quantum.
+ *
+ * Check if HW uses a common MTU for all DWRR quantum configs.
+ * On OcteonTx2 this register field is '0'.
+ */
+ if (((hw_const >> 56) & 0x10) == 0x10)
+ hw->cap.nix_common_dwrr_mtu = true;
+}
+
+static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
+{
+ const struct npc_lt_def_cfg *ltdefs;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int blkaddr = nix_hw->blkaddr;
+ struct rvu_block *block;
+ int err;
+ u64 cfg;
+
+ block = &hw->block[blkaddr];
+
+ if (is_rvu_96xx_B0(rvu)) {
+ /* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt
+ * internal state when conditional clocks are turned off.
+ * Hence enable them.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_CFG,
+ rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
+
+ /* Set chan/link to backpressure TL3 instead of TL2 */
+ rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
+
+ /* Disable SQ manager's sticky mode operation (set TM6 = 0)
+ * This sticky mode is known to cause SQ stalls when multiple
+		 * SQs are mapped to the same SMQ and transmit pkts at the same time.
+ */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
+ cfg &= ~BIT_ULL(15);
+ rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
+ }
+
+ ltdefs = rvu->kpu.lt_def;
+ /* Calibrate X2P bus to check if CGX/LBK links are fine */
+ err = nix_calibrate_x2p(rvu, blkaddr);
+ if (err)
+ return err;
+
+ /* Setup capabilities of the NIX block */
+ rvu_nix_setup_capabilities(rvu, blkaddr);
+
+ /* Initialize admin queue */
+ err = nix_aq_init(rvu, block);
+ if (err)
+ return err;
+
+ /* Restore CINT timer delay to HW reset values */
+ rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SEB_CFG);
+
+	/* For better performance use NDC TX instead of NDC RX for SQ's SQEs */
+ cfg |= 1ULL;
+ if (!is_rvu_otx2(rvu))
+ cfg |= NIX_PTP_1STEP_EN;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, cfg);
+
+ if (is_block_implemented(hw, blkaddr)) {
+ err = nix_setup_txschq(rvu, nix_hw, blkaddr);
+ if (err)
+ return err;
+
+ err = nix_setup_ipolicers(rvu, nix_hw, blkaddr);
+ if (err)
+ return err;
+
+ err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
+ if (err)
+ return err;
+
+ err = nix_setup_mcast(rvu, nix_hw, blkaddr);
+ if (err)
+ return err;
+
+ err = nix_setup_txvlan(rvu, nix_hw);
+ if (err)
+ return err;
+
+ /* Configure segmentation offload formats */
+ nix_setup_lso(rvu, nix_hw, blkaddr);
+
+ /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
+ * This helps HW protocol checker to identify headers
+ * and validate length and checksums.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
+ (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
+ ltdefs->rx_ol2.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
+ (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
+ ltdefs->rx_oip4.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
+ (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
+ ltdefs->rx_iip4.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
+ (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
+ ltdefs->rx_oip6.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
+ (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
+ ltdefs->rx_iip6.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
+ (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
+ ltdefs->rx_otcp.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
+ (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
+ ltdefs->rx_itcp.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
+ (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
+ ltdefs->rx_oudp.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
+ (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
+ ltdefs->rx_iudp.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
+ (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
+ ltdefs->rx_osctp.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
+ (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
+ ltdefs->rx_isctp.ltype_mask);
+
+ if (!is_rvu_otx2(rvu)) {
+ /* Enable APAD calculation for other protocols
+ * matching APAD0 and APAD1 lt def registers.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0,
+ (ltdefs->rx_apad0.valid << 11) |
+ (ltdefs->rx_apad0.lid << 8) |
+ (ltdefs->rx_apad0.ltype_match << 4) |
+ ltdefs->rx_apad0.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1,
+ (ltdefs->rx_apad1.valid << 11) |
+ (ltdefs->rx_apad1.lid << 8) |
+ (ltdefs->rx_apad1.ltype_match << 4) |
+ ltdefs->rx_apad1.ltype_mask);
+
+			/* Receive ethertype definition register defines layer
+ * information in NPC_RESULT_S to identify the Ethertype
+ * location in L2 header. Used for Ethertype overwriting
+ * in inline IPsec flow.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0),
+ (ltdefs->rx_et[0].offset << 12) |
+ (ltdefs->rx_et[0].valid << 11) |
+ (ltdefs->rx_et[0].lid << 8) |
+ (ltdefs->rx_et[0].ltype_match << 4) |
+ ltdefs->rx_et[0].ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1),
+ (ltdefs->rx_et[1].offset << 12) |
+ (ltdefs->rx_et[1].valid << 11) |
+ (ltdefs->rx_et[1].lid << 8) |
+ (ltdefs->rx_et[1].ltype_match << 4) |
+ ltdefs->rx_et[1].ltype_mask);
+ }
+
+ err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
+ if (err)
+ return err;
+
+ nix_hw->tx_credits = kcalloc(hw->cgx_links + hw->lbk_links,
+ sizeof(u64), GFP_KERNEL);
+ if (!nix_hw->tx_credits)
+ return -ENOMEM;
+
+ /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
+ nix_link_config(rvu, blkaddr, nix_hw);
+
+ /* Enable Channel backpressure */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
+ }
+ return 0;
+}
+
+int rvu_nix_init(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_hw *nix_hw;
+ int blkaddr = 0, err;
+ int i = 0;
+
+ hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
+ GFP_KERNEL);
+ if (!hw->nix)
+ return -ENOMEM;
+
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ while (blkaddr) {
+ nix_hw = &hw->nix[i];
+ nix_hw->rvu = rvu;
+ nix_hw->blkaddr = blkaddr;
+ err = rvu_nix_block_init(rvu, nix_hw);
+ if (err)
+ return err;
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ i++;
+ }
+
+ return 0;
+}
+
+static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
+ struct rvu_block *block)
+{
+ struct nix_txsch *txsch;
+ struct nix_mcast *mcast;
+ struct nix_txvlan *vlan;
+ struct nix_hw *nix_hw;
+ int lvl;
+
+ rvu_aq_free(rvu, block->aq);
+
+ if (is_block_implemented(rvu->hw, blkaddr)) {
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return;
+
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ txsch = &nix_hw->txsch[lvl];
+ kfree(txsch->schq.bmap);
+ }
+
+ kfree(nix_hw->tx_credits);
+
+ nix_ipolicer_freemem(rvu, nix_hw);
+
+ vlan = &nix_hw->txvlan;
+ kfree(vlan->rsrc.bmap);
+ mutex_destroy(&vlan->rsrc_lock);
+
+ mcast = &nix_hw->mcast;
+ qmem_free(rvu->dev, mcast->mce_ctx);
+ qmem_free(rvu->dev, mcast->mcast_buf);
+ mutex_destroy(&mcast->mce_lock);
+ }
+}
+
+void rvu_nix_freemem(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkaddr = 0;
+
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ while (blkaddr) {
+ block = &hw->block[blkaddr];
+ rvu_nix_block_freemem(rvu, blkaddr, block);
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ }
+}
+
+int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ int nixlf, err;
+
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
+ if (err)
+ return err;
+
+ rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
+
+ npc_mcam_enable_flows(rvu, pcifunc);
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ set_bit(NIXLF_INITIALIZED, &pfvf->flags);
+
+ rvu_switch_update_rules(rvu, pcifunc);
+
+ return rvu_cgx_start_stop_io(rvu, pcifunc, true);
+}
+
+int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ int nixlf, err;
+
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
+ if (err)
+ return err;
+
+ rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
+
+ err = rvu_cgx_start_stop_io(rvu, pcifunc, false);
+ if (err)
+ return err;
+
+ rvu_cgx_tx_enable(rvu, pcifunc, true);
+
+ return 0;
+}
+
+#define RX_SA_BASE GENMASK_ULL(52, 7)
+
+void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct hwctx_disable_req ctx_req;
+ int pf = rvu_get_pf(pcifunc);
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ u64 sa_base;
+ void *cgxd;
+ int err;
+
+ ctx_req.hdr.pcifunc = pcifunc;
+
+ /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
+ rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+ rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
+ nix_interface_deinit(rvu, pcifunc, nixlf);
+ nix_rx_sync(rvu, blkaddr);
+ nix_txschq_free(rvu, pcifunc);
+
+ clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
+
+ rvu_cgx_start_stop_io(rvu, pcifunc, false);
+
+ if (pfvf->sq_ctx) {
+ ctx_req.ctype = NIX_AQ_CTYPE_SQ;
+ err = nix_lf_hwctx_disable(rvu, &ctx_req);
+ if (err)
+ dev_err(rvu->dev, "SQ ctx disable failed\n");
+ }
+
+ if (pfvf->rq_ctx) {
+ ctx_req.ctype = NIX_AQ_CTYPE_RQ;
+ err = nix_lf_hwctx_disable(rvu, &ctx_req);
+ if (err)
+ dev_err(rvu->dev, "RQ ctx disable failed\n");
+ }
+
+ if (pfvf->cq_ctx) {
+ ctx_req.ctype = NIX_AQ_CTYPE_CQ;
+ err = nix_lf_hwctx_disable(rvu, &ctx_req);
+ if (err)
+ dev_err(rvu->dev, "CQ ctx disable failed\n");
+ }
+
+ /* reset HW config done for Switch headers */
+ rvu_npc_set_parse_mode(rvu, pcifunc, OTX2_PRIV_FLAGS_DEFAULT,
+ (PKIND_TX | PKIND_RX), 0, 0, 0, 0);
+
+ /* Disabling CGX and NPC config done for PTP */
+ if (pfvf->hw_rx_tstamp_en) {
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+ mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, false);
+ /* Undo NPC config done for PTP */
+ if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false))
+ dev_err(rvu->dev, "NPC config for PTP failed\n");
+ pfvf->hw_rx_tstamp_en = false;
+ }
+
+ /* reset priority flow control config */
+ rvu_cgx_prio_flow_ctrl_cfg(rvu, pcifunc, 0, 0, 0);
+
+ /* reset 802.3x flow control config */
+ rvu_cgx_cfg_pause_frm(rvu, pcifunc, 0, 0);
+
+ nix_ctx_free(rvu, pfvf);
+
+ nix_free_all_bandprof(rvu, pcifunc);
+
+ sa_base = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf));
+ if (FIELD_GET(RX_SA_BASE, sa_base)) {
+ err = rvu_cpt_ctx_flush(rvu, pcifunc);
+ if (err)
+ dev_err(rvu->dev,
+ "CPT ctx flush failed with error: %d\n", err);
+ }
+}
+
+#define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32)
+
+static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkaddr, pf;
+ int nixlf;
+ u64 cfg;
+
+ pf = rvu_get_pf(pcifunc);
+ if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
+ return 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+ nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
+
+ if (enable)
+ cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
+ else
+ cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
+
+ return 0;
+}
+
+int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
+}
+
+int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
+}
+
+int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
+ struct nix_lso_format_cfg *req,
+ struct nix_lso_format_cfg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct nix_hw *nix_hw;
+ struct rvu_pfvf *pfvf;
+ int blkaddr, idx, f;
+ u64 reg;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ /* Find existing matching LSO format, if any */
+ for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
+ for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
+ reg = rvu_read64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(idx, f));
+ if (req->fields[f] != (reg & req->field_mask))
+ break;
+ }
+
+ if (f == NIX_LSO_FIELD_MAX)
+ break;
+ }
+
+ if (idx < nix_hw->lso.in_use) {
+ /* Match found */
+ rsp->lso_format_idx = idx;
+ return 0;
+ }
+
+ if (nix_hw->lso.in_use == nix_hw->lso.total)
+ return NIX_AF_ERR_LSO_CFG_FAIL;
+
+ rsp->lso_format_idx = nix_hw->lso.in_use++;
+
+ for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
+ req->fields[f]);
+
+ return 0;
+}
+
+#define IPSEC_GEN_CFG_EGRP GENMASK_ULL(50, 48)
+#define IPSEC_GEN_CFG_OPCODE GENMASK_ULL(47, 32)
+#define IPSEC_GEN_CFG_PARAM1 GENMASK_ULL(31, 16)
+#define IPSEC_GEN_CFG_PARAM2 GENMASK_ULL(15, 0)
+
+#define CPT_INST_QSEL_BLOCK GENMASK_ULL(28, 24)
+#define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8)
+#define CPT_INST_QSEL_SLOT GENMASK_ULL(7, 0)
+
+static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req,
+ int blkaddr)
+{
+ u8 cpt_idx, cpt_blkaddr;
+ u64 val;
+
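+	/* NIX0 pairs with CPT0 and NIX1 with CPT1; derive the CPT index
+	 * used for the INST_QSEL and CREDIT registers from the NIX
+	 * block address.
+	 */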
+ cpt_idx = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
+ if (req->enable) {
+ val = 0;
+ /* Enable context prefetching */
+ if (!is_rvu_otx2(rvu))
+ val |= BIT_ULL(51);
+
+ /* Set OPCODE and EGRP */
+ val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, req->gen_cfg.egrp);
+ val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, req->gen_cfg.opcode);
+ val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, req->gen_cfg.param1);
+ val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, req->gen_cfg.param2);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val);
+
+ /* Set CPT queue for inline IPSec */
+ val = FIELD_PREP(CPT_INST_QSEL_SLOT, req->inst_qsel.cpt_slot);
+ val |= FIELD_PREP(CPT_INST_QSEL_PF_FUNC,
+ req->inst_qsel.cpt_pf_func);
+
+ if (!is_rvu_otx2(rvu)) {
+ cpt_blkaddr = (cpt_idx == 0) ? BLKADDR_CPT0 :
+ BLKADDR_CPT1;
+ val |= FIELD_PREP(CPT_INST_QSEL_BLOCK, cpt_blkaddr);
+ }
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
+ val);
+
+ /* Set CPT credit */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
+ req->cpt_credit);
+ } else {
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
+ 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
+ 0x3FFFFF);
+ }
+}
+
+int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu,
+ struct nix_inline_ipsec_cfg *req,
+ struct msg_rsp *rsp)
+{
+ if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
+ return 0;
+
+ nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX0);
+ if (is_block_implemented(rvu->hw, BLKADDR_CPT1))
+ nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX1);
+
+ return 0;
+}
+
+int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu,
+ struct nix_inline_ipsec_lf_cfg *req,
+ struct msg_rsp *rsp)
+{
+ int lf, blkaddr, err;
+ u64 val;
+
+ if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
+ return 0;
+
+ err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr);
+ if (err)
+ return err;
+
+ if (req->enable) {
+ /* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */
+ val = (u64)req->ipsec_cfg0.tt << 44 |
+ (u64)req->ipsec_cfg0.tag_const << 20 |
+ (u64)req->ipsec_cfg0.sa_pow2_size << 16 |
+ req->ipsec_cfg0.lenm1_max;
+
+ if (blkaddr == BLKADDR_NIX1)
+ val |= BIT_ULL(46);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val);
+
+ /* Set SA_IDX_W and SA_IDX_MAX */
+ val = (u64)req->ipsec_cfg1.sa_idx_w << 32 |
+ req->ipsec_cfg1.sa_idx_max;
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val);
+
+ /* Set SA base address */
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
+ req->sa_base_addr);
+ } else {
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
+ 0x0);
+ }
+
+ return 0;
+}
+
+void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
+{
+ bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
+
+ /* overwrite vf mac address with default_mac */
+ if (from_vf)
+ ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
+}
+
+/* NIX ingress policers or bandwidth profiles APIs */
+static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr)
+{
+ struct npc_lt_def_cfg defs, *ltdefs;
+
+ ltdefs = &defs;
+ memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg));
+
+	/* Extract PCP and DEI fields from the outer VLAN at byte offset 2
+	 * from the start of LB_PTR (i.e. the TAG).
+	 * VLAN0 is the outer VLAN and VLAN1 is the inner VLAN. Inner VLAN
+	 * fields are considered when 'Tunnel enable' is set in the profile.
+	 */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI,
+ (2UL << 12) | (ltdefs->ovlan.lid << 8) |
+ (ltdefs->ovlan.ltype_match << 4) |
+ ltdefs->ovlan.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI,
+ (2UL << 12) | (ltdefs->ivlan.lid << 8) |
+ (ltdefs->ivlan.ltype_match << 4) |
+ ltdefs->ivlan.ltype_mask);
+
+ /* DSCP field in outer and tunneled IPv4 packets */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP,
+ (1UL << 12) | (ltdefs->rx_oip4.lid << 8) |
+ (ltdefs->rx_oip4.ltype_match << 4) |
+ ltdefs->rx_oip4.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP,
+ (1UL << 12) | (ltdefs->rx_iip4.lid << 8) |
+ (ltdefs->rx_iip4.ltype_match << 4) |
+ ltdefs->rx_iip4.ltype_mask);
+
+ /* DSCP field (traffic class) in outer and tunneled IPv6 packets */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP,
+ (1UL << 11) | (ltdefs->rx_oip6.lid << 8) |
+ (ltdefs->rx_oip6.ltype_match << 4) |
+ ltdefs->rx_oip6.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP,
+ (1UL << 11) | (ltdefs->rx_iip6.lid << 8) |
+ (ltdefs->rx_iip6.ltype_match << 4) |
+ ltdefs->rx_iip6.ltype_mask);
+}
+
+static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw,
+ int layer, int prof_idx)
+{
+ struct nix_cn10k_aq_enq_req aq_req;
+ int rc;
+
+ memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
+
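+	/* Profile index encoding (see nix_verify_bandprof()): bits [13:0]
+	 * carry the profile index within the layer and bits [15:14] carry
+	 * the layer number.
+	 */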
+ aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14);
+ aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
+ aq_req.op = NIX_AQ_INSTOP_INIT;
+
+ /* Context is all zeros, submit to AQ */
+ rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
+ (struct nix_aq_enq_req *)&aq_req, NULL);
+ if (rc)
+ dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n",
+ layer, prof_idx);
+ return rc;
+}
+
+static int nix_setup_ipolicers(struct rvu *rvu,
+ struct nix_hw *nix_hw, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_ipolicer *ipolicer;
+ int err, layer, prof_idx;
+ u64 cfg;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
+ if (!(cfg & BIT_ULL(61))) {
+ hw->cap.ipolicer = false;
+ return 0;
+ }
+
+ hw->cap.ipolicer = true;
+ nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS,
+ sizeof(*ipolicer), GFP_KERNEL);
+ if (!nix_hw->ipolicer)
+ return -ENOMEM;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST);
+
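+	/* NIX_AF_PL_CONST packs the number of profiles per layer:
+	 * [15:0] leaf, [31:16] mid and [47:32] top layer.
+	 */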
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ ipolicer = &nix_hw->ipolicer[layer];
+ switch (layer) {
+ case BAND_PROF_LEAF_LAYER:
+ ipolicer->band_prof.max = cfg & 0XFFFF;
+ break;
+ case BAND_PROF_MID_LAYER:
+ ipolicer->band_prof.max = (cfg >> 16) & 0XFFFF;
+ break;
+ case BAND_PROF_TOP_LAYER:
+ ipolicer->band_prof.max = (cfg >> 32) & 0XFFFF;
+ break;
+ }
+
+ if (!ipolicer->band_prof.max)
+ continue;
+
+ err = rvu_alloc_bitmap(&ipolicer->band_prof);
+ if (err)
+ return err;
+
+ ipolicer->pfvf_map = devm_kcalloc(rvu->dev,
+ ipolicer->band_prof.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!ipolicer->pfvf_map)
+ return -ENOMEM;
+
+ ipolicer->match_id = devm_kcalloc(rvu->dev,
+ ipolicer->band_prof.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!ipolicer->match_id)
+ return -ENOMEM;
+
+ for (prof_idx = 0;
+ prof_idx < ipolicer->band_prof.max; prof_idx++) {
+ /* Set AF as current owner for INIT ops to succeed */
+ ipolicer->pfvf_map[prof_idx] = 0x00;
+
+			/* There is no enable bit in the profile context,
+			 * hence no context disable either. INIT them here
+			 * so that PF/VF later on only have to do a WRITE to
+			 * set up policer rates and config.
+			 */
+ err = nix_init_policer_context(rvu, nix_hw,
+ layer, prof_idx);
+ if (err)
+ return err;
+ }
+
+ /* Allocate memory for maintaining ref_counts for MID level
+		 * profiles; this will be needed for leaf layer profiles'
+ * aggregation.
+ */
+ if (layer != BAND_PROF_MID_LAYER)
+ continue;
+
+ ipolicer->ref_count = devm_kcalloc(rvu->dev,
+ ipolicer->band_prof.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!ipolicer->ref_count)
+ return -ENOMEM;
+ }
+
+ /* Set policer timeunit to 2us ie (19 + 1) * 100 nsec = 2us */
+ rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19);
+
+ nix_config_rx_pkt_policer_precolor(rvu, blkaddr);
+
+ return 0;
+}
+
+static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw)
+{
+ struct nix_ipolicer *ipolicer;
+ int layer;
+
+ if (!rvu->hw->cap.ipolicer)
+ return;
+
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ ipolicer = &nix_hw->ipolicer[layer];
+
+ if (!ipolicer->band_prof.max)
+ continue;
+
+ kfree(ipolicer->band_prof.bmap);
+ }
+}
+
+static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
+ struct nix_hw *nix_hw, u16 pcifunc)
+{
+ struct nix_ipolicer *ipolicer;
+ int layer, hi_layer, prof_idx;
+
+ /* Bits [15:14] in profile index represent layer */
+ layer = (req->qidx >> 14) & 0x03;
+ prof_idx = req->qidx & 0x3FFF;
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ if (prof_idx >= ipolicer->band_prof.max)
+ return -EINVAL;
+
+ /* Check if the profile is allocated to the requesting PCIFUNC or not
+ * with the exception of AF. AF is allowed to read and update contexts.
+ */
+ if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc)
+ return -EINVAL;
+
+ /* If this profile is linked to higher layer profile then check
+ * if that profile is also allocated to the requesting PCIFUNC
+ * or not.
+ */
+ if (!req->prof.hl_en)
+ return 0;
+
+ /* Leaf layer profile can link only to mid layer and
+ * mid layer to top layer.
+ */
+ if (layer == BAND_PROF_LEAF_LAYER)
+ hi_layer = BAND_PROF_MID_LAYER;
+ else if (layer == BAND_PROF_MID_LAYER)
+ hi_layer = BAND_PROF_TOP_LAYER;
+ else
+ return -EINVAL;
+
+ ipolicer = &nix_hw->ipolicer[hi_layer];
+ prof_idx = req->prof.band_prof_id;
+ if (prof_idx >= ipolicer->band_prof.max ||
+ ipolicer->pfvf_map[prof_idx] != pcifunc)
+ return -EINVAL;
+
+ return 0;
+}
+
+int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu,
+ struct nix_bandprof_alloc_req *req,
+ struct nix_bandprof_alloc_rsp *rsp)
+{
+ int blkaddr, layer, prof, idx, err;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct nix_ipolicer *ipolicer;
+ struct nix_hw *nix_hw;
+
+ if (!rvu->hw->cap.ipolicer)
+ return NIX_AF_ERR_IPOLICER_NOTSUPP;
+
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mutex_lock(&rvu->rsrc_lock);
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ if (!req->prof_count[layer])
+ continue;
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ for (idx = 0; idx < req->prof_count[layer]; idx++) {
+ /* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */
+ if (idx == MAX_BANDPROF_PER_PFFUNC)
+ break;
+
+ prof = rvu_alloc_rsrc(&ipolicer->band_prof);
+ if (prof < 0)
+ break;
+ rsp->prof_count[layer]++;
+ rsp->prof_idx[layer][idx] = prof;
+ ipolicer->pfvf_map[prof] = pcifunc;
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+}
+
+static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc)
+{
+ int blkaddr, layer, prof_idx, err;
+ struct nix_ipolicer *ipolicer;
+ struct nix_hw *nix_hw;
+
+ if (!rvu->hw->cap.ipolicer)
+ return NIX_AF_ERR_IPOLICER_NOTSUPP;
+
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mutex_lock(&rvu->rsrc_lock);
+ /* Free all the profiles allocated to the PCIFUNC */
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ ipolicer = &nix_hw->ipolicer[layer];
+
+ for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) {
+ if (ipolicer->pfvf_map[prof_idx] != pcifunc)
+ continue;
+
+ /* Clear ratelimit aggregation, if any */
+ if (layer == BAND_PROF_LEAF_LAYER &&
+ ipolicer->match_id[prof_idx])
+ nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
+
+ ipolicer->pfvf_map[prof_idx] = 0x00;
+ ipolicer->match_id[prof_idx] = 0;
+ rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+}
+
+int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
+ struct nix_bandprof_free_req *req,
+ struct msg_rsp *rsp)
+{
+ int blkaddr, layer, prof_idx, idx, err;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct nix_ipolicer *ipolicer;
+ struct nix_hw *nix_hw;
+
+ if (req->free_all)
+ return nix_free_all_bandprof(rvu, pcifunc);
+
+ if (!rvu->hw->cap.ipolicer)
+ return NIX_AF_ERR_IPOLICER_NOTSUPP;
+
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mutex_lock(&rvu->rsrc_lock);
+ /* Free the requested profile indices */
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ if (!req->prof_count[layer])
+ continue;
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ for (idx = 0; idx < req->prof_count[layer]; idx++) {
+ if (idx == MAX_BANDPROF_PER_PFFUNC)
+ break;
+ prof_idx = req->prof_idx[layer][idx];
+ if (prof_idx >= ipolicer->band_prof.max ||
+ ipolicer->pfvf_map[prof_idx] != pcifunc)
+ continue;
+
+ /* Clear ratelimit aggregation, if any */
+ if (layer == BAND_PROF_LEAF_LAYER &&
+ ipolicer->match_id[prof_idx])
+ nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
+
+ ipolicer->pfvf_map[prof_idx] = 0x00;
+ ipolicer->match_id[prof_idx] = 0;
+ rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+}
+
+int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
+ struct nix_cn10k_aq_enq_req *aq_req,
+ struct nix_cn10k_aq_enq_rsp *aq_rsp,
+ u16 pcifunc, u8 ctype, u32 qidx)
+{
+ memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
+ aq_req->hdr.pcifunc = pcifunc;
+ aq_req->ctype = ctype;
+ aq_req->op = NIX_AQ_INSTOP_READ;
+ aq_req->qidx = qidx;
+
+ return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
+ (struct nix_aq_enq_req *)aq_req,
+ (struct nix_aq_enq_rsp *)aq_rsp);
+}
+
+static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
+ struct nix_hw *nix_hw,
+ struct nix_cn10k_aq_enq_req *aq_req,
+ struct nix_cn10k_aq_enq_rsp *aq_rsp,
+ u32 leaf_prof, u16 mid_prof)
+{
+ memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
+ aq_req->hdr.pcifunc = 0x00;
+ aq_req->ctype = NIX_AQ_CTYPE_BANDPROF;
+ aq_req->op = NIX_AQ_INSTOP_WRITE;
+ aq_req->qidx = leaf_prof;
+
+ aq_req->prof.band_prof_id = mid_prof;
+ aq_req->prof_mask.band_prof_id = GENMASK(6, 0);
+ aq_req->prof.hl_en = 1;
+ aq_req->prof_mask.hl_en = 1;
+
+ return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
+ (struct nix_aq_enq_req *)aq_req,
+ (struct nix_aq_enq_rsp *)aq_rsp);
+}
+
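+/* If another RQ's leaf profile already carries the same match_id, chain
+ * both leaf profiles to a common mid layer profile so that flows steered
+ * to different RQs but tagged with the same match_id are policed as one
+ * aggregate.
+ */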
+int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
+ u16 rq_idx, u16 match_id)
+{
+ int leaf_prof, mid_prof, leaf_match;
+ struct nix_cn10k_aq_enq_req aq_req;
+ struct nix_cn10k_aq_enq_rsp aq_rsp;
+ struct nix_ipolicer *ipolicer;
+ struct nix_hw *nix_hw;
+ int blkaddr, idx, rc;
+
+ if (!rvu->hw->cap.ipolicer)
+ return 0;
+
+ rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (rc)
+ return rc;
+
+ /* Fetch the RQ's context to see if policing is enabled */
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc,
+ NIX_AQ_CTYPE_RQ, rq_idx);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n",
+ __func__, rq_idx, pcifunc);
+ return rc;
+ }
+
+ if (!aq_rsp.rq.policer_ena)
+ return 0;
+
+ /* Get the bandwidth profile ID mapped to this RQ */
+ leaf_prof = aq_rsp.rq.band_prof_id;
+
+ ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER];
+ ipolicer->match_id[leaf_prof] = match_id;
+
+ /* Check if any other leaf profile is marked with same match_id */
+ for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
+ if (idx == leaf_prof)
+ continue;
+ if (ipolicer->match_id[idx] != match_id)
+ continue;
+
+ leaf_match = idx;
+ break;
+ }
+
+ if (idx == ipolicer->band_prof.max)
+ return 0;
+
+ /* Fetch the matching profile's context to check if it's already
+ * mapped to a mid level profile.
+ */
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
+ NIX_AQ_CTYPE_BANDPROF, leaf_match);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch context of leaf profile %d\n",
+ __func__, leaf_match);
+ return rc;
+ }
+
+ ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
+ if (aq_rsp.prof.hl_en) {
+ /* Get Mid layer prof index and map leaf_prof index
+ * also such that flows that are being steered
+ * to different RQs and marked with same match_id
+		 * are rate limited in an aggregate fashion.
+ */
+ mid_prof = aq_rsp.prof.band_prof_id;
+ rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
+ &aq_req, &aq_rsp,
+ leaf_prof, mid_prof);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to map leaf(%d) and mid(%d) profiles\n",
+ __func__, leaf_prof, mid_prof);
+ goto exit;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ ipolicer->ref_count[mid_prof]++;
+ mutex_unlock(&rvu->rsrc_lock);
+ goto exit;
+ }
+
+ /* Allocate a mid layer profile and
+ * map both 'leaf_prof' and 'leaf_match' profiles to it.
+ */
+ mutex_lock(&rvu->rsrc_lock);
+ mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof);
+ if (mid_prof < 0) {
+ dev_err(rvu->dev,
+ "%s: Unable to allocate mid layer profile\n", __func__);
+ mutex_unlock(&rvu->rsrc_lock);
+ goto exit;
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ ipolicer->pfvf_map[mid_prof] = 0x00;
+ ipolicer->ref_count[mid_prof] = 0;
+
+ /* Initialize mid layer profile same as 'leaf_prof' */
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
+ NIX_AQ_CTYPE_BANDPROF, leaf_prof);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch context of leaf profile %d\n",
+ __func__, leaf_prof);
+ goto exit;
+ }
+
+ memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
+ aq_req.hdr.pcifunc = 0x00;
+ aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14);
+ aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
+ aq_req.op = NIX_AQ_INSTOP_WRITE;
+ memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
+ memset((char *)&aq_req.prof_mask, 0xff, sizeof(struct nix_bandprof_s));
+ /* Clear higher layer enable bit in the mid profile, just in case */
+ aq_req.prof.hl_en = 0;
+ aq_req.prof_mask.hl_en = 1;
+
+ rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
+ (struct nix_aq_enq_req *)&aq_req, NULL);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to INIT context of mid layer profile %d\n",
+ __func__, mid_prof);
+ goto exit;
+ }
+
+ /* Map both leaf profiles to this mid layer profile */
+ rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
+ &aq_req, &aq_rsp,
+ leaf_prof, mid_prof);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to map leaf(%d) and mid(%d) profiles\n",
+ __func__, leaf_prof, mid_prof);
+ goto exit;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ ipolicer->ref_count[mid_prof]++;
+ mutex_unlock(&rvu->rsrc_lock);
+
+ rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
+ &aq_req, &aq_rsp,
+ leaf_match, mid_prof);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to map leaf(%d) and mid(%d) profiles\n",
+ __func__, leaf_match, mid_prof);
+ ipolicer->ref_count[mid_prof]--;
+ goto exit;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ ipolicer->ref_count[mid_prof]++;
+ mutex_unlock(&rvu->rsrc_lock);
+
+exit:
+ return rc;
+}
+
+/* Called with mutex rsrc_lock */
+static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
+ u32 leaf_prof)
+{
+ struct nix_cn10k_aq_enq_req aq_req;
+ struct nix_cn10k_aq_enq_rsp aq_rsp;
+ struct nix_ipolicer *ipolicer;
+ u16 mid_prof;
+ int rc;
+
+ mutex_unlock(&rvu->rsrc_lock);
+
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
+ NIX_AQ_CTYPE_BANDPROF, leaf_prof);
+
+ mutex_lock(&rvu->rsrc_lock);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch context of leaf profile %d\n",
+ __func__, leaf_prof);
+ return;
+ }
+
+ if (!aq_rsp.prof.hl_en)
+ return;
+
+ mid_prof = aq_rsp.prof.band_prof_id;
+ ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
+ ipolicer->ref_count[mid_prof]--;
+ /* If ref_count is zero, free mid layer profile */
+ if (!ipolicer->ref_count[mid_prof]) {
+ ipolicer->pfvf_map[mid_prof] = 0x00;
+ rvu_free_rsrc(&ipolicer->band_prof, mid_prof);
+ }
+}
+
+int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *req,
+ struct nix_bandprof_get_hwinfo_rsp *rsp)
+{
+ struct nix_ipolicer *ipolicer;
+ int blkaddr, layer, err;
+ struct nix_hw *nix_hw;
+ u64 tu;
+
+ if (!rvu->hw->cap.ipolicer)
+ return NIX_AF_ERR_IPOLICER_NOTSUPP;
+
+ err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ /* Return number of bandwidth profiles free at each layer */
+ mutex_lock(&rvu->rsrc_lock);
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ rsp->prof_count[layer] = rvu_rsrc_free_count(&ipolicer->band_prof);
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+
+	/* Return the policer timeunit in nanosec */
+ tu = rvu_read64(rvu, blkaddr, NIX_AF_PL_TS) & GENMASK_ULL(9, 0);
+ rsp->policer_timeunit = (tu + 1) * 100;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
new file mode 100644
index 000000000..4f5ca5ab1
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
@@ -0,0 +1,601 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+#include <linux/bitfield.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+
+static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
+ struct npa_aq_inst_s *inst)
+{
+ struct admin_queue *aq = block->aq;
+ struct npa_aq_res_s *result;
+ int timeout = 1000;
+ u64 reg, head;
+
+ result = (struct npa_aq_res_s *)aq->res->base;
+
+ /* Get current head pointer where to append this instruction */
+ reg = rvu_read64(rvu, block->addr, NPA_AF_AQ_STATUS);
+ head = (reg >> 4) & AQ_PTR_MASK;
+
+ memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
+ (void *)inst, aq->inst->entry_sz);
+ memset(result, 0, sizeof(*result));
+ /* sync into memory */
+ wmb();
+
+ /* Ring the doorbell and wait for result */
+ rvu_write64(rvu, block->addr, NPA_AF_AQ_DOOR, 1);
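+	/* Busy-wait for the result; with udelay(1) per iteration the
+	 * timeout of 1000 bounds the wait to roughly one millisecond.
+	 */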
+ while (result->compcode == NPA_AQ_COMP_NOTDONE) {
+ cpu_relax();
+ udelay(1);
+ timeout--;
+ if (!timeout)
+ return -EBUSY;
+ }
+
+ if (result->compcode != NPA_AQ_COMP_GOOD) {
+ /* TODO: Replace this with some error code */
+ if (result->compcode == NPA_AQ_COMP_CTX_FAULT ||
+ result->compcode == NPA_AQ_COMP_LOCKERR ||
+ result->compcode == NPA_AQ_COMP_CTX_POISON) {
+ if (rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NPA0))
+ dev_err(rvu->dev,
+ "%s: Not able to unlock cachelines\n", __func__);
+ }
+
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
+ struct npa_aq_enq_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, npalf, rc = 0;
+ struct npa_aq_inst_s inst;
+ struct rvu_block *block;
+ struct admin_queue *aq;
+ struct rvu_pfvf *pfvf;
+ void *ctx, *mask;
+ bool ena;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (!pfvf->aura_ctx || req->aura_id >= pfvf->aura_ctx->qsize)
+ return NPA_AF_ERR_AQ_ENQUEUE;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
+ if (!pfvf->npalf || blkaddr < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+ aq = block->aq;
+ if (!aq) {
+ dev_warn(rvu->dev, "%s: NPA AQ not initialized\n", __func__);
+ return NPA_AF_ERR_AQ_ENQUEUE;
+ }
+
+ npalf = rvu_get_lf(rvu, block, pcifunc, 0);
+ if (npalf < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ memset(&inst, 0, sizeof(struct npa_aq_inst_s));
+ inst.cindex = req->aura_id;
+ inst.lf = npalf;
+ inst.ctype = req->ctype;
+ inst.op = req->op;
+ /* Currently we are not supporting enqueuing multiple instructions,
+ * so always choose first entry in result memory.
+ */
+ inst.res_addr = (u64)aq->res->iova;
+
+	/* Hardware uses the same aq->res->base for updating the result of
+	 * the previous instruction, hence wait here till it is done.
+ */
+ spin_lock(&aq->lock);
+
+ /* Clean result + context memory */
+ memset(aq->res->base, 0, aq->res->entry_sz);
+ /* Context needs to be written at RES_ADDR + 128 */
+ ctx = aq->res->base + 128;
+ /* Mask needs to be written at RES_ADDR + 256 */
+ mask = aq->res->base + 256;
+
+ switch (req->op) {
+ case NPA_AQ_INSTOP_WRITE:
+ /* Copy context and write mask */
+ if (req->ctype == NPA_AQ_CTYPE_AURA) {
+ memcpy(mask, &req->aura_mask,
+ sizeof(struct npa_aura_s));
+ memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
+ } else {
+ memcpy(mask, &req->pool_mask,
+ sizeof(struct npa_pool_s));
+ memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
+ }
+ break;
+ case NPA_AQ_INSTOP_INIT:
+ if (req->ctype == NPA_AQ_CTYPE_AURA) {
+ if (req->aura.pool_addr >= pfvf->pool_ctx->qsize) {
+ rc = NPA_AF_ERR_AQ_FULL;
+ break;
+ }
+ /* Set pool's context address */
+ req->aura.pool_addr = pfvf->pool_ctx->iova +
+ (req->aura.pool_addr * pfvf->pool_ctx->entry_sz);
+ memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
+ } else { /* POOL's context */
+ memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
+ }
+ break;
+ case NPA_AQ_INSTOP_NOP:
+ case NPA_AQ_INSTOP_READ:
+ case NPA_AQ_INSTOP_LOCK:
+ case NPA_AQ_INSTOP_UNLOCK:
+ break;
+ default:
+ rc = NPA_AF_ERR_AQ_FULL;
+ break;
+ }
+
+ if (rc) {
+ spin_unlock(&aq->lock);
+ return rc;
+ }
+
+ /* Submit the instruction to AQ */
+ rc = npa_aq_enqueue_wait(rvu, block, &inst);
+ if (rc) {
+ spin_unlock(&aq->lock);
+ return rc;
+ }
+
+ /* Set aura bitmap if aura hw context is enabled */
+ if (req->ctype == NPA_AQ_CTYPE_AURA) {
+ if (req->op == NPA_AQ_INSTOP_INIT && req->aura.ena)
+ __set_bit(req->aura_id, pfvf->aura_bmap);
+ if (req->op == NPA_AQ_INSTOP_WRITE) {
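+			/* Resulting enable state: take the new 'ena' where
+			 * the write mask selects it, else keep the state
+			 * already tracked in the bitmap.
+			 */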
+ ena = (req->aura.ena & req->aura_mask.ena) |
+ (test_bit(req->aura_id, pfvf->aura_bmap) &
+ ~req->aura_mask.ena);
+ if (ena)
+ __set_bit(req->aura_id, pfvf->aura_bmap);
+ else
+ __clear_bit(req->aura_id, pfvf->aura_bmap);
+ }
+ }
+
+ /* Set pool bitmap if pool hw context is enabled */
+ if (req->ctype == NPA_AQ_CTYPE_POOL) {
+ if (req->op == NPA_AQ_INSTOP_INIT && req->pool.ena)
+ __set_bit(req->aura_id, pfvf->pool_bmap);
+ if (req->op == NPA_AQ_INSTOP_WRITE) {
+ ena = (req->pool.ena & req->pool_mask.ena) |
+ (test_bit(req->aura_id, pfvf->pool_bmap) &
+ ~req->pool_mask.ena);
+ if (ena)
+ __set_bit(req->aura_id, pfvf->pool_bmap);
+ else
+ __clear_bit(req->aura_id, pfvf->pool_bmap);
+ }
+ }
+ spin_unlock(&aq->lock);
+
+ if (rsp) {
+ /* Copy read context into mailbox */
+ if (req->op == NPA_AQ_INSTOP_READ) {
+ if (req->ctype == NPA_AQ_CTYPE_AURA)
+ memcpy(&rsp->aura, ctx,
+ sizeof(struct npa_aura_s));
+ else
+ memcpy(&rsp->pool, ctx,
+ sizeof(struct npa_pool_s));
+ }
+ }
+
+ return 0;
+}
+
+static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ struct npa_aq_enq_req aq_req;
+ unsigned long *bmap;
+ int id, cnt = 0;
+ int err = 0, rc;
+
+ if (!pfvf->pool_ctx || !pfvf->aura_ctx)
+ return NPA_AF_ERR_AQ_ENQUEUE;
+
+ memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
+ aq_req.hdr.pcifunc = req->hdr.pcifunc;
+
+ if (req->ctype == NPA_AQ_CTYPE_POOL) {
+ aq_req.pool.ena = 0;
+ aq_req.pool_mask.ena = 1;
+ cnt = pfvf->pool_ctx->qsize;
+ bmap = pfvf->pool_bmap;
+ } else if (req->ctype == NPA_AQ_CTYPE_AURA) {
+ aq_req.aura.ena = 0;
+ aq_req.aura_mask.ena = 1;
+ aq_req.aura.bp_ena = 0;
+ aq_req.aura_mask.bp_ena = 1;
+ cnt = pfvf->aura_ctx->qsize;
+ bmap = pfvf->aura_bmap;
+ }
+
+ aq_req.ctype = req->ctype;
+ aq_req.op = NPA_AQ_INSTOP_WRITE;
+
+ for (id = 0; id < cnt; id++) {
+ if (!test_bit(id, bmap))
+ continue;
+ aq_req.aura_id = id;
+ rc = rvu_npa_aq_enq_inst(rvu, &aq_req, NULL);
+ if (rc) {
+ err = rc;
+ dev_err(rvu->dev, "Failed to disable %s:%d context\n",
+ (req->ctype == NPA_AQ_CTYPE_AURA) ?
+ "Aura" : "Pool", id);
+ }
+ }
+
+ return err;
+}
+
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
+static int npa_lf_hwctx_lockdown(struct rvu *rvu, struct npa_aq_enq_req *req)
+{
+ struct npa_aq_enq_req lock_ctx_req;
+ int err;
+
+ if (req->op != NPA_AQ_INSTOP_INIT)
+ return 0;
+
+ memset(&lock_ctx_req, 0, sizeof(struct npa_aq_enq_req));
+ lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
+ lock_ctx_req.ctype = req->ctype;
+ lock_ctx_req.op = NPA_AQ_INSTOP_LOCK;
+ lock_ctx_req.aura_id = req->aura_id;
+ err = rvu_npa_aq_enq_inst(rvu, &lock_ctx_req, NULL);
+ if (err)
+ dev_err(rvu->dev,
+ "PFUNC 0x%x: Failed to lock NPA context %s:%d\n",
+ req->hdr.pcifunc,
+ (req->ctype == NPA_AQ_CTYPE_AURA) ?
+ "Aura" : "Pool", req->aura_id);
+ return err;
+}
+
+int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
+ struct npa_aq_enq_req *req,
+ struct npa_aq_enq_rsp *rsp)
+{
+ int err;
+
+ err = rvu_npa_aq_enq_inst(rvu, req, rsp);
+ if (!err)
+ err = npa_lf_hwctx_lockdown(rvu, req);
+ return err;
+}
+#else
+
+int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
+ struct npa_aq_enq_req *req,
+ struct npa_aq_enq_rsp *rsp)
+{
+ return rvu_npa_aq_enq_inst(rvu, req, rsp);
+}
+#endif
+
+int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
+ struct hwctx_disable_req *req,
+ struct msg_rsp *rsp)
+{
+ return npa_lf_hwctx_disable(rvu, req);
+}
+
+static void npa_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
+{
+ kfree(pfvf->aura_bmap);
+ pfvf->aura_bmap = NULL;
+
+ qmem_free(rvu->dev, pfvf->aura_ctx);
+ pfvf->aura_ctx = NULL;
+
+ kfree(pfvf->pool_bmap);
+ pfvf->pool_bmap = NULL;
+
+ qmem_free(rvu->dev, pfvf->pool_ctx);
+ pfvf->pool_ctx = NULL;
+
+ qmem_free(rvu->dev, pfvf->npa_qints_ctx);
+ pfvf->npa_qints_ctx = NULL;
+}
+
+int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
+ struct npa_lf_alloc_req *req,
+ struct npa_lf_alloc_rsp *rsp)
+{
+ int npalf, qints, hwctx_size, err, rc = 0;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ struct rvu_pfvf *pfvf;
+ u64 cfg, ctx_cfg;
+ int blkaddr;
+
+ if (req->aura_sz > NPA_AURA_SZ_MAX ||
+ req->aura_sz == NPA_AURA_SZ_0 || !req->nr_pools)
+ return NPA_AF_ERR_PARAM;
+
+ if (req->way_mask)
+ req->way_mask &= 0xFFFF;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
+ if (!pfvf->npalf || blkaddr < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+ npalf = rvu_get_lf(rvu, block, pcifunc, 0);
+ if (npalf < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ /* Reset this NPA LF */
+ err = rvu_lf_reset(rvu, block, npalf);
+ if (err) {
+ dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf);
+ return NPA_AF_ERR_LF_RESET;
+ }
+
+ ctx_cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST1);
+
+ /* Alloc memory for aura HW contexts */
+ hwctx_size = 1UL << (ctx_cfg & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->aura_ctx,
+ NPA_AURA_COUNT(req->aura_sz), hwctx_size);
+ if (err)
+ goto free_mem;
+
+ pfvf->aura_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
+ GFP_KERNEL);
+ if (!pfvf->aura_bmap)
+ goto free_mem;
+
+ /* Alloc memory for pool HW contexts */
+ hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->pool_ctx, req->nr_pools, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ pfvf->pool_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
+ GFP_KERNEL);
+ if (!pfvf->pool_bmap)
+ goto free_mem;
+
+ /* Get no of queue interrupts supported */
+ cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST);
+ qints = (cfg >> 28) & 0xFFF;
+
+ /* Alloc memory for Qints HW contexts */
+ hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->npa_qints_ctx, qints, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ cfg = rvu_read64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf));
+ /* Clear way partition mask and set aura offset to '0' */
+ cfg &= ~(BIT_ULL(34) - 1);
+ /* Set aura size & enable caching of contexts */
+ cfg |= (req->aura_sz << 16) | BIT_ULL(34) | req->way_mask;
+
+ rvu_write64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf), cfg);
+
+ /* Configure aura HW context's base */
+ rvu_write64(rvu, blkaddr, NPA_AF_LFX_LOC_AURAS_BASE(npalf),
+ (u64)pfvf->aura_ctx->iova);
+
+ /* Enable caching of qints hw context */
+ rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf),
+ BIT_ULL(36) | req->way_mask << 20);
+ rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_BASE(npalf),
+ (u64)pfvf->npa_qints_ctx->iova);
+
+ goto exit;
+
+free_mem:
+ npa_ctx_free(rvu, pfvf);
+ rc = -ENOMEM;
+
+exit:
+ /* set stack page info */
+ cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST);
+ rsp->stack_pg_ptrs = (cfg >> 8) & 0xFF;
+ rsp->stack_pg_bytes = cfg & 0xFF;
+ rsp->qints = (cfg >> 28) & 0xFFF;
+ if (!is_rvu_otx2(rvu)) {
+ cfg = rvu_read64(rvu, block->addr, NPA_AF_BATCH_CTL);
+ rsp->cache_lines = (cfg >> 1) & 0x3F;
+ }
+ return rc;
+}
+
+int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ struct rvu_pfvf *pfvf;
+ int npalf, err;
+ int blkaddr;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
+ if (!pfvf->npalf || blkaddr < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+ npalf = rvu_get_lf(rvu, block, pcifunc, 0);
+ if (npalf < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ /* Reset this NPA LF */
+ err = rvu_lf_reset(rvu, block, npalf);
+ if (err) {
+ dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf);
+ return NPA_AF_ERR_LF_RESET;
+ }
+
+ npa_ctx_free(rvu, pfvf);
+
+ return 0;
+}
+
+static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
+{
+ u64 cfg;
+ int err;
+
+ /* Set admin queue endianness */
+ cfg = rvu_read64(rvu, block->addr, NPA_AF_GEN_CFG);
+#ifdef __BIG_ENDIAN
+ cfg |= BIT_ULL(1);
+ rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
+#else
+ cfg &= ~BIT_ULL(1);
+ rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
+#endif
+
+ /* Do not bypass NDC cache */
+ cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG);
+ cfg &= ~0x03DULL;
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
+ /* Disable caching of stack pages */
+ cfg |= 0x10ULL;
+#endif
+ rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg);
+
+ /* For CN10K NPA BATCH DMA set 35 cache lines */
+ if (!is_rvu_otx2(rvu)) {
+ cfg = rvu_read64(rvu, block->addr, NPA_AF_BATCH_CTL);
+ cfg &= ~0x7EULL;
+ cfg |= BIT_ULL(6) | BIT_ULL(2) | BIT_ULL(1);
+ rvu_write64(rvu, block->addr, NPA_AF_BATCH_CTL, cfg);
+ }
+ /* Result structure can be followed by Aura/Pool context at
+	 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
+ * operation type. Alloc sufficient result memory for all operations.
+ */
+ err = rvu_aq_alloc(rvu, &block->aq,
+ Q_COUNT(AQ_SIZE), sizeof(struct npa_aq_inst_s),
+ ALIGN(sizeof(struct npa_aq_res_s), 128) + 256);
+ if (err)
+ return err;
+
+ rvu_write64(rvu, block->addr, NPA_AF_AQ_CFG, AQ_SIZE);
+ rvu_write64(rvu, block->addr,
+ NPA_AF_AQ_BASE, (u64)block->aq->inst->iova);
+ return 0;
+}
+
+int rvu_npa_init(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return 0;
+
+ /* Initialize admin queue */
+ return npa_aq_init(rvu, &hw->block[blkaddr]);
+}
+
+void rvu_npa_freemem(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+ rvu_aq_free(rvu, block->aq);
+}
+
+void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct hwctx_disable_req ctx_req;
+
+ /* Disable all pools */
+ ctx_req.hdr.pcifunc = pcifunc;
+ ctx_req.ctype = NPA_AQ_CTYPE_POOL;
+ npa_lf_hwctx_disable(rvu, &ctx_req);
+
+ /* Disable all auras */
+ ctx_req.ctype = NPA_AQ_CTYPE_AURA;
+ npa_lf_hwctx_disable(rvu, &ctx_req);
+
+ npa_ctx_free(rvu, pfvf);
+}
+
+/* Due to a hardware erratum, in some corner cases, AQ context lock
+ * operations can result in an NDC way getting into an illegal state
+ * of not valid but locked.
+ *
+ * This API solves the problem by clearing the lock bit of the NDC block.
+ * The operation needs to be done for each line of all the NDC banks.
+ */
+int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr)
+{
+ int bank, max_bank, line, max_line, err;
+ u64 reg, ndc_af_const;
+
+ /* Set the ENABLE bit(63) to '0' */
+ reg = rvu_read64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL);
+ rvu_write64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL, reg & GENMASK_ULL(62, 0));
+
+ /* Poll until the BUSY bits(47:32) are set to '0' */
+ err = rvu_poll_reg(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL, GENMASK_ULL(47, 32), true);
+ if (err) {
+ dev_err(rvu->dev, "Timed out while polling for NDC CAM busy bits.\n");
+ return err;
+ }
+
+ ndc_af_const = rvu_read64(rvu, blkaddr, NDC_AF_CONST);
+ max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const);
+ max_line = FIELD_GET(NDC_AF_BANK_LINE_MASK, ndc_af_const);
+ for (bank = 0; bank < max_bank; bank++) {
+ for (line = 0; line < max_line; line++) {
+ /* Check if 'cache line valid bit(63)' is not set
+ * but 'cache line lock bit(60)' is set and on
+ * success, reset the lock bit(60).
+ */
+ reg = rvu_read64(rvu, blkaddr,
+ NDC_AF_BANKX_LINEX_METADATA(bank, line));
+ if (!(reg & BIT_ULL(63)) && (reg & BIT_ULL(60))) {
+ rvu_write64(rvu, blkaddr,
+ NDC_AF_BANKX_LINEX_METADATA(bank, line),
+ reg & ~BIT_ULL(60));
+ }
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
new file mode 100644
index 000000000..0bcf3e559
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -0,0 +1,3463 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+#include "npc.h"
+#include "cgx.h"
+#include "npc_profile.h"
+#include "rvu_npc_hash.h"
+
+#define RSVD_MCAM_ENTRIES_PER_PF 3 /* Broadcast, Promisc and AllMulticast */
+#define RSVD_MCAM_ENTRIES_PER_NIXLF 1 /* Ucast for LFs */
+
+#define NPC_PARSE_RESULT_DMAC_OFFSET 8
+#define NPC_HW_TSTAMP_OFFSET 8ULL
+#define NPC_KEX_CHAN_MASK 0xFFFULL
+#define NPC_KEX_PF_FUNC_MASK 0xFFFFULL
+
+#define ALIGN_8B_CEIL(__a) (((__a) + 7) & (-8))
+
+static const char def_pfl_name[] = "default";
+
+static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 pcifunc);
+static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam,
+ u16 pcifunc);
+
+bool is_npc_intf_tx(u8 intf)
+{
+ return !!(intf & 0x1);
+}
+
+bool is_npc_intf_rx(u8 intf)
+{
+ return !(intf & 0x1);
+}
+
+bool is_npc_interface_valid(struct rvu *rvu, u8 intf)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ return intf < hw->npc_intfs;
+}
+
+int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena)
+{
+ /* Due to a HW issue in these silicon versions, parse nibble enable
+ * configuration has to be identical for both Rx and Tx interfaces.
+ */
+ if (is_rvu_96xx_B0(rvu))
+ return nibble_ena;
+ return 0;
+}
+
+static int npc_mcam_verify_pf_func(struct rvu *rvu,
+ struct mcam_entry *entry_data, u8 intf,
+ u16 pcifunc)
+{
+ u16 pf_func, pf_func_mask;
+
+ if (is_npc_intf_rx(intf))
+ return 0;
+
+ pf_func_mask = (entry_data->kw_mask[0] >> 32) &
+ NPC_KEX_PF_FUNC_MASK;
+ pf_func = (entry_data->kw[0] >> 32) & NPC_KEX_PF_FUNC_MASK;
+
+ pf_func = be16_to_cpu((__force __be16)pf_func);
+ if (pf_func_mask != NPC_KEX_PF_FUNC_MASK ||
+ ((pf_func & ~RVU_PFVF_FUNC_MASK) !=
+ (pcifunc & ~RVU_PFVF_FUNC_MASK)))
+ return -EINVAL;
+
+ return 0;
+}
+
+void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf)
+{
+ int blkaddr;
+ u64 val = 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Config CPI base for the PKIND */
+ val = pkind | 1ULL << 62;
+ rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_CPI_DEFX(pkind, 0), val);
+}
+
+int rvu_npc_get_pkind(struct rvu *rvu, u16 pf)
+{
+ struct npc_pkind *pkind = &rvu->hw->pkind;
+ u32 map;
+ int i;
+
+ for (i = 0; i < pkind->rsrc.max; i++) {
+ map = pkind->pfchan_map[i];
+ if (((map >> 16) & 0x3F) == pf)
+ return i;
+ }
+ return -1;
+}
+
+#define NPC_AF_ACTION0_PTR_ADVANCE GENMASK_ULL(27, 20)
+
+int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool enable)
+{
+ int pkind, blkaddr;
+ u64 val;
+
+ pkind = rvu_npc_get_pkind(rvu, pf);
+ if (pkind < 0) {
+ dev_err(rvu->dev, "%s: pkind not mapped\n", __func__);
+ return -EINVAL;
+ }
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, pcifunc);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -EINVAL;
+ }
+
+ val = rvu_read64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind));
+ val &= ~NPC_AF_ACTION0_PTR_ADVANCE;
+ /* If timestamp is enabled then configure NPC to shift 8 bytes */
+ if (enable)
+ val |= FIELD_PREP(NPC_AF_ACTION0_PTR_ADVANCE,
+ NPC_HW_TSTAMP_OFFSET);
+ rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind), val);
+
+ return 0;
+}
+
+static int npc_get_ucast_mcam_index(struct npc_mcam *mcam, u16 pcifunc,
+ int nixlf)
+{
+ struct rvu_hwinfo *hw = container_of(mcam, struct rvu_hwinfo, mcam);
+ struct rvu *rvu = hw->rvu;
+ int blkaddr = 0, max = 0;
+ struct rvu_block *block;
+ struct rvu_pfvf *pfvf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ /* Given a PF/VF and NIX LF number calculate the unicast mcam
+ * entry index based on the NIX block assigned to the PF/VF.
+ */
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ while (blkaddr) {
+ if (pfvf->nix_blkaddr == blkaddr)
+ break;
+ block = &rvu->hw->block[blkaddr];
+ max += block->lf.max;
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ }
+
+ return mcam->nixlf_offset + (max + nixlf) * RSVD_MCAM_ENTRIES_PER_NIXLF;
+}
+
+int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
+ u16 pcifunc, int nixlf, int type)
+{
+ int pf = rvu_get_pf(pcifunc);
+ int index;
+
+ /* Check if this is for a PF */
+ if (pf && !(pcifunc & RVU_PFVF_FUNC_MASK)) {
+ /* Reserved entries exclude PF0 */
+ pf--;
+ index = mcam->pf_offset + (pf * RSVD_MCAM_ENTRIES_PER_PF);
+ /* Broadcast address matching entry should be first so
+ * that the packet can be replicated to all VFs.
+ */
+ if (type == NIXLF_BCAST_ENTRY)
+ return index;
+ else if (type == NIXLF_ALLMULTI_ENTRY)
+ return index + 1;
+ else if (type == NIXLF_PROMISC_ENTRY)
+ return index + 2;
+ }
+
+ return npc_get_ucast_mcam_index(mcam, pcifunc, nixlf);
+}
+
+int npc_get_bank(struct npc_mcam *mcam, int index)
+{
+ int bank = index / mcam->banksize;
+
+ /* 0,1 & 2,3 banks are combined for this keysize */
+ if (mcam->keysize == NPC_MCAM_KEY_X2)
+ return bank ? 2 : 0;
+
+ return bank;
+}
+
+bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index)
+{
+ int bank = npc_get_bank(mcam, index);
+ u64 cfg;
+
+ index &= (mcam->banksize - 1);
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(index, bank));
+ return (cfg & 1);
+}
+
+void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index, bool enable)
+{
+ int bank = npc_get_bank(mcam, index);
+ int actbank = bank;
+
+ index &= (mcam->banksize - 1);
+ for (; bank < (actbank + mcam->banks_per_entry); bank++) {
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CFG(index, bank),
+ enable ? 1 : 0);
+ }
+}
+
+static void npc_clear_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index)
+{
+ int bank = npc_get_bank(mcam, index);
+ int actbank = bank;
+
+ index &= (mcam->banksize - 1);
+ for (; bank < (actbank + mcam->banks_per_entry); bank++) {
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1), 0);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0), 0);
+
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 1), 0);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 0), 0);
+
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 1), 0);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), 0);
+ }
+}
+
+static void npc_get_keyword(struct mcam_entry *entry, int idx,
+ u64 *cam0, u64 *cam1)
+{
+ u64 kw_mask = 0x00;
+
+#define CAM_MASK(n) (BIT_ULL(n) - 1)
+
+ /* 0, 2, 4, 6 indices refer to BANKX_CAMX_W0 and
+ * 1, 3, 5, 7 indices refer to BANKX_CAMX_W1.
+ *
+ * Also, only 48 bits of BANKX_CAMX_W1 are valid.
+ */
+ switch (idx) {
+ case 0:
+ /* BANK(X)_CAM_W0<63:0> = MCAM_KEY[KW0]<63:0> */
+ *cam1 = entry->kw[0];
+ kw_mask = entry->kw_mask[0];
+ break;
+ case 1:
+ /* BANK(X)_CAM_W1<47:0> = MCAM_KEY[KW1]<47:0> */
+ *cam1 = entry->kw[1] & CAM_MASK(48);
+ kw_mask = entry->kw_mask[1] & CAM_MASK(48);
+ break;
+ case 2:
+ /* BANK(X + 1)_CAM_W0<15:0> = MCAM_KEY[KW1]<63:48>
+ * BANK(X + 1)_CAM_W0<63:16> = MCAM_KEY[KW2]<47:0>
+ */
+ *cam1 = (entry->kw[1] >> 48) & CAM_MASK(16);
+ *cam1 |= ((entry->kw[2] & CAM_MASK(48)) << 16);
+ kw_mask = (entry->kw_mask[1] >> 48) & CAM_MASK(16);
+ kw_mask |= ((entry->kw_mask[2] & CAM_MASK(48)) << 16);
+ break;
+ case 3:
+ /* BANK(X + 1)_CAM_W1<15:0> = MCAM_KEY[KW2]<63:48>
+ * BANK(X + 1)_CAM_W1<47:16> = MCAM_KEY[KW3]<31:0>
+ */
+ *cam1 = (entry->kw[2] >> 48) & CAM_MASK(16);
+ *cam1 |= ((entry->kw[3] & CAM_MASK(32)) << 16);
+ kw_mask = (entry->kw_mask[2] >> 48) & CAM_MASK(16);
+ kw_mask |= ((entry->kw_mask[3] & CAM_MASK(32)) << 16);
+ break;
+ case 4:
+ /* BANK(X + 2)_CAM_W0<31:0> = MCAM_KEY[KW3]<63:32>
+ * BANK(X + 2)_CAM_W0<63:32> = MCAM_KEY[KW4]<31:0>
+ */
+ *cam1 = (entry->kw[3] >> 32) & CAM_MASK(32);
+ *cam1 |= ((entry->kw[4] & CAM_MASK(32)) << 32);
+ kw_mask = (entry->kw_mask[3] >> 32) & CAM_MASK(32);
+ kw_mask |= ((entry->kw_mask[4] & CAM_MASK(32)) << 32);
+ break;
+ case 5:
+ /* BANK(X + 2)_CAM_W1<31:0> = MCAM_KEY[KW4]<63:32>
+ * BANK(X + 2)_CAM_W1<47:32> = MCAM_KEY[KW5]<15:0>
+ */
+ *cam1 = (entry->kw[4] >> 32) & CAM_MASK(32);
+ *cam1 |= ((entry->kw[5] & CAM_MASK(16)) << 32);
+ kw_mask = (entry->kw_mask[4] >> 32) & CAM_MASK(32);
+ kw_mask |= ((entry->kw_mask[5] & CAM_MASK(16)) << 32);
+ break;
+ case 6:
+ /* BANK(X + 3)_CAM_W0<47:0> = MCAM_KEY[KW5]<63:16>
+ * BANK(X + 3)_CAM_W0<63:48> = MCAM_KEY[KW6]<15:0>
+ */
+ *cam1 = (entry->kw[5] >> 16) & CAM_MASK(48);
+ *cam1 |= ((entry->kw[6] & CAM_MASK(16)) << 48);
+ kw_mask = (entry->kw_mask[5] >> 16) & CAM_MASK(48);
+ kw_mask |= ((entry->kw_mask[6] & CAM_MASK(16)) << 48);
+ break;
+ case 7:
+ /* BANK(X + 3)_CAM_W1<47:0> = MCAM_KEY[KW6]<63:16> */
+ *cam1 = (entry->kw[6] >> 16) & CAM_MASK(48);
+ kw_mask = (entry->kw_mask[6] >> 16) & CAM_MASK(48);
+ break;
+ }
+
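+	/* Bits cleared in kw_mask must become don't-care. Per the CAM
+	 * encoding used in npc_config_mcam_entry(), don't-care means both
+	 * CAM1<n> and CAM0<n> are zero, which the masking below produces.
+	 */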
+ *cam1 &= kw_mask;
+ *cam0 = ~*cam1 & kw_mask;
+}
+
+static void npc_fill_entryword(struct mcam_entry *entry, int idx,
+ u64 cam0, u64 cam1)
+{
+ /* Similar to npc_get_keyword, but fills mcam_entry structure from
+ * CAM registers.
+ */
+ switch (idx) {
+ case 0:
+ entry->kw[0] = cam1;
+ entry->kw_mask[0] = cam1 ^ cam0;
+ break;
+ case 1:
+ entry->kw[1] = cam1;
+ entry->kw_mask[1] = cam1 ^ cam0;
+ break;
+ case 2:
+ entry->kw[1] |= (cam1 & CAM_MASK(16)) << 48;
+ entry->kw[2] = (cam1 >> 16) & CAM_MASK(48);
+ entry->kw_mask[1] |= ((cam1 ^ cam0) & CAM_MASK(16)) << 48;
+ entry->kw_mask[2] = ((cam1 ^ cam0) >> 16) & CAM_MASK(48);
+ break;
+ case 3:
+ entry->kw[2] |= (cam1 & CAM_MASK(16)) << 48;
+ entry->kw[3] = (cam1 >> 16) & CAM_MASK(32);
+ entry->kw_mask[2] |= ((cam1 ^ cam0) & CAM_MASK(16)) << 48;
+ entry->kw_mask[3] = ((cam1 ^ cam0) >> 16) & CAM_MASK(32);
+ break;
+ case 4:
+ entry->kw[3] |= (cam1 & CAM_MASK(32)) << 32;
+ entry->kw[4] = (cam1 >> 32) & CAM_MASK(32);
+ entry->kw_mask[3] |= ((cam1 ^ cam0) & CAM_MASK(32)) << 32;
+ entry->kw_mask[4] = ((cam1 ^ cam0) >> 32) & CAM_MASK(32);
+ break;
+ case 5:
+ entry->kw[4] |= (cam1 & CAM_MASK(32)) << 32;
+ entry->kw[5] = (cam1 >> 32) & CAM_MASK(16);
+ entry->kw_mask[4] |= ((cam1 ^ cam0) & CAM_MASK(32)) << 32;
+ entry->kw_mask[5] = ((cam1 ^ cam0) >> 32) & CAM_MASK(16);
+ break;
+ case 6:
+ entry->kw[5] |= (cam1 & CAM_MASK(48)) << 16;
+ entry->kw[6] = (cam1 >> 48) & CAM_MASK(16);
+ entry->kw_mask[5] |= ((cam1 ^ cam0) & CAM_MASK(48)) << 16;
+ entry->kw_mask[6] = ((cam1 ^ cam0) >> 48) & CAM_MASK(16);
+ break;
+ case 7:
+ entry->kw[6] |= (cam1 & CAM_MASK(48)) << 16;
+ entry->kw_mask[6] |= ((cam1 ^ cam0) & CAM_MASK(48)) << 16;
+ break;
+ }
+}
+
+static u64 npc_get_default_entry_action(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 pf_func)
+{
+ int bank, nixlf, index;
+
+ /* get ucast entry rule entry index */
+ if (nix_get_nixlf(rvu, pf_func, &nixlf, NULL)) {
+ dev_err(rvu->dev, "%s: nixlf not attached to pcifunc:0x%x\n",
+ __func__, pf_func);
+ /* Action 0 is drop */
+ return 0;
+ }
+
+ index = npc_get_nixlf_mcam_index(mcam, pf_func, nixlf,
+ NIXLF_UCAST_ENTRY);
+ bank = npc_get_bank(mcam, index);
+ index &= (mcam->banksize - 1);
+
+ return rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
+}
+
+static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index, struct mcam_entry *entry,
+ bool *enable)
+{
+ struct rvu_npc_mcam_rule *rule;
+ u16 owner, target_func;
+ struct rvu_pfvf *pfvf;
+ u64 rx_action;
+
+ owner = mcam->entry2pfvf_map[index];
+ target_func = (entry->action >> 4) & 0xffff;
+ /* do nothing when target is LBK/PF or owner is not PF */
+ if (is_pffunc_af(owner) || is_afvf(target_func) ||
+ (owner & RVU_PFVF_FUNC_MASK) ||
+ !(target_func & RVU_PFVF_FUNC_MASK))
+ return;
+
+ /* save entry2target_pffunc */
+ pfvf = rvu_get_pfvf(rvu, target_func);
+ mcam->entry2target_pffunc[index] = target_func;
+
+ /* don't enable rule when nixlf not attached or initialized */
+ if (!(is_nixlf_attached(rvu, target_func) &&
+ test_bit(NIXLF_INITIALIZED, &pfvf->flags)))
+ *enable = false;
+
+	/* fix up not needed for the rules added by user (ntuple filters) */
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (rule->entry == index)
+ return;
+ }
+
+ /* copy VF default entry action to the VF mcam entry */
+ rx_action = npc_get_default_entry_action(rvu, mcam, blkaddr,
+ target_func);
+ if (rx_action)
+ entry->action = rx_action;
+}
+
+static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index, u8 intf,
+ struct mcam_entry *entry, bool enable)
+{
+ int bank = npc_get_bank(mcam, index);
+ int kw = 0, actbank, actindex;
+ u8 tx_intf_mask = ~intf & 0x3;
+ u8 tx_intf = intf;
+ u64 cam0, cam1;
+
+ actbank = bank; /* Save bank id, to set action later on */
+ actindex = index;
+ index &= (mcam->banksize - 1);
+
+ /* Disable before mcam entry update */
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, false);
+
+ /* Clear mcam entry to avoid writes being suppressed by NPC */
+ npc_clear_mcam_entry(rvu, mcam, blkaddr, actindex);
+
+ /* CAM1 takes the comparison value and
+ * CAM0 specifies match for a bit in key being '0' or '1' or 'dontcare'.
+ * CAM1<n> = 0 & CAM0<n> = 1 => match if key<n> = 0
+ * CAM1<n> = 1 & CAM0<n> = 0 => match if key<n> = 1
+ * CAM1<n> = 0 & CAM0<n> = 0 => always match i.e dontcare.
+ */
+ for (; bank < (actbank + mcam->banks_per_entry); bank++, kw = kw + 2) {
+ /* Interface should be set in all banks */
+ if (is_npc_intf_tx(intf)) {
+ /* Last bit must be set and rest don't care
+ * for TX interfaces
+ */
+ tx_intf_mask = 0x1;
+ tx_intf = intf & tx_intf_mask;
+ tx_intf_mask = ~tx_intf & tx_intf_mask;
+ }
+
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1),
+ tx_intf);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0),
+ tx_intf_mask);
+
+ /* Set the match key */
+ npc_get_keyword(entry, kw, &cam0, &cam1);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 1), cam1);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 0), cam0);
+
+ npc_get_keyword(entry, kw + 1, &cam0, &cam1);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 1), cam1);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), cam0);
+ }
+
+ /* PF installing VF rule */
+ if (is_npc_intf_rx(intf) && actindex < mcam->bmap_entries)
+ npc_fixup_vf_rule(rvu, mcam, blkaddr, actindex, entry, &enable);
+
+ /* Set 'action' */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, actbank), entry->action);
+
+ /* Set TAG 'action' */
+ rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_TAG_ACT(index, actbank),
+ entry->vtag_action);
+
+ /* Enable the entry */
+ if (enable)
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, true);
+}
+
+void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 src,
+ struct mcam_entry *entry, u8 *intf, u8 *ena)
+{
+ int sbank = npc_get_bank(mcam, src);
+ int bank, kw = 0;
+ u64 cam0, cam1;
+
+ src &= (mcam->banksize - 1);
+ bank = sbank;
+
+ for (; bank < (sbank + mcam->banks_per_entry); bank++, kw = kw + 2) {
+ cam1 = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(src, bank, 1));
+ cam0 = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(src, bank, 0));
+ npc_fill_entryword(entry, kw, cam0, cam1);
+
+ cam1 = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W1(src, bank, 1));
+ cam0 = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W1(src, bank, 0));
+ npc_fill_entryword(entry, kw + 1, cam0, cam1);
+ }
+
+ entry->action = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(src, sbank));
+ entry->vtag_action =
+ rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_TAG_ACT(src, sbank));
+ *intf = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_INTF(src, sbank, 1)) & 3;
+ *ena = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CFG(src, sbank)) & 1;
+}
+
+static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 src, u16 dest)
+{
+ int dbank = npc_get_bank(mcam, dest);
+ int sbank = npc_get_bank(mcam, src);
+ u64 cfg, sreg, dreg;
+ int bank, i;
+
+ src &= (mcam->banksize - 1);
+ dest &= (mcam->banksize - 1);
+
+ /* Copy INTF's, W0's, W1's CAM0 and CAM1 configuration */
+ for (bank = 0; bank < mcam->banks_per_entry; bank++) {
+ sreg = NPC_AF_MCAMEX_BANKX_CAMX_INTF(src, sbank + bank, 0);
+ dreg = NPC_AF_MCAMEX_BANKX_CAMX_INTF(dest, dbank + bank, 0);
+ for (i = 0; i < 6; i++) {
+ cfg = rvu_read64(rvu, blkaddr, sreg + (i * 8));
+ rvu_write64(rvu, blkaddr, dreg + (i * 8), cfg);
+ }
+ }
+
+ /* Copy action */
+ cfg = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(src, sbank));
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(dest, dbank), cfg);
+
+ /* Copy TAG action */
+ cfg = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_TAG_ACT(src, sbank));
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_TAG_ACT(dest, dbank), cfg);
+
+ /* Enable or disable */
+ cfg = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CFG(src, sbank));
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CFG(dest, dbank), cfg);
+}
+
+static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index)
+{
+ int bank = npc_get_bank(mcam, index);
+
+ index &= (mcam->banksize - 1);
+ return rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
+}
+
+void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, u64 chan, u8 *mac_addr)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct nix_rx_action action = { 0 };
+ int blkaddr, index;
+
+ /* AF's and SDP VFs work in promiscuous mode */
+ if (is_afvf(pcifunc) || is_sdp_vf(pcifunc))
+ return;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Ucast rule should not be installed if DMAC
+ * extraction is not supported by the profile.
+ */
+ if (!npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC), pfvf->nix_rx_intf))
+ return;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+
+	/* Don't change the action if the entry is already enabled.
+	 * Otherwise the RSS action may get overwritten.
+ */
+ if (is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) {
+ *(u64 *)&action = npc_get_mcam_action(rvu, mcam,
+ blkaddr, index);
+ } else {
+ action.op = NIX_RX_ACTIONOP_UCAST;
+ action.pf_func = pcifunc;
+ }
+
+ req.default_rule = 1;
+ ether_addr_copy(req.packet.dmac, mac_addr);
+ eth_broadcast_addr((u8 *)&req.mask.dmac);
+ req.features = BIT_ULL(NPC_DMAC);
+ req.channel = chan;
+ req.chan_mask = 0xFFFU;
+ req.intf = pfvf->nix_rx_intf;
+ req.op = action.op;
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = action.pf_func;
+ req.index = action.index;
+ req.match_id = action.match_id;
+ req.flow_key_alg = action.flow_key_alg;
+
+ rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
+void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, u64 chan, u8 chan_cnt)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int blkaddr, ucast_idx, index;
+ struct nix_rx_action action = { 0 };
+ u64 relaxed_mask;
+ u8 flow_key_alg;
+
+ if (!hw->cap.nix_rx_multicast && is_cgx_vf(rvu, pcifunc))
+ return;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_PROMISC_ENTRY);
+
+ if (is_cgx_vf(rvu, pcifunc))
+ index = npc_get_nixlf_mcam_index(mcam,
+ pcifunc & ~RVU_PFVF_FUNC_MASK,
+ nixlf, NIXLF_PROMISC_ENTRY);
+
+ /* If the corresponding PF's ucast action is RSS,
+ * use the same action for promisc also
+ */
+ ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+ if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx))
+ *(u64 *)&action = npc_get_mcam_action(rvu, mcam,
+ blkaddr, ucast_idx);
+
+ if (action.op != NIX_RX_ACTIONOP_RSS) {
+ *(u64 *)&action = 0;
+ action.op = NIX_RX_ACTIONOP_UCAST;
+ }
+
+ flow_key_alg = action.flow_key_alg;
+
+ /* RX_ACTION set to MCAST for CGX PF's */
+ if (hw->cap.nix_rx_multicast && pfvf->use_mce_list &&
+ is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
+ *(u64 *)&action = 0;
+ action.op = NIX_RX_ACTIONOP_MCAST;
+ pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
+ action.index = pfvf->promisc_mce_idx;
+ }
+
+	/* For cn10k the upper two bits of the channel number are the CPT
+	 * channel number. By masking out these bits in the MCAM entry, the
+	 * same entry used for NIX will also allow packets received from
+	 * CPT to be parsed.
+	 */
+	if (!is_rvu_otx2(rvu))
+		req.chan_mask = NIX_CHAN_CPT_X2P_MASK;
+	else
+		req.chan_mask = 0xFFFU;
+
+ if (chan_cnt > 1) {
+ if (!is_power_of_2(chan_cnt)) {
+ dev_err(rvu->dev,
+ "%s: channel count more than 1, must be power of 2\n", __func__);
+ return;
+ }
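+		/* Mask off the low ilog2(chan_cnt) channel bits so that one
+		 * MCAM entry matches the whole aligned block of channels,
+		 * e.g. chan_cnt of 4 leaves the two LSBs as don't-care.
+		 */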
+ relaxed_mask = GENMASK_ULL(BITS_PER_LONG_LONG - 1,
+ ilog2(chan_cnt));
+ req.chan_mask &= relaxed_mask;
+ }
+
+ req.channel = chan;
+ req.intf = pfvf->nix_rx_intf;
+ req.entry = index;
+ req.op = action.op;
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = pcifunc;
+ req.index = action.index;
+ req.match_id = action.match_id;
+ req.flow_key_alg = flow_key_alg;
+
+ rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
+void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, bool enable)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, index;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Get 'pcifunc' of PF device */
+ pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_PROMISC_ENTRY);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+}
+
+void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, u64 chan)
+{
+ struct rvu_pfvf *pfvf;
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int blkaddr, index;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Skip LBK VFs */
+ if (is_afvf(pcifunc))
+ return;
+
+ /* If pkt replication is not supported,
+ * then only PF is allowed to add a bcast match entry.
+ */
+ if (!hw->cap.nix_rx_multicast && is_vf(pcifunc))
+ return;
+
+ /* Get 'pcifunc' of PF device */
+ pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ /* Bcast rule should not be installed if both DMAC
+ * and LXMB extraction is not supported by the profile.
+ */
+ if (!npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC), pfvf->nix_rx_intf) &&
+ !npc_is_feature_supported(rvu, BIT_ULL(NPC_LXMB), pfvf->nix_rx_intf))
+ return;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_BCAST_ENTRY);
+
+ if (!hw->cap.nix_rx_multicast) {
+ /* Early silicon doesn't support pkt replication,
+ * so install entry with UCAST action, so that PF
+ * receives all broadcast packets.
+ */
+ req.op = NIX_RX_ACTIONOP_UCAST;
+ } else {
+ req.op = NIX_RX_ACTIONOP_MCAST;
+ req.index = pfvf->bcast_mce_idx;
+ }
+
+ eth_broadcast_addr((u8 *)&req.packet.dmac);
+ eth_broadcast_addr((u8 *)&req.mask.dmac);
+ req.features = BIT_ULL(NPC_DMAC);
+ req.channel = chan;
+ req.chan_mask = 0xFFFU;
+ req.intf = pfvf->nix_rx_intf;
+ req.entry = index;
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = pcifunc;
+
+ rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
+void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ bool enable)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, index;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Get 'pcifunc' of PF device */
+ pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
+ NIXLF_BCAST_ENTRY);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+}
+
+void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ u64 chan)
+{
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int blkaddr, ucast_idx, index;
+ u8 mac_addr[ETH_ALEN] = { 0 };
+ struct nix_rx_action action = { 0 };
+ struct rvu_pfvf *pfvf;
+ u8 flow_key_alg;
+ u16 vf_func;
+
+ /* Only CGX PF/VF can add allmulticast entry */
+ if (is_afvf(pcifunc) && is_sdp_vf(pcifunc))
+ return;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Get 'pcifunc' of PF device */
+ vf_func = pcifunc & RVU_PFVF_FUNC_MASK;
+ pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ /* Mcast rule should not be installed if both DMAC
+ * and LXMB extraction is not supported by the profile.
+ */
+ if (!npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC), pfvf->nix_rx_intf) &&
+ !npc_is_feature_supported(rvu, BIT_ULL(NPC_LXMB), pfvf->nix_rx_intf))
+ return;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_ALLMULTI_ENTRY);
+
+ /* If the corresponding PF's ucast action is RSS,
+ * use the same action for multicast entry also
+ */
+ ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+ if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx))
+ *(u64 *)&action = npc_get_mcam_action(rvu, mcam,
+ blkaddr, ucast_idx);
+
+ flow_key_alg = action.flow_key_alg;
+ if (action.op != NIX_RX_ACTIONOP_RSS) {
+ *(u64 *)&action = 0;
+ action.op = NIX_RX_ACTIONOP_UCAST;
+ action.pf_func = pcifunc;
+ }
+
+	/* RX_ACTION set to MCAST for CGX PFs */
+ if (hw->cap.nix_rx_multicast && pfvf->use_mce_list) {
+ *(u64 *)&action = 0;
+ action.op = NIX_RX_ACTIONOP_MCAST;
+ action.index = pfvf->mcast_mce_idx;
+ }
+
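+	/* Match only the multicast (group) bit of the DMAC; with the mask
+	 * set to 01:00:00:00:00:00 this entry catches all multicast frames.
+	 */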
+ mac_addr[0] = 0x01; /* LSB bit of 1st byte in DMAC */
+ ether_addr_copy(req.packet.dmac, mac_addr);
+ ether_addr_copy(req.mask.dmac, mac_addr);
+ req.features = BIT_ULL(NPC_DMAC);
+
+	/* For cn10k the upper two bits of the channel number are the
+	 * CPT channel number. With these bits masked out in the MCAM
+	 * entry, the same entry used for NIX also matches packets
+	 * received from CPT for parsing.
+	 */
+ if (!is_rvu_otx2(rvu))
+ req.chan_mask = NIX_CHAN_CPT_X2P_MASK;
+ else
+ req.chan_mask = 0xFFFU;
+
+ req.channel = chan;
+ req.intf = pfvf->nix_rx_intf;
+ req.entry = index;
+ req.op = action.op;
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = pcifunc | vf_func;
+ req.index = action.index;
+ req.match_id = action.match_id;
+ req.flow_key_alg = flow_key_alg;
+
+ rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
+void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ bool enable)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, index;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Get 'pcifunc' of PF device */
+ pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
+ NIXLF_ALLMULTI_ENTRY);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+}
+
+static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 pcifunc, u64 rx_action)
+{
+ int actindex, index, bank, entry;
+ struct rvu_npc_mcam_rule *rule;
+ bool enable, update;
+
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ return;
+
+ mutex_lock(&mcam->lock);
+ for (index = 0; index < mcam->bmap_entries; index++) {
+ if (mcam->entry2target_pffunc[index] == pcifunc) {
+ update = true;
+ /* update not needed for the rules added via ntuple filters */
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (rule->entry == index)
+ update = false;
+ }
+ if (!update)
+ continue;
+ bank = npc_get_bank(mcam, index);
+ actindex = index;
+ entry = index & (mcam->banksize - 1);
+
+ /* read vf flow entry enable status */
+ enable = is_mcam_entry_enabled(rvu, mcam, blkaddr,
+ actindex);
+ /* disable before mcam entry update */
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex,
+ false);
+ /* update 'action' */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(entry, bank),
+ rx_action);
+ if (enable)
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ actindex, true);
+ }
+ }
+ mutex_unlock(&mcam->lock);
+}
+
+static void npc_update_rx_action_with_alg_idx(struct rvu *rvu, struct nix_rx_action action,
+ struct rvu_pfvf *pfvf, int mcam_index, int blkaddr,
+ int alg_idx)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int bank, op_rss;
+
+ if (!is_mcam_entry_enabled(rvu, mcam, blkaddr, mcam_index))
+ return;
+
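+	/* 'op_rss' is true when the entry was installed with a UCAST/RSS
+	 * action (no MCE list in use): the caller's RSS action is then
+	 * written as-is. When false the entry carries an MCAST action and
+	 * only its RSS algorithm index is refreshed.
+	 */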
+ op_rss = (!hw->cap.nix_rx_multicast || !pfvf->use_mce_list);
+
+ bank = npc_get_bank(mcam, mcam_index);
+ mcam_index &= (mcam->banksize - 1);
+
+ /* If Rx action is MCAST update only RSS algorithm index */
+ if (!op_rss) {
+ *(u64 *)&action = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(mcam_index, bank));
+
+ action.flow_key_alg = alg_idx;
+ }
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(mcam_index, bank), *(u64 *)&action);
+}
+
+void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
+ int group, int alg_idx, int mcam_index)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct nix_rx_action action;
+ int blkaddr, index, bank;
+ struct rvu_pfvf *pfvf;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Check if this is for reserved default entry */
+ if (mcam_index < 0) {
+ if (group != DEFAULT_RSS_CONTEXT_GROUP)
+ return;
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+ } else {
+ /* TODO: validate this mcam index */
+ index = mcam_index;
+ }
+
+ if (index >= mcam->total_entries)
+ return;
+
+ bank = npc_get_bank(mcam, index);
+ index &= (mcam->banksize - 1);
+
+ *(u64 *)&action = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
+ /* Ignore if no action was set earlier */
+ if (!*(u64 *)&action)
+ return;
+
+ action.op = NIX_RX_ACTIONOP_RSS;
+ action.pf_func = pcifunc;
+ action.index = group;
+ action.flow_key_alg = alg_idx;
+
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, bank), *(u64 *)&action);
+
+ /* update the VF flow rule action with the VF default entry action */
+ if (mcam_index < 0)
+ npc_update_vf_flow_entry(rvu, mcam, blkaddr, pcifunc,
+ *(u64 *)&action);
+
+ /* update the action change in default rule */
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (pfvf->def_ucast_rule)
+ pfvf->def_ucast_rule->rx_action = action;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_PROMISC_ENTRY);
+
+ /* If PF's promiscuous entry is enabled,
+ * Set RSS action for that entry as well
+ */
+ npc_update_rx_action_with_alg_idx(rvu, action, pfvf, index, blkaddr,
+ alg_idx);
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_ALLMULTI_ENTRY);
+ /* If PF's allmulti entry is enabled,
+ * Set RSS action for that entry as well
+ */
+ npc_update_rx_action_with_alg_idx(rvu, action, pfvf, index, blkaddr,
+ alg_idx);
+}
+
+void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, int type, bool enable)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_mce_list *mce_list;
+ int index, blkaddr, mce_idx;
+ struct rvu_pfvf *pfvf;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc & ~RVU_PFVF_FUNC_MASK,
+ nixlf, type);
+
+	/* Enable/disable the MCAM entry directly when packet replication is not supported by hw */
+ if (!hw->cap.nix_rx_multicast && !is_vf(pcifunc)) {
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+ return;
+ }
+
+	/* Return in case the MCE list is not enabled */
+ pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
+ if (hw->cap.nix_rx_multicast && is_vf(pcifunc) &&
+ type != NIXLF_BCAST_ENTRY && !pfvf->use_mce_list)
+ return;
+
+ nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);
+
+ nix_update_mce_list(rvu, pcifunc, mce_list,
+ mce_idx, index, enable);
+ if (enable)
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+}
+
+static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
+ int nixlf, bool enable)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int index, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Ucast MCAM match entry of this PF/VF */
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+
+	/* Nothing to do for VFs on platforms where pkt replication
+ * is not supported
+ */
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) && !rvu->hw->cap.nix_rx_multicast)
+ return;
+
+ /* add/delete pf_func to broadcast MCE list */
+ npc_enadis_default_mce_entry(rvu, pcifunc, nixlf,
+ NIXLF_BCAST_ENTRY, enable);
+}
+
+void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+ if (nixlf < 0)
+ return;
+
+ npc_enadis_default_entries(rvu, pcifunc, nixlf, false);
+
+ /* Delete multicast and promisc MCAM entries */
+ npc_enadis_default_mce_entry(rvu, pcifunc, nixlf,
+ NIXLF_ALLMULTI_ENTRY, false);
+ npc_enadis_default_mce_entry(rvu, pcifunc, nixlf,
+ NIXLF_PROMISC_ENTRY, false);
+}
+
+bool rvu_npc_enable_mcam_by_entry_index(struct rvu *rvu, int entry, int intf, bool enable)
+{
+ int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule, *tmp;
+
+ mutex_lock(&mcam->lock);
+
+ list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) {
+ if (rule->intf != intf)
+ continue;
+
+ if (rule->entry != entry)
+ continue;
+
+ rule->enable = enable;
+ mutex_unlock(&mcam->lock);
+
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ entry, enable);
+
+ return true;
+ }
+
+ mutex_unlock(&mcam->lock);
+ return false;
+}
+
+void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+ if (nixlf < 0)
+ return;
+
+ /* Enables only broadcast match entry. Promisc/Allmulti are enabled
+ * in set_rx_mode mbox handler.
+ */
+ npc_enadis_default_entries(rvu, pcifunc, nixlf, true);
+}
+
+void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule, *tmp;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ mutex_lock(&mcam->lock);
+
+ /* Disable MCAM entries directing traffic to this 'pcifunc' */
+ list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) {
+ if (is_npc_intf_rx(rule->intf) &&
+ rule->rx_action.pf_func == pcifunc &&
+ rule->rx_action.op != NIX_RX_ACTIONOP_MCAST) {
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ rule->entry, false);
+ rule->enable = false;
+ /* Indicate that default rule is disabled */
+ if (rule->default_rule) {
+ pfvf->def_ucast_rule = NULL;
+ list_del(&rule->list);
+ kfree(rule);
+ }
+ }
+ }
+
+ mutex_unlock(&mcam->lock);
+
+ npc_mcam_disable_flows(rvu, pcifunc);
+
+ rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
+}
+
+void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule, *tmp;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ mutex_lock(&mcam->lock);
+
+ /* Free all MCAM entries owned by this 'pcifunc' */
+ npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc);
+
+ /* Free all MCAM counters owned by this 'pcifunc' */
+ npc_mcam_free_all_counters(rvu, mcam, pcifunc);
+
+ /* Delete MCAM entries owned by this 'pcifunc' */
+ list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) {
+ if (rule->owner == pcifunc && !rule->default_rule) {
+ list_del(&rule->list);
+ kfree(rule);
+ }
+ }
+
+ mutex_unlock(&mcam->lock);
+
+ rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
+}
+
+static void npc_program_mkex_rx(struct rvu *rvu, int blkaddr,
+ struct npc_mcam_kex *mkex, u8 intf)
+{
+ int lid, lt, ld, fl;
+
+ if (is_npc_intf_tx(intf))
+ return;
+
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf),
+ mkex->keyx_cfg[NIX_INTF_RX]);
+
+ /* Program LDATA */
+ for (lid = 0; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++)
+ SET_KEX_LD(intf, lid, lt, ld,
+ mkex->intf_lid_lt_ld[NIX_INTF_RX]
+ [lid][lt][ld]);
+ }
+ }
+ /* Program LFLAGS */
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ for (fl = 0; fl < NPC_MAX_LFL; fl++)
+ SET_KEX_LDFLAGS(intf, ld, fl,
+ mkex->intf_ld_flags[NIX_INTF_RX]
+ [ld][fl]);
+ }
+}
+
+static void npc_program_mkex_tx(struct rvu *rvu, int blkaddr,
+ struct npc_mcam_kex *mkex, u8 intf)
+{
+ int lid, lt, ld, fl;
+
+ if (is_npc_intf_rx(intf))
+ return;
+
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf),
+ mkex->keyx_cfg[NIX_INTF_TX]);
+
+ /* Program LDATA */
+ for (lid = 0; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++)
+ SET_KEX_LD(intf, lid, lt, ld,
+ mkex->intf_lid_lt_ld[NIX_INTF_TX]
+ [lid][lt][ld]);
+ }
+ }
+ /* Program LFLAGS */
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ for (fl = 0; fl < NPC_MAX_LFL; fl++)
+ SET_KEX_LDFLAGS(intf, ld, fl,
+ mkex->intf_ld_flags[NIX_INTF_TX]
+ [ld][fl]);
+ }
+}
+
+static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr,
+ struct npc_mcam_kex *mkex)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u8 intf;
+ int ld;
+
+ for (ld = 0; ld < NPC_MAX_LD; ld++)
+ rvu_write64(rvu, blkaddr, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld),
+ mkex->kex_ld_flags[ld]);
+
+ for (intf = 0; intf < hw->npc_intfs; intf++) {
+ npc_program_mkex_rx(rvu, blkaddr, mkex, intf);
+ npc_program_mkex_tx(rvu, blkaddr, mkex, intf);
+ }
+
+ /* Programme mkex hash profile */
+ npc_program_mkex_hash(rvu, blkaddr);
+}
+
+static int npc_fwdb_prfl_img_map(struct rvu *rvu, void __iomem **prfl_img_addr,
+ u64 *size)
+{
+ u64 prfl_addr, prfl_sz;
+
+ if (!rvu->fwdata)
+ return -EINVAL;
+
+ prfl_addr = rvu->fwdata->mcam_addr;
+ prfl_sz = rvu->fwdata->mcam_sz;
+
+ if (!prfl_addr || !prfl_sz)
+ return -EINVAL;
+
+ *prfl_img_addr = ioremap_wc(prfl_addr, prfl_sz);
+ if (!(*prfl_img_addr))
+ return -ENOMEM;
+
+ *size = prfl_sz;
+
+ return 0;
+}
+
+/* strtoull of "mkexprof" with base:36 */
+#define MKEX_END_SIGN 0xdeadbeef
+
+static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr,
+ const char *mkex_profile)
+{
+ struct device *dev = &rvu->pdev->dev;
+ struct npc_mcam_kex *mcam_kex;
+ void __iomem *mkex_prfl_addr = NULL;
+ u64 prfl_sz;
+ int ret;
+
+	/* If the user did not select an mkex profile */
+ if (rvu->kpu_fwdata_sz ||
+ !strncmp(mkex_profile, def_pfl_name, MKEX_NAME_LEN))
+ goto program_mkex;
+
+ /* Setting up the mapping for mkex profile image */
+ ret = npc_fwdb_prfl_img_map(rvu, &mkex_prfl_addr, &prfl_sz);
+ if (ret < 0)
+ goto program_mkex;
+
+ mcam_kex = (struct npc_mcam_kex __force *)mkex_prfl_addr;
+
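+	/* Walk the firmware database's array of mkex profiles until the
+	 * end-marker signature, looking for one whose name matches the
+	 * requested profile.
+	 */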
+ while (((s64)prfl_sz > 0) && (mcam_kex->mkex_sign != MKEX_END_SIGN)) {
+ /* Compare with mkex mod_param name string */
+ if (mcam_kex->mkex_sign == MKEX_SIGN &&
+ !strncmp(mcam_kex->name, mkex_profile, MKEX_NAME_LEN)) {
+			/* Due to an erratum (35786) in A0/B0 pass silicon,
+ * parse nibble enable configuration has to be
+ * identical for both Rx and Tx interfaces.
+ */
+ if (!is_rvu_96xx_B0(rvu) ||
+ mcam_kex->keyx_cfg[NIX_INTF_RX] == mcam_kex->keyx_cfg[NIX_INTF_TX])
+ rvu->kpu.mkex = mcam_kex;
+ goto program_mkex;
+ }
+
+ mcam_kex++;
+ prfl_sz -= sizeof(struct npc_mcam_kex);
+ }
+ dev_warn(dev, "Failed to load requested profile: %s\n", mkex_profile);
+
+program_mkex:
+ dev_info(rvu->dev, "Using %s mkex profile\n", rvu->kpu.mkex->name);
+ /* Program selected mkex profile */
+ npc_program_mkex_profile(rvu, blkaddr, rvu->kpu.mkex);
+ if (mkex_prfl_addr)
+ iounmap(mkex_prfl_addr);
+}
+
+static void npc_config_kpuaction(struct rvu *rvu, int blkaddr,
+ const struct npc_kpu_profile_action *kpuaction,
+ int kpu, int entry, bool pkind)
+{
+ struct npc_kpu_action0 action0 = {0};
+ struct npc_kpu_action1 action1 = {0};
+ u64 reg;
+
+ action1.errlev = kpuaction->errlev;
+ action1.errcode = kpuaction->errcode;
+ action1.dp0_offset = kpuaction->dp0_offset;
+ action1.dp1_offset = kpuaction->dp1_offset;
+ action1.dp2_offset = kpuaction->dp2_offset;
+
+ if (pkind)
+ reg = NPC_AF_PKINDX_ACTION1(entry);
+ else
+ reg = NPC_AF_KPUX_ENTRYX_ACTION1(kpu, entry);
+
+ rvu_write64(rvu, blkaddr, reg, *(u64 *)&action1);
+
+ action0.byp_count = kpuaction->bypass_count;
+ action0.capture_ena = kpuaction->cap_ena;
+ action0.parse_done = kpuaction->parse_done;
+ action0.next_state = kpuaction->next_state;
+ action0.capture_lid = kpuaction->lid;
+ action0.capture_ltype = kpuaction->ltype;
+ action0.capture_flags = kpuaction->flags;
+ action0.ptr_advance = kpuaction->ptr_advance;
+ action0.var_len_offset = kpuaction->offset;
+ action0.var_len_mask = kpuaction->mask;
+ action0.var_len_right = kpuaction->right;
+ action0.var_len_shift = kpuaction->shift;
+
+ if (pkind)
+ reg = NPC_AF_PKINDX_ACTION0(entry);
+ else
+ reg = NPC_AF_KPUX_ENTRYX_ACTION0(kpu, entry);
+
+ rvu_write64(rvu, blkaddr, reg, *(u64 *)&action0);
+}
+
+static void npc_config_kpucam(struct rvu *rvu, int blkaddr,
+ const struct npc_kpu_profile_cam *kpucam,
+ int kpu, int entry)
+{
+ struct npc_kpu_cam cam0 = {0};
+ struct npc_kpu_cam cam1 = {0};
+
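+	/* Each KPU CAM entry is a CAM0/CAM1 pair: CAM1 is programmed with
+	 * (data & mask) and CAM0 with (~data & mask), so bit positions
+	 * cleared in both (i.e. masked off) act as don't-care.
+	 */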
+ cam1.state = kpucam->state & kpucam->state_mask;
+ cam1.dp0_data = kpucam->dp0 & kpucam->dp0_mask;
+ cam1.dp1_data = kpucam->dp1 & kpucam->dp1_mask;
+ cam1.dp2_data = kpucam->dp2 & kpucam->dp2_mask;
+
+ cam0.state = ~kpucam->state & kpucam->state_mask;
+ cam0.dp0_data = ~kpucam->dp0 & kpucam->dp0_mask;
+ cam0.dp1_data = ~kpucam->dp1 & kpucam->dp1_mask;
+ cam0.dp2_data = ~kpucam->dp2 & kpucam->dp2_mask;
+
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_KPUX_ENTRYX_CAMX(kpu, entry, 0), *(u64 *)&cam0);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_KPUX_ENTRYX_CAMX(kpu, entry, 1), *(u64 *)&cam1);
+}
+
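+/* Build a KPU entry-disable mask: bits [0..count-1] are cleared (those
+ * entries stay enabled) and all higher bits are set (entries disabled).
+ */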
+static inline u64 enable_mask(int count)
+{
+ return (((count) < 64) ? ~(BIT_ULL(count) - 1) : (0x00ULL));
+}
+
+static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
+ const struct npc_kpu_profile *profile)
+{
+ int entry, num_entries, max_entries;
+ u64 entry_mask;
+
+ if (profile->cam_entries != profile->action_entries) {
+ dev_err(rvu->dev,
+ "KPU%d: CAM and action entries [%d != %d] not equal\n",
+ kpu, profile->cam_entries, profile->action_entries);
+ }
+
+ max_entries = rvu->hw->npc_kpu_entries;
+
+ /* Program CAM match entries for previous KPU extracted data */
+ num_entries = min_t(int, profile->cam_entries, max_entries);
+ for (entry = 0; entry < num_entries; entry++)
+ npc_config_kpucam(rvu, blkaddr,
+ &profile->cam[entry], kpu, entry);
+
+ /* Program this KPU's actions */
+ num_entries = min_t(int, profile->action_entries, max_entries);
+ for (entry = 0; entry < num_entries; entry++)
+ npc_config_kpuaction(rvu, blkaddr, &profile->action[entry],
+ kpu, entry, false);
+
+ /* Enable all programmed entries */
+ num_entries = min_t(int, profile->action_entries, profile->cam_entries);
+ entry_mask = enable_mask(num_entries);
+ /* Disable first KPU_MAX_CST_ENT entries for built-in profile */
+ if (!rvu->kpu.custom)
+ entry_mask |= GENMASK_ULL(KPU_MAX_CST_ENT - 1, 0);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_KPUX_ENTRY_DISX(kpu, 0), entry_mask);
+ if (num_entries > 64) {
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_KPUX_ENTRY_DISX(kpu, 1),
+ enable_mask(num_entries - 64));
+ }
+
+ /* Enable this KPU */
+ rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(kpu), 0x01);
+}
+
+static int npc_prepare_default_kpu(struct npc_kpu_profile_adapter *profile)
+{
+ profile->custom = 0;
+ profile->name = def_pfl_name;
+ profile->version = NPC_KPU_PROFILE_VER;
+ profile->ikpu = ikpu_action_entries;
+ profile->pkinds = ARRAY_SIZE(ikpu_action_entries);
+ profile->kpu = npc_kpu_profiles;
+ profile->kpus = ARRAY_SIZE(npc_kpu_profiles);
+ profile->lt_def = &npc_lt_defaults;
+ profile->mkex = &npc_mkex_default;
+ profile->mkex_hash = &npc_mkex_hash_default;
+
+ return 0;
+}
+
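+/* Parse the custom KPU profile blob loaded from firmware: an
+ * npc_kpu_profile_fwdata header followed, for each KPU, by an
+ * npc_kpu_fwdata record carrying its CAM entries and then the matching
+ * action entries. The parsed entries replace the first entries (up to
+ * KPU_MAX_CST_ENT) of the default profile prepared earlier; any extra
+ * entries are ignored.
+ */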
+static int npc_apply_custom_kpu(struct rvu *rvu,
+ struct npc_kpu_profile_adapter *profile)
+{
+ size_t hdr_sz = sizeof(struct npc_kpu_profile_fwdata), offset = 0;
+ struct npc_kpu_profile_fwdata *fw = rvu->kpu_fwdata;
+ struct npc_kpu_profile_action *action;
+ struct npc_kpu_profile_cam *cam;
+ struct npc_kpu_fwdata *fw_kpu;
+ int entries;
+ u16 kpu, entry;
+
+ if (rvu->kpu_fwdata_sz < hdr_sz) {
+ dev_warn(rvu->dev, "Invalid KPU profile size\n");
+ return -EINVAL;
+ }
+ if (le64_to_cpu(fw->signature) != KPU_SIGN) {
+ dev_warn(rvu->dev, "Invalid KPU profile signature %llx\n",
+ fw->signature);
+ return -EINVAL;
+ }
+	/* Verify that the profile uses a known (supported) structure version */
+ if (NPC_KPU_VER_MAJ(profile->version) >
+ NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER)) {
+ dev_warn(rvu->dev, "Not supported Major version: %d > %d\n",
+ NPC_KPU_VER_MAJ(profile->version),
+ NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER));
+ return -EINVAL;
+ }
+ /* Verify if profile is aligned with the required kernel changes */
+ if (NPC_KPU_VER_MIN(profile->version) <
+ NPC_KPU_VER_MIN(NPC_KPU_PROFILE_VER)) {
+ dev_warn(rvu->dev,
+ "Invalid KPU profile version: %d.%d.%d expected version <= %d.%d.%d\n",
+ NPC_KPU_VER_MAJ(profile->version),
+ NPC_KPU_VER_MIN(profile->version),
+ NPC_KPU_VER_PATCH(profile->version),
+ NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER),
+ NPC_KPU_VER_MIN(NPC_KPU_PROFILE_VER),
+ NPC_KPU_VER_PATCH(NPC_KPU_PROFILE_VER));
+ return -EINVAL;
+ }
+ /* Verify if profile fits the HW */
+ if (fw->kpus > profile->kpus) {
+ dev_warn(rvu->dev, "Not enough KPUs: %d > %ld\n", fw->kpus,
+ profile->kpus);
+ return -EINVAL;
+ }
+
+ profile->custom = 1;
+ profile->name = fw->name;
+ profile->version = le64_to_cpu(fw->version);
+ profile->mkex = &fw->mkex;
+ profile->lt_def = &fw->lt_def;
+
+ for (kpu = 0; kpu < fw->kpus; kpu++) {
+ fw_kpu = (struct npc_kpu_fwdata *)(fw->data + offset);
+ if (fw_kpu->entries > KPU_MAX_CST_ENT)
+ dev_warn(rvu->dev,
+ "Too many custom entries on KPU%d: %d > %d\n",
+ kpu, fw_kpu->entries, KPU_MAX_CST_ENT);
+ entries = min(fw_kpu->entries, KPU_MAX_CST_ENT);
+ cam = (struct npc_kpu_profile_cam *)fw_kpu->data;
+ offset += sizeof(*fw_kpu) + fw_kpu->entries * sizeof(*cam);
+ action = (struct npc_kpu_profile_action *)(fw->data + offset);
+ offset += fw_kpu->entries * sizeof(*action);
+ if (rvu->kpu_fwdata_sz < hdr_sz + offset) {
+ dev_warn(rvu->dev,
+ "Profile size mismatch on KPU%i parsing.\n",
+ kpu + 1);
+ return -EINVAL;
+ }
+ for (entry = 0; entry < entries; entry++) {
+ profile->kpu[kpu].cam[entry] = cam[entry];
+ profile->kpu[kpu].action[entry] = action[entry];
+ }
+ }
+
+ return 0;
+}
+
+static int npc_load_kpu_prfl_img(struct rvu *rvu, void __iomem *prfl_addr,
+ u64 prfl_sz, const char *kpu_profile)
+{
+ struct npc_kpu_profile_fwdata *kpu_data = NULL;
+ int rc = -EINVAL;
+
+ kpu_data = (struct npc_kpu_profile_fwdata __force *)prfl_addr;
+ if (le64_to_cpu(kpu_data->signature) == KPU_SIGN &&
+ !strncmp(kpu_data->name, kpu_profile, KPU_NAME_LEN)) {
+ dev_info(rvu->dev, "Loading KPU profile from firmware db: %s\n",
+ kpu_profile);
+ rvu->kpu_fwdata = kpu_data;
+ rvu->kpu_fwdata_sz = prfl_sz;
+ rvu->kpu_prfl_addr = prfl_addr;
+ rc = 0;
+ }
+
+ return rc;
+}
+
+static int npc_fwdb_detect_load_prfl_img(struct rvu *rvu, uint64_t prfl_sz,
+ const char *kpu_profile)
+{
+ struct npc_coalesced_kpu_prfl *img_data = NULL;
+ int i = 0, rc = -EINVAL;
+ void __iomem *kpu_prfl_addr;
+ u16 offset;
+
+ img_data = (struct npc_coalesced_kpu_prfl __force *)rvu->kpu_prfl_addr;
+ if (le64_to_cpu(img_data->signature) == KPU_SIGN &&
+ !strncmp(img_data->name, kpu_profile, KPU_NAME_LEN)) {
+ /* Loaded profile is a single KPU profile. */
+ rc = npc_load_kpu_prfl_img(rvu, rvu->kpu_prfl_addr,
+ prfl_sz, kpu_profile);
+ goto done;
+ }
+
+	/* Loaded profile is a coalesced image; compute offset of the first KPU profile. */
+ offset = offsetof(struct npc_coalesced_kpu_prfl, prfl_sz) +
+ (img_data->num_prfl * sizeof(uint16_t));
+ /* Check if mapped image is coalesced image. */
+ while (i < img_data->num_prfl) {
+		/* Profile image offsets are rounded up to the next multiple of 8. */
+ offset = ALIGN_8B_CEIL(offset);
+ kpu_prfl_addr = (void __iomem *)((uintptr_t)rvu->kpu_prfl_addr +
+ offset);
+ rc = npc_load_kpu_prfl_img(rvu, kpu_prfl_addr,
+ img_data->prfl_sz[i], kpu_profile);
+ if (!rc)
+ break;
+ /* Calculating offset of profile image based on profile size.*/
+ offset += img_data->prfl_sz[i];
+ i++;
+ }
+done:
+ return rc;
+}
+
+static int npc_load_kpu_profile_fwdb(struct rvu *rvu, const char *kpu_profile)
+{
+ int ret = -EINVAL;
+ u64 prfl_sz;
+
+ /* Setting up the mapping for NPC profile image */
+ ret = npc_fwdb_prfl_img_map(rvu, &rvu->kpu_prfl_addr, &prfl_sz);
+ if (ret < 0)
+ goto done;
+
+ /* Detect if profile is coalesced or single KPU profile and load */
+ ret = npc_fwdb_detect_load_prfl_img(rvu, prfl_sz, kpu_profile);
+ if (ret == 0)
+ goto done;
+
+ /* Cleaning up if KPU profile image from fwdata is not valid. */
+ if (rvu->kpu_prfl_addr) {
+ iounmap(rvu->kpu_prfl_addr);
+ rvu->kpu_prfl_addr = NULL;
+ rvu->kpu_fwdata_sz = 0;
+ rvu->kpu_fwdata = NULL;
+ }
+
+done:
+ return ret;
+}
+
+static void npc_load_kpu_profile(struct rvu *rvu)
+{
+ struct npc_kpu_profile_adapter *profile = &rvu->kpu;
+ const char *kpu_profile = rvu->kpu_pfl_name;
+ const struct firmware *fw = NULL;
+ bool retry_fwdb = false;
+
+	/* If the user did not specify a custom profile */
+ if (!strncmp(kpu_profile, def_pfl_name, KPU_NAME_LEN))
+ goto revert_to_default;
+ /* First prepare default KPU, then we'll customize top entries. */
+ npc_prepare_default_kpu(profile);
+
+	/* Order of precedence for loading the NPC profile (high to low):
+ * Firmware binary in filesystem.
+ * Firmware database method.
+ * Default KPU profile.
+ */
+ if (!request_firmware_direct(&fw, kpu_profile, rvu->dev)) {
+ dev_info(rvu->dev, "Loading KPU profile from firmware: %s\n",
+ kpu_profile);
+ rvu->kpu_fwdata = kzalloc(fw->size, GFP_KERNEL);
+ if (rvu->kpu_fwdata) {
+ memcpy(rvu->kpu_fwdata, fw->data, fw->size);
+ rvu->kpu_fwdata_sz = fw->size;
+ }
+ release_firmware(fw);
+ retry_fwdb = true;
+ goto program_kpu;
+ }
+
+load_image_fwdb:
+ /* Loading the KPU profile using firmware database */
+ if (npc_load_kpu_profile_fwdb(rvu, kpu_profile))
+ goto revert_to_default;
+
+program_kpu:
+ /* Apply profile customization if firmware was loaded. */
+ if (!rvu->kpu_fwdata_sz || npc_apply_custom_kpu(rvu, profile)) {
+		/* If the image from the firmware filesystem fails to load or is
+		 * invalid, retry with the firmware database method.
+		 */
+ if (rvu->kpu_fwdata || rvu->kpu_fwdata_sz) {
+ /* Loading image from firmware database failed. */
+ if (rvu->kpu_prfl_addr) {
+ iounmap(rvu->kpu_prfl_addr);
+ rvu->kpu_prfl_addr = NULL;
+ } else {
+ kfree(rvu->kpu_fwdata);
+ }
+ rvu->kpu_fwdata = NULL;
+ rvu->kpu_fwdata_sz = 0;
+ if (retry_fwdb) {
+ retry_fwdb = false;
+ goto load_image_fwdb;
+ }
+ }
+
+ dev_warn(rvu->dev,
+ "Can't load KPU profile %s. Using default.\n",
+ kpu_profile);
+ kfree(rvu->kpu_fwdata);
+ rvu->kpu_fwdata = NULL;
+ goto revert_to_default;
+ }
+
+ dev_info(rvu->dev, "Using custom profile '%s', version %d.%d.%d\n",
+ profile->name, NPC_KPU_VER_MAJ(profile->version),
+ NPC_KPU_VER_MIN(profile->version),
+ NPC_KPU_VER_PATCH(profile->version));
+
+ return;
+
+revert_to_default:
+ npc_prepare_default_kpu(profile);
+}
+
+static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int num_pkinds, num_kpus, idx;
+
+ /* Disable all KPUs and their entries */
+ for (idx = 0; idx < hw->npc_kpus; idx++) {
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_KPUX_ENTRY_DISX(idx, 0), ~0ULL);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_KPUX_ENTRY_DISX(idx, 1), ~0ULL);
+ rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(idx), 0x00);
+ }
+
+ /* Load and customize KPU profile. */
+ npc_load_kpu_profile(rvu);
+
+ /* First program IKPU profile i.e PKIND configs.
+ * Check HW max count to avoid configuring junk or
+ * writing to unsupported CSR addresses.
+ */
+ num_pkinds = rvu->kpu.pkinds;
+ num_pkinds = min_t(int, hw->npc_pkinds, num_pkinds);
+
+ for (idx = 0; idx < num_pkinds; idx++)
+ npc_config_kpuaction(rvu, blkaddr, &rvu->kpu.ikpu[idx], 0, idx, true);
+
+ /* Program KPU CAM and Action profiles */
+ num_kpus = rvu->kpu.kpus;
+ num_kpus = min_t(int, hw->npc_kpus, num_kpus);
+
+ for (idx = 0; idx < num_kpus; idx++)
+ npc_program_kpu_profile(rvu, blkaddr, idx, &rvu->kpu.kpu[idx]);
+}
+
+static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
+{
+ int nixlf_count = rvu_get_nixlf_count(rvu);
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int rsvd, err;
+ u16 index;
+ int cntr;
+ u64 cfg;
+
+ /* Actual number of MCAM entries vary by entry size */
+ cfg = (rvu_read64(rvu, blkaddr,
+ NPC_AF_INTFX_KEX_CFG(0)) >> 32) & 0x07;
+ mcam->total_entries = (mcam->banks / BIT_ULL(cfg)) * mcam->banksize;
+ mcam->keysize = cfg;
+
+ /* Number of banks combined per MCAM entry */
+ if (cfg == NPC_MCAM_KEY_X4)
+ mcam->banks_per_entry = 4;
+ else if (cfg == NPC_MCAM_KEY_X2)
+ mcam->banks_per_entry = 2;
+ else
+ mcam->banks_per_entry = 1;
+
+	/* Reserve one MCAM entry for each NIX LF to guarantee space
+	 * to install the default DMAC matching rule.
+	 * Also reserve 2 MCAM entries per PF for default channel-based
+	 * matching or 'bcast & promisc' matching, to support BCAST and
+	 * PROMISC modes of operation for PFs. PF0 is excluded.
+	 */
+ rsvd = (nixlf_count * RSVD_MCAM_ENTRIES_PER_NIXLF) +
+ ((rvu->hw->total_pfs - 1) * RSVD_MCAM_ENTRIES_PER_PF);
+ if (mcam->total_entries <= rsvd) {
+ dev_warn(rvu->dev,
+ "Insufficient NPC MCAM size %d for pkt I/O, exiting\n",
+ mcam->total_entries);
+ return -ENOMEM;
+ }
+
+ mcam->bmap_entries = mcam->total_entries - rsvd;
+ mcam->nixlf_offset = mcam->bmap_entries;
+ mcam->pf_offset = mcam->nixlf_offset + nixlf_count;
+
+ /* Allocate bitmaps for managing MCAM entries */
+ mcam->bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(mcam->bmap_entries),
+ sizeof(long), GFP_KERNEL);
+ if (!mcam->bmap)
+ return -ENOMEM;
+
+ mcam->bmap_reverse = devm_kcalloc(rvu->dev,
+ BITS_TO_LONGS(mcam->bmap_entries),
+ sizeof(long), GFP_KERNEL);
+ if (!mcam->bmap_reverse)
+ return -ENOMEM;
+
+ mcam->bmap_fcnt = mcam->bmap_entries;
+
+ /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
+ mcam->entry2pfvf_map = devm_kcalloc(rvu->dev, mcam->bmap_entries,
+ sizeof(u16), GFP_KERNEL);
+ if (!mcam->entry2pfvf_map)
+ return -ENOMEM;
+
+ /* Reserve 1/8th of MCAM entries at the bottom for low priority
+ * allocations and another 1/8th at the top for high priority
+ * allocations.
+ */
+ mcam->lprio_count = mcam->bmap_entries / 8;
+ if (mcam->lprio_count > BITS_PER_LONG)
+ mcam->lprio_count = round_down(mcam->lprio_count,
+ BITS_PER_LONG);
+ mcam->lprio_start = mcam->bmap_entries - mcam->lprio_count;
+ mcam->hprio_count = mcam->lprio_count;
+ mcam->hprio_end = mcam->hprio_count;
+
+ /* Allocate bitmap for managing MCAM counters and memory
+ * for saving counter to RVU PFFUNC allocation mapping.
+ */
+ err = rvu_alloc_bitmap(&mcam->counters);
+ if (err)
+ return err;
+
+ mcam->cntr2pfvf_map = devm_kcalloc(rvu->dev, mcam->counters.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!mcam->cntr2pfvf_map)
+ goto free_mem;
+
+ /* Alloc memory for MCAM entry to counter mapping and for tracking
+ * counter's reference count.
+ */
+ mcam->entry2cntr_map = devm_kcalloc(rvu->dev, mcam->bmap_entries,
+ sizeof(u16), GFP_KERNEL);
+ if (!mcam->entry2cntr_map)
+ goto free_mem;
+
+ mcam->cntr_refcnt = devm_kcalloc(rvu->dev, mcam->counters.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!mcam->cntr_refcnt)
+ goto free_mem;
+
+ /* Alloc memory for saving target device of mcam rule */
+ mcam->entry2target_pffunc = devm_kcalloc(rvu->dev, mcam->total_entries,
+ sizeof(u16), GFP_KERNEL);
+ if (!mcam->entry2target_pffunc)
+ goto free_mem;
+
+ for (index = 0; index < mcam->bmap_entries; index++) {
+ mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP;
+ mcam->entry2cntr_map[index] = NPC_MCAM_INVALID_MAP;
+ }
+
+ for (cntr = 0; cntr < mcam->counters.max; cntr++)
+ mcam->cntr2pfvf_map[cntr] = NPC_MCAM_INVALID_MAP;
+
+ mutex_init(&mcam->lock);
+
+ return 0;
+
+free_mem:
+ kfree(mcam->counters.bmap);
+ return -ENOMEM;
+}
+
+static void rvu_npc_hw_init(struct rvu *rvu, int blkaddr)
+{
+ struct npc_pkind *pkind = &rvu->hw->pkind;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 npc_const, npc_const1;
+ u64 npc_const2 = 0;
+
+ npc_const = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
+ npc_const1 = rvu_read64(rvu, blkaddr, NPC_AF_CONST1);
+ if (npc_const1 & BIT_ULL(63))
+ npc_const2 = rvu_read64(rvu, blkaddr, NPC_AF_CONST2);
+
+ pkind->rsrc.max = NPC_UNRESERVED_PKIND_COUNT;
+ hw->npc_pkinds = (npc_const1 >> 12) & 0xFFULL;
+ hw->npc_kpu_entries = npc_const1 & 0xFFFULL;
+ hw->npc_kpus = (npc_const >> 8) & 0x1FULL;
+ hw->npc_intfs = npc_const & 0xFULL;
+ hw->npc_counters = (npc_const >> 48) & 0xFFFFULL;
+
+ mcam->banks = (npc_const >> 44) & 0xFULL;
+ mcam->banksize = (npc_const >> 28) & 0xFFFFULL;
+ hw->npc_stat_ena = BIT_ULL(9);
+ /* Extended set */
+ if (npc_const2) {
+ hw->npc_ext_set = true;
+ /* 96xx supports only match_stats and npc_counters
+ * reflected in NPC_AF_CONST reg.
+ * STAT_SEL and ENA are at [0:8] and 9 bit positions.
+ * 98xx has both match_stat and ext and npc_counter
+ * reflected in NPC_AF_CONST2
+ * STAT_SEL_EXT added at [12:14] bit position.
+ * cn10k supports only ext and hence npc_counters in
+ * NPC_AF_CONST is 0 and npc_counters reflected in NPC_AF_CONST2.
+ * STAT_SEL bitpos incremented from [0:8] to [0:11] and ENA bit moved to 63
+ */
+ if (!hw->npc_counters)
+ hw->npc_stat_ena = BIT_ULL(63);
+ hw->npc_counters = (npc_const2 >> 16) & 0xFFFFULL;
+ mcam->banksize = npc_const2 & 0xFFFFULL;
+ }
+
+ mcam->counters.max = hw->npc_counters;
+}
+
+static void rvu_npc_setup_interfaces(struct rvu *rvu, int blkaddr)
+{
+ struct npc_mcam_kex *mkex = rvu->kpu.mkex;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 nibble_ena, rx_kex, tx_kex;
+ u8 intf;
+
+ /* Reserve last counter for MCAM RX miss action which is set to
+ * drop packet. This way we will know how many pkts didn't match
+ * any MCAM entry.
+ */
+ mcam->counters.max--;
+ mcam->rx_miss_act_cntr = mcam->counters.max;
+
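+	/* The RX parse-nibble enables are read from the RX key config,
+	 * adjusted by rvu_npc_get_tx_nibble_cfg() and folded into the TX
+	 * key config programmed below.
+	 */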
+ rx_kex = mkex->keyx_cfg[NIX_INTF_RX];
+ tx_kex = mkex->keyx_cfg[NIX_INTF_TX];
+ nibble_ena = FIELD_GET(NPC_PARSE_NIBBLE, rx_kex);
+
+ nibble_ena = rvu_npc_get_tx_nibble_cfg(rvu, nibble_ena);
+ if (nibble_ena) {
+ tx_kex &= ~NPC_PARSE_NIBBLE;
+ tx_kex |= FIELD_PREP(NPC_PARSE_NIBBLE, nibble_ena);
+ mkex->keyx_cfg[NIX_INTF_TX] = tx_kex;
+ }
+
+ /* Configure RX interfaces */
+ for (intf = 0; intf < hw->npc_intfs; intf++) {
+ if (is_npc_intf_tx(intf))
+ continue;
+
+ /* Set RX MCAM search key size. LA..LE (ltype only) + Channel */
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf),
+ rx_kex);
+
+ /* If MCAM lookup doesn't result in a match, drop the received
+ * packet. And map this action to a counter to count dropped
+ * packets.
+ */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_INTFX_MISS_ACT(intf), NIX_RX_ACTIONOP_DROP);
+
+ /* NPC_AF_INTFX_MISS_STAT_ACT[14:12] - counter[11:9]
+ * NPC_AF_INTFX_MISS_STAT_ACT[8:0] - counter[8:0]
+ */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_INTFX_MISS_STAT_ACT(intf),
+ ((mcam->rx_miss_act_cntr >> 9) << 12) |
+ hw->npc_stat_ena | mcam->rx_miss_act_cntr);
+ }
+
+ /* Configure TX interfaces */
+ for (intf = 0; intf < hw->npc_intfs; intf++) {
+ if (is_npc_intf_rx(intf))
+ continue;
+
+ /* Extract Ltypes LID_LA to LID_LE */
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf),
+ tx_kex);
+
+ /* Set TX miss action to UCAST_DEFAULT i.e
+ * transmit the packet on NIX LF SQ's default channel.
+ */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_INTFX_MISS_ACT(intf),
+ NIX_TX_ACTIONOP_UCAST_DEFAULT);
+ }
+}
+
+int rvu_npc_init(struct rvu *rvu)
+{
+ struct npc_kpu_profile_adapter *kpu = &rvu->kpu;
+ struct npc_pkind *pkind = &rvu->hw->pkind;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, entry, bank, err;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -ENODEV;
+ }
+
+ rvu_npc_hw_init(rvu, blkaddr);
+
+ /* First disable all MCAM entries, to stop traffic towards NIXLFs */
+ for (bank = 0; bank < mcam->banks; bank++) {
+ for (entry = 0; entry < mcam->banksize; entry++)
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CFG(entry, bank), 0);
+ }
+
+ err = rvu_alloc_bitmap(&pkind->rsrc);
+ if (err)
+ return err;
+ /* Reserve PKIND#0 for LBKs. Power reset value of LBK_CH_PKIND is '0',
+ * no need to configure PKIND for all LBKs separately.
+ */
+ rvu_alloc_rsrc(&pkind->rsrc);
+
+ /* Allocate mem for pkind to PF and channel mapping info */
+ pkind->pfchan_map = devm_kcalloc(rvu->dev, pkind->rsrc.max,
+ sizeof(u32), GFP_KERNEL);
+ if (!pkind->pfchan_map)
+ return -ENOMEM;
+
+ /* Configure KPU profile */
+ npc_parser_profile_init(rvu, blkaddr);
+
+ /* Config Outer L2, IPv4's NPC layer info */
+ rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OL2,
+ (kpu->lt_def->pck_ol2.lid << 8) | (kpu->lt_def->pck_ol2.ltype_match << 4) |
+ kpu->lt_def->pck_ol2.ltype_mask);
+ rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OIP4,
+ (kpu->lt_def->pck_oip4.lid << 8) | (kpu->lt_def->pck_oip4.ltype_match << 4) |
+ kpu->lt_def->pck_oip4.ltype_mask);
+
+ /* Config Inner IPV4 NPC layer info */
+ rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_IIP4,
+ (kpu->lt_def->pck_iip4.lid << 8) | (kpu->lt_def->pck_iip4.ltype_match << 4) |
+ kpu->lt_def->pck_iip4.ltype_mask);
+
+ /* Enable below for Rx pkts.
+ * - Outer IPv4 header checksum validation.
+ * - Detect outer L2 broadcast address and set NPC_RESULT_S[L2B].
+ * - Detect outer L2 multicast address and set NPC_RESULT_S[L2M].
+ * - Inner IPv4 header checksum validation.
+ * - Set non zero checksum error code value
+ */
+ rvu_write64(rvu, blkaddr, NPC_AF_PCK_CFG,
+ rvu_read64(rvu, blkaddr, NPC_AF_PCK_CFG) |
+ ((u64)NPC_EC_OIP4_CSUM << 32) | (NPC_EC_IIP4_CSUM << 24) |
+ BIT_ULL(7) | BIT_ULL(6) | BIT_ULL(2) | BIT_ULL(1));
+
+ rvu_npc_setup_interfaces(rvu, blkaddr);
+
+ npc_config_secret_key(rvu, blkaddr);
+ /* Configure MKEX profile */
+ npc_load_mkex_profile(rvu, blkaddr, rvu->mkex_pfl_name);
+
+ err = npc_mcam_rsrcs_init(rvu, blkaddr);
+ if (err)
+ return err;
+
+ err = npc_flow_steering_init(rvu, blkaddr);
+ if (err) {
+ dev_err(rvu->dev,
+ "Incorrect mkex profile loaded using default mkex\n");
+ npc_load_mkex_profile(rvu, blkaddr, def_pfl_name);
+ }
+
+ return 0;
+}
+
+void rvu_npc_freemem(struct rvu *rvu)
+{
+ struct npc_pkind *pkind = &rvu->hw->pkind;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+
+ kfree(pkind->rsrc.bmap);
+ kfree(mcam->counters.bmap);
+ if (rvu->kpu_prfl_addr)
+ iounmap(rvu->kpu_prfl_addr);
+ else
+ kfree(rvu->kpu_fwdata);
+ mutex_destroy(&mcam->lock);
+}
+
+void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc,
+ int blkaddr, int *alloc_cnt,
+ int *enable_cnt)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int entry;
+
+ *alloc_cnt = 0;
+ *enable_cnt = 0;
+
+ for (entry = 0; entry < mcam->bmap_entries; entry++) {
+ if (mcam->entry2pfvf_map[entry] == pcifunc) {
+ (*alloc_cnt)++;
+ if (is_mcam_entry_enabled(rvu, mcam, blkaddr, entry))
+ (*enable_cnt)++;
+ }
+ }
+}
+
+void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc,
+ int blkaddr, int *alloc_cnt,
+ int *enable_cnt)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int cntr;
+
+ *alloc_cnt = 0;
+ *enable_cnt = 0;
+
+ for (cntr = 0; cntr < mcam->counters.max; cntr++) {
+ if (mcam->cntr2pfvf_map[cntr] == pcifunc) {
+ (*alloc_cnt)++;
+ if (mcam->cntr_refcnt[cntr])
+ (*enable_cnt)++;
+ }
+ }
+}
+
+static int npc_mcam_verify_entry(struct npc_mcam *mcam,
+ u16 pcifunc, int entry)
+{
+ /* verify AF installed entries */
+ if (is_pffunc_af(pcifunc))
+ return 0;
+ /* Verify if entry is valid and if it is indeed
+ * allocated to the requesting PFFUNC.
+ */
+ if (entry >= mcam->bmap_entries)
+ return NPC_MCAM_INVALID_REQ;
+
+ if (pcifunc != mcam->entry2pfvf_map[entry])
+ return NPC_MCAM_PERM_DENIED;
+
+ return 0;
+}
+
+static int npc_mcam_verify_counter(struct npc_mcam *mcam,
+ u16 pcifunc, int cntr)
+{
+ /* Verify if counter is valid and if it is indeed
+ * allocated to the requesting PFFUNC.
+ */
+ if (cntr >= mcam->counters.max)
+ return NPC_MCAM_INVALID_REQ;
+
+ if (pcifunc != mcam->cntr2pfvf_map[cntr])
+ return NPC_MCAM_PERM_DENIED;
+
+ return 0;
+}
+
+static void npc_map_mcam_entry_and_cntr(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 entry, u16 cntr)
+{
+ u16 index = entry & (mcam->banksize - 1);
+ u32 bank = npc_get_bank(mcam, entry);
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ /* Set mapping and increment counter's refcnt */
+ mcam->entry2cntr_map[entry] = cntr;
+ mcam->cntr_refcnt[cntr]++;
+ /* Enable stats */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank),
+ ((cntr >> 9) << 12) | hw->npc_stat_ena | cntr);
+}
+
+static void npc_unmap_mcam_entry_and_cntr(struct rvu *rvu,
+ struct npc_mcam *mcam,
+ int blkaddr, u16 entry, u16 cntr)
+{
+ u16 index = entry & (mcam->banksize - 1);
+ u32 bank = npc_get_bank(mcam, entry);
+
+ /* Remove mapping and reduce counter's refcnt */
+ mcam->entry2cntr_map[entry] = NPC_MCAM_INVALID_MAP;
+ mcam->cntr_refcnt[cntr]--;
+ /* Disable stats */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank), 0x00);
+}
+
+/* Sets MCAM entry in bitmap as used. Update
+ * reverse bitmap too. Should be called with
+ * 'mcam->lock' held.
+ */
+static void npc_mcam_set_bit(struct npc_mcam *mcam, u16 index)
+{
+ u16 entry, rentry;
+
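+	/* Forward index 'i' maps to reverse index 'bmap_entries - i - 1',
+	 * so the reverse bitmap can be searched from the highest entries
+	 * downwards.
+	 */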
+ entry = index;
+ rentry = mcam->bmap_entries - index - 1;
+
+ __set_bit(entry, mcam->bmap);
+ __set_bit(rentry, mcam->bmap_reverse);
+ mcam->bmap_fcnt--;
+}
+
+/* Sets MCAM entry in bitmap as free. Update
+ * reverse bitmap too. Should be called with
+ * 'mcam->lock' held.
+ */
+static void npc_mcam_clear_bit(struct npc_mcam *mcam, u16 index)
+{
+ u16 entry, rentry;
+
+ entry = index;
+ rentry = mcam->bmap_entries - index - 1;
+
+ __clear_bit(entry, mcam->bmap);
+ __clear_bit(rentry, mcam->bmap_reverse);
+ mcam->bmap_fcnt++;
+}
+
+static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 pcifunc)
+{
+ u16 index, cntr;
+
+ /* Scan all MCAM entries and free the ones mapped to 'pcifunc' */
+ for (index = 0; index < mcam->bmap_entries; index++) {
+ if (mcam->entry2pfvf_map[index] == pcifunc) {
+ mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP;
+ /* Free the entry in bitmap */
+ npc_mcam_clear_bit(mcam, index);
+ /* Disable the entry */
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
+
+ /* Update entry2counter mapping */
+ cntr = mcam->entry2cntr_map[index];
+ if (cntr != NPC_MCAM_INVALID_MAP)
+ npc_unmap_mcam_entry_and_cntr(rvu, mcam,
+ blkaddr, index,
+ cntr);
+ mcam->entry2target_pffunc[index] = 0x0;
+ }
+ }
+}
+
+static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam,
+ u16 pcifunc)
+{
+ u16 cntr;
+
+ /* Scan all MCAM counters and free the ones mapped to 'pcifunc' */
+ for (cntr = 0; cntr < mcam->counters.max; cntr++) {
+ if (mcam->cntr2pfvf_map[cntr] == pcifunc) {
+ mcam->cntr2pfvf_map[cntr] = NPC_MCAM_INVALID_MAP;
+ mcam->cntr_refcnt[cntr] = 0;
+ rvu_free_rsrc(&mcam->counters, cntr);
+ /* This API is expected to be called after freeing
+			 * MCAM entries, which in turn will remove
+ * 'entry to counter' mapping.
+ * No need to do it again.
+ */
+ }
+ }
+}
+
+/* Find area of contiguous free entries of size 'nr'.
+ * If not found return max contiguous free entries available.
+ */
+static u16 npc_mcam_find_zero_area(unsigned long *map, u16 size, u16 start,
+ u16 nr, u16 *max_area)
+{
+ u16 max_area_start = 0;
+ u16 index, next, end;
+
+ *max_area = 0;
+
+again:
+ index = find_next_zero_bit(map, size, start);
+ if (index >= size)
+ return max_area_start;
+
+ end = ((index + nr) >= size) ? size : index + nr;
+ next = find_next_bit(map, end, index);
+ if (*max_area < (next - index)) {
+ *max_area = next - index;
+ max_area_start = index;
+ }
+
+ if (next < end) {
+ start = next + 1;
+ goto again;
+ }
+
+ return max_area_start;
+}
+
+/* Find number of free MCAM entries available
+ * within range i.e in between 'start' and 'end'.
+ */
+static u16 npc_mcam_get_free_count(unsigned long *map, u16 start, u16 end)
+{
+ u16 index, next;
+ u16 fcnt = 0;
+
+again:
+ if (start >= end)
+ return fcnt;
+
+ index = find_next_zero_bit(map, end, start);
+ if (index >= end)
+ return fcnt;
+
+ next = find_next_bit(map, end, index);
+ if (next <= end) {
+ fcnt += next - index;
+ start = next + 1;
+ goto again;
+ }
+
+ fcnt += end - index;
+ return fcnt;
+}
+
+static void
+npc_get_mcam_search_range_priority(struct npc_mcam *mcam,
+ struct npc_mcam_alloc_entry_req *req,
+ u16 *start, u16 *end, bool *reverse)
+{
+ u16 fcnt;
+
+ if (req->priority == NPC_MCAM_HIGHER_PRIO)
+ goto hprio;
+
+ /* For a low priority entry allocation
+ * - If reference entry is not in hprio zone then
+ * search range: ref_entry to end.
+ * - If reference entry is in hprio zone and if
+	 *   request can be accommodated in the non-hprio zone then
+	 *      search range: 'start of middle zone' to 'end'
+	 * - else search in reverse, so that fewer hprio
+	 *   zone entries are allocated.
+ */
+
+ *reverse = false;
+ *start = req->ref_entry + 1;
+ *end = mcam->bmap_entries;
+
+ if (req->ref_entry >= mcam->hprio_end)
+ return;
+
+ fcnt = npc_mcam_get_free_count(mcam->bmap,
+ mcam->hprio_end, mcam->bmap_entries);
+ if (fcnt > req->count)
+ *start = mcam->hprio_end;
+ else
+ *reverse = true;
+ return;
+
+hprio:
+ /* For a high priority entry allocation, search is always
+ * in reverse to preserve hprio zone entries.
+ * - If reference entry is not in lprio zone then
+ * search range: 0 to ref_entry.
+ * - If reference entry is in lprio zone and if
+	 *   request can be accommodated in the middle zone then
+ * search range: 'hprio_end' to 'lprio_start'
+ */
+
+ *reverse = true;
+ *start = 0;
+ *end = req->ref_entry;
+
+ if (req->ref_entry <= mcam->lprio_start)
+ return;
+
+ fcnt = npc_mcam_get_free_count(mcam->bmap,
+ mcam->hprio_end, mcam->lprio_start);
+ if (fcnt < req->count)
+ return;
+ *start = mcam->hprio_end;
+ *end = mcam->lprio_start;
+}
+
+static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
+ struct npc_mcam_alloc_entry_req *req,
+ struct npc_mcam_alloc_entry_rsp *rsp)
+{
+ u16 entry_list[NPC_MAX_NONCONTIG_ENTRIES];
+ u16 fcnt, hp_fcnt, lp_fcnt;
+ u16 start, end, index;
+ int entry, next_start;
+ bool reverse = false;
+ unsigned long *bmap;
+ u16 max_contig;
+
+ mutex_lock(&mcam->lock);
+
+ /* Check if there are any free entries */
+ if (!mcam->bmap_fcnt) {
+ mutex_unlock(&mcam->lock);
+ return NPC_MCAM_ALLOC_FAILED;
+ }
+
+ /* MCAM entries are divided into high priority, middle and
+	 * low priority zones. The idea is to avoid allocating the top-most
+	 * and bottom-most entries as far as possible, to increase the
+	 * probability of honouring priority allocation requests.
+ *
+ * Two bitmaps are used for mcam entry management,
+ * mcam->bmap for forward search i.e '0 to mcam->bmap_entries'.
+ * mcam->bmap_reverse for reverse search i.e 'mcam->bmap_entries to 0'.
+ *
+ * Reverse bitmap is used to allocate entries
+ * - when a higher priority entry is requested
+ * - when available free entries are less.
+	 * Lower priority ones out of available free entries are always
+ * chosen when 'high vs low' question arises.
+ */
+
+ /* Get the search range for priority allocation request */
+ if (req->priority) {
+ npc_get_mcam_search_range_priority(mcam, req,
+ &start, &end, &reverse);
+ goto alloc;
+ }
+
+	/* For a VF, the base MCAM match rule is set by its PF, and all
+	 * further MCAM rules installed by the VF on its own are
+	 * concatenated with that base rule. Hence PF entries should be at
+	 * lower priority compared to VF entries, otherwise the base rule
+	 * is always hit and the rules installed by the VF are of no use.
+	 * So if the request is from a PF and NOT a priority allocation
+	 * request, allocate low priority entries.
+	 */
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ goto lprio_alloc;
+
+ /* Find out the search range for non-priority allocation request
+ *
+ * Get MCAM free entry count in middle zone.
+ */
+ lp_fcnt = npc_mcam_get_free_count(mcam->bmap,
+ mcam->lprio_start,
+ mcam->bmap_entries);
+ hp_fcnt = npc_mcam_get_free_count(mcam->bmap, 0, mcam->hprio_end);
+ fcnt = mcam->bmap_fcnt - lp_fcnt - hp_fcnt;
+
+	/* Check if the request can be accommodated in the middle zone */
+ if (fcnt > req->count) {
+ start = mcam->hprio_end;
+ end = mcam->lprio_start;
+ } else if ((fcnt + (hp_fcnt / 2) + (lp_fcnt / 2)) > req->count) {
+ /* Expand search zone from half of hprio zone to
+ * half of lprio zone.
+ */
+ start = mcam->hprio_end / 2;
+ end = mcam->bmap_entries - (mcam->lprio_count / 2);
+ reverse = true;
+ } else {
+ /* Not enough free entries, search all entries in reverse,
+ * so that low priority ones will get used up.
+ */
+lprio_alloc:
+ reverse = true;
+ start = 0;
+ end = mcam->bmap_entries;
+ }
+
+alloc:
+ if (reverse) {
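+		/* Translate the forward-bitmap search window into
+		 * reverse-bitmap coordinates.
+		 */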
+ bmap = mcam->bmap_reverse;
+ start = mcam->bmap_entries - start;
+ end = mcam->bmap_entries - end;
+ swap(start, end);
+ } else {
+ bmap = mcam->bmap;
+ }
+
+ if (req->contig) {
+ /* Allocate requested number of contiguous entries, if
+ * unsuccessful find max contiguous entries available.
+ */
+ index = npc_mcam_find_zero_area(bmap, end, start,
+ req->count, &max_contig);
+ rsp->count = max_contig;
+ if (reverse)
+ rsp->entry = mcam->bmap_entries - index - max_contig;
+ else
+ rsp->entry = index;
+ } else {
+ /* Allocate requested number of non-contiguous entries,
+ * if unsuccessful allocate as many as possible.
+ */
+ rsp->count = 0;
+ next_start = start;
+ for (entry = 0; entry < req->count; entry++) {
+ index = find_next_zero_bit(bmap, end, next_start);
+ if (index >= end)
+ break;
+
+ next_start = start + (index - start) + 1;
+
+ /* Save the entry's index */
+ if (reverse)
+ index = mcam->bmap_entries - index - 1;
+ entry_list[entry] = index;
+ rsp->count++;
+ }
+ }
+
+	/* If allocating the requested number of entries is unsuccessful,
+ * expand the search range to full bitmap length and retry.
+ */
+ if (!req->priority && (rsp->count < req->count) &&
+ ((end - start) != mcam->bmap_entries)) {
+ reverse = true;
+ start = 0;
+ end = mcam->bmap_entries;
+ goto alloc;
+ }
+
+ /* For priority entry allocation requests, if allocation is
+ * failed then expand search to max possible range and retry.
+ */
+ if (req->priority && rsp->count < req->count) {
+ if (req->priority == NPC_MCAM_LOWER_PRIO &&
+ (start != (req->ref_entry + 1))) {
+ start = req->ref_entry + 1;
+ end = mcam->bmap_entries;
+ reverse = false;
+ goto alloc;
+ } else if ((req->priority == NPC_MCAM_HIGHER_PRIO) &&
+ ((end - start) != req->ref_entry)) {
+ start = 0;
+ end = req->ref_entry;
+ reverse = true;
+ goto alloc;
+ }
+ }
+
+ /* Copy MCAM entry indices into mbox response entry_list.
+ * Requester always expects indices in ascending order, so
+ * reverse the list if reverse bitmap is used for allocation.
+ */
+ if (!req->contig && rsp->count) {
+ index = 0;
+ for (entry = rsp->count - 1; entry >= 0; entry--) {
+ if (reverse)
+ rsp->entry_list[index++] = entry_list[entry];
+ else
+ rsp->entry_list[entry] = entry_list[entry];
+ }
+ }
+
+ /* Mark the allocated entries as used and set nixlf mapping */
+ for (entry = 0; entry < rsp->count; entry++) {
+ index = req->contig ?
+ (rsp->entry + entry) : rsp->entry_list[entry];
+ npc_mcam_set_bit(mcam, index);
+ mcam->entry2pfvf_map[index] = pcifunc;
+ mcam->entry2cntr_map[index] = NPC_MCAM_INVALID_MAP;
+ }
+
+ /* Update available free count in mbox response */
+ rsp->free_count = mcam->bmap_fcnt;
+
+ mutex_unlock(&mcam->lock);
+ return 0;
+}
+
+/* Mark bitmaps to reserve the MCAM slot */
+void npc_mcam_rsrcs_reserve(struct rvu *rvu, int blkaddr, int entry_idx)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+
+ npc_mcam_set_bit(mcam, entry_idx);
+}
+
+int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
+ struct npc_mcam_alloc_entry_req *req,
+ struct npc_mcam_alloc_entry_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ rsp->entry = NPC_MCAM_ENTRY_INVALID;
+ rsp->free_count = 0;
+
+ /* Check if ref_entry is within range */
+ if (req->priority && req->ref_entry >= mcam->bmap_entries) {
+ dev_err(rvu->dev, "%s: reference entry %d is out of range\n",
+ __func__, req->ref_entry);
+ return NPC_MCAM_INVALID_REQ;
+ }
+
+ /* ref_entry can't be '0' if requested priority is high.
+ * Can't be last entry if requested priority is low.
+ */
+ if ((!req->ref_entry && req->priority == NPC_MCAM_HIGHER_PRIO) ||
+ ((req->ref_entry == (mcam->bmap_entries - 1)) &&
+ req->priority == NPC_MCAM_LOWER_PRIO))
+ return NPC_MCAM_INVALID_REQ;
+
+ /* Since list of allocated indices needs to be sent to requester,
+ * max number of non-contiguous entries per mbox msg is limited.
+ */
+ if (!req->contig && req->count > NPC_MAX_NONCONTIG_ENTRIES) {
+ dev_err(rvu->dev,
+ "%s: %d Non-contiguous MCAM entries requested is more than max (%d) allowed\n",
+ __func__, req->count, NPC_MAX_NONCONTIG_ENTRIES);
+ return NPC_MCAM_INVALID_REQ;
+ }
+
+ /* Alloc request from PFFUNC with no NIXLF attached should be denied */
+ if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc))
+ return NPC_MCAM_ALLOC_DENIED;
+
+ return npc_mcam_alloc_entries(mcam, pcifunc, req, rsp);
+}
+
+int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
+ struct npc_mcam_free_entry_req *req,
+ struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, rc = 0;
+ u16 cntr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ /* Free request from PFFUNC with no NIXLF attached, ignore */
+ if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc))
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+
+ if (req->all)
+ goto free_all;
+
+ rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+ if (rc)
+ goto exit;
+
+ mcam->entry2pfvf_map[req->entry] = NPC_MCAM_INVALID_MAP;
+ mcam->entry2target_pffunc[req->entry] = 0x0;
+ npc_mcam_clear_bit(mcam, req->entry);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false);
+
+ /* Update entry2counter mapping */
+ cntr = mcam->entry2cntr_map[req->entry];
+ if (cntr != NPC_MCAM_INVALID_MAP)
+ npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ req->entry, cntr);
+
+ goto exit;
+
+free_all:
+ /* Free up all entries allocated to requesting PFFUNC */
+ npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc);
+exit:
+ mutex_unlock(&mcam->lock);
+ return rc;
+}
+
+int rvu_mbox_handler_npc_mcam_read_entry(struct rvu *rvu,
+ struct npc_mcam_read_entry_req *req,
+ struct npc_mcam_read_entry_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+ if (!rc) {
+ npc_read_mcam_entry(rvu, mcam, blkaddr, req->entry,
+ &rsp->entry_data,
+ &rsp->intf, &rsp->enable);
+ }
+
+ mutex_unlock(&mcam->lock);
+ return rc;
+}
+
+int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
+ struct npc_mcam_write_entry_req *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, rc;
+ u8 nix_intf;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+ if (rc)
+ goto exit;
+
+ if (req->set_cntr &&
+ npc_mcam_verify_counter(mcam, pcifunc, req->cntr)) {
+ rc = NPC_MCAM_INVALID_REQ;
+ goto exit;
+ }
+
+ if (!is_npc_interface_valid(rvu, req->intf)) {
+ rc = NPC_MCAM_INVALID_REQ;
+ goto exit;
+ }
+
+ if (is_npc_intf_tx(req->intf))
+ nix_intf = pfvf->nix_tx_intf;
+ else
+ nix_intf = pfvf->nix_rx_intf;
+
+ if (!is_pffunc_af(pcifunc) &&
+ npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, pcifunc)) {
+ rc = NPC_MCAM_INVALID_REQ;
+ goto exit;
+ }
+
+ /* For AF installed rules, the nix_intf should be set to target NIX */
+ if (is_pffunc_af(req->hdr.pcifunc))
+ nix_intf = req->intf;
+
+ npc_config_mcam_entry(rvu, mcam, blkaddr, req->entry, nix_intf,
+ &req->entry_data, req->enable_entry);
+
+ if (req->set_cntr)
+ npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ req->entry, req->cntr);
+
+ rc = 0;
+exit:
+ mutex_unlock(&mcam->lock);
+ return rc;
+}
+
+int rvu_mbox_handler_npc_mcam_ena_entry(struct rvu *rvu,
+ struct npc_mcam_ena_dis_entry_req *req,
+ struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+ mutex_unlock(&mcam->lock);
+ if (rc)
+ return rc;
+
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, true);
+
+ return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_dis_entry(struct rvu *rvu,
+ struct npc_mcam_ena_dis_entry_req *req,
+ struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+ mutex_unlock(&mcam->lock);
+ if (rc)
+ return rc;
+
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu,
+ struct npc_mcam_shift_entry_req *req,
+ struct npc_mcam_shift_entry_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ u16 old_entry, new_entry;
+ int blkaddr, rc = 0;
+ u16 index, cntr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ if (req->shift_count > NPC_MCAM_MAX_SHIFTS)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ for (index = 0; index < req->shift_count; index++) {
+ old_entry = req->curr_entry[index];
+ new_entry = req->new_entry[index];
+
+ /* Check that both the old and new entries are valid and
+ * belong to this PFFUNC.
+ */
+ rc = npc_mcam_verify_entry(mcam, pcifunc, old_entry);
+ if (rc)
+ break;
+
+ rc = npc_mcam_verify_entry(mcam, pcifunc, new_entry);
+ if (rc)
+ break;
+
+ /* new_entry should not have a counter mapped */
+ if (mcam->entry2cntr_map[new_entry] != NPC_MCAM_INVALID_MAP) {
+ rc = NPC_MCAM_PERM_DENIED;
+ break;
+ }
+
+ /* Disable the new_entry */
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, new_entry, false);
+
+ /* Copy rule from old entry to new entry */
+ npc_copy_mcam_entry(rvu, mcam, blkaddr, old_entry, new_entry);
+
+ /* Copy counter mapping, if any */
+ cntr = mcam->entry2cntr_map[old_entry];
+ if (cntr != NPC_MCAM_INVALID_MAP) {
+ npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ old_entry, cntr);
+ npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ new_entry, cntr);
+ }
+
+ /* Enable new_entry and disable old_entry */
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, new_entry, true);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, old_entry, false);
+ }
+
+ /* If the shift failed, report the index at which it failed */
+ if (index != req->shift_count) {
+ rc = NPC_MCAM_PERM_DENIED;
+ rsp->failed_entry_idx = index;
+ }
+
+ mutex_unlock(&mcam->lock);
+ return rc;
+}
+
+int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
+ struct npc_mcam_alloc_counter_req *req,
+ struct npc_mcam_alloc_counter_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ u16 max_contig, cntr;
+ int blkaddr, index;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ /* If the request is from a PFFUNC with no NIXLF attached, ignore */
+ if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc))
+ return NPC_MCAM_INVALID_REQ;
+
+ /* Since list of allocated counter IDs needs to be sent to requester,
+ * max number of non-contiguous counters per mbox msg is limited.
+ */
+ if (!req->contig && req->count > NPC_MAX_NONCONTIG_COUNTERS)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+
+ /* Check if unused counters are available or not */
+ if (!rvu_rsrc_free_count(&mcam->counters)) {
+ mutex_unlock(&mcam->lock);
+ return NPC_MCAM_ALLOC_FAILED;
+ }
+
+ rsp->count = 0;
+
+ if (req->contig) {
+ /* Allocate the requested number of contiguous counters; if
+ * unsuccessful, allocate the maximum contiguous run available.
+ */
+ index = npc_mcam_find_zero_area(mcam->counters.bmap,
+ mcam->counters.max, 0,
+ req->count, &max_contig);
+ rsp->count = max_contig;
+ rsp->cntr = index;
+ for (cntr = index; cntr < (index + max_contig); cntr++) {
+ __set_bit(cntr, mcam->counters.bmap);
+ mcam->cntr2pfvf_map[cntr] = pcifunc;
+ }
+ } else {
+ /* Allocate the requested number of non-contiguous counters;
+ * if unsuccessful, allocate as many as possible.
+ */
+ for (cntr = 0; cntr < req->count; cntr++) {
+ index = rvu_alloc_rsrc(&mcam->counters);
+ if (index < 0)
+ break;
+ rsp->cntr_list[cntr] = index;
+ rsp->count++;
+ mcam->cntr2pfvf_map[index] = pcifunc;
+ }
+ }
+
+ mutex_unlock(&mcam->lock);
+ return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
+ struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 index, entry = 0;
+ int blkaddr, err;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
+ if (err) {
+ mutex_unlock(&mcam->lock);
+ return err;
+ }
+
+ /* Mark counter as free/unused */
+ mcam->cntr2pfvf_map[req->cntr] = NPC_MCAM_INVALID_MAP;
+ rvu_free_rsrc(&mcam->counters, req->cntr);
+
+ /* Disable stats on all MCAM entries which are using this counter */
+ while (entry < mcam->bmap_entries) {
+ if (!mcam->cntr_refcnt[req->cntr])
+ break;
+
+ index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry);
+ if (index >= mcam->bmap_entries)
+ break;
+ entry = index + 1;
+ if (mcam->entry2cntr_map[index] != req->cntr)
+ continue;
+
+ npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ index, req->cntr);
+ }
+
+ mutex_unlock(&mcam->lock);
+ return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu,
+ struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 index, entry = 0;
+ int blkaddr, rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ rc = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
+ if (rc)
+ goto exit;
+
+ /* Unmap the MCAM entry and counter */
+ if (!req->all) {
+ rc = npc_mcam_verify_entry(mcam, req->hdr.pcifunc, req->entry);
+ if (rc)
+ goto exit;
+ npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ req->entry, req->cntr);
+ goto exit;
+ }
+
+ /* Disable stats on all MCAM entries which are using this counter */
+ while (entry < mcam->bmap_entries) {
+ if (!mcam->cntr_refcnt[req->cntr])
+ break;
+
+ index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry);
+ if (index >= mcam->bmap_entries)
+ break;
+ entry = index + 1;
+
+ if (mcam->entry2cntr_map[index] != req->cntr)
+ continue;
+
+ npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ index, req->cntr);
+ }
+exit:
+ mutex_unlock(&mcam->lock);
+ return rc;
+}
+
+int rvu_mbox_handler_npc_mcam_clear_counter(struct rvu *rvu,
+ struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, err;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
+ mutex_unlock(&mcam->lock);
+ if (err)
+ return err;
+
+ rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(req->cntr), 0x00);
+
+ return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_counter_stats(struct rvu *rvu,
+ struct npc_mcam_oper_counter_req *req,
+ struct npc_mcam_oper_counter_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, err;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
+ mutex_unlock(&mcam->lock);
+ if (err)
+ return err;
+
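+ /* Report only the lower 48 bits of the match counter register */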
+ rsp->stat = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(req->cntr));
+ rsp->stat &= BIT_ULL(48) - 1;
+
+ return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
+ struct npc_mcam_alloc_and_write_entry_req *req,
+ struct npc_mcam_alloc_and_write_entry_rsp *rsp)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ struct npc_mcam_alloc_counter_req cntr_req;
+ struct npc_mcam_alloc_counter_rsp cntr_rsp;
+ struct npc_mcam_alloc_entry_req entry_req;
+ struct npc_mcam_alloc_entry_rsp entry_rsp;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 entry = NPC_MCAM_ENTRY_INVALID;
+ u16 cntr = NPC_MCAM_ENTRY_INVALID;
+ int blkaddr, rc;
+ u8 nix_intf;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ if (!is_npc_interface_valid(rvu, req->intf))
+ return NPC_MCAM_INVALID_REQ;
+
+ if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf,
+ req->hdr.pcifunc))
+ return NPC_MCAM_INVALID_REQ;
+
+ /* Try to allocate a MCAM entry */
+ entry_req.hdr.pcifunc = req->hdr.pcifunc;
+ entry_req.contig = true;
+ entry_req.priority = req->priority;
+ entry_req.ref_entry = req->ref_entry;
+ entry_req.count = 1;
+
+ rc = rvu_mbox_handler_npc_mcam_alloc_entry(rvu,
+ &entry_req, &entry_rsp);
+ if (rc)
+ return rc;
+
+ if (!entry_rsp.count)
+ return NPC_MCAM_ALLOC_FAILED;
+
+ entry = entry_rsp.entry;
+
+ if (!req->alloc_cntr)
+ goto write_entry;
+
+ /* Now allocate counter */
+ cntr_req.hdr.pcifunc = req->hdr.pcifunc;
+ cntr_req.contig = true;
+ cntr_req.count = 1;
+
+ rc = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp);
+ if (rc) {
+ /* Free allocated MCAM entry */
+ mutex_lock(&mcam->lock);
+ mcam->entry2pfvf_map[entry] = NPC_MCAM_INVALID_MAP;
+ npc_mcam_clear_bit(mcam, entry);
+ mutex_unlock(&mcam->lock);
+ return rc;
+ }
+
+ cntr = cntr_rsp.cntr;
+
+write_entry:
+ mutex_lock(&mcam->lock);
+
+ if (is_npc_intf_tx(req->intf))
+ nix_intf = pfvf->nix_tx_intf;
+ else
+ nix_intf = pfvf->nix_rx_intf;
+
+ npc_config_mcam_entry(rvu, mcam, blkaddr, entry, nix_intf,
+ &req->entry_data, req->enable_entry);
+
+ if (req->alloc_cntr)
+ npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr, entry, cntr);
+ mutex_unlock(&mcam->lock);
+
+ rsp->entry = entry;
+ rsp->cntr = cntr;
+
+ return 0;
+}
+
+#define GET_KEX_CFG(intf) \
+ rvu_read64(rvu, BLKADDR_NPC, NPC_AF_INTFX_KEX_CFG(intf))
+
+#define GET_KEX_FLAGS(ld) \
+ rvu_read64(rvu, BLKADDR_NPC, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld))
+
+#define GET_KEX_LD(intf, lid, lt, ld) \
+ rvu_read64(rvu, BLKADDR_NPC, \
+ NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, lt, ld))
+
+#define GET_KEX_LDFLAGS(intf, ld, fl) \
+ rvu_read64(rvu, BLKADDR_NPC, \
+ NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, fl))
+
+int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
+ struct npc_get_kex_cfg_rsp *rsp)
+{
+ int lid, lt, ld, fl;
+
+ rsp->rx_keyx_cfg = GET_KEX_CFG(NIX_INTF_RX);
+ rsp->tx_keyx_cfg = GET_KEX_CFG(NIX_INTF_TX);
+ for (lid = 0; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ rsp->intf_lid_lt_ld[NIX_INTF_RX][lid][lt][ld] =
+ GET_KEX_LD(NIX_INTF_RX, lid, lt, ld);
+ rsp->intf_lid_lt_ld[NIX_INTF_TX][lid][lt][ld] =
+ GET_KEX_LD(NIX_INTF_TX, lid, lt, ld);
+ }
+ }
+ }
+ for (ld = 0; ld < NPC_MAX_LD; ld++)
+ rsp->kex_ld_flags[ld] = GET_KEX_FLAGS(ld);
+
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ for (fl = 0; fl < NPC_MAX_LFL; fl++) {
+ rsp->intf_ld_flags[NIX_INTF_RX][ld][fl] =
+ GET_KEX_LDFLAGS(NIX_INTF_RX, ld, fl);
+ rsp->intf_ld_flags[NIX_INTF_TX][ld][fl] =
+ GET_KEX_LDFLAGS(NIX_INTF_TX, ld, fl);
+ }
+ }
+ memcpy(rsp->mkex_pfl_name, rvu->mkex_pfl_name, MKEX_NAME_LEN);
+ return 0;
+}
+
+static int
+npc_set_var_len_offset_pkind(struct rvu *rvu, u16 pcifunc, u64 pkind,
+ u8 var_len_off, u8 var_len_off_mask, u8 shift_dir)
+{
+ struct npc_kpu_action0 *act0;
+ u8 shift_count = 0;
+ int blkaddr;
+ u64 val;
+
+ if (!var_len_off_mask)
+ return -EINVAL;
+
+ if (var_len_off_mask != 0xff) {
+ if (shift_dir)
+ shift_count = __ffs(var_len_off_mask);
+ else
+ shift_count = (8 - __fls(var_len_off_mask));
+ }
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, pcifunc);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -EINVAL;
+ }
+ val = rvu_read64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind));
+ act0 = (struct npc_kpu_action0 *)&val;
+ act0->var_len_shift = shift_count;
+ act0->var_len_right = shift_dir;
+ act0->var_len_mask = var_len_off_mask;
+ act0->var_len_offset = var_len_off;
+ rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind), val);
+ return 0;
+}
+
+int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir,
+ u64 pkind, u8 var_len_off, u8 var_len_off_mask,
+ u8 shift_dir)
+
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int blkaddr, nixlf, rc, intf_mode;
+ int pf = rvu_get_pf(pcifunc);
+ u64 rxpkind, txpkind;
+ u8 cgx_id, lmac_id;
+
+ /* use default pkind to disable edsa/higig */
+ rxpkind = rvu_npc_get_pkind(rvu, pf);
+ txpkind = NPC_TX_DEF_PKIND;
+ intf_mode = NPC_INTF_MODE_DEF;
+
+ if (mode & OTX2_PRIV_FLAGS_CUSTOM) {
+ if (pkind == NPC_RX_CUSTOM_PRE_L2_PKIND) {
+ rc = npc_set_var_len_offset_pkind(rvu, pcifunc, pkind,
+ var_len_off,
+ var_len_off_mask,
+ shift_dir);
+ if (rc)
+ return rc;
+ }
+ rxpkind = pkind;
+ txpkind = pkind;
+ }
+
+ if (dir & PKIND_RX) {
+ /* rx pkind set req valid only for cgx mapped PFs */
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return 0;
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ rc = cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
+ rxpkind);
+ if (rc)
+ return rc;
+ }
+
+ if (dir & PKIND_TX) {
+ /* Tx pkind set request valid if PCIFUNC has NIXLF attached */
+ rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (rc)
+ return rc;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf),
+ txpkind);
+ }
+
+ pfvf->intf_mode = intf_mode;
+ return 0;
+}
+
+int rvu_mbox_handler_npc_set_pkind(struct rvu *rvu, struct npc_set_pkind *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_npc_set_parse_mode(rvu, req->hdr.pcifunc, req->mode,
+ req->dir, req->pkind, req->var_len_off,
+ req->var_len_off_mask, req->shift_dir);
+}
+
+int rvu_mbox_handler_npc_read_base_steer_rule(struct rvu *rvu,
+ struct msg_req *req,
+ struct npc_mcam_read_base_rule_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int index, blkaddr, nixlf, rc = 0;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ u8 intf, enable;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ /* Return the channel number in case of PF */
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ rsp->entry.kw[0] = pfvf->rx_chan_base;
+ rsp->entry.kw_mask[0] = 0xFFFULL;
+ goto out;
+ }
+
+ /* Find the pkt steering rule installed by PF to this VF */
+ mutex_lock(&mcam->lock);
+ for (index = 0; index < mcam->bmap_entries; index++) {
+ if (mcam->entry2target_pffunc[index] == pcifunc)
+ goto read_entry;
+ }
+
+ rc = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
+ if (rc < 0) {
+ mutex_unlock(&mcam->lock);
+ goto out;
+ }
+ /* Read the default ucast entry if there is no pkt steering rule */
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
+ NIXLF_UCAST_ENTRY);
+read_entry:
+ /* Read the mcam entry */
+ npc_read_mcam_entry(rvu, mcam, blkaddr, index, &rsp->entry, &intf,
+ &enable);
+ mutex_unlock(&mcam->lock);
+out:
+ return rc;
+}
+
+int rvu_mbox_handler_npc_mcam_entry_stats(struct rvu *rvu,
+ struct npc_mcam_get_stats_req *req,
+ struct npc_mcam_get_stats_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 index, cntr;
+ int blkaddr;
+ u64 regval;
+ u32 bank;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+
+ index = req->entry & (mcam->banksize - 1);
+ bank = npc_get_bank(mcam, req->entry);
+
+ /* read MCAM entry STAT_ACT register */
+ regval = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank));
+
+ if (!(regval & rvu->hw->npc_stat_ena)) {
+ rsp->stat_ena = 0;
+ mutex_unlock(&mcam->lock);
+ return 0;
+ }
+
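+ /* The low 9 bits of STAT_ACT hold the counter index used by this entry */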
+ cntr = regval & 0x1FF;
+
+ rsp->stat_ena = 1;
+ rsp->stat = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(cntr));
+ rsp->stat &= BIT_ULL(48) - 1;
+
+ mutex_unlock(&mcam->lock);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
new file mode 100644
index 000000000..80d6aa3f1
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -0,0 +1,1666 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2020 Marvell.
+ */
+
+#include <linux/bitfield.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+#include "npc.h"
+#include "rvu_npc_fs.h"
+#include "rvu_npc_hash.h"
+
+static const char * const npc_flow_names[] = {
+ [NPC_DMAC] = "dmac",
+ [NPC_SMAC] = "smac",
+ [NPC_ETYPE] = "ether type",
+ [NPC_VLAN_ETYPE_CTAG] = "vlan ether type ctag",
+ [NPC_VLAN_ETYPE_STAG] = "vlan ether type stag",
+ [NPC_OUTER_VID] = "outer vlan id",
+ [NPC_TOS] = "tos",
+ [NPC_SIP_IPV4] = "ipv4 source ip",
+ [NPC_DIP_IPV4] = "ipv4 destination ip",
+ [NPC_SIP_IPV6] = "ipv6 source ip",
+ [NPC_DIP_IPV6] = "ipv6 destination ip",
+ [NPC_IPPROTO_TCP] = "ip proto tcp",
+ [NPC_IPPROTO_UDP] = "ip proto udp",
+ [NPC_IPPROTO_SCTP] = "ip proto sctp",
+ [NPC_IPPROTO_ICMP] = "ip proto icmp",
+ [NPC_IPPROTO_ICMP6] = "ip proto icmp6",
+ [NPC_IPPROTO_AH] = "ip proto AH",
+ [NPC_IPPROTO_ESP] = "ip proto ESP",
+ [NPC_SPORT_TCP] = "tcp source port",
+ [NPC_DPORT_TCP] = "tcp destination port",
+ [NPC_SPORT_UDP] = "udp source port",
+ [NPC_DPORT_UDP] = "udp destination port",
+ [NPC_SPORT_SCTP] = "sctp source port",
+ [NPC_DPORT_SCTP] = "sctp destination port",
+ [NPC_LXMB] = "Mcast/Bcast header ",
+ [NPC_UNKNOWN] = "unknown",
+};
+
+bool npc_is_feature_supported(struct rvu *rvu, u64 features, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u64 mcam_features;
+ u64 unsupported;
+
+ mcam_features = is_npc_intf_tx(intf) ? mcam->tx_features : mcam->rx_features;
+ unsupported = (mcam_features ^ features) & ~mcam_features;
+
+ /* Return false if at least one of the input flows is not extracted */
+ return !unsupported;
+}
+
+const char *npc_get_field_name(u8 hdr)
+{
+ if (hdr >= ARRAY_SIZE(npc_flow_names))
+ return npc_flow_names[NPC_UNKNOWN];
+
+ return npc_flow_names[hdr];
+}
+
+/* Compute keyword masks and figure out the number of keywords a field
+ * spans in the key.
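+ * e.g. a 16-bit field starting at bit offset 56 of KW[i] sets
+ * kw_mask[i] bits 63:56 and kw_mask[i + 1] bits 7:0 (nr_kws = 2).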
+ */
+static void npc_set_kw_masks(struct npc_mcam *mcam, u8 type,
+ u8 nr_bits, int start_kwi, int offset, u8 intf)
+{
+ struct npc_key_field *field = &mcam->rx_key_fields[type];
+ u8 bits_in_kw;
+ int max_kwi;
+
+ if (mcam->banks_per_entry == 1)
+ max_kwi = 1; /* NPC_MCAM_KEY_X1 */
+ else if (mcam->banks_per_entry == 2)
+ max_kwi = 3; /* NPC_MCAM_KEY_X2 */
+ else
+ max_kwi = 6; /* NPC_MCAM_KEY_X4 */
+
+ if (is_npc_intf_tx(intf))
+ field = &mcam->tx_key_fields[type];
+
+ if (offset + nr_bits <= 64) {
+ /* one KW only */
+ if (start_kwi > max_kwi)
+ return;
+ field->kw_mask[start_kwi] |= GENMASK_ULL(nr_bits - 1, 0)
+ << offset;
+ field->nr_kws = 1;
+ } else if (offset + nr_bits > 64 &&
+ offset + nr_bits <= 128) {
+ /* two KWs */
+ if (start_kwi + 1 > max_kwi)
+ return;
+ /* first KW mask */
+ bits_in_kw = 64 - offset;
+ field->kw_mask[start_kwi] |= GENMASK_ULL(bits_in_kw - 1, 0)
+ << offset;
+ /* second KW mask i.e. mask for rest of bits */
+ bits_in_kw = nr_bits + offset - 64;
+ field->kw_mask[start_kwi + 1] |= GENMASK_ULL(bits_in_kw - 1, 0);
+ field->nr_kws = 2;
+ } else {
+ /* three KWs */
+ if (start_kwi + 2 > max_kwi)
+ return;
+ /* first KW mask */
+ bits_in_kw = 64 - offset;
+ field->kw_mask[start_kwi] |= GENMASK_ULL(bits_in_kw - 1, 0)
+ << offset;
+ /* second KW mask */
+ field->kw_mask[start_kwi + 1] = ~0ULL;
+ /* third KW mask i.e. mask for rest of bits */
+ bits_in_kw = nr_bits + offset - 128;
+ field->kw_mask[start_kwi + 2] |= GENMASK_ULL(bits_in_kw - 1, 0);
+ field->nr_kws = 3;
+ }
+}
+
+/* Helper function to figure out whether field exists in the key */
+static bool npc_is_field_present(struct rvu *rvu, enum key_fields type, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct npc_key_field *input;
+
+ input = &mcam->rx_key_fields[type];
+ if (is_npc_intf_tx(intf))
+ input = &mcam->tx_key_fields[type];
+
+ return input->nr_kws > 0;
+}
+
+static bool npc_is_same(struct npc_key_field *input,
+ struct npc_key_field *field)
+{
+ return memcmp(&input->layer_mdata, &field->layer_mdata,
+ sizeof(struct npc_layer_mdata)) == 0;
+}
+
+static void npc_set_layer_mdata(struct npc_mcam *mcam, enum key_fields type,
+ u64 cfg, u8 lid, u8 lt, u8 intf)
+{
+ struct npc_key_field *input = &mcam->rx_key_fields[type];
+
+ if (is_npc_intf_tx(intf))
+ input = &mcam->tx_key_fields[type];
+
+ input->layer_mdata.hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
+ input->layer_mdata.key = FIELD_GET(NPC_KEY_OFFSET, cfg);
+ input->layer_mdata.len = FIELD_GET(NPC_BYTESM, cfg) + 1;
+ input->layer_mdata.ltype = lt;
+ input->layer_mdata.lid = lid;
+}
+
+static bool npc_check_overlap_fields(struct npc_key_field *input1,
+ struct npc_key_field *input2)
+{
+ int kwi;
+
+ /* Fields with the same layer id and different ltypes are mutually
+ * exclusive, hence they are allowed to overlap
+ */
+ if (input1->layer_mdata.lid == input2->layer_mdata.lid &&
+ input1->layer_mdata.ltype != input2->layer_mdata.ltype)
+ return false;
+
+ for (kwi = 0; kwi < NPC_MAX_KWS_IN_KEY; kwi++) {
+ if (input1->kw_mask[kwi] & input2->kw_mask[kwi])
+ return true;
+ }
+
+ return false;
+}
+
+/* Helper function to check whether given field overlaps with any other fields
+ * in the key. Due to limitations on key size and the key extraction profile
+ * in use, higher layers can overwrite a lower layer's header fields. Hence overlap
+ * needs to be checked.
+ */
+static bool npc_check_overlap(struct rvu *rvu, int blkaddr,
+ enum key_fields type, u8 start_lid, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct npc_key_field *dummy, *input;
+ int start_kwi, offset;
+ u8 nr_bits, lid, lt, ld;
+ u64 cfg;
+
+ dummy = &mcam->rx_key_fields[NPC_UNKNOWN];
+ input = &mcam->rx_key_fields[type];
+
+ if (is_npc_intf_tx(intf)) {
+ dummy = &mcam->tx_key_fields[NPC_UNKNOWN];
+ input = &mcam->tx_key_fields[type];
+ }
+
+ for (lid = start_lid; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ cfg = rvu_read64(rvu, blkaddr,
+ NPC_AF_INTFX_LIDX_LTX_LDX_CFG
+ (intf, lid, lt, ld));
+ if (!FIELD_GET(NPC_LDATA_EN, cfg))
+ continue;
+ memset(dummy, 0, sizeof(struct npc_key_field));
+ npc_set_layer_mdata(mcam, NPC_UNKNOWN, cfg,
+ lid, lt, intf);
+ /* exclude input */
+ if (npc_is_same(input, dummy))
+ continue;
+ start_kwi = dummy->layer_mdata.key / 8;
+ offset = (dummy->layer_mdata.key * 8) % 64;
+ nr_bits = dummy->layer_mdata.len * 8;
+ /* form KW masks */
+ npc_set_kw_masks(mcam, NPC_UNKNOWN, nr_bits,
+ start_kwi, offset, intf);
+ /* check any input field bits falls in any
+ * other field bits.
+ */
+ if (npc_check_overlap_fields(dummy, input))
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+static bool npc_check_field(struct rvu *rvu, int blkaddr, enum key_fields type,
+ u8 intf)
+{
+ if (!npc_is_field_present(rvu, type, intf) ||
+ npc_check_overlap(rvu, blkaddr, type, 0, intf))
+ return false;
+ return true;
+}
+
+static void npc_scan_exact_result(struct npc_mcam *mcam, u8 bit_number,
+ u8 key_nibble, u8 intf)
+{
+ u8 offset = (key_nibble * 4) % 64; /* offset within key word */
+ u8 kwi = (key_nibble * 4) / 64; /* which word in key */
+ u8 nr_bits = 4; /* bits in a nibble */
+ u8 type;
+
+ switch (bit_number) {
+ case 40 ... 43:
+ type = NPC_EXACT_RESULT;
+ break;
+
+ default:
+ return;
+ }
+ npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf);
+}
+
+static void npc_scan_parse_result(struct npc_mcam *mcam, u8 bit_number,
+ u8 key_nibble, u8 intf)
+{
+ u8 offset = (key_nibble * 4) % 64; /* offset within key word */
+ u8 kwi = (key_nibble * 4) / 64; /* which word in key */
+ u8 nr_bits = 4; /* bits in a nibble */
+ u8 type;
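+ /* e.g. key_nibble 17 maps to key bit 68, i.e. kwi = 1, offset = 4 */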
+
+ switch (bit_number) {
+ case 0 ... 2:
+ type = NPC_CHAN;
+ break;
+ case 3:
+ type = NPC_ERRLEV;
+ break;
+ case 4 ... 5:
+ type = NPC_ERRCODE;
+ break;
+ case 6:
+ type = NPC_LXMB;
+ break;
+ /* check for LTYPE only as of now */
+ case 9:
+ type = NPC_LA;
+ break;
+ case 12:
+ type = NPC_LB;
+ break;
+ case 15:
+ type = NPC_LC;
+ break;
+ case 18:
+ type = NPC_LD;
+ break;
+ case 21:
+ type = NPC_LE;
+ break;
+ case 24:
+ type = NPC_LF;
+ break;
+ case 27:
+ type = NPC_LG;
+ break;
+ case 30:
+ type = NPC_LH;
+ break;
+ default:
+ return;
+ }
+
+ npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf);
+}
+
+static void npc_handle_multi_layer_fields(struct rvu *rvu, int blkaddr, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct npc_key_field *key_fields;
+ /* Ether type can come from three layers
+ * (ethernet, single tagged, double tagged)
+ */
+ struct npc_key_field *etype_ether;
+ struct npc_key_field *etype_tag1;
+ struct npc_key_field *etype_tag2;
+ /* Outer VLAN TCI can come from two layers
+ * (single tagged, double tagged)
+ */
+ struct npc_key_field *vlan_tag1;
+ struct npc_key_field *vlan_tag2;
+ u64 *features;
+ u8 start_lid;
+ int i;
+
+ key_fields = mcam->rx_key_fields;
+ features = &mcam->rx_features;
+
+ if (is_npc_intf_tx(intf)) {
+ key_fields = mcam->tx_key_fields;
+ features = &mcam->tx_features;
+ }
+
+ /* Handle header fields which can come from multiple layers, like
+ * etype and outer vlan tci. These fields should have the same position in
+ * the key; otherwise installing a mcam rule needs more than one entry,
+ * which complicates mcam space management.
+ */
+ etype_ether = &key_fields[NPC_ETYPE_ETHER];
+ etype_tag1 = &key_fields[NPC_ETYPE_TAG1];
+ etype_tag2 = &key_fields[NPC_ETYPE_TAG2];
+ vlan_tag1 = &key_fields[NPC_VLAN_TAG1];
+ vlan_tag2 = &key_fields[NPC_VLAN_TAG2];
+
+ /* if key profile programmed does not extract Ethertype at all */
+ if (!etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws) {
+ dev_err(rvu->dev, "mkex: Ethertype is not extracted.\n");
+ goto vlan_tci;
+ }
+
+ /* if key profile programmed extracts Ethertype from one layer */
+ if (etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws)
+ key_fields[NPC_ETYPE] = *etype_ether;
+ if (!etype_ether->nr_kws && etype_tag1->nr_kws && !etype_tag2->nr_kws)
+ key_fields[NPC_ETYPE] = *etype_tag1;
+ if (!etype_ether->nr_kws && !etype_tag1->nr_kws && etype_tag2->nr_kws)
+ key_fields[NPC_ETYPE] = *etype_tag2;
+
+ /* if key profile programmed extracts Ethertype from multiple layers */
+ if (etype_ether->nr_kws && etype_tag1->nr_kws) {
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (etype_ether->kw_mask[i] != etype_tag1->kw_mask[i]) {
+ dev_err(rvu->dev, "mkex: Etype pos is different for untagged and tagged pkts.\n");
+ goto vlan_tci;
+ }
+ }
+ key_fields[NPC_ETYPE] = *etype_tag1;
+ }
+ if (etype_ether->nr_kws && etype_tag2->nr_kws) {
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (etype_ether->kw_mask[i] != etype_tag2->kw_mask[i]) {
+ dev_err(rvu->dev, "mkex: Etype pos is different for untagged and double tagged pkts.\n");
+ goto vlan_tci;
+ }
+ }
+ key_fields[NPC_ETYPE] = *etype_tag2;
+ }
+ if (etype_tag1->nr_kws && etype_tag2->nr_kws) {
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (etype_tag1->kw_mask[i] != etype_tag2->kw_mask[i]) {
+ dev_err(rvu->dev, "mkex: Etype pos is different for tagged and double tagged pkts.\n");
+ goto vlan_tci;
+ }
+ }
+ key_fields[NPC_ETYPE] = *etype_tag2;
+ }
+
+ /* check none of higher layers overwrite Ethertype */
+ start_lid = key_fields[NPC_ETYPE].layer_mdata.lid + 1;
+ if (npc_check_overlap(rvu, blkaddr, NPC_ETYPE, start_lid, intf)) {
+ dev_err(rvu->dev, "mkex: Ethertype is overwritten by higher layers.\n");
+ goto vlan_tci;
+ }
+ *features |= BIT_ULL(NPC_ETYPE);
+vlan_tci:
+ /* if key profile does not extract outer vlan tci at all */
+ if (!vlan_tag1->nr_kws && !vlan_tag2->nr_kws) {
+ dev_err(rvu->dev, "mkex: Outer vlan tci is not extracted.\n");
+ goto done;
+ }
+
+ /* if key profile extracts outer vlan tci from one layer */
+ if (vlan_tag1->nr_kws && !vlan_tag2->nr_kws)
+ key_fields[NPC_OUTER_VID] = *vlan_tag1;
+ if (!vlan_tag1->nr_kws && vlan_tag2->nr_kws)
+ key_fields[NPC_OUTER_VID] = *vlan_tag2;
+
+ /* if key profile extracts outer vlan tci from multiple layers */
+ if (vlan_tag1->nr_kws && vlan_tag2->nr_kws) {
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (vlan_tag1->kw_mask[i] != vlan_tag2->kw_mask[i]) {
+ dev_err(rvu->dev, "mkex: Out vlan tci pos is different for tagged and double tagged pkts.\n");
+ goto done;
+ }
+ }
+ key_fields[NPC_OUTER_VID] = *vlan_tag2;
+ }
+ /* check none of higher layers overwrite outer vlan tci */
+ start_lid = key_fields[NPC_OUTER_VID].layer_mdata.lid + 1;
+ if (npc_check_overlap(rvu, blkaddr, NPC_OUTER_VID, start_lid, intf)) {
+ dev_err(rvu->dev, "mkex: Outer vlan tci is overwritten by higher layers.\n");
+ goto done;
+ }
+ *features |= BIT_ULL(NPC_OUTER_VID);
+done:
+ return;
+}
+
+static void npc_scan_ldata(struct rvu *rvu, int blkaddr, u8 lid,
+ u8 lt, u64 cfg, u8 intf)
+{
+ struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u8 hdr, key, nr_bytes, bit_offset;
+ u8 la_ltype, la_start;
+ /* starting KW index and starting bit position */
+ int start_kwi, offset;
+
+ nr_bytes = FIELD_GET(NPC_BYTESM, cfg) + 1;
+ hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
+ key = FIELD_GET(NPC_KEY_OFFSET, cfg);
+
+ /* For Tx, Layer A has NIX_INST_HDR_S(64 bytes) preceding
+ * ethernet header.
+ */
+ if (is_npc_intf_tx(intf)) {
+ la_ltype = NPC_LT_LA_IH_NIX_ETHER;
+ la_start = 8;
+ } else {
+ la_ltype = NPC_LT_LA_ETHER;
+ la_start = 0;
+ }
+
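+/* NPC_SCAN_HDR(name, hlid, hlt, hstart, hlen): if the LD extraction being
+ * scanned (lid/lt, bytes hdr..hdr+nr_bytes) covers the header field starting
+ * at byte hstart with length hlen, record its layer metadata and mark the
+ * key word bits it occupies.
+ */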
+#define NPC_SCAN_HDR(name, hlid, hlt, hstart, hlen) \
+do { \
+ start_kwi = key / 8; \
+ offset = (key * 8) % 64; \
+ if (lid == (hlid) && lt == (hlt)) { \
+ if ((hstart) >= hdr && \
+ ((hstart) + (hlen)) <= (hdr + nr_bytes)) { \
+ bit_offset = (hdr + nr_bytes - (hstart) - (hlen)) * 8; \
+ npc_set_layer_mdata(mcam, (name), cfg, lid, lt, intf); \
+ offset += bit_offset; \
+ start_kwi += offset / 64; \
+ offset %= 64; \
+ npc_set_kw_masks(mcam, (name), (hlen) * 8, \
+ start_kwi, offset, intf); \
+ } \
+ } \
+} while (0)
+
+ /* List LID, LTYPE, start offset from layer and length(in bytes) of
+ * packet header fields below.
+ * Example: Source IP is 4 bytes and starts at 12th byte of IP header
+ */
+ NPC_SCAN_HDR(NPC_TOS, NPC_LID_LC, NPC_LT_LC_IP, 1, 1);
+ NPC_SCAN_HDR(NPC_SIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 12, 4);
+ NPC_SCAN_HDR(NPC_DIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 16, 4);
+ if (rvu->hw->cap.npc_hash_extract) {
+ if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][0])
+ NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 4);
+ else
+ NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16);
+
+ if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][1])
+ NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 4);
+ else
+ NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 16);
+ } else {
+ NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16);
+ NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 16);
+ }
+
+ NPC_SCAN_HDR(NPC_SPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 0, 2);
+ NPC_SCAN_HDR(NPC_DPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 2, 2);
+ NPC_SCAN_HDR(NPC_SPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 0, 2);
+ NPC_SCAN_HDR(NPC_DPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 2, 2);
+ NPC_SCAN_HDR(NPC_SPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 0, 2);
+ NPC_SCAN_HDR(NPC_DPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 2, 2);
+ NPC_SCAN_HDR(NPC_ETYPE_ETHER, NPC_LID_LA, NPC_LT_LA_ETHER, 12, 2);
+ NPC_SCAN_HDR(NPC_ETYPE_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 4, 2);
+ NPC_SCAN_HDR(NPC_ETYPE_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8, 2);
+ NPC_SCAN_HDR(NPC_VLAN_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 2, 2);
+ NPC_SCAN_HDR(NPC_VLAN_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 2, 2);
+ NPC_SCAN_HDR(NPC_DMAC, NPC_LID_LA, la_ltype, la_start, 6);
+ /* SMAC follows the DMAC(which is 6 bytes) */
+ NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start + 6, 6);
+ /* PF_FUNC is 2 bytes at 0th byte of NPC_LT_LA_IH_NIX_ETHER */
+ NPC_SCAN_HDR(NPC_PF_FUNC, NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, 0, 2);
+}
+
+static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u64 *features = &mcam->rx_features;
+ u64 tcp_udp_sctp;
+ int hdr;
+
+ if (is_npc_intf_tx(intf))
+ features = &mcam->tx_features;
+
+ for (hdr = NPC_DMAC; hdr < NPC_HEADER_FIELDS_MAX; hdr++) {
+ if (npc_check_field(rvu, blkaddr, hdr, intf))
+ *features |= BIT_ULL(hdr);
+ }
+
+ tcp_udp_sctp = BIT_ULL(NPC_SPORT_TCP) | BIT_ULL(NPC_SPORT_UDP) |
+ BIT_ULL(NPC_DPORT_TCP) | BIT_ULL(NPC_DPORT_UDP) |
+ BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP);
+
+ /* for tcp/udp/sctp corresponding layer type should be in the key */
+ if (*features & tcp_udp_sctp) {
+ if (!npc_check_field(rvu, blkaddr, NPC_LD, intf))
+ *features &= ~tcp_udp_sctp;
+ else
+ *features |= BIT_ULL(NPC_IPPROTO_TCP) |
+ BIT_ULL(NPC_IPPROTO_UDP) |
+ BIT_ULL(NPC_IPPROTO_SCTP);
+ }
+
+ /* for AH/ICMP/ICMPv6, check if the corresponding layer type is present in the key */
+ if (npc_check_field(rvu, blkaddr, NPC_LD, intf)) {
+ *features |= BIT_ULL(NPC_IPPROTO_AH);
+ *features |= BIT_ULL(NPC_IPPROTO_ICMP);
+ *features |= BIT_ULL(NPC_IPPROTO_ICMP6);
+ }
+
+ /* for ESP, check if corresponding layer type is present in the key */
+ if (npc_check_field(rvu, blkaddr, NPC_LE, intf))
+ *features |= BIT_ULL(NPC_IPPROTO_ESP);
+
+ /* for vlan corresponding layer type should be in the key */
+ if (*features & BIT_ULL(NPC_OUTER_VID))
+ if (!npc_check_field(rvu, blkaddr, NPC_LB, intf))
+ *features &= ~BIT_ULL(NPC_OUTER_VID);
+
+ /* for vlan ethertypes corresponding layer type should be in the key */
+ if (npc_check_field(rvu, blkaddr, NPC_LB, intf))
+ *features |= BIT_ULL(NPC_VLAN_ETYPE_CTAG) |
+ BIT_ULL(NPC_VLAN_ETYPE_STAG);
+
+ /* for L2M/L2B/L3M/L3B, check if the type is present in the key */
+ if (npc_check_field(rvu, blkaddr, NPC_LXMB, intf))
+ *features |= BIT_ULL(NPC_LXMB);
+}
+
+/* Scan key extraction profile and record how fields of our interest
+ * fill the key structure. Also verify Channel and DMAC exists in
+ * key and not overwritten by other header fields.
+ */
+static int npc_scan_kex(struct rvu *rvu, int blkaddr, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u8 lid, lt, ld, bitnr;
+ u64 cfg, masked_cfg;
+ u8 key_nibble = 0;
+
+ /* Scan and note how parse result is going to be in key.
+ * A bit set in PARSE_NIBBLE_ENA corresponds to a nibble from
+ * parse result in the key. The enabled nibbles from parse result
+ * will be concatenated in key.
+ */
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf));
+ masked_cfg = cfg & NPC_PARSE_NIBBLE;
+ for_each_set_bit(bitnr, (unsigned long *)&masked_cfg, 31) {
+ npc_scan_parse_result(mcam, bitnr, key_nibble, intf);
+ key_nibble++;
+ }
+
+ /* Ignore exact match bits for mcam entries except the first rule
+ * which is drop on hit. This first rule is configured explicitly by
+ * exact match code.
+ */
+ masked_cfg = cfg & NPC_EXACT_NIBBLE;
+ bitnr = NPC_EXACT_NIBBLE_START;
+ for_each_set_bit_from(bitnr, (unsigned long *)&masked_cfg, NPC_EXACT_NIBBLE_END + 1) {
+ npc_scan_exact_result(mcam, bitnr, key_nibble, intf);
+ key_nibble++;
+ }
+
+ /* Scan and note how layer data is going to be in key */
+ for (lid = 0; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ cfg = rvu_read64(rvu, blkaddr,
+ NPC_AF_INTFX_LIDX_LTX_LDX_CFG
+ (intf, lid, lt, ld));
+ if (!FIELD_GET(NPC_LDATA_EN, cfg))
+ continue;
+ npc_scan_ldata(rvu, blkaddr, lid, lt, cfg,
+ intf);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int npc_scan_verify_kex(struct rvu *rvu, int blkaddr)
+{
+ int err;
+
+ err = npc_scan_kex(rvu, blkaddr, NIX_INTF_RX);
+ if (err)
+ return err;
+
+ err = npc_scan_kex(rvu, blkaddr, NIX_INTF_TX);
+ if (err)
+ return err;
+
+ /* Channel is mandatory */
+ if (!npc_is_field_present(rvu, NPC_CHAN, NIX_INTF_RX)) {
+ dev_err(rvu->dev, "Channel not present in Key\n");
+ return -EINVAL;
+ }
+ /* check that none of the fields overwrite channel */
+ if (npc_check_overlap(rvu, blkaddr, NPC_CHAN, 0, NIX_INTF_RX)) {
+ dev_err(rvu->dev, "Channel cannot be overwritten\n");
+ return -EINVAL;
+ }
+
+ npc_set_features(rvu, blkaddr, NIX_INTF_TX);
+ npc_set_features(rvu, blkaddr, NIX_INTF_RX);
+ npc_handle_multi_layer_fields(rvu, blkaddr, NIX_INTF_TX);
+ npc_handle_multi_layer_fields(rvu, blkaddr, NIX_INTF_RX);
+
+ return 0;
+}
+
+int npc_flow_steering_init(struct rvu *rvu, int blkaddr)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+
+ INIT_LIST_HEAD(&mcam->mcam_rules);
+
+ return npc_scan_verify_kex(rvu, blkaddr);
+}
+
+static int npc_check_unsupported_flows(struct rvu *rvu, u64 features, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u64 *mcam_features = &mcam->rx_features;
+ u64 unsupported;
+ u8 bit;
+
+ if (is_npc_intf_tx(intf))
+ mcam_features = &mcam->tx_features;
+
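+ /* Feature bits requested by the caller but not extracted by the key profile */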
+ unsupported = (*mcam_features ^ features) & ~(*mcam_features);
+ if (unsupported) {
+ dev_warn(rvu->dev, "Unsupported flow(s):\n");
+ for_each_set_bit(bit, (unsigned long *)&unsupported, 64)
+ dev_warn(rvu->dev, "%s ", npc_get_field_name(bit));
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/* npc_update_entry - Based on the masks generated during
+ * the key scanning, updates the given entry with value and
+ * masks for the field of interest. Maximum 16 bytes of a packet
+ * header can be extracted by HW hence lo and hi are sufficient.
+ * When field bytes are less than or equal to 8 then hi should be
+ * 0 for value and mask.
+ *
+ * If exact match of value is required then mask should be all 1's.
+ * If any bits in the mask are 0 then the corresponding bits in the value
+ * are don't care.
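+ *
+ * e.g. to match a 16-bit TCP port exactly: val_lo = port,
+ * mask_lo = 0xffff, val_hi = mask_hi = 0.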
+ */
+void npc_update_entry(struct rvu *rvu, enum key_fields type,
+ struct mcam_entry *entry, u64 val_lo,
+ u64 val_hi, u64 mask_lo, u64 mask_hi, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct mcam_entry dummy = { {0} };
+ struct npc_key_field *field;
+ u64 kw1, kw2, kw3;
+ u8 shift;
+ int i;
+
+ field = &mcam->rx_key_fields[type];
+ if (is_npc_intf_tx(intf))
+ field = &mcam->tx_key_fields[type];
+
+ if (!field->nr_kws)
+ return;
+
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (!field->kw_mask[i])
+ continue;
+ /* place key value in kw[x] */
+ shift = __ffs64(field->kw_mask[i]);
+ /* update entry value */
+ kw1 = (val_lo << shift) & field->kw_mask[i];
+ dummy.kw[i] = kw1;
+ /* update entry mask */
+ kw1 = (mask_lo << shift) & field->kw_mask[i];
+ dummy.kw_mask[i] = kw1;
+
+ if (field->nr_kws == 1)
+ break;
+ /* place remaining bits of key value in kw[x + 1] */
+ if (field->nr_kws == 2) {
+ /* update entry value */
+ kw2 = shift ? val_lo >> (64 - shift) : 0;
+ kw2 |= (val_hi << shift);
+ kw2 &= field->kw_mask[i + 1];
+ dummy.kw[i + 1] = kw2;
+ /* update entry mask */
+ kw2 = shift ? mask_lo >> (64 - shift) : 0;
+ kw2 |= (mask_hi << shift);
+ kw2 &= field->kw_mask[i + 1];
+ dummy.kw_mask[i + 1] = kw2;
+ break;
+ }
+ /* place remaining bits of key value in kw[x + 1], kw[x + 2] */
+ if (field->nr_kws == 3) {
+ /* update entry value */
+ kw2 = shift ? val_lo >> (64 - shift) : 0;
+ kw2 |= (val_hi << shift);
+ kw2 &= field->kw_mask[i + 1];
+ kw3 = shift ? val_hi >> (64 - shift) : 0;
+ kw3 &= field->kw_mask[i + 2];
+ dummy.kw[i + 1] = kw2;
+ dummy.kw[i + 2] = kw3;
+ /* update entry mask */
+ kw2 = shift ? mask_lo >> (64 - shift) : 0;
+ kw2 |= (mask_hi << shift);
+ kw2 &= field->kw_mask[i + 1];
+ kw3 = shift ? mask_hi >> (64 - shift) : 0;
+ kw3 &= field->kw_mask[i + 2];
+ dummy.kw_mask[i + 1] = kw2;
+ dummy.kw_mask[i + 2] = kw3;
+ break;
+ }
+ }
+ /* dummy is ready with values and masks for given key
+ * field now clear and update input entry with those
+ */
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (!field->kw_mask[i])
+ continue;
+ entry->kw[i] &= ~field->kw_mask[i];
+ entry->kw_mask[i] &= ~field->kw_mask[i];
+
+ entry->kw[i] |= dummy.kw[i];
+ entry->kw_mask[i] |= dummy.kw_mask[i];
+ }
+}
+
+static void npc_update_ipv6_flow(struct rvu *rvu, struct mcam_entry *entry,
+ u64 features, struct flow_msg *pkt,
+ struct flow_msg *mask,
+ struct rvu_npc_mcam_rule *output, u8 intf)
+{
+ u32 src_ip[IPV6_WORDS], src_ip_mask[IPV6_WORDS];
+ u32 dst_ip[IPV6_WORDS], dst_ip_mask[IPV6_WORDS];
+ struct flow_msg *opkt = &output->packet;
+ struct flow_msg *omask = &output->mask;
+ u64 mask_lo, mask_hi;
+ u64 val_lo, val_hi;
+
+ /* For an ipv6 address fe80::2c68:63ff:fe5e:2d0a the packet
+ * values to be programmed in MCAM should be as below:
+ * val_high: 0xfe80000000000000
+ * val_low: 0x2c6863fffe5e2d0a
+ */
+ if (features & BIT_ULL(NPC_SIP_IPV6)) {
+ be32_to_cpu_array(src_ip_mask, mask->ip6src, IPV6_WORDS);
+ be32_to_cpu_array(src_ip, pkt->ip6src, IPV6_WORDS);
+
+ mask_hi = (u64)src_ip_mask[0] << 32 | src_ip_mask[1];
+ mask_lo = (u64)src_ip_mask[2] << 32 | src_ip_mask[3];
+ val_hi = (u64)src_ip[0] << 32 | src_ip[1];
+ val_lo = (u64)src_ip[2] << 32 | src_ip[3];
+
+ npc_update_entry(rvu, NPC_SIP_IPV6, entry, val_lo, val_hi,
+ mask_lo, mask_hi, intf);
+ memcpy(opkt->ip6src, pkt->ip6src, sizeof(opkt->ip6src));
+ memcpy(omask->ip6src, mask->ip6src, sizeof(omask->ip6src));
+ }
+ if (features & BIT_ULL(NPC_DIP_IPV6)) {
+ be32_to_cpu_array(dst_ip_mask, mask->ip6dst, IPV6_WORDS);
+ be32_to_cpu_array(dst_ip, pkt->ip6dst, IPV6_WORDS);
+
+ mask_hi = (u64)dst_ip_mask[0] << 32 | dst_ip_mask[1];
+ mask_lo = (u64)dst_ip_mask[2] << 32 | dst_ip_mask[3];
+ val_hi = (u64)dst_ip[0] << 32 | dst_ip[1];
+ val_lo = (u64)dst_ip[2] << 32 | dst_ip[3];
+
+ npc_update_entry(rvu, NPC_DIP_IPV6, entry, val_lo, val_hi,
+ mask_lo, mask_hi, intf);
+ memcpy(opkt->ip6dst, pkt->ip6dst, sizeof(opkt->ip6dst));
+ memcpy(omask->ip6dst, mask->ip6dst, sizeof(omask->ip6dst));
+ }
+}
+
+static void npc_update_vlan_features(struct rvu *rvu, struct mcam_entry *entry,
+ u64 features, u8 intf)
+{
+ bool ctag = !!(features & BIT_ULL(NPC_VLAN_ETYPE_CTAG));
+ bool stag = !!(features & BIT_ULL(NPC_VLAN_ETYPE_STAG));
+ bool vid = !!(features & BIT_ULL(NPC_OUTER_VID));
+
+ /* If only VLAN id is given then always match outer VLAN id */
+ if (vid && !ctag && !stag) {
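+ /* Accept either CTAG or STAG_QINQ LB ltype: program the OR of the
+ * two ltypes as the value and their AND as the mask, so the bits
+ * which differ between them are not compared.
+ */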
+ npc_update_entry(rvu, NPC_LB, entry,
+ NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG, 0,
+ NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG, 0, intf);
+ return;
+ }
+ if (ctag)
+ npc_update_entry(rvu, NPC_LB, entry, NPC_LT_LB_CTAG, 0,
+ ~0ULL, 0, intf);
+ if (stag)
+ npc_update_entry(rvu, NPC_LB, entry, NPC_LT_LB_STAG_QINQ, 0,
+ ~0ULL, 0, intf);
+}
+
+static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry,
+ u64 features, struct flow_msg *pkt,
+ struct flow_msg *mask,
+ struct rvu_npc_mcam_rule *output, u8 intf,
+ int blkaddr)
+{
+ u64 dmac_mask = ether_addr_to_u64(mask->dmac);
+ u64 smac_mask = ether_addr_to_u64(mask->smac);
+ u64 dmac_val = ether_addr_to_u64(pkt->dmac);
+ u64 smac_val = ether_addr_to_u64(pkt->smac);
+ struct flow_msg *opkt = &output->packet;
+ struct flow_msg *omask = &output->mask;
+
+ if (!features)
+ return;
+
+ /* For tcp/udp/sctp LTYPE should be present in entry */
+ if (features & BIT_ULL(NPC_IPPROTO_TCP))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_TCP,
+ 0, ~0ULL, 0, intf);
+ if (features & BIT_ULL(NPC_IPPROTO_UDP))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_UDP,
+ 0, ~0ULL, 0, intf);
+ if (features & BIT_ULL(NPC_IPPROTO_SCTP))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_SCTP,
+ 0, ~0ULL, 0, intf);
+ if (features & BIT_ULL(NPC_IPPROTO_ICMP))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_ICMP,
+ 0, ~0ULL, 0, intf);
+ if (features & BIT_ULL(NPC_IPPROTO_ICMP6))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_ICMP6,
+ 0, ~0ULL, 0, intf);
+
+ /* For AH, LTYPE should be present in entry */
+ if (features & BIT_ULL(NPC_IPPROTO_AH))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_AH,
+ 0, ~0ULL, 0, intf);
+ /* For ESP, LTYPE should be present in entry */
+ if (features & BIT_ULL(NPC_IPPROTO_ESP))
+ npc_update_entry(rvu, NPC_LE, entry, NPC_LT_LE_ESP,
+ 0, ~0ULL, 0, intf);
+
+ if (features & BIT_ULL(NPC_LXMB)) {
+ output->lxmb = is_broadcast_ether_addr(pkt->dmac) ? 2 : 1;
+ npc_update_entry(rvu, NPC_LXMB, entry, output->lxmb, 0,
+ output->lxmb, 0, intf);
+ }
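+/* NPC_WRITE_FLOW(field, member, ...): if 'field' is present in the requested
+ * features, program its value/mask into the MCAM entry and copy the packet
+ * header member into the output rule for later retrieval.
+ */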
+#define NPC_WRITE_FLOW(field, member, val_lo, val_hi, mask_lo, mask_hi) \
+do { \
+ if (features & BIT_ULL((field))) { \
+ npc_update_entry(rvu, (field), entry, (val_lo), (val_hi), \
+ (mask_lo), (mask_hi), intf); \
+ memcpy(&opkt->member, &pkt->member, sizeof(pkt->member)); \
+ memcpy(&omask->member, &mask->member, sizeof(mask->member)); \
+ } \
+} while (0)
+
+ NPC_WRITE_FLOW(NPC_DMAC, dmac, dmac_val, 0, dmac_mask, 0);
+
+ NPC_WRITE_FLOW(NPC_SMAC, smac, smac_val, 0, smac_mask, 0);
+ NPC_WRITE_FLOW(NPC_ETYPE, etype, ntohs(pkt->etype), 0,
+ ntohs(mask->etype), 0);
+ NPC_WRITE_FLOW(NPC_TOS, tos, pkt->tos, 0, mask->tos, 0);
+ NPC_WRITE_FLOW(NPC_SIP_IPV4, ip4src, ntohl(pkt->ip4src), 0,
+ ntohl(mask->ip4src), 0);
+ NPC_WRITE_FLOW(NPC_DIP_IPV4, ip4dst, ntohl(pkt->ip4dst), 0,
+ ntohl(mask->ip4dst), 0);
+ NPC_WRITE_FLOW(NPC_SPORT_TCP, sport, ntohs(pkt->sport), 0,
+ ntohs(mask->sport), 0);
+ NPC_WRITE_FLOW(NPC_SPORT_UDP, sport, ntohs(pkt->sport), 0,
+ ntohs(mask->sport), 0);
+ NPC_WRITE_FLOW(NPC_DPORT_TCP, dport, ntohs(pkt->dport), 0,
+ ntohs(mask->dport), 0);
+ NPC_WRITE_FLOW(NPC_DPORT_UDP, dport, ntohs(pkt->dport), 0,
+ ntohs(mask->dport), 0);
+ NPC_WRITE_FLOW(NPC_SPORT_SCTP, sport, ntohs(pkt->sport), 0,
+ ntohs(mask->sport), 0);
+ NPC_WRITE_FLOW(NPC_DPORT_SCTP, dport, ntohs(pkt->dport), 0,
+ ntohs(mask->dport), 0);
+
+ NPC_WRITE_FLOW(NPC_OUTER_VID, vlan_tci, ntohs(pkt->vlan_tci), 0,
+ ntohs(mask->vlan_tci), 0);
+
+ npc_update_ipv6_flow(rvu, entry, features, pkt, mask, output, intf);
+ npc_update_vlan_features(rvu, entry, features, intf);
+
+ npc_update_field_hash(rvu, intf, entry, blkaddr, features,
+ pkt, mask, opkt, omask);
+}
+
+static struct rvu_npc_mcam_rule *rvu_mcam_find_rule(struct npc_mcam *mcam, u16 entry)
+{
+ struct rvu_npc_mcam_rule *iter;
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry(iter, &mcam->mcam_rules, list) {
+ if (iter->entry == entry) {
+ mutex_unlock(&mcam->lock);
+ return iter;
+ }
+ }
+ mutex_unlock(&mcam->lock);
+
+ return NULL;
+}
+
+static void rvu_mcam_add_rule(struct npc_mcam *mcam,
+ struct rvu_npc_mcam_rule *rule)
+{
+ struct list_head *head = &mcam->mcam_rules;
+ struct rvu_npc_mcam_rule *iter;
+
+ mutex_lock(&mcam->lock);
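+ /* Keep mcam_rules sorted by ascending MCAM entry index */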
+ list_for_each_entry(iter, &mcam->mcam_rules, list) {
+ if (iter->entry > rule->entry)
+ break;
+ head = &iter->list;
+ }
+
+ list_add(&rule->list, head);
+ mutex_unlock(&mcam->lock);
+}
+
+static void rvu_mcam_remove_counter_from_rule(struct rvu *rvu, u16 pcifunc,
+ struct rvu_npc_mcam_rule *rule)
+{
+ struct npc_mcam_oper_counter_req free_req = { 0 };
+ struct msg_rsp free_rsp;
+
+ if (!rule->has_cntr)
+ return;
+
+ free_req.hdr.pcifunc = pcifunc;
+ free_req.cntr = rule->cntr;
+
+ rvu_mbox_handler_npc_mcam_free_counter(rvu, &free_req, &free_rsp);
+ rule->has_cntr = false;
+}
+
+static void rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc,
+ struct rvu_npc_mcam_rule *rule,
+ struct npc_install_flow_rsp *rsp)
+{
+ struct npc_mcam_alloc_counter_req cntr_req = { 0 };
+ struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 };
+ int err;
+
+ cntr_req.hdr.pcifunc = pcifunc;
+ cntr_req.contig = true;
+ cntr_req.count = 1;
+
+ /* Try to allocate a counter to track the stats of this
+ * rule. If a counter cannot be allocated then proceed
+ * without one, since counters are scarcer than entries.
+ */
+ err = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req,
+ &cntr_rsp);
+ if (!err && cntr_rsp.count) {
+ rule->cntr = cntr_rsp.cntr;
+ rule->has_cntr = true;
+ rsp->counter = rule->cntr;
+ } else {
+ rsp->counter = err;
+ }
+}
+
+static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct mcam_entry *entry,
+ struct npc_install_flow_req *req,
+ u16 target, bool pf_set_vfs_mac)
+{
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ struct nix_rx_action action;
+
+ if (rswitch->mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && pf_set_vfs_mac)
+ req->chan_mask = 0x0; /* Do not care channel */
+
+ npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0, req->chan_mask,
+ 0, NIX_INTF_RX);
+
+ *(u64 *)&action = 0x00;
+ action.pf_func = target;
+ action.op = req->op;
+ action.index = req->index;
+ action.match_id = req->match_id;
+ action.flow_key_alg = req->flow_key_alg;
+
+ if (req->op == NIX_RX_ACTION_DEFAULT) {
+ if (pfvf->def_ucast_rule) {
+ action = pfvf->def_ucast_rule->rx_action;
+ } else {
+ /* For profiles which do not extract DMAC, the default
+ * unicast entry is unused. Hence modify action for the
+ * requests which use the same action as the default unicast
+ * entry.
+ */
+ *(u64 *)&action = 0;
+ action.pf_func = target;
+ action.op = NIX_RX_ACTIONOP_UCAST;
+ }
+ }
+
+ entry->action = *(u64 *)&action;
+
+ /* VTAG0 starts at 0th byte of LID_B.
+ * VTAG1 starts at 4th byte of LID_B.
+ */
+ entry->vtag_action = FIELD_PREP(RX_VTAG0_VALID_BIT, req->vtag0_valid) |
+ FIELD_PREP(RX_VTAG0_TYPE_MASK, req->vtag0_type) |
+ FIELD_PREP(RX_VTAG0_LID_MASK, NPC_LID_LB) |
+ FIELD_PREP(RX_VTAG0_RELPTR_MASK, 0) |
+ FIELD_PREP(RX_VTAG1_VALID_BIT, req->vtag1_valid) |
+ FIELD_PREP(RX_VTAG1_TYPE_MASK, req->vtag1_type) |
+ FIELD_PREP(RX_VTAG1_LID_MASK, NPC_LID_LB) |
+ FIELD_PREP(RX_VTAG1_RELPTR_MASK, 4);
+}
+
+static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct mcam_entry *entry,
+ struct npc_install_flow_req *req, u16 target)
+{
+ struct nix_tx_action action;
+ u64 mask = ~0ULL;
+
+ /* If AF is installing the rule then do not match on the
+ * PF_FUNC in the Send Descriptor
+ */
+ if (is_pffunc_af(req->hdr.pcifunc))
+ mask = 0;
+
+ npc_update_entry(rvu, NPC_PF_FUNC, entry, (__force u16)htons(target),
+ 0, mask, 0, NIX_INTF_TX);
+
+ *(u64 *)&action = 0x00;
+ action.op = req->op;
+ action.index = req->index;
+ action.match_id = req->match_id;
+
+ entry->action = *(u64 *)&action;
+
+ /* VTAG0 starts at 0th byte of LID_B.
+ * VTAG1 starts at 4th byte of LID_B.
+ */
+ entry->vtag_action = FIELD_PREP(TX_VTAG0_DEF_MASK, req->vtag0_def) |
+ FIELD_PREP(TX_VTAG0_OP_MASK, req->vtag0_op) |
+ FIELD_PREP(TX_VTAG0_LID_MASK, NPC_LID_LA) |
+ FIELD_PREP(TX_VTAG0_RELPTR_MASK, 20) |
+ FIELD_PREP(TX_VTAG1_DEF_MASK, req->vtag1_def) |
+ FIELD_PREP(TX_VTAG1_OP_MASK, req->vtag1_op) |
+ FIELD_PREP(TX_VTAG1_LID_MASK, NPC_LID_LA) |
+ FIELD_PREP(TX_VTAG1_RELPTR_MASK, 24);
+}
+
+static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
+ int nixlf, struct rvu_pfvf *pfvf,
+ struct npc_install_flow_req *req,
+ struct npc_install_flow_rsp *rsp, bool enable,
+ bool pf_set_vfs_mac)
+{
+ struct rvu_npc_mcam_rule *def_ucast_rule = pfvf->def_ucast_rule;
+ u64 features, installed_features, missing_features = 0;
+ struct npc_mcam_write_entry_req write_req = { 0 };
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule dummy = { 0 };
+ struct rvu_npc_mcam_rule *rule;
+ u16 owner = req->hdr.pcifunc;
+ struct msg_rsp write_rsp;
+ struct mcam_entry *entry;
+ bool new = false;
+ u16 entry_index;
+ int err;
+
+ installed_features = req->features;
+ features = req->features;
+ entry = &write_req.entry_data;
+ entry_index = req->entry;
+
+ npc_update_flow(rvu, entry, features, &req->packet, &req->mask, &dummy,
+ req->intf, blkaddr);
+
+ if (is_npc_intf_rx(req->intf))
+ npc_update_rx_entry(rvu, pfvf, entry, req, target, pf_set_vfs_mac);
+ else
+ npc_update_tx_entry(rvu, pfvf, entry, req, target);
+
+ /* Default unicast rules do not exist for TX */
+ if (is_npc_intf_tx(req->intf))
+ goto find_rule;
+
+ if (req->default_rule) {
+ entry_index = npc_get_nixlf_mcam_index(mcam, target, nixlf,
+ NIXLF_UCAST_ENTRY);
+ enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, entry_index);
+ }
+
+ /* update mcam entry with default unicast rule attributes */
+ if (def_ucast_rule && (req->default_rule && req->append)) {
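+ /* Fields carried by the default unicast rule but missing from this request */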
+ missing_features = (def_ucast_rule->features ^ features) &
+ def_ucast_rule->features;
+ if (missing_features)
+ npc_update_flow(rvu, entry, missing_features,
+ &def_ucast_rule->packet,
+ &def_ucast_rule->mask,
+ &dummy, req->intf,
+ blkaddr);
+ installed_features = req->features | missing_features;
+ }
+
+find_rule:
+ rule = rvu_mcam_find_rule(mcam, entry_index);
+ if (!rule) {
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+ new = true;
+ }
+
+ /* allocate new counter if rule has no counter */
+ if (!req->default_rule && req->set_cntr && !rule->has_cntr)
+ rvu_mcam_add_counter_to_rule(rvu, owner, rule, rsp);
+
+ /* if user wants to delete an existing counter for a rule then
+ * free the counter
+ */
+ if (!req->set_cntr && rule->has_cntr)
+ rvu_mcam_remove_counter_from_rule(rvu, owner, rule);
+
+ write_req.hdr.pcifunc = owner;
+
+ /* AF owns the default rules so change the owner just to relax
+ * the checks in rvu_mbox_handler_npc_mcam_write_entry
+ */
+ if (req->default_rule)
+ write_req.hdr.pcifunc = 0;
+
+ write_req.entry = entry_index;
+ write_req.intf = req->intf;
+ write_req.enable_entry = (u8)enable;
+ /* if counter is available then clear and use it */
+ if (req->set_cntr && rule->has_cntr) {
+ rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(rule->cntr), req->cntr_val);
+ write_req.set_cntr = 1;
+ write_req.cntr = rule->cntr;
+ }
+
+ /* update rule */
+ memcpy(&rule->packet, &dummy.packet, sizeof(rule->packet));
+ memcpy(&rule->mask, &dummy.mask, sizeof(rule->mask));
+ rule->entry = entry_index;
+ memcpy(&rule->rx_action, &entry->action, sizeof(struct nix_rx_action));
+ if (is_npc_intf_tx(req->intf))
+ memcpy(&rule->tx_action, &entry->action,
+ sizeof(struct nix_tx_action));
+ rule->vtag_action = entry->vtag_action;
+ rule->features = installed_features;
+ rule->default_rule = req->default_rule;
+ rule->owner = owner;
+ rule->enable = enable;
+ rule->chan_mask = write_req.entry_data.kw_mask[0] & NPC_KEX_CHAN_MASK;
+ rule->chan = write_req.entry_data.kw[0] & NPC_KEX_CHAN_MASK;
+ rule->chan &= rule->chan_mask;
+ rule->lxmb = dummy.lxmb;
+ if (is_npc_intf_tx(req->intf))
+ rule->intf = pfvf->nix_tx_intf;
+ else
+ rule->intf = pfvf->nix_rx_intf;
+
+ if (new)
+ rvu_mcam_add_rule(mcam, rule);
+ if (req->default_rule)
+ pfvf->def_ucast_rule = rule;
+
+ /* write to mcam entry registers */
+ err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req,
+ &write_rsp);
+ if (err) {
+ rvu_mcam_remove_counter_from_rule(rvu, owner, rule);
+ if (new) {
+ list_del(&rule->list);
+ kfree(rule);
+ }
+ return err;
+ }
+
+ /* VF's MAC address is being changed via PF */
+ if (pf_set_vfs_mac) {
+ ether_addr_copy(pfvf->default_mac, req->packet.dmac);
+ ether_addr_copy(pfvf->mac_addr, req->packet.dmac);
+ set_bit(PF_SET_VF_MAC, &pfvf->flags);
+ }
+
+ if (test_bit(PF_SET_VF_CFG, &pfvf->flags) &&
+ req->vtag0_type == NIX_AF_LFX_RX_VTAG_TYPE7)
+ rule->vfvlan_cfg = true;
+
+ if (is_npc_intf_rx(req->intf) && req->match_id &&
+ (req->op == NIX_RX_ACTIONOP_UCAST || req->op == NIX_RX_ACTIONOP_RSS))
+ return rvu_nix_setup_ratelimit_aggr(rvu, req->hdr.pcifunc,
+ req->index, req->match_id);
+
+ return 0;
+}
+
+int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
+ struct npc_install_flow_req *req,
+ struct npc_install_flow_rsp *rsp)
+{
+ bool from_vf = !!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK);
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ int blkaddr, nixlf, err;
+ struct rvu_pfvf *pfvf;
+ bool pf_set_vfs_mac = false;
+ bool enable = true;
+ u16 target;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return NPC_MCAM_INVALID_REQ;
+ }
+
+ if (!is_npc_interface_valid(rvu, req->intf))
+ return NPC_FLOW_INTF_INVALID;
+
+ /* If DMAC is not extracted in MKEX, rules installed by AF
+ * can rely on L2MB bit set by hardware protocol checker for
+ * broadcast and multicast addresses.
+ */
+ if (npc_check_field(rvu, blkaddr, NPC_DMAC, req->intf))
+ goto process_flow;
+
+ if (is_pffunc_af(req->hdr.pcifunc) &&
+ req->features & BIT_ULL(NPC_DMAC)) {
+ if (is_unicast_ether_addr(req->packet.dmac)) {
+ dev_warn(rvu->dev,
+ "%s: mkex profile does not support ucast flow\n",
+ __func__);
+ return NPC_FLOW_NOT_SUPPORTED;
+ }
+
+ if (!npc_is_field_present(rvu, NPC_LXMB, req->intf)) {
+ dev_warn(rvu->dev,
+ "%s: mkex profile does not support bcast/mcast flow",
+ __func__);
+ return NPC_FLOW_NOT_SUPPORTED;
+ }
+
+ /* Modify feature to use LXMB instead of DMAC */
+ req->features &= ~BIT_ULL(NPC_DMAC);
+ req->features |= BIT_ULL(NPC_LXMB);
+ }
+
+process_flow:
+ if (from_vf && req->default_rule)
+ return NPC_FLOW_VF_PERM_DENIED;
+
+ /* Each PF/VF info is maintained in struct rvu_pfvf.
+ * rvu_pfvf for the target PF/VF needs to be retrieved
+ * hence modify pcifunc accordingly.
+ */
+
+ /* AF installing for a PF/VF */
+ if (!req->hdr.pcifunc)
+ target = req->vf;
+ /* PF installing for its VF */
+ else if (!from_vf && req->vf) {
+ target = (req->hdr.pcifunc & ~RVU_PFVF_FUNC_MASK) | req->vf;
+ pf_set_vfs_mac = req->default_rule &&
+ (req->features & BIT_ULL(NPC_DMAC));
+ }
+ /* msg received from PF/VF */
+ else
+ target = req->hdr.pcifunc;
+
+ /* ignore chan_mask in case pf func is not AF, revisit later */
+ if (!is_pffunc_af(req->hdr.pcifunc))
+ req->chan_mask = 0xFFF;
+
+ err = npc_check_unsupported_flows(rvu, req->features, req->intf);
+ if (err)
+ return NPC_FLOW_NOT_SUPPORTED;
+
+ pfvf = rvu_get_pfvf(rvu, target);
+
+ /* PF installing for its VF */
+ if (req->hdr.pcifunc && !from_vf && req->vf)
+ set_bit(PF_SET_VF_CFG, &pfvf->flags);
+
+ /* update req destination mac addr */
+ if ((req->features & BIT_ULL(NPC_DMAC)) && is_npc_intf_rx(req->intf) &&
+ is_zero_ether_addr(req->packet.dmac)) {
+ ether_addr_copy(req->packet.dmac, pfvf->mac_addr);
+ eth_broadcast_addr((u8 *)&req->mask.dmac);
+ }
+
+	/* For TX rules, proceed whether or not a NIXLF is attached */
+ err = nix_get_nixlf(rvu, target, &nixlf, NULL);
+ if (err && is_npc_intf_rx(req->intf) && !pf_set_vfs_mac)
+ return NPC_FLOW_NO_NIXLF;
+
+ /* don't enable rule when nixlf not attached or initialized */
+ if (!(is_nixlf_attached(rvu, target) &&
+ test_bit(NIXLF_INITIALIZED, &pfvf->flags)))
+ enable = false;
+
+ /* Packets reaching NPC in Tx path implies that a
+ * NIXLF is properly setup and transmitting.
+ * Hence rules can be enabled for Tx.
+ */
+ if (is_npc_intf_tx(req->intf))
+ enable = true;
+
+ /* Do not allow requests from uninitialized VFs */
+ if (from_vf && !enable)
+ return NPC_FLOW_VF_NOT_INIT;
+
+ /* PF sets VF mac & VF NIXLF is not attached, update the mac addr */
+ if (pf_set_vfs_mac && !enable) {
+ ether_addr_copy(pfvf->default_mac, req->packet.dmac);
+ ether_addr_copy(pfvf->mac_addr, req->packet.dmac);
+ set_bit(PF_SET_VF_MAC, &pfvf->flags);
+ return 0;
+ }
+
+ mutex_lock(&rswitch->switch_lock);
+ err = npc_install_flow(rvu, blkaddr, target, nixlf, pfvf,
+ req, rsp, enable, pf_set_vfs_mac);
+ mutex_unlock(&rswitch->switch_lock);
+
+ return err;
+}
+
+static int npc_delete_flow(struct rvu *rvu, struct rvu_npc_mcam_rule *rule,
+ u16 pcifunc)
+{
+ struct npc_mcam_ena_dis_entry_req dis_req = { 0 };
+ struct msg_rsp dis_rsp;
+
+ if (rule->default_rule)
+ return 0;
+
+ if (rule->has_cntr)
+ rvu_mcam_remove_counter_from_rule(rvu, pcifunc, rule);
+
+ dis_req.hdr.pcifunc = pcifunc;
+ dis_req.entry = rule->entry;
+
+ list_del(&rule->list);
+ kfree(rule);
+
+ return rvu_mbox_handler_npc_mcam_dis_entry(rvu, &dis_req, &dis_rsp);
+}
+
+int rvu_mbox_handler_npc_delete_flow(struct rvu *rvu,
+ struct npc_delete_flow_req *req,
+ struct npc_delete_flow_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *iter, *tmp;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct list_head del_list;
+ int blkaddr;
+
+ INIT_LIST_HEAD(&del_list);
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry_safe(iter, tmp, &mcam->mcam_rules, list) {
+ if (iter->owner == pcifunc) {
+ /* All rules */
+ if (req->all) {
+ list_move_tail(&iter->list, &del_list);
+ /* Range of rules */
+ } else if (req->end && iter->entry >= req->start &&
+ iter->entry <= req->end) {
+ list_move_tail(&iter->list, &del_list);
+ /* single rule */
+ } else if (req->entry == iter->entry) {
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr)
+ rsp->cntr_val = rvu_read64(rvu, blkaddr,
+ NPC_AF_MATCH_STATX(iter->cntr));
+ list_move_tail(&iter->list, &del_list);
+ break;
+ }
+ }
+ }
+ mutex_unlock(&mcam->lock);
+
+ list_for_each_entry_safe(iter, tmp, &del_list, list) {
+ u16 entry = iter->entry;
+
+ /* clear the mcam entry target pcifunc */
+ mcam->entry2target_pffunc[entry] = 0x0;
+ if (npc_delete_flow(rvu, iter, pcifunc))
+ dev_err(rvu->dev, "rule deletion failed for entry:%u",
+ entry);
+ }
+
+ return 0;
+}
+
+static int npc_update_dmac_value(struct rvu *rvu, int npcblkaddr,
+ struct rvu_npc_mcam_rule *rule,
+ struct rvu_pfvf *pfvf)
+{
+ struct npc_mcam_write_entry_req write_req = { 0 };
+ struct mcam_entry *entry = &write_req.entry_data;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct msg_rsp rsp;
+ u8 intf, enable;
+ int err;
+
+ ether_addr_copy(rule->packet.dmac, pfvf->mac_addr);
+
+ npc_read_mcam_entry(rvu, mcam, npcblkaddr, rule->entry,
+ entry, &intf, &enable);
+
+ npc_update_entry(rvu, NPC_DMAC, entry,
+ ether_addr_to_u64(pfvf->mac_addr), 0,
+ 0xffffffffffffull, 0, intf);
+
+ write_req.hdr.pcifunc = rule->owner;
+ write_req.entry = rule->entry;
+ write_req.intf = pfvf->nix_rx_intf;
+
+ mutex_unlock(&mcam->lock);
+ err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req, &rsp);
+ mutex_lock(&mcam->lock);
+
+ return err;
+}
+
+void npc_mcam_enable_flows(struct rvu *rvu, u16 target)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, target);
+ struct rvu_npc_mcam_rule *def_ucast_rule;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule;
+ int blkaddr, bank, index;
+ u64 def_action;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ def_ucast_rule = pfvf->def_ucast_rule;
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (is_npc_intf_rx(rule->intf) &&
+ rule->rx_action.pf_func == target && !rule->enable) {
+ if (rule->default_rule) {
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ rule->entry, true);
+ rule->enable = true;
+ continue;
+ }
+
+ if (rule->vfvlan_cfg)
+ npc_update_dmac_value(rvu, blkaddr, rule, pfvf);
+
+ if (rule->rx_action.op == NIX_RX_ACTION_DEFAULT) {
+ if (!def_ucast_rule)
+ continue;
+ /* Use default unicast entry action */
+ rule->rx_action = def_ucast_rule->rx_action;
+ def_action = *(u64 *)&def_ucast_rule->rx_action;
+ bank = npc_get_bank(mcam, rule->entry);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION
+ (rule->entry, bank), def_action);
+ }
+
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ rule->entry, true);
+ rule->enable = true;
+ }
+ }
+
+ /* Enable MCAM entries installed by PF with target as VF pcifunc */
+ for (index = 0; index < mcam->bmap_entries; index++) {
+ if (mcam->entry2target_pffunc[index] == target)
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ index, true);
+ }
+ mutex_unlock(&mcam->lock);
+}
+
+void npc_mcam_disable_flows(struct rvu *rvu, u16 target)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, index;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ mutex_lock(&mcam->lock);
+ /* Disable MCAM entries installed by PF with target as VF pcifunc */
+ for (index = 0; index < mcam->bmap_entries; index++) {
+ if (mcam->entry2target_pffunc[index] == target)
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ index, false);
+ }
+ mutex_unlock(&mcam->lock);
+}
+
+/* A single drop-on-no-hit rule is installed starting from the 0th index.
+ * This is an extension to the RPM MAC filter to support more rules.
+ */
+int npc_install_mcam_drop_rule(struct rvu *rvu, int mcam_idx, u16 *counter_idx,
+ u64 chan_val, u64 chan_mask, u64 exact_val, u64 exact_mask,
+ u64 bcast_mcast_val, u64 bcast_mcast_mask)
+{
+ struct npc_mcam_alloc_counter_req cntr_req = { 0 };
+ struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 };
+ struct npc_mcam_write_entry_req req = { 0 };
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule;
+ struct msg_rsp rsp;
+ bool enabled;
+ int blkaddr;
+ int err;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -ENODEV;
+ }
+
+ /* Bail out if no exact match support */
+ if (!rvu_npc_exact_has_match_table(rvu)) {
+ dev_info(rvu->dev, "%s: No support for exact match feature\n", __func__);
+ return -EINVAL;
+ }
+
+ /* If 0th entry is already used, return err */
+ enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, mcam_idx);
+ if (enabled) {
+ dev_err(rvu->dev, "%s: failed to add single drop on non hit rule at %d th index\n",
+ __func__, mcam_idx);
+ return -EINVAL;
+ }
+
+ /* Add this entry to mcam rules list */
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ /* Disable rule by default. Enable rule when first dmac filter is
+ * installed
+ */
+ rule->enable = false;
+ rule->chan = chan_val;
+ rule->chan_mask = chan_mask;
+ rule->entry = mcam_idx;
+ rvu_mcam_add_rule(mcam, rule);
+
+ /* Reserve slot 0 */
+ npc_mcam_rsrcs_reserve(rvu, blkaddr, mcam_idx);
+
+ /* Allocate counter for this single drop on non hit rule */
+ cntr_req.hdr.pcifunc = 0; /* AF request */
+ cntr_req.contig = true;
+ cntr_req.count = 1;
+ err = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp);
+ if (err) {
+ dev_err(rvu->dev, "%s: Err to allocate cntr for drop rule (err=%d)\n",
+ __func__, err);
+ return -EFAULT;
+ }
+ *counter_idx = cntr_rsp.cntr;
+
+ /* Fill in fields for this mcam entry */
+ npc_update_entry(rvu, NPC_EXACT_RESULT, &req.entry_data, exact_val, 0,
+ exact_mask, 0, NIX_INTF_RX);
+ npc_update_entry(rvu, NPC_CHAN, &req.entry_data, chan_val, 0,
+ chan_mask, 0, NIX_INTF_RX);
+ npc_update_entry(rvu, NPC_LXMB, &req.entry_data, bcast_mcast_val, 0,
+ bcast_mcast_mask, 0, NIX_INTF_RX);
+
+ req.intf = NIX_INTF_RX;
+ req.set_cntr = true;
+ req.cntr = cntr_rsp.cntr;
+ req.entry = mcam_idx;
+
+ err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &req, &rsp);
+ if (err) {
+ dev_err(rvu->dev, "%s: Installation of single drop on non hit rule at %d failed\n",
+ __func__, mcam_idx);
+ return err;
+ }
+
+ dev_err(rvu->dev, "%s: Installed single drop on non hit rule at %d, cntr=%d\n",
+ __func__, mcam_idx, req.cntr);
+
+ /* disable entry at Bank 0, index 0 */
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, mcam_idx, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_npc_get_field_status(struct rvu *rvu,
+ struct npc_get_field_status_req *req,
+ struct npc_get_field_status_rsp *rsp)
+{
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ if (!is_npc_interface_valid(rvu, req->intf))
+ return NPC_FLOW_INTF_INVALID;
+
+ if (npc_check_field(rvu, blkaddr, req->field, req->intf))
+ rsp->enable = 1;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h
new file mode 100644
index 000000000..3f5c9042d
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2022 Marvell.
+ *
+ */
+
+#ifndef __RVU_NPC_FS_H
+#define __RVU_NPC_FS_H
+
+#define IPV6_WORDS 4
+#define NPC_BYTESM GENMASK_ULL(19, 16)
+#define NPC_HDR_OFFSET GENMASK_ULL(15, 8)
+#define NPC_KEY_OFFSET GENMASK_ULL(5, 0)
+#define NPC_LDATA_EN BIT_ULL(7)
+
+void npc_update_entry(struct rvu *rvu, enum key_fields type,
+ struct mcam_entry *entry, u64 val_lo,
+ u64 val_hi, u64 mask_lo, u64 mask_hi, u8 intf);
+
+#endif /* RVU_NPC_FS_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
new file mode 100644
index 000000000..54e0dfdc9
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
@@ -0,0 +1,2053 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2022 Marvell.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/firmware.h>
+#include <linux/stddef.h>
+#include <linux/debugfs.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+#include "npc.h"
+#include "cgx.h"
+#include "rvu_npc_fs.h"
+#include "rvu_npc_hash.h"
+
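+/* Descriptive note: extract @width_bits bits starting at @start_bit from an
+ * array of 64-bit words (word 0 holds bits 0-63), handling fields that
+ * straddle a word boundary.
+ */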
+static u64 rvu_npc_wide_extract(const u64 input[], size_t start_bit,
+ size_t width_bits)
+{
+ const u64 mask = ~(u64)((~(__uint128_t)0) << width_bits);
+ const size_t msb = start_bit + width_bits - 1;
+ const size_t lword = start_bit >> 6;
+ const size_t uword = msb >> 6;
+ size_t lbits;
+ u64 hi, lo;
+
+ if (lword == uword)
+ return (input[lword] >> (start_bit & 63)) & mask;
+
+ lbits = 64 - (start_bit & 63);
+ hi = input[uword];
+ lo = (input[lword] >> (start_bit & 63));
+ return ((hi << lbits) | lo) & mask;
+}
+
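+/* Descriptive note: shift the whole multi-word key left by one bit, carrying
+ * the MSB of each lower word into the next higher word (key[0] is the least
+ * significant word).
+ */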
+static void rvu_npc_lshift_key(u64 *key, size_t key_bit_len)
+{
+ u64 prev_orig_word = 0;
+ u64 cur_orig_word = 0;
+ size_t extra = key_bit_len % 64;
+ size_t max_idx = key_bit_len / 64;
+ size_t i;
+
+ if (extra)
+ max_idx++;
+
+ for (i = 0; i < max_idx; i++) {
+ cur_orig_word = key[i];
+ key[i] = key[i] << 1;
+ key[i] |= ((prev_orig_word >> 63) & 0x1);
+ prev_orig_word = cur_orig_word;
+ }
+}
+
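+/* Bit-serial Toeplitz hash (descriptive note): data bits are walked from the
+ * most significant bit down; whenever a data bit is set, the current top 32
+ * bits of the key are XORed into the hash, and the key is shifted left by one
+ * bit after every data bit. E.g. for a 2-bit input with both bits set,
+ * hash = top32(key) ^ top32(key << 1).
+ */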
+static u32 rvu_npc_toeplitz_hash(const u64 *data, u64 *key, size_t data_bit_len,
+ size_t key_bit_len)
+{
+ u32 hash_out = 0;
+ u64 temp_data = 0;
+ int i;
+
+ for (i = data_bit_len - 1; i >= 0; i--) {
+ temp_data = (data[i / 64]);
+ temp_data = temp_data >> (i % 64);
+ temp_data &= 0x1;
+ if (temp_data)
+ hash_out ^= (u32)(rvu_npc_wide_extract(key, key_bit_len - 32, 32));
+
+ rvu_npc_lshift_key(key, key_bit_len);
+ }
+
+ return hash_out;
+}
+
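+/* Descriptive note: the three 64-bit secret key words from @rsp are repacked
+ * into a 159-bit Toeplitz key (hash_key[2..0]), the two masked ldata words are
+ * hashed as 128 bits of data, and the result is masked and offset using the
+ * upper and lower 32 bits of hash_ctrl respectively.
+ */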
+u32 npc_field_hash_calc(u64 *ldata, struct npc_get_field_hash_info_rsp rsp,
+ u8 intf, u8 hash_idx)
+{
+ u64 hash_key[3];
+ u64 data_padded[2];
+ u32 field_hash;
+
+ hash_key[0] = rsp.secret_key[1] << 31;
+ hash_key[0] |= rsp.secret_key[2];
+ hash_key[1] = rsp.secret_key[1] >> 33;
+ hash_key[1] |= rsp.secret_key[0] << 31;
+ hash_key[2] = rsp.secret_key[0] >> 33;
+
+ data_padded[0] = rsp.hash_mask[intf][hash_idx][0] & ldata[0];
+ data_padded[1] = rsp.hash_mask[intf][hash_idx][1] & ldata[1];
+ field_hash = rvu_npc_toeplitz_hash(data_padded, hash_key, 128, 159);
+
+ field_hash &= FIELD_GET(GENMASK(63, 32), rsp.hash_ctrl[intf][hash_idx]);
+ field_hash += FIELD_GET(GENMASK(31, 0), rsp.hash_ctrl[intf][hash_idx]);
+ return field_hash;
+}
+
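+/* Descriptive note: the existing KEX_LD_CFG for the layer is read so that the
+ * header and key offsets are preserved, then the config is rebuilt with
+ * use_hash set and bytesm1 = 0x3 (i.e. 4 bytes extracted), matching the
+ * 32-bit hashed value.
+ */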
+static u64 npc_update_use_hash(struct rvu *rvu, int blkaddr,
+ u8 intf, int lid, int lt, int ld)
+{
+ u8 hdr, key;
+ u64 cfg;
+
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, lt, ld));
+ hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
+ key = FIELD_GET(NPC_KEY_OFFSET, cfg);
+
+ /* Update use_hash(bit-20) to 'true' and
+ * bytesm1(bit-16:19) to '0x3' in KEX_LD_CFG
+ */
+ cfg = KEX_LD_CFG_USE_HASH(0x1, 0x03,
+ hdr, 0x1, 0x0, key);
+
+ return cfg;
+}
+
+static void npc_program_mkex_hash_rx(struct rvu *rvu, int blkaddr,
+ u8 intf)
+{
+ struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
+ int lid, lt, ld, hash_cnt = 0;
+
+ if (is_npc_intf_tx(intf))
+ return;
+
+ /* Program HASH_CFG */
+ for (lid = 0; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) {
+ u64 cfg;
+
+ if (hash_cnt == NPC_MAX_HASH)
+ return;
+
+ cfg = npc_update_use_hash(rvu, blkaddr,
+ intf, lid, lt, ld);
+ /* Set updated KEX configuration */
+ SET_KEX_LD(intf, lid, lt, ld, cfg);
+ /* Set HASH configuration */
+ SET_KEX_LD_HASH(intf, ld,
+ mkex_hash->hash[intf][ld]);
+ SET_KEX_LD_HASH_MASK(intf, ld, 0,
+ mkex_hash->hash_mask[intf][ld][0]);
+ SET_KEX_LD_HASH_MASK(intf, ld, 1,
+ mkex_hash->hash_mask[intf][ld][1]);
+ SET_KEX_LD_HASH_CTRL(intf, ld,
+ mkex_hash->hash_ctrl[intf][ld]);
+
+ hash_cnt++;
+ }
+ }
+ }
+ }
+}
+
+static void npc_program_mkex_hash_tx(struct rvu *rvu, int blkaddr,
+ u8 intf)
+{
+ struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
+ int lid, lt, ld, hash_cnt = 0;
+
+ if (is_npc_intf_rx(intf))
+ return;
+
+ /* Program HASH_CFG */
+ for (lid = 0; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++)
+ if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) {
+ u64 cfg;
+
+ if (hash_cnt == NPC_MAX_HASH)
+ return;
+
+ cfg = npc_update_use_hash(rvu, blkaddr,
+ intf, lid, lt, ld);
+ /* Set updated KEX configuration */
+ SET_KEX_LD(intf, lid, lt, ld, cfg);
+ /* Set HASH configuration */
+ SET_KEX_LD_HASH(intf, ld,
+ mkex_hash->hash[intf][ld]);
+ SET_KEX_LD_HASH_MASK(intf, ld, 0,
+ mkex_hash->hash_mask[intf][ld][0]);
+ SET_KEX_LD_HASH_MASK(intf, ld, 1,
+ mkex_hash->hash_mask[intf][ld][1]);
+ SET_KEX_LD_HASH_CTRL(intf, ld,
+ mkex_hash->hash_ctrl[intf][ld]);
+ hash_cnt++;
+ }
+ }
+ }
+}
+
+void npc_config_secret_key(struct rvu *rvu, int blkaddr)
+{
+ struct hw_cap *hwcap = &rvu->hw->cap;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u8 intf;
+
+ if (!hwcap->npc_hash_extract)
+ return;
+
+ for (intf = 0; intf < hw->npc_intfs; intf++) {
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf),
+ RVU_NPC_HASH_SECRET_KEY0);
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY1(intf),
+ RVU_NPC_HASH_SECRET_KEY1);
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY2(intf),
+ RVU_NPC_HASH_SECRET_KEY2);
+ }
+}
+
+void npc_program_mkex_hash(struct rvu *rvu, int blkaddr)
+{
+ struct npc_mcam_kex_hash *mh = rvu->kpu.mkex_hash;
+ struct hw_cap *hwcap = &rvu->hw->cap;
+ u8 intf, ld, hdr_offset, byte_len;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 cfg;
+
+ /* Check if hardware supports hash extraction */
+ if (!hwcap->npc_hash_extract)
+ return;
+
+	/* Check if the IPv6 source/destination address should be hash
+	 * enabled. Hashing reduces the 128-bit SIP/DIP fields to 32 bits
+	 * so that the 224-bit X2 key can be used for IPv6 based filters as
+	 * well, which in turn makes more MCAM entries available for use.
+	 *
+	 * Hashing of IPv6 SIP/DIP is enabled in the scenarios below:
+	 * 1. The silicon variant supports the hashing feature.
+	 * 2. The number of bytes of IP address being extracted is 4, i.e.
+	 *    32 bits. The assumption here is that if the user wants the
+	 *    8 LSB bytes of the IP address or the full 16 bytes, the
+	 *    intention is not to use the 32-bit hash.
+	 */
+ for (intf = 0; intf < hw->npc_intfs; intf++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ cfg = rvu_read64(rvu, blkaddr,
+ NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf,
+ NPC_LID_LC,
+ NPC_LT_LC_IP6,
+ ld));
+ hdr_offset = FIELD_GET(NPC_HDR_OFFSET, cfg);
+ byte_len = FIELD_GET(NPC_BYTESM, cfg);
+			/* Hashing of the IPv6 source/destination address should
+			 * be enabled if hdr_offset == 8 (offset of the source
+			 * IPv6 address) or hdr_offset == 24 (offset of the
+			 * destination IPv6 address) and the number of bytes to
+			 * be extracted is 4. As per the hardware configuration,
+			 * byte_len should be the actual byte length - 1.
+			 * Hence byte_len is checked against 3 but not 4.
+			 */
+ if ((hdr_offset == 8 || hdr_offset == 24) && byte_len == 3)
+ mh->lid_lt_ld_hash_en[intf][NPC_LID_LC][NPC_LT_LC_IP6][ld] = true;
+ }
+ }
+
+ /* Update hash configuration if the field is hash enabled */
+ for (intf = 0; intf < hw->npc_intfs; intf++) {
+ npc_program_mkex_hash_rx(rvu, blkaddr, intf);
+ npc_program_mkex_hash_tx(rvu, blkaddr, intf);
+ }
+}
+
+void npc_update_field_hash(struct rvu *rvu, u8 intf,
+ struct mcam_entry *entry,
+ int blkaddr,
+ u64 features,
+ struct flow_msg *pkt,
+ struct flow_msg *mask,
+ struct flow_msg *opkt,
+ struct flow_msg *omask)
+{
+ struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
+ struct npc_get_field_hash_info_req req;
+ struct npc_get_field_hash_info_rsp rsp;
+ u64 ldata[2], cfg;
+ u32 field_hash;
+ u8 hash_idx;
+
+ if (!rvu->hw->cap.npc_hash_extract) {
+ dev_dbg(rvu->dev, "%s: Field hash extract feature is not supported\n", __func__);
+ return;
+ }
+
+ req.intf = intf;
+ rvu_mbox_handler_npc_get_field_hash_info(rvu, &req, &rsp);
+
+ for (hash_idx = 0; hash_idx < NPC_MAX_HASH; hash_idx++) {
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_HASHX_CFG(intf, hash_idx));
+ if ((cfg & BIT_ULL(11)) && (cfg & BIT_ULL(12))) {
+ u8 lid = (cfg & GENMASK_ULL(10, 8)) >> 8;
+ u8 ltype = (cfg & GENMASK_ULL(7, 4)) >> 4;
+ u8 ltype_mask = cfg & GENMASK_ULL(3, 0);
+
+ if (mkex_hash->lid_lt_ld_hash_en[intf][lid][ltype][hash_idx]) {
+ switch (ltype & ltype_mask) {
+				/* If hash extraction is enabled for IPv6 then the
+				 * 128-bit IPv6 source and destination addresses
+				 * are each hashed to a 32-bit value.
+				 */
+ case NPC_LT_LC_IP6:
+ /* ld[0] == hash_idx[0] == Source IPv6
+ * ld[1] == hash_idx[1] == Destination IPv6
+ */
+ if ((features & BIT_ULL(NPC_SIP_IPV6)) && !hash_idx) {
+ u32 src_ip[IPV6_WORDS];
+
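+					/* ldata[1] holds the upper 64 bits and
+					 * ldata[0] the lower 64 bits of the
+					 * address, as consumed by
+					 * npc_field_hash_calc() (descriptive note).
+					 */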
+ be32_to_cpu_array(src_ip, pkt->ip6src, IPV6_WORDS);
+ ldata[1] = (u64)src_ip[0] << 32 | src_ip[1];
+ ldata[0] = (u64)src_ip[2] << 32 | src_ip[3];
+ field_hash = npc_field_hash_calc(ldata,
+ rsp,
+ intf,
+ hash_idx);
+ npc_update_entry(rvu, NPC_SIP_IPV6, entry,
+ field_hash, 0,
+ GENMASK(31, 0), 0, intf);
+ memcpy(&opkt->ip6src, &pkt->ip6src,
+ sizeof(pkt->ip6src));
+ memcpy(&omask->ip6src, &mask->ip6src,
+ sizeof(mask->ip6src));
+ } else if ((features & BIT_ULL(NPC_DIP_IPV6)) && hash_idx) {
+ u32 dst_ip[IPV6_WORDS];
+
+ be32_to_cpu_array(dst_ip, pkt->ip6dst, IPV6_WORDS);
+ ldata[1] = (u64)dst_ip[0] << 32 | dst_ip[1];
+ ldata[0] = (u64)dst_ip[2] << 32 | dst_ip[3];
+ field_hash = npc_field_hash_calc(ldata,
+ rsp,
+ intf,
+ hash_idx);
+ npc_update_entry(rvu, NPC_DIP_IPV6, entry,
+ field_hash, 0,
+ GENMASK(31, 0), 0, intf);
+ memcpy(&opkt->ip6dst, &pkt->ip6dst,
+ sizeof(pkt->ip6dst));
+ memcpy(&omask->ip6dst, &mask->ip6dst,
+ sizeof(mask->ip6dst));
+ }
+
+ break;
+ }
+ }
+ }
+ }
+}
+
+int rvu_mbox_handler_npc_get_field_hash_info(struct rvu *rvu,
+ struct npc_get_field_hash_info_req *req,
+ struct npc_get_field_hash_info_rsp *rsp)
+{
+ u64 *secret_key = rsp->secret_key;
+ u8 intf = req->intf;
+ int i, j, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -EINVAL;
+ }
+
+ secret_key[0] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf));
+ secret_key[1] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY1(intf));
+ secret_key[2] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY2(intf));
+
+ for (i = 0; i < NPC_MAX_HASH; i++) {
+ for (j = 0; j < NPC_MAX_HASH_MASK; j++) {
+ rsp->hash_mask[NIX_INTF_RX][i][j] =
+ GET_KEX_LD_HASH_MASK(NIX_INTF_RX, i, j);
+ rsp->hash_mask[NIX_INTF_TX][i][j] =
+ GET_KEX_LD_HASH_MASK(NIX_INTF_TX, i, j);
+ }
+ }
+
+ for (i = 0; i < NPC_MAX_INTF; i++)
+ for (j = 0; j < NPC_MAX_HASH; j++)
+ rsp->hash_ctrl[i][j] = GET_KEX_LD_HASH_CTRL(i, j);
+
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_mac2u64 - utility function to convert mac address to u64.
+ * @mac_addr: MAC address.
+ * Return: mdata for exact match table.
+ */
+static u64 rvu_npc_exact_mac2u64(u8 *mac_addr)
+{
+ u64 mac = 0;
+ int index;
+
+ for (index = ETH_ALEN - 1; index >= 0; index--)
+ mac |= ((u64)*mac_addr++) << (8 * index);
+
+ return mac;
+}
+
+/**
+ * rvu_exact_prepare_mdata - Make mdata for mcam entry
+ * @mac: MAC address
+ * @chan: Channel number.
+ * @ctype: Channel Type.
+ * @mask: LDATA mask.
+ * Return: Meta data
+ */
+static u64 rvu_exact_prepare_mdata(u8 *mac, u16 chan, u16 ctype, u64 mask)
+{
+ u64 ldata = rvu_npc_exact_mac2u64(mac);
+
+ /* Please note that mask is 48bit which excludes chan and ctype.
+ * Increase mask bits if we need to include them as well.
+ */
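+	/* Layout built below (descriptive note): bits [47:0] = MAC,
+	 * [59:48] = chan, [61:60] = ctype; the result is masked and then
+	 * shifted left by 2 before being used as hash input.
+	 */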
+ ldata |= ((u64)chan << 48);
+ ldata |= ((u64)ctype << 60);
+ ldata &= mask;
+ ldata = ldata << 2;
+
+ return ldata;
+}
+
+/**
+ * rvu_exact_calculate_hash - calculate hash index to mem table.
+ * @rvu: resource virtualization unit.
+ * @chan: Channel number
+ * @ctype: Channel type.
+ * @mac: MAC address
+ * @mask: HASH mask.
+ * @table_depth: Depth of table.
+ * Return: Hash value
+ */
+static u32 rvu_exact_calculate_hash(struct rvu *rvu, u16 chan, u16 ctype, u8 *mac,
+ u64 mask, u32 table_depth)
+{
+ struct npc_exact_table *table = rvu->hw->table;
+ u64 hash_key[2];
+ u64 key_in[2];
+ u64 ldata;
+ u32 hash;
+
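+	/* Descriptive note: a 95-bit Toeplitz key is assembled below from
+	 * RVU_NPC_HASH_SECRET_KEY0 and RVU_NPC_HASH_SECRET_KEY2, the prepared
+	 * mdata is hashed against it, and the result is folded into the mem
+	 * table index range via hash_mask and hash_offset.
+	 */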
+ key_in[0] = RVU_NPC_HASH_SECRET_KEY0;
+ key_in[1] = RVU_NPC_HASH_SECRET_KEY2;
+
+ hash_key[0] = key_in[0] << 31;
+ hash_key[0] |= key_in[1];
+ hash_key[1] = key_in[0] >> 33;
+
+ ldata = rvu_exact_prepare_mdata(mac, chan, ctype, mask);
+
+ dev_dbg(rvu->dev, "%s: ldata=0x%llx hash_key0=0x%llx hash_key2=0x%llx\n", __func__,
+ ldata, hash_key[1], hash_key[0]);
+ hash = rvu_npc_toeplitz_hash(&ldata, (u64 *)hash_key, 64, 95);
+
+ hash &= table->mem_table.hash_mask;
+ hash += table->mem_table.hash_offset;
+ dev_dbg(rvu->dev, "%s: hash=%x\n", __func__, hash);
+
+ return hash;
+}
+
+/**
+ * rvu_npc_exact_alloc_mem_table_entry - find free entry in 4 way table.
+ * @rvu: resource virtualization unit.
+ * @way: Indicate way to table.
+ * @index: Hash index to 4 way table.
+ * @hash: Hash value.
+ *
+ * Searches 4 way table using hash index. Returns 0 on success.
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_alloc_mem_table_entry(struct rvu *rvu, u8 *way,
+ u32 *index, unsigned int hash)
+{
+ struct npc_exact_table *table;
+ int depth, i;
+
+ table = rvu->hw->table;
+ depth = table->mem_table.depth;
+
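+	/* The ways share a single bitmap: bit (way * depth + hash) tracks the
+	 * slot for a given way and hash index (descriptive note).
+	 */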
+ /* Check all the 4 ways for a free slot. */
+ mutex_lock(&table->lock);
+ for (i = 0; i < table->mem_table.ways; i++) {
+ if (test_bit(hash + i * depth, table->mem_table.bmap))
+ continue;
+
+ set_bit(hash + i * depth, table->mem_table.bmap);
+ mutex_unlock(&table->lock);
+
+ dev_dbg(rvu->dev, "%s: mem table entry alloc success (way=%d index=%d)\n",
+ __func__, i, hash);
+
+ *way = i;
+ *index = hash;
+ return 0;
+ }
+ mutex_unlock(&table->lock);
+
+ dev_dbg(rvu->dev, "%s: No space in 4 way exact way, weight=%u\n", __func__,
+ bitmap_weight(table->mem_table.bmap, table->mem_table.depth));
+ return -ENOSPC;
+}
+
+/**
+ * rvu_npc_exact_free_id - Free seq id from bitmap.
+ * @rvu: Resource virtualization unit.
+ * @seq_id: Sequence identifier to be freed.
+ */
+static void rvu_npc_exact_free_id(struct rvu *rvu, u32 seq_id)
+{
+ struct npc_exact_table *table;
+
+ table = rvu->hw->table;
+ mutex_lock(&table->lock);
+ clear_bit(seq_id, table->id_bmap);
+ mutex_unlock(&table->lock);
+ dev_dbg(rvu->dev, "%s: freed id %d\n", __func__, seq_id);
+}
+
+/**
+ * rvu_npc_exact_alloc_id - Alloc seq id from bitmap.
+ * @rvu: Resource virtualization unit.
+ * @seq_id: Sequence identifier.
+ * Return: True or false.
+ */
+static bool rvu_npc_exact_alloc_id(struct rvu *rvu, u32 *seq_id)
+{
+ struct npc_exact_table *table;
+ u32 idx;
+
+ table = rvu->hw->table;
+
+ mutex_lock(&table->lock);
+ idx = find_first_zero_bit(table->id_bmap, table->tot_ids);
+ if (idx == table->tot_ids) {
+ mutex_unlock(&table->lock);
+ dev_err(rvu->dev, "%s: No space in id bitmap (%d)\n",
+ __func__, bitmap_weight(table->id_bmap, table->tot_ids));
+
+ return false;
+ }
+
+ /* Mark bit map to indicate that slot is used.*/
+ set_bit(idx, table->id_bmap);
+ mutex_unlock(&table->lock);
+
+ *seq_id = idx;
+ dev_dbg(rvu->dev, "%s: Allocated id (%d)\n", __func__, *seq_id);
+
+ return true;
+}
+
+/**
+ * rvu_npc_exact_alloc_cam_table_entry - find free slot in fully associative table.
+ * @rvu: resource virtualization unit.
+ * @index: Index to exact CAM table.
+ * Return: 0 upon success; else error number.
+ */
+static int rvu_npc_exact_alloc_cam_table_entry(struct rvu *rvu, int *index)
+{
+ struct npc_exact_table *table;
+ u32 idx;
+
+ table = rvu->hw->table;
+
+ mutex_lock(&table->lock);
+ idx = find_first_zero_bit(table->cam_table.bmap, table->cam_table.depth);
+ if (idx == table->cam_table.depth) {
+ mutex_unlock(&table->lock);
+ dev_info(rvu->dev, "%s: No space in exact cam table, weight=%u\n", __func__,
+ bitmap_weight(table->cam_table.bmap, table->cam_table.depth));
+ return -ENOSPC;
+ }
+
+ /* Mark bit map to indicate that slot is used.*/
+ set_bit(idx, table->cam_table.bmap);
+ mutex_unlock(&table->lock);
+
+ *index = idx;
+ dev_dbg(rvu->dev, "%s: cam table entry alloc success (index=%d)\n",
+ __func__, idx);
+ return 0;
+}
+
+/**
+ * rvu_exact_prepare_table_entry - Data for exact match table entry.
+ * @rvu: Resource virtualization unit.
+ * @enable: Enable/Disable entry
+ * @ctype: Software defined channel type. Currently set as 0.
+ * @chan: Channel number.
+ * @mac_addr: Destination mac address.
+ * Return: mdata for exact match table.
+ */
+static u64 rvu_exact_prepare_table_entry(struct rvu *rvu, bool enable,
+ u8 ctype, u16 chan, u8 *mac_addr)
+
+{
+ u64 ldata = rvu_npc_exact_mac2u64(mac_addr);
+
+ /* Enable or disable */
+ u64 mdata = FIELD_PREP(GENMASK_ULL(63, 63), enable ? 1 : 0);
+
+ /* Set Ctype */
+ mdata |= FIELD_PREP(GENMASK_ULL(61, 60), ctype);
+
+ /* Set chan */
+ mdata |= FIELD_PREP(GENMASK_ULL(59, 48), chan);
+
+ /* MAC address */
+ mdata |= FIELD_PREP(GENMASK_ULL(47, 0), ldata);
+
+ return mdata;
+}
+
+/**
+ * rvu_exact_config_secret_key - Configure secret key.
+ * @rvu: Resource virtualization unit.
+ */
+static void rvu_exact_config_secret_key(struct rvu *rvu)
+{
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET0(NIX_INTF_RX),
+ RVU_NPC_HASH_SECRET_KEY0);
+
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET1(NIX_INTF_RX),
+ RVU_NPC_HASH_SECRET_KEY1);
+
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET2(NIX_INTF_RX),
+ RVU_NPC_HASH_SECRET_KEY2);
+}
+
+/**
+ * rvu_exact_config_search_key - Configure search key
+ * @rvu: Resource virtualization unit.
+ */
+static void rvu_exact_config_search_key(struct rvu *rvu)
+{
+ int blkaddr;
+ u64 reg_val;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+
+ /* HDR offset */
+ reg_val = FIELD_PREP(GENMASK_ULL(39, 32), 0);
+
+ /* BYTESM1, number of bytes - 1 */
+ reg_val |= FIELD_PREP(GENMASK_ULL(18, 16), ETH_ALEN - 1);
+
+ /* Enable LID and set LID to NPC_LID_LA */
+ reg_val |= FIELD_PREP(GENMASK_ULL(11, 11), 1);
+ reg_val |= FIELD_PREP(GENMASK_ULL(10, 8), NPC_LID_LA);
+
+ /* Clear layer type based extraction */
+
+ /* Disable LT_EN */
+ reg_val |= FIELD_PREP(GENMASK_ULL(12, 12), 0);
+
+ /* Set LTYPE_MATCH to 0 */
+ reg_val |= FIELD_PREP(GENMASK_ULL(7, 4), 0);
+
+ /* Set LTYPE_MASK to 0 */
+ reg_val |= FIELD_PREP(GENMASK_ULL(3, 0), 0);
+
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_CFG(NIX_INTF_RX), reg_val);
+}
+
+/**
+ * rvu_exact_config_result_ctrl - Set exact table hash control
+ * @rvu: Resource virtualization unit.
+ * @depth: Depth of Exact match table.
+ *
+ * Sets mask and offset for hash for mem table.
+ */
+static void rvu_exact_config_result_ctrl(struct rvu *rvu, uint32_t depth)
+{
+ int blkaddr;
+ u64 reg = 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+
+ /* Set mask. Note that depth is a power of 2 */
+ rvu->hw->table->mem_table.hash_mask = (depth - 1);
+ reg |= FIELD_PREP(GENMASK_ULL(42, 32), (depth - 1));
+
+ /* Set offset as 0 */
+ rvu->hw->table->mem_table.hash_offset = 0;
+ reg |= FIELD_PREP(GENMASK_ULL(10, 0), 0);
+
+ /* Set reg for RX */
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_RESULT_CTL(NIX_INTF_RX), reg);
+ /* Store hash mask and offset for s/w algorithm */
+}
+
+/**
+ * rvu_exact_config_table_mask - Set exact table mask.
+ * @rvu: Resource virtualization unit.
+ */
+static void rvu_exact_config_table_mask(struct rvu *rvu)
+{
+ int blkaddr;
+ u64 mask = 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+
+ /* Don't use Ctype */
+ mask |= FIELD_PREP(GENMASK_ULL(61, 60), 0);
+
+ /* Set chan */
+ mask |= GENMASK_ULL(59, 48);
+
+ /* Full ldata */
+ mask |= GENMASK_ULL(47, 0);
+
+	/* Store mask for s/w hash calculation */
+ rvu->hw->table->mem_table.mask = mask;
+
+ /* Set mask for RX.*/
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_MASK(NIX_INTF_RX), mask);
+}
+
+/**
+ * rvu_npc_exact_get_max_entries - Get total number of entries in table.
+ * @rvu: resource virtualization unit.
+ * Return: Maximum table entries possible.
+ */
+u32 rvu_npc_exact_get_max_entries(struct rvu *rvu)
+{
+ struct npc_exact_table *table;
+
+ table = rvu->hw->table;
+ return table->tot_ids;
+}
+
+/**
+ * rvu_npc_exact_has_match_table - Checks support for exact match.
+ * @rvu: resource virtualization unit.
+ * Return: True if exact match table is supported/enabled.
+ */
+bool rvu_npc_exact_has_match_table(struct rvu *rvu)
+{
+ return rvu->hw->cap.npc_exact_match_enabled;
+}
+
+/**
+ * __rvu_npc_exact_find_entry_by_seq_id - find entry by id
+ * @rvu: resource virtualization unit.
+ * @seq_id: Sequence identifier.
+ *
+ * Caller should acquire the lock.
+ * Return: Pointer to table entry.
+ */
+static struct npc_exact_table_entry *
+__rvu_npc_exact_find_entry_by_seq_id(struct rvu *rvu, u32 seq_id)
+{
+ struct npc_exact_table *table = rvu->hw->table;
+ struct npc_exact_table_entry *entry = NULL;
+ struct list_head *lhead;
+
+ lhead = &table->lhead_gbl;
+
+ /* traverse to find the matching entry */
+ list_for_each_entry(entry, lhead, glist) {
+ if (entry->seq_id != seq_id)
+ continue;
+
+ return entry;
+ }
+
+ return NULL;
+}
+
+/**
+ * rvu_npc_exact_add_to_list - Add entry to list
+ * @rvu: resource virtualization unit.
+ * @opc_type: OPCODE to select MEM/CAM table.
+ * @ways: MEM table ways.
+ * @index: Index in MEM/CAM table.
+ * @cgx_id: CGX identifier.
+ * @lmac_id: LMAC identifier.
+ * @mac_addr: MAC address.
+ * @chan: Channel number.
+ * @ctype: Channel Type.
+ * @seq_id: Sequence identifier
+ * @cmd: True if function is called by ethtool cmd
+ * @mcam_idx: NPC mcam index of DMAC entry in NPC mcam.
+ * @pcifunc: pci function
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_add_to_list(struct rvu *rvu, enum npc_exact_opc_type opc_type, u8 ways,
+ u32 index, u8 cgx_id, u8 lmac_id, u8 *mac_addr, u16 chan,
+ u8 ctype, u32 *seq_id, bool cmd, u32 mcam_idx, u16 pcifunc)
+{
+ struct npc_exact_table_entry *entry, *tmp, *iter;
+ struct npc_exact_table *table = rvu->hw->table;
+ struct list_head *lhead, *pprev;
+
+ WARN_ON(ways >= NPC_EXACT_TBL_MAX_WAYS);
+
+ if (!rvu_npc_exact_alloc_id(rvu, seq_id)) {
+ dev_err(rvu->dev, "%s: Generate seq id failed\n", __func__);
+ return -EFAULT;
+ }
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ rvu_npc_exact_free_id(rvu, *seq_id);
+ dev_err(rvu->dev, "%s: Memory allocation failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ mutex_lock(&table->lock);
+ switch (opc_type) {
+ case NPC_EXACT_OPC_CAM:
+ lhead = &table->lhead_cam_tbl_entry;
+ table->cam_tbl_entry_cnt++;
+ break;
+
+ case NPC_EXACT_OPC_MEM:
+ lhead = &table->lhead_mem_tbl_entry[ways];
+ table->mem_tbl_entry_cnt++;
+ break;
+
+ default:
+ mutex_unlock(&table->lock);
+ kfree(entry);
+ rvu_npc_exact_free_id(rvu, *seq_id);
+
+ dev_err(rvu->dev, "%s: Unknown opc type%d\n", __func__, opc_type);
+ return -EINVAL;
+ }
+
+ /* Add to global list */
+ INIT_LIST_HEAD(&entry->glist);
+ list_add_tail(&entry->glist, &table->lhead_gbl);
+ INIT_LIST_HEAD(&entry->list);
+ entry->index = index;
+ entry->ways = ways;
+ entry->opc_type = opc_type;
+
+ entry->pcifunc = pcifunc;
+
+ ether_addr_copy(entry->mac, mac_addr);
+ entry->chan = chan;
+ entry->ctype = ctype;
+ entry->cgx_id = cgx_id;
+ entry->lmac_id = lmac_id;
+
+ entry->seq_id = *seq_id;
+
+ entry->mcam_idx = mcam_idx;
+ entry->cmd = cmd;
+
+ pprev = lhead;
+
+ /* Insert entry in ascending order of index */
+ list_for_each_entry_safe(iter, tmp, lhead, list) {
+ if (index < iter->index)
+ break;
+
+ pprev = &iter->list;
+ }
+
+ /* Add to each table list */
+ list_add(&entry->list, pprev);
+ mutex_unlock(&table->lock);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_mem_table_write - Wrapper for register write
+ * @rvu: resource virtualization unit.
+ * @blkaddr: Block address
+ * @ways: ways for MEM table.
+ * @index: Index in MEM table.
+ * @mdata: Meta data to be written to register.
+ */
+static void rvu_npc_exact_mem_table_write(struct rvu *rvu, int blkaddr, u8 ways,
+ u32 index, u64 mdata)
+{
+ rvu_write64(rvu, blkaddr, NPC_AF_EXACT_MEM_ENTRY(ways, index), mdata);
+}
+
+/**
+ * rvu_npc_exact_cam_table_write - Wrapper for register write
+ * @rvu: resource virtualization unit.
+ * @blkaddr: Block address
+ * @index: Index in CAM table.
+ * @mdata: Meta data to be written to register.
+ */
+static void rvu_npc_exact_cam_table_write(struct rvu *rvu, int blkaddr,
+ u32 index, u64 mdata)
+{
+ rvu_write64(rvu, blkaddr, NPC_AF_EXACT_CAM_ENTRY(index), mdata);
+}
+
+/**
+ * rvu_npc_exact_dealloc_table_entry - dealloc table entry
+ * @rvu: resource virtualization unit.
+ * @opc_type: OPCODE for selection of table(MEM or CAM)
+ * @ways: ways if opc_type is MEM table.
+ * @index: Index of MEM or CAM table.
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_dealloc_table_entry(struct rvu *rvu, enum npc_exact_opc_type opc_type,
+ u8 ways, u32 index)
+{
+ int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ struct npc_exact_table *table;
+ u8 null_dmac[6] = { 0 };
+ int depth;
+
+ /* Prepare entry with all fields set to zero */
+ u64 null_mdata = rvu_exact_prepare_table_entry(rvu, false, 0, 0, null_dmac);
+
+ table = rvu->hw->table;
+ depth = table->mem_table.depth;
+
+ mutex_lock(&table->lock);
+
+ switch (opc_type) {
+ case NPC_EXACT_OPC_CAM:
+
+ /* Check whether entry is used already */
+ if (!test_bit(index, table->cam_table.bmap)) {
+ mutex_unlock(&table->lock);
+ dev_err(rvu->dev, "%s: Trying to free an unused entry ways=%d index=%d\n",
+ __func__, ways, index);
+ return -EINVAL;
+ }
+
+ rvu_npc_exact_cam_table_write(rvu, blkaddr, index, null_mdata);
+ clear_bit(index, table->cam_table.bmap);
+ break;
+
+ case NPC_EXACT_OPC_MEM:
+
+ /* Check whether entry is used already */
+ if (!test_bit(index + ways * depth, table->mem_table.bmap)) {
+ mutex_unlock(&table->lock);
+ dev_err(rvu->dev, "%s: Trying to free an unused entry index=%d\n",
+ __func__, index);
+ return -EINVAL;
+ }
+
+ rvu_npc_exact_mem_table_write(rvu, blkaddr, ways, index, null_mdata);
+ clear_bit(index + ways * depth, table->mem_table.bmap);
+ break;
+
+ default:
+ mutex_unlock(&table->lock);
+ dev_err(rvu->dev, "%s: invalid opc type %d", __func__, opc_type);
+ return -ENOSPC;
+ }
+
+ mutex_unlock(&table->lock);
+
+ dev_dbg(rvu->dev, "%s: Successfully deleted entry (index=%d, ways=%d opc_type=%d\n",
+ __func__, index, ways, opc_type);
+
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_alloc_table_entry - Allocate an entry
+ * @rvu: resource virtualization unit.
+ * @mac: MAC address.
+ * @chan: Channel number.
+ * @ctype: Channel Type.
+ * @index: Index of MEM table or CAM table.
+ * @ways: Ways. Only valid for MEM table.
+ * @opc_type: OPCODE to select table (MEM or CAM)
+ *
+ * Try allocating a slot from the MEM table. If all 4 way
+ * slots are full for a hash index, check availability in
+ * the 32-entry CAM table for allocation.
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_alloc_table_entry(struct rvu *rvu, char *mac, u16 chan, u8 ctype,
+ u32 *index, u8 *ways, enum npc_exact_opc_type *opc_type)
+{
+ struct npc_exact_table *table;
+ unsigned int hash;
+ int err;
+
+ table = rvu->hw->table;
+
+	/* Check the 4-way mem table for a free slot */
+ hash = rvu_exact_calculate_hash(rvu, chan, ctype, mac, table->mem_table.mask,
+ table->mem_table.depth);
+ err = rvu_npc_exact_alloc_mem_table_entry(rvu, ways, index, hash);
+ if (!err) {
+ *opc_type = NPC_EXACT_OPC_MEM;
+ dev_dbg(rvu->dev, "%s: inserted in 4 ways hash table ways=%d, index=%d\n",
+ __func__, *ways, *index);
+ return 0;
+ }
+
+ dev_dbg(rvu->dev, "%s: failed to insert in 4 ways hash table\n", __func__);
+
+	/* ways is 0 for the CAM table */
+ *ways = 0;
+ err = rvu_npc_exact_alloc_cam_table_entry(rvu, index);
+ if (!err) {
+ *opc_type = NPC_EXACT_OPC_CAM;
+ dev_dbg(rvu->dev, "%s: inserted in fully associative hash table index=%u\n",
+ __func__, *index);
+ return 0;
+ }
+
+ dev_err(rvu->dev, "%s: failed to insert in fully associative hash table\n", __func__);
+ return -ENOSPC;
+}
+
+/**
+ * rvu_npc_exact_save_drop_rule_chan_and_mask - Save drop rules info in data base.
+ * @rvu: resource virtualization unit.
+ * @drop_mcam_idx: Drop rule index in NPC mcam.
+ * @chan_val: Channel value.
+ * @chan_mask: Channel Mask.
+ * @pcifunc: pcifunc of interface.
+ * Return: True upon success.
+ */
+static bool rvu_npc_exact_save_drop_rule_chan_and_mask(struct rvu *rvu, int drop_mcam_idx,
+ u64 chan_val, u64 chan_mask, u16 pcifunc)
+{
+ struct npc_exact_table *table;
+ int i;
+
+ table = rvu->hw->table;
+
+ for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
+ if (!table->drop_rule_map[i].valid)
+ break;
+
+ if (table->drop_rule_map[i].chan_val != (u16)chan_val)
+ continue;
+
+ if (table->drop_rule_map[i].chan_mask != (u16)chan_mask)
+ continue;
+
+ return false;
+ }
+
+ if (i == NPC_MCAM_DROP_RULE_MAX)
+ return false;
+
+ table->drop_rule_map[i].drop_rule_idx = drop_mcam_idx;
+ table->drop_rule_map[i].chan_val = (u16)chan_val;
+ table->drop_rule_map[i].chan_mask = (u16)chan_mask;
+ table->drop_rule_map[i].pcifunc = pcifunc;
+ table->drop_rule_map[i].valid = true;
+ return true;
+}
+
+/**
+ * rvu_npc_exact_calc_drop_rule_chan_and_mask - Calculate Channel number and mask.
+ * @rvu: resource virtualization unit.
+ * @intf_type: Interface type (SDP, LBK or CGX)
+ * @cgx_id: CGX identifier.
+ * @lmac_id: LMAC identifier.
+ * @val: Channel number.
+ * @mask: Channel mask.
+ * Return: True upon success.
+ */
+static bool rvu_npc_exact_calc_drop_rule_chan_and_mask(struct rvu *rvu, u8 intf_type,
+ u8 cgx_id, u8 lmac_id,
+ u64 *val, u64 *mask)
+{
+ u16 chan_val, chan_mask;
+
+ /* No support for SDP and LBK */
+ if (intf_type != NIX_INTF_TYPE_CGX)
+ return false;
+
+ chan_val = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
+ chan_mask = 0xfff;
+
+ if (val)
+ *val = chan_val;
+
+ if (mask)
+ *mask = chan_mask;
+
+ return true;
+}
+
+/**
+ * rvu_npc_exact_drop_rule_to_pcifunc - Retrieve pcifunc
+ * @rvu: resource virtualization unit.
+ * @drop_rule_idx: Drop rule index in NPC mcam.
+ *
+ * Debugfs (exact_drop_cnt) entry displays pcifunc for interface
+ * by retrieving the pcifunc value from data base.
+ * Return: Drop rule index.
+ */
+u16 rvu_npc_exact_drop_rule_to_pcifunc(struct rvu *rvu, u32 drop_rule_idx)
+{
+ struct npc_exact_table *table;
+ int i;
+
+ table = rvu->hw->table;
+
+ for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
+ if (!table->drop_rule_map[i].valid)
+ break;
+
+ if (table->drop_rule_map[i].drop_rule_idx != drop_rule_idx)
+ continue;
+
+ return table->drop_rule_map[i].pcifunc;
+ }
+
+ dev_err(rvu->dev, "%s: drop mcam rule index (%d) >= NPC_MCAM_DROP_RULE_MAX\n",
+ __func__, drop_rule_idx);
+ return -1;
+}
+
+/**
+ * rvu_npc_exact_get_drop_rule_info - Get drop rule information.
+ * @rvu: resource virtualization unit.
+ * @intf_type: Interface type (CGX, SDP or LBK)
+ * @cgx_id: CGX identifier.
+ * @lmac_id: LMAC identifier.
+ * @drop_mcam_idx: NPC mcam drop rule index.
+ * @val: Channel value.
+ * @mask: Channel mask.
+ * @pcifunc: pcifunc of interface corresponding to the drop rule.
+ * Return: True upon success.
+ */
+static bool rvu_npc_exact_get_drop_rule_info(struct rvu *rvu, u8 intf_type, u8 cgx_id,
+ u8 lmac_id, u32 *drop_mcam_idx, u64 *val,
+ u64 *mask, u16 *pcifunc)
+{
+ struct npc_exact_table *table;
+ u64 chan_val, chan_mask;
+ bool rc;
+ int i;
+
+ table = rvu->hw->table;
+
+ if (intf_type != NIX_INTF_TYPE_CGX) {
+ dev_err(rvu->dev, "%s: No drop rule for LBK/SDP mode\n", __func__);
+ return false;
+ }
+
+ rc = rvu_npc_exact_calc_drop_rule_chan_and_mask(rvu, intf_type, cgx_id,
+ lmac_id, &chan_val, &chan_mask);
+ if (!rc)
+ return false;
+
+ for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
+ if (!table->drop_rule_map[i].valid)
+ break;
+
+ if (table->drop_rule_map[i].chan_val != (u16)chan_val)
+ continue;
+
+ if (val)
+ *val = table->drop_rule_map[i].chan_val;
+ if (mask)
+ *mask = table->drop_rule_map[i].chan_mask;
+ if (pcifunc)
+ *pcifunc = table->drop_rule_map[i].pcifunc;
+
+ *drop_mcam_idx = i;
+ return true;
+ }
+
+ if (i == NPC_MCAM_DROP_RULE_MAX) {
+ dev_err(rvu->dev, "%s: drop mcam rule index (%d) >= NPC_MCAM_DROP_RULE_MAX\n",
+ __func__, *drop_mcam_idx);
+ return false;
+ }
+
+ dev_err(rvu->dev, "%s: Could not retrieve for cgx=%d, lmac=%d\n",
+ __func__, cgx_id, lmac_id);
+ return false;
+}
+
+/**
+ * __rvu_npc_exact_cmd_rules_cnt_update - Update the number of DMAC rules against a drop rule.
+ * @rvu: resource virtualization unit.
+ * @drop_mcam_idx: NPC mcam drop rule index.
+ * @val: +1 or -1.
+ * @enable_or_disable_cam: If no exact match rules against a drop rule, disable it.
+ *
+ * When the first exact match entry against a drop rule is added, or the last
+ * exact match entry against a drop rule is deleted, *enable_or_disable_cam is
+ * set to true so the caller knows to toggle the drop rule.
+ * Return: Number of rules
+ */
+static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_idx,
+ int val, bool *enable_or_disable_cam)
+{
+ struct npc_exact_table *table;
+ u16 *cnt, old_cnt;
+ bool promisc;
+
+ table = rvu->hw->table;
+ promisc = table->promisc_mode[drop_mcam_idx];
+
+ cnt = &table->cnt_cmd_rules[drop_mcam_idx];
+ old_cnt = *cnt;
+
+ *cnt += val;
+
+ if (!enable_or_disable_cam)
+ goto done;
+
+ *enable_or_disable_cam = false;
+
+ if (promisc)
+ goto done;
+
+ /* If all rules are deleted and not already in promisc mode;
+ * disable cam
+ */
+ if (!*cnt && val < 0) {
+ *enable_or_disable_cam = true;
+ goto done;
+ }
+
+ /* If rule got added and not already in promisc mode; enable cam */
+ if (!old_cnt && val > 0) {
+ *enable_or_disable_cam = true;
+ goto done;
+ }
+
+done:
+ return *cnt;
+}
+
+/**
+ * rvu_npc_exact_del_table_entry_by_id - Delete and free table entry.
+ * @rvu: resource virtualization unit.
+ * @seq_id: Sequence identifier of the entry.
+ *
+ * Deletes entry from linked lists and free up slot in HW MEM or CAM
+ * table.
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_del_table_entry_by_id(struct rvu *rvu, u32 seq_id)
+{
+ struct npc_exact_table_entry *entry = NULL;
+ struct npc_exact_table *table;
+ bool disable_cam = false;
+ u32 drop_mcam_idx = -1;
+ int *cnt;
+ bool rc;
+
+ table = rvu->hw->table;
+
+ mutex_lock(&table->lock);
+
+ /* Lookup for entry which needs to be updated */
+ entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, seq_id);
+ if (!entry) {
+ dev_dbg(rvu->dev, "%s: failed to find entry for id=%d\n", __func__, seq_id);
+ mutex_unlock(&table->lock);
+ return -ENODATA;
+ }
+
+ cnt = (entry->opc_type == NPC_EXACT_OPC_CAM) ? &table->cam_tbl_entry_cnt :
+ &table->mem_tbl_entry_cnt;
+
+ /* delete from lists */
+ list_del_init(&entry->list);
+ list_del_init(&entry->glist);
+
+ (*cnt)--;
+
+ rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, entry->cgx_id,
+ entry->lmac_id, &drop_mcam_idx, NULL, NULL, NULL);
+ if (!rc) {
+ dev_dbg(rvu->dev, "%s: failed to retrieve drop info for id=0x%x\n",
+ __func__, seq_id);
+ mutex_unlock(&table->lock);
+ return -ENODATA;
+ }
+
+ if (entry->cmd)
+ __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, -1, &disable_cam);
+
+ /* No dmac filter rules; disable drop on hit rule */
+ if (disable_cam) {
+ rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
+ dev_dbg(rvu->dev, "%s: Disabling mcam idx %d\n",
+ __func__, drop_mcam_idx);
+ }
+
+ mutex_unlock(&table->lock);
+
+ rvu_npc_exact_dealloc_table_entry(rvu, entry->opc_type, entry->ways, entry->index);
+
+ rvu_npc_exact_free_id(rvu, seq_id);
+
+	dev_dbg(rvu->dev, "%s: delete entry success for id=0x%x, mac=%pM\n",
+ __func__, seq_id, entry->mac);
+ kfree(entry);
+
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_add_table_entry - Adds a table entry
+ * @rvu: resource virtualization unit.
+ * @cgx_id: cgx identifier.
+ * @lmac_id: lmac identifier.
+ * @mac: MAC address.
+ * @chan: Channel number.
+ * @ctype: Channel Type.
+ * @seq_id: Sequence number.
+ * @cmd: Whether it is invoked by ethtool cmd.
+ * @mcam_idx: NPC mcam index corresponding to MAC
+ * @pcifunc: PCI func.
+ *
+ * Creates a new exact match table entry in either CAM or
+ * MEM table.
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_add_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_id, u8 *mac,
+ u16 chan, u8 ctype, u32 *seq_id, bool cmd,
+ u32 mcam_idx, u16 pcifunc)
+{
+ int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ enum npc_exact_opc_type opc_type;
+ bool enable_cam = false;
+ u32 drop_mcam_idx;
+ u32 index;
+ u64 mdata;
+ bool rc;
+ int err;
+ u8 ways;
+
+ ctype = 0;
+
+ err = rvu_npc_exact_alloc_table_entry(rvu, mac, chan, ctype, &index, &ways, &opc_type);
+ if (err) {
+ dev_err(rvu->dev, "%s: Could not alloc in exact match table\n", __func__);
+ return err;
+ }
+
+ /* Write mdata to table */
+ mdata = rvu_exact_prepare_table_entry(rvu, true, ctype, chan, mac);
+
+ if (opc_type == NPC_EXACT_OPC_CAM)
+ rvu_npc_exact_cam_table_write(rvu, blkaddr, index, mdata);
+ else
+ rvu_npc_exact_mem_table_write(rvu, blkaddr, ways, index, mdata);
+
+ /* Insert entry to linked list */
+ err = rvu_npc_exact_add_to_list(rvu, opc_type, ways, index, cgx_id, lmac_id,
+ mac, chan, ctype, seq_id, cmd, mcam_idx, pcifunc);
+ if (err) {
+ rvu_npc_exact_dealloc_table_entry(rvu, opc_type, ways, index);
+ dev_err(rvu->dev, "%s: could not add to exact match table\n", __func__);
+ return err;
+ }
+
+ rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
+ &drop_mcam_idx, NULL, NULL, NULL);
+ if (!rc) {
+ rvu_npc_exact_dealloc_table_entry(rvu, opc_type, ways, index);
+ dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
+ __func__, cgx_id, lmac_id);
+ return -EINVAL;
+ }
+
+ if (cmd)
+ __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 1, &enable_cam);
+
+ /* First command rule; enable drop on hit rule */
+ if (enable_cam) {
+ rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, true);
+ dev_dbg(rvu->dev, "%s: Enabling mcam idx %d\n",
+ __func__, drop_mcam_idx);
+ }
+
+ dev_dbg(rvu->dev,
+ "%s: Successfully added entry (index=%d, dmac=%pM, ways=%d opc_type=%d\n",
+ __func__, index, mac, ways, opc_type);
+
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_update_table_entry - Update exact match table.
+ * @rvu: resource virtualization unit.
+ * @cgx_id: CGX identifier.
+ * @lmac_id: LMAC identifier.
+ * @old_mac: Existing MAC address entry.
+ * @new_mac: New MAC address entry.
+ * @seq_id: Sequence identifier of the entry.
+ *
+ * Updates MAC address of an entry. If entry is in MEM table, new
+ * hash value may not match with old one.
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_update_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_id,
+ u8 *old_mac, u8 *new_mac, u32 *seq_id)
+{
+ int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ struct npc_exact_table_entry *entry;
+ struct npc_exact_table *table;
+ u32 hash_index;
+ u64 mdata;
+
+ table = rvu->hw->table;
+
+ mutex_lock(&table->lock);
+
+ /* Lookup for entry which needs to be updated */
+ entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, *seq_id);
+ if (!entry) {
+ mutex_unlock(&table->lock);
+ dev_dbg(rvu->dev,
+ "%s: failed to find entry for cgx_id=%d lmac_id=%d old_mac=%pM\n",
+ __func__, cgx_id, lmac_id, old_mac);
+ return -ENODATA;
+ }
+
+ /* If entry is in mem table and new hash index is different than old
+ * hash index, we cannot update the entry. Fail in these scenarios.
+ */
+ if (entry->opc_type == NPC_EXACT_OPC_MEM) {
+ hash_index = rvu_exact_calculate_hash(rvu, entry->chan, entry->ctype,
+ new_mac, table->mem_table.mask,
+ table->mem_table.depth);
+ if (hash_index != entry->index) {
+ dev_dbg(rvu->dev,
+ "%s: Update failed due to index mismatch(new=0x%x, old=%x)\n",
+ __func__, hash_index, entry->index);
+ mutex_unlock(&table->lock);
+ return -EINVAL;
+ }
+ }
+
+ mdata = rvu_exact_prepare_table_entry(rvu, true, entry->ctype, entry->chan, new_mac);
+
+ if (entry->opc_type == NPC_EXACT_OPC_MEM)
+ rvu_npc_exact_mem_table_write(rvu, blkaddr, entry->ways, entry->index, mdata);
+ else
+ rvu_npc_exact_cam_table_write(rvu, blkaddr, entry->index, mdata);
+
+ /* Update entry fields */
+ ether_addr_copy(entry->mac, new_mac);
+ *seq_id = entry->seq_id;
+
+ dev_dbg(rvu->dev,
+ "%s: Successfully updated entry (index=%d, dmac=%pM, ways=%d opc_type=%d\n",
+ __func__, entry->index, entry->mac, entry->ways, entry->opc_type);
+
+ dev_dbg(rvu->dev, "%s: Successfully updated entry (old mac=%pM new_mac=%pM\n",
+ __func__, old_mac, new_mac);
+
+ mutex_unlock(&table->lock);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_promisc_disable - Disable promiscuous mode.
+ * @rvu: resource virtualization unit.
+ * @pcifunc: pcifunc
+ *
+ * A drop rule exists for each PF. We don't support DMAC filters for
+ * VFs.
+ * Return: 0 upon success
+ */
+
+int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
+{
+ struct npc_exact_table *table;
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+ u32 drop_mcam_idx;
+ bool *promisc;
+ bool rc;
+
+ table = rvu->hw->table;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
+ &drop_mcam_idx, NULL, NULL, NULL);
+ if (!rc) {
+ dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
+ __func__, cgx_id, lmac_id);
+ return -EINVAL;
+ }
+
+ mutex_lock(&table->lock);
+ promisc = &table->promisc_mode[drop_mcam_idx];
+
+ if (!*promisc) {
+ mutex_unlock(&table->lock);
+ dev_dbg(rvu->dev, "%s: Err Already promisc mode disabled (cgx=%d lmac=%d)\n",
+ __func__, cgx_id, lmac_id);
+ return LMAC_AF_ERR_INVALID_PARAM;
+ }
+ *promisc = false;
+ mutex_unlock(&table->lock);
+
+ /* Enable drop rule */
+ rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX,
+ true);
+
+ dev_dbg(rvu->dev, "%s: disabled promisc mode (cgx=%d lmac=%d)\n",
+ __func__, cgx_id, lmac_id);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_promisc_enable - Enable promiscuous mode.
+ * @rvu: resource virtualization unit.
+ * @pcifunc: pcifunc.
+ * Return: 0 upon success
+ */
+int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
+{
+ struct npc_exact_table *table;
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+ u32 drop_mcam_idx;
+ bool *promisc;
+ bool rc;
+
+ table = rvu->hw->table;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
+ &drop_mcam_idx, NULL, NULL, NULL);
+ if (!rc) {
+ dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
+ __func__, cgx_id, lmac_id);
+ return -EINVAL;
+ }
+
+ mutex_lock(&table->lock);
+ promisc = &table->promisc_mode[drop_mcam_idx];
+
+ if (*promisc) {
+ mutex_unlock(&table->lock);
+ dev_dbg(rvu->dev, "%s: Already in promisc mode (cgx=%d lmac=%d)\n",
+ __func__, cgx_id, lmac_id);
+ return LMAC_AF_ERR_INVALID_PARAM;
+ }
+ *promisc = true;
+ mutex_unlock(&table->lock);
+
+ /* disable drop rule */
+ rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX,
+ false);
+
+ dev_dbg(rvu->dev, "%s: Enabled promisc mode (cgx=%d lmac=%d)\n",
+ __func__, cgx_id, lmac_id);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_mac_addr_reset - Delete PF mac address.
+ * @rvu: resource virtualization unit.
+ * @req: Reset request
+ * @rsp: Reset response.
+ * Return: 0 upon success
+ */
+int rvu_npc_exact_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u32 seq_id = req->index;
+ struct rvu_pfvf *pfvf;
+ u8 cgx_id, lmac_id;
+ int rc;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+
+ rc = rvu_npc_exact_del_table_entry_by_id(rvu, seq_id);
+ if (rc) {
+ /* TODO: how to handle this error case ? */
+ dev_err(rvu->dev, "%s MAC (%pM) del PF=%d failed\n", __func__, pfvf->mac_addr, pf);
+ return 0;
+ }
+
+ dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d success (seq_id=%u)\n",
+ __func__, pfvf->mac_addr, pf, seq_id);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_mac_addr_update - Update mac address field with new value.
+ * @rvu: resource virtualization unit.
+ * @req: Update request.
+ * @rsp: Update response.
+ * Return: 0 upon success
+ */
+int rvu_npc_exact_mac_addr_update(struct rvu *rvu,
+ struct cgx_mac_addr_update_req *req,
+ struct cgx_mac_addr_update_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct npc_exact_table_entry *entry;
+ struct npc_exact_table *table;
+ struct rvu_pfvf *pfvf;
+ u32 seq_id, mcam_idx;
+ u8 old_mac[ETH_ALEN];
+ u8 cgx_id, lmac_id;
+ int rc;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return LMAC_AF_ERR_PERM_DENIED;
+
+ dev_dbg(rvu->dev, "%s: Update request for seq_id=%d, mac=%pM\n",
+ __func__, req->index, req->mac_addr);
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+
+ table = rvu->hw->table;
+
+ mutex_lock(&table->lock);
+
+ /* Lookup for entry which needs to be updated */
+ entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, req->index);
+ if (!entry) {
+ dev_err(rvu->dev, "%s: failed to find entry for id=0x%x\n", __func__, req->index);
+ mutex_unlock(&table->lock);
+ return LMAC_AF_ERR_EXACT_MATCH_TBL_LOOK_UP_FAILED;
+ }
+ ether_addr_copy(old_mac, entry->mac);
+ seq_id = entry->seq_id;
+ mcam_idx = entry->mcam_idx;
+ mutex_unlock(&table->lock);
+
+ rc = rvu_npc_exact_update_table_entry(rvu, cgx_id, lmac_id, old_mac,
+ req->mac_addr, &seq_id);
+ if (!rc) {
+ rsp->index = seq_id;
+ dev_dbg(rvu->dev, "%s mac:%pM (pfvf:%pM default:%pM) update to PF=%d success\n",
+ __func__, req->mac_addr, pfvf->mac_addr, pfvf->default_mac, pf);
+ ether_addr_copy(pfvf->mac_addr, req->mac_addr);
+ return 0;
+ }
+
+ /* Try deleting and adding it again */
+ rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
+ if (rc) {
+ /* This could be a new entry */
+ dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d failed\n", __func__,
+ pfvf->mac_addr, pf);
+ }
+
+ rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
+ pfvf->rx_chan_base, 0, &seq_id, true,
+ mcam_idx, req->hdr.pcifunc);
+ if (rc) {
+ dev_err(rvu->dev, "%s MAC (%pM) add PF=%d failed\n", __func__,
+ req->mac_addr, pf);
+ return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
+ }
+
+ rsp->index = seq_id;
+ dev_dbg(rvu->dev,
+ "%s MAC (new:%pM, old=%pM default:%pM) del and add to PF=%d success (seq_id=%u)\n",
+ __func__, req->mac_addr, pfvf->mac_addr, pfvf->default_mac, pf, seq_id);
+
+ ether_addr_copy(pfvf->mac_addr, req->mac_addr);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_mac_addr_add - Adds MAC address to exact match table.
+ * @rvu: resource virtualization unit.
+ * @req: Add request.
+ * @rsp: Add response.
+ * Return: 0 upon success
+ */
+int rvu_npc_exact_mac_addr_add(struct rvu *rvu,
+ struct cgx_mac_addr_add_req *req,
+ struct cgx_mac_addr_add_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct rvu_pfvf *pfvf;
+ u8 cgx_id, lmac_id;
+ int rc = 0;
+ u32 seq_id;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+
+ rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
+ pfvf->rx_chan_base, 0, &seq_id,
+ true, -1, req->hdr.pcifunc);
+
+ if (!rc) {
+ rsp->index = seq_id;
+ dev_dbg(rvu->dev, "%s MAC (%pM) add to PF=%d success (seq_id=%u)\n",
+ __func__, req->mac_addr, pf, seq_id);
+ return 0;
+ }
+
+ dev_err(rvu->dev, "%s MAC (%pM) add to PF=%d failed\n", __func__,
+ req->mac_addr, pf);
+ return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
+}
+
+/**
+ * rvu_npc_exact_mac_addr_del - Delete DMAC filter
+ * @rvu: resource virtualization unit.
+ * @req: Delete request.
+ * @rsp: Delete response.
+ * Return: 0 upon success
+ */
+int rvu_npc_exact_mac_addr_del(struct rvu *rvu,
+ struct cgx_mac_addr_del_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ int rc;
+
+ rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
+ if (!rc) {
+ dev_dbg(rvu->dev, "%s del to PF=%d success (seq_id=%u)\n",
+ __func__, pf, req->index);
+ return 0;
+ }
+
+ dev_err(rvu->dev, "%s del to PF=%d failed (seq_id=%u)\n",
+ __func__, pf, req->index);
+ return LMAC_AF_ERR_EXACT_MATCH_TBL_DEL_FAILED;
+}
+
+/**
+ * rvu_npc_exact_mac_addr_set - Add PF mac address to dmac filter.
+ * @rvu: resource virtualization unit.
+ * @req: Set request.
+ * @rsp: Set response.
+ * Return: 0 upon success
+ */
+int rvu_npc_exact_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req,
+ struct cgx_mac_addr_set_or_get *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u32 seq_id = req->index;
+ struct rvu_pfvf *pfvf;
+ u8 cgx_id, lmac_id;
+ u32 mcam_idx = -1;
+ int rc, nixlf;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ pfvf = &rvu->pf[pf];
+
+	/* If the table does not have an entry, both the update-entry and
+	 * delete-entry calls below fail. These are not failure conditions.
+	 */
+ rc = rvu_npc_exact_update_table_entry(rvu, cgx_id, lmac_id, pfvf->mac_addr,
+ req->mac_addr, &seq_id);
+ if (!rc) {
+ rsp->index = seq_id;
+ ether_addr_copy(pfvf->mac_addr, req->mac_addr);
+ ether_addr_copy(rsp->mac_addr, req->mac_addr);
+ dev_dbg(rvu->dev, "%s MAC (%pM) update to PF=%d success\n",
+ __func__, req->mac_addr, pf);
+ return 0;
+ }
+
+ /* Try deleting and adding it again */
+ rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
+ if (rc) {
+ dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d failed\n",
+ __func__, pfvf->mac_addr, pf);
+ }
+
+	/* Find the mcam entry if it exists */
+ rc = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, NULL);
+ if (!rc) {
+ mcam_idx = npc_get_nixlf_mcam_index(&rvu->hw->mcam, req->hdr.pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+ }
+
+ rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
+ pfvf->rx_chan_base, 0, &seq_id,
+ true, mcam_idx, req->hdr.pcifunc);
+ if (rc) {
+ dev_err(rvu->dev, "%s MAC (%pM) add PF=%d failed\n",
+ __func__, req->mac_addr, pf);
+ return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
+ }
+
+ rsp->index = seq_id;
+ ether_addr_copy(rsp->mac_addr, req->mac_addr);
+ ether_addr_copy(pfvf->mac_addr, req->mac_addr);
+ dev_dbg(rvu->dev,
+ "%s MAC (%pM) del and add to PF=%d success (seq_id=%u)\n",
+ __func__, req->mac_addr, pf, seq_id);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_can_disable_feature - Check if feature can be disabled.
+ * @rvu: resource virtualization unit.
+ * Return: True if exact match feature is supported.
+ */
+bool rvu_npc_exact_can_disable_feature(struct rvu *rvu)
+{
+ struct npc_exact_table *table = rvu->hw->table;
+ bool empty;
+
+ if (!rvu->hw->cap.npc_exact_match_enabled)
+ return false;
+
+ mutex_lock(&table->lock);
+ empty = list_empty(&table->lhead_gbl);
+ mutex_unlock(&table->lock);
+
+ return empty;
+}
+
+/**
+ * rvu_npc_exact_disable_feature - Disable feature.
+ * @rvu: resource virtualization unit.
+ */
+void rvu_npc_exact_disable_feature(struct rvu *rvu)
+{
+ rvu->hw->cap.npc_exact_match_enabled = false;
+}
+
+/**
+ * rvu_npc_exact_reset - Delete and free all entries that match pcifunc.
+ * @rvu: resource virtualization unit.
+ * @pcifunc: PCI func to match.
+ */
+void rvu_npc_exact_reset(struct rvu *rvu, u16 pcifunc)
+{
+ struct npc_exact_table *table = rvu->hw->table;
+ struct npc_exact_table_entry *tmp, *iter;
+ u32 seq_id;
+
+ mutex_lock(&table->lock);
+ list_for_each_entry_safe(iter, tmp, &table->lhead_gbl, glist) {
+ if (pcifunc != iter->pcifunc)
+ continue;
+
+ seq_id = iter->seq_id;
+		dev_dbg(rvu->dev, "%s: resetting pcifunc=%d seq_id=%u\n", __func__,
+ pcifunc, seq_id);
+
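+		/* Drop table->lock across the delete; the delete helper is
+		 * expected to take the same lock internally.
+		 */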
+ mutex_unlock(&table->lock);
+ rvu_npc_exact_del_table_entry_by_id(rvu, seq_id);
+ mutex_lock(&table->lock);
+ }
+ mutex_unlock(&table->lock);
+}
+
+/**
+ * rvu_npc_exact_init - initialize exact match table
+ * @rvu: resource virtualization unit.
+ *
+ * Initialize HW and SW resources to manage 4way-2K table and fully
+ * associative 32-entry mcam table.
+ * Return: 0 upon success.
+ */
+int rvu_npc_exact_init(struct rvu *rvu)
+{
+ u64 bcast_mcast_val, bcast_mcast_mask;
+ struct npc_exact_table *table;
+ u64 exact_val, exact_mask;
+ u64 chan_val, chan_mask;
+ u8 cgx_id, lmac_id;
+ u32 *drop_mcam_idx;
+ u16 max_lmac_cnt;
+ u64 npc_const3;
+ int table_size;
+ int blkaddr;
+ u16 pcifunc;
+ int err, i;
+ u64 cfg;
+ bool rc;
+
+	/* Read NPC_AF_CONST3 and check whether exact match
+	 * functionality is present.
+	 */
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Check exact match feature is supported */
+ npc_const3 = rvu_read64(rvu, blkaddr, NPC_AF_CONST3);
+ if (!(npc_const3 & BIT_ULL(62)))
+ return 0;
+
+ /* Check if kex profile has enabled EXACT match nibble */
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
+ if (!(cfg & NPC_EXACT_NIBBLE_HIT))
+ return 0;
+
+ /* Set capability to true */
+ rvu->hw->cap.npc_exact_match_enabled = true;
+
+	table = kzalloc(sizeof(*table), GFP_KERNEL);
+	if (!table)
+		return -ENOMEM;
+
+	dev_dbg(rvu->dev, "%s: Memory allocation for table success\n", __func__);
+ rvu->hw->table = table;
+
+ /* Read table size, ways and depth */
+ table->mem_table.ways = FIELD_GET(GENMASK_ULL(19, 16), npc_const3);
+ table->mem_table.depth = FIELD_GET(GENMASK_ULL(15, 0), npc_const3);
+ table->cam_table.depth = FIELD_GET(GENMASK_ULL(31, 24), npc_const3);
+
+ dev_dbg(rvu->dev, "%s: NPC exact match 4way_2k table(ways=%d, depth=%d)\n",
+		__func__, table->mem_table.ways, table->mem_table.depth);
+
+	/* Check if the table depth is a power of 2.
+	 * TODO: why is __builtin_popcount() not working here?
+	 */
+ if ((table->mem_table.depth & (table->mem_table.depth - 1)) != 0) {
+ dev_err(rvu->dev,
+			"%s: NPC exact match 4way_2k table depth(%d) is not a power of 2\n",
+ __func__, table->mem_table.depth);
+ return -EINVAL;
+ }
+
+ table_size = table->mem_table.depth * table->mem_table.ways;
+
+ /* Allocate bitmap for 4way 2K table */
+ table->mem_table.bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(table_size),
+ sizeof(long), GFP_KERNEL);
+ if (!table->mem_table.bmap)
+ return -ENOMEM;
+
+ dev_dbg(rvu->dev, "%s: Allocated bitmap for 4way 2K entry table\n", __func__);
+
+ /* Allocate bitmap for 32 entry mcam */
+ table->cam_table.bmap = devm_kcalloc(rvu->dev, 1, sizeof(long), GFP_KERNEL);
+
+ if (!table->cam_table.bmap)
+ return -ENOMEM;
+
+ dev_dbg(rvu->dev, "%s: Allocated bitmap for 32 entry cam\n", __func__);
+
+ table->tot_ids = (table->mem_table.depth * table->mem_table.ways) + table->cam_table.depth;
+	table->id_bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(table->tot_ids),
+				      sizeof(long), GFP_KERNEL);
+
+ if (!table->id_bmap)
+ return -ENOMEM;
+
+ dev_dbg(rvu->dev, "%s: Allocated bitmap for id map (total=%d)\n",
+ __func__, table->tot_ids);
+
+ /* Initialize list heads for npc_exact_table entries.
+ * This entry is used by debugfs to show entries in
+ * exact match table.
+ */
+ for (i = 0; i < NPC_EXACT_TBL_MAX_WAYS; i++)
+ INIT_LIST_HEAD(&table->lhead_mem_tbl_entry[i]);
+
+ INIT_LIST_HEAD(&table->lhead_cam_tbl_entry);
+ INIT_LIST_HEAD(&table->lhead_gbl);
+
+ mutex_init(&table->lock);
+
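+	/* Program hash secret keys, search key extraction, table mask and
+	 * result control before any drop rules are installed below.
+	 */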
+ rvu_exact_config_secret_key(rvu);
+ rvu_exact_config_search_key(rvu);
+
+ rvu_exact_config_table_mask(rvu);
+ rvu_exact_config_result_ctrl(rvu, table->mem_table.depth);
+
+ /* - No drop rule for LBK
+ * - Drop rules for SDP and each LMAC.
+ */
+ exact_val = !NPC_EXACT_RESULT_HIT;
+ exact_mask = NPC_EXACT_RESULT_HIT;
+
+ /* nibble - 3 2 1 0
+ * L3B L3M L2B L2M
+ */
+ bcast_mcast_val = 0b0000;
+ bcast_mcast_mask = 0b0011;
+
+	/* Track the next free drop rule index while installing drop rules */
+ drop_mcam_idx = &table->num_drop_rules;
+
+ max_lmac_cnt = rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx +
+ PF_CGXMAP_BASE;
+
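+	/* Walk every CGX-mapped PF and install one drop rule per LMAC;
+	 * PFs with no CGX mapping (0xFF) are skipped.
+	 */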
+ for (i = PF_CGXMAP_BASE; i < max_lmac_cnt; i++) {
+ if (rvu->pf2cgxlmac_map[i] == 0xFF)
+ continue;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[i], &cgx_id, &lmac_id);
+
+ rc = rvu_npc_exact_calc_drop_rule_chan_and_mask(rvu, NIX_INTF_TYPE_CGX, cgx_id,
+ lmac_id, &chan_val, &chan_mask);
+ if (!rc) {
+ dev_err(rvu->dev,
+ "%s: failed, info chan_val=0x%llx chan_mask=0x%llx rule_id=%d\n",
+ __func__, chan_val, chan_mask, *drop_mcam_idx);
+ return -EINVAL;
+ }
+
+ /* Filter rules are only for PF */
+ pcifunc = RVU_PFFUNC(i, 0);
+
+ dev_dbg(rvu->dev,
+			"%s: Drop rule cgx=%d lmac=%d chan(val=0x%llx, mask=0x%llx)\n",
+ __func__, cgx_id, lmac_id, chan_val, chan_mask);
+
+ rc = rvu_npc_exact_save_drop_rule_chan_and_mask(rvu, table->num_drop_rules,
+ chan_val, chan_mask, pcifunc);
+ if (!rc) {
+ dev_err(rvu->dev,
+ "%s: failed to set drop info for cgx=%d, lmac=%d, chan=%llx\n",
+ __func__, cgx_id, lmac_id, chan_val);
+ return -EINVAL;
+ }
+
+ err = npc_install_mcam_drop_rule(rvu, *drop_mcam_idx,
+ &table->counter_idx[*drop_mcam_idx],
+ chan_val, chan_mask,
+ exact_val, exact_mask,
+ bcast_mcast_val, bcast_mcast_mask);
+ if (err) {
+ dev_err(rvu->dev,
+ "failed to configure drop rule (cgx=%d lmac=%d)\n",
+ cgx_id, lmac_id);
+ return err;
+ }
+
+ (*drop_mcam_idx)++;
+ }
+
+ dev_info(rvu->dev, "initialized exact match table successfully\n");
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
new file mode 100644
index 000000000..57a09328d
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
@@ -0,0 +1,239 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2022 Marvell.
+ *
+ */
+
+#ifndef __RVU_NPC_HASH_H
+#define __RVU_NPC_HASH_H
+
+#define RVU_NPC_HASH_SECRET_KEY0 0xa9d5af4c9fbc76b1
+#define RVU_NPC_HASH_SECRET_KEY1 0xa9d5af4c9fbc87b4
+#define RVU_NPC_HASH_SECRET_KEY2 0x5954c9e7
+
+#define NPC_MAX_HASH 2
+#define NPC_MAX_HASH_MASK 2
+
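+/* Helpers to pack KEX load and hash configuration values; the shifts
+ * below mirror the field positions written to the NPC_AF_INTFX_* hash
+ * registers via the SET_KEX_LD_* macros further down.
+ */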
+#define KEX_LD_CFG_USE_HASH(use_hash, bytesm1, hdr_ofs, ena, flags_ena, key_ofs) \
+ ((use_hash) << 20 | ((bytesm1) << 16) | ((hdr_ofs) << 8) | \
+ ((ena) << 7) | ((flags_ena) << 6) | ((key_ofs) & 0x3F))
+#define KEX_LD_CFG_HASH(hdr_ofs, bytesm1, lt_en, lid_en, lid, ltype_match, ltype_mask) \
+ (((hdr_ofs) << 32) | ((bytesm1) << 16) | \
+ ((lt_en) << 12) | ((lid_en) << 11) | ((lid) << 8) | \
+ ((ltype_match) << 4) | ((ltype_mask) & 0xF))
+
+#define SET_KEX_LD_HASH(intf, ld, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_HASHX_CFG(intf, ld), cfg)
+
+#define SET_KEX_LD_HASH_MASK(intf, ld, mask_idx, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_HASHX_MASKX(intf, ld, mask_idx), cfg)
+
+#define GET_KEX_LD_HASH_CTRL(intf, ld) \
+ rvu_read64(rvu, blkaddr, NPC_AF_INTFX_HASHX_RESULT_CTRL(intf, ld))
+
+#define GET_KEX_LD_HASH_MASK(intf, ld, mask_idx) \
+ rvu_read64(rvu, blkaddr, NPC_AF_INTFX_HASHX_MASKX(intf, ld, mask_idx))
+
+#define SET_KEX_LD_HASH_CTRL(intf, ld, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_HASHX_RESULT_CTRL(intf, ld), cfg)
+
+struct npc_mcam_kex_hash {
+ /* NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG */
+ bool lid_lt_ld_hash_en[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD];
+ /* NPC_AF_INTF(0..1)_HASH(0..1)_CFG */
+ u64 hash[NPC_MAX_INTF][NPC_MAX_HASH];
+ /* NPC_AF_INTF(0..1)_HASH(0..1)_MASK(0..1) */
+ u64 hash_mask[NPC_MAX_INTF][NPC_MAX_HASH][NPC_MAX_HASH_MASK];
+ /* NPC_AF_INTF(0..1)_HASH(0..1)_RESULT_CTRL */
+ u64 hash_ctrl[NPC_MAX_INTF][NPC_MAX_HASH];
+} __packed;
+
+void npc_update_field_hash(struct rvu *rvu, u8 intf,
+ struct mcam_entry *entry,
+ int blkaddr,
+ u64 features,
+ struct flow_msg *pkt,
+ struct flow_msg *mask,
+ struct flow_msg *opkt,
+ struct flow_msg *omask);
+void npc_config_secret_key(struct rvu *rvu, int blkaddr);
+void npc_program_mkex_hash(struct rvu *rvu, int blkaddr);
+u32 npc_field_hash_calc(u64 *ldata, struct npc_get_field_hash_info_rsp rsp,
+ u8 intf, u8 hash_idx);
+
+static struct npc_mcam_kex_hash npc_mkex_hash_default __maybe_unused = {
+ .lid_lt_ld_hash_en = {
+ [NIX_INTF_RX] = {
+ [NPC_LID_LC] = {
+ [NPC_LT_LC_IP6] = {
+ false,
+ false,
+ },
+ },
+ },
+
+ [NIX_INTF_TX] = {
+ [NPC_LID_LC] = {
+ [NPC_LT_LC_IP6] = {
+ false,
+ false,
+ },
+ },
+ },
+ },
+
+ .hash = {
+ [NIX_INTF_RX] = {
+ KEX_LD_CFG_HASH(0x8ULL, 0xf, 0x1, 0x1, NPC_LID_LC, NPC_LT_LC_IP6, 0xf),
+ KEX_LD_CFG_HASH(0x18ULL, 0xf, 0x1, 0x1, NPC_LID_LC, NPC_LT_LC_IP6, 0xf),
+ },
+
+ [NIX_INTF_TX] = {
+ KEX_LD_CFG_HASH(0x8ULL, 0xf, 0x1, 0x1, NPC_LID_LC, NPC_LT_LC_IP6, 0xf),
+ KEX_LD_CFG_HASH(0x18ULL, 0xf, 0x1, 0x1, NPC_LID_LC, NPC_LT_LC_IP6, 0xf),
+ },
+ },
+
+ .hash_mask = {
+ [NIX_INTF_RX] = {
+ [0] = {
+ GENMASK_ULL(63, 0),
+ GENMASK_ULL(63, 0),
+ },
+ [1] = {
+ GENMASK_ULL(63, 0),
+ GENMASK_ULL(63, 0),
+ },
+ },
+
+ [NIX_INTF_TX] = {
+ [0] = {
+ GENMASK_ULL(63, 0),
+ GENMASK_ULL(63, 0),
+ },
+ [1] = {
+ GENMASK_ULL(63, 0),
+ GENMASK_ULL(63, 0),
+ },
+ },
+ },
+
+ .hash_ctrl = {
+ [NIX_INTF_RX] = {
+ [0] = GENMASK_ULL(63, 32), /* MSB 32 bit is mask and LSB 32 bit is offset. */
+ [1] = GENMASK_ULL(63, 32), /* MSB 32 bit is mask and LSB 32 bit is offset. */
+ },
+
+ [NIX_INTF_TX] = {
+ [0] = GENMASK_ULL(63, 32), /* MSB 32 bit is mask and LSB 32 bit is offset. */
+ [1] = GENMASK_ULL(63, 32), /* MSB 32 bit is mask and LSB 32 bit is offset. */
+ },
+ },
+};
+
+/* If exact match table support is enabled, enable drop rules */
+#define NPC_MCAM_DROP_RULE_MAX 30
+#define NPC_MCAM_SDP_DROP_RULE_IDX 0
+
+#define RVU_PFFUNC(pf, func) \
+ ((((pf) & RVU_PFVF_PF_MASK) << RVU_PFVF_PF_SHIFT) | \
+ (((func) & RVU_PFVF_FUNC_MASK) << RVU_PFVF_FUNC_SHIFT))
+
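+/* Where an exact match entry lives: the 4-way hashed memory table or the
+ * fully associative CAM.
+ */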
+enum npc_exact_opc_type {
+ NPC_EXACT_OPC_MEM,
+ NPC_EXACT_OPC_CAM,
+};
+
+struct npc_exact_table_entry {
+ struct list_head list;
+ struct list_head glist;
+ u32 seq_id; /* Sequence number of entry */
+ u32 index; /* Mem table or cam table index */
+	u32 mcam_idx;	/* Mcam index; valid only if "cmd" field is false */
+ enum npc_exact_opc_type opc_type;
+ u16 chan;
+ u16 pcifunc;
+ u8 ways;
+ u8 mac[ETH_ALEN];
+ u8 ctype;
+ u8 cgx_id;
+ u8 lmac_id;
+	bool cmd; /* Added via ethtool command? */
+};
+
+struct npc_exact_table {
+ struct mutex lock; /* entries update lock */
+ unsigned long *id_bmap;
+ int num_drop_rules;
+ u32 tot_ids;
+ u16 cnt_cmd_rules[NPC_MCAM_DROP_RULE_MAX];
+ u16 counter_idx[NPC_MCAM_DROP_RULE_MAX];
+ bool promisc_mode[NPC_MCAM_DROP_RULE_MAX];
+ struct {
+ int ways;
+ int depth;
+ unsigned long *bmap;
+		u64 mask;	 /* Masks before hash calculation */
+		u16 hash_mask;	 /* 11 bits for hash mask */
+		u16 hash_offset; /* 11 bits offset */
+ } mem_table;
+
+ struct {
+ int depth;
+ unsigned long *bmap;
+ } cam_table;
+
+ struct {
+ bool valid;
+ u16 chan_val;
+ u16 chan_mask;
+ u16 pcifunc;
+ u8 drop_rule_idx;
+ } drop_rule_map[NPC_MCAM_DROP_RULE_MAX];
+
+#define NPC_EXACT_TBL_MAX_WAYS 4
+
+ struct list_head lhead_mem_tbl_entry[NPC_EXACT_TBL_MAX_WAYS];
+ int mem_tbl_entry_cnt;
+
+ struct list_head lhead_cam_tbl_entry;
+ int cam_tbl_entry_cnt;
+
+ struct list_head lhead_gbl;
+};
+
+bool rvu_npc_exact_has_match_table(struct rvu *rvu);
+u32 rvu_npc_exact_get_max_entries(struct rvu *rvu);
+int rvu_npc_exact_init(struct rvu *rvu);
+int rvu_npc_exact_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
+ struct msg_rsp *rsp);
+
+int rvu_npc_exact_mac_addr_update(struct rvu *rvu,
+ struct cgx_mac_addr_update_req *req,
+ struct cgx_mac_addr_update_rsp *rsp);
+
+int rvu_npc_exact_mac_addr_add(struct rvu *rvu,
+ struct cgx_mac_addr_add_req *req,
+ struct cgx_mac_addr_add_rsp *rsp);
+
+int rvu_npc_exact_mac_addr_del(struct rvu *rvu,
+ struct cgx_mac_addr_del_req *req,
+ struct msg_rsp *rsp);
+
+int rvu_npc_exact_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req,
+ struct cgx_mac_addr_set_or_get *rsp);
+
+void rvu_npc_exact_reset(struct rvu *rvu, u16 pcifunc);
+
+bool rvu_npc_exact_can_disable_feature(struct rvu *rvu);
+void rvu_npc_exact_disable_feature(struct rvu *rvu);
+u16 rvu_npc_exact_drop_rule_to_pcifunc(struct rvu *rvu, u32 drop_rule_idx);
+int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc);
+int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc);
+#endif /* RVU_NPC_HASH_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
new file mode 100644
index 000000000..d46ac29ad
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu_struct.h"
+#include "common.h"
+#include "mbox.h"
+#include "rvu.h"
+
+struct reg_range {
+ u64 start;
+ u64 end;
+};
+
+struct hw_reg_map {
+ u8 regblk;
+ u8 num_ranges;
+ u64 mask;
+#define MAX_REG_RANGES 8
+ struct reg_range range[MAX_REG_RANGES];
+};
+
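+/* Per TX scheduler level list of valid register offset ranges; each
+ * range is checked as [start, end) by rvu_check_valid_reg() below.
+ */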
+static struct hw_reg_map txsch_reg_map[NIX_TXSCH_LVL_CNT] = {
+ {NIX_TXSCH_LVL_SMQ, 2, 0xFFFF, {{0x0700, 0x0708}, {0x1400, 0x14C8} } },
+ {NIX_TXSCH_LVL_TL4, 3, 0xFFFF, {{0x0B00, 0x0B08}, {0x0B10, 0x0B18},
+ {0x1200, 0x12E0} } },
+ {NIX_TXSCH_LVL_TL3, 4, 0xFFFF, {{0x1000, 0x10E0}, {0x1600, 0x1608},
+ {0x1610, 0x1618}, {0x1700, 0x17C8} } },
+ {NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x17C8} } },
+ {NIX_TXSCH_LVL_TL1, 1, 0xFFFF, {{0x0C00, 0x0D98} } },
+};
+
+bool rvu_check_valid_reg(int regmap, int regblk, u64 reg)
+{
+ int idx;
+ struct hw_reg_map *map;
+
+ /* Only 64bit offsets */
+ if (reg & 0x07)
+ return false;
+
+ if (regmap == TXSCHQ_HWREGMAP) {
+ if (regblk >= NIX_TXSCH_LVL_CNT)
+ return false;
+ map = &txsch_reg_map[regblk];
+ } else {
+ return false;
+ }
+
+ /* Should never happen */
+ if (map->regblk != regblk)
+ return false;
+
+ reg &= map->mask;
+
+ for (idx = 0; idx < map->num_ranges; idx++) {
+ if (reg >= map->range[idx].start &&
+ reg < map->range[idx].end)
+ return true;
+ }
+ return false;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
new file mode 100644
index 000000000..b690e5566
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -0,0 +1,733 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#ifndef RVU_REG_H
+#define RVU_REG_H
+
+/* Admin function registers */
+#define RVU_AF_MSIXTR_BASE (0x10)
+#define RVU_AF_ECO (0x20)
+#define RVU_AF_BLK_RST (0x30)
+#define RVU_AF_PF_BAR4_ADDR (0x40)
+#define RVU_AF_RAS (0x100)
+#define RVU_AF_RAS_W1S (0x108)
+#define RVU_AF_RAS_ENA_W1S (0x110)
+#define RVU_AF_RAS_ENA_W1C (0x118)
+#define RVU_AF_GEN_INT (0x120)
+#define RVU_AF_GEN_INT_W1S (0x128)
+#define RVU_AF_GEN_INT_ENA_W1S (0x130)
+#define RVU_AF_GEN_INT_ENA_W1C (0x138)
+#define RVU_AF_AFPF_MBOX0 (0x02000)
+#define RVU_AF_AFPF_MBOX1 (0x02008)
+#define RVU_AF_AFPFX_MBOXX(a, b) (0x2000 | (a) << 4 | (b) << 3)
+#define RVU_AF_PFME_STATUS (0x2800)
+#define RVU_AF_PFTRPEND (0x2810)
+#define RVU_AF_PFTRPEND_W1S (0x2820)
+#define RVU_AF_PF_RST (0x2840)
+#define RVU_AF_HWVF_RST (0x2850)
+#define RVU_AF_PFAF_MBOX_INT (0x2880)
+#define RVU_AF_PFAF_MBOX_INT_W1S (0x2888)
+#define RVU_AF_PFAF_MBOX_INT_ENA_W1S (0x2890)
+#define RVU_AF_PFAF_MBOX_INT_ENA_W1C (0x2898)
+#define RVU_AF_PFFLR_INT (0x28a0)
+#define RVU_AF_PFFLR_INT_W1S (0x28a8)
+#define RVU_AF_PFFLR_INT_ENA_W1S (0x28b0)
+#define RVU_AF_PFFLR_INT_ENA_W1C (0x28b8)
+#define RVU_AF_PFME_INT (0x28c0)
+#define RVU_AF_PFME_INT_W1S (0x28c8)
+#define RVU_AF_PFME_INT_ENA_W1S (0x28d0)
+#define RVU_AF_PFME_INT_ENA_W1C (0x28d8)
+#define RVU_AF_PFX_BAR4_ADDR(a) (0x5000 | (a) << 4)
+#define RVU_AF_PFX_BAR4_CFG(a)		    (0x5200 | (a) << 4)
+#define RVU_AF_PFX_VF_BAR4_ADDR(a)	    (0x5400 | (a) << 4)
+#define RVU_AF_PFX_VF_BAR4_CFG(a)	    (0x5600 | (a) << 4)
+#define RVU_AF_PFX_LMTLINE_ADDR(a)	    (0x5800 | (a) << 4)
+#define RVU_AF_SMMU_ADDR_REQ (0x6000)
+#define RVU_AF_SMMU_TXN_REQ (0x6008)
+#define RVU_AF_SMMU_ADDR_RSP_STS (0x6010)
+#define RVU_AF_SMMU_ADDR_TLN (0x6018)
+#define RVU_AF_SMMU_TLN_FLIT0 (0x6020)
+
+/* Admin function's privileged PF/VF registers */
+#define RVU_PRIV_CONST (0x8000000)
+#define RVU_PRIV_GEN_CFG (0x8000010)
+#define RVU_PRIV_CLK_CFG (0x8000020)
+#define RVU_PRIV_ACTIVE_PC (0x8000030)
+#define RVU_PRIV_PFX_CFG(a) (0x8000100 | (a) << 16)
+#define RVU_PRIV_PFX_MSIX_CFG(a) (0x8000110 | (a) << 16)
+#define RVU_PRIV_PFX_ID_CFG(a) (0x8000120 | (a) << 16)
+#define RVU_PRIV_PFX_INT_CFG(a) (0x8000200 | (a) << 16)
+#define RVU_PRIV_PFX_NIXX_CFG(a) (0x8000300 | (a) << 3)
+#define RVU_PRIV_PFX_NPA_CFG (0x8000310)
+#define RVU_PRIV_PFX_SSO_CFG (0x8000320)
+#define RVU_PRIV_PFX_SSOW_CFG (0x8000330)
+#define RVU_PRIV_PFX_TIM_CFG (0x8000340)
+#define RVU_PRIV_PFX_CPTX_CFG(a) (0x8000350 | (a) << 3)
+#define RVU_PRIV_BLOCK_TYPEX_REV(a) (0x8000400 | (a) << 3)
+#define RVU_PRIV_HWVFX_INT_CFG(a) (0x8001280 | (a) << 16)
+#define RVU_PRIV_HWVFX_NIXX_CFG(a) (0x8001300 | (a) << 3)
+#define RVU_PRIV_HWVFX_NPA_CFG (0x8001310)
+#define RVU_PRIV_HWVFX_SSO_CFG (0x8001320)
+#define RVU_PRIV_HWVFX_SSOW_CFG (0x8001330)
+#define RVU_PRIV_HWVFX_TIM_CFG (0x8001340)
+#define RVU_PRIV_HWVFX_CPTX_CFG(a) (0x8001350 | (a) << 3)
+
+/* RVU PF registers */
+#define RVU_PF_VFX_PFVF_MBOX0 (0x00000)
+#define RVU_PF_VFX_PFVF_MBOX1 (0x00008)
+#define RVU_PF_VFX_PFVF_MBOXX(a, b) (0x0 | (a) << 12 | (b) << 3)
+#define RVU_PF_VF_BAR4_ADDR (0x10)
+#define RVU_PF_BLOCK_ADDRX_DISC(a) (0x200 | (a) << 3)
+#define RVU_PF_VFME_STATUSX(a) (0x800 | (a) << 3)
+#define RVU_PF_VFTRPENDX(a) (0x820 | (a) << 3)
+#define RVU_PF_VFTRPEND_W1SX(a) (0x840 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INTX(a) (0x880 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_W1SX(a) (0x8A0 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_ENA_W1SX(a) (0x8C0 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_ENA_W1CX(a) (0x8E0 | (a) << 3)
+#define RVU_PF_VFFLR_INTX(a) (0x900 | (a) << 3)
+#define RVU_PF_VFFLR_INT_W1SX(a) (0x920 | (a) << 3)
+#define RVU_PF_VFFLR_INT_ENA_W1SX(a) (0x940 | (a) << 3)
+#define RVU_PF_VFFLR_INT_ENA_W1CX(a) (0x960 | (a) << 3)
+#define RVU_PF_VFME_INTX(a) (0x980 | (a) << 3)
+#define RVU_PF_VFME_INT_W1SX(a) (0x9A0 | (a) << 3)
+#define RVU_PF_VFME_INT_ENA_W1SX(a) (0x9C0 | (a) << 3)
+#define RVU_PF_VFME_INT_ENA_W1CX(a) (0x9E0 | (a) << 3)
+#define RVU_PF_PFAF_MBOX0 (0xC00)
+#define RVU_PF_PFAF_MBOX1 (0xC08)
+#define RVU_PF_PFAF_MBOXX(a) (0xC00 | (a) << 3)
+#define RVU_PF_INT (0xc20)
+#define RVU_PF_INT_W1S (0xc28)
+#define RVU_PF_INT_ENA_W1S (0xc30)
+#define RVU_PF_INT_ENA_W1C (0xc38)
+#define RVU_PF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4)
+#define RVU_PF_MSIX_VECX_CTL(a) (0x008 | (a) << 4)
+#define RVU_PF_MSIX_PBAX(a) (0xF0000 | (a) << 3)
+#define RVU_PF_VF_MBOX_ADDR (0xC40)
+#define RVU_PF_LMTLINE_ADDR (0xC48)
+
+/* RVU VF registers */
+#define RVU_VF_VFPF_MBOX0 (0x00000)
+#define RVU_VF_VFPF_MBOX1 (0x00008)
+
+/* NPA block's admin function registers */
+#define NPA_AF_BLK_RST (0x0000)
+#define NPA_AF_CONST (0x0010)
+#define NPA_AF_CONST1 (0x0018)
+#define NPA_AF_LF_RST (0x0020)
+#define NPA_AF_GEN_CFG (0x0030)
+#define NPA_AF_NDC_CFG (0x0040)
+#define NPA_AF_INP_CTL (0x00D0)
+#define NPA_AF_ACTIVE_CYCLES_PC (0x00F0)
+#define NPA_AF_AVG_DELAY (0x0100)
+#define NPA_AF_GEN_INT (0x0140)
+#define NPA_AF_GEN_INT_W1S (0x0148)
+#define NPA_AF_GEN_INT_ENA_W1S (0x0150)
+#define NPA_AF_GEN_INT_ENA_W1C (0x0158)
+#define NPA_AF_RVU_INT (0x0160)
+#define NPA_AF_RVU_INT_W1S (0x0168)
+#define NPA_AF_RVU_INT_ENA_W1S (0x0170)
+#define NPA_AF_RVU_INT_ENA_W1C (0x0178)
+#define NPA_AF_ERR_INT (0x0180)
+#define NPA_AF_ERR_INT_W1S (0x0188)
+#define NPA_AF_ERR_INT_ENA_W1S (0x0190)
+#define NPA_AF_ERR_INT_ENA_W1C (0x0198)
+#define NPA_AF_RAS (0x01A0)
+#define NPA_AF_RAS_W1S (0x01A8)
+#define NPA_AF_RAS_ENA_W1S (0x01B0)
+#define NPA_AF_RAS_ENA_W1C (0x01B8)
+#define NPA_AF_BP_TEST (0x0200)
+#define NPA_AF_ECO (0x0300)
+#define NPA_AF_AQ_CFG (0x0600)
+#define NPA_AF_AQ_BASE (0x0610)
+#define NPA_AF_AQ_STATUS (0x0620)
+#define NPA_AF_AQ_DOOR (0x0630)
+#define NPA_AF_AQ_DONE_WAIT (0x0640)
+#define NPA_AF_AQ_DONE (0x0650)
+#define NPA_AF_AQ_DONE_ACK (0x0660)
+#define NPA_AF_AQ_DONE_INT (0x0680)
+#define NPA_AF_AQ_DONE_INT_W1S (0x0688)
+#define NPA_AF_AQ_DONE_ENA_W1S (0x0690)
+#define NPA_AF_AQ_DONE_ENA_W1C (0x0698)
+#define NPA_AF_BATCH_CTL (0x06a0)
+#define NPA_AF_LFX_AURAS_CFG(a) (0x4000 | (a) << 18)
+#define NPA_AF_LFX_LOC_AURAS_BASE(a) (0x4010 | (a) << 18)
+#define NPA_AF_LFX_QINTS_CFG(a) (0x4100 | (a) << 18)
+#define NPA_AF_LFX_QINTS_BASE(a) (0x4110 | (a) << 18)
+#define NPA_PRIV_AF_INT_CFG (0x10000)
+#define NPA_PRIV_LFX_CFG (0x10010)
+#define NPA_PRIV_LFX_INT_CFG (0x10020)
+#define NPA_AF_RVU_LF_CFG_DEBUG (0x10030)
+
+/* NIX block's admin function registers */
+#define NIX_AF_CFG (0x0000)
+#define NIX_AF_STATUS (0x0010)
+#define NIX_AF_NDC_CFG (0x0018)
+#define NIX_AF_CONST (0x0020)
+#define NIX_AF_CONST1 (0x0028)
+#define NIX_AF_CONST2 (0x0030)
+#define NIX_AF_CONST3 (0x0038)
+#define NIX_AF_SQ_CONST (0x0040)
+#define NIX_AF_CQ_CONST (0x0048)
+#define NIX_AF_RQ_CONST (0x0050)
+#define NIX_AF_PL_CONST (0x0058)
+#define NIX_AF_PSE_CONST (0x0060)
+#define NIX_AF_TL1_CONST (0x0070)
+#define NIX_AF_TL2_CONST (0x0078)
+#define NIX_AF_TL3_CONST (0x0080)
+#define NIX_AF_TL4_CONST (0x0088)
+#define NIX_AF_MDQ_CONST (0x0090)
+#define NIX_AF_MC_MIRROR_CONST (0x0098)
+#define NIX_AF_LSO_CFG (0x00A8)
+#define NIX_AF_BLK_RST (0x00B0)
+#define NIX_AF_TX_TSTMP_CFG (0x00C0)
+#define NIX_AF_PL_TS (0x00C8)
+#define NIX_AF_RX_CFG (0x00D0)
+#define NIX_AF_AVG_DELAY (0x00E0)
+#define NIX_AF_CINT_DELAY (0x00F0)
+#define NIX_AF_RX_MCAST_BASE (0x0100)
+#define NIX_AF_RX_MCAST_CFG (0x0110)
+#define NIX_AF_RX_MCAST_BUF_BASE (0x0120)
+#define NIX_AF_RX_MCAST_BUF_CFG (0x0130)
+#define NIX_AF_RX_MIRROR_BUF_BASE (0x0140)
+#define NIX_AF_RX_MIRROR_BUF_CFG (0x0148)
+#define NIX_AF_LF_RST (0x0150)
+#define NIX_AF_GEN_INT (0x0160)
+#define NIX_AF_GEN_INT_W1S (0x0168)
+#define NIX_AF_GEN_INT_ENA_W1S (0x0170)
+#define NIX_AF_GEN_INT_ENA_W1C (0x0178)
+#define NIX_AF_ERR_INT (0x0180)
+#define NIX_AF_ERR_INT_W1S (0x0188)
+#define NIX_AF_ERR_INT_ENA_W1S (0x0190)
+#define NIX_AF_ERR_INT_ENA_W1C (0x0198)
+#define NIX_AF_RAS (0x01A0)
+#define NIX_AF_RAS_W1S (0x01A8)
+#define NIX_AF_RAS_ENA_W1S (0x01B0)
+#define NIX_AF_RAS_ENA_W1C (0x01B8)
+#define NIX_AF_RVU_INT (0x01C0)
+#define NIX_AF_RVU_INT_W1S (0x01C8)
+#define NIX_AF_RVU_INT_ENA_W1S (0x01D0)
+#define NIX_AF_RVU_INT_ENA_W1C (0x01D8)
+#define NIX_AF_TCP_TIMER (0x01E0)
+#define NIX_AF_RX_DEF_ET(a) (0x01F0ull | (uint64_t)(a) << 3)
+#define NIX_AF_RX_DEF_OL2 (0x0200)
+#define NIX_AF_RX_DEF_OIP4 (0x0210)
+#define NIX_AF_RX_DEF_IIP4 (0x0220)
+#define NIX_AF_RX_DEF_VLAN0_PCP_DEI (0x0228)
+#define NIX_AF_RX_DEF_OIP6 (0x0230)
+#define NIX_AF_RX_DEF_VLAN1_PCP_DEI (0x0238)
+#define NIX_AF_RX_DEF_IIP6 (0x0240)
+#define NIX_AF_RX_DEF_OTCP (0x0250)
+#define NIX_AF_RX_DEF_ITCP (0x0260)
+#define NIX_AF_RX_DEF_OUDP (0x0270)
+#define NIX_AF_RX_DEF_IUDP (0x0280)
+#define NIX_AF_RX_DEF_OSCTP (0x0290)
+#define NIX_AF_RX_DEF_CST_APAD0 (0x0298)
+#define NIX_AF_RX_DEF_ISCTP (0x02A0)
+#define NIX_AF_RX_DEF_IPSECX (0x02B0)
+#define NIX_AF_RX_DEF_CST_APAD1 (0x02A8)
+#define NIX_AF_RX_DEF_IIP4_DSCP (0x02E0)
+#define NIX_AF_RX_DEF_OIP4_DSCP (0x02E8)
+#define NIX_AF_RX_DEF_IIP6_DSCP (0x02F0)
+#define NIX_AF_RX_DEF_OIP6_DSCP (0x02F8)
+#define NIX_AF_RX_IPSEC_GEN_CFG (0x0300)
+#define NIX_AF_RX_CPTX_INST_ADDR (0x0310)
+#define NIX_AF_RX_CPTX_INST_QSEL(a) (0x0320ull | (uint64_t)(a) << 3)
+#define NIX_AF_RX_CPTX_CREDIT(a) (0x0360ull | (uint64_t)(a) << 3)
+#define NIX_AF_NDC_TX_SYNC (0x03F0)
+#define NIX_AF_AQ_CFG (0x0400)
+#define NIX_AF_AQ_BASE (0x0410)
+#define NIX_AF_AQ_STATUS (0x0420)
+#define NIX_AF_AQ_DOOR (0x0430)
+#define NIX_AF_AQ_DONE_WAIT (0x0440)
+#define NIX_AF_AQ_DONE (0x0450)
+#define NIX_AF_AQ_DONE_ACK (0x0460)
+#define NIX_AF_AQ_DONE_TIMER (0x0470)
+#define NIX_AF_AQ_DONE_INT (0x0480)
+#define NIX_AF_AQ_DONE_INT_W1S (0x0488)
+#define NIX_AF_AQ_DONE_ENA_W1S (0x0490)
+#define NIX_AF_AQ_DONE_ENA_W1C (0x0498)
+#define NIX_AF_RX_LINKX_SLX_SPKT_CNT (0x0500)
+#define NIX_AF_RX_LINKX_SLX_SXQE_CNT (0x0510)
+#define NIX_AF_RX_MCAST_JOBSX_SW_CNT (0x0520)
+#define NIX_AF_RX_MIRROR_JOBSX_SW_CNT (0x0530)
+#define NIX_AF_RX_LINKX_CFG(a) (0x0540 | (a) << 16)
+#define NIX_AF_RX_SW_SYNC (0x0550)
+#define NIX_AF_RX_SW_SYNC_DONE (0x0560)
+#define NIX_AF_SEB_ECO (0x0600)
+#define NIX_AF_SEB_TEST_BP (0x0610)
+#define NIX_AF_NORM_TX_FIFO_STATUS (0x0620)
+#define NIX_AF_EXPR_TX_FIFO_STATUS (0x0630)
+#define NIX_AF_SDP_TX_FIFO_STATUS (0x0640)
+#define NIX_AF_TX_NPC_CAPTURE_CONFIG (0x0660)
+#define NIX_AF_TX_NPC_CAPTURE_INFO (0x0670)
+#define NIX_AF_SEB_CFG (0x05F0)
+#define NIX_PTP_1STEP_EN BIT_ULL(2)
+
+#define NIX_AF_DEBUG_NPC_RESP_DATAX(a) (0x680 | (a) << 3)
+#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16)
+#define NIX_AF_SQM_DBG_CTL_STATUS (0x750)
+#define NIX_AF_DWRR_SDP_MTU (0x790)
+#define NIX_AF_DWRR_RPM_MTU (0x7A0)
+#define NIX_AF_PSE_CHANNEL_LEVEL (0x800)
+#define NIX_AF_PSE_SHAPER_CFG (0x810)
+#define NIX_AF_TX_EXPR_CREDIT (0x830)
+#define NIX_AF_MARK_FORMATX_CTL(a) (0x900 | (a) << 18)
+#define NIX_AF_TX_LINKX_NORM_CREDIT(a) (0xA00 | (a) << 16)
+#define NIX_AF_TX_LINKX_EXPR_CREDIT(a) (0xA10 | (a) << 16)
+#define NIX_AF_TX_LINKX_SW_XOFF(a) (0xA20 | (a) << 16)
+#define NIX_AF_TX_LINKX_HW_XOFF(a) (0xA30 | (a) << 16)
+#define NIX_AF_SDP_LINK_CREDIT (0xa40)
+#define NIX_AF_SDP_SW_XOFFX(a) (0xA60 | (a) << 3)
+#define NIX_AF_SDP_HW_XOFFX(a) (0xAC0 | (a) << 3)
+#define NIX_AF_TL4X_BP_STATUS(a) (0xB00 | (a) << 16)
+#define NIX_AF_TL4X_SDP_LINK_CFG(a) (0xB10 | (a) << 16)
+#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (a) << 16)
+#define NIX_AF_TL1X_SHAPE(a) (0xC10 | (a) << 16)
+#define NIX_AF_TL1X_CIR(a) (0xC20 | (a) << 16)
+#define NIX_AF_TL1X_SHAPE_STATE(a) (0xC50 | (a) << 16)
+#define NIX_AF_TL1X_SW_XOFF(a) (0xC70 | (a) << 16)
+#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (a) << 16)
+#define NIX_AF_TL1X_GREEN(a) (0xC90 | (a) << 16)
+#define NIX_AF_TL1X_YELLOW(a) (0xCA0 | (a) << 16)
+#define NIX_AF_TL1X_RED(a) (0xCB0 | (a) << 16)
+#define NIX_AF_TL1X_MD_DEBUG0(a) (0xCC0 | (a) << 16)
+#define NIX_AF_TL1X_MD_DEBUG1(a) (0xCC8 | (a) << 16)
+#define NIX_AF_TL1X_MD_DEBUG2(a) (0xCD0 | (a) << 16)
+#define NIX_AF_TL1X_MD_DEBUG3(a) (0xCD8 | (a) << 16)
+#define NIX_AF_TL1A_DEBUG (0xce0)
+#define NIX_AF_TL1B_DEBUG (0xcf0)
+#define NIX_AF_TL1_DEBUG_GREEN (0xd00)
+#define NIX_AF_TL1_DEBUG_NODE (0xd10)
+#define NIX_AF_TL1X_DROPPED_PACKETS(a) (0xD20 | (a) << 16)
+#define NIX_AF_TL1X_DROPPED_BYTES(a) (0xD30 | (a) << 16)
+#define NIX_AF_TL1X_RED_PACKETS(a) (0xD40 | (a) << 16)
+#define NIX_AF_TL1X_RED_BYTES(a) (0xD50 | (a) << 16)
+#define NIX_AF_TL1X_YELLOW_PACKETS(a) (0xD60 | (a) << 16)
+#define NIX_AF_TL1X_YELLOW_BYTES(a) (0xD70 | (a) << 16)
+#define NIX_AF_TL1X_GREEN_PACKETS(a) (0xD80 | (a) << 16)
+#define NIX_AF_TL1X_GREEN_BYTES(a) (0xD90 | (a) << 16)
+#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (a) << 16)
+#define NIX_AF_TL2X_SHAPE(a) (0xE10 | (a) << 16)
+#define NIX_AF_TL2X_CIR(a) (0xE20 | (a) << 16)
+#define NIX_AF_TL2X_PIR(a) (0xE30 | (a) << 16)
+#define NIX_AF_TL2X_SCHED_STATE(a) (0xE40 | (a) << 16)
+#define NIX_AF_TL2X_SHAPE_STATE(a) (0xE50 | (a) << 16)
+#define NIX_AF_TL2X_POINTERS(a) (0xE60 | (a) << 16)
+#define NIX_AF_TL2X_SW_XOFF(a) (0xE70 | (a) << 16)
+#define NIX_AF_TL2X_TOPOLOGY(a) (0xE80 | (a) << 16)
+#define NIX_AF_TL2X_PARENT(a) (0xE88 | (a) << 16)
+#define NIX_AF_TL2X_GREEN(a) (0xE90 | (a) << 16)
+#define NIX_AF_TL2X_YELLOW(a) (0xEA0 | (a) << 16)
+#define NIX_AF_TL2X_RED(a) (0xEB0 | (a) << 16)
+#define NIX_AF_TL2X_MD_DEBUG0(a) (0xEC0 | (a) << 16)
+#define NIX_AF_TL2X_MD_DEBUG1(a) (0xEC8 | (a) << 16)
+#define NIX_AF_TL2X_MD_DEBUG2(a) (0xED0 | (a) << 16)
+#define NIX_AF_TL2X_MD_DEBUG3(a) (0xED8 | (a) << 16)
+#define NIX_AF_TL2A_DEBUG (0xee0)
+#define NIX_AF_TL2B_DEBUG (0xef0)
+#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (a) << 16)
+#define NIX_AF_TL3X_SHAPE(a) (0x1010 | (a) << 16)
+#define NIX_AF_TL3X_CIR(a) (0x1020 | (a) << 16)
+#define NIX_AF_TL3X_PIR(a) (0x1030 | (a) << 16)
+#define NIX_AF_TL3X_SCHED_STATE(a) (0x1040 | (a) << 16)
+#define NIX_AF_TL3X_SHAPE_STATE(a) (0x1050 | (a) << 16)
+#define NIX_AF_TL3X_POINTERS(a) (0x1060 | (a) << 16)
+#define NIX_AF_TL3X_SW_XOFF(a) (0x1070 | (a) << 16)
+#define NIX_AF_TL3X_TOPOLOGY(a) (0x1080 | (a) << 16)
+#define NIX_AF_TL3X_PARENT(a) (0x1088 | (a) << 16)
+#define NIX_AF_TL3X_GREEN(a) (0x1090 | (a) << 16)
+#define NIX_AF_TL3X_YELLOW(a) (0x10A0 | (a) << 16)
+#define NIX_AF_TL3X_RED(a) (0x10B0 | (a) << 16)
+#define NIX_AF_TL3X_MD_DEBUG0(a) (0x10C0 | (a) << 16)
+#define NIX_AF_TL3X_MD_DEBUG1(a) (0x10C8 | (a) << 16)
+#define NIX_AF_TL3X_MD_DEBUG2(a) (0x10D0 | (a) << 16)
+#define NIX_AF_TL3X_MD_DEBUG3(a) (0x10D8 | (a) << 16)
+#define NIX_AF_TL3A_DEBUG (0x10e0)
+#define NIX_AF_TL3B_DEBUG (0x10f0)
+#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (a) << 16)
+#define NIX_AF_TL4X_SHAPE(a) (0x1210 | (a) << 16)
+#define NIX_AF_TL4X_CIR(a) (0x1220 | (a) << 16)
+#define NIX_AF_TL4X_PIR(a) (0x1230 | (a) << 16)
+#define NIX_AF_TL4X_SCHED_STATE(a) (0x1240 | (a) << 16)
+#define NIX_AF_TL4X_SHAPE_STATE(a) (0x1250 | (a) << 16)
+#define NIX_AF_TL4X_POINTERS(a) (0x1260 | (a) << 16)
+#define NIX_AF_TL4X_SW_XOFF(a) (0x1270 | (a) << 16)
+#define NIX_AF_TL4X_TOPOLOGY(a) (0x1280 | (a) << 16)
+#define NIX_AF_TL4X_PARENT(a) (0x1288 | (a) << 16)
+#define NIX_AF_TL4X_GREEN(a) (0x1290 | (a) << 16)
+#define NIX_AF_TL4X_YELLOW(a) (0x12A0 | (a) << 16)
+#define NIX_AF_TL4X_RED(a) (0x12B0 | (a) << 16)
+#define NIX_AF_TL4X_MD_DEBUG0(a) (0x12C0 | (a) << 16)
+#define NIX_AF_TL4X_MD_DEBUG1(a) (0x12C8 | (a) << 16)
+#define NIX_AF_TL4X_MD_DEBUG2(a) (0x12D0 | (a) << 16)
+#define NIX_AF_TL4X_MD_DEBUG3(a) (0x12D8 | (a) << 16)
+#define NIX_AF_TL4A_DEBUG (0x12e0)
+#define NIX_AF_TL4B_DEBUG (0x12f0)
+#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (a) << 16)
+#define NIX_AF_MDQX_SHAPE(a) (0x1410 | (a) << 16)
+#define NIX_AF_MDQX_CIR(a) (0x1420 | (a) << 16)
+#define NIX_AF_MDQX_PIR(a) (0x1430 | (a) << 16)
+#define NIX_AF_MDQX_SCHED_STATE(a) (0x1440 | (a) << 16)
+#define NIX_AF_MDQX_SHAPE_STATE(a) (0x1450 | (a) << 16)
+#define NIX_AF_MDQX_POINTERS(a) (0x1460 | (a) << 16)
+#define NIX_AF_MDQX_SW_XOFF(a) (0x1470 | (a) << 16)
+#define NIX_AF_MDQX_PARENT(a) (0x1480 | (a) << 16)
+#define NIX_AF_MDQX_MD_DEBUG(a) (0x14C0 | (a) << 16)
+#define NIX_AF_MDQX_PTR_FIFO(a) (0x14D0 | (a) << 16)
+#define NIX_AF_MDQA_DEBUG (0x14e0)
+#define NIX_AF_MDQB_DEBUG (0x14f0)
+#define NIX_AF_TL3_TL2X_CFG(a) (0x1600 | (a) << 18)
+#define NIX_AF_TL3_TL2X_BP_STATUS(a) (0x1610 | (a) << 16)
+#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (a) << 16 | (b) << 3)
+#define NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(a, b) (0x1800 | (a) << 18 | (b) << 3)
+#define NIX_AF_TX_MCASTX(a) (0x1900 | (a) << 15)
+#define NIX_AF_TX_VTAG_DEFX_CTL(a) (0x1A00 | (a) << 16)
+#define NIX_AF_TX_VTAG_DEFX_DATA(a) (0x1A10 | (a) << 16)
+#define NIX_AF_RX_BPIDX_STATUS(a) (0x1A20 | (a) << 17)
+#define NIX_AF_RX_CHANX_CFG(a) (0x1A30 | (a) << 15)
+#define NIX_AF_CINT_TIMERX(a) (0x1A40 | (a) << 18)
+#define NIX_AF_LSO_FORMATX_FIELDX(a, b) (0x1B00 | (a) << 16 | (b) << 3)
+#define NIX_AF_LFX_CFG(a) (0x4000 | (a) << 17)
+#define NIX_AF_LFX_SQS_CFG(a) (0x4020 | (a) << 17)
+#define NIX_AF_LFX_TX_CFG2(a) (0x4028 | (a) << 17)
+#define NIX_AF_LFX_SQS_BASE(a) (0x4030 | (a) << 17)
+#define NIX_AF_LFX_RQS_CFG(a) (0x4040 | (a) << 17)
+#define NIX_AF_LFX_RQS_BASE(a) (0x4050 | (a) << 17)
+#define NIX_AF_LFX_CQS_CFG(a) (0x4060 | (a) << 17)
+#define NIX_AF_LFX_CQS_BASE(a) (0x4070 | (a) << 17)
+#define NIX_AF_LFX_TX_CFG(a) (0x4080 | (a) << 17)
+#define NIX_AF_LFX_TX_PARSE_CFG(a) (0x4090 | (a) << 17)
+#define NIX_AF_LFX_RX_CFG(a) (0x40A0 | (a) << 17)
+#define NIX_AF_LFX_RSS_CFG(a) (0x40C0 | (a) << 17)
+#define NIX_AF_LFX_RSS_BASE(a) (0x40D0 | (a) << 17)
+#define NIX_AF_LFX_QINTS_CFG(a) (0x4100 | (a) << 17)
+#define NIX_AF_LFX_QINTS_BASE(a) (0x4110 | (a) << 17)
+#define NIX_AF_LFX_CINTS_CFG(a) (0x4120 | (a) << 17)
+#define NIX_AF_LFX_CINTS_BASE(a) (0x4130 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_CFG0(a) (0x4140 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_CFG1(a) (0x4148 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_DYNO_CFG(a) (0x4150 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_DYNO_BASE(a) (0x4158 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_SA_BASE(a) (0x4170 | (a) << 17)
+#define NIX_AF_LFX_TX_STATUS(a) (0x4180 | (a) << 17)
+#define NIX_AF_LFX_RX_VTAG_TYPEX(a, b) (0x4200 | (a) << 17 | (b) << 3)
+#define NIX_AF_LFX_LOCKX(a, b) (0x4300 | (a) << 17 | (b) << 3)
+#define NIX_AF_LFX_TX_STATX(a, b) (0x4400 | (a) << 17 | (b) << 3)
+#define NIX_AF_LFX_RX_STATX(a, b) (0x4500 | (a) << 17 | (b) << 3)
+#define NIX_AF_LFX_RSS_GRPX(a, b) (0x4600 | (a) << 17 | (b) << 3)
+#define NIX_AF_RX_NPC_MC_RCV (0x4700)
+#define NIX_AF_RX_NPC_MC_DROP (0x4710)
+#define NIX_AF_RX_NPC_MIRROR_RCV (0x4720)
+#define NIX_AF_RX_NPC_MIRROR_DROP (0x4730)
+#define NIX_AF_RX_ACTIVE_CYCLES_PCX(a) (0x4800 | (a) << 16)
+#define NIX_AF_LINKX_CFG(a) (0x4010 | (a) << 17)
+
+#define NIX_PRIV_AF_INT_CFG (0x8000000)
+#define NIX_PRIV_LFX_CFG (0x8000010)
+#define NIX_PRIV_LFX_INT_CFG (0x8000020)
+#define NIX_AF_RVU_LF_CFG_DEBUG (0x8000030)
+
+#define NIX_AF_LINKX_BASE_MASK GENMASK_ULL(11, 0)
+#define NIX_AF_LINKX_RANGE_MASK GENMASK_ULL(19, 16)
+#define NIX_AF_LINKX_MCS_CNT_MASK GENMASK_ULL(33, 32)
+
+/* SSO */
+#define SSO_AF_CONST (0x1000)
+#define SSO_AF_CONST1 (0x1008)
+#define SSO_AF_BLK_RST (0x10f8)
+#define SSO_AF_LF_HWGRP_RST (0x10e0)
+#define SSO_AF_RVU_LF_CFG_DEBUG (0x3800)
+#define SSO_PRIV_LFX_HWGRP_CFG (0x10000)
+#define SSO_PRIV_LFX_HWGRP_INT_CFG (0x20000)
+
+/* SSOW */
+#define SSOW_AF_RVU_LF_HWS_CFG_DEBUG (0x0010)
+#define SSOW_AF_LF_HWS_RST (0x0030)
+#define SSOW_PRIV_LFX_HWS_CFG (0x1000)
+#define SSOW_PRIV_LFX_HWS_INT_CFG (0x2000)
+
+/* TIM */
+#define TIM_AF_CONST (0x90)
+#define TIM_PRIV_LFX_CFG (0x20000)
+#define TIM_PRIV_LFX_INT_CFG (0x24000)
+#define TIM_AF_RVU_LF_CFG_DEBUG (0x30000)
+#define TIM_AF_BLK_RST (0x10)
+#define TIM_AF_LF_RST (0x20)
+
+/* CPT */
+#define CPT_AF_CONSTANTS0 (0x0000)
+#define CPT_AF_CONSTANTS1 (0x1000)
+#define CPT_AF_DIAG (0x3000)
+#define CPT_AF_ECO (0x4000)
+#define CPT_AF_FLTX_INT(a) (0xa000ull | (u64)(a) << 3)
+#define CPT_AF_FLTX_INT_W1S(a) (0xb000ull | (u64)(a) << 3)
+#define CPT_AF_FLTX_INT_ENA_W1C(a) (0xc000ull | (u64)(a) << 3)
+#define CPT_AF_FLTX_INT_ENA_W1S(a) (0xd000ull | (u64)(a) << 3)
+#define CPT_AF_PSNX_EXE(a) (0xe000ull | (u64)(a) << 3)
+#define CPT_AF_PSNX_EXE_W1S(a) (0xf000ull | (u64)(a) << 3)
+#define CPT_AF_PSNX_LF(a) (0x10000ull | (u64)(a) << 3)
+#define CPT_AF_PSNX_LF_W1S(a) (0x11000ull | (u64)(a) << 3)
+#define CPT_AF_EXEX_CTL2(a) (0x12000ull | (u64)(a) << 3)
+#define CPT_AF_EXEX_STS(a) (0x13000ull | (u64)(a) << 3)
+#define CPT_AF_EXE_ERR_INFO (0x14000)
+#define CPT_AF_EXEX_ACTIVE(a) (0x16000ull | (u64)(a) << 3)
+#define CPT_AF_INST_REQ_PC (0x17000)
+#define CPT_AF_INST_LATENCY_PC (0x18000)
+#define CPT_AF_RD_REQ_PC (0x19000)
+#define CPT_AF_RD_LATENCY_PC (0x1a000)
+#define CPT_AF_RD_UC_PC (0x1b000)
+#define CPT_AF_ACTIVE_CYCLES_PC (0x1c000)
+#define CPT_AF_EXE_DBG_CTL (0x1d000)
+#define CPT_AF_EXE_DBG_DATA (0x1e000)
+#define CPT_AF_EXE_REQ_TIMER (0x1f000)
+#define CPT_AF_EXEX_CTL(a) (0x20000ull | (u64)(a) << 3)
+#define CPT_AF_EXE_PERF_CTL (0x21000)
+#define CPT_AF_EXE_DBG_CNTX(a) (0x22000ull | (u64)(a) << 3)
+#define CPT_AF_EXE_PERF_EVENT_CNT (0x23000)
+#define CPT_AF_EXE_EPCI_INBX_CNT(a) (0x24000ull | (u64)(a) << 3)
+#define CPT_AF_EXE_EPCI_OUTBX_CNT(a) (0x25000ull | (u64)(a) << 3)
+#define CPT_AF_EXEX_UCODE_BASE(a) (0x26000ull | (u64)(a) << 3)
+#define CPT_AF_LFX_CTL(a) (0x27000ull | (u64)(a) << 3)
+#define CPT_AF_LFX_CTL2(a) (0x29000ull | (u64)(a) << 3)
+#define CPT_AF_CPTCLK_CNT (0x2a000)
+#define CPT_AF_PF_FUNC (0x2b000)
+#define CPT_AF_LFX_PTR_CTL(a) (0x2c000ull | (u64)(a) << 3)
+#define CPT_AF_GRPX_THR(a) (0x2d000ull | (u64)(a) << 3)
+#define CPT_AF_CTL (0x2e000ull)
+#define CPT_AF_XEX_THR(a) (0x2f000ull | (u64)(a) << 3)
+#define CPT_PRIV_LFX_CFG (0x41000)
+#define CPT_PRIV_AF_INT_CFG (0x42000)
+#define CPT_PRIV_LFX_INT_CFG (0x43000)
+#define CPT_AF_LF_RST (0x44000)
+#define CPT_AF_RVU_LF_CFG_DEBUG (0x45000)
+#define CPT_AF_BLK_RST (0x46000)
+#define CPT_AF_RVU_INT (0x47000)
+#define CPT_AF_RVU_INT_W1S (0x47008)
+#define CPT_AF_RVU_INT_ENA_W1S (0x47010)
+#define CPT_AF_RVU_INT_ENA_W1C (0x47018)
+#define CPT_AF_RAS_INT (0x47020)
+#define CPT_AF_RAS_INT_W1S (0x47028)
+#define CPT_AF_RAS_INT_ENA_W1S (0x47030)
+#define CPT_AF_RAS_INT_ENA_W1C (0x47038)
+#define CPT_AF_CTX_FLUSH_TIMER (0x48000ull)
+#define CPT_AF_CTX_ERR (0x48008ull)
+#define CPT_AF_CTX_ENC_ID (0x48010ull)
+#define CPT_AF_CTX_MIS_PC (0x49400ull)
+#define CPT_AF_CTX_HIT_PC (0x49408ull)
+#define CPT_AF_CTX_AOP_PC (0x49410ull)
+#define CPT_AF_CTX_AOP_LATENCY_PC (0x49418ull)
+#define CPT_AF_CTX_IFETCH_PC (0x49420ull)
+#define CPT_AF_CTX_IFETCH_LATENCY_PC (0x49428ull)
+#define CPT_AF_CTX_FFETCH_PC (0x49430ull)
+#define CPT_AF_CTX_FFETCH_LATENCY_PC (0x49438ull)
+#define CPT_AF_CTX_WBACK_PC (0x49440ull)
+#define CPT_AF_CTX_WBACK_LATENCY_PC (0x49448ull)
+#define CPT_AF_CTX_PSH_PC (0x49450ull)
+#define CPT_AF_CTX_PSH_LATENCY_PC (0x49458ull)
+#define CPT_AF_CTX_CAM_DATA(a) (0x49800ull | (u64)(a) << 3)
+#define CPT_AF_RXC_TIME (0x50010ull)
+#define CPT_AF_RXC_TIME_CFG (0x50018ull)
+#define CPT_AF_RXC_DFRG (0x50020ull)
+#define CPT_AF_RXC_ACTIVE_STS (0x50028ull)
+#define CPT_AF_RXC_ZOMBIE_STS (0x50030ull)
+#define CPT_AF_X2PX_LINK_CFG(a) (0x51000ull | (u64)(a) << 3)
+
+#define AF_BAR2_ALIASX(a, b) (0x9100000ull | (a) << 12 | (b))
+#define CPT_AF_BAR2_SEL 0x9000000
+#define CPT_AF_BAR2_ALIASX(a, b) AF_BAR2_ALIASX(a, b)
+
+#define CPT_AF_LF_CTL2_SHIFT 3
+#define CPT_AF_LF_SSO_PF_FUNC_SHIFT 32
+
+#define CPT_LF_CTL 0x10
+#define CPT_LF_INPROG 0x40
+#define CPT_LF_Q_GRP_PTR 0x120
+#define CPT_LF_CTX_FLUSH 0x510
+
+#define NPC_AF_BLK_RST (0x00040)
+
+/* NPC */
+#define NPC_AF_CFG (0x00000)
+#define NPC_AF_ACTIVE_PC (0x00010)
+#define NPC_AF_CONST (0x00020)
+#define NPC_AF_CONST1 (0x00030)
+#define NPC_AF_BLK_RST (0x00040)
+#define NPC_AF_MCAM_SCRUB_CTL (0x000a0)
+#define NPC_AF_KCAM_SCRUB_CTL (0x000b0)
+#define NPC_AF_CONST2 (0x00100)
+#define NPC_AF_CONST3 (0x00110)
+#define NPC_AF_KPUX_CFG(a) (0x00500 | (a) << 3)
+#define NPC_AF_PCK_CFG (0x00600)
+#define NPC_AF_PCK_DEF_OL2 (0x00610)
+#define NPC_AF_PCK_DEF_OIP4 (0x00620)
+#define NPC_AF_PCK_DEF_OIP6 (0x00630)
+#define NPC_AF_PCK_DEF_IIP4 (0x00640)
+#define NPC_AF_INTFX_HASHX_RESULT_CTRL(a, b) (0x006c0 | (a) << 4 | (b) << 3)
+#define NPC_AF_INTFX_HASHX_MASKX(a, b, c) (0x00700 | (a) << 5 | (b) << 4 | (c) << 3)
+#define NPC_AF_KEX_LDATAX_FLAGS_CFG(a) (0x00800 | (a) << 3)
+#define NPC_AF_INTFX_HASHX_CFG(a, b) (0x00b00 | (a) << 6 | (b) << 4)
+#define NPC_AF_INTFX_SECRET_KEY0(a) (0x00e00 | (a) << 3)
+#define NPC_AF_INTFX_SECRET_KEY1(a) (0x00e20 | (a) << 3)
+#define NPC_AF_INTFX_SECRET_KEY2(a) (0x00e40 | (a) << 3)
+#define NPC_AF_INTFX_KEX_CFG(a) (0x01010 | (a) << 8)
+#define NPC_AF_PKINDX_ACTION0(a) (0x80000ull | (a) << 6)
+#define NPC_AF_PKINDX_ACTION1(a) (0x80008ull | (a) << 6)
+#define NPC_AF_PKINDX_CPI_DEFX(a, b) (0x80020ull | (a) << 6 | (b) << 3)
+#define NPC_AF_KPUX_ENTRYX_CAMX(a, b, c) \
+ (0x100000 | (a) << 14 | (b) << 6 | (c) << 3)
+#define NPC_AF_KPUX_ENTRYX_ACTION0(a, b) \
+ (0x100020 | (a) << 14 | (b) << 6)
+#define NPC_AF_KPUX_ENTRYX_ACTION1(a, b) \
+ (0x100028 | (a) << 14 | (b) << 6)
+#define NPC_AF_KPUX_ENTRY_DISX(a, b) (0x180000 | (a) << 6 | (b) << 3)
+#define NPC_AF_CPIX_CFG(a) (0x200000 | (a) << 3)
+#define NPC_AF_INTFX_LIDX_LTX_LDX_CFG(a, b, c, d) \
+ (0x900000 | (a) << 16 | (b) << 12 | (c) << 5 | (d) << 3)
+#define NPC_AF_INTFX_LDATAX_FLAGSX_CFG(a, b, c) \
+ (0x980000 | (a) << 16 | (b) << 12 | (c) << 3)
+#define NPC_AF_INTFX_MISS_STAT_ACT(a) (0x1880040 + (a) * 0x8)
+#define NPC_AF_INTFX_MISS_ACT(a) (0x1a00000 | (a) << 4)
+#define NPC_AF_INTFX_MISS_TAG_ACT(a) (0x1b00008 | (a) << 4)
+#define NPC_AF_MCAM_BANKX_HITX(a, b) (0x1c80000 | (a) << 8 | (b) << 4)
+#define NPC_AF_LKUP_CTL (0x2000000)
+#define NPC_AF_LKUP_DATAX(a) (0x2000200 | (a) << 4)
+#define NPC_AF_LKUP_RESULTX(a) (0x2000400 | (a) << 4)
+#define NPC_AF_INTFX_STAT(a) (0x2000800 | (a) << 4)
+#define NPC_AF_DBG_CTL (0x3000000)
+#define NPC_AF_DBG_STATUS (0x3000010)
+#define NPC_AF_KPUX_DBG(a) (0x3000020 | (a) << 8)
+#define NPC_AF_IKPU_ERR_CTL (0x3000080)
+#define NPC_AF_KPUX_ERR_CTL(a) (0x30000a0 | (a) << 8)
+#define NPC_AF_MCAM_DBG (0x3001000)
+#define NPC_AF_DBG_DATAX(a) (0x3001400 | (a) << 4)
+#define NPC_AF_DBG_RESULTX(a) (0x3001800 | (a) << 4)
+
+#define NPC_AF_EXACT_MEM_ENTRY(a, b) (0x300000 | (a) << 15 | (b) << 3)
+#define NPC_AF_EXACT_CAM_ENTRY(a) (0xC00 | (a) << 3)
+#define NPC_AF_INTFX_EXACT_MASK(a) (0x660 | (a) << 3)
+#define NPC_AF_INTFX_EXACT_RESULT_CTL(a) (0x680 | (a) << 3)
+#define NPC_AF_INTFX_EXACT_CFG(a) (0xA00 | (a) << 3)
+#define NPC_AF_INTFX_EXACT_SECRET0(a) (0xE00 | (a) << 3)
+#define NPC_AF_INTFX_EXACT_SECRET1(a) (0xE20 | (a) << 3)
+#define NPC_AF_INTFX_EXACT_SECRET2(a) (0xE40 | (a) << 3)
+
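+/* MCAM entry register offsets differ between the legacy and the extended
+ * (rvu->hw->npc_ext_set) layouts, hence the statement-expression macros
+ * below that pick the offset at runtime.
+ */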
+#define NPC_AF_MCAMEX_BANKX_CAMX_INTF(a, b, c) ({ \
+ u64 offset; \
+ \
+ offset = (0x1000000ull | (a) << 10 | (b) << 6 | (c) << 3); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000000ull | (a) << 8 | (b) << 22 | (c) << 3); \
+ offset; })
+
+#define NPC_AF_MCAMEX_BANKX_CAMX_W0(a, b, c) ({ \
+ u64 offset; \
+ \
+ offset = (0x1000010ull | (a) << 10 | (b) << 6 | (c) << 3); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000010ull | (a) << 8 | (b) << 22 | (c) << 3); \
+ offset; })
+
+#define NPC_AF_MCAMEX_BANKX_CAMX_W1(a, b, c) ({ \
+ u64 offset; \
+ \
+ offset = (0x1000020ull | (a) << 10 | (b) << 6 | (c) << 3); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000020ull | (a) << 8 | (b) << 22 | (c) << 3); \
+ offset; })
+
+#define NPC_AF_MCAMEX_BANKX_CFG(a, b) ({ \
+ u64 offset; \
+ \
+ offset = (0x1800000ull | (a) << 8 | (b) << 4); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000038ull | (a) << 8 | (b) << 22); \
+ offset; })
+
+#define NPC_AF_MCAMEX_BANKX_ACTION(a, b) ({ \
+ u64 offset; \
+ \
+ offset = (0x1900000ull | (a) << 8 | (b) << 4); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000040ull | (a) << 8 | (b) << 22); \
+ offset; }) \
+
+#define NPC_AF_MCAMEX_BANKX_TAG_ACT(a, b) ({ \
+ u64 offset; \
+ \
+ offset = (0x1900008ull | (a) << 8 | (b) << 4); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000048ull | (a) << 8 | (b) << 22); \
+ offset; }) \
+
+#define NPC_AF_MCAMEX_BANKX_STAT_ACT(a, b) ({ \
+ u64 offset; \
+ \
+ offset = (0x1880000ull | (a) << 8 | (b) << 4); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000050ull | (a) << 8 | (b) << 22); \
+ offset; }) \
+
+#define NPC_AF_MATCH_STATX(a) ({ \
+ u64 offset; \
+ \
+ offset = (0x1880008ull | (a) << 8); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000078ull | (a) << 8); \
+ offset; }) \
+
+/* NDC */
+#define NDC_AF_CONST (0x00000)
+#define NDC_AF_CLK_EN (0x00020)
+#define NDC_AF_CTL (0x00030)
+#define NDC_AF_BANK_CTL (0x00040)
+#define NDC_AF_BANK_CTL_DONE (0x00048)
+#define NDC_AF_INTR (0x00058)
+#define NDC_AF_INTR_W1S (0x00060)
+#define NDC_AF_INTR_ENA_W1S (0x00068)
+#define NDC_AF_INTR_ENA_W1C (0x00070)
+#define NDC_AF_ACTIVE_PC (0x00078)
+#define NDC_AF_CAMS_RD_INTERVAL (0x00080)
+#define NDC_AF_BP_TEST_ENABLE (0x001F8)
+#define NDC_AF_BP_TEST(a) (0x00200 | (a) << 3)
+#define NDC_AF_BLK_RST (0x002F0)
+#define NDC_PRIV_AF_INT_CFG (0x002F8)
+#define NDC_AF_HASHX(a) (0x00300 | (a) << 3)
+#define NDC_AF_PORTX_RTX_RWX_REQ_PC(a, b, c) \
+ (0x00C00 | (a) << 5 | (b) << 4 | (c) << 3)
+#define NDC_AF_PORTX_RTX_RWX_OSTDN_PC(a, b, c) \
+ (0x00D00 | (a) << 5 | (b) << 4 | (c) << 3)
+#define NDC_AF_PORTX_RTX_RWX_LAT_PC(a, b, c) \
+ (0x00E00 | (a) << 5 | (b) << 4 | (c) << 3)
+#define NDC_AF_PORTX_RTX_CANT_ALLOC_PC(a, b) \
+ (0x00F00 | (a) << 5 | (b) << 4)
+#define NDC_AF_BANKX_HIT_PC(a) (0x01000 | (a) << 3)
+#define NDC_AF_BANKX_MISS_PC(a) (0x01100 | (a) << 3)
+#define NDC_AF_BANKX_LINEX_METADATA(a, b) \
+ (0x10000 | (a) << 12 | (b) << 3)
+
+/* LBK */
+#define LBK_CONST (0x10ull)
+#define LBK_LINK_CFG_P2X (0x400ull)
+#define LBK_LINK_CFG_X2P (0x408ull)
+#define LBK_CONST_CHANS GENMASK_ULL(47, 32)
+#define LBK_CONST_DST GENMASK_ULL(31, 28)
+#define LBK_CONST_SRC GENMASK_ULL(27, 24)
+#define LBK_CONST_BUF_SIZE GENMASK_ULL(23, 0)
+#define LBK_LINK_CFG_RANGE_MASK GENMASK_ULL(19, 16)
+#define LBK_LINK_CFG_ID_MASK GENMASK_ULL(11, 6)
+#define LBK_LINK_CFG_BASE_MASK GENMASK_ULL(5, 0)
+
+/* APR */
+#define APR_AF_LMT_CFG (0x000ull)
+#define APR_AF_LMT_MAP_BASE (0x008ull)
+#define APR_AF_LMT_CTL (0x010ull)
+#define APR_LMT_MAP_ENT_DIS_SCH_CMP_SHIFT 23
+#define APR_LMT_MAP_ENT_SCH_ENA_SHIFT 22
+#define APR_LMT_MAP_ENT_DIS_LINE_PREF_SHIFT 21
+
+#endif /* RVU_REG_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
new file mode 100644
index 000000000..ae50d5625
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include "rvu.h"
+
+/* SDP PF device id */
+#define PCI_DEVID_OTX2_SDP_PF 0xA0F6
+
+/* Maximum SDP blocks in a chip */
+#define MAX_SDP 2
+
+/* SDP PF number */
+static int sdp_pf_num[MAX_SDP] = {-1, -1};
+
+bool is_sdp_pfvf(u16 pcifunc)
+{
+	u16 pf = rvu_get_pf(pcifunc);
+	u32 i;
+
+	for (i = 0; i < MAX_SDP; i++) {
+		if (pf == sdp_pf_num[i])
+			return true;
+	}
+
+	return false;
+}
+
+bool is_sdp_pf(u16 pcifunc)
+{
+ return (is_sdp_pfvf(pcifunc) &&
+ !(pcifunc & RVU_PFVF_FUNC_MASK));
+}
+
+bool is_sdp_vf(u16 pcifunc)
+{
+ return (is_sdp_pfvf(pcifunc) &&
+ !!(pcifunc & RVU_PFVF_FUNC_MASK));
+}
+
+int rvu_sdp_init(struct rvu *rvu)
+{
+ struct pci_dev *pdev = NULL;
+ struct rvu_pfvf *pfvf;
+ u32 i = 0;
+
+ while ((i < MAX_SDP) && (pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OTX2_SDP_PF,
+ pdev)) != NULL) {
+ /* The RVU PF number is one less than bus number */
+ sdp_pf_num[i] = pdev->bus->number - 1;
+ pfvf = &rvu->pf[sdp_pf_num[i]];
+
+ pfvf->sdp_info = devm_kzalloc(rvu->dev,
+ sizeof(struct sdp_node_info),
+ GFP_KERNEL);
+ if (!pfvf->sdp_info) {
+ pci_dev_put(pdev);
+ return -ENOMEM;
+ }
+
+ dev_info(rvu->dev, "SDP PF number:%d\n", sdp_pf_num[i]);
+
+ i++;
+ }
+
+ pci_dev_put(pdev);
+
+ return 0;
+}
+
+int
+rvu_mbox_handler_set_sdp_chan_info(struct rvu *rvu,
+ struct sdp_chan_info_msg *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+
+ memcpy(pfvf->sdp_info, &req->info, sizeof(struct sdp_node_info));
+ dev_info(rvu->dev, "AF: SDP%d max_vfs %d num_pf_rings %d pf_srn %d\n",
+ req->info.node_id, req->info.max_vfs, req->info.num_pf_rings,
+ req->info.pf_srn);
+ return 0;
+}
+
+int
+rvu_mbox_handler_get_sdp_chan_info(struct rvu *rvu, struct msg_req *req,
+ struct sdp_get_chan_info_msg *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int blkaddr;
+
+ if (!hw->cap.programmable_chans) {
+ rsp->chan_base = NIX_CHAN_SDP_CH_START;
+ rsp->num_chan = NIX_CHAN_SDP_NUM_CHANS;
+ } else {
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ rsp->chan_base = hw->sdp_chan_base;
+ rsp->num_chan = rvu_read64(rvu, blkaddr, NIX_AF_CONST1) & 0xFFFUL;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
new file mode 100644
index 000000000..edc9367b1
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -0,0 +1,823 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#ifndef RVU_STRUCT_H
+#define RVU_STRUCT_H
+
+/* RVU Block revision IDs */
+#define RVU_BLK_RVUM_REVID 0x01
+
+#define RVU_MULTI_BLK_VER 0x7ULL
+
+/* RVU Block Address Enumeration */
+enum rvu_block_addr_e {
+ BLKADDR_RVUM = 0x0ULL,
+ BLKADDR_LMT = 0x1ULL,
+ BLKADDR_MSIX = 0x2ULL,
+ BLKADDR_NPA = 0x3ULL,
+ BLKADDR_NIX0 = 0x4ULL,
+ BLKADDR_NIX1 = 0x5ULL,
+ BLKADDR_NPC = 0x6ULL,
+ BLKADDR_SSO = 0x7ULL,
+ BLKADDR_SSOW = 0x8ULL,
+ BLKADDR_TIM = 0x9ULL,
+ BLKADDR_CPT0 = 0xaULL,
+ BLKADDR_CPT1 = 0xbULL,
+ BLKADDR_NDC_NIX0_RX = 0xcULL,
+ BLKADDR_NDC_NIX0_TX = 0xdULL,
+ BLKADDR_NDC_NPA0 = 0xeULL,
+ BLKADDR_NDC_NIX1_RX = 0x10ULL,
+ BLKADDR_NDC_NIX1_TX = 0x11ULL,
+ BLKADDR_APR = 0x16ULL,
+ BLK_COUNT = 0x17ULL,
+};
+
+/* RVU Block Type Enumeration */
+enum rvu_block_type_e {
+ BLKTYPE_RVUM = 0x0,
+ BLKTYPE_MSIX = 0x1,
+ BLKTYPE_LMT = 0x2,
+ BLKTYPE_NIX = 0x3,
+ BLKTYPE_NPA = 0x4,
+ BLKTYPE_NPC = 0x5,
+ BLKTYPE_SSO = 0x6,
+ BLKTYPE_SSOW = 0x7,
+ BLKTYPE_TIM = 0x8,
+ BLKTYPE_CPT = 0x9,
+ BLKTYPE_NDC = 0xa,
+ BLKTYPE_MAX = 0xa,
+};
+
+/* RVU Admin function Interrupt Vector Enumeration */
+enum rvu_af_int_vec_e {
+ RVU_AF_INT_VEC_POISON = 0x0,
+ RVU_AF_INT_VEC_PFFLR = 0x1,
+ RVU_AF_INT_VEC_PFME = 0x2,
+ RVU_AF_INT_VEC_GEN = 0x3,
+ RVU_AF_INT_VEC_MBOX = 0x4,
+ RVU_AF_INT_VEC_CNT = 0x5,
+};
+
+/* CPT Admin function Interrupt Vector Enumeration */
+enum cpt_af_int_vec_e {
+ CPT_AF_INT_VEC_FLT0 = 0x0,
+ CPT_AF_INT_VEC_FLT1 = 0x1,
+ CPT_AF_INT_VEC_RVU = 0x2,
+ CPT_AF_INT_VEC_RAS = 0x3,
+ CPT_AF_INT_VEC_CNT = 0x4,
+};
+
+enum cpt_10k_af_int_vec_e {
+ CPT_10K_AF_INT_VEC_FLT0 = 0x0,
+ CPT_10K_AF_INT_VEC_FLT1 = 0x1,
+ CPT_10K_AF_INT_VEC_FLT2 = 0x2,
+ CPT_10K_AF_INT_VEC_RVU = 0x3,
+ CPT_10K_AF_INT_VEC_RAS = 0x4,
+ CPT_10K_AF_INT_VEC_CNT = 0x5,
+};
+
+/* NPA Admin function Interrupt Vector Enumeration */
+enum npa_af_int_vec_e {
+ NPA_AF_INT_VEC_RVU = 0x0,
+ NPA_AF_INT_VEC_GEN = 0x1,
+ NPA_AF_INT_VEC_AQ_DONE = 0x2,
+ NPA_AF_INT_VEC_AF_ERR = 0x3,
+ NPA_AF_INT_VEC_POISON = 0x4,
+ NPA_AF_INT_VEC_CNT = 0x5,
+};
+
+/* NIX Admin function Interrupt Vector Enumeration */
+enum nix_af_int_vec_e {
+ NIX_AF_INT_VEC_RVU = 0x0,
+ NIX_AF_INT_VEC_GEN = 0x1,
+ NIX_AF_INT_VEC_AQ_DONE = 0x2,
+ NIX_AF_INT_VEC_AF_ERR = 0x3,
+ NIX_AF_INT_VEC_POISON = 0x4,
+ NIX_AF_INT_VEC_CNT = 0x5,
+};
+
+/* RVU PF Interrupt Vector Enumeration */
+enum rvu_pf_int_vec_e {
+ RVU_PF_INT_VEC_VFFLR0 = 0x0,
+ RVU_PF_INT_VEC_VFFLR1 = 0x1,
+ RVU_PF_INT_VEC_VFME0 = 0x2,
+ RVU_PF_INT_VEC_VFME1 = 0x3,
+ RVU_PF_INT_VEC_VFPF_MBOX0 = 0x4,
+ RVU_PF_INT_VEC_VFPF_MBOX1 = 0x5,
+ RVU_PF_INT_VEC_AFPF_MBOX = 0x6,
+ RVU_PF_INT_VEC_CNT = 0x7,
+};
+
+/* NPA admin queue completion enumeration */
+enum npa_aq_comp {
+ NPA_AQ_COMP_NOTDONE = 0x0,
+ NPA_AQ_COMP_GOOD = 0x1,
+ NPA_AQ_COMP_SWERR = 0x2,
+ NPA_AQ_COMP_CTX_POISON = 0x3,
+ NPA_AQ_COMP_CTX_FAULT = 0x4,
+ NPA_AQ_COMP_LOCKERR = 0x5,
+};
+
+/* NPA admin queue context types */
+enum npa_aq_ctype {
+ NPA_AQ_CTYPE_AURA = 0x0,
+ NPA_AQ_CTYPE_POOL = 0x1,
+};
+
+/* NPA admin queue instruction opcodes */
+enum npa_aq_instop {
+ NPA_AQ_INSTOP_NOP = 0x0,
+ NPA_AQ_INSTOP_INIT = 0x1,
+ NPA_AQ_INSTOP_WRITE = 0x2,
+ NPA_AQ_INSTOP_READ = 0x3,
+ NPA_AQ_INSTOP_LOCK = 0x4,
+ NPA_AQ_INSTOP_UNLOCK = 0x5,
+};
+
+/* ALLOC/FREE input queue enumeration from coprocessors */
+enum npa_inpq {
+ NPA_INPQ_NIX0_RX = 0x0,
+ NPA_INPQ_NIX0_TX = 0x1,
+ NPA_INPQ_NIX1_RX = 0x2,
+ NPA_INPQ_NIX1_TX = 0x3,
+ NPA_INPQ_SSO = 0x4,
+ NPA_INPQ_TIM = 0x5,
+ NPA_INPQ_DPI = 0x6,
+ NPA_INPQ_AURA_OP = 0xe,
+ NPA_INPQ_INTERNAL_RSV = 0xf,
+};
+
+/* NPA admin queue instruction structure */
+struct npa_aq_inst_s {
+ u64 op : 4; /* W0 */
+ u64 ctype : 4;
+ u64 lf : 9;
+ u64 reserved_17_23 : 7;
+ u64 cindex : 20;
+ u64 reserved_44_62 : 19;
+ u64 doneint : 1;
+ u64 res_addr; /* W1 */
+};
+
+/* NPA admin queue result structure */
+struct npa_aq_res_s {
+ u64 op : 4; /* W0 */
+ u64 ctype : 4;
+ u64 compcode : 8;
+ u64 doneint : 1;
+ u64 reserved_17_63 : 47;
+ u64 reserved_64_127; /* W1 */
+};
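+
+/* The two structures above describe one admin-queue transaction: software
+ * fills an npa_aq_inst_s with an opcode, context type, LF slot and context
+ * index, points res_addr at a result buffer, and hardware reports the
+ * outcome in the npa_aq_res_s compcode field. A minimal sketch of composing
+ * an instruction (npalf, aura_id and res_iova are placeholder names; the
+ * actual enqueue/doorbell/poll path lives elsewhere in the AF driver):
+ *
+ *	struct npa_aq_inst_s inst = { 0 };
+ *
+ *	inst.op = NPA_AQ_INSTOP_INIT;
+ *	inst.ctype = NPA_AQ_CTYPE_AURA;
+ *	inst.lf = npalf;		// target NPA LF
+ *	inst.cindex = aura_id;		// aura context to initialize
+ *	inst.res_addr = res_iova;	// IOVA of an npa_aq_res_s buffer
+ *
+ *	// After submission, wait for the result's compcode to change from
+ *	// NPA_AQ_COMP_NOTDONE to NPA_AQ_COMP_GOOD (or an error code).
+ */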
+
+struct npa_aura_s {
+ u64 pool_addr; /* W0 */
+ u64 ena : 1; /* W1 */
+ u64 reserved_65 : 2;
+ u64 pool_caching : 1;
+ u64 pool_way_mask : 16;
+ u64 avg_con : 9;
+ u64 reserved_93 : 1;
+ u64 pool_drop_ena : 1;
+ u64 aura_drop_ena : 1;
+ u64 bp_ena : 2;
+ u64 reserved_98_103 : 6;
+ u64 aura_drop : 8;
+ u64 shift : 6;
+ u64 reserved_118_119 : 2;
+ u64 avg_level : 8;
+ u64 count : 36; /* W2 */
+ u64 reserved_164_167 : 4;
+ u64 nix0_bpid : 9;
+ u64 reserved_177_179 : 3;
+ u64 nix1_bpid : 9;
+ u64 reserved_189_191 : 3;
+ u64 limit : 36; /* W3 */
+ u64 reserved_228_231 : 4;
+ u64 bp : 8;
+ u64 reserved_241_243 : 3;
+ u64 fc_be : 1;
+ u64 fc_ena : 1;
+ u64 fc_up_crossing : 1;
+ u64 fc_stype : 2;
+ u64 fc_hyst_bits : 4;
+ u64 reserved_252_255 : 4;
+ u64 fc_addr; /* W4 */
+ u64 pool_drop : 8; /* W5 */
+ u64 update_time : 16;
+ u64 err_int : 8;
+ u64 err_int_ena : 8;
+ u64 thresh_int : 1;
+ u64 thresh_int_ena : 1;
+ u64 thresh_up : 1;
+ u64 reserved_363 : 1;
+ u64 thresh_qint_idx : 7;
+ u64 reserved_371 : 1;
+ u64 err_qint_idx : 7;
+ u64 reserved_379_383 : 5;
+ u64 thresh : 36; /* W6*/
+ u64 rsvd_423_420 : 4;
+ u64 fc_msh_dst : 11;
+ u64 reserved_435_447 : 13;
+ u64 reserved_448_511; /* W7 */
+};
+
+struct npa_pool_s {
+ u64 stack_base; /* W0 */
+ u64 ena : 1;
+ u64 nat_align : 1;
+ u64 reserved_66_67 : 2;
+ u64 stack_caching : 1;
+ u64 reserved_70_71 : 3;
+ u64 stack_way_mask : 16;
+ u64 buf_offset : 12;
+ u64 reserved_100_103 : 4;
+ u64 buf_size : 11;
+ u64 reserved_115_127 : 13;
+ u64 stack_max_pages : 32;
+ u64 stack_pages : 32;
+ u64 op_pc : 48;
+ u64 reserved_240_255 : 16;
+ u64 stack_offset : 4;
+ u64 reserved_260_263 : 4;
+ u64 shift : 6;
+ u64 reserved_270_271 : 2;
+ u64 avg_level : 8;
+ u64 avg_con : 9;
+ u64 fc_ena : 1;
+ u64 fc_stype : 2;
+ u64 fc_hyst_bits : 4;
+ u64 fc_up_crossing : 1;
+ u64 fc_be : 1;
+ u64 reserved_298_299 : 2;
+ u64 update_time : 16;
+ u64 reserved_316_319 : 4;
+ u64 fc_addr; /* W5 */
+ u64 ptr_start; /* W6 */
+ u64 ptr_end; /* W7 */
+ u64 reserved_512_535 : 24;
+ u64 err_int : 8;
+ u64 err_int_ena : 8;
+ u64 thresh_int : 1;
+ u64 thresh_int_ena : 1;
+ u64 thresh_up : 1;
+ u64 reserved_555 : 1;
+ u64 thresh_qint_idx : 7;
+ u64 reserved_563 : 1;
+ u64 err_qint_idx : 7;
+ u64 reserved_571_575 : 5;
+ u64 thresh : 36;
+ u64 rsvd_615_612 : 4;
+ u64 fc_msh_dst : 11;
+ u64 reserved_627_639 : 13;
+ u64 reserved_640_703; /* W10 */
+ u64 reserved_704_767; /* W11 */
+ u64 reserved_768_831; /* W12 */
+ u64 reserved_832_895; /* W13 */
+ u64 reserved_896_959; /* W14 */
+ u64 reserved_960_1023; /* W15 */
+};
+
+/* NIX admin queue completion status */
+enum nix_aq_comp {
+ NIX_AQ_COMP_NOTDONE = 0x0,
+ NIX_AQ_COMP_GOOD = 0x1,
+ NIX_AQ_COMP_SWERR = 0x2,
+ NIX_AQ_COMP_CTX_POISON = 0x3,
+ NIX_AQ_COMP_CTX_FAULT = 0x4,
+ NIX_AQ_COMP_LOCKERR = 0x5,
+ NIX_AQ_COMP_SQB_ALLOC_FAIL = 0x6,
+};
+
+/* NIX admin queue context types */
+enum nix_aq_ctype {
+ NIX_AQ_CTYPE_RQ = 0x0,
+ NIX_AQ_CTYPE_SQ = 0x1,
+ NIX_AQ_CTYPE_CQ = 0x2,
+ NIX_AQ_CTYPE_MCE = 0x3,
+ NIX_AQ_CTYPE_RSS = 0x4,
+ NIX_AQ_CTYPE_DYNO = 0x5,
+ NIX_AQ_CTYPE_BANDPROF = 0x6,
+};
+
+/* NIX admin queue instruction opcodes */
+enum nix_aq_instop {
+ NIX_AQ_INSTOP_NOP = 0x0,
+ NIX_AQ_INSTOP_INIT = 0x1,
+ NIX_AQ_INSTOP_WRITE = 0x2,
+ NIX_AQ_INSTOP_READ = 0x3,
+ NIX_AQ_INSTOP_LOCK = 0x4,
+ NIX_AQ_INSTOP_UNLOCK = 0x5,
+};
+
+/* NIX admin queue instruction structure */
+struct nix_aq_inst_s {
+ u64 op : 4;
+ u64 ctype : 4;
+ u64 lf : 9;
+ u64 reserved_17_23 : 7;
+ u64 cindex : 20;
+ u64 reserved_44_62 : 19;
+ u64 doneint : 1;
+ u64 res_addr; /* W1 */
+};
+
+/* NIX admin queue result structure */
+struct nix_aq_res_s {
+ u64 op : 4;
+ u64 ctype : 4;
+ u64 compcode : 8;
+ u64 doneint : 1;
+ u64 reserved_17_63 : 47;
+ u64 reserved_64_127; /* W1 */
+};
+
+/* NIX Completion queue context structure */
+struct nix_cq_ctx_s {
+ u64 base;
+ u64 rsvd_64_67 : 4;
+ u64 bp_ena : 1;
+ u64 rsvd_69_71 : 3;
+ u64 bpid : 9;
+ u64 rsvd_81_83 : 3;
+ u64 qint_idx : 7;
+ u64 cq_err : 1;
+ u64 cint_idx : 7;
+ u64 avg_con : 9;
+ u64 wrptr : 20;
+ u64 tail : 20;
+ u64 head : 20;
+ u64 avg_level : 8;
+ u64 update_time : 16;
+ u64 bp : 8;
+ u64 drop : 8;
+ u64 drop_ena : 1;
+ u64 ena : 1;
+ u64 rsvd_210_211 : 2;
+ u64 substream : 20;
+ u64 caching : 1;
+ u64 rsvd_233_235 : 3;
+ u64 qsize : 4;
+ u64 cq_err_int : 8;
+ u64 cq_err_int_ena : 8;
+};
+
+/* CN10K NIX Receive queue context structure */
+struct nix_cn10k_rq_ctx_s {
+ u64 ena : 1;
+ u64 sso_ena : 1;
+ u64 ipsech_ena : 1;
+ u64 ena_wqwd : 1;
+ u64 cq : 20;
+ u64 rsvd_36_24 : 13;
+ u64 lenerr_dis : 1;
+ u64 csum_il4_dis : 1;
+ u64 csum_ol4_dis : 1;
+ u64 len_il4_dis : 1;
+ u64 len_il3_dis : 1;
+ u64 len_ol4_dis : 1;
+ u64 len_ol3_dis : 1;
+ u64 wqe_aura : 20;
+ u64 spb_aura : 20;
+ u64 lpb_aura : 20;
+ u64 sso_grp : 10;
+ u64 sso_tt : 2;
+ u64 pb_caching : 2;
+ u64 wqe_caching : 1;
+ u64 xqe_drop_ena : 1;
+ u64 spb_drop_ena : 1;
+ u64 lpb_drop_ena : 1;
+ u64 pb_stashing : 1;
+ u64 ipsecd_drop_ena : 1;
+ u64 chi_ena : 1;
+ u64 rsvd_127_125 : 3;
+ u64 band_prof_id : 10; /* W2 */
+ u64 rsvd_138 : 1;
+ u64 policer_ena : 1;
+ u64 spb_sizem1 : 6;
+ u64 wqe_skip : 2;
+ u64 rsvd_150_148 : 3;
+ u64 spb_ena : 1;
+ u64 lpb_sizem1 : 12;
+ u64 first_skip : 7;
+ u64 rsvd_171 : 1;
+ u64 later_skip : 6;
+ u64 xqe_imm_size : 6;
+ u64 rsvd_189_184 : 6;
+ u64 xqe_imm_copy : 1;
+ u64 xqe_hdr_split : 1;
+ u64 xqe_drop : 8; /* W3 */
+ u64 xqe_pass : 8;
+ u64 wqe_pool_drop : 8;
+ u64 wqe_pool_pass : 8;
+ u64 spb_aura_drop : 8;
+ u64 spb_aura_pass : 8;
+ u64 spb_pool_drop : 8;
+ u64 spb_pool_pass : 8;
+ u64 lpb_aura_drop : 8; /* W4 */
+ u64 lpb_aura_pass : 8;
+ u64 lpb_pool_drop : 8;
+ u64 lpb_pool_pass : 8;
+ u64 rsvd_291_288 : 4;
+ u64 rq_int : 8;
+ u64 rq_int_ena : 8;
+ u64 qint_idx : 7;
+ u64 rsvd_319_315 : 5;
+ u64 ltag : 24; /* W5 */
+ u64 good_utag : 8;
+ u64 bad_utag : 8;
+ u64 flow_tagw : 6;
+ u64 ipsec_vwqe : 1;
+ u64 vwqe_ena : 1;
+ u64 vwqe_wait : 8;
+ u64 max_vsize_exp : 4;
+ u64 vwqe_skip : 2;
+ u64 rsvd_383_382 : 2;
+ u64 octs : 48; /* W6 */
+ u64 rsvd_447_432 : 16;
+ u64 pkts : 48; /* W7 */
+ u64 rsvd_511_496 : 16;
+ u64 drop_octs : 48; /* W8 */
+ u64 rsvd_575_560 : 16;
+ u64 drop_pkts : 48; /* W9 */
+ u64 rsvd_639_624 : 16;
+ u64 re_pkts : 48; /* W10 */
+ u64 rsvd_703_688 : 16;
+ u64 rsvd_767_704; /* W11 */
+ u64 rsvd_831_768; /* W12 */
+ u64 rsvd_895_832; /* W13 */
+ u64 rsvd_959_896; /* W14 */
+ u64 rsvd_1023_960; /* W15 */
+};
+
+/* CN10K NIX Send queue context structure */
+struct nix_cn10k_sq_ctx_s {
+ u64 ena : 1;
+ u64 qint_idx : 6;
+ u64 substream : 20;
+ u64 sdp_mcast : 1;
+ u64 cq : 20;
+ u64 sqe_way_mask : 16;
+ u64 smq : 10; /* W1 */
+ u64 cq_ena : 1;
+ u64 xoff : 1;
+ u64 sso_ena : 1;
+ u64 smq_rr_weight : 14;
+ u64 default_chan : 12;
+ u64 sqb_count : 16;
+ u64 rsvd_120_119 : 2;
+ u64 smq_rr_count_lb : 7;
+ u64 smq_rr_count_ub : 25; /* W2 */
+ u64 sqb_aura : 20;
+ u64 sq_int : 8;
+ u64 sq_int_ena : 8;
+ u64 sqe_stype : 2;
+ u64 rsvd_191 : 1;
+ u64 max_sqe_size : 2; /* W3 */
+ u64 cq_limit : 8;
+ u64 lmt_dis : 1;
+ u64 mnq_dis : 1;
+ u64 smq_next_sq : 20;
+ u64 smq_lso_segnum : 8;
+ u64 tail_offset : 6;
+ u64 smenq_offset : 6;
+ u64 head_offset : 6;
+ u64 smenq_next_sqb_vld : 1;
+ u64 smq_pend : 1;
+ u64 smq_next_sq_vld : 1;
+ u64 rsvd_255_253 : 3;
+ u64 next_sqb : 64; /* W4 */
+ u64 tail_sqb : 64; /* W5 */
+ u64 smenq_sqb : 64; /* W6 */
+ u64 smenq_next_sqb : 64; /* W7 */
+ u64 head_sqb : 64; /* W8 */
+ u64 rsvd_583_576 : 8; /* W9 */
+ u64 vfi_lso_total : 18;
+ u64 vfi_lso_sizem1 : 3;
+ u64 vfi_lso_sb : 8;
+ u64 vfi_lso_mps : 14;
+ u64 vfi_lso_vlan0_ins_ena : 1;
+ u64 vfi_lso_vlan1_ins_ena : 1;
+ u64 vfi_lso_vld : 1;
+ u64 rsvd_639_630 : 10;
+ u64 scm_lso_rem : 18; /* W10 */
+ u64 rsvd_703_658 : 46;
+ u64 octs : 48; /* W11 */
+ u64 rsvd_767_752 : 16;
+ u64 pkts : 48; /* W12 */
+ u64 rsvd_831_816 : 16;
+ u64 rsvd_895_832 : 64; /* W13 */
+ u64 dropped_octs : 48;
+ u64 rsvd_959_944 : 16;
+ u64 dropped_pkts : 48;
+ u64 rsvd_1023_1008 : 16;
+};
+
+/* NIX Receive queue context structure */
+struct nix_rq_ctx_s {
+ u64 ena : 1;
+ u64 sso_ena : 1;
+ u64 ipsech_ena : 1;
+ u64 ena_wqwd : 1;
+ u64 cq : 20;
+ u64 substream : 20;
+ u64 wqe_aura : 20;
+ u64 spb_aura : 20;
+ u64 lpb_aura : 20;
+ u64 sso_grp : 10;
+ u64 sso_tt : 2;
+ u64 pb_caching : 2;
+ u64 wqe_caching : 1;
+ u64 xqe_drop_ena : 1;
+ u64 spb_drop_ena : 1;
+ u64 lpb_drop_ena : 1;
+ u64 rsvd_127_122 : 6;
+ u64 rsvd_139_128 : 12; /* W2 */
+ u64 spb_sizem1 : 6;
+ u64 wqe_skip : 2;
+ u64 rsvd_150_148 : 3;
+ u64 spb_ena : 1;
+ u64 lpb_sizem1 : 12;
+ u64 first_skip : 7;
+ u64 rsvd_171 : 1;
+ u64 later_skip : 6;
+ u64 xqe_imm_size : 6;
+ u64 rsvd_189_184 : 6;
+ u64 xqe_imm_copy : 1;
+ u64 xqe_hdr_split : 1;
+ u64 xqe_drop : 8; /* W3*/
+ u64 xqe_pass : 8;
+ u64 wqe_pool_drop : 8;
+ u64 wqe_pool_pass : 8;
+ u64 spb_aura_drop : 8;
+ u64 spb_aura_pass : 8;
+ u64 spb_pool_drop : 8;
+ u64 spb_pool_pass : 8;
+ u64 lpb_aura_drop : 8; /* W4 */
+ u64 lpb_aura_pass : 8;
+ u64 lpb_pool_drop : 8;
+ u64 lpb_pool_pass : 8;
+ u64 rsvd_291_288 : 4;
+ u64 rq_int : 8;
+ u64 rq_int_ena : 8;
+ u64 qint_idx : 7;
+ u64 rsvd_319_315 : 5;
+ u64 ltag : 24; /* W5 */
+ u64 good_utag : 8;
+ u64 bad_utag : 8;
+ u64 flow_tagw : 6;
+ u64 rsvd_383_366 : 18;
+ u64 octs : 48; /* W6 */
+ u64 rsvd_447_432 : 16;
+ u64 pkts : 48; /* W7 */
+ u64 rsvd_511_496 : 16;
+ u64 drop_octs : 48; /* W8 */
+ u64 rsvd_575_560 : 16;
+ u64 drop_pkts : 48; /* W9 */
+ u64 rsvd_639_624 : 16;
+ u64 re_pkts : 48; /* W10 */
+ u64 rsvd_703_688 : 16;
+ u64 rsvd_767_704; /* W11 */
+ u64 rsvd_831_768; /* W12 */
+ u64 rsvd_895_832; /* W13 */
+ u64 rsvd_959_896; /* W14 */
+ u64 rsvd_1023_960; /* W15 */
+};
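+
+/* The structure above is the legacy (pre-CN10K) RQ context. It largely
+ * parallels nix_cn10k_rq_ctx_s defined earlier, minus the CN10K-only
+ * additions such as the per-RQ policer hook (band_prof_id/policer_ena)
+ * and the virtual-WQE (vwqe_*) controls.
+ */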
+
+/* NIX sqe sizes */
+enum nix_maxsqesz {
+ NIX_MAXSQESZ_W16 = 0x0,
+ NIX_MAXSQESZ_W8 = 0x1,
+};
+
+/* NIX SQB caching type */
+enum nix_stype {
+ NIX_STYPE_STF = 0x0,
+ NIX_STYPE_STT = 0x1,
+ NIX_STYPE_STP = 0x2,
+};
+
+/* NIX Send queue context structure */
+struct nix_sq_ctx_s {
+ u64 ena : 1;
+ u64 qint_idx : 6;
+ u64 substream : 20;
+ u64 sdp_mcast : 1;
+ u64 cq : 20;
+ u64 sqe_way_mask : 16;
+ u64 smq : 9;
+ u64 cq_ena : 1;
+ u64 xoff : 1;
+ u64 sso_ena : 1;
+ u64 smq_rr_quantum : 24;
+ u64 default_chan : 12;
+ u64 sqb_count : 16;
+ u64 smq_rr_count : 25;
+ u64 sqb_aura : 20;
+ u64 sq_int : 8;
+ u64 sq_int_ena : 8;
+ u64 sqe_stype : 2;
+ u64 rsvd_191 : 1;
+ u64 max_sqe_size : 2;
+ u64 cq_limit : 8;
+ u64 lmt_dis : 1;
+ u64 mnq_dis : 1;
+ u64 smq_next_sq : 20;
+ u64 smq_lso_segnum : 8;
+ u64 tail_offset : 6;
+ u64 smenq_offset : 6;
+ u64 head_offset : 6;
+ u64 smenq_next_sqb_vld : 1;
+ u64 smq_pend : 1;
+ u64 smq_next_sq_vld : 1;
+ u64 rsvd_255_253 : 3;
+ u64 next_sqb : 64;/* W4 */
+ u64 tail_sqb : 64;/* W5 */
+ u64 smenq_sqb : 64;/* W6 */
+ u64 smenq_next_sqb : 64;/* W7 */
+ u64 head_sqb : 64;/* W8 */
+ u64 rsvd_583_576 : 8;
+ u64 vfi_lso_total : 18;
+ u64 vfi_lso_sizem1 : 3;
+ u64 vfi_lso_sb : 8;
+ u64 vfi_lso_mps : 14;
+ u64 vfi_lso_vlan0_ins_ena : 1;
+ u64 vfi_lso_vlan1_ins_ena : 1;
+ u64 vfi_lso_vld : 1;
+ u64 rsvd_639_630 : 10;
+ u64 scm_lso_rem : 18;
+ u64 rsvd_703_658 : 46;
+ u64 octs : 48;
+ u64 rsvd_767_752 : 16;
+ u64 pkts : 48;
+ u64 rsvd_831_816 : 16;
+ u64 rsvd_895_832 : 64;/* W13 */
+ u64 dropped_octs : 48;
+ u64 rsvd_959_944 : 16;
+ u64 dropped_pkts : 48;
+ u64 rsvd_1023_1008 : 16;
+};
+
+/* NIX Receive side scaling entry structure */
+struct nix_rsse_s {
+ uint32_t rq : 20;
+ uint32_t reserved_20_31 : 12;
+};
+
+/* NIX receive multicast/mirror entry structure */
+struct nix_rx_mce_s {
+ uint64_t op : 2;
+ uint64_t rsvd_2 : 1;
+ uint64_t eol : 1;
+ uint64_t index : 20;
+ uint64_t rsvd_31_24 : 8;
+ uint64_t pf_func : 16;
+ uint64_t next : 16;
+};
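+
+/* Multicast/mirror replication chains these entries together: each MCE
+ * names a destination pf_func, "next" holds the index of the following
+ * entry and "eol" marks the end of the list. A sketch of walking such a
+ * chain (read_mce() and deliver_to() are placeholder helpers):
+ *
+ *	struct nix_rx_mce_s mce;
+ *	u32 idx = start_index;
+ *
+ *	do {
+ *		mce = read_mce(idx);
+ *		deliver_to(mce.pf_func);
+ *		idx = mce.next;
+ *	} while (!mce.eol);
+ */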
+
+enum nix_band_prof_layers {
+ BAND_PROF_LEAF_LAYER = 0,
+ BAND_PROF_INVAL_LAYER = 1,
+ BAND_PROF_MID_LAYER = 2,
+ BAND_PROF_TOP_LAYER = 3,
+ BAND_PROF_NUM_LAYERS = 4,
+};
+
+enum NIX_RX_BAND_PROF_ACTIONRESULT_E {
+ NIX_RX_BAND_PROF_ACTIONRESULT_PASS = 0x0,
+ NIX_RX_BAND_PROF_ACTIONRESULT_DROP = 0x1,
+ NIX_RX_BAND_PROF_ACTIONRESULT_RED = 0x2,
+};
+
+enum nix_band_prof_pc_mode {
+ NIX_RX_PC_MODE_VLAN = 0,
+ NIX_RX_PC_MODE_DSCP = 1,
+ NIX_RX_PC_MODE_GEN = 2,
+ NIX_RX_PC_MODE_RSVD = 3,
+};
+
+/* NIX ingress policer bandwidth profile structure */
+struct nix_bandprof_s {
+ uint64_t pc_mode : 2; /* W0 */
+ uint64_t icolor : 2;
+ uint64_t tnl_ena : 1;
+ uint64_t reserved_5_7 : 3;
+ uint64_t peir_exponent : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t pebs_exponent : 5;
+ uint64_t reserved_21_23 : 3;
+ uint64_t cir_exponent : 5;
+ uint64_t reserved_29_31 : 3;
+ uint64_t cbs_exponent : 5;
+ uint64_t reserved_37_39 : 3;
+ uint64_t peir_mantissa : 8;
+ uint64_t pebs_mantissa : 8;
+ uint64_t cir_mantissa : 8;
+ uint64_t cbs_mantissa : 8; /* W1 */
+ uint64_t lmode : 1;
+ uint64_t l_sellect : 3;
+ uint64_t rdiv : 4;
+ uint64_t adjust_exponent : 5;
+ uint64_t reserved_85_86 : 2;
+ uint64_t adjust_mantissa : 9;
+ uint64_t gc_action : 2;
+ uint64_t yc_action : 2;
+ uint64_t rc_action : 2;
+ uint64_t meter_algo : 2;
+ uint64_t band_prof_id : 7;
+ uint64_t reserved_111_118 : 8;
+ uint64_t hl_en : 1;
+ uint64_t reserved_120_127 : 8;
+ uint64_t ts : 48; /* W2 */
+ uint64_t reserved_176_191 : 16;
+ uint64_t pe_accum : 32; /* W3 */
+ uint64_t c_accum : 32;
+ uint64_t green_pkt_pass : 48; /* W4 */
+ uint64_t reserved_304_319 : 16;
+ uint64_t yellow_pkt_pass : 48; /* W5 */
+ uint64_t reserved_368_383 : 16;
+ uint64_t red_pkt_pass : 48; /* W6 */
+ uint64_t reserved_432_447 : 16;
+ uint64_t green_octs_pass : 48; /* W7 */
+ uint64_t reserved_496_511 : 16;
+ uint64_t yellow_octs_pass : 48; /* W8 */
+ uint64_t reserved_560_575 : 16;
+ uint64_t red_octs_pass : 48; /* W9 */
+ uint64_t reserved_624_639 : 16;
+ uint64_t green_pkt_drop : 48; /* W10 */
+ uint64_t reserved_688_703 : 16;
+ uint64_t yellow_pkt_drop : 48; /* W11 */
+ uint64_t reserved_752_767 : 16;
+ uint64_t red_pkt_drop : 48; /* W12 */
+ uint64_t reserved_816_831 : 16;
+ uint64_t green_octs_drop : 48; /* W13 */
+ uint64_t reserved_880_895 : 16;
+ uint64_t yellow_octs_drop : 48; /* W14 */
+ uint64_t reserved_944_959 : 16;
+ uint64_t red_octs_drop : 48; /* W15 */
+ uint64_t reserved_1008_1023 : 16;
+};
+
+enum nix_lsoalg {
+ NIX_LSOALG_NOP,
+ NIX_LSOALG_ADD_SEGNUM,
+ NIX_LSOALG_ADD_PAYLEN,
+ NIX_LSOALG_ADD_OFFSET,
+ NIX_LSOALG_TCP_FLAGS,
+};
+
+enum nix_txlayer {
+ NIX_TXLAYER_OL3,
+ NIX_TXLAYER_OL4,
+ NIX_TXLAYER_IL3,
+ NIX_TXLAYER_IL4,
+};
+
+struct nix_lso_format {
+ u64 offset : 8;
+ u64 layer : 2;
+ u64 rsvd_10_11 : 2;
+ u64 sizem1 : 2;
+ u64 rsvd_14_15 : 2;
+ u64 alg : 3;
+ u64 rsvd_19_63 : 45;
+};
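+
+/* Each nix_lso_format entry tells hardware how to rewrite one header field
+ * per generated segment: "layer" selects which parsed header "offset" is
+ * relative to, "sizem1" is the field width in bytes minus one, and "alg"
+ * picks the update from nix_lsoalg. A sketch of one entry that adds the
+ * running segment number to a 2-byte field in the outer L4 header (the
+ * offset of 4 is purely illustrative):
+ *
+ *	struct nix_lso_format fmt = {
+ *		.layer	= NIX_TXLAYER_OL4,
+ *		.offset	= 4,
+ *		.sizem1	= 1,	// 2-byte field
+ *		.alg	= NIX_LSOALG_ADD_SEGNUM,
+ *	};
+ */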
+
+struct nix_rx_flowkey_alg {
+ u64 key_offset :6;
+ u64 ln_mask :1;
+ u64 fn_mask :1;
+ u64 hdr_offset :8;
+ u64 bytesm1 :5;
+ u64 lid :3;
+ u64 reserved_24_24 :1;
+ u64 ena :1;
+ u64 sel_chan :1;
+ u64 ltype_mask :4;
+ u64 ltype_match :4;
+ u64 reserved_35_63 :29;
+};
+
+/* NIX VTAG size */
+enum nix_vtag_size {
+ VTAGSIZE_T4 = 0x0,
+ VTAGSIZE_T8 = 0x1,
+};
+
+enum nix_tx_vtag_op {
+ NOP = 0x0,
+ VTAG_INSERT = 0x1,
+ VTAG_REPLACE = 0x2,
+};
+
+/* NIX RX VTAG actions */
+#define VTAG_STRIP BIT_ULL(4)
+#define VTAG_CAPTURE BIT_ULL(5)
+
+#endif /* RVU_STRUCT_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
new file mode 100644
index 000000000..329b5a029
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include "rvu.h"
+
+static int rvu_switch_install_rx_rule(struct rvu *rvu, u16 pcifunc,
+ u16 chan_mask)
+{
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
+ struct rvu_pfvf *pfvf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+	/* If the pcifunc is not yet initialized there is nothing to do.
+	 * This function will be called again via rvu_switch_update_rules
+	 * once the pcifunc has been initialized.
+	 */
+ if (!test_bit(NIXLF_INITIALIZED, &pfvf->flags))
+ return 0;
+
+ ether_addr_copy(req.packet.dmac, pfvf->mac_addr);
+ eth_broadcast_addr((u8 *)&req.mask.dmac);
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = pcifunc;
+ req.features = BIT_ULL(NPC_DMAC);
+ req.channel = pfvf->rx_chan_base;
+ req.chan_mask = chan_mask;
+ req.intf = pfvf->nix_rx_intf;
+ req.op = NIX_RX_ACTION_DEFAULT;
+ req.default_rule = 1;
+
+ return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
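+/* The TX rule installed below matches transmitted packets whose DMAC is
+ * this PF/VF's own MAC address and redirects them to the loopback (LBK)
+ * channel instead of the wire: the action index packs the LBK block id in
+ * bits 8 and above and RVU_SWITCH_LBK_CHAN in the low bits. Together with
+ * the channel-agnostic RX rules, this lets traffic between local
+ * CGX-mapped PF/VFs be switched inside the chip.
+ */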
+static int rvu_switch_install_tx_rule(struct rvu *rvu, u16 pcifunc, u16 entry)
+{
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
+ struct rvu_pfvf *pfvf;
+ u8 lbkid;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+	/* If the pcifunc is not yet initialized there is nothing to do.
+	 * This function will be called again via rvu_switch_update_rules
+	 * once the pcifunc has been initialized.
+	 */
+ if (!test_bit(NIXLF_INITIALIZED, &pfvf->flags))
+ return 0;
+
+ lbkid = pfvf->nix_blkaddr == BLKADDR_NIX0 ? 0 : 1;
+ ether_addr_copy(req.packet.dmac, pfvf->mac_addr);
+ eth_broadcast_addr((u8 *)&req.mask.dmac);
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = pcifunc;
+ req.entry = entry;
+ req.features = BIT_ULL(NPC_DMAC);
+ req.intf = pfvf->nix_tx_intf;
+ req.op = NIX_TX_ACTIONOP_UCAST_CHAN;
+ req.index = (lbkid << 8) | RVU_SWITCH_LBK_CHAN;
+ req.set_cntr = 1;
+
+ return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
+static int rvu_switch_install_rules(struct rvu *rvu)
+{
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ u16 start = rswitch->start_entry;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc, entry = 0;
+ int pf, vf, numvfs;
+ int err;
+
+ for (pf = 1; pf < hw->total_pfs; pf++) {
+ if (!is_pf_cgxmapped(rvu, pf))
+ continue;
+
+ pcifunc = pf << 10;
+		/* rvu_get_nix_blkaddr sets up the corresponding NIX block
+		 * address and the NIX RX and TX interfaces for a pcifunc.
+		 * Normally it is called when a pcifunc attaches its NIX LFs,
+		 * but it is called here because rules are pre-installed
+		 * before any NIX LFs are attached.
+		 */
+ rvu_get_nix_blkaddr(rvu, pcifunc);
+
+		/* An MCAM RX rule for a PF/VF already exists as the default
+		 * unicast rule installed by the AF. Hence change the channel
+		 * in that rule to "ignore channel" so that packets with the
+		 * required DMAC are accepted whether they arrive from LBK
+		 * (i.e. from other PF/VFs in the system) or from the external
+		 * world (the wire).
+		 */
+ err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
+ if (err) {
+ dev_err(rvu->dev, "RX rule for PF%d failed(%d)\n",
+ pf, err);
+ return err;
+ }
+
+ err = rvu_switch_install_tx_rule(rvu, pcifunc, start + entry);
+ if (err) {
+ dev_err(rvu->dev, "TX rule for PF%d failed(%d)\n",
+ pf, err);
+ return err;
+ }
+
+ rswitch->entry2pcifunc[entry++] = pcifunc;
+
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
+ for (vf = 0; vf < numvfs; vf++) {
+ pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
+ rvu_get_nix_blkaddr(rvu, pcifunc);
+
+ err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
+ if (err) {
+ dev_err(rvu->dev,
+ "RX rule for PF%dVF%d failed(%d)\n",
+ pf, vf, err);
+ return err;
+ }
+
+ err = rvu_switch_install_tx_rule(rvu, pcifunc,
+ start + entry);
+ if (err) {
+ dev_err(rvu->dev,
+ "TX rule for PF%dVF%d failed(%d)\n",
+ pf, vf, err);
+ return err;
+ }
+
+ rswitch->entry2pcifunc[entry++] = pcifunc;
+ }
+ }
+
+ return 0;
+}
+
+void rvu_switch_enable(struct rvu *rvu)
+{
+ struct npc_mcam_alloc_entry_req alloc_req = { 0 };
+ struct npc_mcam_alloc_entry_rsp alloc_rsp = { 0 };
+ struct npc_delete_flow_req uninstall_req = { 0 };
+ struct npc_delete_flow_rsp uninstall_rsp = { 0 };
+ struct npc_mcam_free_entry_req free_req = { 0 };
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ struct msg_rsp rsp;
+ int ret;
+
+ alloc_req.contig = true;
+ alloc_req.count = rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs;
+ ret = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
+ &alloc_rsp);
+ if (ret) {
+ dev_err(rvu->dev,
+ "Unable to allocate MCAM entries\n");
+ goto exit;
+ }
+
+ if (alloc_rsp.count != alloc_req.count) {
+ dev_err(rvu->dev,
+ "Unable to allocate %d MCAM entries, got %d\n",
+ alloc_req.count, alloc_rsp.count);
+ goto free_entries;
+ }
+
+ rswitch->entry2pcifunc = kcalloc(alloc_req.count, sizeof(u16),
+ GFP_KERNEL);
+ if (!rswitch->entry2pcifunc)
+ goto free_entries;
+
+ rswitch->used_entries = alloc_rsp.count;
+ rswitch->start_entry = alloc_rsp.entry;
+
+ ret = rvu_switch_install_rules(rvu);
+ if (ret)
+ goto uninstall_rules;
+
+ return;
+
+uninstall_rules:
+ uninstall_req.start = rswitch->start_entry;
+ uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1;
+ rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp);
+ kfree(rswitch->entry2pcifunc);
+free_entries:
+ free_req.all = 1;
+ rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp);
+exit:
+ return;
+}
+
+void rvu_switch_disable(struct rvu *rvu)
+{
+ struct npc_delete_flow_req uninstall_req = { 0 };
+ struct npc_delete_flow_rsp uninstall_rsp = { 0 };
+ struct npc_mcam_free_entry_req free_req = { 0 };
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int pf, vf, numvfs;
+ struct msg_rsp rsp;
+ u16 pcifunc;
+ int err;
+
+ if (!rswitch->used_entries)
+ return;
+
+ for (pf = 1; pf < hw->total_pfs; pf++) {
+ if (!is_pf_cgxmapped(rvu, pf))
+ continue;
+
+ pcifunc = pf << 10;
+ err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF);
+ if (err)
+ dev_err(rvu->dev,
+ "Reverting RX rule for PF%d failed(%d)\n",
+ pf, err);
+
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
+ for (vf = 0; vf < numvfs; vf++) {
+ pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
+ err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF);
+ if (err)
+ dev_err(rvu->dev,
+ "Reverting RX rule for PF%dVF%d failed(%d)\n",
+ pf, vf, err);
+ }
+ }
+
+ uninstall_req.start = rswitch->start_entry;
+ uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1;
+ free_req.all = 1;
+ rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp);
+ rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp);
+ rswitch->used_entries = 0;
+ kfree(rswitch->entry2pcifunc);
+}
+
+void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc)
+{
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ u32 max = rswitch->used_entries;
+ u16 entry;
+
+ if (!rswitch->used_entries)
+ return;
+
+ for (entry = 0; entry < max; entry++) {
+ if (rswitch->entry2pcifunc[entry] == pcifunc)
+ break;
+ }
+
+ if (entry >= max)
+ return;
+
+ rvu_switch_install_tx_rule(rvu, pcifunc, rswitch->start_entry + entry);
+ rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c
new file mode 100644
index 000000000..775fd4c35
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#define CREATE_TRACE_POINTS
+#include "rvu_trace.h"
+
+EXPORT_TRACEPOINT_SYMBOL(otx2_msg_alloc);
+EXPORT_TRACEPOINT_SYMBOL(otx2_msg_interrupt);
+EXPORT_TRACEPOINT_SYMBOL(otx2_msg_process);
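+
+/* The exports above make the otx2_msg_alloc, otx2_msg_interrupt and
+ * otx2_msg_process tracepoints usable from other octeontx2 modules that
+ * share the mailbox code (e.g. the PF/VF netdev drivers), not only from
+ * the AF driver that instantiates them here. The events appear under the
+ * "rvu" trace system; a typical way to watch mailbox traffic from
+ * userspace is:
+ *
+ *	echo 1 > /sys/kernel/tracing/events/rvu/enable
+ *	cat /sys/kernel/tracing/trace_pipe
+ */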
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
new file mode 100644
index 000000000..28984d0e8
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rvu
+
+#if !defined(__RVU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __RVU_TRACE_H
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <linux/pci.h>
+
+#include "mbox.h"
+
+TRACE_EVENT(otx2_msg_alloc,
+ TP_PROTO(const struct pci_dev *pdev, u16 id, u64 size),
+ TP_ARGS(pdev, id, size),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __field(u16, id)
+ __field(u64, size)
+ ),
+ TP_fast_assign(__assign_str(dev, pci_name(pdev));
+ __entry->id = id;
+ __entry->size = size;
+ ),
+ TP_printk("[%s] msg:(%s) size:%lld\n", __get_str(dev),
+ otx2_mbox_id2name(__entry->id), __entry->size)
+);
+
+TRACE_EVENT(otx2_msg_send,
+ TP_PROTO(const struct pci_dev *pdev, u16 num_msgs, u64 msg_size),
+ TP_ARGS(pdev, num_msgs, msg_size),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __field(u16, num_msgs)
+ __field(u64, msg_size)
+ ),
+ TP_fast_assign(__assign_str(dev, pci_name(pdev));
+ __entry->num_msgs = num_msgs;
+ __entry->msg_size = msg_size;
+ ),
+ TP_printk("[%s] sent %d msg(s) of size:%lld\n", __get_str(dev),
+ __entry->num_msgs, __entry->msg_size)
+);
+
+TRACE_EVENT(otx2_msg_check,
+ TP_PROTO(const struct pci_dev *pdev, u16 reqid, u16 rspid, int rc),
+ TP_ARGS(pdev, reqid, rspid, rc),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __field(u16, reqid)
+ __field(u16, rspid)
+ __field(int, rc)
+ ),
+ TP_fast_assign(__assign_str(dev, pci_name(pdev));
+ __entry->reqid = reqid;
+ __entry->rspid = rspid;
+ __entry->rc = rc;
+ ),
+ TP_printk("[%s] req->id:0x%x rsp->id:0x%x resp_code:%d\n",
+ __get_str(dev), __entry->reqid,
+ __entry->rspid, __entry->rc)
+);
+
+TRACE_EVENT(otx2_msg_interrupt,
+ TP_PROTO(const struct pci_dev *pdev, const char *msg, u64 intr),
+ TP_ARGS(pdev, msg, intr),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __string(str, msg)
+ __field(u64, intr)
+ ),
+ TP_fast_assign(__assign_str(dev, pci_name(pdev));
+ __assign_str(str, msg);
+ __entry->intr = intr;
+ ),
+ TP_printk("[%s] mbox interrupt %s (0x%llx)\n", __get_str(dev),
+ __get_str(str), __entry->intr)
+);
+
+TRACE_EVENT(otx2_msg_process,
+ TP_PROTO(const struct pci_dev *pdev, u16 id, int err),
+ TP_ARGS(pdev, id, err),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __field(u16, id)
+ __field(int, err)
+ ),
+ TP_fast_assign(__assign_str(dev, pci_name(pdev));
+ __entry->id = id;
+ __entry->err = err;
+ ),
+ TP_printk("[%s] msg:(%s) error:%d\n", __get_str(dev),
+ otx2_mbox_id2name(__entry->id), __entry->err)
+);
+
+#endif /* __RVU_TRACE_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE rvu_trace
+
+#include <trace/define_trace.h>