author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:22 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:22 +0000
commit     b20732900e4636a467c0183a47f7396700f5f743 (patch)
tree       42f079ff82e701ebcb76829974b4caca3e5b6798 /drivers/net/ethernet/marvell
parent     Adding upstream version 6.8.12. (diff)
download   linux-b20732900e4636a467c0183a47f7396700f5f743.tar.xz
           linux-b20732900e4636a467c0183a47f7396700f5f743.zip

Adding upstream version 6.9.7. (tag: upstream/6.9.7)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/marvell')
-rw-r--r--  drivers/net/ethernet/marvell/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/marvell/Makefile | 1
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 5
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/Kconfig | 19
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/Makefile | 10
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c | 489
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c | 500
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_config.h | 160
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c | 273
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c | 1231
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h | 334
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c | 430
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.h | 166
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cn9k.h | 154
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cnxk.h | 162
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c | 510
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h | 224
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c | 330
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h | 276
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc.h | 16
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h | 617
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 16
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 32
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 186
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c | 41
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c | 8
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c | 14
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/Makefile | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c | 7
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c | 11
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 5
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/qos.c | 4
37 files changed, 6111 insertions(+), 141 deletions(-)
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index 884d64114b..837295fecd 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -180,6 +180,7 @@ config SKY2_DEBUG
source "drivers/net/ethernet/marvell/octeontx2/Kconfig"
source "drivers/net/ethernet/marvell/octeon_ep/Kconfig"
+source "drivers/net/ethernet/marvell/octeon_ep_vf/Kconfig"
source "drivers/net/ethernet/marvell/prestera/Kconfig"
endif # NET_VENDOR_MARVELL
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
index ceba4aa4f0..a399defe25 100644
--- a/drivers/net/ethernet/marvell/Makefile
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -12,5 +12,6 @@ obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
obj-$(CONFIG_SKGE) += skge.o
obj-$(CONFIG_SKY2) += sky2.o
obj-y += octeon_ep/
+obj-y += octeon_ep_vf/
obj-y += octeontx2/
obj-y += prestera/
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index a641b3534c..40a5f1431e 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -5097,7 +5097,7 @@ static int mvneta_ethtool_set_wol(struct net_device *dev,
}
static int mvneta_ethtool_get_eee(struct net_device *dev,
- struct ethtool_eee *eee)
+ struct ethtool_keee *eee)
{
struct mvneta_port *pp = netdev_priv(dev);
u32 lpi_ctl0;
@@ -5113,7 +5113,7 @@ static int mvneta_ethtool_get_eee(struct net_device *dev,
}
static int mvneta_ethtool_set_eee(struct net_device *dev,
- struct ethtool_eee *eee)
+ struct ethtool_keee *eee)
{
struct mvneta_port *pp = netdev_priv(dev);
u32 lpi_ctl0;
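Context for this mvneta hunk: it is part of the kernel-wide migration of the ethtool EEE callbacks from struct ethtool_eee to struct ethtool_keee, which replaces the legacy u32 advertisement masks with link-mode bitmaps. A minimal sketch of the new callback shape (illustrative only, not part of this patch; helper and field names are taken from the keee API as understood here):

    #include <linux/ethtool.h>
    #include <linux/linkmode.h>

    static int example_get_eee(struct net_device *dev, struct ethtool_keee *eee)
    {
            /* keee carries link-mode bitmaps instead of legacy u32 masks */
            linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
                             eee->advertised);
            eee->eee_enabled = true;
            return 0;
    }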
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 23adf53c2a..cebc79a710 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -4013,7 +4013,10 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
}
}
- skb = build_skb(data, frag_size);
+ if (frag_size)
+ skb = build_skb(data, frag_size);
+ else
+ skb = slab_build_skb(data);
if (!skb) {
netdev_warn(port->dev, "skb build failed\n");
goto err_drop_frame;
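This mvpp2 hunk fixes skb construction for slab-allocated Rx buffers: build_skb() assumes a page-backed buffer with a nonzero frag_size, while a frag_size of 0 indicates a kmalloc'd buffer, for which slab_build_skb() must be used. A condensed sketch of the selection pattern, reusing the hunk's variable names:

    /* Pick the skb builder based on how 'data' was allocated:
     * nonzero frag_size => page fragment, zero => kmalloc/slab buffer.
     */
    struct sk_buff *skb;

    if (frag_size)
            skb = build_skb(data, frag_size);
    else
            skb = slab_build_skb(data);
    if (!skb)
            goto err_drop_frame;        /* as in the hunk above */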
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/Kconfig b/drivers/net/ethernet/marvell/octeon_ep_vf/Kconfig
new file mode 100644
index 0000000000..e371a3ef0c
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Marvell's Octeon PCI Endpoint NIC VF Driver Configuration
+#
+
+config OCTEON_EP_VF
+ tristate "Marvell Octeon PCI Endpoint NIC VF Driver"
+ depends on 64BIT
+ depends on PCI
+ help
+ This driver supports the networking functionality of Marvell's
+ Octeon PCI Endpoint NIC VF.
+
+ For the list of devices supported by this driver, refer to the
+ documentation in
+ <file:Documentation/networking/device_drivers/ethernet/marvell/octeon_ep_vf.rst>.
+
+ To compile this driver as a module, choose M here.
+ The name of the module will be octeon_ep_vf.
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/Makefile b/drivers/net/ethernet/marvell/octeon_ep_vf/Makefile
new file mode 100644
index 0000000000..4a5f9fcb0b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Network driver for Marvell's Octeon PCI Endpoint NIC VF
+#
+
+obj-$(CONFIG_OCTEON_EP_VF) += octeon_ep_vf.o
+
+octeon_ep_vf-y := octep_vf_main.o octep_vf_cn9k.o octep_vf_cnxk.o \
+ octep_vf_tx.o octep_vf_rx.o octep_vf_mbox.o \
+ octep_vf_ethtool.o
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
new file mode 100644
index 0000000000..88937fce75
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
@@ -0,0 +1,489 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include "octep_vf_config.h"
+#include "octep_vf_main.h"
+#include "octep_vf_regs_cn9k.h"
+
+/* Dump useful hardware IQ/OQ CSRs for debugging */
+static void cn93_vf_dump_q_regs(struct octep_vf_device *oct, int qno)
+{
+ struct device *dev = &oct->pdev->dev;
+
+ dev_info(dev, "IQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_IN_INSTR_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_INSTR_DBELL(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(qno)));
+ dev_info(dev, "R[%d]_IN_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_CONTROL(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(qno)));
+ dev_info(dev, "R[%d]_IN_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_ENABLE(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_INSTR_BADDR(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_BADDR(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_INSTR_RSIZE(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_RSIZE(qno)));
+ dev_info(dev, "R[%d]_IN_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_CNTS(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CNTS(qno)));
+ dev_info(dev, "R[%d]_IN_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_INT_LEVELS(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_IN_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_PKT_CNT(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_IN_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_BYTE_CNT(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_BYTE_CNT(qno)));
+
+ dev_info(dev, "OQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_OUT_SLIST_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_SLIST_DBELL(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_DBELL(qno)));
+ dev_info(dev, "R[%d]_OUT_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_CONTROL(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(qno)));
+ dev_info(dev, "R[%d]_OUT_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_ENABLE(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_SLIST_BADDR(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_BADDR(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_SLIST_RSIZE(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_RSIZE(qno)));
+ dev_info(dev, "R[%d]_OUT_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_CNTS(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CNTS(qno)));
+ dev_info(dev, "R[%d]_OUT_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_INT_LEVELS(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_OUT_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_PKT_CNT(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_OUT_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_BYTE_CNT(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_BYTE_CNT(qno)));
+}
+
+/* Reset Hardware Tx queue */
+static void cn93_vf_reset_iq(struct octep_vf_device *oct, int q_no)
+{
+ u64 val = ULL(0);
+
+ dev_dbg(&oct->pdev->dev, "Reset VF IQ-%d\n", q_no);
+
+ /* Disable the Tx/Instruction Ring */
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(q_no), val);
+
+ /* clear the Instruction Ring packet/byte counts and doorbell CSRs */
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q_no), val);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_PKT_CNT(q_no), val);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_BYTE_CNT(q_no), val);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_BADDR(q_no), val);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_RSIZE(q_no), val);
+
+ val = GENMASK_ULL(31, 0);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(q_no), val);
+
+ val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CNTS(q_no));
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_CNTS(q_no),
+ val & GENMASK_ULL(31, 0));
+}
+
+/* Reset Hardware Rx queue */
+static void cn93_vf_reset_oq(struct octep_vf_device *oct, int q_no)
+{
+ u64 val = ULL(0);
+
+ /* Disable Output (Rx) Ring */
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(q_no), val);
+
+ /* Clear count CSRs */
+ val = octep_vf_read_csr(oct, CN93_VF_SDP_R_OUT_CNTS(q_no));
+ octep_vf_write_csr(oct, CN93_VF_SDP_R_OUT_CNTS(q_no), val);
+
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_PKT_CNT(q_no), GENMASK_ULL(35, 0));
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_DBELL(q_no), GENMASK_ULL(31, 0));
+}
+
+/* Reset all hardware Tx/Rx queues */
+static void octep_vf_reset_io_queues_cn93(struct octep_vf_device *oct)
+{
+ struct pci_dev *pdev = oct->pdev;
+ int q;
+
+ dev_dbg(&pdev->dev, "Reset OCTEP_CN93 VF IO Queues\n");
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ cn93_vf_reset_iq(oct, q);
+ cn93_vf_reset_oq(oct, q);
+ }
+}
+
+/* Initialize configuration limits and initial active config */
+static void octep_vf_init_config_cn93_vf(struct octep_vf_device *oct)
+{
+ struct octep_vf_config *conf = oct->conf;
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(0));
+ conf->ring_cfg.max_io_rings = (reg_val >> CN93_VF_R_IN_CTL_RPVF_POS) &
+ CN93_VF_R_IN_CTL_RPVF_MASK;
+ conf->ring_cfg.active_io_rings = conf->ring_cfg.max_io_rings;
+
+ conf->iq.num_descs = OCTEP_VF_IQ_MAX_DESCRIPTORS;
+ conf->iq.instr_type = OCTEP_VF_64BYTE_INSTR;
+ conf->iq.db_min = OCTEP_VF_DB_MIN;
+ conf->iq.intr_threshold = OCTEP_VF_IQ_INTR_THRESHOLD;
+
+ conf->oq.num_descs = OCTEP_VF_OQ_MAX_DESCRIPTORS;
+ conf->oq.buf_size = OCTEP_VF_OQ_BUF_SIZE;
+ conf->oq.refill_threshold = OCTEP_VF_OQ_REFILL_THRESHOLD;
+ conf->oq.oq_intr_pkt = OCTEP_VF_OQ_INTR_PKT_THRESHOLD;
+ conf->oq.oq_intr_time = OCTEP_VF_OQ_INTR_TIME_THRESHOLD;
+
+ conf->msix_cfg.ioq_msix = conf->ring_cfg.active_io_rings;
+}
+
+/* Setup registers for a hardware Tx Queue */
+static void octep_vf_setup_iq_regs_cn93(struct octep_vf_device *oct, int iq_no)
+{
+ struct octep_vf_iq *iq = oct->iq[iq_no];
+ u32 reset_instr_cnt;
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(iq_no));
+
+ /* Wait for the IDLE bit to be set */
+ if (!(reg_val & CN93_VF_R_IN_CTL_IDLE)) {
+ do {
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(iq_no));
+ } while (!(reg_val & CN93_VF_R_IN_CTL_IDLE));
+ }
+ reg_val |= CN93_VF_R_IN_CTL_RDSIZE;
+ reg_val |= CN93_VF_R_IN_CTL_IS_64B;
+ reg_val |= CN93_VF_R_IN_CTL_ESR;
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(iq_no), reg_val);
+
+ /* Write the start of the input queue's ring and its size */
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_BADDR(iq_no), iq->desc_ring_dma);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_RSIZE(iq_no), iq->max_count);
+
+ /* Remember the doorbell & instruction count register addr for this queue */
+ iq->doorbell_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_IN_INSTR_DBELL(iq_no);
+ iq->inst_cnt_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_IN_CNTS(iq_no);
+ iq->intr_lvl_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_IN_INT_LEVELS(iq_no);
+
+ /* Store the current instruction counter (used in flush_iq calculation) */
+ reset_instr_cnt = readl(iq->inst_cnt_reg);
+ writel(reset_instr_cnt, iq->inst_cnt_reg);
+
+ /* Set INTR_THRESHOLD from the config; the max value (0xFFFFFFFF) disables the interrupt */
+ reg_val = CFG_GET_IQ_INTR_THRESHOLD(oct->conf) & GENMASK_ULL(31, 0);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+}
+
+/* Setup registers for a hardware Rx Queue */
+static void octep_vf_setup_oq_regs_cn93(struct octep_vf_device *oct, int oq_no)
+{
+ struct octep_vf_oq *oq = oct->oq[oq_no];
+ u32 time_threshold = 0;
+ u64 oq_ctl = ULL(0);
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no));
+
+ /* Wait for the IDLE bit to be set */
+ if (!(reg_val & CN93_VF_R_OUT_CTL_IDLE)) {
+ do {
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no));
+ } while (!(reg_val & CN93_VF_R_OUT_CTL_IDLE));
+ }
+
+ reg_val &= ~(CN93_VF_R_OUT_CTL_IMODE);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_ROR_P);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_NSR_P);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_ROR_I);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_NSR_I);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_ES_I);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_ROR_D);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_NSR_D);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_ES_D);
+ reg_val |= (CN93_VF_R_OUT_CTL_ES_P);
+
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no), reg_val);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_BADDR(oq_no), oq->desc_ring_dma);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_RSIZE(oq_no), oq->max_count);
+
+ oq_ctl = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no));
+ /* Clear the ISIZE and BSIZE fields (bits 22-0) */
+ oq_ctl &= ~GENMASK_ULL(22, 0);
+ /* Populate the BSIZE field (bits 15-0) */
+ oq_ctl |= (oq->buffer_size & GENMASK_ULL(15, 0));
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no), oq_ctl);
+
+ /* Get the mapped address of the pkt_sent and pkts_credit regs */
+ oq->pkts_sent_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_OUT_CNTS(oq_no);
+ oq->pkts_credit_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_OUT_SLIST_DBELL(oq_no);
+
+ time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf);
+ reg_val = ((u64)time_threshold << 32) | CFG_GET_OQ_INTR_PKT(oct->conf);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+}
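The final write in octep_vf_setup_oq_regs_cn93() packs both Rx interrupt coalescing thresholds into a single 64-bit R_OUT_INT_LEVELS value: the time threshold in bits 63:32 and the packet count in bits 31:0. A worked example using the driver's default config values (10 usec, 1 packet):

    u32 time_threshold = 10;  /* OCTEP_VF_OQ_INTR_TIME_THRESHOLD */
    u32 pkt_threshold = 1;    /* OCTEP_VF_OQ_INTR_PKT_THRESHOLD */
    u64 int_levels = ((u64)time_threshold << 32) | pkt_threshold;
    /* int_levels == 0x0000000a00000001 */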
+
+/* Setup registers for a VF mailbox */
+static void octep_vf_setup_mbox_regs_cn93(struct octep_vf_device *oct, int q_no)
+{
+ struct octep_vf_mbox *mbox = oct->mbox;
+
+ /* PF to VF DATA reg. VF reads from this reg */
+ mbox->mbox_read_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_MBOX_PF_VF_DATA(q_no);
+
+ /* VF mbox interrupt reg */
+ mbox->mbox_int_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_MBOX_PF_VF_INT(q_no);
+
+ /* VF to PF DATA reg. VF writes into this reg */
+ mbox->mbox_write_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_MBOX_VF_PF_DATA(q_no);
+}
+
+/* Mailbox Interrupt handler */
+static void cn93_handle_vf_mbox_intr(struct octep_vf_device *oct)
+{
+ if (oct->mbox)
+ schedule_work(&oct->mbox->wk.work);
+ else
+ dev_err(&oct->pdev->dev, "cannot schedule work on invalid mbox\n");
+}
+
+/* Tx/Rx queue interrupt handler */
+static irqreturn_t octep_vf_ioq_intr_handler_cn93(void *data)
+{
+ struct octep_vf_ioq_vector *vector = data;
+ struct octep_vf_device *oct;
+ struct octep_vf_oq *oq;
+ u64 reg_val;
+
+ oct = vector->octep_vf_dev;
+ oq = vector->oq;
+ /* Mailbox interrupt arrives along with interrupt of tx/rx ring pair 0 */
+ if (oq->q_no == 0) {
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_MBOX_PF_VF_INT(0));
+ if (reg_val & CN93_VF_SDP_R_MBOX_PF_VF_INT_STATUS) {
+ cn93_handle_vf_mbox_intr(oct);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_MBOX_PF_VF_INT(0), reg_val);
+ }
+ }
+ napi_schedule_irqoff(oq->napi);
+ return IRQ_HANDLED;
+}
+
+/* Re-initialize Octeon hardware registers */
+static void octep_vf_reinit_regs_cn93(struct octep_vf_device *oct)
+{
+ u32 i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_iq_regs(oct, i);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_oq_regs(oct, i);
+
+ oct->hw_ops.enable_interrupts(oct);
+ oct->hw_ops.enable_io_queues(oct);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
+}
+
+/* Enable all interrupts */
+static void octep_vf_enable_interrupts_cn93(struct octep_vf_device *oct)
+{
+ int num_rings, q;
+ u64 reg_val;
+
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ for (q = 0; q < num_rings; q++) {
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q), reg_val);
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q), reg_val);
+ }
+ /* Enable the PF-to-VF mbox interrupt by setting the enable bit */
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_MBOX_PF_VF_INT(0),
+ CN93_VF_SDP_R_MBOX_PF_VF_INT_ENAB);
+}
+
+/* Disable all interrupts */
+static void octep_vf_disable_interrupts_cn93(struct octep_vf_device *oct)
+{
+ int num_rings, q;
+ u64 reg_val;
+
+ /* Disable the PF-to-VF mbox interrupt by clearing the enable bit */
+ if (oct->mbox)
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_MBOX_PF_VF_INT(0), 0x0);
+
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ for (q = 0; q < num_rings; q++) {
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q));
+ reg_val &= ~BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q), reg_val);
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q));
+ reg_val &= ~BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q), reg_val);
+ }
+}
+
+/* Get new Octeon Read Index: index of descriptor that Octeon reads next. */
+static u32 octep_vf_update_iq_read_index_cn93(struct octep_vf_iq *iq)
+{
+ u32 pkt_in_done = readl(iq->inst_cnt_reg);
+ u32 last_done, new_idx;
+
+ last_done = pkt_in_done - iq->pkt_in_done;
+ iq->pkt_in_done = pkt_in_done;
+
+ new_idx = (iq->octep_vf_read_index + last_done) % iq->max_count;
+
+ return new_idx;
+}
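octep_vf_update_iq_read_index_cn93() relies on unsigned 32-bit wrap-around arithmetic: pkt_in_done - iq->pkt_in_done yields the number of newly completed commands even when the hardware counter wraps past 0xffffffff. For example:

    u32 prev = 0xfffffff0;  /* snapshot kept in iq->pkt_in_done */
    u32 now = 0x00000010;   /* counter re-read after it wrapped */
    u32 done = now - prev;  /* == 0x20: 32 new completions despite the wrap */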
+
+/* Enable a hardware Tx Queue */
+static void octep_vf_enable_iq_cn93(struct octep_vf_device *oct, int iq_no)
+{
+ u64 loop = HZ;
+ u64 reg_val;
+
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(iq_no), GENMASK_ULL(31, 0));
+
+ while (octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(iq_no)) &&
+ loop--) {
+ schedule_timeout_interruptible(1);
+ }
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(iq_no));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(iq_no));
+ reg_val |= ULL(1);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Enable a hardware Rx Queue */
+static void octep_vf_enable_oq_cn93(struct octep_vf_device *oct, int oq_no)
+{
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(oq_no));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_DBELL(oq_no), GENMASK_ULL(31, 0));
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(oq_no));
+ reg_val |= ULL(1);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Enable all hardware Tx/Rx Queues assigned to VF */
+static void octep_vf_enable_io_queues_cn93(struct octep_vf_device *oct)
+{
+ u8 q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_vf_enable_iq_cn93(oct, q);
+ octep_vf_enable_oq_cn93(oct, q);
+ }
+}
+
+/* Disable a hardware Tx Queue assigned to VF */
+static void octep_vf_disable_iq_cn93(struct octep_vf_device *oct, int iq_no)
+{
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(iq_no));
+ reg_val &= ~ULL(1);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Disable a hardware Rx Queue assigned to VF */
+static void octep_vf_disable_oq_cn93(struct octep_vf_device *oct, int oq_no)
+{
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(oq_no));
+ reg_val &= ~ULL(1);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Disable all hardware Tx/Rx Queues assigned to VF */
+static void octep_vf_disable_io_queues_cn93(struct octep_vf_device *oct)
+{
+ int q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_vf_disable_iq_cn93(oct, q);
+ octep_vf_disable_oq_cn93(oct, q);
+ }
+}
+
+/* Dump hardware registers (including Tx/Rx queues) for debugging. */
+static void octep_vf_dump_registers_cn93(struct octep_vf_device *oct)
+{
+ u8 num_rings, q;
+
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ for (q = 0; q < num_rings; q++)
+ cn93_vf_dump_q_regs(oct, q);
+}
+
+/**
+ * octep_vf_device_setup_cn93() - Setup Octeon device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * - initialize hardware operations.
+ * - set initial configuration and max limits.
+ */
+void octep_vf_device_setup_cn93(struct octep_vf_device *oct)
+{
+ oct->hw_ops.setup_iq_regs = octep_vf_setup_iq_regs_cn93;
+ oct->hw_ops.setup_oq_regs = octep_vf_setup_oq_regs_cn93;
+ oct->hw_ops.setup_mbox_regs = octep_vf_setup_mbox_regs_cn93;
+
+ oct->hw_ops.ioq_intr_handler = octep_vf_ioq_intr_handler_cn93;
+ oct->hw_ops.reinit_regs = octep_vf_reinit_regs_cn93;
+
+ oct->hw_ops.enable_interrupts = octep_vf_enable_interrupts_cn93;
+ oct->hw_ops.disable_interrupts = octep_vf_disable_interrupts_cn93;
+
+ oct->hw_ops.update_iq_read_idx = octep_vf_update_iq_read_index_cn93;
+
+ oct->hw_ops.enable_iq = octep_vf_enable_iq_cn93;
+ oct->hw_ops.enable_oq = octep_vf_enable_oq_cn93;
+ oct->hw_ops.enable_io_queues = octep_vf_enable_io_queues_cn93;
+
+ oct->hw_ops.disable_iq = octep_vf_disable_iq_cn93;
+ oct->hw_ops.disable_oq = octep_vf_disable_oq_cn93;
+ oct->hw_ops.disable_io_queues = octep_vf_disable_io_queues_cn93;
+ oct->hw_ops.reset_io_queues = octep_vf_reset_io_queues_cn93;
+
+ oct->hw_ops.dump_registers = octep_vf_dump_registers_cn93;
+ octep_vf_init_config_cn93_vf(oct);
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
new file mode 100644
index 0000000000..1f79dfad42
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include "octep_vf_config.h"
+#include "octep_vf_main.h"
+#include "octep_vf_regs_cnxk.h"
+
+/* Dump useful hardware IQ/OQ CSRs for debugging */
+static void cnxk_vf_dump_q_regs(struct octep_vf_device *oct, int qno)
+{
+ struct device *dev = &oct->pdev->dev;
+
+ dev_info(dev, "IQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_IN_INSTR_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_INSTR_DBELL(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(qno)));
+ dev_info(dev, "R[%d]_IN_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_CONTROL(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(qno)));
+ dev_info(dev, "R[%d]_IN_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_ENABLE(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_INSTR_BADDR(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_BADDR(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_INSTR_RSIZE(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_RSIZE(qno)));
+ dev_info(dev, "R[%d]_IN_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_CNTS(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CNTS(qno)));
+ dev_info(dev, "R[%d]_IN_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_INT_LEVELS(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_IN_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_PKT_CNT(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_IN_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_BYTE_CNT(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_BYTE_CNT(qno)));
+
+ dev_info(dev, "OQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_OUT_SLIST_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_SLIST_DBELL(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_DBELL(qno)));
+ dev_info(dev, "R[%d]_OUT_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_CONTROL(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(qno)));
+ dev_info(dev, "R[%d]_OUT_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_ENABLE(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_SLIST_BADDR(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_BADDR(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(qno)));
+ dev_info(dev, "R[%d]_OUT_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_CNTS(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CNTS(qno)));
+ dev_info(dev, "R[%d]_OUT_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_INT_LEVELS(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_OUT_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_PKT_CNT(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_OUT_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_BYTE_CNT(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_BYTE_CNT(qno)));
+ dev_info(dev, "R[%d]_ERR_TYPE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_ERR_TYPE(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_ERR_TYPE(qno)));
+}
+
+/* Reset Hardware Tx queue */
+static void cnxk_vf_reset_iq(struct octep_vf_device *oct, int q_no)
+{
+ u64 val = ULL(0);
+
+ dev_dbg(&oct->pdev->dev, "Reset VF IQ-%d\n", q_no);
+
+ /* Disable the Tx/Instruction Ring */
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(q_no), val);
+
+ /* clear the Instruction Ring packet/byte counts and doorbell CSRs */
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q_no), val);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_PKT_CNT(q_no), val);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_BYTE_CNT(q_no), val);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_BADDR(q_no), val);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_RSIZE(q_no), val);
+
+ val = GENMASK_ULL(31, 0);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(q_no), val);
+
+ val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CNTS(q_no));
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_CNTS(q_no), val & GENMASK_ULL(31, 0));
+}
+
+/* Reset Hardware Rx queue */
+static void cnxk_vf_reset_oq(struct octep_vf_device *oct, int q_no)
+{
+ u64 val = ULL(0);
+
+ /* Disable Output (Rx) Ring */
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(q_no), val);
+
+ /* Clear count CSRs */
+ val = octep_vf_read_csr(oct, CNXK_VF_SDP_R_OUT_CNTS(q_no));
+ octep_vf_write_csr(oct, CNXK_VF_SDP_R_OUT_CNTS(q_no), val);
+
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_PKT_CNT(q_no), GENMASK_ULL(35, 0));
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_DBELL(q_no), GENMASK_ULL(31, 0));
+}
+
+/* Reset all hardware Tx/Rx queues */
+static void octep_vf_reset_io_queues_cnxk(struct octep_vf_device *oct)
+{
+ struct pci_dev *pdev = oct->pdev;
+ int q;
+
+ dev_dbg(&pdev->dev, "Reset OCTEP_CNXK VF IO Queues\n");
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ cnxk_vf_reset_iq(oct, q);
+ cnxk_vf_reset_oq(oct, q);
+ }
+}
+
+/* Initialize configuration limits and initial active config */
+static void octep_vf_init_config_cnxk_vf(struct octep_vf_device *oct)
+{
+ struct octep_vf_config *conf = oct->conf;
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(0));
+ conf->ring_cfg.max_io_rings = (reg_val >> CNXK_VF_R_IN_CTL_RPVF_POS) &
+ CNXK_VF_R_IN_CTL_RPVF_MASK;
+ conf->ring_cfg.active_io_rings = conf->ring_cfg.max_io_rings;
+
+ conf->iq.num_descs = OCTEP_VF_IQ_MAX_DESCRIPTORS;
+ conf->iq.instr_type = OCTEP_VF_64BYTE_INSTR;
+ conf->iq.db_min = OCTEP_VF_DB_MIN;
+ conf->iq.intr_threshold = OCTEP_VF_IQ_INTR_THRESHOLD;
+
+ conf->oq.num_descs = OCTEP_VF_OQ_MAX_DESCRIPTORS;
+ conf->oq.buf_size = OCTEP_VF_OQ_BUF_SIZE;
+ conf->oq.refill_threshold = OCTEP_VF_OQ_REFILL_THRESHOLD;
+ conf->oq.oq_intr_pkt = OCTEP_VF_OQ_INTR_PKT_THRESHOLD;
+ conf->oq.oq_intr_time = OCTEP_VF_OQ_INTR_TIME_THRESHOLD;
+ conf->oq.wmark = OCTEP_VF_OQ_WMARK_MIN;
+
+ conf->msix_cfg.ioq_msix = conf->ring_cfg.active_io_rings;
+}
+
+/* Setup registers for a hardware Tx Queue */
+static void octep_vf_setup_iq_regs_cnxk(struct octep_vf_device *oct, int iq_no)
+{
+ struct octep_vf_iq *iq = oct->iq[iq_no];
+ u32 reset_instr_cnt;
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(iq_no));
+
+ /* Wait for the IDLE bit to be set */
+ if (!(reg_val & CNXK_VF_R_IN_CTL_IDLE)) {
+ do {
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(iq_no));
+ } while (!(reg_val & CNXK_VF_R_IN_CTL_IDLE));
+ }
+ reg_val |= CNXK_VF_R_IN_CTL_RDSIZE;
+ reg_val |= CNXK_VF_R_IN_CTL_IS_64B;
+ reg_val |= CNXK_VF_R_IN_CTL_ESR;
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(iq_no), reg_val);
+
+ /* Write the start of the input queue's ring and its size */
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_BADDR(iq_no), iq->desc_ring_dma);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_RSIZE(iq_no), iq->max_count);
+
+ /* Remember the doorbell & instruction count register addr for this queue */
+ iq->doorbell_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_IN_INSTR_DBELL(iq_no);
+ iq->inst_cnt_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_IN_CNTS(iq_no);
+ iq->intr_lvl_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_IN_INT_LEVELS(iq_no);
+
+ /* Store the current instruction counter (used in flush_iq calculation) */
+ reset_instr_cnt = readl(iq->inst_cnt_reg);
+ writel(reset_instr_cnt, iq->inst_cnt_reg);
+
+ /* Set INTR_THRESHOLD from the config; the max value (0xFFFFFFFF) disables the interrupt */
+ reg_val = CFG_GET_IQ_INTR_THRESHOLD(oct->conf) & GENMASK_ULL(31, 0);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+}
+
+/* Setup registers for a hardware Rx Queue */
+static void octep_vf_setup_oq_regs_cnxk(struct octep_vf_device *oct, int oq_no)
+{
+ struct octep_vf_oq *oq = oct->oq[oq_no];
+ u32 time_threshold = 0;
+ u64 oq_ctl = ULL(0);
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no));
+
+ /* Wait for the IDLE bit to be set */
+ if (!(reg_val & CNXK_VF_R_OUT_CTL_IDLE)) {
+ do {
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no));
+ } while (!(reg_val & CNXK_VF_R_OUT_CTL_IDLE));
+ }
+
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_IMODE);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_ROR_P);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_NSR_P);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_ROR_I);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_NSR_I);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_ES_I);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_ROR_D);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_NSR_D);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_ES_D);
+ reg_val |= (CNXK_VF_R_OUT_CTL_ES_P);
+
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no), reg_val);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_BADDR(oq_no), oq->desc_ring_dma);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(oq_no), oq->max_count);
+
+ oq_ctl = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no));
+ /* Clear the ISIZE and BSIZE (22-0) */
+ oq_ctl &= ~GENMASK_ULL(22, 0);
+ /* Populate the BSIZE (15-0) */
+ oq_ctl |= (oq->buffer_size & GENMASK_ULL(15, 0));
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no), oq_ctl);
+
+ /* Get the mapped address of the pkt_sent and pkts_credit regs */
+ oq->pkts_sent_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_OUT_CNTS(oq_no);
+ oq->pkts_credit_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_OUT_SLIST_DBELL(oq_no);
+
+ time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf);
+ reg_val = ((u64)time_threshold << 32) | CFG_GET_OQ_INTR_PKT(oct->conf);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+
+ /* set watermark for backpressure */
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_WMARK(oq_no));
+ reg_val &= ~GENMASK_ULL(31, 0);
+ reg_val |= CFG_GET_OQ_WMARK(oct->conf);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_WMARK(oq_no), reg_val);
+}
+
+/* Setup registers for a VF mailbox */
+static void octep_vf_setup_mbox_regs_cnxk(struct octep_vf_device *oct, int q_no)
+{
+ struct octep_vf_mbox *mbox = oct->mbox;
+
+ /* PF to VF DATA reg. VF reads from this reg */
+ mbox->mbox_read_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_MBOX_PF_VF_DATA(q_no);
+
+ /* VF mbox interrupt reg */
+ mbox->mbox_int_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_MBOX_PF_VF_INT(q_no);
+
+ /* VF to PF DATA reg. VF writes into this reg */
+ mbox->mbox_write_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_MBOX_VF_PF_DATA(q_no);
+}
+
+/* Mailbox Interrupt handler */
+static void cnxk_handle_vf_mbox_intr(struct octep_vf_device *oct)
+{
+ if (oct->mbox)
+ schedule_work(&oct->mbox->wk.work);
+ else
+ dev_err(&oct->pdev->dev, "cannot schedule work on invalid mbox\n");
+}
+
+/* Tx/Rx queue interrupt handler */
+static irqreturn_t octep_vf_ioq_intr_handler_cnxk(void *data)
+{
+ struct octep_vf_ioq_vector *vector = data;
+ struct octep_vf_device *oct;
+ struct octep_vf_oq *oq;
+ u64 reg_val;
+
+ oct = vector->octep_vf_dev;
+ oq = vector->oq;
+ /* Mailbox interrupt arrives along with interrupt of tx/rx ring pair 0 */
+ if (oq->q_no == 0) {
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_MBOX_PF_VF_INT(0));
+ if (reg_val & CNXK_VF_SDP_R_MBOX_PF_VF_INT_STATUS) {
+ cnxk_handle_vf_mbox_intr(oct);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_MBOX_PF_VF_INT(0), reg_val);
+ }
+ }
+ napi_schedule_irqoff(oq->napi);
+ return IRQ_HANDLED;
+}
+
+/* Re-initialize Octeon hardware registers */
+static void octep_vf_reinit_regs_cnxk(struct octep_vf_device *oct)
+{
+ u32 i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_iq_regs(oct, i);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_oq_regs(oct, i);
+
+ oct->hw_ops.enable_interrupts(oct);
+ oct->hw_ops.enable_io_queues(oct);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
+}
+
+/* Enable all interrupts */
+static void octep_vf_enable_interrupts_cnxk(struct octep_vf_device *oct)
+{
+ int num_rings, q;
+ u64 reg_val;
+
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ for (q = 0; q < num_rings; q++) {
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q), reg_val);
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q), reg_val);
+ }
+ /* Enable the PF-to-VF mbox interrupt by setting the enable bit */
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_MBOX_PF_VF_INT(0),
+ CNXK_VF_SDP_R_MBOX_PF_VF_INT_ENAB);
+}
+
+/* Disable all interrupts */
+static void octep_vf_disable_interrupts_cnxk(struct octep_vf_device *oct)
+{
+ int num_rings, q;
+ u64 reg_val;
+
+ /* Disable the PF-to-VF mbox interrupt by clearing the enable bit */
+ if (oct->mbox)
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_MBOX_PF_VF_INT(0), 0x0);
+
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ for (q = 0; q < num_rings; q++) {
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q));
+ reg_val &= ~BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q), reg_val);
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q));
+ reg_val &= ~BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q), reg_val);
+ }
+}
+
+/* Get new Octeon Read Index: index of descriptor that Octeon reads next. */
+static u32 octep_vf_update_iq_read_index_cnxk(struct octep_vf_iq *iq)
+{
+ u32 pkt_in_done = readl(iq->inst_cnt_reg);
+ u32 last_done, new_idx;
+
+ last_done = pkt_in_done - iq->pkt_in_done;
+ iq->pkt_in_done = pkt_in_done;
+
+ new_idx = (iq->octep_vf_read_index + last_done) % iq->max_count;
+
+ return new_idx;
+}
+
+/* Enable a hardware Tx Queue */
+static void octep_vf_enable_iq_cnxk(struct octep_vf_device *oct, int iq_no)
+{
+ u64 loop = HZ;
+ u64 reg_val;
+
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(iq_no), GENMASK_ULL(31, 0));
+
+ while (octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(iq_no)) &&
+ loop--) {
+ schedule_timeout_interruptible(1);
+ }
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(iq_no));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(iq_no));
+ reg_val |= ULL(1);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Enable a hardware Rx Queue */
+static void octep_vf_enable_oq_cnxk(struct octep_vf_device *oct, int oq_no)
+{
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(oq_no));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_DBELL(oq_no), GENMASK_ULL(31, 0));
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(oq_no));
+ reg_val |= ULL(1);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Enable all hardware Tx/Rx Queues assigned to VF */
+static void octep_vf_enable_io_queues_cnxk(struct octep_vf_device *oct)
+{
+ u8 q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_vf_enable_iq_cnxk(oct, q);
+ octep_vf_enable_oq_cnxk(oct, q);
+ }
+}
+
+/* Disable a hardware Tx Queue assigned to VF */
+static void octep_vf_disable_iq_cnxk(struct octep_vf_device *oct, int iq_no)
+{
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(iq_no));
+ reg_val &= ~ULL(1);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Disable a hardware Rx Queue assigned to VF */
+static void octep_vf_disable_oq_cnxk(struct octep_vf_device *oct, int oq_no)
+{
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(oq_no));
+ reg_val &= ~ULL(1);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Disable all hardware Tx/Rx Queues assigned to VF */
+static void octep_vf_disable_io_queues_cnxk(struct octep_vf_device *oct)
+{
+ int q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_vf_disable_iq_cnxk(oct, q);
+ octep_vf_disable_oq_cnxk(oct, q);
+ }
+}
+
+/* Dump hardware registers (including Tx/Rx queues) for debugging. */
+static void octep_vf_dump_registers_cnxk(struct octep_vf_device *oct)
+{
+ u8 num_rings, q;
+
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ for (q = 0; q < num_rings; q++)
+ cnxk_vf_dump_q_regs(oct, q);
+}
+
+/**
+ * octep_vf_device_setup_cnxk() - Setup Octeon device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * - initialize hardware operations.
+ * - set initial configuration and max limits.
+ */
+void octep_vf_device_setup_cnxk(struct octep_vf_device *oct)
+{
+ oct->hw_ops.setup_iq_regs = octep_vf_setup_iq_regs_cnxk;
+ oct->hw_ops.setup_oq_regs = octep_vf_setup_oq_regs_cnxk;
+ oct->hw_ops.setup_mbox_regs = octep_vf_setup_mbox_regs_cnxk;
+
+ oct->hw_ops.ioq_intr_handler = octep_vf_ioq_intr_handler_cnxk;
+ oct->hw_ops.reinit_regs = octep_vf_reinit_regs_cnxk;
+
+ oct->hw_ops.enable_interrupts = octep_vf_enable_interrupts_cnxk;
+ oct->hw_ops.disable_interrupts = octep_vf_disable_interrupts_cnxk;
+
+ oct->hw_ops.update_iq_read_idx = octep_vf_update_iq_read_index_cnxk;
+
+ oct->hw_ops.enable_iq = octep_vf_enable_iq_cnxk;
+ oct->hw_ops.enable_oq = octep_vf_enable_oq_cnxk;
+ oct->hw_ops.enable_io_queues = octep_vf_enable_io_queues_cnxk;
+
+ oct->hw_ops.disable_iq = octep_vf_disable_iq_cnxk;
+ oct->hw_ops.disable_oq = octep_vf_disable_oq_cnxk;
+ oct->hw_ops.disable_io_queues = octep_vf_disable_io_queues_cnxk;
+ oct->hw_ops.reset_io_queues = octep_vf_reset_io_queues_cnxk;
+
+ oct->hw_ops.dump_registers = octep_vf_dump_registers_cnxk;
+ octep_vf_init_config_cnxk_vf(oct);
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_config.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_config.h
new file mode 100644
index 0000000000..e03a647b01
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_config.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_VF_CONFIG_H_
+#define _OCTEP_VF_CONFIG_H_
+
+/* Tx instruction types by length */
+#define OCTEP_VF_32BYTE_INSTR 32
+#define OCTEP_VF_64BYTE_INSTR 64
+
+/* Tx Queue: maximum descriptors per ring */
+#define OCTEP_VF_IQ_MAX_DESCRIPTORS 1024
+/* Minimum input (Tx) requests to be enqueued to ring doorbell */
+#define OCTEP_VF_DB_MIN 8
+/* Packet threshold for Tx queue interrupt */
+#define OCTEP_VF_IQ_INTR_THRESHOLD 0x0
+
+/* Minimum watermark for backpressure */
+#define OCTEP_VF_OQ_WMARK_MIN 256
+
+/* Rx Queue: maximum descriptors per ring */
+#define OCTEP_VF_OQ_MAX_DESCRIPTORS 1024
+
+/* Rx buffer size: use page-size buffers.
+ * Build the skb from the allocated page buffer once the packet is received.
+ * When a gathered (multi-buffer) packet is received, use the head page as
+ * the skb head and the page buffers of consecutive Rx descriptors as
+ * fragments.
+ */
+#define OCTEP_VF_OQ_BUF_SIZE (SKB_WITH_OVERHEAD(PAGE_SIZE))
+#define OCTEP_VF_OQ_PKTS_PER_INTR 128
+#define OCTEP_VF_OQ_REFILL_THRESHOLD (OCTEP_VF_OQ_MAX_DESCRIPTORS / 4)
+
+#define OCTEP_VF_OQ_INTR_PKT_THRESHOLD 1
+#define OCTEP_VF_OQ_INTR_TIME_THRESHOLD 10
+
+#define OCTEP_VF_MSIX_NAME_SIZE (IFNAMSIZ + 32)
+
+/* Tx Queue wake threshold:
+ * wake up a stopped Tx queue if at least 2 descriptors are available.
+ * Even an skb with fragments consumes only one Tx queue descriptor entry.
+ */
+#define OCTEP_VF_WAKE_QUEUE_THRESHOLD 2
+
+/* Minimum MTU supported by Octeon network interface */
+#define OCTEP_VF_MIN_MTU ETH_MIN_MTU
+/* Maximum MTU supported by the Octeon interface */
+#define OCTEP_VF_MAX_MTU (10000 - (ETH_HLEN + ETH_FCS_LEN))
+/* Default MTU */
+#define OCTEP_VF_DEFAULT_MTU 1500
+
+/* Macros to get octeon config params */
+#define CFG_GET_IQ_CFG(cfg) ((cfg)->iq)
+#define CFG_GET_IQ_NUM_DESC(cfg) ((cfg)->iq.num_descs)
+#define CFG_GET_IQ_INSTR_TYPE(cfg) ((cfg)->iq.instr_type)
+#define CFG_GET_IQ_INSTR_SIZE(cfg) (64)
+#define CFG_GET_IQ_DB_MIN(cfg) ((cfg)->iq.db_min)
+#define CFG_GET_IQ_INTR_THRESHOLD(cfg) ((cfg)->iq.intr_threshold)
+
+#define CFG_GET_OQ_NUM_DESC(cfg) ((cfg)->oq.num_descs)
+#define CFG_GET_OQ_BUF_SIZE(cfg) ((cfg)->oq.buf_size)
+#define CFG_GET_OQ_REFILL_THRESHOLD(cfg) ((cfg)->oq.refill_threshold)
+#define CFG_GET_OQ_INTR_PKT(cfg) ((cfg)->oq.oq_intr_pkt)
+#define CFG_GET_OQ_INTR_TIME(cfg) ((cfg)->oq.oq_intr_time)
+#define CFG_GET_OQ_WMARK(cfg) ((cfg)->oq.wmark)
+
+#define CFG_GET_PORTS_ACTIVE_IO_RINGS(cfg) ((cfg)->ring_cfg.active_io_rings)
+#define CFG_GET_PORTS_MAX_IO_RINGS(cfg) ((cfg)->ring_cfg.max_io_rings)
+
+#define CFG_GET_CORE_TICS_PER_US(cfg) ((cfg)->core_cfg.core_tics_per_us)
+#define CFG_GET_COPROC_TICS_PER_US(cfg) ((cfg)->core_cfg.coproc_tics_per_us)
+
+#define CFG_GET_IOQ_MSIX(cfg) ((cfg)->msix_cfg.ioq_msix)
+
+/* Hardware Tx Queue configuration. */
+struct octep_vf_iq_config {
+ /* Size of the Input queue (number of commands) */
+ u16 num_descs;
+
+ /* Command size - 32 or 64 bytes */
+ u16 instr_type;
+
+ /* Minimum number of pending commands to accumulate before the driver
+ * rings the Input queue doorbell.
+ */
+ u16 db_min;
+
+ /* Trigger the IQ interrupt when processed cmd count reaches
+ * this level.
+ */
+ u32 intr_threshold;
+};
+
+/* Hardware Rx Queue configuration. */
+struct octep_vf_oq_config {
+ /* Size of Output queue (number of descriptors) */
+ u16 num_descs;
+
+ /* Size of buffer in this Output queue. */
+ u16 buf_size;
+
+ /* The number of buffers that were consumed during packet processing
+ * by the driver on this Output queue before the driver attempts to
+ * replenish the descriptor ring with new buffers.
+ */
+ u16 refill_threshold;
+
+ /* Interrupt Coalescing (Packet Count). Octeon will interrupt the host
+ * only if it sent as many packets as specified by this field.
+ * The driver usually does not use packet count interrupt coalescing.
+ */
+ u32 oq_intr_pkt;
+
+ /* Interrupt Coalescing (Time Interval). Octeon will interrupt the host
+ * if at least one packet was sent in the time interval specified by
+ * this field. The driver uses time interval interrupt coalescing by
+ * default. The time is specified in microseconds.
+ */
+ u32 oq_intr_time;
+
+ /* Watermark for backpressure.
+ * The Output queue sends a backpressure signal to the source when
+ * the free buffer count falls below wmark.
+ */
+ u32 wmark;
+};
+
+/* Tx/Rx configuration */
+struct octep_vf_ring_config {
+ /* Max number of IOQs */
+ u16 max_io_rings;
+
+ /* Number of active IOQs */
+ u16 active_io_rings;
+};
+
+/* Octeon MSI-x config. */
+struct octep_vf_msix_config {
+ /* Number of IOQ interrupts */
+ u16 ioq_msix;
+};
+
+/* Data Structure to hold configuration limits and active config */
+struct octep_vf_config {
+ /* Input Queue attributes. */
+ struct octep_vf_iq_config iq;
+
+ /* Output Queue attributes. */
+ struct octep_vf_oq_config oq;
+
+ /* MSI-X interrupt config */
+ struct octep_vf_msix_config msix_cfg;
+
+ /* NIC VF ring Configuration */
+ struct octep_vf_ring_config ring_cfg;
+};
+#endif /* _OCTEP_VF_CONFIG_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
new file mode 100644
index 0000000000..a1979b45e3
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+
+#include "octep_vf_config.h"
+#include "octep_vf_main.h"
+
+static const char octep_vf_gstrings_global_stats[][ETH_GSTRING_LEN] = {
+ "rx_alloc_errors",
+ "tx_busy_errors",
+ "tx_hw_pkts",
+ "tx_hw_octs",
+ "tx_hw_bcast",
+ "tx_hw_mcast",
+ "rx_hw_pkts",
+ "rx_hw_bytes",
+ "rx_hw_bcast",
+ "rx_dropped_bytes_fifo_full",
+};
+
+#define OCTEP_VF_GLOBAL_STATS_CNT (sizeof(octep_vf_gstrings_global_stats) / ETH_GSTRING_LEN)
+
+static const char octep_vf_gstrings_tx_q_stats[][ETH_GSTRING_LEN] = {
+ "tx_packets_posted[Q-%u]",
+ "tx_packets_completed[Q-%u]",
+ "tx_bytes[Q-%u]",
+ "tx_busy[Q-%u]",
+};
+
+#define OCTEP_VF_TX_Q_STATS_CNT (sizeof(octep_vf_gstrings_tx_q_stats) / ETH_GSTRING_LEN)
+
+static const char octep_vf_gstrings_rx_q_stats[][ETH_GSTRING_LEN] = {
+ "rx_packets[Q-%u]",
+ "rx_bytes[Q-%u]",
+ "rx_alloc_errors[Q-%u]",
+};
+
+#define OCTEP_VF_RX_Q_STATS_CNT (sizeof(octep_vf_gstrings_rx_q_stats) / ETH_GSTRING_LEN)
+
+static void octep_vf_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+
+ strscpy(info->driver, OCTEP_VF_DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(oct->pdev), sizeof(info->bus_info));
+}
+
+static void octep_vf_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ char *strings = (char *)data;
+ int i, j;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < OCTEP_VF_GLOBAL_STATS_CNT; i++) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ octep_vf_gstrings_global_stats[i]);
+ strings += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < num_queues; i++) {
+ for (j = 0; j < OCTEP_VF_TX_Q_STATS_CNT; j++) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ octep_vf_gstrings_tx_q_stats[j], i);
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (i = 0; i < num_queues; i++) {
+ for (j = 0; j < OCTEP_VF_RX_Q_STATS_CNT; j++) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ octep_vf_gstrings_rx_q_stats[j], i);
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static int octep_vf_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return OCTEP_VF_GLOBAL_STATS_CNT + (num_queues *
+ (OCTEP_VF_TX_Q_STATS_CNT + OCTEP_VF_RX_Q_STATS_CNT));
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void octep_vf_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ struct octep_vf_iface_tx_stats *iface_tx_stats;
+ struct octep_vf_iface_rx_stats *iface_rx_stats;
+ u64 rx_alloc_errors, tx_busy_errors;
+ int q, i;
+
+ rx_alloc_errors = 0;
+ tx_busy_errors = 0;
+
+ octep_vf_get_if_stats(oct);
+ iface_tx_stats = &oct->iface_tx_stats;
+ iface_rx_stats = &oct->iface_rx_stats;
+
+ for (q = 0; q < oct->num_oqs; q++) {
+ struct octep_vf_iq *iq = oct->iq[q];
+ struct octep_vf_oq *oq = oct->oq[q];
+
+ tx_busy_errors += iq->stats.tx_busy;
+ rx_alloc_errors += oq->stats.alloc_failures;
+ }
+ i = 0;
+ data[i++] = rx_alloc_errors;
+ data[i++] = tx_busy_errors;
+ data[i++] = iface_tx_stats->pkts;
+ data[i++] = iface_tx_stats->octs;
+ data[i++] = iface_tx_stats->bcst;
+ data[i++] = iface_tx_stats->mcst;
+ data[i++] = iface_rx_stats->pkts;
+ data[i++] = iface_rx_stats->octets;
+ data[i++] = iface_rx_stats->bcast_pkts;
+ data[i++] = iface_rx_stats->dropped_octets_fifo_full;
+
+ /* Per Tx Queue stats */
+ for (q = 0; q < oct->num_iqs; q++) {
+ struct octep_vf_iq *iq = oct->iq[q];
+
+ data[i++] = iq->stats.instr_posted;
+ data[i++] = iq->stats.instr_completed;
+ data[i++] = iq->stats.bytes_sent;
+ data[i++] = iq->stats.tx_busy;
+ }
+
+ /* Per Rx Queue stats */
+ for (q = 0; q < oct->num_oqs; q++) {
+ struct octep_vf_oq *oq = oct->oq[q];
+
+ data[i++] = oq->stats.packets;
+ data[i++] = oq->stats.bytes;
+ data[i++] = oq->stats.alloc_failures;
+ }
+}
+
+#define OCTEP_VF_SET_ETHTOOL_LINK_MODES_BITMAP(octep_vf_speeds, ksettings, name) \
+{ \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_T)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseT_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_R)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseR_FEC); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_CR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseCR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_KR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseKR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_LR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseLR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_SR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseSR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_25GBASE_CR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseCR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_25GBASE_KR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseKR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_25GBASE_SR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseSR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_40GBASE_CR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseCR4_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_40GBASE_KR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseKR4_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_40GBASE_LR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseLR4_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_40GBASE_SR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseSR4_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_CR2)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR2_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_KR2)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR2_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_SR2)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR2_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_CR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_KR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_LR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseLR_ER_FR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_SR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_100GBASE_CR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseCR4_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_100GBASE_KR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseKR4_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_100GBASE_LR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseLR4_ER4_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_100GBASE_SR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseSR4_Full); \
+}
+
+static int octep_vf_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ struct octep_vf_iface_link_info *link_info;
+ u32 advertised_modes, supported_modes;
+
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+
+ octep_vf_get_link_info(oct);
+
+ advertised_modes = oct->link_info.advertised_modes;
+ supported_modes = oct->link_info.supported_modes;
+ link_info = &oct->link_info;
+
+ OCTEP_VF_SET_ETHTOOL_LINK_MODES_BITMAP(supported_modes, cmd, supported);
+ OCTEP_VF_SET_ETHTOOL_LINK_MODES_BITMAP(advertised_modes, cmd, advertising);
+
+ if (link_info->autoneg) {
+ if (link_info->autoneg & OCTEP_VF_LINK_MODE_AUTONEG_SUPPORTED)
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+ if (link_info->autoneg & OCTEP_VF_LINK_MODE_AUTONEG_ADVERTISED) {
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
+ cmd->base.autoneg = AUTONEG_ENABLE;
+ } else {
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ }
+ } else {
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ }
+
+ cmd->base.port = PORT_FIBRE;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
+
+ if (netif_carrier_ok(netdev)) {
+ cmd->base.speed = link_info->speed;
+ cmd->base.duplex = DUPLEX_FULL;
+ } else {
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ }
+ return 0;
+}
+
+static const struct ethtool_ops octep_vf_ethtool_ops = {
+ .get_drvinfo = octep_vf_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = octep_vf_get_strings,
+ .get_sset_count = octep_vf_get_sset_count,
+ .get_ethtool_stats = octep_vf_get_ethtool_stats,
+ .get_link_ksettings = octep_vf_get_link_ksettings,
+};
+
+void octep_vf_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &octep_vf_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
new file mode 100644
index 0000000000..dd49d0b8b4
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
@@ -0,0 +1,1231 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/vmalloc.h>
+#include <net/netdev_queues.h>
+
+#include "octep_vf_config.h"
+#include "octep_vf_main.h"
+
+struct workqueue_struct *octep_vf_wq;
+
+/* Supported Devices */
+static const struct pci_device_id octep_vf_pci_id_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN93_VF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF95N_VF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN98_VF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN10KA_VF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF10KA_VF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF10KB_VF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN10KB_VF)},
+ {0, },
+};
+MODULE_DEVICE_TABLE(pci, octep_vf_pci_id_tbl);
+
+MODULE_AUTHOR("Veerasenareddy Burru <vburru@marvell.com>");
+MODULE_DESCRIPTION(OCTEP_VF_DRV_STRING);
+MODULE_LICENSE("GPL");
+
+/**
+ * octep_vf_alloc_ioq_vectors() - Allocate Tx/Rx Queue interrupt info.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Allocate resources to hold per Tx/Rx queue interrupt info.
+ * This is the information passed to interrupt handler, from which napi poll
+ * is scheduled and includes quick access to private data of Tx/Rx queue
+ * corresponding to the interrupt being handled.
+ *
+ * Return: 0, on successful allocation of resources for all queue interrupts.
+ * -1, if failed to allocate any resource.
+ */
+static int octep_vf_alloc_ioq_vectors(struct octep_vf_device *oct)
+{
+ struct octep_vf_ioq_vector *ioq_vector;
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ oct->ioq_vector[i] = vzalloc(sizeof(*oct->ioq_vector[i]));
+ if (!oct->ioq_vector[i])
+ goto free_ioq_vector;
+
+ ioq_vector = oct->ioq_vector[i];
+ ioq_vector->iq = oct->iq[i];
+ ioq_vector->oq = oct->oq[i];
+ ioq_vector->octep_vf_dev = oct;
+ }
+
+ dev_info(&oct->pdev->dev, "Allocated %d IOQ vectors\n", oct->num_oqs);
+ return 0;
+
+free_ioq_vector:
+ while (i) {
+ i--;
+ vfree(oct->ioq_vector[i]);
+ oct->ioq_vector[i] = NULL;
+ }
+ return -1;
+}
+
+/**
+ * octep_vf_free_ioq_vectors() - Free Tx/Rx Queue interrupt vector info.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_vf_free_ioq_vectors(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ if (oct->ioq_vector[i]) {
+ vfree(oct->ioq_vector[i]);
+ oct->ioq_vector[i] = NULL;
+ }
+ }
+ netdev_info(oct->netdev, "Freed IOQ Vectors\n");
+}
+
+/**
+ * octep_vf_enable_msix_range() - enable MSI-x interrupts.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Allocate and enable all MSI-x interrupts (queue and non-queue interrupts)
+ * for the Octeon device.
+ *
+ * Return: 0, on successfully enabling all MSI-x interrupts.
+ * -1, if failed to enable any MSI-x interrupt.
+ */
+static int octep_vf_enable_msix_range(struct octep_vf_device *oct)
+{
+ int num_msix, msix_allocated;
+ int i;
+
+	/* The VF uses only Tx/Rx queue (IOQ) MSI-X vectors; it has no
+	 * non-IOQ (generic) interrupts.
+	 */
+	num_msix = oct->num_oqs;
+ oct->msix_entries = kcalloc(num_msix, sizeof(struct msix_entry), GFP_KERNEL);
+ if (!oct->msix_entries)
+ goto msix_alloc_err;
+
+ for (i = 0; i < num_msix; i++)
+ oct->msix_entries[i].entry = i;
+
+ msix_allocated = pci_enable_msix_range(oct->pdev, oct->msix_entries,
+ num_msix, num_msix);
+ if (msix_allocated != num_msix) {
+ dev_err(&oct->pdev->dev,
+ "Failed to enable %d msix irqs; got only %d\n",
+ num_msix, msix_allocated);
+ goto enable_msix_err;
+ }
+ oct->num_irqs = msix_allocated;
+ dev_info(&oct->pdev->dev, "MSI-X enabled successfully\n");
+
+ return 0;
+
+enable_msix_err:
+ if (msix_allocated > 0)
+ pci_disable_msix(oct->pdev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+msix_alloc_err:
+ return -1;
+}
+
+/**
+ * octep_vf_disable_msix() - disable MSI-x interrupts.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Disable MSI-x on the Octeon device.
+ */
+static void octep_vf_disable_msix(struct octep_vf_device *oct)
+{
+ pci_disable_msix(oct->pdev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+ dev_info(&oct->pdev->dev, "Disabled MSI-X\n");
+}
+
+/**
+ * octep_vf_ioq_intr_handler() - handler for all Tx/Rx queue interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data; contains pointers to Tx/Rx queue private data
+ *        and the corresponding NAPI context.
+ *
+ * This is the common handler for all Tx/Rx queue interrupts.
+ */
+static irqreturn_t octep_vf_ioq_intr_handler(int irq, void *data)
+{
+ struct octep_vf_ioq_vector *ioq_vector = data;
+ struct octep_vf_device *oct = ioq_vector->octep_vf_dev;
+
+ return oct->hw_ops.ioq_intr_handler(ioq_vector);
+}
+
+/**
+ * octep_vf_request_irqs() - Register interrupt handlers.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Register handlers for all queue and non-queue interrupts.
+ *
+ * Return: 0, on successful registration of all interrupt handlers.
+ * -1, on any error.
+ */
+static int octep_vf_request_irqs(struct octep_vf_device *oct)
+{
+ struct net_device *netdev = oct->netdev;
+ struct octep_vf_ioq_vector *ioq_vector;
+ struct msix_entry *msix_entry;
+ int ret, i;
+
+ /* Request IRQs for Tx/Rx queues */
+ for (i = 0; i < oct->num_oqs; i++) {
+ ioq_vector = oct->ioq_vector[i];
+ msix_entry = &oct->msix_entries[i];
+
+ snprintf(ioq_vector->name, sizeof(ioq_vector->name),
+ "%s-q%d", netdev->name, i);
+ ret = request_irq(msix_entry->vector,
+ octep_vf_ioq_intr_handler, 0,
+ ioq_vector->name, ioq_vector);
+ if (ret) {
+ netdev_err(netdev,
+ "request_irq failed for Q-%d; err=%d",
+ i, ret);
+ goto ioq_irq_err;
+ }
+
+ cpumask_set_cpu(i % num_online_cpus(),
+ &ioq_vector->affinity_mask);
+ irq_set_affinity_hint(msix_entry->vector,
+ &ioq_vector->affinity_mask);
+ }
+
+ return 0;
+ioq_irq_err:
+	while (i) {
+		--i;
+		irq_set_affinity_hint(oct->msix_entries[i].vector, NULL);
+		free_irq(oct->msix_entries[i].vector, oct->ioq_vector[i]);
+	}
+ return -1;
+}
+
+/**
+ * octep_vf_free_irqs() - free all registered interrupts.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Free all queue and non-queue interrupts of the Octeon device.
+ */
+static void octep_vf_free_irqs(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_irqs; i++) {
+ irq_set_affinity_hint(oct->msix_entries[i].vector, NULL);
+ free_irq(oct->msix_entries[i].vector, oct->ioq_vector[i]);
+ }
+ netdev_info(oct->netdev, "IRQs freed\n");
+}
+
+/**
+ * octep_vf_setup_irqs() - setup interrupts for the Octeon device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Allocate data structures to hold per interrupt information, allocate/enable
+ * MSI-x interrupt and register interrupt handlers.
+ *
+ * Return: 0, on successful allocation and registration of all interrupts.
+ * -1, on any error.
+ */
+static int octep_vf_setup_irqs(struct octep_vf_device *oct)
+{
+ if (octep_vf_alloc_ioq_vectors(oct))
+ goto ioq_vector_err;
+
+ if (octep_vf_enable_msix_range(oct))
+ goto enable_msix_err;
+
+ if (octep_vf_request_irqs(oct))
+ goto request_irq_err;
+
+ return 0;
+
+request_irq_err:
+ octep_vf_disable_msix(oct);
+enable_msix_err:
+ octep_vf_free_ioq_vectors(oct);
+ioq_vector_err:
+ return -1;
+}
+
+/**
+ * octep_vf_clean_irqs() - free all interrupts and its resources.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_vf_clean_irqs(struct octep_vf_device *oct)
+{
+ octep_vf_free_irqs(oct);
+ octep_vf_disable_msix(oct);
+ octep_vf_free_ioq_vectors(oct);
+}
+
+/**
+ * octep_vf_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue.
+ *
+ * @iq: Octeon Tx queue data structure.
+ * @oq: Octeon Rx queue data structure.
+ */
+static void octep_vf_enable_ioq_irq(struct octep_vf_iq *iq, struct octep_vf_oq *oq)
+{
+ u32 pkts_pend = oq->pkts_pending;
+
+ netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no);
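+	/* Acknowledge the processed Tx/Rx counts by writing them back to
+	 * the count registers before re-arming the interrupts below.
+	 */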
+ if (iq->pkts_processed) {
+ writel(iq->pkts_processed, iq->inst_cnt_reg);
+ iq->pkt_in_done -= iq->pkts_processed;
+ iq->pkts_processed = 0;
+ }
+ if (oq->last_pkt_count - pkts_pend) {
+ writel(oq->last_pkt_count - pkts_pend, oq->pkts_sent_reg);
+ oq->last_pkt_count = pkts_pend;
+ }
+
+	/* Flush the previous writes before writing to the RESEND bit */
+ smp_wmb();
+ writeq(1UL << OCTEP_VF_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg);
+ writeq(1UL << OCTEP_VF_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg);
+}
+
+/**
+ * octep_vf_napi_poll() - NAPI poll function for Tx/Rx.
+ *
+ * @napi: pointer to napi context.
+ * @budget: max number of packets to be processed in single invocation.
+ */
+static int octep_vf_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct octep_vf_ioq_vector *ioq_vector =
+ container_of(napi, struct octep_vf_ioq_vector, napi);
+ u32 tx_pending, rx_done;
+
+ tx_pending = octep_vf_iq_process_completions(ioq_vector->iq, 64);
+ rx_done = octep_vf_oq_process_rx(ioq_vector->oq, budget);
+
+ /* need more polling if tx completion processing is still pending or
+ * processed at least 'budget' number of rx packets.
+ */
+ if (tx_pending || rx_done >= budget)
+ return budget;
+
+ if (likely(napi_complete_done(napi, rx_done)))
+ octep_vf_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq);
+
+ return rx_done;
+}
+
+/**
+ * octep_vf_napi_add() - Add NAPI poll for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_vf_napi_add(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Adding NAPI on Q-%d\n", i);
+ netif_napi_add(oct->netdev, &oct->ioq_vector[i]->napi, octep_vf_napi_poll);
+ oct->oq[i]->napi = &oct->ioq_vector[i]->napi;
+ }
+}
+
+/**
+ * octep_vf_napi_delete() - delete NAPI poll callback for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_vf_napi_delete(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Deleting NAPI on Q-%d\n", i);
+ netif_napi_del(&oct->ioq_vector[i]->napi);
+ oct->oq[i]->napi = NULL;
+ }
+}
+
+/**
+ * octep_vf_napi_enable() - enable NAPI for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_vf_napi_enable(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Enabling NAPI on Q-%d\n", i);
+ napi_enable(&oct->ioq_vector[i]->napi);
+ }
+}
+
+/**
+ * octep_vf_napi_disable() - disable NAPI for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_vf_napi_disable(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Disabling NAPI on Q-%d\n", i);
+ napi_disable(&oct->ioq_vector[i]->napi);
+ }
+}
+
+static void octep_vf_link_up(struct net_device *netdev)
+{
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+}
+
+static void octep_vf_set_rx_state(struct octep_vf_device *oct, bool up)
+{
+ int err;
+
+ err = octep_vf_mbox_set_rx_state(oct, up);
+ if (err)
+ netdev_err(oct->netdev, "Set Rx state to %d failed with err:%d\n", up, err);
+}
+
+static int octep_vf_get_link_status(struct octep_vf_device *oct)
+{
+ int err;
+
+ err = octep_vf_mbox_get_link_status(oct, &oct->link_info.oper_up);
+ if (err)
+ netdev_err(oct->netdev, "Get link status failed with err:%d\n", err);
+ return oct->link_info.oper_up;
+}
+
+static void octep_vf_set_link_status(struct octep_vf_device *oct, bool up)
+{
+ int err;
+
+ err = octep_vf_mbox_set_link_status(oct, up);
+ if (err) {
+ netdev_err(oct->netdev, "Set link status to %d failed with err:%d\n", up, err);
+ return;
+ }
+ oct->link_info.oper_up = up;
+}
+
+/**
+ * octep_vf_open() - start the octeon network device.
+ *
+ * @netdev: pointer to kernel network device.
+ *
+ * Set up Tx/Rx queues and interrupts, then enable hardware operation of
+ * the Tx/Rx queues and interrupts.
+ *
+ * Return: 0, on successfully setting up device and bring it up.
+ * -1, on any error.
+ */
+static int octep_vf_open(struct net_device *netdev)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ int err, ret;
+
+ netdev_info(netdev, "Starting netdev ...\n");
+ netif_carrier_off(netdev);
+
+ oct->hw_ops.reset_io_queues(oct);
+
+ if (octep_vf_setup_iqs(oct))
+ goto setup_iq_err;
+ if (octep_vf_setup_oqs(oct))
+ goto setup_oq_err;
+ if (octep_vf_setup_irqs(oct))
+ goto setup_irq_err;
+
+	err = netif_set_real_num_tx_queues(netdev, oct->num_iqs);
+ if (err)
+ goto set_queues_err;
+	err = netif_set_real_num_rx_queues(netdev, oct->num_oqs);
+ if (err)
+ goto set_queues_err;
+
+ octep_vf_napi_add(oct);
+ octep_vf_napi_enable(oct);
+
+ oct->link_info.admin_up = 1;
+ octep_vf_set_rx_state(oct, true);
+
+ ret = octep_vf_get_link_status(oct);
+ if (!ret)
+ octep_vf_set_link_status(oct, true);
+
+ /* Enable the input and output queues for this Octeon device */
+ oct->hw_ops.enable_io_queues(oct);
+
+ /* Enable Octeon device interrupts */
+ oct->hw_ops.enable_interrupts(oct);
+
+ octep_vf_oq_dbell_init(oct);
+
+ ret = octep_vf_get_link_status(oct);
+ if (ret)
+ octep_vf_link_up(netdev);
+
+ return 0;
+
+set_queues_err:
+ octep_vf_napi_disable(oct);
+ octep_vf_napi_delete(oct);
+ octep_vf_clean_irqs(oct);
+setup_irq_err:
+ octep_vf_free_oqs(oct);
+setup_oq_err:
+ octep_vf_free_iqs(oct);
+setup_iq_err:
+ return -1;
+}
+
+/**
+ * octep_vf_stop() - stop the octeon network device.
+ *
+ * @netdev: pointer to kernel network device.
+ *
+ * stop the device Tx/Rx operations, bring down the link and
+ * free up all resources allocated for Tx/Rx queues and interrupts.
+ */
+static int octep_vf_stop(struct net_device *netdev)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+
+ netdev_info(netdev, "Stopping the device ...\n");
+
+ /* Stop Tx from stack */
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ octep_vf_set_link_status(oct, false);
+ octep_vf_set_rx_state(oct, false);
+
+ oct->link_info.admin_up = 0;
+ oct->link_info.oper_up = 0;
+
+ oct->hw_ops.disable_interrupts(oct);
+ octep_vf_napi_disable(oct);
+ octep_vf_napi_delete(oct);
+
+ octep_vf_clean_irqs(oct);
+ octep_vf_clean_iqs(oct);
+
+ oct->hw_ops.disable_io_queues(oct);
+ oct->hw_ops.reset_io_queues(oct);
+ octep_vf_free_oqs(oct);
+ octep_vf_free_iqs(oct);
+ netdev_info(netdev, "Device stopped !!\n");
+ return 0;
+}
+
+/**
+ * octep_vf_iq_full_check() - check if a Tx queue is full.
+ *
+ * @iq: Octeon Tx queue data structure.
+ *
+ * Return: 0, if the Tx queue is not full.
+ * 1, if the Tx queue is full.
+ */
+static int octep_vf_iq_full_check(struct octep_vf_iq *iq)
+{
+ int ret;
+
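+	/* netif_subqueue_maybe_stop() compares the free descriptor space
+	 * against the wake threshold: it stops the queue when space runs
+	 * out and restarts it if space was freed up concurrently.
+	 */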
+ ret = netif_subqueue_maybe_stop(iq->netdev, iq->q_no, IQ_INSTR_SPACE(iq),
+ OCTEP_VF_WAKE_QUEUE_THRESHOLD,
+ OCTEP_VF_WAKE_QUEUE_THRESHOLD);
+ switch (ret) {
+ case 0: /* Stopped the queue, since IQ is full */
+ return 1;
+	case -1: /* Pending write-index updates from
+		  * octep_vf_iq_process_completions() on other CPUs caused
+		  * the queue to get re-enabled after being stopped.
+		  */
+		iq->stats.restart_cnt++;
+		fallthrough;
+	case 1: /* Queue left enabled, since IQ is not yet full */
+ return 0;
+ }
+
+ return 1;
+}
+
+/**
+ * octep_vf_start_xmit() - Enqueue packet to Octeon hardware Tx Queue.
+ *
+ * @skb: packet skbuff pointer.
+ * @netdev: kernel network device.
+ *
+ * Return: NETDEV_TX_BUSY, if Tx Queue is full.
+ * NETDEV_TX_OK, if successfully enqueued to hardware Tx queue.
+ */
+static netdev_tx_t octep_vf_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ netdev_features_t feat = netdev->features;
+ struct octep_vf_tx_sglist_desc *sglist;
+ struct octep_vf_tx_buffer *tx_buffer;
+ struct octep_vf_tx_desc_hw *hw_desc;
+ struct skb_shared_info *shinfo;
+ struct octep_vf_instr_hdr *ih;
+ struct octep_vf_iq *iq;
+ skb_frag_t *frag;
+ u16 nr_frags, si;
+ int xmit_more;
+ u16 q_no, wi;
+
+ if (skb_put_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+
+ q_no = skb_get_queue_mapping(skb);
+ if (q_no >= oct->num_iqs) {
+ netdev_err(netdev, "Invalid Tx skb->queue_mapping=%d\n", q_no);
+ q_no = q_no % oct->num_iqs;
+ }
+
+ iq = oct->iq[q_no];
+
+ shinfo = skb_shinfo(skb);
+ nr_frags = shinfo->nr_frags;
+
+ wi = iq->host_write_index;
+ hw_desc = &iq->desc_ring[wi];
+ hw_desc->ih64 = 0;
+
+ tx_buffer = iq->buff_info + wi;
+ tx_buffer->skb = skb;
+
+	ih = &hw_desc->ih;
+	ih->pkind = oct->fw_info.pkind;
+	ih->fsz = oct->fw_info.fsz;
+	ih->tlen = skb->len + ih->fsz;
+
+ if (!nr_frags) {
+ tx_buffer->gather = 0;
+ tx_buffer->dma = dma_map_single(iq->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(iq->dev, tx_buffer->dma))
+ goto dma_map_err;
+ hw_desc->dptr = tx_buffer->dma;
+ } else {
+ /* Scatter/Gather */
+ dma_addr_t dma;
+ u16 len;
+
+ sglist = tx_buffer->sglist;
+
+ ih->gsz = nr_frags + 1;
+ ih->gather = 1;
+ tx_buffer->gather = 1;
+
+ len = skb_headlen(skb);
+ dma = dma_map_single(iq->dev, skb->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(iq->dev, dma))
+ goto dma_map_err;
+
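+		/* Each sglist entry holds 4 fragment lengths (packed in
+		 * reverse order within one 64-bit word) followed by the 4
+		 * matching DMA addresses; entry 0 maps the linear skb data.
+		 */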
+ memset(sglist, 0, OCTEP_VF_SGLIST_SIZE_PER_PKT);
+ sglist[0].len[3] = len;
+ sglist[0].dma_ptr[0] = dma;
+
+ si = 1; /* entry 0 is main skb, mapped above */
+ frag = &shinfo->frags[0];
+ while (nr_frags--) {
+ len = skb_frag_size(frag);
+ dma = skb_frag_dma_map(iq->dev, frag, 0,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(iq->dev, dma))
+ goto dma_map_sg_err;
+
+ sglist[si >> 2].len[3 - (si & 3)] = len;
+ sglist[si >> 2].dma_ptr[si & 3] = dma;
+
+ frag++;
+ si++;
+ }
+ hw_desc->dptr = tx_buffer->sglist_dma;
+ }
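+	/* Fill Tx offload metadata only when the firmware advertised
+	 * support: TSO needs gso size/segs, checksum offload only a flag.
+	 */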
+ if (oct->fw_info.tx_ol_flags) {
+ if ((feat & (NETIF_F_TSO)) && (skb_is_gso(skb))) {
+ hw_desc->txm.ol_flags = OCTEP_VF_TX_OFFLOAD_CKSUM;
+ hw_desc->txm.ol_flags |= OCTEP_VF_TX_OFFLOAD_TSO;
+ hw_desc->txm.gso_size = skb_shinfo(skb)->gso_size;
+ hw_desc->txm.gso_segs = skb_shinfo(skb)->gso_segs;
+ } else if (feat & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
+ hw_desc->txm.ol_flags = OCTEP_VF_TX_OFFLOAD_CKSUM;
+ }
+	/* due to ESR, txm will be byte-swapped by hardware; pre-swap it here */
+ hw_desc->txm64[0] = (__force u64)cpu_to_be64(hw_desc->txm64[0]);
+ }
+
+ xmit_more = netdev_xmit_more();
+
+ netdev_tx_sent_queue(iq->netdev_q, skb->len);
+
+ skb_tx_timestamp(skb);
+ iq->fill_cnt++;
+ wi++;
+ iq->host_write_index = wi & iq->ring_size_mask;
+
+	/* octep_vf_iq_full_check() stops the queue and returns 1 if the
+	 * queue became full after inserting the current packet; in that
+	 * case, go ahead and ring the doorbell immediately.
+	 */
+ if (!octep_vf_iq_full_check(iq) && xmit_more &&
+ iq->fill_cnt < iq->fill_threshold)
+ return NETDEV_TX_OK;
+
+ goto ring_dbell;
+
+dma_map_sg_err:
+	if (si > 0) {
+		dma_unmap_single(iq->dev, sglist[0].dma_ptr[0],
+				 sglist[0].len[3], DMA_TO_DEVICE);
+		sglist[0].len[3] = 0;
+	}
+	while (si > 1) {
+		si--;
+		dma_unmap_page(iq->dev, sglist[si >> 2].dma_ptr[si & 3],
+			       sglist[si >> 2].len[3 - (si & 3)], DMA_TO_DEVICE);
+		sglist[si >> 2].len[3 - (si & 3)] = 0;
+	}
+ tx_buffer->gather = 0;
+dma_map_err:
+ dev_kfree_skb_any(skb);
+ring_dbell:
+ /* Flush the hw descriptors before writing to doorbell */
+ smp_wmb();
+ writel(iq->fill_cnt, iq->doorbell_reg);
+ iq->stats.instr_posted += iq->fill_cnt;
+ iq->fill_cnt = 0;
+ return NETDEV_TX_OK;
+}
+
+int octep_vf_get_if_stats(struct octep_vf_device *oct)
+{
+ struct octep_vf_iface_rxtx_stats vf_stats;
+ int ret, size;
+
+ memset(&vf_stats, 0, sizeof(struct octep_vf_iface_rxtx_stats));
+ ret = octep_vf_mbox_bulk_read(oct, OCTEP_PFVF_MBOX_CMD_GET_STATS,
+ (u8 *)&vf_stats, &size);
+
+ if (ret)
+ return ret;
+
+ memcpy(&oct->iface_rx_stats, &vf_stats.iface_rx_stats,
+ sizeof(struct octep_vf_iface_rx_stats));
+ memcpy(&oct->iface_tx_stats, &vf_stats.iface_tx_stats,
+ sizeof(struct octep_vf_iface_tx_stats));
+
+ return 0;
+}
+
+int octep_vf_get_link_info(struct octep_vf_device *oct)
+{
+ int ret, size;
+
+ ret = octep_vf_mbox_bulk_read(oct, OCTEP_PFVF_MBOX_CMD_GET_LINK_INFO,
+ (u8 *)&oct->link_info, &size);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Get VF link info failed via VF Mbox\n");
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * octep_vf_get_stats64() - Get Octeon network device statistics.
+ *
+ * @netdev: kernel network device.
+ * @stats: pointer to stats structure to be filled in.
+ */
+static void octep_vf_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
+ int q;
+
+ tx_packets = 0;
+ tx_bytes = 0;
+ rx_packets = 0;
+ rx_bytes = 0;
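+	/* Sum the per-queue software counters; the interface-level
+	 * counters below are fetched from the firmware over the mailbox.
+	 */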
+ for (q = 0; q < oct->num_oqs; q++) {
+ struct octep_vf_iq *iq = oct->iq[q];
+ struct octep_vf_oq *oq = oct->oq[q];
+
+ tx_packets += iq->stats.instr_completed;
+ tx_bytes += iq->stats.bytes_sent;
+ rx_packets += oq->stats.packets;
+ rx_bytes += oq->stats.bytes;
+ }
+ stats->tx_packets = tx_packets;
+ stats->tx_bytes = tx_bytes;
+ stats->rx_packets = rx_packets;
+ stats->rx_bytes = rx_bytes;
+ if (!octep_vf_get_if_stats(oct)) {
+ stats->multicast = oct->iface_rx_stats.mcast_pkts;
+ stats->rx_errors = oct->iface_rx_stats.err_pkts;
+ stats->rx_dropped = oct->iface_rx_stats.dropped_pkts_fifo_full +
+ oct->iface_rx_stats.err_pkts;
+ stats->rx_missed_errors = oct->iface_rx_stats.dropped_pkts_fifo_full;
+ stats->tx_dropped = oct->iface_tx_stats.dropped;
+ }
+}
+
+/**
+ * octep_vf_tx_timeout_task - work queue task to handle Tx queue timeout.
+ *
+ * @work: pointer to Tx queue timeout work_struct
+ *
+ * Stop and start the device so that it frees up all queue resources
+ * and restarts the queues, which potentially clears a Tx queue timeout
+ * condition.
+ */
+static void octep_vf_tx_timeout_task(struct work_struct *work)
+{
+ struct octep_vf_device *oct = container_of(work, struct octep_vf_device,
+ tx_timeout_task);
+ struct net_device *netdev = oct->netdev;
+
+ rtnl_lock();
+ if (netif_running(netdev)) {
+ octep_vf_stop(netdev);
+ octep_vf_open(netdev);
+ }
+ rtnl_unlock();
+ netdev_put(netdev, NULL);
+}
+
+/**
+ * octep_vf_tx_timeout() - Handle Tx Queue timeout.
+ *
+ * @netdev: pointer to kernel network device.
+ * @txqueue: Timed out Tx queue number.
+ *
+ * Schedule a work to handle Tx queue timeout.
+ */
+static void octep_vf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+
+ netdev_hold(netdev, NULL, GFP_ATOMIC);
+ schedule_work(&oct->tx_timeout_task);
+}
+
+static int octep_vf_set_mac(struct net_device *netdev, void *p)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ struct sockaddr *addr = (struct sockaddr *)p;
+ int err;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ err = octep_vf_mbox_set_mac_addr(oct, addr->sa_data);
+ if (err)
+ return err;
+
+ memcpy(oct->mac_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(netdev, addr->sa_data);
+
+ return 0;
+}
+
+static int octep_vf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ struct octep_vf_iface_link_info *link_info;
+ int err;
+
+ link_info = &oct->link_info;
+ if (link_info->mtu == new_mtu)
+ return 0;
+
+ err = octep_vf_mbox_set_mtu(oct, new_mtu);
+ if (!err) {
+ oct->link_info.mtu = new_mtu;
+ netdev->mtu = new_mtu;
+ }
+ return err;
+}
+
+static int octep_vf_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ u16 rx_offloads = 0, tx_offloads = 0;
+ int err;
+
+ /* We only support features received from firmware */
+ if ((features & netdev->hw_features) != features)
+ return -EINVAL;
+
+ if (features & NETIF_F_TSO)
+ tx_offloads |= OCTEP_VF_TX_OFFLOAD_TSO;
+
+ if (features & NETIF_F_TSO6)
+ tx_offloads |= OCTEP_VF_TX_OFFLOAD_TSO;
+
+ if (features & NETIF_F_IP_CSUM)
+ tx_offloads |= OCTEP_VF_TX_OFFLOAD_CKSUM;
+
+ if (features & NETIF_F_IPV6_CSUM)
+ tx_offloads |= OCTEP_VF_TX_OFFLOAD_CKSUM;
+
+ if (features & NETIF_F_RXCSUM)
+ rx_offloads |= OCTEP_VF_RX_OFFLOAD_CKSUM;
+
+ err = octep_vf_mbox_set_offloads(oct, tx_offloads, rx_offloads);
+ if (!err)
+ netdev->features = features;
+
+ return err;
+}
+
+static const struct net_device_ops octep_vf_netdev_ops = {
+ .ndo_open = octep_vf_open,
+ .ndo_stop = octep_vf_stop,
+ .ndo_start_xmit = octep_vf_start_xmit,
+ .ndo_get_stats64 = octep_vf_get_stats64,
+ .ndo_tx_timeout = octep_vf_tx_timeout,
+ .ndo_set_mac_address = octep_vf_set_mac,
+ .ndo_change_mtu = octep_vf_change_mtu,
+ .ndo_set_features = octep_vf_set_features,
+};
+
+static const char *octep_vf_devid_to_str(struct octep_vf_device *oct)
+{
+ switch (oct->chip_id) {
+ case OCTEP_PCI_DEVICE_ID_CN93_VF:
+ return "CN93XX";
+ case OCTEP_PCI_DEVICE_ID_CNF95N_VF:
+ return "CNF95N";
+ case OCTEP_PCI_DEVICE_ID_CN10KA_VF:
+ return "CN10KA";
+ case OCTEP_PCI_DEVICE_ID_CNF10KA_VF:
+ return "CNF10KA";
+ case OCTEP_PCI_DEVICE_ID_CNF10KB_VF:
+ return "CNF10KB";
+ case OCTEP_PCI_DEVICE_ID_CN10KB_VF:
+ return "CN10KB";
+ default:
+ return "Unsupported";
+ }
+}
+
+/**
+ * octep_vf_device_setup() - Setup Octeon Device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Setup Octeon device hardware operations, configuration, etc ...
+ */
+int octep_vf_device_setup(struct octep_vf_device *oct)
+{
+ struct pci_dev *pdev = oct->pdev;
+
+ /* allocate memory for oct->conf */
+ oct->conf = kzalloc(sizeof(*oct->conf), GFP_KERNEL);
+ if (!oct->conf)
+ return -ENOMEM;
+
+ /* Map BAR region 0 */
+ oct->mmio.hw_addr = ioremap(pci_resource_start(oct->pdev, 0),
+ pci_resource_len(oct->pdev, 0));
+ if (!oct->mmio.hw_addr) {
+ dev_err(&pdev->dev,
+ "Failed to remap BAR0; start=0x%llx len=0x%llx\n",
+ pci_resource_start(oct->pdev, 0),
+ pci_resource_len(oct->pdev, 0));
+ goto ioremap_err;
+ }
+ oct->mmio.mapped = 1;
+
+ oct->chip_id = pdev->device;
+ oct->rev_id = pdev->revision;
+ dev_info(&pdev->dev, "chip_id = 0x%x\n", pdev->device);
+
+ switch (oct->chip_id) {
+ case OCTEP_PCI_DEVICE_ID_CN93_VF:
+ case OCTEP_PCI_DEVICE_ID_CNF95N_VF:
+ case OCTEP_PCI_DEVICE_ID_CN98_VF:
+ dev_info(&pdev->dev, "Setting up OCTEON %s VF PASS%d.%d\n",
+ octep_vf_devid_to_str(oct), OCTEP_VF_MAJOR_REV(oct),
+ OCTEP_VF_MINOR_REV(oct));
+ octep_vf_device_setup_cn93(oct);
+ break;
+ case OCTEP_PCI_DEVICE_ID_CNF10KA_VF:
+ case OCTEP_PCI_DEVICE_ID_CN10KA_VF:
+ case OCTEP_PCI_DEVICE_ID_CNF10KB_VF:
+ case OCTEP_PCI_DEVICE_ID_CN10KB_VF:
+ dev_info(&pdev->dev, "Setting up OCTEON %s VF PASS%d.%d\n",
+ octep_vf_devid_to_str(oct), OCTEP_VF_MAJOR_REV(oct),
+ OCTEP_VF_MINOR_REV(oct));
+ octep_vf_device_setup_cnxk(oct);
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported device\n");
+ goto unsupported_dev;
+ }
+
+ return 0;
+
+unsupported_dev:
+ iounmap(oct->mmio.hw_addr);
+ioremap_err:
+ kfree(oct->conf);
+ return -EOPNOTSUPP;
+}
+
+/**
+ * octep_vf_device_cleanup() - Cleanup Octeon Device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Cleanup Octeon device allocated resources.
+ */
+static void octep_vf_device_cleanup(struct octep_vf_device *oct)
+{
+ dev_info(&oct->pdev->dev, "Cleaning up Octeon Device ...\n");
+
+ if (oct->mmio.mapped)
+ iounmap(oct->mmio.hw_addr);
+
+ kfree(oct->conf);
+ oct->conf = NULL;
+}
+
+static int octep_vf_get_mac_addr(struct octep_vf_device *oct, u8 *addr)
+{
+ return octep_vf_mbox_get_mac_addr(oct, addr);
+}
+
+/**
+ * octep_vf_probe() - Octeon PCI device probe handler.
+ *
+ * @pdev: PCI device structure.
+ * @ent: entry in Octeon PCI device ID table.
+ *
+ * Initializes and enables the Octeon PCI device for network operations.
+ * Initializes Octeon private data structure and registers a network device.
+ */
+static int octep_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct octep_vf_device *octep_vf_dev;
+ struct net_device *netdev;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable PCI device\n");
+ return err;
+ }
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set DMA mask !!\n");
+ goto disable_pci_device;
+ }
+
+ err = pci_request_mem_regions(pdev, OCTEP_VF_DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to map PCI memory regions\n");
+ goto disable_pci_device;
+ }
+
+ pci_set_master(pdev);
+
+ netdev = alloc_etherdev_mq(sizeof(struct octep_vf_device),
+ OCTEP_VF_MAX_QUEUES);
+ if (!netdev) {
+ dev_err(&pdev->dev, "Failed to allocate netdev\n");
+ err = -ENOMEM;
+ goto mem_regions_release;
+ }
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ octep_vf_dev = netdev_priv(netdev);
+ octep_vf_dev->netdev = netdev;
+ octep_vf_dev->pdev = pdev;
+ octep_vf_dev->dev = &pdev->dev;
+ pci_set_drvdata(pdev, octep_vf_dev);
+
+ err = octep_vf_device_setup(octep_vf_dev);
+ if (err) {
+ dev_err(&pdev->dev, "Device setup failed\n");
+ goto netdevice_free;
+ }
+ INIT_WORK(&octep_vf_dev->tx_timeout_task, octep_vf_tx_timeout_task);
+
+ netdev->netdev_ops = &octep_vf_netdev_ops;
+ octep_vf_set_ethtool_ops(netdev);
+ netif_carrier_off(netdev);
+
+ if (octep_vf_setup_mbox(octep_vf_dev)) {
+ dev_err(&pdev->dev, "VF Mailbox setup failed\n");
+ err = -ENOMEM;
+ goto device_cleanup;
+ }
+
+ if (octep_vf_mbox_version_check(octep_vf_dev)) {
+ dev_err(&pdev->dev, "PF VF Mailbox version mismatch\n");
+ err = -EINVAL;
+ goto delete_mbox;
+ }
+
+ if (octep_vf_mbox_get_fw_info(octep_vf_dev)) {
+ dev_err(&pdev->dev, "unable to get fw info\n");
+ err = -EINVAL;
+ goto delete_mbox;
+ }
+
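+	/* Advertise only the offloads that the firmware reported as
+	 * supported in the fw_info response above.
+	 */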
+ netdev->hw_features = NETIF_F_SG;
+ if (OCTEP_VF_TX_IP_CSUM(octep_vf_dev->fw_info.tx_ol_flags))
+ netdev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+
+ if (OCTEP_VF_RX_IP_CSUM(octep_vf_dev->fw_info.rx_ol_flags))
+ netdev->hw_features |= NETIF_F_RXCSUM;
+
+ netdev->min_mtu = OCTEP_VF_MIN_MTU;
+ netdev->max_mtu = OCTEP_VF_MAX_MTU;
+ netdev->mtu = OCTEP_VF_DEFAULT_MTU;
+
+ if (OCTEP_VF_TX_TSO(octep_vf_dev->fw_info.tx_ol_flags)) {
+ netdev->hw_features |= NETIF_F_TSO;
+ netif_set_tso_max_size(netdev, netdev->max_mtu);
+ }
+
+ netdev->features |= netdev->hw_features;
+ octep_vf_get_mac_addr(octep_vf_dev, octep_vf_dev->mac_addr);
+ eth_hw_addr_set(netdev, octep_vf_dev->mac_addr);
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register netdev\n");
+ goto delete_mbox;
+ }
+ dev_info(&pdev->dev, "Device probe successful\n");
+ return 0;
+
+delete_mbox:
+ octep_vf_delete_mbox(octep_vf_dev);
+device_cleanup:
+ octep_vf_device_cleanup(octep_vf_dev);
+netdevice_free:
+ free_netdev(netdev);
+mem_regions_release:
+ pci_release_mem_regions(pdev);
+disable_pci_device:
+ pci_disable_device(pdev);
+ dev_err(&pdev->dev, "Device probe failed\n");
+ return err;
+}
+
+/**
+ * octep_vf_remove() - Remove Octeon PCI device from driver control.
+ *
+ * @pdev: PCI device structure of the Octeon device.
+ *
+ * Cleanup all resources allocated for the Octeon device.
+ * Unregister from network device and disable the PCI device.
+ */
+static void octep_vf_remove(struct pci_dev *pdev)
+{
+ struct octep_vf_device *oct = pci_get_drvdata(pdev);
+ struct net_device *netdev;
+
+ if (!oct)
+ return;
+
+ octep_vf_mbox_dev_remove(oct);
+ cancel_work_sync(&oct->tx_timeout_task);
+ netdev = oct->netdev;
+ if (netdev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(netdev);
+ octep_vf_delete_mbox(oct);
+ octep_vf_device_cleanup(oct);
+ pci_release_mem_regions(pdev);
+ free_netdev(netdev);
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver octep_vf_driver = {
+ .name = OCTEP_VF_DRV_NAME,
+ .id_table = octep_vf_pci_id_tbl,
+ .probe = octep_vf_probe,
+ .remove = octep_vf_remove,
+};
+
+/**
+ * octep_vf_init_module() - Module initialization.
+ *
+ * Create common resources for the driver and register the PCI driver.
+ */
+static int __init octep_vf_init_module(void)
+{
+ int ret;
+
+ pr_info("%s: Loading %s ...\n", OCTEP_VF_DRV_NAME, OCTEP_VF_DRV_STRING);
+
+ ret = pci_register_driver(&octep_vf_driver);
+ if (ret < 0) {
+ pr_err("%s: Failed to register PCI driver; err=%d\n",
+ OCTEP_VF_DRV_NAME, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * octep_vf_exit_module() - Module exit routine.
+ *
+ * Unregister the driver from the PCI subsystem and clean up common resources.
+ */
+static void __exit octep_vf_exit_module(void)
+{
+ pr_info("%s: Unloading ...\n", OCTEP_VF_DRV_NAME);
+
+ pci_unregister_driver(&octep_vf_driver);
+
+ pr_info("%s: Unloading complete\n", OCTEP_VF_DRV_NAME);
+}
+
+module_init(octep_vf_init_module);
+module_exit(octep_vf_exit_module);
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
new file mode 100644
index 0000000000..5769f62545
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
@@ -0,0 +1,334 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_VF_MAIN_H_
+#define _OCTEP_VF_MAIN_H_
+
+#include "octep_vf_tx.h"
+#include "octep_vf_rx.h"
+#include "octep_vf_mbox.h"
+
+#define OCTEP_VF_DRV_NAME "octeon_ep_vf"
+#define OCTEP_VF_DRV_STRING "Marvell Octeon EndPoint NIC VF Driver"
+
+#define OCTEP_PCI_DEVICE_ID_CN93_VF 0xB203 //93xx VF
+#define OCTEP_PCI_DEVICE_ID_CNF95N_VF 0xB403 //95N VF
+#define OCTEP_PCI_DEVICE_ID_CN98_VF 0xB103
+#define OCTEP_PCI_DEVICE_ID_CN10KA_VF 0xB903
+#define OCTEP_PCI_DEVICE_ID_CNF10KA_VF 0xBA03
+#define OCTEP_PCI_DEVICE_ID_CNF10KB_VF 0xBC03
+#define OCTEP_PCI_DEVICE_ID_CN10KB_VF 0xBD03
+
+#define OCTEP_VF_MAX_QUEUES 63
+#define OCTEP_VF_MAX_IQ OCTEP_VF_MAX_QUEUES
+#define OCTEP_VF_MAX_OQ OCTEP_VF_MAX_QUEUES
+
+#define OCTEP_VF_MAX_MSIX_VECTORS OCTEP_VF_MAX_OQ
+
+#define OCTEP_VF_IQ_INTR_RESEND_BIT 59
+#define OCTEP_VF_OQ_INTR_RESEND_BIT 59
+
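+/* Tx ring occupancy helpers; both assume the ring size is a power of
+ * two so that 'ring_size_mask' can wrap the index arithmetic.
+ */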
+#define IQ_INSTR_PENDING(iq) ({ typeof(iq) iq__ = (iq); \
+ ((iq__)->host_write_index - (iq__)->flush_index) & \
+ (iq__)->ring_size_mask; \
+ })
+#define IQ_INSTR_SPACE(iq) ({ typeof(iq) iq_ = (iq); \
+ (iq_)->max_count - IQ_INSTR_PENDING(iq_); \
+ })
+
+/* PCI address space mapping information.
+ * Each of the 3 address spaces given by BAR0, BAR2 and BAR4 of
+ * Octeon gets mapped to different physical address spaces in
+ * the kernel.
+ */
+struct octep_vf_mmio {
+	/* Kernel virtual address at which the PCI BAR0 space is mapped. */
+ u8 __iomem *hw_addr;
+
+ /* Flag indicating the mapping was successful. */
+ int mapped;
+};
+
+struct octep_vf_hw_ops {
+ void (*setup_iq_regs)(struct octep_vf_device *oct, int q);
+ void (*setup_oq_regs)(struct octep_vf_device *oct, int q);
+ void (*setup_mbox_regs)(struct octep_vf_device *oct, int mbox);
+
+ irqreturn_t (*non_ioq_intr_handler)(void *ioq_vector);
+ irqreturn_t (*ioq_intr_handler)(void *ioq_vector);
+ void (*reinit_regs)(struct octep_vf_device *oct);
+ u32 (*update_iq_read_idx)(struct octep_vf_iq *iq);
+
+ void (*enable_interrupts)(struct octep_vf_device *oct);
+ void (*disable_interrupts)(struct octep_vf_device *oct);
+
+ void (*enable_io_queues)(struct octep_vf_device *oct);
+ void (*disable_io_queues)(struct octep_vf_device *oct);
+ void (*enable_iq)(struct octep_vf_device *oct, int q);
+ void (*disable_iq)(struct octep_vf_device *oct, int q);
+ void (*enable_oq)(struct octep_vf_device *oct, int q);
+ void (*disable_oq)(struct octep_vf_device *oct, int q);
+ void (*reset_io_queues)(struct octep_vf_device *oct);
+ void (*dump_registers)(struct octep_vf_device *oct);
+};
+
+/* Octeon mailbox data */
+struct octep_vf_mbox_data {
+ /* Holds the offset of received data via mailbox. */
+ u32 data_index;
+
+ /* Holds the received data via mailbox. */
+ u8 recv_data[OCTEP_PFVF_MBOX_MAX_DATA_BUF_SIZE];
+};
+
+/* wrappers around work structs */
+struct octep_vf_mbox_wk {
+ struct work_struct work;
+ void *ctxptr;
+};
+
+/* Octeon device mailbox */
+struct octep_vf_mbox {
+ /* A mutex to protect access to this q_mbox. */
+ struct mutex lock;
+
+ u32 state;
+
+ /* SLI_MAC_PF_MBOX_INT for PF, SLI_PKT_MBOX_INT for VF. */
+ u8 __iomem *mbox_int_reg;
+
+ /* SLI_PKT_PF_VF_MBOX_SIG(0) for PF,
+ * SLI_PKT_PF_VF_MBOX_SIG(1) for VF.
+ */
+ u8 __iomem *mbox_write_reg;
+
+ /* SLI_PKT_PF_VF_MBOX_SIG(1) for PF,
+ * SLI_PKT_PF_VF_MBOX_SIG(0) for VF.
+ */
+ u8 __iomem *mbox_read_reg;
+
+ /* Octeon mailbox data */
+ struct octep_vf_mbox_data mbox_data;
+
+ /* Octeon mailbox work handler to process Mbox messages */
+ struct octep_vf_mbox_wk wk;
+};
+
+/* Tx/Rx queue vector per interrupt. */
+struct octep_vf_ioq_vector {
+ char name[OCTEP_VF_MSIX_NAME_SIZE];
+ struct napi_struct napi;
+ struct octep_vf_device *octep_vf_dev;
+ struct octep_vf_iq *iq;
+ struct octep_vf_oq *oq;
+ cpumask_t affinity_mask;
+};
+
+/* Octeon hardware/firmware offload capability flags. */
+#define OCTEP_VF_CAP_TX_CHECKSUM BIT(0)
+#define OCTEP_VF_CAP_RX_CHECKSUM BIT(1)
+#define OCTEP_VF_CAP_TSO BIT(2)
+
+/* Link modes */
+enum octep_vf_link_mode_bit_indices {
+ OCTEP_VF_LINK_MODE_10GBASE_T = 0,
+ OCTEP_VF_LINK_MODE_10GBASE_R,
+ OCTEP_VF_LINK_MODE_10GBASE_CR,
+ OCTEP_VF_LINK_MODE_10GBASE_KR,
+ OCTEP_VF_LINK_MODE_10GBASE_LR,
+ OCTEP_VF_LINK_MODE_10GBASE_SR,
+ OCTEP_VF_LINK_MODE_25GBASE_CR,
+ OCTEP_VF_LINK_MODE_25GBASE_KR,
+ OCTEP_VF_LINK_MODE_25GBASE_SR,
+ OCTEP_VF_LINK_MODE_40GBASE_CR4,
+ OCTEP_VF_LINK_MODE_40GBASE_KR4,
+ OCTEP_VF_LINK_MODE_40GBASE_LR4,
+ OCTEP_VF_LINK_MODE_40GBASE_SR4,
+ OCTEP_VF_LINK_MODE_50GBASE_CR2,
+ OCTEP_VF_LINK_MODE_50GBASE_KR2,
+ OCTEP_VF_LINK_MODE_50GBASE_SR2,
+ OCTEP_VF_LINK_MODE_50GBASE_CR,
+ OCTEP_VF_LINK_MODE_50GBASE_KR,
+ OCTEP_VF_LINK_MODE_50GBASE_LR,
+ OCTEP_VF_LINK_MODE_50GBASE_SR,
+ OCTEP_VF_LINK_MODE_100GBASE_CR4,
+ OCTEP_VF_LINK_MODE_100GBASE_KR4,
+ OCTEP_VF_LINK_MODE_100GBASE_LR4,
+ OCTEP_VF_LINK_MODE_100GBASE_SR4,
+ OCTEP_VF_LINK_MODE_NBITS
+};
+
+/* Hardware interface link state information. */
+struct octep_vf_iface_link_info {
+ /* Bitmap of Supported link speeds/modes. */
+ u64 supported_modes;
+
+ /* Bitmap of Advertised link speeds/modes. */
+ u64 advertised_modes;
+
+ /* Negotiated link speed in Mbps. */
+ u32 speed;
+
+ /* MTU */
+ u16 mtu;
+
+ /* Autonegotiation state. */
+#define OCTEP_VF_LINK_MODE_AUTONEG_SUPPORTED BIT(0)
+#define OCTEP_VF_LINK_MODE_AUTONEG_ADVERTISED BIT(1)
+ u8 autoneg;
+
+ /* Pause frames setting. */
+#define OCTEP_VF_LINK_MODE_PAUSE_SUPPORTED BIT(0)
+#define OCTEP_VF_LINK_MODE_PAUSE_ADVERTISED BIT(1)
+ u8 pause;
+
+	/* Admin state of the link (ifconfig <iface> up/down). */
+ u8 admin_up;
+
+	/* Operational state of the link: physical link is up/down. */
+ u8 oper_up;
+};
+
+/* Hardware interface stats information. */
+struct octep_vf_iface_rxtx_stats {
+ /* Hardware Interface Rx statistics */
+ struct octep_vf_iface_rx_stats iface_rx_stats;
+
+ /* Hardware Interface Tx statistics */
+ struct octep_vf_iface_tx_stats iface_tx_stats;
+};
+
+struct octep_vf_fw_info {
+ /* pkind value to be used in every Tx hardware descriptor */
+ u8 pkind;
+ /* front size data */
+ u8 fsz;
+ /* supported rx offloads OCTEP_VF_RX_OFFLOAD_* */
+ u16 rx_ol_flags;
+ /* supported tx offloads OCTEP_VF_TX_OFFLOAD_* */
+ u16 tx_ol_flags;
+};
+
+/* The Octeon device specific private data structure.
+ * Each Octeon device has this structure to represent all its components.
+ */
+struct octep_vf_device {
+ struct octep_vf_config *conf;
+
+ /* Octeon Chip type. */
+ u16 chip_id;
+ u16 rev_id;
+
+ /* Device capabilities enabled */
+ u64 caps_enabled;
+ /* Device capabilities supported */
+ u64 caps_supported;
+
+ /* Pointer to basic Linux device */
+ struct device *dev;
+ /* Linux PCI device pointer */
+ struct pci_dev *pdev;
+ /* Netdev corresponding to the Octeon device */
+ struct net_device *netdev;
+
+ /* memory mapped io range */
+ struct octep_vf_mmio mmio;
+
+ /* MAC address */
+ u8 mac_addr[ETH_ALEN];
+
+ /* Tx queues (IQ: Instruction Queue) */
+ u16 num_iqs;
+ /* Pointers to Octeon Tx queues */
+ struct octep_vf_iq *iq[OCTEP_VF_MAX_IQ];
+
+ /* Rx queues (OQ: Output Queue) */
+ u16 num_oqs;
+ /* Pointers to Octeon Rx queues */
+ struct octep_vf_oq *oq[OCTEP_VF_MAX_OQ];
+
+ /* Hardware port number of the PCIe interface */
+ u16 pcie_port;
+
+ /* Hardware operations */
+ struct octep_vf_hw_ops hw_ops;
+
+ /* IRQ info */
+ u16 num_irqs;
+ u16 num_non_ioq_irqs;
+ char *non_ioq_irq_names;
+ struct msix_entry *msix_entries;
+	/* Per-IOQ information for its corresponding MSI-X interrupt. */
+ struct octep_vf_ioq_vector *ioq_vector[OCTEP_VF_MAX_QUEUES];
+
+ /* Hardware Interface Tx statistics */
+ struct octep_vf_iface_tx_stats iface_tx_stats;
+ /* Hardware Interface Rx statistics */
+ struct octep_vf_iface_rx_stats iface_rx_stats;
+
+ /* Hardware Interface Link info like supported modes, aneg support */
+ struct octep_vf_iface_link_info link_info;
+
+	/* Mailbox to talk to the PF */
+ struct octep_vf_mbox *mbox;
+
+ /* Work entry to handle Tx timeout */
+ struct work_struct tx_timeout_task;
+
+ /* offset for iface stats */
+ u32 ctrl_mbox_ifstats_offset;
+
+ /* Negotiated Mbox version */
+ u32 mbox_neg_ver;
+
+ /* firmware info */
+ struct octep_vf_fw_info fw_info;
+};
+
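+/* The chip pass is encoded in the PCI revision ID: bits [3:2] hold the
+ * major revision (0 is treated as pass 1) and bits [1:0] the minor.
+ */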
+static inline u16 OCTEP_VF_MAJOR_REV(struct octep_vf_device *oct)
+{
+ u16 rev = (oct->rev_id & 0xC) >> 2;
+
+ return (rev == 0) ? 1 : rev;
+}
+
+static inline u16 OCTEP_VF_MINOR_REV(struct octep_vf_device *oct)
+{
+ return (oct->rev_id & 0x3);
+}
+
+/* Octeon CSR read/write access APIs */
+#define octep_vf_write_csr(octep_vf_dev, reg_off, value) \
+ writel(value, (octep_vf_dev)->mmio.hw_addr + (reg_off))
+
+#define octep_vf_write_csr64(octep_vf_dev, reg_off, val64) \
+ writeq(val64, (octep_vf_dev)->mmio.hw_addr + (reg_off))
+
+#define octep_vf_read_csr(octep_vf_dev, reg_off) \
+ readl((octep_vf_dev)->mmio.hw_addr + (reg_off))
+
+#define octep_vf_read_csr64(octep_vf_dev, reg_off) \
+ readq((octep_vf_dev)->mmio.hw_addr + (reg_off))
+
+extern struct workqueue_struct *octep_vf_wq;
+
+int octep_vf_device_setup(struct octep_vf_device *oct);
+int octep_vf_setup_iqs(struct octep_vf_device *oct);
+void octep_vf_free_iqs(struct octep_vf_device *oct);
+void octep_vf_clean_iqs(struct octep_vf_device *oct);
+int octep_vf_setup_oqs(struct octep_vf_device *oct);
+void octep_vf_free_oqs(struct octep_vf_device *oct);
+void octep_vf_oq_dbell_init(struct octep_vf_device *oct);
+void octep_vf_device_setup_cn93(struct octep_vf_device *oct);
+void octep_vf_device_setup_cnxk(struct octep_vf_device *oct);
+int octep_vf_iq_process_completions(struct octep_vf_iq *iq, u16 budget);
+int octep_vf_oq_process_rx(struct octep_vf_oq *oq, int budget);
+void octep_vf_set_ethtool_ops(struct net_device *netdev);
+int octep_vf_get_link_info(struct octep_vf_device *oct);
+int octep_vf_get_if_stats(struct octep_vf_device *oct);
+void octep_vf_mbox_work(struct work_struct *work);
+#endif /* _OCTEP_VF_MAIN_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c
new file mode 100644
index 0000000000..2eab21e430
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include "octep_vf_config.h"
+#include "octep_vf_main.h"
+
+/* When a new command is implemented, the table below should be updated
+ * with the new command and its version info.
+ */
+static u32 pfvf_cmd_versions[OCTEP_PFVF_MBOX_CMD_MAX] = {
+ [0 ... OCTEP_PFVF_MBOX_CMD_DEV_REMOVE] = OCTEP_PFVF_MBOX_VERSION_V1,
+ [OCTEP_PFVF_MBOX_CMD_GET_FW_INFO ... OCTEP_PFVF_MBOX_NOTIF_LINK_STATUS] =
+ OCTEP_PFVF_MBOX_VERSION_V2
+};
+
+int octep_vf_setup_mbox(struct octep_vf_device *oct)
+{
+ int ring = 0;
+
+ oct->mbox = vzalloc(sizeof(*oct->mbox));
+ if (!oct->mbox)
+ return -1;
+
+ mutex_init(&oct->mbox->lock);
+
+ oct->hw_ops.setup_mbox_regs(oct, ring);
+ INIT_WORK(&oct->mbox->wk.work, octep_vf_mbox_work);
+ oct->mbox->wk.ctxptr = oct;
+ oct->mbox_neg_ver = OCTEP_PFVF_MBOX_VERSION_CURRENT;
+ dev_info(&oct->pdev->dev, "setup vf mbox successfully\n");
+ return 0;
+}
+
+void octep_vf_delete_mbox(struct octep_vf_device *oct)
+{
+ if (oct->mbox) {
+ if (work_pending(&oct->mbox->wk.work))
+ cancel_work_sync(&oct->mbox->wk.work);
+
+ mutex_destroy(&oct->mbox->lock);
+ vfree(oct->mbox);
+ oct->mbox = NULL;
+ dev_info(&oct->pdev->dev, "Deleted vf mbox successfully\n");
+ }
+}
+
+int octep_vf_mbox_version_check(struct octep_vf_device *oct)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_version.opcode = OCTEP_PFVF_MBOX_CMD_VERSION;
+ cmd.s_version.version = OCTEP_PFVF_MBOX_VERSION_CURRENT;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret == OCTEP_PFVF_MBOX_CMD_STATUS_NACK) {
+ dev_err(&oct->pdev->dev,
+ "VF Mbox version is incompatible with PF\n");
+ return -EINVAL;
+ }
+ oct->mbox_neg_ver = (u32)rsp.s_version.version;
+ dev_dbg(&oct->pdev->dev,
+ "VF Mbox version:%u Negotiated VF version with PF:%u\n",
+ (u32)cmd.s_version.version,
+ (u32)rsp.s_version.version);
+ return 0;
+}
+
+void octep_vf_mbox_work(struct work_struct *work)
+{
+ struct octep_vf_mbox_wk *wk = container_of(work, struct octep_vf_mbox_wk, work);
+ struct octep_vf_iface_link_info *link_info;
+ struct octep_vf_device *oct = NULL;
+ struct octep_vf_mbox *mbox = NULL;
+ union octep_pfvf_mbox_word *notif;
+ u64 pf_vf_data;
+
+ oct = (struct octep_vf_device *)wk->ctxptr;
+ link_info = &oct->link_info;
+ mbox = oct->mbox;
+ pf_vf_data = readq(mbox->mbox_read_reg);
+
+ notif = (union octep_pfvf_mbox_word *)&pf_vf_data;
+
+ switch (notif->s.opcode) {
+ case OCTEP_PFVF_MBOX_NOTIF_LINK_STATUS:
+ if (notif->s_link_status.status) {
+ link_info->oper_up = OCTEP_PFVF_LINK_STATUS_UP;
+ netif_carrier_on(oct->netdev);
+ dev_info(&oct->pdev->dev, "netif_carrier_on\n");
+ } else {
+ link_info->oper_up = OCTEP_PFVF_LINK_STATUS_DOWN;
+ netif_carrier_off(oct->netdev);
+ dev_info(&oct->pdev->dev, "netif_carrier_off\n");
+ }
+ break;
+ default:
+ dev_err(&oct->pdev->dev,
+ "Received unsupported notif %d\n", notif->s.opcode);
+ break;
+ }
+}
+
+static int __octep_vf_mbox_send_cmd(struct octep_vf_device *oct,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ struct octep_vf_mbox *mbox = oct->mbox;
+ u64 reg_val = 0ull;
+ int count;
+
+ if (!mbox)
+ return OCTEP_PFVF_MBOX_CMD_STATUS_NOT_SETUP;
+
+ cmd.s.type = OCTEP_PFVF_MBOX_TYPE_CMD;
+ writeq(cmd.u64, mbox->mbox_write_reg);
+
+ /* No response for notification messages */
+ if (!rsp)
+ return 0;
+
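+	/* The PF overwrites the same register with its response; poll
+	 * until the value changes from the command word written above.
+	 */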
+ for (count = 0; count < OCTEP_PFVF_MBOX_TIMEOUT_WAIT_COUNT; count++) {
+ usleep_range(1000, 1500);
+ reg_val = readq(mbox->mbox_write_reg);
+ if (reg_val != cmd.u64) {
+ rsp->u64 = reg_val;
+ break;
+ }
+ }
+ if (count == OCTEP_PFVF_MBOX_TIMEOUT_WAIT_COUNT) {
+ dev_err(&oct->pdev->dev, "mbox send command timed out\n");
+ return OCTEP_PFVF_MBOX_CMD_STATUS_TIMEDOUT;
+ }
+ if (rsp->s.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "mbox_send: Received NACK\n");
+ return OCTEP_PFVF_MBOX_CMD_STATUS_NACK;
+ }
+ rsp->u64 = reg_val;
+ return 0;
+}
+
+int octep_vf_mbox_send_cmd(struct octep_vf_device *oct, union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ struct octep_vf_mbox *mbox = oct->mbox;
+ int ret;
+
+ if (!mbox)
+ return OCTEP_PFVF_MBOX_CMD_STATUS_NOT_SETUP;
+ mutex_lock(&mbox->lock);
+ if (pfvf_cmd_versions[cmd.s.opcode] > oct->mbox_neg_ver) {
+ dev_dbg(&oct->pdev->dev, "CMD:%d not supported in Version:%d\n",
+ cmd.s.opcode, oct->mbox_neg_ver);
+ mutex_unlock(&mbox->lock);
+ return -EOPNOTSUPP;
+ }
+ ret = __octep_vf_mbox_send_cmd(oct, cmd, rsp);
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+int octep_vf_mbox_bulk_read(struct octep_vf_device *oct, enum octep_pfvf_mbox_opcode opcode,
+ u8 *data, int *size)
+{
+ struct octep_vf_mbox *mbox = oct->mbox;
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int data_len = 0, tmp_len = 0;
+ int read_cnt, i = 0, ret;
+
+ if (!mbox)
+ return OCTEP_PFVF_MBOX_CMD_STATUS_NOT_SETUP;
+
+ mutex_lock(&mbox->lock);
+ cmd.u64 = 0;
+ cmd.s_data.opcode = opcode;
+ cmd.s_data.frag = 0;
+ /* Send cmd to read data from PF */
+ ret = __octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "send mbox cmd fail for data request\n");
+ mutex_unlock(&mbox->lock);
+ return ret;
+ }
+	/* The PF sends the data length of the requested CMD in its ACK */
+ data_len = *((int32_t *)rsp.s_data.data);
+ tmp_len = data_len;
+ cmd.u64 = 0;
+ rsp.u64 = 0;
+ cmd.s_data.opcode = opcode;
+ cmd.s_data.frag = 1;
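+	/* Each ACK word carries at most OCTEP_PFVF_MBOX_MAX_DATA_SIZE
+	 * bytes; keep requesting fragments until all data is received.
+	 */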
+ while (data_len) {
+ ret = __octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "send mbox cmd fail for data request\n");
+ mutex_unlock(&mbox->lock);
+ mbox->mbox_data.data_index = 0;
+ memset(mbox->mbox_data.recv_data, 0, OCTEP_PFVF_MBOX_MAX_DATA_BUF_SIZE);
+ return ret;
+ }
+ if (data_len > OCTEP_PFVF_MBOX_MAX_DATA_SIZE) {
+ data_len -= OCTEP_PFVF_MBOX_MAX_DATA_SIZE;
+ read_cnt = OCTEP_PFVF_MBOX_MAX_DATA_SIZE;
+ } else {
+ read_cnt = data_len;
+ data_len = 0;
+ }
+ for (i = 0; i < read_cnt; i++) {
+ mbox->mbox_data.recv_data[mbox->mbox_data.data_index] =
+ rsp.s_data.data[i];
+ mbox->mbox_data.data_index++;
+ }
+ cmd.u64 = 0;
+ rsp.u64 = 0;
+ cmd.s_data.opcode = opcode;
+ cmd.s_data.frag = 1;
+ }
+ memcpy(data, mbox->mbox_data.recv_data, tmp_len);
+ *size = tmp_len;
+ mbox->mbox_data.data_index = 0;
+ memset(mbox->mbox_data.recv_data, 0, OCTEP_PFVF_MBOX_MAX_DATA_BUF_SIZE);
+ mutex_unlock(&mbox->lock);
+ return 0;
+}
+
+int octep_vf_mbox_set_mtu(struct octep_vf_device *oct, int mtu)
+{
+ int frame_size = mtu + ETH_HLEN + ETH_FCS_LEN;
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int ret = 0;
+
+ if (mtu < ETH_MIN_MTU || frame_size > ETH_MAX_MTU) {
+ dev_err(&oct->pdev->dev,
+ "Failed to set MTU to %d MIN MTU:%d MAX MTU:%d\n",
+ mtu, ETH_MIN_MTU, ETH_MAX_MTU);
+ return -EINVAL;
+ }
+
+ cmd.u64 = 0;
+ cmd.s_set_mtu.opcode = OCTEP_PFVF_MBOX_CMD_SET_MTU;
+ cmd.s_set_mtu.mtu = mtu;
+
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Mbox send failed; err=%d\n", ret);
+ return ret;
+ }
+ if (rsp.s_set_mtu.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "Received Mbox NACK from PF for MTU:%d\n", mtu);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int octep_vf_mbox_set_mac_addr(struct octep_vf_device *oct, char *mac_addr)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int i, ret;
+
+ cmd.u64 = 0;
+ cmd.s_set_mac.opcode = OCTEP_PFVF_MBOX_CMD_SET_MAC_ADDR;
+ for (i = 0; i < ETH_ALEN; i++)
+ cmd.s_set_mac.mac_addr[i] = mac_addr[i];
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Mbox send failed; err = %d\n", ret);
+ return ret;
+ }
+ if (rsp.s_set_mac.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "received NACK\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int octep_vf_mbox_get_mac_addr(struct octep_vf_device *oct, char *mac_addr)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int i, ret;
+
+ cmd.u64 = 0;
+ cmd.s_set_mac.opcode = OCTEP_PFVF_MBOX_CMD_GET_MAC_ADDR;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "get_mac: mbox send failed; err = %d\n", ret);
+ return ret;
+ }
+ if (rsp.s_set_mac.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "get_mac: received NACK\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < ETH_ALEN; i++)
+ mac_addr[i] = rsp.s_set_mac.mac_addr[i];
+ return 0;
+}
+
+int octep_vf_mbox_set_rx_state(struct octep_vf_device *oct, bool state)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_link_state.opcode = OCTEP_PFVF_MBOX_CMD_SET_RX_STATE;
+ cmd.s_link_state.state = state;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Set Rx state via VF Mbox send failed\n");
+ return ret;
+ }
+ if (rsp.s_link_state.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "Set Rx state received NACK\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int octep_vf_mbox_set_link_status(struct octep_vf_device *oct, bool status)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_link_status.opcode = OCTEP_PFVF_MBOX_CMD_SET_LINK_STATUS;
+ cmd.s_link_status.status = status;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Set link status via VF Mbox send failed\n");
+ return ret;
+ }
+ if (rsp.s_link_status.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "Set link status received NACK\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int octep_vf_mbox_get_link_status(struct octep_vf_device *oct, u8 *oper_up)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_link_status.opcode = OCTEP_PFVF_MBOX_CMD_GET_LINK_STATUS;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Get link status via VF Mbox send failed\n");
+ return ret;
+ }
+ if (rsp.s_link_status.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "Get link status received NACK\n");
+ return -EINVAL;
+ }
+ *oper_up = rsp.s_link_status.status;
+ return 0;
+}
+
+int octep_vf_mbox_dev_remove(struct octep_vf_device *oct)
+{
+ union octep_pfvf_mbox_word cmd;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s.opcode = OCTEP_PFVF_MBOX_CMD_DEV_REMOVE;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, NULL);
+ return ret;
+}
+
+int octep_vf_mbox_get_fw_info(struct octep_vf_device *oct)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_fw_info.opcode = OCTEP_PFVF_MBOX_CMD_GET_FW_INFO;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Get link status via VF Mbox send failed\n");
+ return ret;
+ }
+ if (rsp.s_fw_info.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "Get link status received NACK\n");
+ return -EINVAL;
+ }
+ oct->fw_info.pkind = rsp.s_fw_info.pkind;
+ oct->fw_info.fsz = rsp.s_fw_info.fsz;
+ oct->fw_info.rx_ol_flags = rsp.s_fw_info.rx_ol_flags;
+ oct->fw_info.tx_ol_flags = rsp.s_fw_info.tx_ol_flags;
+
+ return 0;
+}
+
+int octep_vf_mbox_set_offloads(struct octep_vf_device *oct, u16 tx_offloads,
+ u16 rx_offloads)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_offloads.opcode = OCTEP_PFVF_MBOX_CMD_SET_OFFLOADS;
+ cmd.s_offloads.rx_ol_flags = rx_offloads;
+ cmd.s_offloads.tx_ol_flags = tx_offloads;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Set offloads via VF Mbox send failed\n");
+ return ret;
+ }
+	if (rsp.s_offloads.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "Set offloads received NACK\n");
+ return -EINVAL;
+ }
+ return 0;
+}
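Taken together, these helpers share one request/ACK pattern over a single 64-bit mailbox word. A minimal sketch of how a caller could chain them during bring-up; the real ordering lives in octep_vf_main.c, outside this hunk, so the surrounding function here is illustrative only:

	/* Illustrative bring-up built only from the helpers defined above. */
	static int octep_vf_example_bringup(struct octep_vf_device *oct)
	{
		int err;

		/* Learn pkind, front-data size and offload capabilities. */
		err = octep_vf_mbox_get_fw_info(oct);
		if (err)
			return err;

		/* Request every offload the firmware advertised. */
		err = octep_vf_mbox_set_offloads(oct, oct->fw_info.tx_ol_flags,
						 oct->fw_info.rx_ol_flags);
		if (err)
			return err;

		/* Open the Rx path, then report the link as up. */
		err = octep_vf_mbox_set_rx_state(oct, true);
		if (err)
			return err;

		return octep_vf_mbox_set_link_status(oct, true);
	}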
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.h
new file mode 100644
index 0000000000..9b5efad37e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#ifndef _OCTEP_VF_MBOX_H_
+#define _OCTEP_VF_MBOX_H_
+
+/* When a new command is implemented, the VF Mbox version should be bumped.
+ */
+enum octep_pfvf_mbox_version {
+ OCTEP_PFVF_MBOX_VERSION_V0,
+ OCTEP_PFVF_MBOX_VERSION_V1,
+ OCTEP_PFVF_MBOX_VERSION_V2
+};
+
+#define OCTEP_PFVF_MBOX_VERSION_CURRENT OCTEP_PFVF_MBOX_VERSION_V2
+
+enum octep_pfvf_mbox_opcode {
+ OCTEP_PFVF_MBOX_CMD_VERSION,
+ OCTEP_PFVF_MBOX_CMD_SET_MTU,
+ OCTEP_PFVF_MBOX_CMD_SET_MAC_ADDR,
+ OCTEP_PFVF_MBOX_CMD_GET_MAC_ADDR,
+ OCTEP_PFVF_MBOX_CMD_GET_LINK_INFO,
+ OCTEP_PFVF_MBOX_CMD_GET_STATS,
+ OCTEP_PFVF_MBOX_CMD_SET_RX_STATE,
+ OCTEP_PFVF_MBOX_CMD_SET_LINK_STATUS,
+ OCTEP_PFVF_MBOX_CMD_GET_LINK_STATUS,
+ OCTEP_PFVF_MBOX_CMD_GET_MTU,
+ OCTEP_PFVF_MBOX_CMD_DEV_REMOVE,
+ OCTEP_PFVF_MBOX_CMD_GET_FW_INFO,
+ OCTEP_PFVF_MBOX_CMD_SET_OFFLOADS,
+ OCTEP_PFVF_MBOX_NOTIF_LINK_STATUS,
+ OCTEP_PFVF_MBOX_CMD_MAX,
+};
+
+enum octep_pfvf_mbox_word_type {
+ OCTEP_PFVF_MBOX_TYPE_CMD,
+ OCTEP_PFVF_MBOX_TYPE_RSP_ACK,
+ OCTEP_PFVF_MBOX_TYPE_RSP_NACK,
+};
+
+enum octep_pfvf_mbox_cmd_status {
+ OCTEP_PFVF_MBOX_CMD_STATUS_NOT_SETUP = 1,
+ OCTEP_PFVF_MBOX_CMD_STATUS_TIMEDOUT = 2,
+ OCTEP_PFVF_MBOX_CMD_STATUS_NACK = 3,
+ OCTEP_PFVF_MBOX_CMD_STATUS_BUSY = 4,
+ OCTEP_PFVF_MBOX_CMD_STATUS_ERR = 5
+};
+
+enum octep_pfvf_link_status {
+ OCTEP_PFVF_LINK_STATUS_DOWN,
+ OCTEP_PFVF_LINK_STATUS_UP,
+};
+
+enum octep_pfvf_link_speed {
+ OCTEP_PFVF_LINK_SPEED_NONE,
+ OCTEP_PFVF_LINK_SPEED_1000,
+ OCTEP_PFVF_LINK_SPEED_10000,
+ OCTEP_PFVF_LINK_SPEED_25000,
+ OCTEP_PFVF_LINK_SPEED_40000,
+ OCTEP_PFVF_LINK_SPEED_50000,
+ OCTEP_PFVF_LINK_SPEED_100000,
+ OCTEP_PFVF_LINK_SPEED_LAST,
+};
+
+enum octep_pfvf_link_duplex {
+ OCTEP_PFVF_LINK_HALF_DUPLEX,
+ OCTEP_PFVF_LINK_FULL_DUPLEX,
+};
+
+enum octep_pfvf_link_autoneg {
+ OCTEP_PFVF_LINK_AUTONEG,
+ OCTEP_PFVF_LINK_FIXED,
+};
+
+#define OCTEP_PFVF_MBOX_TIMEOUT_WAIT_COUNT 8000
+#define OCTEP_PFVF_MBOX_TIMEOUT_WAIT_UDELAY 1000
+#define OCTEP_PFVF_MBOX_MAX_RETRIES 2
+#define OCTEP_PFVF_MBOX_VERSION 0
+#define OCTEP_PFVF_MBOX_MAX_DATA_SIZE 6
+#define OCTEP_PFVF_MBOX_MAX_DATA_BUF_SIZE 320
+#define OCTEP_PFVF_MBOX_MORE_FRAG_FLAG 1
+
+union octep_pfvf_mbox_word {
+ u64 u64;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:6;
+ u64 data:48;
+ } s;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 frag:1;
+ u64 rsvd:5;
+ u8 data[6];
+ } s_data;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:6;
+ u64 version:48;
+ } s_version;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:6;
+ u8 mac_addr[6];
+ } s_set_mac;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:6;
+ u64 mtu:48;
+ } s_set_mtu;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 state:1;
+ u64 rsvd:53;
+ } s_link_state;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 status:1;
+ u64 rsvd:53;
+ } s_link_status;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 pkind:8;
+ u64 fsz:8;
+ u64 rx_ol_flags:16;
+ u64 tx_ol_flags:16;
+ u64 rsvd:6;
+ } s_fw_info;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:22;
+ u64 rx_ol_flags:16;
+ u64 tx_ol_flags:16;
+ } s_offloads;
+} __packed;
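Every layout above overlays the same 64-bit word: a sender zeroes u64, fills one view, and the receiver decodes through the matching view. A hedged sketch of the CMD_VERSION handshake built on this union; octep_vf_mbox_version_check() is declared below but implemented outside this excerpt, so details may differ:

	/* Sketch of version negotiation; not the in-tree implementation. */
	static int example_version_check(struct octep_vf_device *oct)
	{
		union octep_pfvf_mbox_word cmd = { .u64 = 0 };
		union octep_pfvf_mbox_word rsp;
		int ret;

		cmd.s_version.opcode = OCTEP_PFVF_MBOX_CMD_VERSION;
		cmd.s_version.version = OCTEP_PFVF_MBOX_VERSION_CURRENT;
		ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
		if (ret)
			return ret;
		if (rsp.s_version.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK)
			return -EINVAL;
		/* Both ends then speak the lower of the two versions. */
		return min_t(int, rsp.s_version.version,
			     OCTEP_PFVF_MBOX_VERSION_CURRENT);
	}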
+
+int octep_vf_setup_mbox(struct octep_vf_device *oct);
+void octep_vf_delete_mbox(struct octep_vf_device *oct);
+int octep_vf_mbox_send_cmd(struct octep_vf_device *oct, union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp);
+int octep_vf_mbox_bulk_read(struct octep_vf_device *oct, enum octep_pfvf_mbox_opcode opcode,
+ u8 *data, int *size);
+int octep_vf_mbox_set_mtu(struct octep_vf_device *oct, int mtu);
+int octep_vf_mbox_set_mac_addr(struct octep_vf_device *oct, char *mac_addr);
+int octep_vf_mbox_get_mac_addr(struct octep_vf_device *oct, char *mac_addr);
+int octep_vf_mbox_version_check(struct octep_vf_device *oct);
+int octep_vf_mbox_set_rx_state(struct octep_vf_device *oct, bool state);
+int octep_vf_mbox_set_link_status(struct octep_vf_device *oct, bool status);
+int octep_vf_mbox_get_link_status(struct octep_vf_device *oct, u8 *oper_up);
+int octep_vf_mbox_dev_remove(struct octep_vf_device *oct);
+int octep_vf_mbox_get_fw_info(struct octep_vf_device *oct);
+int octep_vf_mbox_set_offloads(struct octep_vf_device *oct, u16 tx_offloads, u16 rx_offloads);
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cn9k.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cn9k.h
new file mode 100644
index 0000000000..25e2a876eb
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cn9k.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#ifndef _OCTEP_VF_REGS_CN9K_H_
+#define _OCTEP_VF_REGS_CN9K_H_
+
+/*############################ RST #########################*/
+#define CN93_VF_CONFIG_XPANSION_BAR 0x38
+#define CN93_VF_CONFIG_PCIE_CAP 0x70
+#define CN93_VF_CONFIG_PCIE_DEVCAP 0x74
+#define CN93_VF_CONFIG_PCIE_DEVCTL 0x78
+#define CN93_VF_CONFIG_PCIE_LINKCAP 0x7C
+#define CN93_VF_CONFIG_PCIE_LINKCTL 0x80
+#define CN93_VF_CONFIG_PCIE_SLOTCAP 0x84
+#define CN93_VF_CONFIG_PCIE_SLOTCTL 0x88
+
+#define CN93_VF_RING_OFFSET BIT_ULL(17)
+
+/*###################### RING IN REGISTERS #########################*/
+#define CN93_VF_SDP_R_IN_CONTROL_START 0x10000
+#define CN93_VF_SDP_R_IN_ENABLE_START 0x10010
+#define CN93_VF_SDP_R_IN_INSTR_BADDR_START 0x10020
+#define CN93_VF_SDP_R_IN_INSTR_RSIZE_START 0x10030
+#define CN93_VF_SDP_R_IN_INSTR_DBELL_START 0x10040
+#define CN93_VF_SDP_R_IN_CNTS_START 0x10050
+#define CN93_VF_SDP_R_IN_INT_LEVELS_START 0x10060
+#define CN93_VF_SDP_R_IN_PKT_CNT_START 0x10080
+#define CN93_VF_SDP_R_IN_BYTE_CNT_START 0x10090
+
+#define CN93_VF_SDP_R_IN_CONTROL(ring) \
+ (CN93_VF_SDP_R_IN_CONTROL_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_ENABLE(ring) \
+ (CN93_VF_SDP_R_IN_ENABLE_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_INSTR_BADDR(ring) \
+ (CN93_VF_SDP_R_IN_INSTR_BADDR_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_INSTR_RSIZE(ring) \
+ (CN93_VF_SDP_R_IN_INSTR_RSIZE_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_INSTR_DBELL(ring) \
+ (CN93_VF_SDP_R_IN_INSTR_DBELL_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_CNTS(ring) \
+ (CN93_VF_SDP_R_IN_CNTS_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_INT_LEVELS(ring) \
+ (CN93_VF_SDP_R_IN_INT_LEVELS_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_PKT_CNT(ring) \
+ (CN93_VF_SDP_R_IN_PKT_CNT_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_BYTE_CNT(ring) \
+ (CN93_VF_SDP_R_IN_BYTE_CNT_START + ((ring) * CN93_VF_RING_OFFSET))
+
+/*------------------ R_IN Masks ----------------*/
+
+/* Rings per Virtual Function */
+#define CN93_VF_R_IN_CTL_RPVF_MASK (0xF)
+#define CN93_VF_R_IN_CTL_RPVF_POS (48)
+
+/* Number of instructions to be read in one MAC read request;
+ * set to the max value (4).
+ */
+#define CN93_VF_R_IN_CTL_IDLE BIT_ULL(28)
+#define CN93_VF_R_IN_CTL_RDSIZE (0x3ULL << 25)
+#define CN93_VF_R_IN_CTL_IS_64B BIT_ULL(24)
+#define CN93_VF_R_IN_CTL_D_NSR BIT_ULL(8)
+#define CN93_VF_R_IN_CTL_D_ESR BIT_ULL(6)
+#define CN93_VF_R_IN_CTL_D_ROR BIT_ULL(5)
+#define CN93_VF_R_IN_CTL_NSR BIT_ULL(3)
+#define CN93_VF_R_IN_CTL_ESR BIT_ULL(1)
+#define CN93_VF_R_IN_CTL_ROR BIT_ULL(0)
+
+#define CN93_VF_R_IN_CTL_MASK (CN93_VF_R_IN_CTL_RDSIZE | CN93_VF_R_IN_CTL_IS_64B)
+
+/*###################### RING OUT REGISTERS #########################*/
+#define CN93_VF_SDP_R_OUT_CNTS_START 0x10100
+#define CN93_VF_SDP_R_OUT_INT_LEVELS_START 0x10110
+#define CN93_VF_SDP_R_OUT_SLIST_BADDR_START 0x10120
+#define CN93_VF_SDP_R_OUT_SLIST_RSIZE_START 0x10130
+#define CN93_VF_SDP_R_OUT_SLIST_DBELL_START 0x10140
+#define CN93_VF_SDP_R_OUT_CONTROL_START 0x10150
+#define CN93_VF_SDP_R_OUT_ENABLE_START 0x10160
+#define CN93_VF_SDP_R_OUT_PKT_CNT_START 0x10180
+#define CN93_VF_SDP_R_OUT_BYTE_CNT_START 0x10190
+
+#define CN93_VF_SDP_R_OUT_CONTROL(ring) \
+ (CN93_VF_SDP_R_OUT_CONTROL_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_ENABLE(ring) \
+ (CN93_VF_SDP_R_OUT_ENABLE_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_SLIST_BADDR(ring) \
+ (CN93_VF_SDP_R_OUT_SLIST_BADDR_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_SLIST_RSIZE(ring) \
+ (CN93_VF_SDP_R_OUT_SLIST_RSIZE_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_SLIST_DBELL(ring) \
+ (CN93_VF_SDP_R_OUT_SLIST_DBELL_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_CNTS(ring) \
+ (CN93_VF_SDP_R_OUT_CNTS_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_INT_LEVELS(ring) \
+ (CN93_VF_SDP_R_OUT_INT_LEVELS_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_PKT_CNT(ring) \
+ (CN93_VF_SDP_R_OUT_PKT_CNT_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_BYTE_CNT(ring) \
+ (CN93_VF_SDP_R_OUT_BYTE_CNT_START + ((ring) * CN93_VF_RING_OFFSET))
+
+/*------------------ R_OUT Masks ----------------*/
+#define CN93_VF_R_OUT_INT_LEVELS_BMODE BIT_ULL(63)
+#define CN93_VF_R_OUT_INT_LEVELS_TIMET (32)
+
+#define CN93_VF_R_OUT_CTL_IDLE BIT_ULL(40)
+#define CN93_VF_R_OUT_CTL_ES_I BIT_ULL(34)
+#define CN93_VF_R_OUT_CTL_NSR_I BIT_ULL(33)
+#define CN93_VF_R_OUT_CTL_ROR_I BIT_ULL(32)
+#define CN93_VF_R_OUT_CTL_ES_D BIT_ULL(30)
+#define CN93_VF_R_OUT_CTL_NSR_D BIT_ULL(29)
+#define CN93_VF_R_OUT_CTL_ROR_D BIT_ULL(28)
+#define CN93_VF_R_OUT_CTL_ES_P BIT_ULL(26)
+#define CN93_VF_R_OUT_CTL_NSR_P BIT_ULL(25)
+#define CN93_VF_R_OUT_CTL_ROR_P BIT_ULL(24)
+#define CN93_VF_R_OUT_CTL_IMODE BIT_ULL(23)
+
+/* ##################### Mail Box Registers ########################## */
+/* SDP PF to VF Mailbox Data Register */
+#define CN93_VF_SDP_R_MBOX_PF_VF_DATA_START 0x10210
+/* SDP Packet PF to VF Mailbox Interrupt Register */
+#define CN93_VF_SDP_R_MBOX_PF_VF_INT_START 0x10220
+/* SDP VF to PF Mailbox Data Register */
+#define CN93_VF_SDP_R_MBOX_VF_PF_DATA_START 0x10230
+
+#define CN93_VF_SDP_R_MBOX_PF_VF_INT_ENAB BIT_ULL(1)
+#define CN93_VF_SDP_R_MBOX_PF_VF_INT_STATUS BIT_ULL(0)
+
+#define CN93_VF_SDP_R_MBOX_PF_VF_DATA(ring) \
+ (CN93_VF_SDP_R_MBOX_PF_VF_DATA_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_MBOX_PF_VF_INT(ring) \
+ (CN93_VF_SDP_R_MBOX_PF_VF_INT_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_MBOX_VF_PF_DATA(ring) \
+ (CN93_VF_SDP_R_MBOX_VF_PF_DATA_START + ((ring) * CN93_VF_RING_OFFSET))
+#endif /* _OCTEP_VF_REGS_CN9K_H_ */
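All of these macros follow one rule: ring N's copy of a register sits at the ring-0 offset plus N * CN93_VF_RING_OFFSET (BIT_ULL(17), i.e. 128 KiB per ring). A small sketch of a doorbell write using that arithmetic; the name of the mapped BAR pointer is an assumption, since the device structure is defined elsewhere:

	/* Post 8 new Tx descriptors on ring 2 (BAR pointer name assumed). */
	u8 __iomem *bar = oct->mmio.hw_addr;	/* assumed field name */

	/* 0x10040 + 2 * 0x20000 == 0x50040 */
	writel(8, bar + CN93_VF_SDP_R_IN_INSTR_DBELL(2));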
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cnxk.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cnxk.h
new file mode 100644
index 0000000000..2e156745ef
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cnxk.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#ifndef _OCTEP_VF_REGS_CNXK_H_
+#define _OCTEP_VF_REGS_CNXK_H_
+
+/*############################ RST #########################*/
+#define CNXK_VF_CONFIG_XPANSION_BAR 0x38
+#define CNXK_VF_CONFIG_PCIE_CAP 0x70
+#define CNXK_VF_CONFIG_PCIE_DEVCAP 0x74
+#define CNXK_VF_CONFIG_PCIE_DEVCTL 0x78
+#define CNXK_VF_CONFIG_PCIE_LINKCAP 0x7C
+#define CNXK_VF_CONFIG_PCIE_LINKCTL 0x80
+#define CNXK_VF_CONFIG_PCIE_SLOTCAP 0x84
+#define CNXK_VF_CONFIG_PCIE_SLOTCTL 0x88
+
+#define CNXK_VF_RING_OFFSET (0x1ULL << 17)
+
+/*###################### RING IN REGISTERS #########################*/
+#define CNXK_VF_SDP_R_IN_CONTROL_START 0x10000
+#define CNXK_VF_SDP_R_IN_ENABLE_START 0x10010
+#define CNXK_VF_SDP_R_IN_INSTR_BADDR_START 0x10020
+#define CNXK_VF_SDP_R_IN_INSTR_RSIZE_START 0x10030
+#define CNXK_VF_SDP_R_IN_INSTR_DBELL_START 0x10040
+#define CNXK_VF_SDP_R_IN_CNTS_START 0x10050
+#define CNXK_VF_SDP_R_IN_INT_LEVELS_START 0x10060
+#define CNXK_VF_SDP_R_IN_PKT_CNT_START 0x10080
+#define CNXK_VF_SDP_R_IN_BYTE_CNT_START 0x10090
+#define CNXK_VF_SDP_R_ERR_TYPE_START 0x10400
+
+#define CNXK_VF_SDP_R_ERR_TYPE(ring) \
+ (CNXK_VF_SDP_R_ERR_TYPE_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_CONTROL(ring) \
+ (CNXK_VF_SDP_R_IN_CONTROL_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_ENABLE(ring) \
+ (CNXK_VF_SDP_R_IN_ENABLE_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_INSTR_BADDR(ring) \
+ (CNXK_VF_SDP_R_IN_INSTR_BADDR_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_INSTR_RSIZE(ring) \
+ (CNXK_VF_SDP_R_IN_INSTR_RSIZE_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_INSTR_DBELL(ring) \
+ (CNXK_VF_SDP_R_IN_INSTR_DBELL_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_CNTS(ring) \
+ (CNXK_VF_SDP_R_IN_CNTS_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_INT_LEVELS(ring) \
+ (CNXK_VF_SDP_R_IN_INT_LEVELS_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_PKT_CNT(ring) \
+ (CNXK_VF_SDP_R_IN_PKT_CNT_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_BYTE_CNT(ring) \
+ (CNXK_VF_SDP_R_IN_BYTE_CNT_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+/*------------------ R_IN Masks ----------------*/
+
+/* Rings per Virtual Function */
+#define CNXK_VF_R_IN_CTL_RPVF_MASK (0xF)
+#define CNXK_VF_R_IN_CTL_RPVF_POS (48)
+
+/* Number of instructions to be read in one MAC read request;
+ * set to the max value (4).
+ */
+#define CNXK_VF_R_IN_CTL_IDLE (0x1ULL << 28)
+#define CNXK_VF_R_IN_CTL_RDSIZE (0x3ULL << 25)
+#define CNXK_VF_R_IN_CTL_IS_64B (0x1ULL << 24)
+#define CNXK_VF_R_IN_CTL_D_NSR (0x1ULL << 8)
+#define CNXK_VF_R_IN_CTL_D_ESR (0x1ULL << 6)
+#define CNXK_VF_R_IN_CTL_D_ROR (0x1ULL << 5)
+#define CNXK_VF_R_IN_CTL_NSR (0x1ULL << 3)
+#define CNXK_VF_R_IN_CTL_ESR (0x1ULL << 1)
+#define CNXK_VF_R_IN_CTL_ROR (0x1ULL << 0)
+
+#define CNXK_VF_R_IN_CTL_MASK (CNXK_VF_R_IN_CTL_RDSIZE | CNXK_VF_R_IN_CTL_IS_64B)
+
+/*###################### RING OUT REGISTERS #########################*/
+#define CNXK_VF_SDP_R_OUT_CNTS_START 0x10100
+#define CNXK_VF_SDP_R_OUT_INT_LEVELS_START 0x10110
+#define CNXK_VF_SDP_R_OUT_SLIST_BADDR_START 0x10120
+#define CNXK_VF_SDP_R_OUT_SLIST_RSIZE_START 0x10130
+#define CNXK_VF_SDP_R_OUT_SLIST_DBELL_START 0x10140
+#define CNXK_VF_SDP_R_OUT_CONTROL_START 0x10150
+#define CNXK_VF_SDP_R_OUT_WMARK_START 0x10160
+#define CNXK_VF_SDP_R_OUT_ENABLE_START 0x10170
+#define CNXK_VF_SDP_R_OUT_PKT_CNT_START 0x10180
+#define CNXK_VF_SDP_R_OUT_BYTE_CNT_START 0x10190
+
+#define CNXK_VF_SDP_R_OUT_CONTROL(ring) \
+ (CNXK_VF_SDP_R_OUT_CONTROL_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_ENABLE(ring) \
+ (CNXK_VF_SDP_R_OUT_ENABLE_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_SLIST_BADDR(ring) \
+ (CNXK_VF_SDP_R_OUT_SLIST_BADDR_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_SLIST_RSIZE(ring) \
+ (CNXK_VF_SDP_R_OUT_SLIST_RSIZE_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_SLIST_DBELL(ring) \
+ (CNXK_VF_SDP_R_OUT_SLIST_DBELL_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_WMARK(ring) \
+ (CNXK_VF_SDP_R_OUT_WMARK_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_CNTS(ring) \
+ (CNXK_VF_SDP_R_OUT_CNTS_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_INT_LEVELS(ring) \
+ (CNXK_VF_SDP_R_OUT_INT_LEVELS_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_PKT_CNT(ring) \
+ (CNXK_VF_SDP_R_OUT_PKT_CNT_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_BYTE_CNT(ring) \
+ (CNXK_VF_SDP_R_OUT_BYTE_CNT_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+/*------------------ R_OUT Masks ----------------*/
+#define CNXK_VF_R_OUT_INT_LEVELS_BMODE BIT_ULL(63)
+#define CNXK_VF_R_OUT_INT_LEVELS_TIMET (32)
+
+#define CNXK_VF_R_OUT_CTL_IDLE BIT_ULL(40)
+#define CNXK_VF_R_OUT_CTL_ES_I BIT_ULL(34)
+#define CNXK_VF_R_OUT_CTL_NSR_I BIT_ULL(33)
+#define CNXK_VF_R_OUT_CTL_ROR_I BIT_ULL(32)
+#define CNXK_VF_R_OUT_CTL_ES_D BIT_ULL(30)
+#define CNXK_VF_R_OUT_CTL_NSR_D BIT_ULL(29)
+#define CNXK_VF_R_OUT_CTL_ROR_D BIT_ULL(28)
+#define CNXK_VF_R_OUT_CTL_ES_P BIT_ULL(26)
+#define CNXK_VF_R_OUT_CTL_NSR_P BIT_ULL(25)
+#define CNXK_VF_R_OUT_CTL_ROR_P BIT_ULL(24)
+#define CNXK_VF_R_OUT_CTL_IMODE BIT_ULL(23)
+
+/* ##################### Mail Box Registers ########################## */
+/* SDP PF to VF Mailbox Data Register */
+#define CNXK_VF_SDP_R_MBOX_PF_VF_DATA_START 0x10210
+/* SDP Packet PF to VF Mailbox Interrupt Register */
+#define CNXK_VF_SDP_R_MBOX_PF_VF_INT_START 0x10220
+/* SDP VF to PF Mailbox Data Register */
+#define CNXK_VF_SDP_R_MBOX_VF_PF_DATA_START 0x10230
+
+#define CNXK_VF_SDP_R_MBOX_PF_VF_INT_ENAB BIT_ULL(1)
+#define CNXK_VF_SDP_R_MBOX_PF_VF_INT_STATUS BIT_ULL(0)
+
+#define CNXK_VF_SDP_R_MBOX_PF_VF_DATA(ring) \
+ (CNXK_VF_SDP_R_MBOX_PF_VF_DATA_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_MBOX_PF_VF_INT(ring) \
+ (CNXK_VF_SDP_R_MBOX_PF_VF_INT_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_MBOX_VF_PF_DATA(ring) \
+ (CNXK_VF_SDP_R_MBOX_VF_PF_DATA_START + ((ring) * CNXK_VF_RING_OFFSET))
+#endif /* _OCTEP_VF_REGS_CNXK_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
new file mode 100644
index 0000000000..82821bc286
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
@@ -0,0 +1,510 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+
+#include "octep_vf_config.h"
+#include "octep_vf_main.h"
+
+static void octep_vf_oq_reset_indices(struct octep_vf_oq *oq)
+{
+ oq->host_read_idx = 0;
+ oq->host_refill_idx = 0;
+ oq->refill_count = 0;
+ oq->last_pkt_count = 0;
+ oq->pkts_pending = 0;
+}
+
+/**
+ * octep_vf_oq_fill_ring_buffers() - fill initial receive buffers for Rx ring.
+ *
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Return: 0, if successfully filled receive buffers for all descriptors.
+ * -ENOMEM, if failed to allocate a buffer or failed to map for DMA.
+ */
+static int octep_vf_oq_fill_ring_buffers(struct octep_vf_oq *oq)
+{
+ struct octep_vf_oq_desc_hw *desc_ring = oq->desc_ring;
+ struct page *page;
+ u32 i;
+
+ for (i = 0; i < oq->max_count; i++) {
+ page = dev_alloc_page();
+ if (unlikely(!page)) {
+ dev_err(oq->dev, "Rx buffer alloc failed\n");
+ goto rx_buf_alloc_err;
+ }
+ desc_ring[i].buffer_ptr = dma_map_page(oq->dev, page, 0,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(oq->dev, desc_ring[i].buffer_ptr)) {
+ dev_err(oq->dev,
+ "OQ-%d buffer alloc: DMA mapping error!\n",
+ oq->q_no);
+ goto dma_map_err;
+ }
+ oq->buff_info[i].page = page;
+ }
+
+ return 0;
+
+dma_map_err:
+ put_page(page);
+rx_buf_alloc_err:
+ while (i) {
+ i--;
+ dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr, PAGE_SIZE, DMA_FROM_DEVICE);
+ put_page(oq->buff_info[i].page);
+ oq->buff_info[i].page = NULL;
+ }
+
+ return -ENOMEM;
+}
+
+/**
+ * octep_vf_oq_refill() - refill buffers for used Rx ring descriptors.
+ *
+ * @oct: Octeon device private data structure.
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Return: number of descriptors successfully refilled with receive buffers.
+ */
+static int octep_vf_oq_refill(struct octep_vf_device *oct, struct octep_vf_oq *oq)
+{
+ struct octep_vf_oq_desc_hw *desc_ring = oq->desc_ring;
+ struct page *page;
+ u32 refill_idx, i;
+
+ refill_idx = oq->host_refill_idx;
+ for (i = 0; i < oq->refill_count; i++) {
+ page = dev_alloc_page();
+ if (unlikely(!page)) {
+ dev_err(oq->dev, "refill: rx buffer alloc failed\n");
+ oq->stats.alloc_failures++;
+ break;
+ }
+
+ desc_ring[refill_idx].buffer_ptr = dma_map_page(oq->dev, page, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(oq->dev, desc_ring[refill_idx].buffer_ptr)) {
+ dev_err(oq->dev,
+ "OQ-%d buffer refill: DMA mapping error!\n",
+ oq->q_no);
+ put_page(page);
+ oq->stats.alloc_failures++;
+ break;
+ }
+ oq->buff_info[refill_idx].page = page;
+ refill_idx++;
+ if (refill_idx == oq->max_count)
+ refill_idx = 0;
+ }
+ oq->host_refill_idx = refill_idx;
+ oq->refill_count -= i;
+
+ return i;
+}
+
+/**
+ * octep_vf_setup_oq() - Setup a Rx queue.
+ *
+ * @oct: Octeon device private data structure.
+ * @q_no: Rx queue number to be setup.
+ *
+ * Allocate resources for a Rx queue.
+ */
+static int octep_vf_setup_oq(struct octep_vf_device *oct, int q_no)
+{
+ struct octep_vf_oq *oq;
+ u32 desc_ring_size;
+
+ oq = vzalloc(sizeof(*oq));
+ if (!oq)
+ goto create_oq_fail;
+ oct->oq[q_no] = oq;
+
+ oq->octep_vf_dev = oct;
+ oq->netdev = oct->netdev;
+ oq->dev = &oct->pdev->dev;
+ oq->q_no = q_no;
+ oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf);
+ oq->ring_size_mask = oq->max_count - 1;
+ oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf);
+ oq->max_single_buffer_size = oq->buffer_size - OCTEP_VF_OQ_RESP_HW_SIZE;
+
+	/* When the hardware/firmware supports additional capabilities,
+	 * Octeon fills in an extra header after the length field of
+	 * Rx packets; this header carries additional packet information.
+	 */
+ */
+ if (oct->fw_info.rx_ol_flags)
+ oq->max_single_buffer_size -= OCTEP_VF_OQ_RESP_HW_EXT_SIZE;
+
+ oq->refill_threshold = CFG_GET_OQ_REFILL_THRESHOLD(oct->conf);
+
+ desc_ring_size = oq->max_count * OCTEP_VF_OQ_DESC_SIZE;
+ oq->desc_ring = dma_alloc_coherent(oq->dev, desc_ring_size,
+ &oq->desc_ring_dma, GFP_KERNEL);
+
+ if (unlikely(!oq->desc_ring)) {
+ dev_err(oq->dev,
+ "Failed to allocate DMA memory for OQ-%d !!\n", q_no);
+ goto desc_dma_alloc_err;
+ }
+
+ oq->buff_info = vzalloc(oq->max_count * OCTEP_VF_OQ_RECVBUF_SIZE);
+
+ if (unlikely(!oq->buff_info)) {
+ dev_err(&oct->pdev->dev,
+ "Failed to allocate buffer info for OQ-%d\n", q_no);
+ goto buf_list_err;
+ }
+
+ if (octep_vf_oq_fill_ring_buffers(oq))
+ goto oq_fill_buff_err;
+
+ octep_vf_oq_reset_indices(oq);
+ oct->hw_ops.setup_oq_regs(oct, q_no);
+ oct->num_oqs++;
+
+ return 0;
+
+oq_fill_buff_err:
+ vfree(oq->buff_info);
+ oq->buff_info = NULL;
+buf_list_err:
+ dma_free_coherent(oq->dev, desc_ring_size,
+ oq->desc_ring, oq->desc_ring_dma);
+ oq->desc_ring = NULL;
+desc_dma_alloc_err:
+ vfree(oq);
+ oct->oq[q_no] = NULL;
+create_oq_fail:
+ return -ENOMEM;
+}
+
+/**
+ * octep_vf_oq_free_ring_buffers() - Free ring buffers.
+ *
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Free receive buffers in unused Rx queue descriptors.
+ */
+static void octep_vf_oq_free_ring_buffers(struct octep_vf_oq *oq)
+{
+ struct octep_vf_oq_desc_hw *desc_ring = oq->desc_ring;
+ int i;
+
+ if (!oq->desc_ring || !oq->buff_info)
+ return;
+
+ for (i = 0; i < oq->max_count; i++) {
+ if (oq->buff_info[i].page) {
+ dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ put_page(oq->buff_info[i].page);
+ oq->buff_info[i].page = NULL;
+ desc_ring[i].buffer_ptr = 0;
+ }
+ }
+ octep_vf_oq_reset_indices(oq);
+}
+
+/**
+ * octep_vf_free_oq() - Free Rx queue resources.
+ *
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Free all resources of a Rx queue.
+ */
+static int octep_vf_free_oq(struct octep_vf_oq *oq)
+{
+ struct octep_vf_device *oct = oq->octep_vf_dev;
+ int q_no = oq->q_no;
+
+ octep_vf_oq_free_ring_buffers(oq);
+
+ vfree(oq->buff_info);
+
+ if (oq->desc_ring)
+ dma_free_coherent(oq->dev,
+ oq->max_count * OCTEP_VF_OQ_DESC_SIZE,
+ oq->desc_ring, oq->desc_ring_dma);
+
+ vfree(oq);
+ oct->oq[q_no] = NULL;
+ oct->num_oqs--;
+ return 0;
+}
+
+/**
+ * octep_vf_setup_oqs() - setup resources for all Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+int octep_vf_setup_oqs(struct octep_vf_device *oct)
+{
+ int i, retval = 0;
+
+ oct->num_oqs = 0;
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ retval = octep_vf_setup_oq(oct, i);
+ if (retval) {
+ dev_err(&oct->pdev->dev,
+ "Failed to setup OQ(RxQ)-%d.\n", i);
+ goto oq_setup_err;
+ }
+ dev_dbg(&oct->pdev->dev, "Successfully setup OQ(RxQ)-%d.\n", i);
+ }
+
+ return 0;
+
+oq_setup_err:
+ while (i) {
+ i--;
+ octep_vf_free_oq(oct->oq[i]);
+ }
+ return retval;
+}
+
+/**
+ * octep_vf_oq_dbell_init() - Initialize Rx queue doorbell.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Write number of descriptors to Rx queue doorbell register.
+ */
+void octep_vf_oq_dbell_init(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++)
+ writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
+}
+
+/**
+ * octep_vf_free_oqs() - Free resources of all Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+void octep_vf_free_oqs(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ if (!oct->oq[i])
+ continue;
+ octep_vf_free_oq(oct->oq[i]);
+ dev_dbg(&oct->pdev->dev,
+ "Successfully freed OQ(RxQ)-%d.\n", i);
+ }
+}
+
+/**
+ * octep_vf_oq_check_hw_for_pkts() - Check for new Rx packets.
+ *
+ * @oct: Octeon device private data structure.
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Return: number of packets received since the previous check.
+ */
+static int octep_vf_oq_check_hw_for_pkts(struct octep_vf_device *oct,
+ struct octep_vf_oq *oq)
+{
+ u32 pkt_count, new_pkts;
+
+ pkt_count = readl(oq->pkts_sent_reg);
+ new_pkts = pkt_count - oq->last_pkt_count;
+
+	/* Clear the hardware packet counter register if the Rx queue is
+	 * being processed continuously within a single interrupt and the
+	 * counter is nearing its maximum value.
+	 * The counter is not cleared on every read, to save write cycles.
+ */
+ if (unlikely(pkt_count > 0xF0000000U)) {
+ writel(pkt_count, oq->pkts_sent_reg);
+ pkt_count = readl(oq->pkts_sent_reg);
+ new_pkts += pkt_count;
+ }
+ oq->last_pkt_count = pkt_count;
+ oq->pkts_pending += new_pkts;
+ return new_pkts;
+}
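The delta computation relies on plain u32 arithmetic, which stays correct even across a counter wrap (and the register is reset well before that point anyway). A quick worked case:

	/* Unsigned 32-bit subtraction is modulo 2^32, so a wrapped counter
	 * still yields the right delta:
	 */
	u32 last = 0xFFFFFFF0, now = 0x00000010;
	u32 delta = now - last;	/* == 0x20: 32 new packets */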
+
+/**
+ * __octep_vf_oq_process_rx() - Process hardware Rx queue and push to stack.
+ *
+ * @oct: Octeon device private data structure.
+ * @oq: Octeon Rx queue data structure.
+ * @pkts_to_process: number of packets to be processed.
+ *
+ * Process the new packets in the Rx queue.
+ * Packets larger than a single Rx buffer arrive in consecutive descriptors,
+ * but the count returned by the API accounts only for full packets, not
+ * fragments.
+ *
+ * Return: number of packets processed and pushed to stack.
+ */
+static int __octep_vf_oq_process_rx(struct octep_vf_device *oct,
+ struct octep_vf_oq *oq, u16 pkts_to_process)
+{
+ struct octep_vf_oq_resp_hw_ext *resp_hw_ext = NULL;
+ netdev_features_t feat = oq->netdev->features;
+ struct octep_vf_rx_buffer *buff_info;
+ struct octep_vf_oq_resp_hw *resp_hw;
+ u32 pkt, rx_bytes, desc_used;
+ u16 data_offset, rx_ol_flags;
+ struct sk_buff *skb;
+ u32 read_idx;
+
+ read_idx = oq->host_read_idx;
+ rx_bytes = 0;
+ desc_used = 0;
+ for (pkt = 0; pkt < pkts_to_process; pkt++) {
+ buff_info = (struct octep_vf_rx_buffer *)&oq->buff_info[read_idx];
+ dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ resp_hw = page_address(buff_info->page);
+ buff_info->page = NULL;
+
+		/* Convert the length field from big-endian to CPU byte order */
+ buff_info->len = be64_to_cpu(resp_hw->length);
+ if (oct->fw_info.rx_ol_flags) {
+ /* Extended response header is immediately after
+ * response header (resp_hw)
+ */
+ resp_hw_ext = (struct octep_vf_oq_resp_hw_ext *)
+ (resp_hw + 1);
+ buff_info->len -= OCTEP_VF_OQ_RESP_HW_EXT_SIZE;
+ /* Packet Data is immediately after
+ * extended response header.
+ */
+ data_offset = OCTEP_VF_OQ_RESP_HW_SIZE +
+ OCTEP_VF_OQ_RESP_HW_EXT_SIZE;
+ rx_ol_flags = resp_hw_ext->rx_ol_flags;
+ } else {
+ /* Data is immediately after
+ * Hardware Rx response header.
+ */
+ data_offset = OCTEP_VF_OQ_RESP_HW_SIZE;
+ rx_ol_flags = 0;
+ }
+ rx_bytes += buff_info->len;
+
+ if (buff_info->len <= oq->max_single_buffer_size) {
+ skb = napi_build_skb((void *)resp_hw, PAGE_SIZE);
+ skb_reserve(skb, data_offset);
+ skb_put(skb, buff_info->len);
+ read_idx++;
+ desc_used++;
+ if (read_idx == oq->max_count)
+ read_idx = 0;
+ } else {
+ struct skb_shared_info *shinfo;
+ u16 data_len;
+
+ skb = napi_build_skb((void *)resp_hw, PAGE_SIZE);
+ skb_reserve(skb, data_offset);
+ /* Head fragment includes response header(s);
+			 * subsequent fragments contain only data.
+ */
+ skb_put(skb, oq->max_single_buffer_size);
+ read_idx++;
+ desc_used++;
+ if (read_idx == oq->max_count)
+ read_idx = 0;
+
+ shinfo = skb_shinfo(skb);
+ data_len = buff_info->len - oq->max_single_buffer_size;
+ while (data_len) {
+ dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ buff_info = (struct octep_vf_rx_buffer *)
+ &oq->buff_info[read_idx];
+ if (data_len < oq->buffer_size) {
+ buff_info->len = data_len;
+ data_len = 0;
+ } else {
+ buff_info->len = oq->buffer_size;
+ data_len -= oq->buffer_size;
+ }
+
+ skb_add_rx_frag(skb, shinfo->nr_frags,
+ buff_info->page, 0,
+ buff_info->len,
+ buff_info->len);
+ buff_info->page = NULL;
+ read_idx++;
+ desc_used++;
+ if (read_idx == oq->max_count)
+ read_idx = 0;
+ }
+ }
+
+ skb->dev = oq->netdev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ if (feat & NETIF_F_RXCSUM &&
+ OCTEP_VF_RX_CSUM_VERIFIED(rx_ol_flags))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+ napi_gro_receive(oq->napi, skb);
+ }
+
+ oq->host_read_idx = read_idx;
+ oq->refill_count += desc_used;
+ oq->stats.packets += pkt;
+ oq->stats.bytes += rx_bytes;
+
+ return pkt;
+}
+
+/**
+ * octep_vf_oq_process_rx() - Process Rx queue.
+ *
+ * @oq: Octeon Rx queue data structure.
+ * @budget: max number of packets can be processed in one invocation.
+ *
+ * Check for newly received packets and process them.
+ * Keeps checking for new packets until budget is used or no new packets seen.
+ *
+ * Return: number of packets processed.
+ */
+int octep_vf_oq_process_rx(struct octep_vf_oq *oq, int budget)
+{
+ u32 pkts_available, pkts_processed, total_pkts_processed;
+ struct octep_vf_device *oct = oq->octep_vf_dev;
+
+ pkts_available = 0;
+ pkts_processed = 0;
+ total_pkts_processed = 0;
+ while (total_pkts_processed < budget) {
+ /* update pending count only when current one exhausted */
+ if (oq->pkts_pending == 0)
+ octep_vf_oq_check_hw_for_pkts(oct, oq);
+ pkts_available = min(budget - total_pkts_processed,
+ oq->pkts_pending);
+ if (!pkts_available)
+ break;
+
+ pkts_processed = __octep_vf_oq_process_rx(oct, oq,
+ pkts_available);
+ oq->pkts_pending -= pkts_processed;
+ total_pkts_processed += pkts_processed;
+ }
+
+ if (oq->refill_count >= oq->refill_threshold) {
+ u32 desc_refilled = octep_vf_oq_refill(oct, oq);
+
+ /* flush pending writes before updating credits */
+ smp_wmb();
+ writel(desc_refilled, oq->pkts_credit_reg);
+ }
+
+ return total_pkts_processed;
+}
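octep_vf_oq_process_rx() is shaped for NAPI: it consumes at most `budget` packets, and only when it returns less than the budget may the caller complete NAPI and re-arm the queue interrupt. A hedged sketch of that wiring; the vector structure and the re-arm helper are assumptions, since the real poll handler lives in octep_vf_main.c outside this excerpt:

	/* Illustrative NAPI poll wiring; names marked below are assumptions. */
	struct example_ioq_vector {		/* hypothetical container */
		struct napi_struct napi;
		struct octep_vf_iq *iq;
		struct octep_vf_oq *oq;
	};

	static int example_napi_poll(struct napi_struct *napi, int budget)
	{
		struct example_ioq_vector *v;
		int rx_done;

		v = container_of(napi, struct example_ioq_vector, napi);
		octep_vf_iq_process_completions(v->iq, budget);
		rx_done = octep_vf_oq_process_rx(v->oq, budget);
		if (rx_done < budget && napi_complete_done(napi, rx_done))
			example_reenable_ioq_irq(v);	/* hypothetical re-arm */
		return rx_done;
	}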
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h
new file mode 100644
index 0000000000..fe46838b52
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_VF_RX_H_
+#define _OCTEP_VF_RX_H_
+
+/* struct octep_vf_oq_desc_hw - Octeon Hardware OQ descriptor format.
+ *
+ * The descriptor ring is made of descriptors which have 2 64-bit values:
+ *
+ * @buffer_ptr: DMA address of the skb->data
+ * @info_ptr: DMA address of host memory, used to update pkt count by hw.
+ *            This is currently unused, to save PCI writes.
+ */
+struct octep_vf_oq_desc_hw {
+ dma_addr_t buffer_ptr;
+ u64 info_ptr;
+};
+
+static_assert(sizeof(struct octep_vf_oq_desc_hw) == 16);
+
+#define OCTEP_VF_OQ_DESC_SIZE (sizeof(struct octep_vf_oq_desc_hw))
+
+/* Rx offload flags */
+#define OCTEP_VF_RX_OFFLOAD_VLAN_STRIP BIT(0)
+#define OCTEP_VF_RX_OFFLOAD_IPV4_CKSUM BIT(1)
+#define OCTEP_VF_RX_OFFLOAD_UDP_CKSUM BIT(2)
+#define OCTEP_VF_RX_OFFLOAD_TCP_CKSUM BIT(3)
+
+#define OCTEP_VF_RX_OFFLOAD_CKSUM (OCTEP_VF_RX_OFFLOAD_IPV4_CKSUM | \
+ OCTEP_VF_RX_OFFLOAD_UDP_CKSUM | \
+ OCTEP_VF_RX_OFFLOAD_TCP_CKSUM)
+
+#define OCTEP_VF_RX_IP_CSUM(flags) ((flags) & \
+ (OCTEP_VF_RX_OFFLOAD_IPV4_CKSUM | \
+ OCTEP_VF_RX_OFFLOAD_TCP_CKSUM | \
+ OCTEP_VF_RX_OFFLOAD_UDP_CKSUM))
+
+/* bit 0 is vlan strip */
+#define OCTEP_VF_RX_CSUM_IP_VERIFIED BIT(1)
+#define OCTEP_VF_RX_CSUM_L4_VERIFIED BIT(2)
+
+#define OCTEP_VF_RX_CSUM_VERIFIED(flags) ((flags) & \
+ (OCTEP_VF_RX_CSUM_L4_VERIFIED | \
+ OCTEP_VF_RX_CSUM_IP_VERIFIED))
+
+/* Extended Response Header in packet data received from Hardware.
+ * Includes metadata like checksum status.
+ * This is valid only if the hardware/firmware has published support for it,
+ * and it sits at offset 0 of the packet data (skb->data).
+ */
+struct octep_vf_oq_resp_hw_ext {
+ /* Reserved. */
+ u64 rsvd:48;
+
+ /* rx offload flags */
+ u16 rx_ol_flags;
+};
+
+static_assert(sizeof(struct octep_vf_oq_resp_hw_ext) == 8);
+
+#define OCTEP_VF_OQ_RESP_HW_EXT_SIZE (sizeof(struct octep_vf_oq_resp_hw_ext))
+
+/* Length of the Rx packet DMA'ed by Octeon to the Host.
+ * This is big-endian and must be converted to CPU endianness.
+ * Octeon writes it at the beginning of the Rx buffer (skb->data).
+ */
+struct octep_vf_oq_resp_hw {
+ /* The Length of the packet. */
+ __be64 length;
+};
+
+static_assert(sizeof(struct octep_vf_oq_resp_hw) == 8);
+
+#define OCTEP_VF_OQ_RESP_HW_SIZE (sizeof(struct octep_vf_oq_resp_hw))
+
+/* Pointer to data buffer.
+ * Driver keeps a pointer to the data buffer that it made available to
+ * the Octeon device. Since the descriptor ring keeps physical (bus)
+ * addresses, this field is required for the driver to keep track of
+ * the virtual address pointers. The fields are operated by
+ * OS-dependent routines.
+ */
+struct octep_vf_rx_buffer {
+ struct page *page;
+
+ /* length from rx hardware descriptor after converting to cpu endian */
+ u64 len;
+};
+
+#define OCTEP_VF_OQ_RECVBUF_SIZE (sizeof(struct octep_vf_rx_buffer))
+
+/* Output Queue statistics. Each output queue has four stats fields. */
+struct octep_vf_oq_stats {
+ /* Number of packets received from the Device. */
+ u64 packets;
+
+ /* Number of bytes received from the Device. */
+ u64 bytes;
+
+ /* Number of times failed to allocate buffers. */
+ u64 alloc_failures;
+};
+
+#define OCTEP_VF_OQ_STATS_SIZE (sizeof(struct octep_vf_oq_stats))
+
+/* Hardware interface Rx statistics */
+struct octep_vf_iface_rx_stats {
+ /* Received packets */
+ u64 pkts;
+
+ /* Octets of received packets */
+ u64 octets;
+
+ /* Received PAUSE and Control packets */
+ u64 pause_pkts;
+
+ /* Received PAUSE and Control octets */
+ u64 pause_octets;
+
+ /* Filtered DMAC0 packets */
+ u64 dmac0_pkts;
+
+ /* Filtered DMAC0 octets */
+ u64 dmac0_octets;
+
+ /* Packets dropped due to RX FIFO full */
+ u64 dropped_pkts_fifo_full;
+
+ /* Octets dropped due to RX FIFO full */
+ u64 dropped_octets_fifo_full;
+
+ /* Error packets */
+ u64 err_pkts;
+
+ /* Filtered DMAC1 packets */
+ u64 dmac1_pkts;
+
+ /* Filtered DMAC1 octets */
+ u64 dmac1_octets;
+
+ /* NCSI-bound packets dropped */
+ u64 ncsi_dropped_pkts;
+
+ /* NCSI-bound octets dropped */
+ u64 ncsi_dropped_octets;
+
+ /* Multicast packets received. */
+ u64 mcast_pkts;
+
+ /* Broadcast packets received. */
+ u64 bcast_pkts;
+};
+
+/* The Descriptor Ring Output Queue structure.
+ * This structure has all the information required to implement a
+ * Octeon OQ.
+ */
+struct octep_vf_oq {
+ u32 q_no;
+
+ struct octep_vf_device *octep_vf_dev;
+ struct net_device *netdev;
+ struct device *dev;
+
+ struct napi_struct *napi;
+
+ /* The receive buffer list. This list has the virtual addresses
+ * of the buffers.
+ */
+ struct octep_vf_rx_buffer *buff_info;
+
+ /* Pointer to the mapped packet credit register.
+ * Host writes number of info/buffer ptrs available to this register
+ */
+ u8 __iomem *pkts_credit_reg;
+
+ /* Pointer to the mapped packet sent register.
+ * Octeon writes the number of packets DMA'ed to host memory
+ * in this register.
+ */
+ u8 __iomem *pkts_sent_reg;
+
+ /* Statistics for this OQ. */
+ struct octep_vf_oq_stats stats;
+
+ /* Packets pending to be processed */
+ u32 pkts_pending;
+ u32 last_pkt_count;
+
+ /* Index in the ring where the driver should read the next packet */
+ u32 host_read_idx;
+
+ /* Number of descriptors in this ring. */
+ u32 max_count;
+ u32 ring_size_mask;
+
+ /* The number of descriptors pending refill. */
+ u32 refill_count;
+
+ /* Index in the ring where the driver will refill the
+ * descriptor's buffer
+ */
+ u32 host_refill_idx;
+ u32 refill_threshold;
+
+ /* The size of each buffer pointed by the buffer pointer. */
+ u32 buffer_size;
+ u32 max_single_buffer_size;
+
+ /* The 8B aligned descriptor ring starts at this address. */
+ struct octep_vf_oq_desc_hw *desc_ring;
+
+ /* DMA mapped address of the OQ descriptor ring. */
+ dma_addr_t desc_ring_dma;
+};
+
+#define OCTEP_VF_OQ_SIZE (sizeof(struct octep_vf_oq))
+#endif /* _OCTEP_VF_RX_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c
new file mode 100644
index 0000000000..47a5c054fd
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+#include <net/netdev_queues.h>
+
+#include "octep_vf_config.h"
+#include "octep_vf_main.h"
+
+/* Reset various index of Tx queue data structure. */
+static void octep_vf_iq_reset_indices(struct octep_vf_iq *iq)
+{
+ iq->fill_cnt = 0;
+ iq->host_write_index = 0;
+ iq->octep_vf_read_index = 0;
+ iq->flush_index = 0;
+ iq->pkts_processed = 0;
+ iq->pkt_in_done = 0;
+}
+
+/**
+ * octep_vf_iq_process_completions() - Process Tx queue completions.
+ *
+ * @iq: Octeon Tx queue data structure.
+ * @budget: max number of completions to be processed in one invocation.
+ *
+ * Return: 1, if the budget was fully consumed; 0 otherwise.
+ */
+int octep_vf_iq_process_completions(struct octep_vf_iq *iq, u16 budget)
+{
+ u32 compl_pkts, compl_bytes, compl_sg;
+ struct octep_vf_device *oct = iq->octep_vf_dev;
+ struct octep_vf_tx_buffer *tx_buffer;
+ struct skb_shared_info *shinfo;
+ u32 fi = iq->flush_index;
+ struct sk_buff *skb;
+ u8 frags, i;
+
+ compl_pkts = 0;
+ compl_sg = 0;
+ compl_bytes = 0;
+ iq->octep_vf_read_index = oct->hw_ops.update_iq_read_idx(iq);
+
+ while (likely(budget && (fi != iq->octep_vf_read_index))) {
+ tx_buffer = iq->buff_info + fi;
+ skb = tx_buffer->skb;
+
+ fi++;
+ if (unlikely(fi == iq->max_count))
+ fi = 0;
+ compl_bytes += skb->len;
+ compl_pkts++;
+ budget--;
+
+ if (!tx_buffer->gather) {
+ dma_unmap_single(iq->dev, tx_buffer->dma,
+ tx_buffer->skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ /* Scatter/Gather */
+ shinfo = skb_shinfo(skb);
+ frags = shinfo->nr_frags;
+ compl_sg++;
+
+ dma_unmap_single(iq->dev, tx_buffer->sglist[0].dma_ptr[0],
+ tx_buffer->sglist[0].len[3], DMA_TO_DEVICE);
+
+ i = 1; /* entry 0 is main skb, unmapped above */
+ while (frags--) {
+ dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
+ tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE);
+ i++;
+ }
+
+ dev_kfree_skb_any(skb);
+ }
+
+ iq->pkts_processed += compl_pkts;
+ iq->stats.instr_completed += compl_pkts;
+ iq->stats.bytes_sent += compl_bytes;
+ iq->stats.sgentry_sent += compl_sg;
+ iq->flush_index = fi;
+
+ netif_subqueue_completed_wake(iq->netdev, iq->q_no, compl_pkts,
+ compl_bytes, IQ_INSTR_SPACE(iq),
+ OCTEP_VF_WAKE_QUEUE_THRESHOLD);
+
+ return !budget;
+}
+
+/**
+ * octep_vf_iq_free_pending() - Free Tx buffers for pending completions.
+ *
+ * @iq: Octeon Tx queue data structure.
+ */
+static void octep_vf_iq_free_pending(struct octep_vf_iq *iq)
+{
+ struct octep_vf_tx_buffer *tx_buffer;
+ struct skb_shared_info *shinfo;
+ u32 fi = iq->flush_index;
+ struct sk_buff *skb;
+ u8 frags, i;
+
+ while (fi != iq->host_write_index) {
+ tx_buffer = iq->buff_info + fi;
+ skb = tx_buffer->skb;
+
+ fi++;
+ if (unlikely(fi == iq->max_count))
+ fi = 0;
+
+ if (!tx_buffer->gather) {
+ dma_unmap_single(iq->dev, tx_buffer->dma,
+ tx_buffer->skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ /* Scatter/Gather */
+ shinfo = skb_shinfo(skb);
+ frags = shinfo->nr_frags;
+
+ dma_unmap_single(iq->dev,
+ tx_buffer->sglist[0].dma_ptr[0],
+				 tx_buffer->sglist[0].len[3],
+ DMA_TO_DEVICE);
+
+ i = 1; /* entry 0 is main skb, unmapped above */
+ while (frags--) {
+ dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
+				       tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE);
+ i++;
+ }
+
+ dev_kfree_skb_any(skb);
+ }
+
+ iq->flush_index = fi;
+ netdev_tx_reset_queue(netdev_get_tx_queue(iq->netdev, iq->q_no));
+}
+
+/**
+ * octep_vf_clean_iqs() - Clean Tx queues to shutdown the device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Free the buffers of Tx queue descriptors pending completion and
+ * reset the queue indices.
+ */
+void octep_vf_clean_iqs(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_iqs; i++) {
+ octep_vf_iq_free_pending(oct->iq[i]);
+ octep_vf_iq_reset_indices(oct->iq[i]);
+ }
+}
+
+/**
+ * octep_vf_setup_iq() - Setup a Tx queue.
+ *
+ * @oct: Octeon device private data structure.
+ * @q_no: Tx queue number to be setup.
+ *
+ * Allocate resources for a Tx queue.
+ */
+static int octep_vf_setup_iq(struct octep_vf_device *oct, int q_no)
+{
+ u32 desc_ring_size, buff_info_size, sglist_size;
+ struct octep_vf_iq *iq;
+ int i;
+
+ iq = vzalloc(sizeof(*iq));
+ if (!iq)
+ goto iq_alloc_err;
+ oct->iq[q_no] = iq;
+
+ iq->octep_vf_dev = oct;
+ iq->netdev = oct->netdev;
+ iq->dev = &oct->pdev->dev;
+ iq->q_no = q_no;
+ iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf);
+ iq->ring_size_mask = iq->max_count - 1;
+ iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf);
+ iq->netdev_q = netdev_get_tx_queue(iq->netdev, q_no);
+
+ /* Allocate memory for hardware queue descriptors */
+ desc_ring_size = OCTEP_VF_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf);
+ iq->desc_ring = dma_alloc_coherent(iq->dev, desc_ring_size,
+ &iq->desc_ring_dma, GFP_KERNEL);
+ if (unlikely(!iq->desc_ring)) {
+ dev_err(iq->dev,
+ "Failed to allocate DMA memory for IQ-%d\n", q_no);
+ goto desc_dma_alloc_err;
+ }
+
+ /* Allocate memory for hardware SGLIST descriptors */
+ sglist_size = OCTEP_VF_SGLIST_SIZE_PER_PKT *
+ CFG_GET_IQ_NUM_DESC(oct->conf);
+ iq->sglist = dma_alloc_coherent(iq->dev, sglist_size,
+ &iq->sglist_dma, GFP_KERNEL);
+ if (unlikely(!iq->sglist)) {
+ dev_err(iq->dev,
+ "Failed to allocate DMA memory for IQ-%d SGLIST\n",
+ q_no);
+ goto sglist_alloc_err;
+ }
+
+	/* Allocate memory to manage Tx packets pending completion */
+ buff_info_size = OCTEP_VF_IQ_TXBUFF_INFO_SIZE * iq->max_count;
+ iq->buff_info = vzalloc(buff_info_size);
+ if (!iq->buff_info) {
+ dev_err(iq->dev,
+ "Failed to allocate buff info for IQ-%d\n", q_no);
+ goto buff_info_err;
+ }
+
+ /* Setup sglist addresses in tx_buffer entries */
+ for (i = 0; i < CFG_GET_IQ_NUM_DESC(oct->conf); i++) {
+ struct octep_vf_tx_buffer *tx_buffer;
+
+ tx_buffer = &iq->buff_info[i];
+ tx_buffer->sglist =
+ &iq->sglist[i * OCTEP_VF_SGLIST_ENTRIES_PER_PKT];
+ tx_buffer->sglist_dma =
+ iq->sglist_dma + (i * OCTEP_VF_SGLIST_SIZE_PER_PKT);
+ }
+
+ octep_vf_iq_reset_indices(iq);
+ oct->hw_ops.setup_iq_regs(oct, q_no);
+
+ oct->num_iqs++;
+ return 0;
+
+buff_info_err:
+ dma_free_coherent(iq->dev, sglist_size, iq->sglist, iq->sglist_dma);
+sglist_alloc_err:
+ dma_free_coherent(iq->dev, desc_ring_size,
+ iq->desc_ring, iq->desc_ring_dma);
+desc_dma_alloc_err:
+ vfree(iq);
+ oct->iq[q_no] = NULL;
+iq_alloc_err:
+ return -1;
+}
+
+/**
+ * octep_vf_free_iq() - Free Tx queue resources.
+ *
+ * @iq: Octeon Tx queue data structure.
+ *
+ * Free all the resources allocated for a Tx queue.
+ */
+static void octep_vf_free_iq(struct octep_vf_iq *iq)
+{
+ struct octep_vf_device *oct = iq->octep_vf_dev;
+ u64 desc_ring_size, sglist_size;
+ int q_no = iq->q_no;
+
+ desc_ring_size = OCTEP_VF_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf);
+
+ vfree(iq->buff_info);
+
+ if (iq->desc_ring)
+ dma_free_coherent(iq->dev, desc_ring_size,
+ iq->desc_ring, iq->desc_ring_dma);
+
+ sglist_size = OCTEP_VF_SGLIST_SIZE_PER_PKT *
+ CFG_GET_IQ_NUM_DESC(oct->conf);
+ if (iq->sglist)
+ dma_free_coherent(iq->dev, sglist_size,
+ iq->sglist, iq->sglist_dma);
+
+ vfree(iq);
+ oct->iq[q_no] = NULL;
+ oct->num_iqs--;
+}
+
+/**
+ * octep_vf_setup_iqs() - setup resources for all Tx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+int octep_vf_setup_iqs(struct octep_vf_device *oct)
+{
+ int i;
+
+ oct->num_iqs = 0;
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ if (octep_vf_setup_iq(oct, i)) {
+ dev_err(&oct->pdev->dev,
+ "Failed to setup IQ(TxQ)-%d.\n", i);
+ goto iq_setup_err;
+ }
+ dev_dbg(&oct->pdev->dev, "Successfully setup IQ(TxQ)-%d.\n", i);
+ }
+
+ return 0;
+
+iq_setup_err:
+ while (i) {
+ i--;
+ octep_vf_free_iq(oct->iq[i]);
+ }
+ return -1;
+}
+
+/**
+ * octep_vf_free_iqs() - Free resources of all Tx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+void octep_vf_free_iqs(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ octep_vf_free_iq(oct->iq[i]);
+ dev_dbg(&oct->pdev->dev,
+ "Successfully destroyed IQ(TxQ)-%d.\n", i);
+ }
+ oct->num_iqs = 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h
new file mode 100644
index 0000000000..f338b97510
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h
@@ -0,0 +1,276 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_VF_TX_H_
+#define _OCTEP_VF_TX_H_
+
+#define IQ_SEND_OK 0
+#define IQ_SEND_STOP 1
+#define IQ_SEND_FAILED -1
+
+#define TX_BUFTYPE_NONE 0
+#define TX_BUFTYPE_NET 1
+#define TX_BUFTYPE_NET_SG 2
+#define NUM_TX_BUFTYPES 3
+
+/* Hardware format for Scatter/Gather list
+ *
+ * 63 48|47 32|31 16|15 0
+ * -----------------------------------------
+ * | Len 0 | Len 1 | Len 2 | Len 3 |
+ * -----------------------------------------
+ * | Ptr 0 |
+ * -----------------------------------------
+ * | Ptr 1 |
+ * -----------------------------------------
+ * | Ptr 2 |
+ * -----------------------------------------
+ * | Ptr 3 |
+ * -----------------------------------------
+ */
+struct octep_vf_tx_sglist_desc {
+ u16 len[4];
+ dma_addr_t dma_ptr[4];
+};
+
+static_assert(sizeof(struct octep_vf_tx_sglist_desc) == 40);
+
+/* Each Scatter/Gather entry sent to hardware holds four pointers.
+ * So the number of entries required is (MAX_SKB_FRAGS + 1)/4, where '+1'
+ * is for the main skb, which also goes to Octeon hardware as a gather buffer.
+ * To allocate sufficient SGLIST entries for a packet with the max number of
+ * fragments, round up by adding 3 before calculating the entries per packet.
+ */
+#define OCTEP_VF_SGLIST_ENTRIES_PER_PKT ((MAX_SKB_FRAGS + 1 + 3) / 4)
+#define OCTEP_VF_SGLIST_SIZE_PER_PKT \
+ (OCTEP_VF_SGLIST_ENTRIES_PER_PKT * sizeof(struct octep_vf_tx_sglist_desc))
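Note the asymmetry implied by the diagram: within each 64-bit length word, Len 0 occupies the top 16 bits, so on a little-endian host the lengths are stored in reverse array order while the pointers are not. A sketch of the fill side that mirrors the unmap indexing used in octep_vf_tx.c (the real fill sits in the transmit path in octep_vf_main.c, outside this excerpt):

	/* Record gather entry i (i == 0 is the linear skb data). */
	static void example_sgl_set(struct octep_vf_tx_sglist_desc *sglist,
				    int i, dma_addr_t dma, u16 len)
	{
		sglist[i >> 2].dma_ptr[i & 3] = dma;
		sglist[i >> 2].len[3 - (i & 3)] = len;	/* reversed per u64 word */
	}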
+
+struct octep_vf_tx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct octep_vf_tx_sglist_desc *sglist;
+ dma_addr_t sglist_dma;
+ u8 gather;
+};
+
+#define OCTEP_VF_IQ_TXBUFF_INFO_SIZE (sizeof(struct octep_vf_tx_buffer))
+
+/* VF Hardware interface Tx statistics */
+struct octep_vf_iface_tx_stats {
+ /* Total frames sent on the interface */
+ u64 pkts;
+
+ /* Total octets sent on the interface */
+ u64 octs;
+
+ /* Packets sent to a broadcast DMAC */
+ u64 bcst;
+
+ /* Packets sent to the multicast DMAC */
+ u64 mcst;
+
+ /* Packets dropped */
+ u64 dropped;
+
+ /* Reserved */
+ u64 reserved[13];
+};
+
+/* VF Input Queue statistics */
+struct octep_vf_iq_stats {
+ /* Instructions posted to this queue. */
+ u64 instr_posted;
+
+ /* Instructions copied by hardware for processing. */
+ u64 instr_completed;
+
+ /* Instructions that could not be processed. */
+ u64 instr_dropped;
+
+ /* Bytes sent through this queue. */
+ u64 bytes_sent;
+
+ /* Gather entries sent through this queue. */
+ u64 sgentry_sent;
+
+ /* Number of transmit failures due to TX_BUSY */
+ u64 tx_busy;
+
+ /* Number of times the queue is restarted */
+ u64 restart_cnt;
+};
+
+/* The instruction (input) queue.
+ * The input queue is used to post raw (instruction) mode data or packet
+ * data to the Octeon device from the host. Each input queue (up to 4)
+ * of an Octeon device is represented by one such structure.
+ */
+struct octep_vf_iq {
+ u32 q_no;
+
+ struct octep_vf_device *octep_vf_dev;
+ struct net_device *netdev;
+ struct device *dev;
+ struct netdev_queue *netdev_q;
+
+ /* Index in input ring where driver should write the next packet */
+ u16 host_write_index;
+
+ /* Index in input ring where Octeon is expected to read next packet */
+ u16 octep_vf_read_index;
+
+ /* This index aids in finding the window in the queue where Octeon
+ * has read the commands.
+ */
+ u16 flush_index;
+
+ /* Statistics for this input queue. */
+ struct octep_vf_iq_stats stats;
+
+ /* Pointer to the Virtual Base addr of the input ring. */
+ struct octep_vf_tx_desc_hw *desc_ring;
+
+ /* DMA mapped base address of the input descriptor ring. */
+ dma_addr_t desc_ring_dma;
+
+ /* Info of Tx buffers pending completion. */
+ struct octep_vf_tx_buffer *buff_info;
+
+ /* Base pointer to Scatter/Gather lists for all ring descriptors. */
+ struct octep_vf_tx_sglist_desc *sglist;
+
+ /* DMA mapped addr of Scatter Gather Lists */
+ dma_addr_t sglist_dma;
+
+ /* Octeon doorbell register for the ring. */
+ u8 __iomem *doorbell_reg;
+
+ /* Octeon instruction count register for this ring. */
+ u8 __iomem *inst_cnt_reg;
+
+ /* interrupt level register for this ring */
+ u8 __iomem *intr_lvl_reg;
+
+ /* Maximum no. of instructions in this queue. */
+ u32 max_count;
+ u32 ring_size_mask;
+
+ u32 pkt_in_done;
+ u32 pkts_processed;
+
+ u32 status;
+
+ /* Number of instructions pending to be posted to Octeon. */
+ u32 fill_cnt;
+
+ /* The max. number of instructions that can be held pending by the
+ * driver before ringing doorbell.
+ */
+ u32 fill_threshold;
+};
+
+/* Hardware Tx Instruction Header */
+struct octep_vf_instr_hdr {
+ /* Data Len */
+ u64 tlen:16;
+
+ /* Reserved */
+ u64 rsvd:20;
+
+ /* PKIND for SDP */
+ u64 pkind:6;
+
+ /* Front Data size */
+ u64 fsz:6;
+
+ /* No. of entries in gather list */
+ u64 gsz:14;
+
+	/* Gather indicator: 1 = gather */
+ u64 gather:1;
+
+ /* Reserved3 */
+ u64 reserved3:1;
+};
+
+static_assert(sizeof(struct octep_vf_instr_hdr) == 8);
+
+/* Tx offload flags */
+#define OCTEP_VF_TX_OFFLOAD_VLAN_INSERT BIT(0)
+#define OCTEP_VF_TX_OFFLOAD_IPV4_CKSUM BIT(1)
+#define OCTEP_VF_TX_OFFLOAD_UDP_CKSUM BIT(2)
+#define OCTEP_VF_TX_OFFLOAD_TCP_CKSUM BIT(3)
+#define OCTEP_VF_TX_OFFLOAD_SCTP_CKSUM BIT(4)
+#define OCTEP_VF_TX_OFFLOAD_TCP_TSO BIT(5)
+#define OCTEP_VF_TX_OFFLOAD_UDP_TSO BIT(6)
+
+#define OCTEP_VF_TX_OFFLOAD_CKSUM (OCTEP_VF_TX_OFFLOAD_IPV4_CKSUM | \
+ OCTEP_VF_TX_OFFLOAD_UDP_CKSUM | \
+ OCTEP_VF_TX_OFFLOAD_TCP_CKSUM)
+
+#define OCTEP_VF_TX_OFFLOAD_TSO (OCTEP_VF_TX_OFFLOAD_TCP_TSO | \
+ OCTEP_VF_TX_OFFLOAD_UDP_TSO)
+
+#define OCTEP_VF_TX_IP_CSUM(flags) ((flags) & \
+ (OCTEP_VF_TX_OFFLOAD_IPV4_CKSUM | \
+ OCTEP_VF_TX_OFFLOAD_TCP_CKSUM | \
+ OCTEP_VF_TX_OFFLOAD_UDP_CKSUM))
+
+#define OCTEP_VF_TX_TSO(flags) ((flags) & \
+ (OCTEP_VF_TX_OFFLOAD_TCP_TSO | \
+ OCTEP_VF_TX_OFFLOAD_UDP_TSO))
+
+struct tx_mdata {
+ /* offload flags */
+ u16 ol_flags;
+
+ /* gso size */
+ u16 gso_size;
+
+	/* gso segments */
+ u16 gso_segs;
+
+ /* reserved */
+ u16 rsvd1;
+
+ /* reserved */
+ u64 rsvd2;
+};
+
+static_assert(sizeof(struct tx_mdata) == 16);
+
+/* 64-byte Tx instruction format.
+ * Format of instruction for a 64-byte mode input queue.
+ *
+ * Only the first 16 bytes (dptr and ih) are mandatory; the rest are
+ * optional and filled in by the driver based on firmware/hardware
+ * capabilities. Together these optional headers are called Front Data,
+ * and their size is described by ih->fsz.
+ */
+struct octep_vf_tx_desc_hw {
+ /* Pointer where the input data is available. */
+ u64 dptr;
+
+ /* Instruction Header. */
+ union {
+ struct octep_vf_instr_hdr ih;
+ u64 ih64;
+ };
+
+ union {
+ u64 txm64[2];
+ struct tx_mdata txm;
+ };
+
+ /* Additional headers available in a 64-byte instruction. */
+ u64 exhdr[4];
+};
+
+static_assert(sizeof(struct octep_vf_tx_desc_hw) == 64);
+
+#define OCTEP_VF_IQ_DESC_SIZE (sizeof(struct octep_vf_tx_desc_hw))
+#endif /* _OCTEP_VF_TX_H_ */
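Putting the pieces together, a gather packet's 64-byte command points dptr at the DMA-mapped SG list and describes it through the instruction header. A minimal sketch, assuming fw_info.pkind and fw_info.fsz were fetched over the mailbox as shown earlier; the in-tree xmit path is not part of this excerpt:

	/* Illustrative fill of one 64-byte Tx command for a gather packet. */
	static void example_fill_tx_desc(struct octep_vf_tx_desc_hw *hw_desc,
					 struct octep_vf_device *oct,
					 dma_addr_t sglist_dma,
					 u16 pkt_len, u16 gather_entries)
	{
		memset(hw_desc, 0, sizeof(*hw_desc));
		hw_desc->dptr = sglist_dma;	/* hardware fetches the SG list */
		hw_desc->ih.pkind = oct->fw_info.pkind;
		hw_desc->ih.fsz = oct->fw_info.fsz;	/* Front Data bytes */
		hw_desc->ih.gather = 1;
		hw_desc->ih.gsz = gather_entries;
		hw_desc->ih.tlen = pkt_len + oct->fw_info.fsz;
	}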
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 98e203a0e2..eb2a20b5a0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -843,6 +843,8 @@ enum nix_af_status {
NIX_AF_ERR_CQ_CTX_WRITE_ERR = -429,
NIX_AF_ERR_AQ_CTX_RETRY_WRITE = -430,
NIX_AF_ERR_LINK_CREDITS = -431,
+ NIX_AF_ERR_INVALID_BPID = -434,
+ NIX_AF_ERR_INVALID_BPID_REQ = -435,
NIX_AF_ERR_INVALID_MCAST_GRP = -436,
NIX_AF_ERR_INVALID_MCAST_DEL_REQ = -437,
NIX_AF_ERR_NON_CONTIG_MCE_LIST = -438,
@@ -1120,6 +1122,7 @@ struct nix_rss_flowkey_cfg {
#define NIX_FLOW_KEY_TYPE_INNR_UDP BIT(15)
#define NIX_FLOW_KEY_TYPE_INNR_SCTP BIT(16)
#define NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC BIT(17)
+#define NIX_FLOW_KEY_TYPE_CUSTOM0 BIT(19)
#define NIX_FLOW_KEY_TYPE_VLAN BIT(20)
#define NIX_FLOW_KEY_TYPE_IPV4_PROTO BIT(21)
#define NIX_FLOW_KEY_TYPE_AH BIT(22)
@@ -1559,6 +1562,7 @@ struct flow_msg {
u32 mpls_lse[4];
u8 icmp_type;
u8 icmp_code;
+ __be16 tcp_flags;
};
struct npc_install_flow_req {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index b0b4dea548..d883157393 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -85,8 +85,7 @@ enum npc_kpu_lc_ltype {
enum npc_kpu_ld_ltype {
NPC_LT_LD_TCP = 1,
NPC_LT_LD_UDP,
- NPC_LT_LD_ICMP,
- NPC_LT_LD_SCTP,
+ NPC_LT_LD_SCTP = 4,
NPC_LT_LD_ICMP6,
NPC_LT_LD_CUSTOM0,
NPC_LT_LD_CUSTOM1,
@@ -97,6 +96,7 @@ enum npc_kpu_ld_ltype {
NPC_LT_LD_NSH,
NPC_LT_LD_TU_MPLS_IN_NSH,
NPC_LT_LD_TU_MPLS_IN_IP,
+ NPC_LT_LD_ICMP,
};
enum npc_kpu_le_ltype {
@@ -140,14 +140,14 @@ enum npc_kpu_lg_ltype {
enum npc_kpu_lh_ltype {
NPC_LT_LH_TU_TCP = 1,
NPC_LT_LH_TU_UDP,
- NPC_LT_LH_TU_ICMP,
- NPC_LT_LH_TU_SCTP,
+ NPC_LT_LH_TU_SCTP = 4,
NPC_LT_LH_TU_ICMP6,
+ NPC_LT_LH_CUSTOM0,
+ NPC_LT_LH_CUSTOM1,
NPC_LT_LH_TU_IGMP = 8,
NPC_LT_LH_TU_ESP,
NPC_LT_LH_TU_AH,
- NPC_LT_LH_CUSTOM0 = 0xE,
- NPC_LT_LH_CUSTOM1 = 0xF,
+ NPC_LT_LH_TU_ICMP = 0xF,
};
/* NPC port kind defines how the incoming or outgoing packets
@@ -155,10 +155,11 @@ enum npc_kpu_lh_ltype {
* Software assigns pkind for each incoming port such as CGX
* Ethernet interfaces, LBK interfaces, etc.
*/
-#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_CUSTOM_PRE_L2_PKIND
+#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_CPT_HDR_PTP_PKIND
enum npc_pkind_type {
NPC_RX_LBK_PKIND = 0ULL,
+ NPC_RX_CPT_HDR_PTP_PKIND = 54ULL,
NPC_RX_CUSTOM_PRE_L2_PKIND = 55ULL,
NPC_RX_VLAN_EXDSA_PKIND = 56ULL,
NPC_RX_CHLEN24B_PKIND = 57ULL,
@@ -216,6 +217,7 @@ enum key_fields {
NPC_MPLS4_TTL,
NPC_TYPE_ICMP,
NPC_CODE_ICMP,
+ NPC_TCP_FLAGS,
NPC_HEADER_FIELDS_MAX,
NPC_CHAN = NPC_HEADER_FIELDS_MAX, /* Valid when Rx */
NPC_PF_FUNC, /* Valid when Tx */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
index a820bad3ab..41de72c860 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -35,6 +35,7 @@
#define NPC_ETYPE_NSH 0x894f
#define NPC_ETYPE_DSA 0xdada
#define NPC_ETYPE_PPPOE 0x8864
+#define NPC_ETYPE_ERSPA 0x88be
#define NPC_PPP_IP 0x0021
#define NPC_PPP_IP6 0x0057
@@ -59,6 +60,9 @@
#define NPC_IPNH_MPLS 137
#define NPC_IPNH_HOSTID 139
#define NPC_IPNH_SHIM6 140
+#define NPC_IPNH_CUSTOM 253
+
+#define NPC_IP6_ROUTE_TYPE 4
#define NPC_UDP_PORT_PTP_E 319
#define NPC_UDP_PORT_PTP_G 320
@@ -187,6 +191,7 @@ enum npc_kpu_parser_state {
NPC_S_KPU2_EXDSA,
NPC_S_KPU2_CPT_CTAG,
NPC_S_KPU2_CPT_QINQ,
+ NPC_S_KPU2_MT,
NPC_S_KPU3_CTAG,
NPC_S_KPU3_STAG,
NPC_S_KPU3_QINQ,
@@ -231,6 +236,7 @@ enum npc_kpu_parser_state {
NPC_S_KPU8_ICMP6,
NPC_S_KPU8_GRE,
NPC_S_KPU8_AH,
+ NPC_S_KPU8_CUSTOM,
NPC_S_KPU9_TU_MPLS_IN_GRE,
NPC_S_KPU9_TU_MPLS_IN_NSH,
NPC_S_KPU9_TU_MPLS_IN_IP,
@@ -242,6 +248,7 @@ enum npc_kpu_parser_state {
NPC_S_KPU9_GTPC,
NPC_S_KPU9_GTPU,
NPC_S_KPU9_ESP,
+ NPC_S_KPU9_CUSTOM,
NPC_S_KPU10_TU_MPLS_IN_VXLANGPE,
NPC_S_KPU10_TU_MPLS_PL,
NPC_S_KPU10_TU_MPLS,
@@ -318,10 +325,10 @@ enum npc_kpu_lc_uflag {
NPC_F_LC_U_UNK_PROTO = 0x10,
NPC_F_LC_U_IP_FRAG = 0x20,
NPC_F_LC_U_IP6_FRAG = 0x40,
+ NPC_F_LC_L_6TO4 = 0x80,
};
enum npc_kpu_lc_lflag {
NPC_F_LC_L_IP_IN_IP = 1,
- NPC_F_LC_L_6TO4,
NPC_F_LC_L_MPLS_IN_IP,
NPC_F_LC_L_IP6_TUN_IP6,
NPC_F_LC_L_IP6_MPLS_IN_IP,
@@ -334,6 +341,8 @@ enum npc_kpu_lc_lflag {
NPC_F_LC_L_EXT_MOBILITY,
NPC_F_LC_L_EXT_HOSTID,
NPC_F_LC_L_EXT_SHIM6,
+ NPC_F_LC_L_IP6_SRH_SEG_1,
+ NPC_F_LC_L_IP6_SRH_SEG_2,
};
enum npc_kpu_ld_lflag {
@@ -970,10 +979,10 @@ static struct npc_kpu_profile_action ikpu_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
12, 16, 20, 0, 0,
- NPC_S_KPU1_ETHER, 0, 0,
+ NPC_S_KPU1_CPT_HDR, 48, 0,
NPC_LID_LA, NPC_LT_NA,
0,
- 0, 0, 0, 0,
+ 0, 7, 0, 0,
},
{
@@ -2786,6 +2795,24 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU2_MT, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_MT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -4501,6 +4528,24 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
0xff00,
NPC_IP_VER_6,
NPC_IP_VER_MASK,
+ (NPC_IP6_ROUTE_TYPE << 8) | 1,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_ROUT << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ (NPC_IP6_ROUTE_TYPE << 8) | 2,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_ROUT << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
0x0000,
0x0000,
},
@@ -4776,6 +4821,15 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
},
{
NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_CUSTOM,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
0x0000,
0x0000,
NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
@@ -4884,6 +4938,15 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
},
{
NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_CUSTOM,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
0x0000,
0x0000,
NPC_IP_VER_4,
@@ -5064,6 +5127,15 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
},
{
NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
0x0000,
0x0000,
NPC_IP_VER_6,
@@ -5208,6 +5280,15 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
{
NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5325,6 +5406,15 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
{
NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5433,6 +5523,15 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
{
NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5532,6 +5631,15 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
{
NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5649,6 +5757,15 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
{
NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5757,6 +5874,15 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
{
NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5883,6 +6009,15 @@ static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
},
{
NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5982,6 +6117,15 @@ static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
},
{
NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
0x0000,
0x0000,
0x0000,
@@ -6081,6 +6225,15 @@ static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
},
{
NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
0x0000,
0x0000,
0x0000,
@@ -6310,6 +6463,15 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
0xffff,
0x0000,
0x0000,
+ 0x0009,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_ESP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
0x0000,
0x0000,
},
@@ -6756,6 +6918,78 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
},
{
NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ NPC_GRE_F_CSUM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
0x0000,
0xffff,
NPC_GRE_F_ROUTE,
@@ -6836,6 +7070,15 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU8_CUSTOM, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -7304,6 +7547,24 @@ static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU9_CUSTOM, 0xff,
+ 0x4000,
+ 0xf000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_CUSTOM, 0xff,
+ 0x6000,
+ 0xf000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -8384,7 +8645,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 3, 0,
+ 6, 0, 42, 3, 0,
NPC_S_KPU5_IP6, 14, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
0,
@@ -8536,7 +8797,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 3, 0,
+ 6, 0, 42, 3, 0,
NPC_S_KPU5_IP6, 22, 1,
NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
NPC_F_LA_U_HAS_IH_NIX,
@@ -8693,7 +8954,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 3, 0,
+ 6, 0, 42, 3, 0,
NPC_S_KPU5_IP6, 30, 1,
NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
NPC_F_LA_U_HAS_HIGIG2,
@@ -8818,7 +9079,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 3, 0,
+ 6, 0, 42, 3, 0,
NPC_S_KPU5_IP6, 38, 1,
NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
@@ -8947,7 +9208,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 3, 0,
+ 6, 0, 42, 3, 0,
NPC_S_KPU5_IP6, 14, 0,
NPC_LID_LA, NPC_LT_NA,
0,
@@ -9124,7 +9385,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 6, 1,
NPC_LID_LB, NPC_LT_LB_CTAG,
0,
@@ -9204,7 +9465,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 14, 1,
NPC_LID_LB, NPC_LT_LB_PPPOE,
0,
@@ -9213,7 +9474,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
+ NPC_S_NA, 6, 1,
NPC_LID_LB, NPC_LT_LB_CTAG,
NPC_F_LB_U_UNK_ETYPE,
0, 0, 0, 0,
@@ -9228,7 +9489,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
@@ -9324,7 +9585,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
@@ -9428,7 +9689,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
@@ -9532,7 +9793,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
0,
@@ -9628,7 +9889,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 28, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
@@ -9684,7 +9945,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
0,
@@ -9757,7 +10018,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
+ NPC_S_NA, 8, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
NPC_F_LB_U_UNK_ETYPE,
0, 0, 0, 0,
@@ -9772,7 +10033,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 18, 1,
NPC_LID_LB, NPC_LT_LB_EDSA,
NPC_F_LB_L_EDSA,
@@ -9836,7 +10097,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_EXDSA,
NPC_F_LB_L_EXDSA,
@@ -9923,6 +10184,22 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 0, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU3_CTAG_C, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LB, NPC_EC_L2_K3,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -9949,7 +10226,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
@@ -10029,7 +10306,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 8, 0,
NPC_LID_LB, NPC_LT_NA,
0,
@@ -10101,7 +10378,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 4, 0,
NPC_LID_LB, NPC_LT_NA,
0,
@@ -10165,7 +10442,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 8, 0,
NPC_LID_LB, NPC_LT_NA,
0,
@@ -10237,7 +10514,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 4, 0,
NPC_LID_LB, NPC_LT_NA,
0,
@@ -10310,80 +10587,80 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 1, 0,
- NPC_S_KPU5_IP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU5_IP, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
- NPC_S_KPU5_IP6, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ 6, 0, 42, 1, 0,
+ NPC_S_KPU5_IP6, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_ARP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU5_ARP, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_RARP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU5_RARP, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_PTP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU5_PTP, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_FCOE, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU5_FCOE, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 0, 0,
- NPC_S_KPU4_MPLS, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU4_MPLS, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 0, 0,
- NPC_S_KPU4_MPLS, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU4_MPLS, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 0, 0, 0, 0,
- NPC_S_KPU4_NSH, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU4_NSH, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
@@ -10397,7 +10674,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 8, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
0,
@@ -10469,7 +10746,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 4, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
0,
@@ -10533,7 +10810,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 8, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
0,
@@ -10605,7 +10882,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 4, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
0,
@@ -10685,7 +10962,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_DSA,
NPC_F_LB_L_DSA,
@@ -10733,7 +11010,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 14, 1,
NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
NPC_F_LB_L_DSA_VLAN,
@@ -10894,7 +11171,7 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 0, 0,
+ 6, 0, 42, 0, 0,
NPC_S_KPU5_IP6, 6, 1,
NPC_LID_LB, NPC_LT_LB_FDSA,
NPC_F_LB_L_FDSA,
@@ -10942,7 +11219,7 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 0, 0,
+ 6, 0, 42, 0, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_FDSA,
NPC_F_LB_L_FDSA,
@@ -10990,7 +11267,7 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 0, 0,
+ 6, 0, 42, 0, 0,
NPC_S_KPU5_IP6, 14, 1,
NPC_LID_LB, NPC_LT_LB_PPPOE,
0,
@@ -11014,7 +11291,7 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 0, 0,
+ 6, 0, 42, 0, 0,
NPC_S_KPU5_IP6, 2, 0,
NPC_LID_LC, NPC_LT_NA,
0,
@@ -11063,15 +11340,15 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 0, 0,
- NPC_S_KPU5_IP, 10, 0,
+ NPC_S_KPU5_IP, 10, 1,
NPC_LID_LB, NPC_LT_LB_PPPOE,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 0, 0,
- NPC_S_KPU5_IP6, 10, 0,
+ 6, 0, 42, 0, 0,
+ NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_PPPOE,
0,
0, 0, 0, 0,
@@ -11119,7 +11396,7 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 0, 0, 2, 0,
+ 2, 0, 4, 2, 0,
NPC_S_KPU8_UDP, 20, 1,
NPC_LID_LC, NPC_LT_LC_IP,
0,
@@ -11223,7 +11500,7 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 8, 10, 2, 0,
+ 2, 8, 4, 2, 0,
NPC_S_KPU8_UDP, 0, 1,
NPC_LID_LC, NPC_LT_LC_IP_OPT,
0,
@@ -11450,6 +11727,22 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
0, 0, 0, 0, 0,
NPC_S_KPU6_IP6_ROUT, 40, 1,
NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_IP6_SRH_SEG_1,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_ROUT, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_IP6_SRH_SEG_2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_ROUT, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
NPC_F_LC_L_EXT_ROUT,
0, 0, 0, 0,
},
@@ -11695,6 +11988,14 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_CUSTOM, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LC, NPC_LT_LC_IP,
@@ -11791,6 +12092,14 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_CUSTOM, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LC, NPC_LT_LC_IP_OPT,
@@ -11951,6 +12260,14 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_CUSTOM, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LC, NPC_LT_LC_IP6,
@@ -12080,6 +12397,14 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12184,6 +12509,14 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12280,6 +12613,14 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12368,6 +12709,14 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12472,6 +12821,14 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12568,6 +12925,14 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12681,6 +13046,14 @@ static struct npc_kpu_profile_action kpu7_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12769,6 +13142,14 @@ static struct npc_kpu_profile_action kpu7_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12857,6 +13238,14 @@ static struct npc_kpu_profile_action kpu7_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -13058,6 +13447,14 @@ static struct npc_kpu_profile_action kpu8_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 0,
NPC_S_KPU9_ESP, 8, 1,
NPC_LID_LD, NPC_LT_LD_UDP,
@@ -13458,6 +13855,70 @@ static struct npc_kpu_profile_action kpu8_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 24, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LD, NPC_LT_LD_GRE,
@@ -13529,6 +13990,14 @@ static struct npc_kpu_profile_action kpu8_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_LD, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU9_CUSTOM, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_CUSTOM0,
+ 0,
+ 0, 0xff, 0, 0,
+ },
+ {
NPC_ERRLEV_LD, NPC_EC_UNK,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -13946,6 +14415,22 @@ static struct npc_kpu_profile_action kpu9_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 0, 0,
+ NPC_LID_LE, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 0, 0,
+ NPC_LID_LE, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LE, NPC_EC_UNK,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -15105,7 +15590,9 @@ static struct npc_lt_def_cfg npc_lt_defaults = {
},
.rx_et = {
{
- .lid = NPC_LID_LB,
+ .offset = -2,
+ .valid = 1,
+ .lid = NPC_LID_LC,
.ltype_match = NPC_LT_NA,
.ltype_mask = 0x0,
},
@@ -15139,6 +15626,12 @@ static struct npc_mcam_kex npc_mkex_default = {
/* Ethertype: 2 bytes, KW0[55:40] */
KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x5),
},
+ [NPC_LT_LA_CPT_HDR] = {
+ /* DMAC: 6 bytes, KW1[55:8] */
+ KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, NPC_KEXOF_DMAC),
+ /* Ethertype: 2 bytes, KW0[55:40] */
+ KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x5),
+ },
/* Layer A: HiGig2: */
[NPC_LT_LA_HIGIG2_ETHER] = {
/* Classification: 2 bytes, KW1[23:8] */
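
The eight new ERSPAN entries in kpu8 differ only in the inner-parse offset, which the matching action entries step through as 12, 16, 16, 16, 20, 20, 20 and 24 bytes: a 4-byte base GRE header, plus 4 bytes per optional field (checksum, key, sequence), plus what looks like an 8-byte ERSPAN type II header. The ladder reduced to arithmetic (flag encodings below are illustrative, not the NPC_GRE_F_* register values):

#include <stdio.h>

#define DEMO_GRE_F_CSUM	(1u << 0)
#define DEMO_GRE_F_KEY	(1u << 1)
#define DEMO_GRE_F_SEQ	(1u << 2)

static unsigned int gre_hdr_len(unsigned int flags)
{
	unsigned int len = 4;			/* base GRE header */

	if (flags & DEMO_GRE_F_CSUM)
		len += 4;			/* checksum + reserved */
	if (flags & DEMO_GRE_F_KEY)
		len += 4;
	if (flags & DEMO_GRE_F_SEQ)
		len += 4;
	return len;
}

int main(void)
{
	for (unsigned int f = 0; f < 8; f++)
		printf("gre flags 0x%x -> inner header at %u bytes\n",
		       f, gre_hdr_len(f) + 8);	/* + ERSPAN type II */
	return 0;
}
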
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 6a911ea0cf..ff78251f92 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -817,6 +817,8 @@ static int rvu_fwdata_init(struct rvu *rvu)
err = cgx_get_fwdata_base(&fwdbase);
if (err)
goto fail;
+
+ BUILD_BUG_ON(offsetof(struct rvu_fwdata, cgx_fw_data) > FWDATA_CGX_LMAC_OFFSET);
rvu->fwdata = ioremap_wc(fwdbase, sizeof(struct rvu_fwdata));
if (!rvu->fwdata)
goto fail;
@@ -1484,7 +1486,7 @@ int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
/* All CGX mapped PFs are set with assigned NIX block during init */
if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
blkaddr = pf->nix_blkaddr;
- } else if (is_afvf(pcifunc)) {
+ } else if (is_lbk_vf(rvu, pcifunc)) {
vf = pcifunc - 1;
/* Assign NIX based on VF number. All even numbered VFs get
* NIX0 and odd numbered get NIX1
@@ -2034,7 +2036,7 @@ int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req,
u16 target;
/* Only PF can add VF permissions */
- if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_afvf(pcifunc))
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_lbk_vf(rvu, pcifunc))
return -EOPNOTSUPP;
target = (pcifunc & ~RVU_PFVF_FUNC_MASK) | (req->vf + 1);
@@ -2637,6 +2639,9 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
* 3. Cleanup pools (NPA)
*/
+ /* Free allocated BPIDs */
+ rvu_nix_flr_free_bpids(rvu, pcifunc);
+
/* Free multicast/mirror node associated with the 'pcifunc' */
rvu_nix_mcast_flr_free_entries(rvu, pcifunc);
@@ -3170,6 +3175,7 @@ static int rvu_enable_sriov(struct rvu *rvu)
{
struct pci_dev *pdev = rvu->pdev;
int err, chans, vfs;
+ int pos = 0;
if (!rvu_afvf_msix_vectors_num_ok(rvu)) {
dev_warn(&pdev->dev,
@@ -3177,6 +3183,12 @@ static int rvu_enable_sriov(struct rvu *rvu)
return 0;
}
+	/* Get the RVU VF's device id */
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (!pos)
+ return 0;
+ pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &rvu->vf_devid);
+
chans = rvu_get_num_lbk_chans();
if (chans < 0)
return chans;
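
The new BUILD_BUG_ON pins cgx_fw_data to FWDATA_CGX_LMAC_OFFSET inside the firmware-shared rvu_fwdata page: the reserved area shrank (1022 to 958 words) to make room for channel_data, and the assert proves the repartitioning did not move the CGX block that firmware writes at a fixed offset. The same guard in plain C11, with illustrative sizes:

#include <stddef.h>
#include <stdint.h>
#include <assert.h>

struct demo_fwdata {
	uint64_t header[4];
	uint64_t reserved[100];		/* shrink this to add new fields */
	uint64_t cgx_fw_data[8];	/* firmware writes at a fixed spot */
};

static_assert(offsetof(struct demo_fwdata, cgx_fw_data) == 832,
	      "shared firmware ABI offset moved");
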
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index d44a400e1b..35834687e4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -288,6 +288,16 @@ enum rvu_pfvf_flags {
#define RVU_CLEAR_VF_PERM ~GENMASK(PF_SET_VF_TRUSTED, PF_SET_VF_MAC)
+struct nix_bp {
+ struct rsrc_bmap bpids; /* free bpids bitmap */
+ u16 cgx_bpid_cnt;
+ u16 sdp_bpid_cnt;
+ u16 free_pool_base;
+ u16 *fn_map; /* pcifunc mapping */
+ u8 *intf_map; /* interface type map */
+ u8 *ref_cnt;
+};
+
struct nix_txsch {
struct rsrc_bmap schq;
u8 lvl;
@@ -363,6 +373,7 @@ struct nix_hw {
struct nix_lso lso;
struct nix_txvlan txvlan;
struct nix_ipolicer *ipolicer;
+ struct nix_bp bp;
u64 *tx_credits;
u8 cc_mcs_cnt;
};
@@ -432,6 +443,13 @@ struct mbox_wq_info {
struct workqueue_struct *mbox_wq;
};
+struct channel_fwdata {
+ struct sdp_node_info info;
+ u8 valid;
+#define RVU_CHANL_INFO_RESERVED 379
+ u8 reserved[RVU_CHANL_INFO_RESERVED];
+};
+
struct rvu_fwdata {
#define RVU_FWDATA_HEADER_MAGIC 0xCFDA /* Custom Firmware Data*/
#define RVU_FWDATA_VERSION 0x0001
@@ -450,11 +468,13 @@ struct rvu_fwdata {
u64 msixtr_base;
u32 ptp_ext_clk_rate;
u32 ptp_ext_tstamp;
-#define FWDATA_RESERVED_MEM 1022
+ struct channel_fwdata channel_data;
+#define FWDATA_RESERVED_MEM 958
u64 reserved[FWDATA_RESERVED_MEM];
#define CGX_MAX 9
#define CGX_LMACS_MAX 4
#define CGX_LMACS_USX 8
+#define FWDATA_CGX_LMAC_OFFSET 10536
union {
struct cgx_lmac_fwdata_s
cgx_fw_data[CGX_MAX][CGX_LMACS_MAX];
@@ -503,6 +523,7 @@ struct rvu {
struct mutex rsrc_lock; /* Serialize resource alloc/free */
struct mutex alias_lock; /* Serialize bar2 alias access */
int vfs; /* Number of VFs attached to RVU */
+	u16 vf_devid; /* VF device id */
int nix_blkaddr[MAX_NIX_BLKS];
/* Mbox */
@@ -734,9 +755,11 @@ static inline bool is_rvu_supports_nix1(struct rvu *rvu)
/* Function Prototypes
* RVU
*/
-static inline bool is_afvf(u16 pcifunc)
+#define RVU_LBK_VF_DEVID 0xA0F8
+static inline bool is_lbk_vf(struct rvu *rvu, u16 pcifunc)
{
- return !(pcifunc & ~RVU_PFVF_FUNC_MASK);
+ return (!(pcifunc & ~RVU_PFVF_FUNC_MASK) &&
+ (rvu->vf_devid == RVU_LBK_VF_DEVID));
}
static inline bool is_vf(u16 pcifunc)
@@ -796,7 +819,7 @@ void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq);
int rvu_sdp_init(struct rvu *rvu);
bool is_sdp_pfvf(u16 pcifunc);
bool is_sdp_pf(u16 pcifunc);
-bool is_sdp_vf(u16 pcifunc);
+bool is_sdp_vf(struct rvu *rvu, u16 pcifunc);
/* CGX APIs */
static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf)
@@ -875,6 +898,7 @@ int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc,
u32 mcast_grp_idx);
int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc,
u32 mcast_grp_idx, u16 mcam_index);
+void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc);
/* NPC APIs */
void rvu_npc_freemem(struct rvu *rvu);
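
is_lbk_vf() can no longer rely on "PF number is zero" alone, since AF-attached VFs may now be LBK or SDP devices; the cached SR-IOV VF device id disambiguates them. A userspace sketch of the test, assuming the usual RVU pcifunc layout (function in the low 10 bits, PF number above):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define DEMO_FUNC_MASK		0x3FFu	/* assumed RVU_PFVF_FUNC_MASK */
#define DEMO_LBK_VF_DEVID	0xA0F8u

static bool demo_is_lbk_vf(uint16_t pcifunc, uint16_t vf_devid)
{
	/* AF VFs have PF number 0; the device id picks LBK vs SDP */
	return !(pcifunc & ~DEMO_FUNC_MASK) && vf_devid == DEMO_LBK_VF_DEVID;
}

int main(void)
{
	printf("%d\n", demo_is_lbk_vf(0x0001, 0xA0F8));	/* AF VF, LBK -> 1 */
	printf("%d\n", demo_is_lbk_vf(0x0001, 0xA0F7));	/* AF VF, SDP -> 0 */
	printf("%d\n", demo_is_lbk_vf(0x0401, 0xA0F8));	/* PF1's VF  -> 0 */
	return 0;
}
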
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index e7eca8141e..881d704644 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -2868,6 +2868,10 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
seq_printf(s, "%d ", ntohs(rule->packet.dport));
seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
break;
+ case NPC_TCP_FLAGS:
+ seq_printf(s, "%d ", rule->packet.tcp_flags);
+ seq_printf(s, "mask 0x%x\n", rule->mask.tcp_flags);
+ break;
case NPC_IPSEC_SPI:
seq_printf(s, "0x%x ", ntohl(rule->packet.spi));
seq_printf(s, "mask 0x%x\n", ntohl(rule->mask.spi));
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 42db213fb6..00af8888e3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -499,29 +499,115 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
rvu_cgx_disable_dmac_entries(rvu, pcifunc);
}
+#define NIX_BPIDS_PER_LMAC 8
+#define NIX_BPIDS_PER_CPT 1
+static int nix_setup_bpids(struct rvu *rvu, struct nix_hw *hw, int blkaddr)
+{
+ struct nix_bp *bp = &hw->bp;
+ int err, max_bpids;
+ u64 cfg;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+ max_bpids = FIELD_GET(NIX_CONST_MAX_BPIDS, cfg);
+
+ /* Reserve the BPIds for CGX and SDP */
+ bp->cgx_bpid_cnt = rvu->hw->cgx_links * NIX_BPIDS_PER_LMAC;
+ bp->sdp_bpid_cnt = rvu->hw->sdp_links * FIELD_GET(NIX_CONST_SDP_CHANS, cfg);
+ bp->free_pool_base = bp->cgx_bpid_cnt + bp->sdp_bpid_cnt +
+ NIX_BPIDS_PER_CPT;
+ bp->bpids.max = max_bpids - bp->free_pool_base;
+
+ err = rvu_alloc_bitmap(&bp->bpids);
+ if (err)
+ return err;
+
+ bp->fn_map = devm_kcalloc(rvu->dev, bp->bpids.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!bp->fn_map)
+ return -ENOMEM;
+
+ bp->intf_map = devm_kcalloc(rvu->dev, bp->bpids.max,
+ sizeof(u8), GFP_KERNEL);
+ if (!bp->intf_map)
+ return -ENOMEM;
+
+ bp->ref_cnt = devm_kcalloc(rvu->dev, bp->bpids.max,
+ sizeof(u8), GFP_KERNEL);
+ if (!bp->ref_cnt)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc)
+{
+ int blkaddr, bpid, err;
+ struct nix_hw *nix_hw;
+ struct nix_bp *bp;
+
+ if (!is_lbk_vf(rvu, pcifunc))
+ return;
+
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return;
+
+ bp = &nix_hw->bp;
+
+ mutex_lock(&rvu->rsrc_lock);
+ for (bpid = 0; bpid < bp->bpids.max; bpid++) {
+ if (bp->fn_map[bpid] == pcifunc) {
+ bp->ref_cnt[bpid]--;
+ if (bp->ref_cnt[bpid])
+ continue;
+ rvu_free_rsrc(&bp->bpids, bpid);
+ bp->fn_map[bpid] = 0;
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+}
+
int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
struct nix_bp_cfg_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, pf, type, err;
+ u16 chan_base, chan, bpid;
struct rvu_pfvf *pfvf;
- int blkaddr, pf, type;
- u16 chan_base, chan;
+ struct nix_hw *nix_hw;
+ struct nix_bp *bp;
u64 cfg;
pf = rvu_get_pf(pcifunc);
- type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+ type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
return 0;
pfvf = rvu_get_pfvf(rvu, pcifunc);
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+ bp = &nix_hw->bp;
chan_base = pfvf->rx_chan_base + req->chan_base;
for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
cfg & ~BIT_ULL(16));
+
+ if (type == NIX_INTF_TYPE_LBK) {
+ bpid = cfg & GENMASK(8, 0);
+ mutex_lock(&rvu->rsrc_lock);
+ rvu_free_rsrc(&bp->bpids, bpid - bp->free_pool_base);
+ for (bpid = 0; bpid < bp->bpids.max; bpid++) {
+ if (bp->fn_map[bpid] == pcifunc) {
+ bp->fn_map[bpid] = 0;
+ bp->ref_cnt[bpid] = 0;
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ }
}
return 0;
}
@@ -529,25 +615,20 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
int type, int chan_id)
{
- int bpid, blkaddr, lmac_chan_cnt, sdp_chan_cnt;
- u16 cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt;
+ int bpid, blkaddr, sdp_chan_base, err;
struct rvu_hwinfo *hw = rvu->hw;
struct rvu_pfvf *pfvf;
+ struct nix_hw *nix_hw;
u8 cgx_id, lmac_id;
- u64 cfg;
-
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
- lmac_chan_cnt = cfg & 0xFF;
+ struct nix_bp *bp;
- cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
- lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
+ pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
- sdp_chan_cnt = cfg & 0xFFF;
- sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt;
+ err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
- pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ bp = &nix_hw->bp;
/* Backpressure IDs range division
* CGX channels are mapped to (0 - 191) BPIDs
@@ -561,38 +642,48 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
*/
switch (type) {
case NIX_INTF_TYPE_CGX:
- if ((req->chan_base + req->chan_cnt) > 16)
- return -EINVAL;
+ if ((req->chan_base + req->chan_cnt) > NIX_BPIDS_PER_LMAC)
+ return NIX_AF_ERR_INVALID_BPID_REQ;
rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
/* Assign bpid based on cgx, lmac and chan id */
- bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
- (lmac_id * lmac_chan_cnt) + req->chan_base;
+ bpid = (cgx_id * hw->lmac_per_cgx * NIX_BPIDS_PER_LMAC) +
+ (lmac_id * NIX_BPIDS_PER_LMAC) + req->chan_base;
if (req->bpid_per_chan)
bpid += chan_id;
- if (bpid > cgx_bpid_cnt)
- return -EINVAL;
+ if (bpid > bp->cgx_bpid_cnt)
+ return NIX_AF_ERR_INVALID_BPID;
break;
case NIX_INTF_TYPE_LBK:
- if ((req->chan_base + req->chan_cnt) > 63)
- return -EINVAL;
- bpid = cgx_bpid_cnt + req->chan_base;
- if (req->bpid_per_chan)
- bpid += chan_id;
- if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
- return -EINVAL;
+ /* Alloc bpid from the free pool */
+ mutex_lock(&rvu->rsrc_lock);
+ bpid = rvu_alloc_rsrc(&bp->bpids);
+ if (bpid < 0) {
+ mutex_unlock(&rvu->rsrc_lock);
+ return NIX_AF_ERR_INVALID_BPID;
+ }
+ bp->fn_map[bpid] = req->hdr.pcifunc;
+ bp->ref_cnt[bpid]++;
+ bpid += bp->free_pool_base;
+ mutex_unlock(&rvu->rsrc_lock);
break;
case NIX_INTF_TYPE_SDP:
- if ((req->chan_base + req->chan_cnt) > 255)
- return -EINVAL;
+ if ((req->chan_base + req->chan_cnt) > bp->sdp_bpid_cnt)
+ return NIX_AF_ERR_INVALID_BPID_REQ;
- bpid = sdp_bpid_cnt + req->chan_base;
+	/* Handle the use case of 2 SDP blocks */
+ if (!hw->cap.programmable_chans)
+ sdp_chan_base = pfvf->rx_chan_base - NIX_CHAN_SDP_CH_START;
+ else
+ sdp_chan_base = pfvf->rx_chan_base - hw->sdp_chan_base;
+
+ bpid = bp->cgx_bpid_cnt + req->chan_base + sdp_chan_base;
if (req->bpid_per_chan)
bpid += chan_id;
- if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt + sdp_bpid_cnt))
- return -EINVAL;
+ if (bpid > (bp->cgx_bpid_cnt + bp->sdp_bpid_cnt))
+ return NIX_AF_ERR_INVALID_BPID;
break;
default:
return -EINVAL;
@@ -612,7 +703,7 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
u64 cfg;
pf = rvu_get_pf(pcifunc);
- type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+ type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
if (is_sdp_pfvf(pcifunc))
type = NIX_INTF_TYPE_SDP;
@@ -1523,7 +1614,7 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
cfg = NPC_TX_DEF_PKIND;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
- intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+ intf = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
if (is_sdp_pfvf(pcifunc))
intf = NIX_INTF_TYPE_SDP;
@@ -1899,7 +1990,7 @@ static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
int pf = rvu_get_pf(pcifunc);
u8 cgx_id = 0, lmac_id = 0;
- if (is_afvf(pcifunc)) {/* LBK links */
+ if (is_lbk_vf(rvu, pcifunc)) {/* LBK links */
return hw->cgx_links;
} else if (is_pf_cgxmapped(rvu, pf)) {
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
@@ -1916,7 +2007,7 @@ static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
struct rvu_hwinfo *hw = rvu->hw;
int pf = rvu_get_pf(pcifunc);
- if (is_afvf(pcifunc)) { /* LBK links */
+ if (is_lbk_vf(rvu, pcifunc)) { /* LBK links */
*start = hw->cap.nix_txsch_per_cgx_lmac * link;
*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
@@ -3356,7 +3447,7 @@ static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
int pf;
/* skip multicast pkt replication for AF's VFs & SDP links */
- if (is_afvf(pcifunc) || is_sdp_pfvf(pcifunc))
+ if (is_lbk_vf(rvu, pcifunc) || is_sdp_pfvf(pcifunc))
return 0;
if (!hw->cap.nix_rx_multicast)
@@ -3703,7 +3794,7 @@ int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
if (blkaddr < 0)
return NIX_AF_ERR_AF_LF_INVALID;
- if (is_afvf(pcifunc))
+ if (is_lbk_vf(rvu, pcifunc))
rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
else
rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
@@ -4039,6 +4130,13 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
field->ltype_match = NPC_LT_LE_GTPU;
field->ltype_mask = 0xF;
break;
+ case NIX_FLOW_KEY_TYPE_CUSTOM0:
+ field->lid = NPC_LID_LC;
+ field->hdr_offset = 6;
+		field->bytesm1 = 1; /* 2 Bytes */
+ field->ltype_match = NPC_LT_LC_CUSTOM0;
+ field->ltype_mask = 0xF;
+ break;
case NIX_FLOW_KEY_TYPE_VLAN:
field->lid = NPC_LID_LB;
field->hdr_offset = 2; /* Skip TPID (2-bytes) */
@@ -4420,7 +4518,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
if (!nix_hw)
return NIX_AF_ERR_INVALID_NIXBLK;
- if (is_afvf(pcifunc))
+ if (is_lbk_vf(rvu, pcifunc))
rvu_get_lbk_link_max_frs(rvu, &max_mtu);
else
rvu_get_lmac_link_max_frs(rvu, &max_mtu);
@@ -4784,6 +4882,10 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
if (err)
return err;
+ err = nix_setup_bpids(rvu, nix_hw, blkaddr);
+ if (err)
+ return err;
+
/* Configure segmentation offload formats */
nix_setup_lso(rvu, nix_hw, blkaddr);
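
nix_setup_bpids() replaces the old ad-hoc BPID arithmetic with an explicit partition: fixed CGX and SDP ranges, one CPT BPID, then a bitmap-managed free pool that LBK channels allocate from (with fn_map/ref_cnt providing per-pcifunc cleanup on FLR). The layout, computed with illustrative link counts:

#include <stdio.h>

#define BPIDS_PER_LMAC	8
#define BPIDS_PER_CPT	1

int main(void)
{
	unsigned int max_bpids = 512;	/* FIELD_GET(NIX_CONST_MAX_BPIDS, const1) */
	unsigned int cgx_links = 24, sdp_links = 1, sdp_chans = 256;

	unsigned int cgx_cnt = cgx_links * BPIDS_PER_LMAC;
	unsigned int sdp_cnt = sdp_links * sdp_chans;
	unsigned int free_base = cgx_cnt + sdp_cnt + BPIDS_PER_CPT;

	printf("CGX [0..%u)  SDP [%u..%u)  CPT %u  free pool [%u..%u)\n",
	       cgx_cnt, cgx_cnt, cgx_cnt + sdp_cnt, cgx_cnt + sdp_cnt,
	       free_base, max_bpids);
	return 0;
}
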
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index d94b7b88e1..97722ce8c4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -395,7 +395,7 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
owner = mcam->entry2pfvf_map[index];
target_func = (entry->action >> 4) & 0xffff;
/* do nothing when target is LBK/PF or owner is not PF */
- if (is_pffunc_af(owner) || is_afvf(target_func) ||
+ if (is_pffunc_af(owner) || is_lbk_vf(rvu, target_func) ||
(owner & RVU_PFVF_FUNC_MASK) ||
!(target_func & RVU_PFVF_FUNC_MASK))
return;
@@ -608,7 +608,7 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
int blkaddr, index;
/* AF's and SDP VFs work in promiscuous mode */
- if (is_afvf(pcifunc) || is_sdp_vf(pcifunc))
+ if (is_lbk_vf(rvu, pcifunc) || is_sdp_vf(rvu, pcifunc))
return;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
@@ -773,7 +773,7 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
return;
/* Skip LBK VFs */
- if (is_afvf(pcifunc))
+ if (is_lbk_vf(rvu, pcifunc))
return;
/* If pkt replication is not supported,
@@ -853,7 +853,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
u16 vf_func;
/* Only CGX PF/VF can add allmulticast entry */
- if (is_afvf(pcifunc) && is_sdp_vf(pcifunc))
+ if (is_lbk_vf(rvu, pcifunc) && is_sdp_vf(rvu, pcifunc))
return;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
@@ -2519,7 +2519,17 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
* - when available free entries are less.
* Lower priority ones out of available free entries are always
* chosen when 'high vs low' question arises.
+ *
+	 * For a VF, the base MCAM match rule is set by its PF, and all
+	 * further MCAM rules installed by the VF on its own are
+	 * concatenated with that base rule. Hence PF entries should be
+	 * at lower priority compared to VF entries; otherwise the base
+	 * rule is always hit and rules installed by the VF are of no
+	 * use. So if the request is from a PF, allocate low priority
+	 * entries.
*/
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ goto lprio_alloc;
/* Get the search range for priority allocation request */
if (req->priority) {
@@ -2528,17 +2538,6 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
goto alloc;
}
- /* For a VF base MCAM match rule is set by its PF. And all the
- * further MCAM rules installed by VF on its own are
- * concatenated with the base rule set by its PF. Hence PF entries
- * should be at lower priority compared to VF entries. Otherwise
- * base rule is hit always and rules installed by VF will be of
- * no use. Hence if the request is from PF and NOT a priority
- * allocation request then allocate low priority entries.
- */
- if (!(pcifunc & RVU_PFVF_FUNC_MASK))
- goto lprio_alloc;
-
/* Find out the search range for non-priority allocation request
*
* Get MCAM free entry count in middle zone.
@@ -2568,6 +2567,18 @@ lprio_alloc:
reverse = true;
start = 0;
end = mcam->bmap_entries;
+	/* Ensure PF requests always land at the bottom. If a PF requests
+	 * a higher/lower priority entry w.r.t. a reference entry, honour
+	 * that criterion but still search for entries from the bottom,
+	 * not in the mid zone.
+ */
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK) &&
+ req->priority == NPC_MCAM_HIGHER_PRIO)
+ end = req->ref_entry;
+
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK) &&
+ req->priority == NPC_MCAM_LOWER_PRIO)
+ start = req->ref_entry;
}
alloc:
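
The reordered allocation logic makes PF requests unconditionally take the low-priority path, and then clamps the search window when the PF also passed a reference entry, instead of falling through to the mid-zone search reserved for VFs. The clamping, in isolation:

#include <stdio.h>

enum { PRIO_NONE, PRIO_HIGHER, PRIO_LOWER };

static void pf_window(unsigned int bmap_entries, unsigned int ref_entry,
		      int priority, unsigned int *start, unsigned int *end)
{
	*start = 0;
	*end = bmap_entries;
	if (priority == PRIO_HIGHER)
		*end = ref_entry;	/* only entries above the reference */
	else if (priority == PRIO_LOWER)
		*start = ref_entry;	/* only entries at/after the reference */
}

int main(void)
{
	unsigned int s, e;

	pf_window(4096, 100, PRIO_HIGHER, &s, &e);
	printf("PF higher-prio: search [%u..%u)\n", s, e);	/* [0..100) */
	pf_window(4096, 100, PRIO_LOWER, &s, &e);
	printf("PF lower-prio:  search [%u..%u)\n", s, e);	/* [100..4096) */
	return 0;
}
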
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index c75669c8fd..c181e7aa9e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -53,6 +53,7 @@ static const char * const npc_flow_names[] = {
[NPC_MPLS4_TTL] = "lse depth 4",
[NPC_TYPE_ICMP] = "icmp type",
[NPC_CODE_ICMP] = "icmp code",
+ [NPC_TCP_FLAGS] = "tcp flags",
[NPC_UNKNOWN] = "unknown",
};
@@ -530,6 +531,7 @@ do { \
NPC_SCAN_HDR(NPC_DPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 2, 2);
NPC_SCAN_HDR(NPC_TYPE_ICMP, NPC_LID_LD, NPC_LT_LD_ICMP, 0, 1);
NPC_SCAN_HDR(NPC_CODE_ICMP, NPC_LID_LD, NPC_LT_LD_ICMP, 1, 1);
+ NPC_SCAN_HDR(NPC_TCP_FLAGS, NPC_LID_LD, NPC_LT_LD_TCP, 12, 2);
NPC_SCAN_HDR(NPC_ETYPE_ETHER, NPC_LID_LA, NPC_LT_LA_ETHER, 12, 2);
NPC_SCAN_HDR(NPC_ETYPE_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 4, 2);
NPC_SCAN_HDR(NPC_ETYPE_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8, 2);
@@ -574,7 +576,8 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
BIT_ULL(NPC_DPORT_TCP) | BIT_ULL(NPC_DPORT_UDP) |
BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP) |
BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP) |
- BIT_ULL(NPC_TYPE_ICMP) | BIT_ULL(NPC_CODE_ICMP);
+ BIT_ULL(NPC_TYPE_ICMP) | BIT_ULL(NPC_CODE_ICMP) |
+ BIT_ULL(NPC_TCP_FLAGS);
/* for tcp/udp/sctp corresponding layer type should be in the key */
if (*features & proto_flags) {
@@ -982,7 +985,8 @@ do { \
mask->icmp_type, 0);
NPC_WRITE_FLOW(NPC_CODE_ICMP, icmp_code, pkt->icmp_code, 0,
mask->icmp_code, 0);
-
+ NPC_WRITE_FLOW(NPC_TCP_FLAGS, tcp_flags, ntohs(pkt->tcp_flags), 0,
+ ntohs(mask->tcp_flags), 0);
NPC_WRITE_FLOW(NPC_IPSEC_SPI, spi, ntohl(pkt->spi), 0,
ntohl(mask->spi), 0);
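
NPC_SCAN_HDR places the new key at LD/TCP offset 12 with length 2, i.e. the data-offset/reserved byte plus the flags byte, so both packet value and mask are handled as a big-endian u16 (hence the ntohs() calls above). The matching arithmetic on those two bytes:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Example TCP bytes 12-13: data offset 5 (<<12), flags SYN|ACK */
	uint16_t wire = (5u << 12) | 0x0012;
	uint16_t mask = 0x00FF;			/* match the flags byte only */

	printf("flags=0x%02x match=%d\n", wire & mask,
	       (wire & mask) == 0x0012);
	return 0;
}
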
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 6f73ad9807..086f05c037 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -439,6 +439,9 @@
#define NIX_AF_LINKX_RANGE_MASK GENMASK_ULL(19, 16)
#define NIX_AF_LINKX_MCS_CNT_MASK GENMASK_ULL(33, 32)
+#define NIX_CONST_MAX_BPIDS GENMASK_ULL(23, 12)
+#define NIX_CONST_SDP_CHANS GENMASK_ULL(11, 0)
+
/* SSO */
#define SSO_AF_CONST (0x1000)
#define SSO_AF_CONST1 (0x1008)
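
The two new CONST1 fields are decoded with FIELD_GET(), which masks and shifts in one step. Userspace stand-ins for the kernel helpers (the division by the mask's lowest set bit performs the shift):

#include <stdio.h>
#include <stdint.h>

#define DEMO_GENMASK(h, l) ((~0ULL << (l)) & (~0ULL >> (63 - (h))))
#define DEMO_MAX_BPIDS	DEMO_GENMASK(23, 12)
#define DEMO_SDP_CHANS	DEMO_GENMASK(11, 0)

/* FIELD_GET: isolate the field, then shift it down to bit 0 */
#define DEMO_FIELD_GET(mask, val) (((val) & (mask)) / ((mask) & -(mask)))

int main(void)
{
	uint64_t const1 = (512ULL << 12) | 256ULL;	/* illustrative value */

	printf("max_bpids=%llu sdp_chans=%llu\n",
	       (unsigned long long)DEMO_FIELD_GET(DEMO_MAX_BPIDS, const1),
	       (unsigned long long)DEMO_FIELD_GET(DEMO_SDP_CHANS, const1));
	return 0;
}
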
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
index ae50d56258..38cfe148f4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
@@ -40,8 +40,12 @@ bool is_sdp_pf(u16 pcifunc)
!(pcifunc & RVU_PFVF_FUNC_MASK));
}
-bool is_sdp_vf(u16 pcifunc)
+#define RVU_SDP_VF_DEVID 0xA0F7
+bool is_sdp_vf(struct rvu *rvu, u16 pcifunc)
{
+ if (!(pcifunc & ~RVU_PFVF_FUNC_MASK))
+ return (rvu->vf_devid == RVU_SDP_VF_DEVID);
+
return (is_sdp_pfvf(pcifunc) &&
!!(pcifunc & RVU_PFVF_FUNC_MASK));
}
@@ -52,6 +56,14 @@ int rvu_sdp_init(struct rvu *rvu)
struct rvu_pfvf *pfvf;
u32 i = 0;
+ if (rvu->fwdata->channel_data.valid) {
+ sdp_pf_num[0] = 0;
+ pfvf = &rvu->pf[sdp_pf_num[0]];
+ pfvf->sdp_info = &rvu->fwdata->channel_data.info;
+
+ return 0;
+ }
+
while ((i < MAX_SDP) && (pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
PCI_DEVID_OTX2_SDP_PF,
pdev)) != NULL) {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index 5664f768cb..64a97a0a10 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -9,10 +9,9 @@ obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o otx2_ptp.o
rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \
otx2_devlink.o qos_sq.o qos.o
-rvu_nicvf-y := otx2_vf.o otx2_devlink.o
+rvu_nicvf-y := otx2_vf.o
rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
-rvu_nicvf-$(CONFIG_DCB) += otx2_dcbnl.o
rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
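
Dropping otx2_devlink.o and otx2_dcbnl.o from the rvu_nicvf object list means the VF driver no longer builds private copies of that code. It now resolves those functions from rvu_nicpf at module load time, which is exactly what the EXPORT_SYMBOL() additions in the following two files provide; modpost records the inter-module dependency so the PF module is loaded first.
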
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
index 28fb643d29..aa01110f04 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
@@ -54,6 +54,7 @@ int otx2_pfc_txschq_config(struct otx2_nic *pfvf)
return 0;
}
+EXPORT_SYMBOL(otx2_pfc_txschq_config);
static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio)
{
@@ -122,6 +123,7 @@ int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf)
return 0;
}
+EXPORT_SYMBOL(otx2_pfc_txschq_alloc);
static int otx2_pfc_txschq_stop_one(struct otx2_nic *pfvf, u8 prio)
{
@@ -260,6 +262,7 @@ update_sq_smq_map:
return 0;
}
+EXPORT_SYMBOL(otx2_pfc_txschq_update);
int otx2_pfc_txschq_stop(struct otx2_nic *pfvf)
{
@@ -282,6 +285,7 @@ int otx2_pfc_txschq_stop(struct otx2_nic *pfvf)
return 0;
}
+EXPORT_SYMBOL(otx2_pfc_txschq_stop);
int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf)
{
@@ -321,6 +325,7 @@ unlock:
mutex_unlock(&pfvf->mbox.lock);
return err;
}
+EXPORT_SYMBOL(otx2_config_priority_flow_ctrl);
void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx,
bool pfc_enable)
@@ -385,6 +390,7 @@ out:
"Updating BPIDs in CQ and Aura contexts of RQ%d failed with err %d\n",
qidx, err);
}
+EXPORT_SYMBOL(otx2_update_bpid_in_rqctx);
static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
@@ -472,3 +478,4 @@ int otx2_dcbnl_set_ops(struct net_device *dev)
return 0;
}
+EXPORT_SYMBOL(otx2_dcbnl_set_ops);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
index 4e11304965..05956bf03c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
@@ -112,6 +112,7 @@ err_dl:
devlink_free(dl);
return err;
}
+EXPORT_SYMBOL(otx2_register_dl);
void otx2_unregister_dl(struct otx2_nic *pfvf)
{
@@ -123,3 +124,4 @@ void otx2_unregister_dl(struct otx2_nic *pfvf)
ARRAY_SIZE(otx2_dl_params));
devlink_free(dl);
}
+EXPORT_SYMBOL(otx2_unregister_dl);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 60ee7ae2c4..f4655a8c07 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -638,6 +638,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
BIT(FLOW_DISSECTOR_KEY_IPSEC) |
BIT_ULL(FLOW_DISSECTOR_KEY_MPLS) |
BIT_ULL(FLOW_DISSECTOR_KEY_ICMP) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_TCP) |
BIT_ULL(FLOW_DISSECTOR_KEY_IP)))) {
netdev_info(nic->netdev, "unsupported flow used key 0x%llx",
dissector->used_keys);
@@ -860,6 +861,16 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
}
}
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
+ struct flow_match_tcp match;
+
+ flow_rule_match_tcp(rule, &match);
+
+ flow_spec->tcp_flags = match.key->flags;
+ flow_mask->tcp_flags = match.mask->flags;
+ req->features |= BIT_ULL(NPC_TCP_FLAGS);
+ }
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
struct flow_match_mpls match;
u8 bit;
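
With FLOW_DISSECTOR_KEY_TCP accepted, flower rules matching TCP flags can now be offloaded instead of rejected. Assuming standard iproute2 flower syntax, where 0x2/0x17 means SYN set with FIN/RST/ACK clear:

    tc filter add dev eth0 ingress protocol ip flower ip_proto tcp \
            tcp_flags 0x2/0x17 skip_sw action drop

The driver copies key and mask straight into flow_spec->tcp_flags / flow_mask->tcp_flags and sets NPC_TCP_FLAGS, so the AF builds the MCAM match shown earlier.
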
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index f828d32737..04a49b9b54 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -1171,8 +1171,11 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
/* Insert vlan tag before giving pkt to tso */
- if (skb_vlan_tag_present(skb))
+ if (skb_vlan_tag_present(skb)) {
skb = __vlan_hwaccel_push_inside(skb);
+ if (!skb)
+ return true;
+ }
otx2_sq_append_tso(pfvf, sq, skb, qidx);
return true;
}
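
Worth noting on the hunk above: __vlan_hwaccel_push_inside() consumes the skb and returns NULL when tag insertion fails, so the old code could hand a NULL skb to otx2_sq_append_tso(). Returning true on NULL is correct because "true" here means the packet was handled (in this case, already freed), so the caller neither retries nor double-frees.
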
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
index 1723e9912a..6cddb4da85 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
@@ -1407,7 +1407,10 @@ static int otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid,
otx2_qos_read_txschq_cfg(pfvf, node, old_cfg);
/* delete the txschq nodes allocated for this node */
+ otx2_qos_disable_sq(pfvf, qid);
+ otx2_qos_free_hw_node_schq(pfvf, node);
otx2_qos_free_sw_node_schq(pfvf, node);
+ pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;
/* mark this node as htb inner node */
WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);
@@ -1554,6 +1557,7 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force
dwrr_del_node = true;
/* destroy the leaf node */
+ otx2_qos_disable_sq(pfvf, qid);
otx2_qos_destroy_node(pfvf, node);
pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;
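
Both qos.c hunks enforce the same teardown order before a leaf node changes role or is destroyed: quiesce the send queue first, then free the hardware schedulers, then the software mirror, and only then return the qid to the free map. In sequence:

	otx2_qos_disable_sq(pfvf, qid);			/* stop the SQ first */
	otx2_qos_free_hw_node_schq(pfvf, node);		/* release TL schedulers */
	otx2_qos_free_sw_node_schq(pfvf, node);		/* drop the software state */
	pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;	/* recycle the qid */
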