author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:39:57 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:39:57 +0000
commit     dc50eab76b709d68175a358d6e23a5a3890764d3 (patch)
tree       c754d0390db060af0213ff994f0ac310e4cfd6e9 /drivers/net/ethernet/marvell
parent     Adding debian version 6.6.15-2. (diff)
Merging upstream version 6.7.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/marvell')
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 11
-rw-r--r--  drivers/net/ethernet/marvell/mvmdio.c | 59
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 8
-rw-r--r--  drivers/net/ethernet/marvell/mvneta_bm.c | 6
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 12
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c | 168
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_config.h | 22
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c | 24
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h | 18
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_main.c | 229
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_main.h | 13
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_rx.h | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_tx.h | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 8
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 11
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc.h | 8
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/ptp.c | 86
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 7
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 53
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c | 464
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c | 17
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c | 62
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 16
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c | 31
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c | 58
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 7
-rw-r--r--  drivers/net/ethernet/marvell/pxa168_eth.c | 5
30 files changed, 889 insertions(+), 529 deletions(-)
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 3b129a1c33..f0bdc06d25 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2892,19 +2892,18 @@ err_put_clk:
return ret;
}
-static int mv643xx_eth_shared_remove(struct platform_device *pdev)
+static void mv643xx_eth_shared_remove(struct platform_device *pdev)
{
struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
mv643xx_eth_shared_of_remove();
if (!IS_ERR(msp->clk))
clk_disable_unprepare(msp->clk);
- return 0;
}
static struct platform_driver mv643xx_eth_shared_driver = {
.probe = mv643xx_eth_shared_probe,
- .remove = mv643xx_eth_shared_remove,
+ .remove_new = mv643xx_eth_shared_remove,
.driver = {
.name = MV643XX_ETH_SHARED_NAME,
.of_match_table = of_match_ptr(mv643xx_eth_shared_ids),
@@ -3279,7 +3278,7 @@ out:
return err;
}
-static int mv643xx_eth_remove(struct platform_device *pdev)
+static void mv643xx_eth_remove(struct platform_device *pdev)
{
struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
struct net_device *dev = mp->dev;
@@ -3293,8 +3292,6 @@ static int mv643xx_eth_remove(struct platform_device *pdev)
clk_disable_unprepare(mp->clk);
free_netdev(mp->dev);
-
- return 0;
}
static void mv643xx_eth_shutdown(struct platform_device *pdev)
@@ -3311,7 +3308,7 @@ static void mv643xx_eth_shutdown(struct platform_device *pdev)
static struct platform_driver mv643xx_eth_driver = {
.probe = mv643xx_eth_probe,
- .remove = mv643xx_eth_remove,
+ .remove_new = mv643xx_eth_remove,
.shutdown = mv643xx_eth_shutdown,
.driver = {
.name = MV643XX_ETH_NAME,
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 674913184e..5f66f779e5 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -23,6 +23,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
@@ -58,11 +59,6 @@
* - Armada 370 (Globalscale Mirabox): 41us to 43us (Polled)
*/
#define MVMDIO_SMI_TIMEOUT 1000 /* 1000us = 1ms */
-#define MVMDIO_SMI_POLL_INTERVAL_MIN 45
-#define MVMDIO_SMI_POLL_INTERVAL_MAX 55
-
-#define MVMDIO_XSMI_POLL_INTERVAL_MIN 150
-#define MVMDIO_XSMI_POLL_INTERVAL_MAX 160
struct orion_mdio_dev {
void __iomem *regs;
@@ -84,8 +80,6 @@ enum orion_mdio_bus_type {
struct orion_mdio_ops {
int (*is_done)(struct orion_mdio_dev *);
- unsigned int poll_interval_min;
- unsigned int poll_interval_max;
};
/* Wait for the SMI unit to be ready for another operation
@@ -94,34 +88,23 @@ static int orion_mdio_wait_ready(const struct orion_mdio_ops *ops,
struct mii_bus *bus)
{
struct orion_mdio_dev *dev = bus->priv;
- unsigned long timeout = usecs_to_jiffies(MVMDIO_SMI_TIMEOUT);
- unsigned long end = jiffies + timeout;
- int timedout = 0;
+ unsigned long timeout;
+ int done;
- while (1) {
- if (ops->is_done(dev))
+ if (dev->err_interrupt <= 0) {
+ if (!read_poll_timeout_atomic(ops->is_done, done, done, 2,
+ MVMDIO_SMI_TIMEOUT, false, dev))
+ return 0;
+ } else {
+ /* wait_event_timeout does not guarantee a delay of at
+ * least one whole jiffie, so timeout must be no less
+ * than two.
+ */
+ timeout = max(usecs_to_jiffies(MVMDIO_SMI_TIMEOUT), 2);
+
+ if (wait_event_timeout(dev->smi_busy_wait,
+ ops->is_done(dev), timeout))
return 0;
- else if (timedout)
- break;
-
- if (dev->err_interrupt <= 0) {
- usleep_range(ops->poll_interval_min,
- ops->poll_interval_max);
-
- if (time_is_before_jiffies(end))
- ++timedout;
- } else {
- /* wait_event_timeout does not guarantee a delay of at
- * least one whole jiffie, so timeout must be no less
- * than two.
- */
- if (timeout < 2)
- timeout = 2;
- wait_event_timeout(dev->smi_busy_wait,
- ops->is_done(dev), timeout);
-
- ++timedout;
- }
}
dev_err(bus->parent, "Timeout: SMI busy for too long\n");
@@ -135,8 +118,6 @@ static int orion_mdio_smi_is_done(struct orion_mdio_dev *dev)
static const struct orion_mdio_ops orion_mdio_smi_ops = {
.is_done = orion_mdio_smi_is_done,
- .poll_interval_min = MVMDIO_SMI_POLL_INTERVAL_MIN,
- .poll_interval_max = MVMDIO_SMI_POLL_INTERVAL_MAX,
};
static int orion_mdio_smi_read(struct mii_bus *bus, int mii_id,
@@ -194,8 +175,6 @@ static int orion_mdio_xsmi_is_done(struct orion_mdio_dev *dev)
static const struct orion_mdio_ops orion_mdio_xsmi_ops = {
.is_done = orion_mdio_xsmi_is_done,
- .poll_interval_min = MVMDIO_XSMI_POLL_INTERVAL_MIN,
- .poll_interval_max = MVMDIO_XSMI_POLL_INTERVAL_MAX,
};
static int orion_mdio_xsmi_read_c45(struct mii_bus *bus, int mii_id,
@@ -388,7 +367,7 @@ out_clk:
return ret;
}
-static int orion_mdio_remove(struct platform_device *pdev)
+static void orion_mdio_remove(struct platform_device *pdev)
{
struct mii_bus *bus = platform_get_drvdata(pdev);
struct orion_mdio_dev *dev = bus->priv;
@@ -404,8 +383,6 @@ static int orion_mdio_remove(struct platform_device *pdev)
clk_disable_unprepare(dev->clk[i]);
clk_put(dev->clk[i]);
}
-
- return 0;
}
static const struct of_device_id orion_mdio_match[] = {
@@ -426,7 +403,7 @@ MODULE_DEVICE_TABLE(acpi, orion_mdio_acpi_match);
static struct platform_driver orion_mdio_driver = {
.probe = orion_mdio_probe,
- .remove = orion_mdio_remove,
+ .remove_new = orion_mdio_remove,
.driver = {
.name = "orion-mdio",
.of_match_table = orion_mdio_match,
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 165f76d123..29aac32757 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2520,7 +2520,7 @@ next:
mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
if (ps.xdp_redirect)
- xdp_do_flush_map();
+ xdp_do_flush();
if (ps.rx_packets)
mvneta_update_stats(pp, &ps);
@@ -5737,7 +5737,7 @@ err_free_irq:
}
/* Device removal routine */
-static int mvneta_remove(struct platform_device *pdev)
+static void mvneta_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct mvneta_port *pp = netdev_priv(dev);
@@ -5756,8 +5756,6 @@ static int mvneta_remove(struct platform_device *pdev)
1 << pp->id);
mvneta_bm_put(pp->bm_priv);
}
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -5883,7 +5881,7 @@ MODULE_DEVICE_TABLE(of, mvneta_match);
static struct platform_driver mvneta_driver = {
.probe = mvneta_probe,
- .remove = mvneta_remove,
+ .remove_new = mvneta_remove,
.driver = {
.name = MVNETA_DRIVER_NAME,
.of_match_table = mvneta_match,
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c
index 46c942ef22..3f46a0fed0 100644
--- a/drivers/net/ethernet/marvell/mvneta_bm.c
+++ b/drivers/net/ethernet/marvell/mvneta_bm.c
@@ -457,7 +457,7 @@ err_clk:
return err;
}
-static int mvneta_bm_remove(struct platform_device *pdev)
+static void mvneta_bm_remove(struct platform_device *pdev)
{
struct mvneta_bm *priv = platform_get_drvdata(pdev);
u8 all_ports_map = 0xff;
@@ -475,8 +475,6 @@ static int mvneta_bm_remove(struct platform_device *pdev)
mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_STOP_MASK);
clk_disable_unprepare(priv->clk);
-
- return 0;
}
static const struct of_device_id mvneta_bm_match[] = {
@@ -487,7 +485,7 @@ MODULE_DEVICE_TABLE(of, mvneta_bm_match);
static struct platform_driver mvneta_bm_driver = {
.probe = mvneta_bm_probe,
- .remove = mvneta_bm_remove,
+ .remove_new = mvneta_bm_remove,
.driver = {
.name = MVNETA_BM_DRIVER_NAME,
.of_match_table = mvneta_bm_match,
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index aca17082b9..065f07392c 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -4052,7 +4052,7 @@ err_drop_frame:
}
if (xdp_ret & MVPP2_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
if (ps.rx_packets) {
struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
@@ -5856,7 +5856,7 @@ static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
v->type = MVPP2_QUEUE_VECTOR_SHARED;
if (port->flags & MVPP2_F_DT_COMPAT)
- strncpy(irqname, "rx-shared", sizeof(irqname));
+ strscpy(irqname, "rx-shared", sizeof(irqname));
}
if (port_node)
@@ -7687,7 +7687,7 @@ err_pp_clk:
return err;
}
-static int mvpp2_remove(struct platform_device *pdev)
+static void mvpp2_remove(struct platform_device *pdev)
{
struct mvpp2 *priv = platform_get_drvdata(pdev);
struct fwnode_handle *fwnode = pdev->dev.fwnode;
@@ -7725,15 +7725,13 @@ static int mvpp2_remove(struct platform_device *pdev)
}
if (is_acpi_node(port_fwnode))
- return 0;
+ return;
clk_disable_unprepare(priv->axi_clk);
clk_disable_unprepare(priv->mg_core_clk);
clk_disable_unprepare(priv->mg_clk);
clk_disable_unprepare(priv->pp_clk);
clk_disable_unprepare(priv->gop_clk);
-
- return 0;
}
static const struct of_device_id mvpp2_match[] = {
@@ -7759,7 +7757,7 @@ MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);
static struct platform_driver mvpp2_driver = {
.probe = mvpp2_probe,
- .remove = mvpp2_remove,
+ .remove_new = mvpp2_remove,
.driver = {
.name = MVPP2_DRIVER_NAME,
.of_match_table = mvpp2_match,
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
index 90c3a41993..d4ee245467 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
@@ -16,9 +16,6 @@
#define CTRL_MBOX_MAX_PF 128
#define CTRL_MBOX_SZ ((size_t)(0x400000 / CTRL_MBOX_MAX_PF))
-#define FW_HB_INTERVAL_IN_SECS 1
-#define FW_HB_MISS_COUNT 10
-
/* Names of Hardware non-queue generic interrupts */
static char *cn93_non_ioq_msix_names[] = {
"epf_ire_rint",
@@ -250,12 +247,11 @@ static void octep_init_config_cn93_pf(struct octep_device *oct)
link = PCI_DEVFN(PCI_SLOT(oct->pdev->devfn), link);
}
conf->ctrl_mbox_cfg.barmem_addr = (void __iomem *)oct->mmio[2].hw_addr +
- (0x400000ull * 7) +
+ CN93_PEM_BAR4_INDEX_OFFSET +
(link * CTRL_MBOX_SZ);
- conf->hb_interval = FW_HB_INTERVAL_IN_SECS;
- conf->max_hb_miss_cnt = FW_HB_MISS_COUNT;
-
+ conf->fw_info.hb_interval = OCTEP_DEFAULT_FW_HB_INTERVAL;
+ conf->fw_info.hb_miss_count = OCTEP_DEFAULT_FW_HB_MISS_COUNT;
}
/* Setup registers for a hardware Tx Queue */
@@ -373,34 +369,40 @@ static void octep_setup_mbox_regs_cn93_pf(struct octep_device *oct, int q_no)
mbox->mbox_read_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_VF_PF_DATA(q_no);
}
-/* Process non-ioq interrupts required to keep pf interface running.
- * OEI_RINT is needed for control mailbox
- */
-static bool octep_poll_non_ioq_interrupts_cn93_pf(struct octep_device *oct)
-{
- bool handled = false;
- u64 reg0;
-
- /* Check for OEI INTR */
- reg0 = octep_read_csr64(oct, CN93_SDP_EPF_OEI_RINT);
- if (reg0) {
- dev_info(&oct->pdev->dev,
- "Received OEI_RINT intr: 0x%llx\n",
- reg0);
- octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT, reg0);
- if (reg0 & CN93_SDP_EPF_OEI_RINT_DATA_BIT_MBOX)
+/* Poll OEI events like heartbeat */
+static void octep_poll_oei_cn93_pf(struct octep_device *oct)
+{
+ u64 reg;
+
+ reg = octep_read_csr64(oct, CN93_SDP_EPF_OEI_RINT);
+ if (reg) {
+ octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT, reg);
+ if (reg & CN93_SDP_EPF_OEI_RINT_DATA_BIT_MBOX)
queue_work(octep_wq, &oct->ctrl_mbox_task);
- else if (reg0 & CN93_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT)
+ else if (reg & CN93_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT)
atomic_set(&oct->hb_miss_cnt, 0);
-
- handled = true;
}
+}
+
+/* OEI interrupt handler */
+static irqreturn_t octep_oei_intr_handler_cn93_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+
+ octep_poll_oei_cn93_pf(oct);
+ return IRQ_HANDLED;
+}
- return handled;
+/* Process non-ioq interrupts required to keep pf interface running.
+ * OEI_RINT is needed for control mailbox
+ */
+static void octep_poll_non_ioq_interrupts_cn93_pf(struct octep_device *oct)
+{
+ octep_poll_oei_cn93_pf(oct);
}
-/* Interrupts handler for all non-queue generic interrupts. */
-static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev)
+/* Interrupt handler for input ring error interrupts. */
+static irqreturn_t octep_ire_intr_handler_cn93_pf(void *dev)
{
struct octep_device *oct = (struct octep_device *)dev;
struct pci_dev *pdev = oct->pdev;
@@ -425,8 +427,17 @@ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev)
reg_val);
}
}
- goto irq_handled;
}
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for output ring error interrupts. */
+static irqreturn_t octep_ore_intr_handler_cn93_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
+ int i = 0;
/* Check for ORERR INTR */
reg_val = octep_read_csr64(oct, CN93_SDP_EPF_ORERR_RINT);
@@ -444,9 +455,16 @@ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev)
reg_val);
}
}
-
- goto irq_handled;
}
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for vf input ring error interrupts. */
+static irqreturn_t octep_vfire_intr_handler_cn93_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
/* Check for VFIRE INTR */
reg_val = octep_read_csr64(oct, CN93_SDP_EPF_VFIRE_RINT(0));
@@ -454,8 +472,16 @@ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev)
dev_info(&pdev->dev,
"Received VFIRE_RINT intr: 0x%llx\n", reg_val);
octep_write_csr64(oct, CN93_SDP_EPF_VFIRE_RINT(0), reg_val);
- goto irq_handled;
}
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for vf output ring error interrupts. */
+static irqreturn_t octep_vfore_intr_handler_cn93_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
/* Check for VFORE INTR */
reg_val = octep_read_csr64(oct, CN93_SDP_EPF_VFORE_RINT(0));
@@ -463,19 +489,30 @@ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev)
dev_info(&pdev->dev,
"Received VFORE_RINT intr: 0x%llx\n", reg_val);
octep_write_csr64(oct, CN93_SDP_EPF_VFORE_RINT(0), reg_val);
- goto irq_handled;
}
+ return IRQ_HANDLED;
+}
- /* Check for MBOX INTR and OEI INTR */
- if (octep_poll_non_ioq_interrupts_cn93_pf(oct))
- goto irq_handled;
+/* Interrupt handler for dpi dma related interrupts. */
+static irqreturn_t octep_dma_intr_handler_cn93_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ u64 reg_val = 0;
/* Check for DMA INTR */
reg_val = octep_read_csr64(oct, CN93_SDP_EPF_DMA_RINT);
if (reg_val) {
octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT, reg_val);
- goto irq_handled;
}
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for dpi dma transaction error interrupts for VFs */
+static irqreturn_t octep_dma_vf_intr_handler_cn93_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
/* Check for DMA VF INTR */
reg_val = octep_read_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT(0));
@@ -483,8 +520,16 @@ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev)
dev_info(&pdev->dev,
"Received DMA_VF_RINT intr: 0x%llx\n", reg_val);
octep_write_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT(0), reg_val);
- goto irq_handled;
}
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for pp transaction error interrupts for VFs */
+static irqreturn_t octep_pp_vf_intr_handler_cn93_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
/* Check for PPVF INTR */
reg_val = octep_read_csr64(oct, CN93_SDP_EPF_PP_VF_RINT(0));
@@ -492,8 +537,16 @@ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev)
dev_info(&pdev->dev,
"Received PP_VF_RINT intr: 0x%llx\n", reg_val);
octep_write_csr64(oct, CN93_SDP_EPF_PP_VF_RINT(0), reg_val);
- goto irq_handled;
}
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for mac related interrupts. */
+static irqreturn_t octep_misc_intr_handler_cn93_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
/* Check for MISC INTR */
reg_val = octep_read_csr64(oct, CN93_SDP_EPF_MISC_RINT);
@@ -501,11 +554,17 @@ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev)
dev_info(&pdev->dev,
"Received MISC_RINT intr: 0x%llx\n", reg_val);
octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT, reg_val);
- goto irq_handled;
}
+ return IRQ_HANDLED;
+}
+
+/* Interrupts handler for all reserved interrupts. */
+static irqreturn_t octep_rsvd_intr_handler_cn93_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
dev_info(&pdev->dev, "Reserved interrupts raised; Ignore\n");
-irq_handled:
return IRQ_HANDLED;
}
@@ -569,8 +628,15 @@ static void octep_enable_interrupts_cn93_pf(struct octep_device *oct)
octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1S, intr_mask);
octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1S, intr_mask);
octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT_ENA_W1S, -1ULL);
+
+ octep_write_csr64(oct, CN93_SDP_EPF_VFIRE_RINT_ENA_W1S(0), -1ULL);
+ octep_write_csr64(oct, CN93_SDP_EPF_VFORE_RINT_ENA_W1S(0), -1ULL);
+
octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1S, intr_mask);
octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1S, intr_mask);
+
+ octep_write_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S(0), -1ULL);
+ octep_write_csr64(oct, CN93_SDP_EPF_PP_VF_RINT_ENA_W1S(0), -1ULL);
}
/* Disable all interrupts */
@@ -588,8 +654,15 @@ static void octep_disable_interrupts_cn93_pf(struct octep_device *oct)
octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1C, intr_mask);
octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1C, intr_mask);
octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT_ENA_W1C, -1ULL);
+
+ octep_write_csr64(oct, CN93_SDP_EPF_VFIRE_RINT_ENA_W1C(0), -1ULL);
+ octep_write_csr64(oct, CN93_SDP_EPF_VFORE_RINT_ENA_W1C(0), -1ULL);
+
octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1C, intr_mask);
octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1C, intr_mask);
+
+ octep_write_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C(0), -1ULL);
+ octep_write_csr64(oct, CN93_SDP_EPF_PP_VF_RINT_ENA_W1C(0), -1ULL);
}
/* Get new Octeon Read Index: index of descriptor that Octeon reads next. */
@@ -722,7 +795,16 @@ void octep_device_setup_cn93_pf(struct octep_device *oct)
oct->hw_ops.setup_oq_regs = octep_setup_oq_regs_cn93_pf;
oct->hw_ops.setup_mbox_regs = octep_setup_mbox_regs_cn93_pf;
- oct->hw_ops.non_ioq_intr_handler = octep_non_ioq_intr_handler_cn93_pf;
+ oct->hw_ops.oei_intr_handler = octep_oei_intr_handler_cn93_pf;
+ oct->hw_ops.ire_intr_handler = octep_ire_intr_handler_cn93_pf;
+ oct->hw_ops.ore_intr_handler = octep_ore_intr_handler_cn93_pf;
+ oct->hw_ops.vfire_intr_handler = octep_vfire_intr_handler_cn93_pf;
+ oct->hw_ops.vfore_intr_handler = octep_vfore_intr_handler_cn93_pf;
+ oct->hw_ops.dma_intr_handler = octep_dma_intr_handler_cn93_pf;
+ oct->hw_ops.dma_vf_intr_handler = octep_dma_vf_intr_handler_cn93_pf;
+ oct->hw_ops.pp_vf_intr_handler = octep_pp_vf_intr_handler_cn93_pf;
+ oct->hw_ops.misc_intr_handler = octep_misc_intr_handler_cn93_pf;
+ oct->hw_ops.rsvd_intr_handler = octep_rsvd_intr_handler_cn93_pf;
oct->hw_ops.ioq_intr_handler = octep_ioq_intr_handler_cn93_pf;
oct->hw_ops.soft_reset = octep_soft_reset_cn93_pf;
oct->hw_ops.reinit_regs = octep_reinit_regs_cn93_pf;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_config.h b/drivers/net/ethernet/marvell/octeon_ep/octep_config.h
index df7cd39d9f..1622a6ebf0 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_config.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_config.h
@@ -49,6 +49,11 @@
/* Default MTU */
#define OCTEP_DEFAULT_MTU 1500
+/* pf heartbeat interval in milliseconds */
+#define OCTEP_DEFAULT_FW_HB_INTERVAL 1000
+/* pf heartbeat miss count */
+#define OCTEP_DEFAULT_FW_HB_MISS_COUNT 20
+
/* Macros to get octeon config params */
#define CFG_GET_IQ_CFG(cfg) ((cfg)->iq)
#define CFG_GET_IQ_NUM_DESC(cfg) ((cfg)->iq.num_descs)
@@ -181,6 +186,16 @@ struct octep_ctrl_mbox_config {
void __iomem *barmem_addr;
};
+/* Info from firmware */
+struct octep_fw_info {
+ /* interface pkind */
+ u16 pkind;
+ /* heartbeat interval in milliseconds */
+ u16 hb_interval;
+ /* heartbeat miss count */
+ u16 hb_miss_count;
+};
+
/* Data Structure to hold configuration limits and active config */
struct octep_config {
/* Input Queue attributes. */
@@ -201,10 +216,7 @@ struct octep_config {
/* ctrl mbox config */
struct octep_ctrl_mbox_config ctrl_mbox_cfg;
- /* Configured maximum heartbeat miss count */
- u32 max_hb_miss_cnt;
-
- /* Configured firmware heartbeat interval in secs */
- u32 hb_interval;
+ /* fw info */
+ struct octep_fw_info fw_info;
};
#endif /* _OCTEP_CONFIG_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
index 17bfd5cdf4..0594607a25 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
@@ -26,7 +26,7 @@ static atomic_t ctrl_net_msg_id;
/* Control plane version in which OCTEP_CTRL_NET_H2F_CMD was added */
static const u32 octep_ctrl_net_h2f_cmd_versions[OCTEP_CTRL_NET_H2F_CMD_MAX] = {
- [OCTEP_CTRL_NET_H2F_CMD_INVALID ... OCTEP_CTRL_NET_H2F_CMD_LINK_INFO] =
+ [OCTEP_CTRL_NET_H2F_CMD_INVALID ... OCTEP_CTRL_NET_H2F_CMD_GET_INFO] =
OCTEP_CP_VERSION(1, 0, 0)
};
@@ -353,6 +353,28 @@ void octep_ctrl_net_recv_fw_messages(struct octep_device *oct)
}
}
+int octep_ctrl_net_get_info(struct octep_device *oct, int vfid,
+ struct octep_fw_info *info)
+{
+ struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_h2f_resp *resp;
+ struct octep_ctrl_net_h2f_req *req;
+ int err;
+
+ req = &d.data.req;
+ init_send_req(&d.msg, req, 0, vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_GET_INFO;
+ req->link_info.cmd = OCTEP_CTRL_NET_CMD_GET;
+ err = octep_send_mbox_req(oct, &d, true);
+ if (err < 0)
+ return err;
+
+ resp = &d.data.resp;
+ memcpy(info, &resp->info.fw_info, sizeof(struct octep_fw_info));
+
+ return 0;
+}
+
int octep_ctrl_net_uninit(struct octep_device *oct)
{
struct octep_ctrl_net_wait_data *pos, *n;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
index 1c2ef4ee31..b330f37013 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
@@ -41,6 +41,7 @@ enum octep_ctrl_net_h2f_cmd {
OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS,
OCTEP_CTRL_NET_H2F_CMD_RX_STATE,
OCTEP_CTRL_NET_H2F_CMD_LINK_INFO,
+ OCTEP_CTRL_NET_H2F_CMD_GET_INFO,
OCTEP_CTRL_NET_H2F_CMD_MAX
};
@@ -161,6 +162,11 @@ struct octep_ctrl_net_h2f_resp_cmd_state {
u16 state;
};
+/* get info request */
+struct octep_ctrl_net_h2f_resp_cmd_get_info {
+ struct octep_fw_info fw_info;
+};
+
/* Host to fw response data */
struct octep_ctrl_net_h2f_resp {
union octep_ctrl_net_resp_hdr hdr;
@@ -171,6 +177,7 @@ struct octep_ctrl_net_h2f_resp {
struct octep_ctrl_net_h2f_resp_cmd_state link;
struct octep_ctrl_net_h2f_resp_cmd_state rx;
struct octep_ctrl_net_link_info link_info;
+ struct octep_ctrl_net_h2f_resp_cmd_get_info info;
};
} __packed;
@@ -330,6 +337,17 @@ int octep_ctrl_net_set_link_info(struct octep_device *oct,
*/
void octep_ctrl_net_recv_fw_messages(struct octep_device *oct);
+/** Get info from firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param vfid: Index of virtual function.
+ * @param info: non-null pointer to struct octep_fw_info.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_net_get_info(struct octep_device *oct, int vfid,
+ struct octep_fw_info *info);
+
/** Uninitialize data for ctrl net.
*
* @param oct: non-null pointer to struct octep_device.
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index 2ee1374db4..a9bdf3283a 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -155,18 +155,153 @@ static void octep_disable_msix(struct octep_device *oct)
}
/**
- * octep_non_ioq_intr_handler() - common handler for all generic interrupts.
+ * octep_oei_intr_handler() - common handler for output endpoint interrupts.
*
* @irq: Interrupt number.
* @data: interrupt data.
*
- * this is common handler for all non-queue (generic) interrupts.
+ * this is common handler for all output endpoint interrupts.
*/
-static irqreturn_t octep_non_ioq_intr_handler(int irq, void *data)
+static irqreturn_t octep_oei_intr_handler(int irq, void *data)
{
struct octep_device *oct = data;
- return oct->hw_ops.non_ioq_intr_handler(oct);
+ return oct->hw_ops.oei_intr_handler(oct);
+}
+
+/**
+ * octep_ire_intr_handler() - common handler for input ring error interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data.
+ *
+ * this is common handler for input ring error interrupts.
+ */
+static irqreturn_t octep_ire_intr_handler(int irq, void *data)
+{
+ struct octep_device *oct = data;
+
+ return oct->hw_ops.ire_intr_handler(oct);
+}
+
+/**
+ * octep_ore_intr_handler() - common handler for output ring error interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data.
+ *
+ * this is common handler for output ring error interrupts.
+ */
+static irqreturn_t octep_ore_intr_handler(int irq, void *data)
+{
+ struct octep_device *oct = data;
+
+ return oct->hw_ops.ore_intr_handler(oct);
+}
+
+/**
+ * octep_vfire_intr_handler() - common handler for vf input ring error interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data.
+ *
+ * this is common handler for vf input ring error interrupts.
+ */
+static irqreturn_t octep_vfire_intr_handler(int irq, void *data)
+{
+ struct octep_device *oct = data;
+
+ return oct->hw_ops.vfire_intr_handler(oct);
+}
+
+/**
+ * octep_vfore_intr_handler() - common handler for vf output ring error interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data.
+ *
+ * this is common handler for vf output ring error interrupts.
+ */
+static irqreturn_t octep_vfore_intr_handler(int irq, void *data)
+{
+ struct octep_device *oct = data;
+
+ return oct->hw_ops.vfore_intr_handler(oct);
+}
+
+/**
+ * octep_dma_intr_handler() - common handler for dpi dma related interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data.
+ *
+ * this is common handler for dpi dma related interrupts.
+ */
+static irqreturn_t octep_dma_intr_handler(int irq, void *data)
+{
+ struct octep_device *oct = data;
+
+ return oct->hw_ops.dma_intr_handler(oct);
+}
+
+/**
+ * octep_dma_vf_intr_handler() - common handler for dpi dma transaction error interrupts for VFs.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data.
+ *
+ * this is common handler for dpi dma transaction error interrupts for VFs.
+ */
+static irqreturn_t octep_dma_vf_intr_handler(int irq, void *data)
+{
+ struct octep_device *oct = data;
+
+ return oct->hw_ops.dma_vf_intr_handler(oct);
+}
+
+/**
+ * octep_pp_vf_intr_handler() - common handler for pp transaction error interrupts for VFs.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data.
+ *
+ * this is common handler for pp transaction error interrupts for VFs.
+ */
+static irqreturn_t octep_pp_vf_intr_handler(int irq, void *data)
+{
+ struct octep_device *oct = data;
+
+ return oct->hw_ops.pp_vf_intr_handler(oct);
+}
+
+/**
+ * octep_misc_intr_handler() - common handler for mac related interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data.
+ *
+ * this is common handler for mac related interrupts.
+ */
+static irqreturn_t octep_misc_intr_handler(int irq, void *data)
+{
+ struct octep_device *oct = data;
+
+ return oct->hw_ops.misc_intr_handler(oct);
+}
+
+/**
+ * octep_rsvd_intr_handler() - common handler for reserved interrupts (future use).
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data.
+ *
+ * this is common handler for all reserved interrupts.
+ */
+static irqreturn_t octep_rsvd_intr_handler(int irq, void *data)
+{
+ struct octep_device *oct = data;
+
+ return oct->hw_ops.rsvd_intr_handler(oct);
}
/**
@@ -222,9 +357,57 @@ static int octep_request_irqs(struct octep_device *oct)
snprintf(irq_name, OCTEP_MSIX_NAME_SIZE,
"%s-%s", netdev->name, non_ioq_msix_names[i]);
- ret = request_irq(msix_entry->vector,
- octep_non_ioq_intr_handler, 0,
- irq_name, oct);
+ if (!strncmp(non_ioq_msix_names[i], "epf_oei_rint",
+ strlen("epf_oei_rint"))) {
+ ret = request_irq(msix_entry->vector,
+ octep_oei_intr_handler, 0,
+ irq_name, oct);
+ } else if (!strncmp(non_ioq_msix_names[i], "epf_ire_rint",
+ strlen("epf_ire_rint"))) {
+ ret = request_irq(msix_entry->vector,
+ octep_ire_intr_handler, 0,
+ irq_name, oct);
+ } else if (!strncmp(non_ioq_msix_names[i], "epf_ore_rint",
+ strlen("epf_ore_rint"))) {
+ ret = request_irq(msix_entry->vector,
+ octep_ore_intr_handler, 0,
+ irq_name, oct);
+ } else if (!strncmp(non_ioq_msix_names[i], "epf_vfire_rint",
+ strlen("epf_vfire_rint"))) {
+ ret = request_irq(msix_entry->vector,
+ octep_vfire_intr_handler, 0,
+ irq_name, oct);
+ } else if (!strncmp(non_ioq_msix_names[i], "epf_vfore_rint",
+ strlen("epf_vfore_rint"))) {
+ ret = request_irq(msix_entry->vector,
+ octep_vfore_intr_handler, 0,
+ irq_name, oct);
+ } else if (!strncmp(non_ioq_msix_names[i], "epf_dma_rint",
+ strlen("epf_dma_rint"))) {
+ ret = request_irq(msix_entry->vector,
+ octep_dma_intr_handler, 0,
+ irq_name, oct);
+ } else if (!strncmp(non_ioq_msix_names[i], "epf_dma_vf_rint",
+ strlen("epf_dma_vf_rint"))) {
+ ret = request_irq(msix_entry->vector,
+ octep_dma_vf_intr_handler, 0,
+ irq_name, oct);
+ } else if (!strncmp(non_ioq_msix_names[i], "epf_pp_vf_rint",
+ strlen("epf_pp_vf_rint"))) {
+ ret = request_irq(msix_entry->vector,
+ octep_pp_vf_intr_handler, 0,
+ irq_name, oct);
+ } else if (!strncmp(non_ioq_msix_names[i], "epf_misc_rint",
+ strlen("epf_misc_rint"))) {
+ ret = request_irq(msix_entry->vector,
+ octep_misc_intr_handler, 0,
+ irq_name, oct);
+ } else {
+ ret = request_irq(msix_entry->vector,
+ octep_rsvd_intr_handler, 0,
+ irq_name, oct);
+ }
+
if (ret) {
netdev_err(netdev,
"request_irq failed for %s; err=%d",
@@ -917,9 +1100,9 @@ static void octep_hb_timeout_task(struct work_struct *work)
int miss_cnt;
miss_cnt = atomic_inc_return(&oct->hb_miss_cnt);
- if (miss_cnt < oct->conf->max_hb_miss_cnt) {
+ if (miss_cnt < oct->conf->fw_info.hb_miss_count) {
queue_delayed_work(octep_wq, &oct->hb_task,
- msecs_to_jiffies(oct->conf->hb_interval * 1000));
+ msecs_to_jiffies(oct->conf->fw_info.hb_interval));
return;
}
@@ -1010,10 +1193,16 @@ int octep_device_setup(struct octep_device *oct)
if (ret)
return ret;
+ INIT_WORK(&oct->tx_timeout_task, octep_tx_timeout_task);
+ INIT_WORK(&oct->ctrl_mbox_task, octep_ctrl_mbox_task);
+ INIT_DELAYED_WORK(&oct->intr_poll_task, octep_intr_poll_task);
+ oct->poll_non_ioq_intr = true;
+ queue_delayed_work(octep_wq, &oct->intr_poll_task,
+ msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS));
+
atomic_set(&oct->hb_miss_cnt, 0);
INIT_DELAYED_WORK(&oct->hb_task, octep_hb_timeout_task);
- queue_delayed_work(octep_wq, &oct->hb_task,
- msecs_to_jiffies(oct->conf->hb_interval * 1000));
+
return 0;
unsupported_dev:
@@ -1143,12 +1332,18 @@ static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev, "Device setup failed\n");
goto err_octep_config;
}
- INIT_WORK(&octep_dev->tx_timeout_task, octep_tx_timeout_task);
- INIT_WORK(&octep_dev->ctrl_mbox_task, octep_ctrl_mbox_task);
- INIT_DELAYED_WORK(&octep_dev->intr_poll_task, octep_intr_poll_task);
- octep_dev->poll_non_ioq_intr = true;
- queue_delayed_work(octep_wq, &octep_dev->intr_poll_task,
- msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS));
+
+ err = octep_ctrl_net_get_info(octep_dev, OCTEP_CTRL_NET_INVALID_VFID,
+ &octep_dev->conf->fw_info);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to get firmware info\n");
+ goto register_dev_err;
+ }
+ dev_info(&octep_dev->pdev->dev, "Heartbeat interval %u msecs Heartbeat miss count %u\n",
+ octep_dev->conf->fw_info.hb_interval,
+ octep_dev->conf->fw_info.hb_miss_count);
+ queue_delayed_work(octep_wq, &octep_dev->hb_task,
+ msecs_to_jiffies(octep_dev->conf->fw_info.hb_interval));
netdev->netdev_ops = &octep_netdev_ops;
octep_set_ethtool_ops(netdev);
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
index e0907a7191..6df902ebb7 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
@@ -65,7 +65,16 @@ struct octep_hw_ops {
void (*setup_oq_regs)(struct octep_device *oct, int q);
void (*setup_mbox_regs)(struct octep_device *oct, int mbox);
- irqreturn_t (*non_ioq_intr_handler)(void *ioq_vector);
+ irqreturn_t (*oei_intr_handler)(void *ioq_vector);
+ irqreturn_t (*ire_intr_handler)(void *ioq_vector);
+ irqreturn_t (*ore_intr_handler)(void *ioq_vector);
+ irqreturn_t (*vfire_intr_handler)(void *ioq_vector);
+ irqreturn_t (*vfore_intr_handler)(void *ioq_vector);
+ irqreturn_t (*dma_intr_handler)(void *ioq_vector);
+ irqreturn_t (*dma_vf_intr_handler)(void *ioq_vector);
+ irqreturn_t (*pp_vf_intr_handler)(void *ioq_vector);
+ irqreturn_t (*misc_intr_handler)(void *ioq_vector);
+ irqreturn_t (*rsvd_intr_handler)(void *ioq_vector);
irqreturn_t (*ioq_intr_handler)(void *ioq_vector);
int (*soft_reset)(struct octep_device *oct);
void (*reinit_regs)(struct octep_device *oct);
@@ -73,7 +82,7 @@ struct octep_hw_ops {
void (*enable_interrupts)(struct octep_device *oct);
void (*disable_interrupts)(struct octep_device *oct);
- bool (*poll_non_ioq_interrupts)(struct octep_device *oct);
+ void (*poll_non_ioq_interrupts)(struct octep_device *oct);
void (*enable_io_queues)(struct octep_device *oct);
void (*disable_io_queues)(struct octep_device *oct);
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
index b25c3093dc..0a43983e91 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
@@ -370,4 +370,8 @@
/* bit 1 for firmware heartbeat interrupt */
#define CN93_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT BIT_ULL(1)
+#define CN93_PEM_BAR4_INDEX 7
+#define CN93_PEM_BAR4_INDEX_SIZE 0x400000ULL
+#define CN93_PEM_BAR4_INDEX_OFFSET (CN93_PEM_BAR4_INDEX * CN93_PEM_BAR4_INDEX_SIZE)
+
#endif /* _OCTEP_REGS_CN9K_PF_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
index 782a24f27f..49feae80d7 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
@@ -20,6 +20,7 @@ struct octep_oq_desc_hw {
dma_addr_t buffer_ptr;
u64 info_ptr;
};
+static_assert(sizeof(struct octep_oq_desc_hw) == 16);
#define OCTEP_OQ_DESC_SIZE (sizeof(struct octep_oq_desc_hw))
@@ -39,6 +40,7 @@ struct octep_oq_resp_hw_ext {
/* checksum verified. */
u64 csum_verified:2;
};
+static_assert(sizeof(struct octep_oq_resp_hw_ext) == 8);
#define OCTEP_OQ_RESP_HW_EXT_SIZE (sizeof(struct octep_oq_resp_hw_ext))
@@ -50,6 +52,7 @@ struct octep_oq_resp_hw {
/* The Length of the packet. */
__be64 length;
};
+static_assert(sizeof(struct octep_oq_resp_hw) == 8);
#define OCTEP_OQ_RESP_HW_SIZE (sizeof(struct octep_oq_resp_hw))
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
index 21e75ff9f5..86c98b13fc 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
@@ -36,6 +36,7 @@ struct octep_tx_sglist_desc {
u16 len[4];
dma_addr_t dma_ptr[4];
};
+static_assert(sizeof(struct octep_tx_sglist_desc) == 40);
/* Each Scatter/Gather entry sent to hardwar hold four pointers.
* So, number of entries required is (MAX_SKB_FRAGS + 1)/4, where '+1'
@@ -239,6 +240,7 @@ struct octep_instr_hdr {
/* Reserved3 */
u64 reserved3:1;
};
+static_assert(sizeof(struct octep_instr_hdr) == 8);
/* Hardware Tx completion response header */
struct octep_instr_resp_hdr {
@@ -263,6 +265,7 @@ struct octep_instr_resp_hdr {
/* Opcode for the return packet */
u64 opcode:16;
};
+static_assert(sizeof(struct octep_instr_hdr) == 8);
/* 64-byte Tx instruction format.
* Format of instruction for a 64-byte mode input queue.
@@ -293,6 +296,7 @@ struct octep_tx_desc_hw {
/* Additional headers available in a 64-byte instruction. */
u64 exhdr[4];
};
+static_assert(sizeof(struct octep_tx_desc_hw) == 64);
#define OCTEP_IQ_DESC_SIZE (sizeof(struct octep_tx_desc_hw))
#endif /* _OCTEP_TX_H_ */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index e06f77ad61..6c70c84986 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -1218,8 +1218,6 @@ static inline void link_status_user_format(u64 lstat,
struct cgx_link_user_info *linfo,
struct cgx *cgx, u8 lmac_id)
{
- const char *lmac_string;
-
linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
@@ -1230,12 +1228,12 @@ static inline void link_status_user_format(u64 lstat,
if (linfo->lmac_type_id >= LMAC_MODE_MAX) {
dev_err(&cgx->pdev->dev, "Unknown lmac_type_id %d reported by firmware on cgx port%d:%d",
linfo->lmac_type_id, cgx->cgx_id, lmac_id);
- strncpy(linfo->lmac_type, "Unknown", LMACTYPE_STR_LEN - 1);
+ strscpy(linfo->lmac_type, "Unknown", sizeof(linfo->lmac_type));
return;
}
- lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
- strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
+ strscpy(linfo->lmac_type, cgx_lmactype_string[linfo->lmac_type_id],
+ sizeof(linfo->lmac_type));
}
/* Hardware event handlers */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 31bd9aeb41..5df42634ce 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -1473,6 +1473,12 @@ struct flow_msg {
u8 next_header;
};
__be16 vlan_itci;
+#define OTX2_FLOWER_MASK_MPLS_LB GENMASK(31, 12)
+#define OTX2_FLOWER_MASK_MPLS_TC GENMASK(11, 9)
+#define OTX2_FLOWER_MASK_MPLS_BOS BIT(8)
+#define OTX2_FLOWER_MASK_MPLS_TTL GENMASK(7, 0)
+#define OTX2_FLOWER_MASK_MPLS_NON_TTL GENMASK(31, 8)
+ u32 mpls_lse[4];
};
struct npc_install_flow_req {
@@ -1574,7 +1580,7 @@ enum ptp_op {
PTP_OP_GET_CLOCK = 1,
PTP_OP_GET_TSTMP = 2,
PTP_OP_SET_THRESH = 3,
- PTP_OP_EXTTS_ON = 4,
+ PTP_OP_PPS_ON = 4,
PTP_OP_ADJTIME = 5,
PTP_OP_SET_CLOCK = 6,
};
@@ -1584,7 +1590,8 @@ struct ptp_req {
u8 op;
s64 scaled_ppm;
u64 thresh;
- int extts_on;
+ u64 period;
+ int pps_on;
s64 delta;
u64 clk;
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index c92c3f4631..8c0732c9a7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -206,6 +206,14 @@ enum key_fields {
NPC_SPORT_SCTP,
NPC_DPORT_SCTP,
NPC_IPSEC_SPI,
+ NPC_MPLS1_LBTCBOS,
+ NPC_MPLS1_TTL,
+ NPC_MPLS2_LBTCBOS,
+ NPC_MPLS2_TTL,
+ NPC_MPLS3_LBTCBOS,
+ NPC_MPLS3_TTL,
+ NPC_MPLS4_LBTCBOS,
+ NPC_MPLS4_TTL,
NPC_HEADER_FIELDS_MAX,
NPC_CHAN = NPC_HEADER_FIELDS_MAX, /* Valid when Rx */
NPC_PF_FUNC, /* Valid when Tx */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
index ffbd227971..bcc96eed24 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -46,6 +46,7 @@
#define PTP_PPS_HI_INCR 0xF60ULL
#define PTP_PPS_LO_INCR 0xF68ULL
+#define PTP_PPS_THRESH_LO 0xF50ULL
#define PTP_PPS_THRESH_HI 0xF58ULL
#define PTP_CLOCK_LO 0xF08ULL
@@ -411,29 +412,12 @@ void ptp_start(struct rvu *rvu, u64 sclk, u32 ext_clk_freq, u32 extts)
}
clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
- clock_cfg |= PTP_CLOCK_CFG_PPS_EN | PTP_CLOCK_CFG_PPS_INV;
writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
clock_cfg &= ~PTP_CLOCK_CFG_ATOMIC_OP_MASK;
clock_cfg |= (ATOMIC_SET << 26);
writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
- /* Set 50% duty cycle for 1Hz output */
- writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_HI_INCR);
- writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_LO_INCR);
- if (cn10k_ptp_errata(ptp)) {
- /* The ptp_clock_hi rollsover to zero once clock cycle before it
- * reaches one second boundary. so, program the pps_lo_incr in
- * such a way that the pps threshold value comparison at one
- * second boundary will succeed and pps edge changes. After each
- * one second boundary, the hrtimer handler will be invoked and
- * reprograms the pps threshold value.
- */
- ptp->clock_period = NSEC_PER_SEC / ptp->clock_rate;
- writeq((0x1dcd6500ULL - ptp->clock_period) << 32,
- ptp->reg_base + PTP_PPS_LO_INCR);
- }
-
if (cn10k_ptp_errata(ptp))
clock_comp = ptp_calc_adjusted_comp(ptp->clock_rate);
else
@@ -465,20 +449,68 @@ static int ptp_set_thresh(struct ptp *ptp, u64 thresh)
return 0;
}
-static int ptp_extts_on(struct ptp *ptp, int on)
+static int ptp_config_hrtimer(struct ptp *ptp, int on)
{
u64 ptp_clock_hi;
- if (cn10k_ptp_errata(ptp)) {
- if (on) {
- ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI);
- ptp_hrtimer_start(ptp, (ktime_t)ptp_clock_hi);
- } else {
- if (hrtimer_active(&ptp->hrtimer))
- hrtimer_cancel(&ptp->hrtimer);
+ if (on) {
+ ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI);
+ ptp_hrtimer_start(ptp, (ktime_t)ptp_clock_hi);
+ } else {
+ if (hrtimer_active(&ptp->hrtimer))
+ hrtimer_cancel(&ptp->hrtimer);
+ }
+
+ return 0;
+}
+
+static int ptp_pps_on(struct ptp *ptp, int on, u64 period)
+{
+ u64 clock_cfg;
+
+ clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
+ if (on) {
+ if (cn10k_ptp_errata(ptp) && period != NSEC_PER_SEC) {
+ dev_err(&ptp->pdev->dev, "Supports max period value as 1 second\n");
+ return -EINVAL;
}
+
+ if (period > (8 * NSEC_PER_SEC)) {
+ dev_err(&ptp->pdev->dev, "Supports max period as 8 seconds\n");
+ return -EINVAL;
+ }
+
+ clock_cfg |= PTP_CLOCK_CFG_PPS_EN | PTP_CLOCK_CFG_PPS_INV;
+ writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
+
+ writeq(0, ptp->reg_base + PTP_PPS_THRESH_HI);
+ writeq(0, ptp->reg_base + PTP_PPS_THRESH_LO);
+
+ /* Configure high/low phase time */
+ period = period / 2;
+ writeq(((u64)period << 32), ptp->reg_base + PTP_PPS_HI_INCR);
+ writeq(((u64)period << 32), ptp->reg_base + PTP_PPS_LO_INCR);
+ } else {
+ clock_cfg &= ~(PTP_CLOCK_CFG_PPS_EN | PTP_CLOCK_CFG_PPS_INV);
+ writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
}
+ if (on && cn10k_ptp_errata(ptp)) {
+ /* The ptp_clock_hi rollsover to zero once clock cycle before it
+ * reaches one second boundary. so, program the pps_lo_incr in
+ * such a way that the pps threshold value comparison at one
+ * second boundary will succeed and pps edge changes. After each
+ * one second boundary, the hrtimer handler will be invoked and
+ * reprograms the pps threshold value.
+ */
+ ptp->clock_period = NSEC_PER_SEC / ptp->clock_rate;
+ writeq((0x1dcd6500ULL - ptp->clock_period) << 32,
+ ptp->reg_base + PTP_PPS_LO_INCR);
+ }
+
+ if (cn10k_ptp_errata(ptp))
+ ptp_config_hrtimer(ptp, on);
+
return 0;
}
@@ -613,8 +645,8 @@ int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req,
case PTP_OP_SET_THRESH:
err = ptp_set_thresh(rvu->ptp, req->thresh);
break;
- case PTP_OP_EXTTS_ON:
- err = ptp_extts_on(rvu->ptp, req->extts_on);
+ case PTP_OP_PPS_ON:
+ err = ptp_pps_on(rvu->ptp, req->pps_on, req->period);
break;
case PTP_OP_ADJTIME:
ptp_atomic_adjtime(rvu->ptp, req->delta);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index ce987ccd43..38acdc7a73 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -773,12 +773,11 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
return 0;
- /* This msg is expected only from PFs that are mapped to CGX LMACs,
+ /* This msg is expected only from PF/VFs that are mapped to CGX/RPM LMACs,
* if received from other PF/VF simply ACK, nothing to do.
*/
- if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
- !is_pf_cgxmapped(rvu, pf))
- return -ENODEV;
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -EPERM;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgxd = rvu_cgx_pdata(cgx_id, rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index d30e848034..bd817ee887 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -2756,6 +2756,27 @@ static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
+#define RVU_DBG_PRINT_MPLS_TTL(pkt, mask) \
+do { \
+ seq_printf(s, "%ld ", FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, pkt)); \
+ seq_printf(s, "mask 0x%lx\n", \
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, mask)); \
+} while (0) \
+
+#define RVU_DBG_PRINT_MPLS_LBTCBOS(_pkt, _mask) \
+do { \
+ typeof(_pkt) (pkt) = (_pkt); \
+ typeof(_mask) (mask) = (_mask); \
+ seq_printf(s, "%ld %ld %ld\n", \
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_LB, pkt), \
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_TC, pkt), \
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_BOS, pkt)); \
+ seq_printf(s, "\tmask 0x%lx 0x%lx 0x%lx\n", \
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_LB, mask), \
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_TC, mask), \
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_BOS, mask)); \
+} while (0) \
+
static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
struct rvu_npc_mcam_rule *rule)
{
@@ -2836,6 +2857,38 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
seq_printf(s, "0x%x ", ntohl(rule->packet.spi));
seq_printf(s, "mask 0x%x\n", ntohl(rule->mask.spi));
break;
+ case NPC_MPLS1_LBTCBOS:
+ RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[0],
+ rule->mask.mpls_lse[0]);
+ break;
+ case NPC_MPLS1_TTL:
+ RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[0],
+ rule->mask.mpls_lse[0]);
+ break;
+ case NPC_MPLS2_LBTCBOS:
+ RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[1],
+ rule->mask.mpls_lse[1]);
+ break;
+ case NPC_MPLS2_TTL:
+ RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[1],
+ rule->mask.mpls_lse[1]);
+ break;
+ case NPC_MPLS3_LBTCBOS:
+ RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[2],
+ rule->mask.mpls_lse[2]);
+ break;
+ case NPC_MPLS3_TTL:
+ RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[2],
+ rule->mask.mpls_lse[2]);
+ break;
+ case NPC_MPLS4_LBTCBOS:
+ RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[3],
+ rule->mask.mpls_lse[3]);
+ break;
+ case NPC_MPLS4_TTL:
+ RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[3],
+ rule->mask.mpls_lse[3]);
+ break;
default:
seq_puts(s, "\n");
break;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index bffe04e6d0..21b5d71c1e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -14,26 +14,16 @@
#define DRV_NAME "octeontx2-af"
-static int rvu_report_pair_start(struct devlink_fmsg *fmsg, const char *name)
+static void rvu_report_pair_start(struct devlink_fmsg *fmsg, const char *name)
{
- int err;
-
- err = devlink_fmsg_pair_nest_start(fmsg, name);
- if (err)
- return err;
-
- return devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_pair_nest_start(fmsg, name);
+ devlink_fmsg_obj_nest_start(fmsg);
}
-static int rvu_report_pair_end(struct devlink_fmsg *fmsg)
+static void rvu_report_pair_end(struct devlink_fmsg *fmsg)
{
- int err;
-
- err = devlink_fmsg_obj_nest_end(fmsg);
- if (err)
- return err;
-
- return devlink_fmsg_pair_nest_end(fmsg);
+ devlink_fmsg_obj_nest_end(fmsg);
+ devlink_fmsg_pair_nest_end(fmsg);
}
static bool rvu_common_request_irq(struct rvu *rvu, int offset,
@@ -284,175 +274,81 @@ static int rvu_nix_report_show(struct devlink_fmsg *fmsg, void *ctx,
{
struct rvu_nix_event_ctx *nix_event_context;
u64 intr_val;
- int err;
nix_event_context = ctx;
switch (health_reporter) {
case NIX_AF_RVU_INTR:
intr_val = nix_event_context->nix_af_rvu_int;
- err = rvu_report_pair_start(fmsg, "NIX_AF_RVU");
- if (err)
- return err;
- err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX RVU Interrupt Reg ",
- nix_event_context->nix_af_rvu_int);
- if (err)
- return err;
- if (intr_val & BIT_ULL(0)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
- if (err)
- return err;
- }
- err = rvu_report_pair_end(fmsg);
- if (err)
- return err;
+ rvu_report_pair_start(fmsg, "NIX_AF_RVU");
+ devlink_fmsg_u64_pair_put(fmsg, "\tNIX RVU Interrupt Reg ",
+ nix_event_context->nix_af_rvu_int);
+ if (intr_val & BIT_ULL(0))
+ devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
+ rvu_report_pair_end(fmsg);
break;
case NIX_AF_RVU_GEN:
intr_val = nix_event_context->nix_af_rvu_gen;
- err = rvu_report_pair_start(fmsg, "NIX_AF_GENERAL");
- if (err)
- return err;
- err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX General Interrupt Reg ",
- nix_event_context->nix_af_rvu_gen);
- if (err)
- return err;
- if (intr_val & BIT_ULL(0)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tRx multicast pkt drop");
- if (err)
- return err;
- }
- if (intr_val & BIT_ULL(1)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tRx mirror pkt drop");
- if (err)
- return err;
- }
- if (intr_val & BIT_ULL(4)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tSMQ flush done");
- if (err)
- return err;
- }
- err = rvu_report_pair_end(fmsg);
- if (err)
- return err;
+ rvu_report_pair_start(fmsg, "NIX_AF_GENERAL");
+ devlink_fmsg_u64_pair_put(fmsg, "\tNIX General Interrupt Reg ",
+ nix_event_context->nix_af_rvu_gen);
+ if (intr_val & BIT_ULL(0))
+ devlink_fmsg_string_put(fmsg, "\n\tRx multicast pkt drop");
+ if (intr_val & BIT_ULL(1))
+ devlink_fmsg_string_put(fmsg, "\n\tRx mirror pkt drop");
+ if (intr_val & BIT_ULL(4))
+ devlink_fmsg_string_put(fmsg, "\n\tSMQ flush done");
+ rvu_report_pair_end(fmsg);
break;
case NIX_AF_RVU_ERR:
intr_val = nix_event_context->nix_af_rvu_err;
- err = rvu_report_pair_start(fmsg, "NIX_AF_ERR");
- if (err)
- return err;
- err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX Error Interrupt Reg ",
- nix_event_context->nix_af_rvu_err);
- if (err)
- return err;
- if (intr_val & BIT_ULL(14)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_INST_S read");
- if (err)
- return err;
- }
- if (intr_val & BIT_ULL(13)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_RES_S write");
- if (err)
- return err;
- }
- if (intr_val & BIT_ULL(12)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
- if (err)
- return err;
- }
- if (intr_val & BIT_ULL(6)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tRx on unmapped PF_FUNC");
- if (err)
- return err;
- }
- if (intr_val & BIT_ULL(5)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tRx multicast replication error");
- if (err)
- return err;
- }
- if (intr_val & BIT_ULL(4)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_RX_MCE_S read");
- if (err)
- return err;
- }
- if (intr_val & BIT_ULL(3)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tFault on multicast WQE read");
- if (err)
- return err;
- }
- if (intr_val & BIT_ULL(2)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tFault on mirror WQE read");
- if (err)
- return err;
- }
- if (intr_val & BIT_ULL(1)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tFault on mirror pkt write");
- if (err)
- return err;
- }
- if (intr_val & BIT_ULL(0)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tFault on multicast pkt write");
- if (err)
- return err;
- }
- err = rvu_report_pair_end(fmsg);
- if (err)
- return err;
+ rvu_report_pair_start(fmsg, "NIX_AF_ERR");
+ devlink_fmsg_u64_pair_put(fmsg, "\tNIX Error Interrupt Reg ",
+ nix_event_context->nix_af_rvu_err);
+ if (intr_val & BIT_ULL(14))
+ devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_INST_S read");
+ if (intr_val & BIT_ULL(13))
+ devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_RES_S write");
+ if (intr_val & BIT_ULL(12))
+ devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
+ if (intr_val & BIT_ULL(6))
+ devlink_fmsg_string_put(fmsg, "\n\tRx on unmapped PF_FUNC");
+ if (intr_val & BIT_ULL(5))
+ devlink_fmsg_string_put(fmsg, "\n\tRx multicast replication error");
+ if (intr_val & BIT_ULL(4))
+ devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_RX_MCE_S read");
+ if (intr_val & BIT_ULL(3))
+ devlink_fmsg_string_put(fmsg, "\n\tFault on multicast WQE read");
+ if (intr_val & BIT_ULL(2))
+ devlink_fmsg_string_put(fmsg, "\n\tFault on mirror WQE read");
+ if (intr_val & BIT_ULL(1))
+ devlink_fmsg_string_put(fmsg, "\n\tFault on mirror pkt write");
+ if (intr_val & BIT_ULL(0))
+ devlink_fmsg_string_put(fmsg, "\n\tFault on multicast pkt write");
+ rvu_report_pair_end(fmsg);
break;
case NIX_AF_RVU_RAS:
intr_val = nix_event_context->nix_af_rvu_err;
- err = rvu_report_pair_start(fmsg, "NIX_AF_RAS");
- if (err)
- return err;
- err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX RAS Interrupt Reg ",
- nix_event_context->nix_af_rvu_err);
- if (err)
- return err;
- err = devlink_fmsg_string_put(fmsg, "\n\tPoison Data on:");
- if (err)
- return err;
- if (intr_val & BIT_ULL(34)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_INST_S");
- if (err)
- return err;
- }
- if (intr_val & BIT_ULL(33)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_RES_S");
- if (err)
- return err;
- }
- if (intr_val & BIT_ULL(32)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tHW ctx");
- if (err)
- return err;
- }
- if (intr_val & BIT_ULL(4)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tPacket from mirror buffer");
- if (err)
- return err;
- }
- if (intr_val & BIT_ULL(3)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tPacket from multicast buffer");
-
- if (err)
- return err;
- }
- if (intr_val & BIT_ULL(2)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tWQE read from mirror buffer");
- if (err)
- return err;
- }
- if (intr_val & BIT_ULL(1)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tWQE read from multicast buffer");
- if (err)
- return err;
- }
- if (intr_val & BIT_ULL(0)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tNIX_RX_MCE_S read");
- if (err)
- return err;
- }
- err = rvu_report_pair_end(fmsg);
- if (err)
- return err;
+ rvu_report_pair_start(fmsg, "NIX_AF_RAS");
+ devlink_fmsg_u64_pair_put(fmsg, "\tNIX RAS Interrupt Reg ",
+ nix_event_context->nix_af_rvu_err);
+ devlink_fmsg_string_put(fmsg, "\n\tPoison Data on:");
+ if (intr_val & BIT_ULL(34))
+ devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_INST_S");
+ if (intr_val & BIT_ULL(33))
+ devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_RES_S");
+ if (intr_val & BIT_ULL(32))
+ devlink_fmsg_string_put(fmsg, "\n\tHW ctx");
+ if (intr_val & BIT_ULL(4))
+ devlink_fmsg_string_put(fmsg, "\n\tPacket from mirror buffer");
+ if (intr_val & BIT_ULL(3))
+ devlink_fmsg_string_put(fmsg, "\n\tPacket from multicast buffer");
+ if (intr_val & BIT_ULL(2))
+ devlink_fmsg_string_put(fmsg, "\n\tWQE read from mirror buffer");
+ if (intr_val & BIT_ULL(1))
+ devlink_fmsg_string_put(fmsg, "\n\tWQE read from multicast buffer");
+ if (intr_val & BIT_ULL(0))
+ devlink_fmsg_string_put(fmsg, "\n\tNIX_RX_MCE_S read");
+ rvu_report_pair_end(fmsg);
break;
default:
return -EINVAL;
@@ -919,181 +815,87 @@ static int rvu_npa_report_show(struct devlink_fmsg *fmsg, void *ctx,
struct rvu_npa_event_ctx *npa_event_context;
unsigned int alloc_dis, free_dis;
u64 intr_val;
- int err;
npa_event_context = ctx;
switch (health_reporter) {
case NPA_AF_RVU_GEN:
intr_val = npa_event_context->npa_af_rvu_gen;
- err = rvu_report_pair_start(fmsg, "NPA_AF_GENERAL");
- if (err)
- return err;
- err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA General Interrupt Reg ",
- npa_event_context->npa_af_rvu_gen);
- if (err)
- return err;
- if (intr_val & BIT_ULL(32)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tUnmap PF Error");
- if (err)
- return err;
- }
+ rvu_report_pair_start(fmsg, "NPA_AF_GENERAL");
+ devlink_fmsg_u64_pair_put(fmsg, "\tNPA General Interrupt Reg ",
+ npa_event_context->npa_af_rvu_gen);
+ if (intr_val & BIT_ULL(32))
+ devlink_fmsg_string_put(fmsg, "\n\tUnmap PF Error");
free_dis = FIELD_GET(GENMASK(15, 0), intr_val);
- if (free_dis & BIT(NPA_INPQ_NIX0_RX)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tNIX0: free disabled RX");
- if (err)
- return err;
- }
- if (free_dis & BIT(NPA_INPQ_NIX0_TX)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tNIX0:free disabled TX");
- if (err)
- return err;
- }
- if (free_dis & BIT(NPA_INPQ_NIX1_RX)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tNIX1: free disabled RX");
- if (err)
- return err;
- }
- if (free_dis & BIT(NPA_INPQ_NIX1_TX)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tNIX1:free disabled TX");
- if (err)
- return err;
- }
- if (free_dis & BIT(NPA_INPQ_SSO)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for SSO");
- if (err)
- return err;
- }
- if (free_dis & BIT(NPA_INPQ_TIM)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for TIM");
- if (err)
- return err;
- }
- if (free_dis & BIT(NPA_INPQ_DPI)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for DPI");
- if (err)
- return err;
- }
- if (free_dis & BIT(NPA_INPQ_AURA_OP)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for AURA");
- if (err)
- return err;
- }
+ if (free_dis & BIT(NPA_INPQ_NIX0_RX))
+ devlink_fmsg_string_put(fmsg, "\n\tNIX0: free disabled RX");
+ if (free_dis & BIT(NPA_INPQ_NIX0_TX))
+ devlink_fmsg_string_put(fmsg, "\n\tNIX0:free disabled TX");
+ if (free_dis & BIT(NPA_INPQ_NIX1_RX))
+ devlink_fmsg_string_put(fmsg, "\n\tNIX1: free disabled RX");
+ if (free_dis & BIT(NPA_INPQ_NIX1_TX))
+ devlink_fmsg_string_put(fmsg, "\n\tNIX1:free disabled TX");
+ if (free_dis & BIT(NPA_INPQ_SSO))
+ devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for SSO");
+ if (free_dis & BIT(NPA_INPQ_TIM))
+ devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for TIM");
+ if (free_dis & BIT(NPA_INPQ_DPI))
+ devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for DPI");
+ if (free_dis & BIT(NPA_INPQ_AURA_OP))
+ devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for AURA");
alloc_dis = FIELD_GET(GENMASK(31, 16), intr_val);
- if (alloc_dis & BIT(NPA_INPQ_NIX0_RX)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tNIX0: alloc disabled RX");
- if (err)
- return err;
- }
- if (alloc_dis & BIT(NPA_INPQ_NIX0_TX)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tNIX0:alloc disabled TX");
- if (err)
- return err;
- }
- if (alloc_dis & BIT(NPA_INPQ_NIX1_RX)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tNIX1: alloc disabled RX");
- if (err)
- return err;
- }
- if (alloc_dis & BIT(NPA_INPQ_NIX1_TX)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tNIX1:alloc disabled TX");
- if (err)
- return err;
- }
- if (alloc_dis & BIT(NPA_INPQ_SSO)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for SSO");
- if (err)
- return err;
- }
- if (alloc_dis & BIT(NPA_INPQ_TIM)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for TIM");
- if (err)
- return err;
- }
- if (alloc_dis & BIT(NPA_INPQ_DPI)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for DPI");
- if (err)
- return err;
- }
- if (alloc_dis & BIT(NPA_INPQ_AURA_OP)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for AURA");
- if (err)
- return err;
- }
- err = rvu_report_pair_end(fmsg);
- if (err)
- return err;
+ if (alloc_dis & BIT(NPA_INPQ_NIX0_RX))
+ devlink_fmsg_string_put(fmsg, "\n\tNIX0: alloc disabled RX");
+ if (alloc_dis & BIT(NPA_INPQ_NIX0_TX))
+ devlink_fmsg_string_put(fmsg, "\n\tNIX0:alloc disabled TX");
+ if (alloc_dis & BIT(NPA_INPQ_NIX1_RX))
+ devlink_fmsg_string_put(fmsg, "\n\tNIX1: alloc disabled RX");
+ if (alloc_dis & BIT(NPA_INPQ_NIX1_TX))
+ devlink_fmsg_string_put(fmsg, "\n\tNIX1:alloc disabled TX");
+ if (alloc_dis & BIT(NPA_INPQ_SSO))
+ devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for SSO");
+ if (alloc_dis & BIT(NPA_INPQ_TIM))
+ devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for TIM");
+ if (alloc_dis & BIT(NPA_INPQ_DPI))
+ devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for DPI");
+ if (alloc_dis & BIT(NPA_INPQ_AURA_OP))
+ devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for AURA");
+
+ rvu_report_pair_end(fmsg);
break;
case NPA_AF_RVU_ERR:
- err = rvu_report_pair_start(fmsg, "NPA_AF_ERR");
- if (err)
- return err;
- err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA Error Interrupt Reg ",
- npa_event_context->npa_af_rvu_err);
- if (err)
- return err;
-
- if (npa_event_context->npa_af_rvu_err & BIT_ULL(14)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_INST_S read");
- if (err)
- return err;
- }
- if (npa_event_context->npa_af_rvu_err & BIT_ULL(13)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_RES_S write");
- if (err)
- return err;
- }
- if (npa_event_context->npa_af_rvu_err & BIT_ULL(12)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
- if (err)
- return err;
- }
- err = rvu_report_pair_end(fmsg);
- if (err)
- return err;
+ rvu_report_pair_start(fmsg, "NPA_AF_ERR");
+ devlink_fmsg_u64_pair_put(fmsg, "\tNPA Error Interrupt Reg ",
+ npa_event_context->npa_af_rvu_err);
+ if (npa_event_context->npa_af_rvu_err & BIT_ULL(14))
+ devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_INST_S read");
+ if (npa_event_context->npa_af_rvu_err & BIT_ULL(13))
+ devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_RES_S write");
+ if (npa_event_context->npa_af_rvu_err & BIT_ULL(12))
+ devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
+ rvu_report_pair_end(fmsg);
break;
case NPA_AF_RVU_RAS:
- err = rvu_report_pair_start(fmsg, "NPA_AF_RVU_RAS");
- if (err)
- return err;
- err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA RAS Interrupt Reg ",
- npa_event_context->npa_af_rvu_ras);
- if (err)
- return err;
- if (npa_event_context->npa_af_rvu_ras & BIT_ULL(34)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_INST_S");
- if (err)
- return err;
- }
- if (npa_event_context->npa_af_rvu_ras & BIT_ULL(33)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_RES_S");
- if (err)
- return err;
- }
- if (npa_event_context->npa_af_rvu_ras & BIT_ULL(32)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on HW context");
- if (err)
- return err;
- }
- err = rvu_report_pair_end(fmsg);
- if (err)
- return err;
+ rvu_report_pair_start(fmsg, "NPA_AF_RVU_RAS");
+ devlink_fmsg_u64_pair_put(fmsg, "\tNPA RAS Interrupt Reg ",
+ npa_event_context->npa_af_rvu_ras);
+ if (npa_event_context->npa_af_rvu_ras & BIT_ULL(34))
+ devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_INST_S");
+ if (npa_event_context->npa_af_rvu_ras & BIT_ULL(33))
+ devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_RES_S");
+ if (npa_event_context->npa_af_rvu_ras & BIT_ULL(32))
+ devlink_fmsg_string_put(fmsg, "\n\tPoison data on HW context");
+ rvu_report_pair_end(fmsg);
break;
case NPA_AF_RVU_INTR:
- err = rvu_report_pair_start(fmsg, "NPA_AF_RVU");
- if (err)
- return err;
- err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA RVU Interrupt Reg ",
- npa_event_context->npa_af_rvu_int);
- if (err)
- return err;
- if (npa_event_context->npa_af_rvu_int & BIT_ULL(0)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
- if (err)
- return err;
- }
- return rvu_report_pair_end(fmsg);
+ rvu_report_pair_start(fmsg, "NPA_AF_RVU");
+ devlink_fmsg_u64_pair_put(fmsg, "\tNPA RVU Interrupt Reg ",
+ npa_event_context->npa_af_rvu_int);
+ if (npa_event_context->npa_af_rvu_int & BIT_ULL(0))
+ devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
+ rvu_report_pair_end(fmsg);
+ break;
default:
return -EINVAL;
}
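The rvu_devlink.c hunks above track the 6.7 devlink change that turned the devlink_fmsg_*() formatting helpers into void functions, which is why every per-call error check in the health reporters could be dropped. A minimal sketch of the resulting reporting pattern, assuming a hypothetical decoder and illustrative fault bits (none of the names below come from the driver):

static void example_decode_intr(struct devlink_fmsg *fmsg, u64 intr_val)
{
	/* Pairs and strings are appended unconditionally; any internal
	 * fmsg error is expected to be latched and surfaced once when
	 * devlink core finally serializes the message.
	 */
	devlink_fmsg_u64_pair_put(fmsg, "Interrupt Reg", intr_val);
	if (intr_val & BIT_ULL(0))
		devlink_fmsg_string_put(fmsg, "\n\tExample fault on bit 0");
	if (intr_val & BIT_ULL(1))
		devlink_fmsg_string_put(fmsg, "\n\tExample fault on bit 1");
}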
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 0bcf3e5592..55639c133d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -437,6 +437,10 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
return;
}
+ /* AF modifies given action iff PF/VF has requested it */
+ if ((entry->action & 0xFULL) != NIX_RX_ACTION_DEFAULT)
+ return;
+
/* copy VF default entry action to the VF mcam entry */
rx_action = npc_get_default_entry_action(rvu, mcam, blkaddr,
target_func);
@@ -2678,18 +2682,17 @@ int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
rsp->entry = NPC_MCAM_ENTRY_INVALID;
rsp->free_count = 0;
- /* Check if ref_entry is within range */
- if (req->priority && req->ref_entry >= mcam->bmap_entries) {
- dev_err(rvu->dev, "%s: reference entry %d is out of range\n",
- __func__, req->ref_entry);
- return NPC_MCAM_INVALID_REQ;
- }
+ /* If ref_entry is beyond the valid range,
+ * clamp it to the max value.
+ */
+ if (req->ref_entry > mcam->bmap_entries)
+ req->ref_entry = mcam->bmap_entries;
/* ref_entry can't be '0' if requested priority is high.
* Can't be last entry if requested priority is low.
*/
if ((!req->ref_entry && req->priority == NPC_MCAM_HIGHER_PRIO) ||
- ((req->ref_entry == (mcam->bmap_entries - 1)) &&
+ ((req->ref_entry == mcam->bmap_entries) &&
req->priority == NPC_MCAM_LOWER_PRIO))
return NPC_MCAM_INVALID_REQ;
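The MCAM allocation hunk above relaxes the ref_entry validation: an out-of-range reference entry is now clamped to bmap_entries instead of failing the request, and only the two priority/boundary combinations that remain meaningless are rejected. A minimal sketch of that check, with illustrative parameter names:

static int example_check_ref_entry(u16 ref_entry, u16 bmap_entries, u8 prio)
{
	/* Clamp instead of rejecting out-of-range references. */
	if (ref_entry > bmap_entries)
		ref_entry = bmap_entries;

	/* Entry 0 can't anchor a higher-priority allocation and the
	 * last entry can't anchor a lower-priority one.
	 */
	if ((!ref_entry && prio == NPC_MCAM_HIGHER_PRIO) ||
	    (ref_entry == bmap_entries && prio == NPC_MCAM_LOWER_PRIO))
		return NPC_MCAM_INVALID_REQ;

	return 0;
}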
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index 237f82082e..114e4ec218 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -43,6 +43,14 @@ static const char * const npc_flow_names[] = {
[NPC_DPORT_SCTP] = "sctp destination port",
[NPC_LXMB] = "Mcast/Bcast header ",
[NPC_IPSEC_SPI] = "SPI ",
+ [NPC_MPLS1_LBTCBOS] = "lse depth 1 label tc bos",
+ [NPC_MPLS1_TTL] = "lse depth 1 ttl",
+ [NPC_MPLS2_LBTCBOS] = "lse depth 2 label tc bos",
+ [NPC_MPLS2_TTL] = "lse depth 2 ttl",
+ [NPC_MPLS3_LBTCBOS] = "lse depth 3 label tc bos",
+ [NPC_MPLS3_TTL] = "lse depth 3 ttl",
+ [NPC_MPLS4_LBTCBOS] = "lse depth 4 label tc bos",
+ [NPC_MPLS4_TTL] = "lse depth 4 ttl",
[NPC_UNKNOWN] = "unknown",
};
@@ -528,6 +536,14 @@ do { \
NPC_SCAN_HDR(NPC_IPSEC_SPI, NPC_LID_LD, NPC_LT_LD_AH, 4, 4);
NPC_SCAN_HDR(NPC_IPSEC_SPI, NPC_LID_LE, NPC_LT_LE_ESP, 0, 4);
+ NPC_SCAN_HDR(NPC_MPLS1_LBTCBOS, NPC_LID_LC, NPC_LT_LC_MPLS, 0, 3);
+ NPC_SCAN_HDR(NPC_MPLS1_TTL, NPC_LID_LC, NPC_LT_LC_MPLS, 3, 1);
+ NPC_SCAN_HDR(NPC_MPLS2_LBTCBOS, NPC_LID_LC, NPC_LT_LC_MPLS, 4, 3);
+ NPC_SCAN_HDR(NPC_MPLS2_TTL, NPC_LID_LC, NPC_LT_LC_MPLS, 7, 1);
+ NPC_SCAN_HDR(NPC_MPLS3_LBTCBOS, NPC_LID_LC, NPC_LT_LC_MPLS, 8, 3);
+ NPC_SCAN_HDR(NPC_MPLS3_TTL, NPC_LID_LC, NPC_LT_LC_MPLS, 11, 1);
+ NPC_SCAN_HDR(NPC_MPLS4_LBTCBOS, NPC_LID_LC, NPC_LT_LC_MPLS, 12, 3);
+ NPC_SCAN_HDR(NPC_MPLS4_TTL, NPC_LID_LC, NPC_LT_LC_MPLS, 15, 1);
/* SMAC follows the DMAC(which is 6 bytes) */
NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start + 6, 6);
@@ -593,6 +609,11 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
/* for L2M/L2B/L3M/L3B, check if the type is present in the key */
if (npc_check_field(rvu, blkaddr, NPC_LXMB, intf))
*features |= BIT_ULL(NPC_LXMB);
+
+ for (hdr = NPC_MPLS1_LBTCBOS; hdr <= NPC_MPLS4_TTL; hdr++) {
+ if (npc_check_field(rvu, blkaddr, hdr, intf))
+ *features |= BIT_ULL(hdr);
+ }
}
/* Scan key extraction profile and record how fields of our interest
@@ -959,6 +980,47 @@ do { \
NPC_WRITE_FLOW(NPC_INNER_VID, vlan_itci, ntohs(pkt->vlan_itci), 0,
ntohs(mask->vlan_itci), 0);
+ NPC_WRITE_FLOW(NPC_MPLS1_LBTCBOS, mpls_lse,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
+ pkt->mpls_lse[0]), 0,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
+ mask->mpls_lse[0]), 0);
+ NPC_WRITE_FLOW(NPC_MPLS1_TTL, mpls_lse,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
+ pkt->mpls_lse[0]), 0,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
+ mask->mpls_lse[0]), 0);
+ NPC_WRITE_FLOW(NPC_MPLS2_LBTCBOS, mpls_lse,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
+ pkt->mpls_lse[1]), 0,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
+ mask->mpls_lse[1]), 0);
+ NPC_WRITE_FLOW(NPC_MPLS2_TTL, mpls_lse,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
+ pkt->mpls_lse[1]), 0,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
+ mask->mpls_lse[1]), 0);
+ NPC_WRITE_FLOW(NPC_MPLS3_LBTCBOS, mpls_lse,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
+ pkt->mpls_lse[2]), 0,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
+ mask->mpls_lse[2]), 0);
+ NPC_WRITE_FLOW(NPC_MPLS3_TTL, mpls_lse,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
+ pkt->mpls_lse[2]), 0,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
+ mask->mpls_lse[2]), 0);
+ NPC_WRITE_FLOW(NPC_MPLS4_LBTCBOS, mpls_lse,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
+ pkt->mpls_lse[3]), 0,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
+ mask->mpls_lse[3]), 0);
+ NPC_WRITE_FLOW(NPC_MPLS4_TTL, mpls_lse,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
+ pkt->mpls_lse[3]), 0,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
+ mask->mpls_lse[3]), 0);
+
NPC_WRITE_FLOW(NPC_IPFRAG_IPV6, next_header, pkt->next_header, 0,
mask->next_header, 0);
npc_update_ipv6_flow(rvu, entry, features, pkt, mask, output, intf);
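The rvu_npc_fs.c hunks above split every 4-byte MPLS label stack entry of NPC_LT_LC_MPLS into a 3-byte label/TC/BOS field and a 1-byte TTL field at consecutive offsets, for up to four LSE depths. The OTX2_FLOWER_MASK_MPLS_* masks referenced here are assumed to follow the standard RFC 3032 LSE layout, roughly:

/* Assumed bit layout of one MPLS label stack entry:
 * label [31:12], traffic class [11:9], bottom-of-stack [8], TTL [7:0].
 */
#define EX_MPLS_LB	GENMASK_ULL(31, 12)
#define EX_MPLS_TC	GENMASK_ULL(11, 9)
#define EX_MPLS_BOS	BIT_ULL(8)
#define EX_MPLS_TTL	GENMASK_ULL(7, 0)
#define EX_MPLS_NON_TTL	GENMASK_ULL(31, 8)	/* the LBTCBOS portion */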
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 629cf1659e..02d0b707ae 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -951,8 +951,11 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
if (pfvf->ptp && qidx < pfvf->hw.tx_queues) {
err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt,
sizeof(*sq->timestamps));
- if (err)
+ if (err) {
+ kfree(sq->sg);
+ sq->sg = NULL;
return err;
+ }
}
sq->head = 0;
@@ -968,7 +971,14 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
sq->stats.bytes = 0;
sq->stats.pkts = 0;
- return pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura);
+ err = pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura);
+ if (err) {
+ kfree(sq->sg);
+ sq->sg = NULL;
+ return err;
+ }
+
+ return 0;
}
@@ -1399,7 +1409,7 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
}
pp_params.order = get_order(buf_size);
- pp_params.flags = PP_FLAG_PAGE_FRAG | PP_FLAG_DMA_MAP;
+ pp_params.flags = PP_FLAG_DMA_MAP;
pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
pp_params.nid = NUMA_NO_NODE;
pp_params.dev = pfvf->dev;
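Two otx2_common.c fixes are visible above: otx2_sq_init() now frees sq->sg on every failure path instead of leaking it, and the page pool setup drops PP_FLAG_PAGE_FRAG, which recent kernels removed from the page pool API (fragment vs. full-page allocation is chosen per allocation call). A minimal page pool setup sketch under those assumptions, with illustrative sizes:

struct page_pool_params pp_params = {
	.flags     = PP_FLAG_DMA_MAP,	/* no PP_FLAG_PAGE_FRAG anymore */
	.order     = 0,
	.pool_size = 256,		/* illustrative */
	.nid       = NUMA_NO_NODE,
	.dev       = dev,		/* the driver's struct device */
	.dma_dir   = DMA_FROM_DEVICE,
};
struct page_pool *pool = page_pool_create(&pp_params);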
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 53f6258a97..8b7fc0af91 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -314,7 +314,6 @@ static int otx2_set_channels(struct net_device *dev,
pfvf->hw.tx_queues = channel->tx_count;
if (pfvf->xdp_prog)
pfvf->hw.xdp_queues = channel->rx_count;
- pfvf->hw.non_qos_queues = pfvf->hw.tx_queues + pfvf->hw.xdp_queues;
if (if_up)
err = dev->netdev_ops->ndo_open(dev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index a57455aebf..e5fe67e738 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1744,6 +1744,7 @@ int otx2_open(struct net_device *netdev)
/* RQ and SQs are mapped to different CQs,
* so find out max CQ IRQs (i.e CINTs) needed.
*/
+ pf->hw.non_qos_queues = pf->hw.tx_queues + pf->hw.xdp_queues;
pf->hw.cint_cnt = max3(pf->hw.rx_queues, pf->hw.tx_queues,
pf->hw.tc_tx_queues);
@@ -2643,8 +2644,6 @@ static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog)
xdp_features_clear_redirect_target(dev);
}
- pf->hw.non_qos_queues += pf->hw.xdp_queues;
-
if (if_up)
otx2_open(pf->netdev);
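Together with the otx2_ethtool.c hunk, the otx2_pf.c change above consolidates the non-QoS send-queue accounting: otx2_open() derives it from the current TX and XDP queue counts each time the interface comes up, instead of the channel and XDP setup paths adjusting it incrementally. Roughly, under that reading (field names as in struct otx2_hw):

/* Recomputed on every open, so channel or XDP reconfiguration
 * cannot leave a stale count behind.
 */
hw->non_qos_queues = hw->tx_queues + hw->xdp_queues;
hw->cint_cnt = max3(hw->rx_queues, hw->tx_queues, hw->tc_tx_queues);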
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
index 3a72b0793d..63130ba37e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -175,7 +175,7 @@ static int ptp_set_thresh(struct otx2_ptp *ptp, u64 thresh)
return otx2_sync_mbox_msg(&ptp->nic->mbox);
}
-static int ptp_extts_on(struct otx2_ptp *ptp, int on)
+static int ptp_pps_on(struct otx2_ptp *ptp, int on, u64 period)
{
struct ptp_req *req;
@@ -186,8 +186,9 @@ static int ptp_extts_on(struct otx2_ptp *ptp, int on)
if (!req)
return -ENOMEM;
- req->op = PTP_OP_EXTTS_ON;
- req->extts_on = on;
+ req->op = PTP_OP_PPS_ON;
+ req->pps_on = on;
+ req->period = period;
return otx2_sync_mbox_msg(&ptp->nic->mbox);
}
@@ -276,8 +277,8 @@ static int otx2_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
switch (func) {
case PTP_PF_NONE:
case PTP_PF_EXTTS:
- break;
case PTP_PF_PEROUT:
+ break;
case PTP_PF_PHYSYNC:
return -1;
}
@@ -340,6 +341,7 @@ static int otx2_ptp_enable(struct ptp_clock_info *ptp_info,
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
ptp_info);
+ u64 period = 0;
int pin;
if (!ptp->nic)
@@ -351,12 +353,24 @@ static int otx2_ptp_enable(struct ptp_clock_info *ptp_info,
rq->extts.index);
if (pin < 0)
return -EBUSY;
- if (on) {
- ptp_extts_on(ptp, on);
+ if (on)
schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200));
- } else {
- ptp_extts_on(ptp, on);
+ else
cancel_delayed_work_sync(&ptp->extts_work);
+
+ return 0;
+ case PTP_CLK_REQ_PEROUT:
+ if (rq->perout.flags)
+ return -EOPNOTSUPP;
+
+ if (rq->perout.index >= ptp_info->n_pins)
+ return -EINVAL;
+ if (on) {
+ period = rq->perout.period.sec * NSEC_PER_SEC +
+ rq->perout.period.nsec;
+ ptp_pps_on(ptp, on, period);
+ } else {
+ ptp_pps_on(ptp, on, period);
}
return 0;
default:
@@ -411,6 +425,7 @@ int otx2_ptp_init(struct otx2_nic *pfvf)
.name = "OcteonTX2 PTP",
.max_adj = 1000000000ull,
.n_ext_ts = 1,
+ .n_per_out = 1,
.n_pins = 1,
.pps = 0,
.pin_config = &ptp_ptr->extts_config,
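The otx2_ptp.c hunks above add periodic output support: a PTP_CLK_REQ_PEROUT request is forwarded to firmware as a single nanosecond period via the PTP_OP_PPS_ON mailbox operation, and one programmable pin is advertised through n_per_out. A minimal sketch of the period conversion, assuming the standard ptp_perout_request layout:

#include <linux/ptp_clock_kernel.h>

static u64 example_perout_period_ns(const struct ptp_perout_request *rq)
{
	/* User space supplies sec/nsec; firmware takes one ns value. */
	return (u64)rq->period.sec * NSEC_PER_SEC + rq->period.nsec;
}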
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 423ce54eae..db1e0e0e81 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -27,6 +27,8 @@
#define CN10K_TLX_BURST_MANTISSA GENMASK_ULL(43, 29)
#define CN10K_TLX_BURST_EXPONENT GENMASK_ULL(47, 44)
+#define OTX2_UNSUPP_LSE_DEPTH GENMASK(6, 4)
+
struct otx2_tc_flow_stats {
u64 bytes;
u64 pkts;
@@ -538,6 +540,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
BIT(FLOW_DISSECTOR_KEY_IPSEC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_MPLS) |
BIT_ULL(FLOW_DISSECTOR_KEY_IP)))) {
netdev_info(nic->netdev, "unsupported flow used key 0x%llx",
dissector->used_keys);
@@ -757,6 +760,61 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
}
}
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
+ struct flow_match_mpls match;
+ u8 bit;
+
+ flow_rule_match_mpls(rule, &match);
+
+ if (match.mask->used_lses & OTX2_UNSUPP_LSE_DEPTH) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported LSE depth for MPLS match offload");
+ return -EOPNOTSUPP;
+ }
+
+ for_each_set_bit(bit, (unsigned long *)&match.mask->used_lses,
+ FLOW_DIS_MPLS_MAX) {
+ /* check if any of the fields LABEL,TC,BOS are set */
+ if (*((u32 *)&match.mask->ls[bit]) &
+ OTX2_FLOWER_MASK_MPLS_NON_TTL) {
+ /* Hardware will capture 4 byte MPLS header into
+ * two fields NPC_MPLSX_LBTCBOS and NPC_MPLSX_TTL.
+ * Derive the associated NPC key based on header
+ * index and offset.
+ */
+
+ req->features |= BIT_ULL(NPC_MPLS1_LBTCBOS +
+ 2 * bit);
+ flow_spec->mpls_lse[bit] =
+ FIELD_PREP(OTX2_FLOWER_MASK_MPLS_LB,
+ match.key->ls[bit].mpls_label) |
+ FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TC,
+ match.key->ls[bit].mpls_tc) |
+ FIELD_PREP(OTX2_FLOWER_MASK_MPLS_BOS,
+ match.key->ls[bit].mpls_bos);
+
+ flow_mask->mpls_lse[bit] =
+ FIELD_PREP(OTX2_FLOWER_MASK_MPLS_LB,
+ match.mask->ls[bit].mpls_label) |
+ FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TC,
+ match.mask->ls[bit].mpls_tc) |
+ FIELD_PREP(OTX2_FLOWER_MASK_MPLS_BOS,
+ match.mask->ls[bit].mpls_bos);
+ }
+
+ if (match.mask->ls[bit].mpls_ttl) {
+ req->features |= BIT_ULL(NPC_MPLS1_TTL +
+ 2 * bit);
+ flow_spec->mpls_lse[bit] |=
+ FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TTL,
+ match.key->ls[bit].mpls_ttl);
+ flow_mask->mpls_lse[bit] |=
+ FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TTL,
+ match.mask->ls[bit].mpls_ttl);
+ }
+ }
+ }
+
return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
}
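The otx2_tc.c hunk above accepts flower MPLS matches for LSE depths 1-4 (anything deeper is rejected via OTX2_UNSUPP_LSE_DEPTH) and maps each matched LSE onto the two per-depth NPC keys. The index arithmetic assumes the LBTCBOS/TTL keys are laid out consecutively per depth, roughly as sketched here with hypothetical helper names:

/* Assumed enum layout: NPC_MPLS1_LBTCBOS, NPC_MPLS1_TTL,
 * NPC_MPLS2_LBTCBOS, NPC_MPLS2_TTL, ... i.e. two keys per depth.
 */
static inline u64 example_mpls_feature_bits(int lse_idx, bool lbtcbos, bool ttl)
{
	u64 bits = 0;

	if (lbtcbos)
		bits |= BIT_ULL(NPC_MPLS1_LBTCBOS + 2 * lse_idx);
	if (ttl)
		bits |= BIT_ULL(NPC_MPLS1_TTL + 2 * lse_idx);
	return bits;
}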
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 4d519ea833..f828d32737 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -1403,7 +1403,7 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq,
bool *need_xdp_flush)
{
- unsigned char *hard_start, *data;
+ unsigned char *hard_start;
int qidx = cq->cq_idx;
struct xdp_buff xdp;
struct page *page;
@@ -1417,9 +1417,8 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
xdp_init_buff(&xdp, pfvf->rbsize, &cq->xdp_rxq);
- data = (unsigned char *)phys_to_virt(pa);
- hard_start = page_address(page);
- xdp_prepare_buff(&xdp, hard_start, data - hard_start,
+ hard_start = (unsigned char *)phys_to_virt(pa);
+ xdp_prepare_buff(&xdp, hard_start, OTX2_HEAD_ROOM,
cqe->sg.seg_size, false);
act = bpf_prog_run_xdp(prog, &xdp);
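The otx2_txrx.c hunk above prepares the XDP buffer from the DMA address itself, treating it as hard_start and using the driver's fixed OTX2_HEAD_ROOM as the headroom rather than deriving it from page_address(). The generic xdp_buff preparation pattern it relies on, with illustrative local names:

struct xdp_buff xdp;
u32 act;

/* frame_sz covers headroom + data + tailroom of this buffer. */
xdp_init_buff(&xdp, frame_sz, &rxq_info);
/* packet data begins 'headroom' bytes after hard_start */
xdp_prepare_buff(&xdp, hard_start, headroom, pkt_len, false);
act = bpf_prog_run_xdp(prog, &xdp);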
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index d5691b6a2b..dd6ca2e4fd 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1528,7 +1528,7 @@ err_clk:
return err;
}
-static int pxa168_eth_remove(struct platform_device *pdev)
+static void pxa168_eth_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct pxa168_eth_private *pep = netdev_priv(dev);
@@ -1547,7 +1547,6 @@ static int pxa168_eth_remove(struct platform_device *pdev)
mdiobus_free(pep->smi_bus);
unregister_netdev(dev);
free_netdev(dev);
- return 0;
}
static void pxa168_eth_shutdown(struct platform_device *pdev)
@@ -1580,7 +1579,7 @@ MODULE_DEVICE_TABLE(of, pxa168_eth_of_match);
static struct platform_driver pxa168_eth_driver = {
.probe = pxa168_eth_probe,
- .remove = pxa168_eth_remove,
+ .remove_new = pxa168_eth_remove,
.shutdown = pxa168_eth_shutdown,
.resume = pxa168_eth_resume,
.suspend = pxa168_eth_suspend,