author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:12 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:12 +0000
commit     8665bd53f2f2e27e5511d90428cb3f60e6d0ce15 (patch)
tree       8d58900dc0ebd4a3011f92c128d2fe45bc7c4bf2  /drivers/net/ethernet/marvell
parent     Adding debian version 6.7.12-1. (diff)
Merging upstream version 6.8.9.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/marvell')
-rw-r--r--  drivers/net/ethernet/marvell/mvmdio.c | 44
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 25
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 102
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/Makefile | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c | 84
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c | 925
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_config.h | 48
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c | 86
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h | 173
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_main.c | 241
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_main.h | 65
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c | 449
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h | 167
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h | 13
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_regs_cnxk_pf.h | 416
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_rx.c | 12
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_rx.h | 34
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_tx.c | 5
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_tx.h | 99
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 5
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.c | 1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 74
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc.h | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 9
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 42
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c | 9
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 25
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c | 96
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 746
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c | 123
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c | 96
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h | 17
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 80
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c | 134
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/qos.c | 1
39 files changed, 4018 insertions(+), 443 deletions(-)
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 5f66f779e5..9190eff6c0 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -53,6 +53,13 @@
#define MVMDIO_XSMI_BUSY BIT(30)
#define MVMDIO_XSMI_ADDR_REG 0x8
+#define MVMDIO_XSMI_CFG_REG 0xc
+#define MVMDIO_XSMI_CLKDIV_MASK 0x3
+#define MVMDIO_XSMI_CLKDIV_256 0x0
+#define MVMDIO_XSMI_CLKDIV_64 0x1
+#define MVMDIO_XSMI_CLKDIV_32 0x2
+#define MVMDIO_XSMI_CLKDIV_8 0x3
+
/*
* SMI Timeout measurements:
* - Kirkwood 88F6281 (Globalscale Dreamplug): 45us to 95us (Interrupt)
@@ -225,6 +232,40 @@ static int orion_mdio_xsmi_write_c45(struct mii_bus *bus, int mii_id,
return 0;
}
+static void orion_mdio_xsmi_set_mdc_freq(struct mii_bus *bus)
+{
+ struct orion_mdio_dev *dev = bus->priv;
+ struct clk *mg_core;
+ u32 div, freq, cfg;
+
+ if (device_property_read_u32(bus->parent, "clock-frequency", &freq))
+ return;
+
+ mg_core = of_clk_get_by_name(bus->parent->of_node, "mg_core_clk");
+ if (IS_ERR(mg_core)) {
+ dev_err(bus->parent,
+ "MG core clock unknown, not changing MDC frequency");
+ return;
+ }
+
+ div = clk_get_rate(mg_core) / (freq + 1) + 1;
+ clk_put(mg_core);
+
+ if (div <= 8)
+ div = MVMDIO_XSMI_CLKDIV_8;
+ else if (div <= 32)
+ div = MVMDIO_XSMI_CLKDIV_32;
+ else if (div <= 64)
+ div = MVMDIO_XSMI_CLKDIV_64;
+ else
+ div = MVMDIO_XSMI_CLKDIV_256;
+
+ cfg = readl(dev->regs + MVMDIO_XSMI_CFG_REG);
+ cfg &= ~MVMDIO_XSMI_CLKDIV_MASK;
+ cfg |= div;
+ writel(cfg, dev->regs + MVMDIO_XSMI_CFG_REG);
+}
+
static irqreturn_t orion_mdio_err_irq(int irq, void *dev_id)
{
struct orion_mdio_dev *dev = dev_id;
@@ -303,6 +344,9 @@ static int orion_mdio_probe(struct platform_device *pdev)
dev_warn(&pdev->dev,
"unsupported number of clocks, limiting to the first "
__stringify(ARRAY_SIZE(dev->clk)) "\n");
+
+ if (type == BUS_TYPE_XSMI)
+ orion_mdio_xsmi_set_mdc_freq(bus);
} else {
dev->clk[0] = clk_get(&pdev->dev, NULL);
if (PTR_ERR(dev->clk[0]) == -EPROBE_DEFER) {
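Note on the hunk above: orion_mdio_xsmi_set_mdc_freq() picks the smallest supported XSMI clock divider that keeps MDC at or below the requested "clock-frequency" property. A minimal standalone sketch of that rounding, under assumed rates (400 MHz mg_core, 2.5 MHz request — illustrative values, not from the patch):

    #include <stdio.h>

    int main(void)
    {
            unsigned long core_rate = 400000000UL;  /* assumed mg_core_clk rate */
            unsigned long freq = 2500000UL;         /* assumed "clock-frequency" */
            unsigned long div;

            /* round up, so the resulting MDC never exceeds the request */
            div = core_rate / (freq + 1) + 1;       /* 160 for these rates */

            /* smallest supported divider >= div, capped at 256 */
            if (div <= 8)
                    puts("MVMDIO_XSMI_CLKDIV_8");
            else if (div <= 32)
                    puts("MVMDIO_XSMI_CLKDIV_32");
            else if (div <= 64)
                    puts("MVMDIO_XSMI_CLKDIV_64");
            else
                    puts("MVMDIO_XSMI_CLKDIV_256"); /* 400 MHz / 256 ~= 1.56 MHz */
            return 0;
    }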
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 29aac32757..a641b3534c 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -5030,8 +5030,9 @@ static int mvneta_config_rss(struct mvneta_port *pp)
return 0;
}
-static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int mvneta_ethtool_set_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct mvneta_port *pp = netdev_priv(dev);
@@ -5042,20 +5043,21 @@ static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
/* We require at least one supported parameter to be changed
* and no change in any of the unsupported parameters
*/
- if (key ||
- (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ if (rxfh->key ||
+ (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP))
return -EOPNOTSUPP;
- if (!indir)
+ if (!rxfh->indir)
return 0;
- memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE);
+ memcpy(pp->indir, rxfh->indir, MVNETA_RSS_LU_TABLE_SIZE);
return mvneta_config_rss(pp);
}
-static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int mvneta_ethtool_get_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh)
{
struct mvneta_port *pp = netdev_priv(dev);
@@ -5063,13 +5065,12 @@ static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
if (pp->neta_armada3700)
return -EOPNOTSUPP;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
- if (!indir)
+ if (!rxfh->indir)
return 0;
- memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);
+ memcpy(rxfh->indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);
return 0;
}
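The mvneta hunks above follow the kernel-wide conversion of the ethtool RXFH callbacks: the indirection table, hash key and hash function now arrive bundled in a single struct ethtool_rxfh_param instead of three separate arguments. A sketch of a getter in the new style — example_priv and EXAMPLE_RSS_TABLE_SIZE are hypothetical names, not part of this driver:

    static int example_get_rxfh(struct net_device *dev,
                                struct ethtool_rxfh_param *rxfh)
    {
            struct example_priv *priv = netdev_priv(dev);

            rxfh->hfunc = ETH_RSS_HASH_TOP;  /* always report the hash function */
            if (!rxfh->indir)                /* NULL: caller did not request it */
                    return 0;
            memcpy(rxfh->indir, priv->indir, EXAMPLE_RSS_TABLE_SIZE);
            return 0;
    }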
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 065f07392c..23adf53c2a 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -1538,10 +1538,21 @@ static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
- if (port->gop_id == 2)
+ if (port->gop_id == 2) {
val |= GENCONF_CTRL0_PORT2_RGMII;
- else if (port->gop_id == 3)
+ } else if (port->gop_id == 3) {
val |= GENCONF_CTRL0_PORT3_RGMII_MII;
+
+ /* According to the specification, GENCONF_CTRL0_PORT3_RGMII
+ * should be set to 1 for RGMII and 0 for MII. However, tests
+ * show that it is the other way around. This is also what
+ * U-Boot does for mvpp2, so it is assumed to be correct.
+ */
+ if (port->phy_interface == PHY_INTERFACE_MODE_MII)
+ val |= GENCONF_CTRL0_PORT3_RGMII;
+ else
+ val &= ~GENCONF_CTRL0_PORT3_RGMII;
+ }
regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
}
@@ -1640,6 +1651,7 @@ static int mvpp22_gop_init(struct mvpp2_port *port, phy_interface_t interface)
return 0;
switch (interface) {
+ case PHY_INTERFACE_MODE_MII:
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
@@ -5659,49 +5671,11 @@ static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev)
return mvpp22_rss_is_supported(port) ? MVPP22_RSS_TABLE_ENTRIES : 0;
}
-static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
- u8 *hfunc)
-{
- struct mvpp2_port *port = netdev_priv(dev);
- int ret = 0;
-
- if (!mvpp22_rss_is_supported(port))
- return -EOPNOTSUPP;
-
- if (indir)
- ret = mvpp22_port_rss_ctx_indir_get(port, 0, indir);
-
- if (hfunc)
- *hfunc = ETH_RSS_HASH_CRC32;
-
- return ret;
-}
-
-static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *key, const u8 hfunc)
-{
- struct mvpp2_port *port = netdev_priv(dev);
- int ret = 0;
-
- if (!mvpp22_rss_is_supported(port))
- return -EOPNOTSUPP;
-
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
- return -EOPNOTSUPP;
-
- if (key)
- return -EOPNOTSUPP;
-
- if (indir)
- ret = mvpp22_port_rss_ctx_indir_set(port, 0, indir);
-
- return ret;
-}
-
-static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,
- u8 *key, u8 *hfunc, u32 rss_context)
+static int mvpp2_ethtool_get_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh)
{
struct mvpp2_port *port = netdev_priv(dev);
+ u32 rss_context = rxfh->rss_context;
int ret = 0;
if (!mvpp22_rss_is_supported(port))
@@ -5709,33 +5683,34 @@ static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,
if (rss_context >= MVPP22_N_RSS_TABLES)
return -EINVAL;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_CRC32;
+ rxfh->hfunc = ETH_RSS_HASH_CRC32;
- if (indir)
- ret = mvpp22_port_rss_ctx_indir_get(port, rss_context, indir);
+ if (rxfh->indir)
+ ret = mvpp22_port_rss_ctx_indir_get(port, rss_context,
+ rxfh->indir);
return ret;
}
-static int mvpp2_ethtool_set_rxfh_context(struct net_device *dev,
- const u32 *indir, const u8 *key,
- const u8 hfunc, u32 *rss_context,
- bool delete)
+static int mvpp2_ethtool_set_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct mvpp2_port *port = netdev_priv(dev);
- int ret;
+ u32 *rss_context = &rxfh->rss_context;
+ int ret = 0;
if (!mvpp22_rss_is_supported(port))
return -EOPNOTSUPP;
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_CRC32)
return -EOPNOTSUPP;
- if (key)
+ if (rxfh->key)
return -EOPNOTSUPP;
- if (delete)
+ if (*rss_context && rxfh->rss_delete)
return mvpp22_port_rss_ctx_delete(port, *rss_context);
if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
@@ -5744,8 +5719,13 @@ static int mvpp2_ethtool_set_rxfh_context(struct net_device *dev,
return ret;
}
- return mvpp22_port_rss_ctx_indir_set(port, *rss_context, indir);
+ if (rxfh->indir)
+ ret = mvpp22_port_rss_ctx_indir_set(port, *rss_context,
+ rxfh->indir);
+
+ return ret;
}
+
/* Device ops */
static const struct net_device_ops mvpp2_netdev_ops = {
@@ -5765,6 +5745,7 @@ static const struct net_device_ops mvpp2_netdev_ops = {
};
static const struct ethtool_ops mvpp2_eth_tool_ops = {
+ .cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
.nway_reset = mvpp2_ethtool_nway_reset,
@@ -5787,8 +5768,6 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = {
.get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size,
.get_rxfh = mvpp2_ethtool_get_rxfh,
.set_rxfh = mvpp2_ethtool_set_rxfh,
- .get_rxfh_context = mvpp2_ethtool_get_rxfh_context,
- .set_rxfh_context = mvpp2_ethtool_set_rxfh_context,
};
/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
@@ -6923,7 +6902,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev->min_mtu = ETH_MIN_MTU;
/* 9704 == 9728 - 20 and rounding to 8 */
dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
- dev->dev.of_node = port_node;
+ device_set_node(&dev->dev, port_fwnode);
port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops;
port->pcs_gmac.neg_mode = true;
@@ -6973,8 +6952,11 @@ static int mvpp2_port_probe(struct platform_device *pdev,
MAC_10000FD;
}
- if (mvpp2_port_supports_rgmii(port))
+ if (mvpp2_port_supports_rgmii(port)) {
phy_interface_set_rgmii(port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ port->phylink_config.supported_interfaces);
+ }
if (comphy) {
/* If a COMPHY is present, we can support any of the
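Beyond the same RXFH conversion, the mvpp2 change folds the old get/set_rxfh_context callbacks into the main handlers: extra RSS contexts are now selected through rxfh->rss_context and advertised with .cap_rss_ctx_supported. A minimal sketch of that dispatch shape (the example_rss_* helpers are hypothetical):

    static int example_set_rxfh(struct net_device *dev,
                                struct ethtool_rxfh_param *rxfh,
                                struct netlink_ext_ack *extack)
    {
            u32 *ctx = &rxfh->rss_context;
            int ret = 0;

            if (*ctx && rxfh->rss_delete)           /* delete an extra context */
                    return example_rss_ctx_delete(dev, *ctx);

            if (*ctx == ETH_RXFH_CONTEXT_ALLOC) {   /* allocate a new context */
                    ret = example_rss_ctx_create(dev, ctx);
                    if (ret)
                            return ret;
            }

            if (rxfh->indir)                        /* program the table */
                    ret = example_rss_indir_set(dev, *ctx, rxfh->indir);

            return ret;
    }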
diff --git a/drivers/net/ethernet/marvell/octeon_ep/Makefile b/drivers/net/ethernet/marvell/octeon_ep/Makefile
index 2026c81181..62162ed63f 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/Makefile
+++ b/drivers/net/ethernet/marvell/octeon_ep/Makefile
@@ -6,4 +6,5 @@
obj-$(CONFIG_OCTEON_EP) += octeon_ep.o
octeon_ep-y := octep_main.o octep_cn9k_pf.o octep_tx.o octep_rx.o \
- octep_ethtool.o octep_ctrl_mbox.o octep_ctrl_net.o
+ octep_ethtool.o octep_ctrl_mbox.o octep_ctrl_net.o \
+ octep_pfvf_mbox.o octep_cnxk_pf.o
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
index d4ee245467..b580596940 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
@@ -216,16 +216,21 @@ static void octep_init_config_cn93_pf(struct octep_device *oct)
conf->sriov_cfg.vf_srn = CN93_SDP_EPF_RINFO_SRN(val);
val = octep_read_csr64(oct, CN93_SDP_MAC_PF_RING_CTL(oct->pcie_port));
- conf->pf_ring_cfg.srn = CN93_SDP_MAC_PF_RING_CTL_SRN(val);
- conf->pf_ring_cfg.max_io_rings = CN93_SDP_MAC_PF_RING_CTL_RPPF(val);
- conf->pf_ring_cfg.active_io_rings = conf->pf_ring_cfg.max_io_rings;
+ if (oct->chip_id == OCTEP_PCI_DEVICE_ID_CN98_PF) {
+ conf->pf_ring_cfg.srn = CN98_SDP_MAC_PF_RING_CTL_SRN(val);
+ conf->pf_ring_cfg.max_io_rings = CN98_SDP_MAC_PF_RING_CTL_RPPF(val);
+ conf->pf_ring_cfg.active_io_rings = conf->pf_ring_cfg.max_io_rings;
+ } else {
+ conf->pf_ring_cfg.srn = CN93_SDP_MAC_PF_RING_CTL_SRN(val);
+ conf->pf_ring_cfg.max_io_rings = CN93_SDP_MAC_PF_RING_CTL_RPPF(val);
+ conf->pf_ring_cfg.active_io_rings = conf->pf_ring_cfg.max_io_rings;
+ }
dev_info(&pdev->dev, "pf_srn=%u rpvf=%u nvfs=%u rppf=%u\n",
conf->pf_ring_cfg.srn, conf->sriov_cfg.active_rings_per_vf,
conf->sriov_cfg.active_vfs, conf->pf_ring_cfg.active_io_rings);
conf->iq.num_descs = OCTEP_IQ_MAX_DESCRIPTORS;
conf->iq.instr_type = OCTEP_64BYTE_INSTR;
- conf->iq.pkind = 0;
conf->iq.db_min = OCTEP_DB_MIN;
conf->iq.intr_threshold = OCTEP_IQ_INTR_THRESHOLD;
@@ -357,16 +362,55 @@ static void octep_setup_mbox_regs_cn93_pf(struct octep_device *oct, int q_no)
{
struct octep_mbox *mbox = oct->mbox[q_no];
- mbox->q_no = q_no;
-
- /* PF mbox interrupt reg */
- mbox->mbox_int_reg = oct->mmio[0].hw_addr + CN93_SDP_EPF_MBOX_RINT(0);
-
/* PF to VF DATA reg. PF writes into this reg */
- mbox->mbox_write_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_PF_VF_DATA(q_no);
+ mbox->pf_vf_data_reg = oct->mmio[0].hw_addr + CN93_SDP_MBOX_PF_VF_DATA(q_no);
/* VF to PF DATA reg. PF reads from this reg */
- mbox->mbox_read_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_VF_PF_DATA(q_no);
+ mbox->vf_pf_data_reg = oct->mmio[0].hw_addr + CN93_SDP_MBOX_VF_PF_DATA(q_no);
+}
+
+/* Poll for mailbox messages from VF */
+static void octep_poll_pfvf_mailbox(struct octep_device *oct)
+{
+ u32 vf, active_vfs, active_rings_per_vf, vf_mbox_queue;
+ u64 reg0, reg1;
+
+ reg0 = octep_read_csr64(oct, CN93_SDP_EPF_MBOX_RINT(0));
+ reg1 = octep_read_csr64(oct, CN93_SDP_EPF_MBOX_RINT(1));
+ if (reg0 || reg1) {
+ active_vfs = CFG_GET_ACTIVE_VFS(oct->conf);
+ active_rings_per_vf = CFG_GET_ACTIVE_RPVF(oct->conf);
+ for (vf = 0; vf < active_vfs; vf++) {
+ vf_mbox_queue = vf * active_rings_per_vf;
+
+ if (vf_mbox_queue < 64) {
+ if (!(reg0 & (0x1UL << vf_mbox_queue)))
+ continue;
+ } else {
+ if (!(reg1 & (0x1UL << (vf_mbox_queue - 64))))
+ continue;
+ }
+
+ if (!oct->mbox[vf_mbox_queue]) {
+ dev_err(&oct->pdev->dev, "bad mbox vf %d\n", vf);
+ continue;
+ }
+ schedule_work(&oct->mbox[vf_mbox_queue]->wk.work);
+ }
+ if (reg0)
+ octep_write_csr64(oct, CN93_SDP_EPF_MBOX_RINT(0), reg0);
+ if (reg1)
+ octep_write_csr64(oct, CN93_SDP_EPF_MBOX_RINT(1), reg1);
+ }
+}
+
+/* PF-VF mailbox interrupt handler */
+static irqreturn_t octep_pfvf_mbox_intr_handler_cn93_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+
+ octep_poll_pfvf_mailbox(oct);
+ return IRQ_HANDLED;
}
/* Poll OEI events like heartbeat */
@@ -398,6 +442,7 @@ static irqreturn_t octep_oei_intr_handler_cn93_pf(void *dev)
*/
static void octep_poll_non_ioq_interrupts_cn93_pf(struct octep_device *oct)
{
+ octep_poll_pfvf_mailbox(oct);
octep_poll_oei_cn93_pf(oct);
}
@@ -578,6 +623,13 @@ static irqreturn_t octep_ioq_intr_handler_cn93_pf(void *data)
return IRQ_HANDLED;
}
+/* soft reset of 98xx */
+static int octep_soft_reset_cn98_pf(struct octep_device *oct)
+{
+ dev_info(&oct->pdev->dev, "CN98XX: skip soft reset\n");
+ return 0;
+}
+
/* soft reset of 93xx */
static int octep_soft_reset_cn93_pf(struct octep_device *oct)
{
@@ -634,6 +686,8 @@ static void octep_enable_interrupts_cn93_pf(struct octep_device *oct)
octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1S, intr_mask);
octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1S, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_MBOX_RINT_ENA_W1S(0), -1ULL);
+ octep_write_csr64(oct, CN93_SDP_EPF_MBOX_RINT_ENA_W1S(1), -1ULL);
octep_write_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S(0), -1ULL);
octep_write_csr64(oct, CN93_SDP_EPF_PP_VF_RINT_ENA_W1S(0), -1ULL);
@@ -660,6 +714,8 @@ static void octep_disable_interrupts_cn93_pf(struct octep_device *oct)
octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1C, intr_mask);
octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1C, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_MBOX_RINT_ENA_W1C(0), -1ULL);
+ octep_write_csr64(oct, CN93_SDP_EPF_MBOX_RINT_ENA_W1C(1), -1ULL);
octep_write_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C(0), -1ULL);
octep_write_csr64(oct, CN93_SDP_EPF_PP_VF_RINT_ENA_W1C(0), -1ULL);
@@ -795,6 +851,7 @@ void octep_device_setup_cn93_pf(struct octep_device *oct)
oct->hw_ops.setup_oq_regs = octep_setup_oq_regs_cn93_pf;
oct->hw_ops.setup_mbox_regs = octep_setup_mbox_regs_cn93_pf;
+ oct->hw_ops.mbox_intr_handler = octep_pfvf_mbox_intr_handler_cn93_pf;
oct->hw_ops.oei_intr_handler = octep_oei_intr_handler_cn93_pf;
oct->hw_ops.ire_intr_handler = octep_ire_intr_handler_cn93_pf;
oct->hw_ops.ore_intr_handler = octep_ore_intr_handler_cn93_pf;
@@ -806,7 +863,10 @@ void octep_device_setup_cn93_pf(struct octep_device *oct)
oct->hw_ops.misc_intr_handler = octep_misc_intr_handler_cn93_pf;
oct->hw_ops.rsvd_intr_handler = octep_rsvd_intr_handler_cn93_pf;
oct->hw_ops.ioq_intr_handler = octep_ioq_intr_handler_cn93_pf;
- oct->hw_ops.soft_reset = octep_soft_reset_cn93_pf;
+ if (oct->chip_id == OCTEP_PCI_DEVICE_ID_CN98_PF)
+ oct->hw_ops.soft_reset = octep_soft_reset_cn98_pf;
+ else
+ oct->hw_ops.soft_reset = octep_soft_reset_cn93_pf;
oct->hw_ops.reinit_regs = octep_reinit_regs_cn93_pf;
oct->hw_ops.enable_interrupts = octep_enable_interrupts_cn93_pf;
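The polling loop added above maps each VF to one mailbox queue (vf * rings_per_vf) and checks the matching bit across two 64-entry MBOX_RINT registers. Reduced to a standalone helper for illustration (names are ours):

    #include <stdbool.h>
    #include <stdint.h>

    /* queues 0-63 report in MBOX_RINT(0), queues 64-127 in MBOX_RINT(1) */
    static bool example_vf_has_mbox_irq(uint64_t reg0, uint64_t reg1,
                                        uint32_t vf, uint32_t rings_per_vf)
    {
            uint32_t q = vf * rings_per_vf;  /* first (mailbox) queue of the VF */

            if (q < 64)
                    return reg0 & (1ULL << q);
            return reg1 & (1ULL << (q - 64));
    }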
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c b/drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c
new file mode 100644
index 0000000000..5de0b5ecbc
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c
@@ -0,0 +1,925 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+#include "octep_regs_cnxk_pf.h"
+
+/* We will support 128 PFs in the control mbox */
+#define CTRL_MBOX_MAX_PF 128
+#define CTRL_MBOX_SZ ((size_t)(0x400000 / CTRL_MBOX_MAX_PF))
+
+/* Names of Hardware non-queue generic interrupts */
+static char *cnxk_non_ioq_msix_names[] = {
+ "epf_ire_rint",
+ "epf_ore_rint",
+ "epf_vfire_rint",
+ "epf_rsvd0",
+ "epf_vfore_rint",
+ "epf_rsvd1",
+ "epf_mbox_rint",
+ "epf_rsvd2_0",
+ "epf_rsvd2_1",
+ "epf_dma_rint",
+ "epf_dma_vf_rint",
+ "epf_rsvd3",
+ "epf_pp_vf_rint",
+ "epf_rsvd3",
+ "epf_misc_rint",
+ "epf_rsvd5",
+ /* Next 16 are for OEI_RINT */
+ "epf_oei_rint0",
+ "epf_oei_rint1",
+ "epf_oei_rint2",
+ "epf_oei_rint3",
+ "epf_oei_rint4",
+ "epf_oei_rint5",
+ "epf_oei_rint6",
+ "epf_oei_rint7",
+ "epf_oei_rint8",
+ "epf_oei_rint9",
+ "epf_oei_rint10",
+ "epf_oei_rint11",
+ "epf_oei_rint12",
+ "epf_oei_rint13",
+ "epf_oei_rint14",
+ "epf_oei_rint15",
+ /* IOQ interrupt */
+ "octeon_ep"
+};
+
+/* Dump useful hardware CSRs for debug purpose */
+static void cnxk_dump_regs(struct octep_device *oct, int qno)
+{
+ struct device *dev = &oct->pdev->dev;
+
+ dev_info(dev, "IQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_IN_INSTR_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_IN_INSTR_DBELL(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_IN_INSTR_DBELL(qno)));
+ dev_info(dev, "R[%d]_IN_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_IN_CONTROL(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_IN_CONTROL(qno)));
+ dev_info(dev, "R[%d]_IN_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_IN_ENABLE(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_IN_ENABLE(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_IN_INSTR_BADDR(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_IN_INSTR_BADDR(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_IN_INSTR_RSIZE(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_IN_INSTR_RSIZE(qno)));
+ dev_info(dev, "R[%d]_IN_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_IN_CNTS(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_IN_CNTS(qno)));
+ dev_info(dev, "R[%d]_IN_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_IN_INT_LEVELS(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_IN_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_IN_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_IN_PKT_CNT(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_IN_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_IN_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_IN_BYTE_CNT(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_IN_BYTE_CNT(qno)));
+
+ dev_info(dev, "OQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_OUT_SLIST_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_OUT_SLIST_DBELL(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_OUT_SLIST_DBELL(qno)));
+ dev_info(dev, "R[%d]_OUT_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_OUT_CONTROL(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_OUT_CONTROL(qno)));
+ dev_info(dev, "R[%d]_OUT_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_OUT_ENABLE(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_OUT_ENABLE(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_OUT_SLIST_BADDR(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_OUT_SLIST_BADDR(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_OUT_SLIST_RSIZE(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_OUT_SLIST_RSIZE(qno)));
+ dev_info(dev, "R[%d]_OUT_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_OUT_CNTS(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_OUT_CNTS(qno)));
+ dev_info(dev, "R[%d]_OUT_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_OUT_INT_LEVELS(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_OUT_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_OUT_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_OUT_PKT_CNT(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_OUT_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_OUT_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_OUT_BYTE_CNT(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_OUT_BYTE_CNT(qno)));
+ dev_info(dev, "R[%d]_ERR_TYPE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_ERR_TYPE(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_ERR_TYPE(qno)));
+}
+
+/* Reset Hardware Tx queue */
+static int cnxk_reset_iq(struct octep_device *oct, int q_no)
+{
+ struct octep_config *conf = oct->conf;
+ u64 val = 0ULL;
+
+ dev_dbg(&oct->pdev->dev, "Reset PF IQ-%d\n", q_no);
+
+ /* Get absolute queue number */
+ q_no += conf->pf_ring_cfg.srn;
+
+ /* Disable the Tx/Instruction Ring */
+ octep_write_csr64(oct, CNXK_SDP_R_IN_ENABLE(q_no), val);
+
+ /* clear the Instruction Ring packet/byte counts and doorbell CSRs */
+ octep_write_csr64(oct, CNXK_SDP_R_IN_CNTS(q_no), val);
+ octep_write_csr64(oct, CNXK_SDP_R_IN_INT_LEVELS(q_no), val);
+ octep_write_csr64(oct, CNXK_SDP_R_IN_PKT_CNT(q_no), val);
+ octep_write_csr64(oct, CNXK_SDP_R_IN_BYTE_CNT(q_no), val);
+ octep_write_csr64(oct, CNXK_SDP_R_IN_INSTR_BADDR(q_no), val);
+ octep_write_csr64(oct, CNXK_SDP_R_IN_INSTR_RSIZE(q_no), val);
+
+ val = 0xFFFFFFFF;
+ octep_write_csr64(oct, CNXK_SDP_R_IN_INSTR_DBELL(q_no), val);
+
+ return 0;
+}
+
+/* Reset Hardware Rx queue */
+static void cnxk_reset_oq(struct octep_device *oct, int q_no)
+{
+ u64 val = 0ULL;
+
+ q_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+
+ /* Disable Output (Rx) Ring */
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_ENABLE(q_no), val);
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_BADDR(q_no), val);
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_RSIZE(q_no), val);
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_INT_LEVELS(q_no), val);
+
+ /* Clear count CSRs */
+ val = octep_read_csr(oct, CNXK_SDP_R_OUT_CNTS(q_no));
+ octep_write_csr(oct, CNXK_SDP_R_OUT_CNTS(q_no), val);
+
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_PKT_CNT(q_no), 0xFFFFFFFFFULL);
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_DBELL(q_no), 0xFFFFFFFF);
+}
+
+/* Reset all hardware Tx/Rx queues */
+static void octep_reset_io_queues_cnxk_pf(struct octep_device *oct)
+{
+ struct pci_dev *pdev = oct->pdev;
+ int q;
+
+ dev_dbg(&pdev->dev, "Reset OCTEP_CNXK PF IO Queues\n");
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ cnxk_reset_iq(oct, q);
+ cnxk_reset_oq(oct, q);
+ }
+}
+
+/* Initialize windowed addresses to access some hardware registers */
+static void octep_setup_pci_window_regs_cnxk_pf(struct octep_device *oct)
+{
+ u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr;
+
+ oct->pci_win_regs.pci_win_wr_addr = (u8 __iomem *)(bar0_pciaddr + CNXK_SDP_WIN_WR_ADDR64);
+ oct->pci_win_regs.pci_win_rd_addr = (u8 __iomem *)(bar0_pciaddr + CNXK_SDP_WIN_RD_ADDR64);
+ oct->pci_win_regs.pci_win_wr_data = (u8 __iomem *)(bar0_pciaddr + CNXK_SDP_WIN_WR_DATA64);
+ oct->pci_win_regs.pci_win_rd_data = (u8 __iomem *)(bar0_pciaddr + CNXK_SDP_WIN_RD_DATA64);
+}
+
+/* Configure Hardware mapping: inform hardware which rings belong to PF. */
+static void octep_configure_ring_mapping_cnxk_pf(struct octep_device *oct)
+{
+ struct octep_config *conf = oct->conf;
+ struct pci_dev *pdev = oct->pdev;
+ u64 pf_srn = CFG_GET_PORTS_PF_SRN(oct->conf);
+ int q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(conf); q++) {
+ u64 regval = 0;
+
+ if (oct->pcie_port)
+ regval = 8 << CNXK_SDP_FUNC_SEL_EPF_BIT_POS;
+
+ octep_write_csr64(oct, CNXK_SDP_EPVF_RING(pf_srn + q), regval);
+
+ regval = octep_read_csr64(oct, CNXK_SDP_EPVF_RING(pf_srn + q));
+ dev_dbg(&pdev->dev, "Write SDP_EPVF_RING[0x%llx] = 0x%llx\n",
+ CNXK_SDP_EPVF_RING(pf_srn + q), regval);
+ }
+}
+
+/* Initialize configuration limits and initial active config */
+static void octep_init_config_cnxk_pf(struct octep_device *oct)
+{
+ struct octep_config *conf = oct->conf;
+ struct pci_dev *pdev = oct->pdev;
+ u8 link = 0;
+ u64 val;
+ int pos;
+
+ /* Read ring configuration:
+ * PF ring count, number of VFs and rings per VF supported
+ */
+ val = octep_read_csr64(oct, CNXK_SDP_EPF_RINFO);
+ dev_info(&pdev->dev, "SDP_EPF_RINFO[0x%x]:0x%llx\n", CNXK_SDP_EPF_RINFO, val);
+ conf->sriov_cfg.max_rings_per_vf = CNXK_SDP_EPF_RINFO_RPVF(val);
+ conf->sriov_cfg.active_rings_per_vf = conf->sriov_cfg.max_rings_per_vf;
+ conf->sriov_cfg.max_vfs = CNXK_SDP_EPF_RINFO_NVFS(val);
+ conf->sriov_cfg.active_vfs = conf->sriov_cfg.max_vfs;
+ conf->sriov_cfg.vf_srn = CNXK_SDP_EPF_RINFO_SRN(val);
+
+ val = octep_read_csr64(oct, CNXK_SDP_MAC_PF_RING_CTL(oct->pcie_port));
+ dev_info(&pdev->dev, "SDP_MAC_PF_RING_CTL[%d]:0x%llx\n", oct->pcie_port, val);
+ conf->pf_ring_cfg.srn = CNXK_SDP_MAC_PF_RING_CTL_SRN(val);
+ conf->pf_ring_cfg.max_io_rings = CNXK_SDP_MAC_PF_RING_CTL_RPPF(val);
+ conf->pf_ring_cfg.active_io_rings = conf->pf_ring_cfg.max_io_rings;
+ dev_info(&pdev->dev, "pf_srn=%u rpvf=%u nvfs=%u rppf=%u\n",
+ conf->pf_ring_cfg.srn, conf->sriov_cfg.active_rings_per_vf,
+ conf->sriov_cfg.active_vfs, conf->pf_ring_cfg.active_io_rings);
+
+ conf->iq.num_descs = OCTEP_IQ_MAX_DESCRIPTORS;
+ conf->iq.instr_type = OCTEP_64BYTE_INSTR;
+ conf->iq.db_min = OCTEP_DB_MIN;
+ conf->iq.intr_threshold = OCTEP_IQ_INTR_THRESHOLD;
+
+ conf->oq.num_descs = OCTEP_OQ_MAX_DESCRIPTORS;
+ conf->oq.buf_size = OCTEP_OQ_BUF_SIZE;
+ conf->oq.refill_threshold = OCTEP_OQ_REFILL_THRESHOLD;
+ conf->oq.oq_intr_pkt = OCTEP_OQ_INTR_PKT_THRESHOLD;
+ conf->oq.oq_intr_time = OCTEP_OQ_INTR_TIME_THRESHOLD;
+ conf->oq.wmark = OCTEP_OQ_WMARK_MIN;
+
+ conf->msix_cfg.non_ioq_msix = CNXK_NUM_NON_IOQ_INTR;
+ conf->msix_cfg.ioq_msix = conf->pf_ring_cfg.active_io_rings;
+ conf->msix_cfg.non_ioq_msix_names = cnxk_non_ioq_msix_names;
+
+ pos = pci_find_ext_capability(oct->pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (pos) {
+ pci_read_config_byte(oct->pdev,
+ pos + PCI_SRIOV_FUNC_LINK,
+ &link);
+ link = PCI_DEVFN(PCI_SLOT(oct->pdev->devfn), link);
+ }
+ conf->ctrl_mbox_cfg.barmem_addr = (void __iomem *)oct->mmio[2].hw_addr +
+ CNXK_PEM_BAR4_INDEX_OFFSET +
+ (link * CTRL_MBOX_SZ);
+
+ conf->fw_info.hb_interval = OCTEP_DEFAULT_FW_HB_INTERVAL;
+ conf->fw_info.hb_miss_count = OCTEP_DEFAULT_FW_HB_MISS_COUNT;
+}
+
+/* Setup registers for a hardware Tx Queue */
+static void octep_setup_iq_regs_cnxk_pf(struct octep_device *oct, int iq_no)
+{
+ struct octep_iq *iq = oct->iq[iq_no];
+ u32 reset_instr_cnt;
+ u64 reg_val;
+
+ iq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+ reg_val = octep_read_csr64(oct, CNXK_SDP_R_IN_CONTROL(iq_no));
+
+ /* wait for IDLE to set to 1 */
+ if (!(reg_val & CNXK_R_IN_CTL_IDLE)) {
+ do {
+ reg_val = octep_read_csr64(oct, CNXK_SDP_R_IN_CONTROL(iq_no));
+ } while (!(reg_val & CNXK_R_IN_CTL_IDLE));
+ }
+
+ reg_val |= CNXK_R_IN_CTL_RDSIZE;
+ reg_val |= CNXK_R_IN_CTL_IS_64B;
+ reg_val |= CNXK_R_IN_CTL_ESR;
+ octep_write_csr64(oct, CNXK_SDP_R_IN_CONTROL(iq_no), reg_val);
+
+ /* Write the start of the input queue's ring and its size */
+ octep_write_csr64(oct, CNXK_SDP_R_IN_INSTR_BADDR(iq_no),
+ iq->desc_ring_dma);
+ octep_write_csr64(oct, CNXK_SDP_R_IN_INSTR_RSIZE(iq_no),
+ iq->max_count);
+
+ /* Remember the doorbell & instruction count register addr
+ * for this queue
+ */
+ iq->doorbell_reg = oct->mmio[0].hw_addr +
+ CNXK_SDP_R_IN_INSTR_DBELL(iq_no);
+ iq->inst_cnt_reg = oct->mmio[0].hw_addr +
+ CNXK_SDP_R_IN_CNTS(iq_no);
+ iq->intr_lvl_reg = oct->mmio[0].hw_addr +
+ CNXK_SDP_R_IN_INT_LEVELS(iq_no);
+
+ /* Store the current instruction counter (used in flush_iq calculation) */
+ reset_instr_cnt = readl(iq->inst_cnt_reg);
+ writel(reset_instr_cnt, iq->inst_cnt_reg);
+
+ /* INTR_THRESHOLD is set to max(FFFFFFFF) to disable the INTR */
+ reg_val = CFG_GET_IQ_INTR_THRESHOLD(oct->conf) & 0xffffffff;
+ octep_write_csr64(oct, CNXK_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+}
+
+/* Setup registers for a hardware Rx Queue */
+static void octep_setup_oq_regs_cnxk_pf(struct octep_device *oct, int oq_no)
+{
+ u64 reg_val;
+ u64 oq_ctl = 0ULL;
+ u32 time_threshold = 0;
+ struct octep_oq *oq = oct->oq[oq_no];
+
+ oq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+ reg_val = octep_read_csr64(oct, CNXK_SDP_R_OUT_CONTROL(oq_no));
+
+ /* wait for IDLE to set to 1 */
+ if (!(reg_val & CNXK_R_OUT_CTL_IDLE)) {
+ do {
+ reg_val = octep_read_csr64(oct, CNXK_SDP_R_OUT_CONTROL(oq_no));
+ } while (!(reg_val & CNXK_R_OUT_CTL_IDLE));
+ }
+
+ reg_val &= ~(CNXK_R_OUT_CTL_IMODE);
+ reg_val &= ~(CNXK_R_OUT_CTL_ROR_P);
+ reg_val &= ~(CNXK_R_OUT_CTL_NSR_P);
+ reg_val &= ~(CNXK_R_OUT_CTL_ROR_I);
+ reg_val &= ~(CNXK_R_OUT_CTL_NSR_I);
+ reg_val &= ~(CNXK_R_OUT_CTL_ES_I);
+ reg_val &= ~(CNXK_R_OUT_CTL_ROR_D);
+ reg_val &= ~(CNXK_R_OUT_CTL_NSR_D);
+ reg_val &= ~(CNXK_R_OUT_CTL_ES_D);
+ reg_val |= (CNXK_R_OUT_CTL_ES_P);
+
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_CONTROL(oq_no), reg_val);
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_BADDR(oq_no),
+ oq->desc_ring_dma);
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_RSIZE(oq_no),
+ oq->max_count);
+
+ oq_ctl = octep_read_csr64(oct, CNXK_SDP_R_OUT_CONTROL(oq_no));
+
+ /* Clear the ISIZE and BSIZE (22-0) */
+ oq_ctl &= ~0x7fffffULL;
+
+ /* Populate the BSIZE (15-0) */
+ oq_ctl |= (oq->buffer_size & 0xffff);
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_CONTROL(oq_no), oq_ctl);
+
+ /* Get the mapped address of the pkt_sent and pkts_credit regs */
+ oq->pkts_sent_reg = oct->mmio[0].hw_addr + CNXK_SDP_R_OUT_CNTS(oq_no);
+ oq->pkts_credit_reg = oct->mmio[0].hw_addr +
+ CNXK_SDP_R_OUT_SLIST_DBELL(oq_no);
+
+ time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf);
+ reg_val = ((u64)time_threshold << 32) |
+ CFG_GET_OQ_INTR_PKT(oct->conf);
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+
+ /* set watermark for backpressure */
+ reg_val = octep_read_csr64(oct, CNXK_SDP_R_OUT_WMARK(oq_no));
+ reg_val &= ~0xFFFFFFFFULL;
+ reg_val |= CFG_GET_OQ_WMARK(oct->conf);
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_WMARK(oq_no), reg_val);
+}
+
+/* Setup registers for a PF mailbox */
+static void octep_setup_mbox_regs_cnxk_pf(struct octep_device *oct, int q_no)
+{
+ struct octep_mbox *mbox = oct->mbox[q_no];
+
+ /* PF to VF DATA reg. PF writes into this reg */
+ mbox->pf_vf_data_reg = oct->mmio[0].hw_addr + CNXK_SDP_MBOX_PF_VF_DATA(q_no);
+
+ /* VF to PF DATA reg. PF reads from this reg */
+ mbox->vf_pf_data_reg = oct->mmio[0].hw_addr + CNXK_SDP_MBOX_VF_PF_DATA(q_no);
+}
+
+static void octep_poll_pfvf_mailbox_cnxk_pf(struct octep_device *oct)
+{
+ u32 vf, active_vfs, active_rings_per_vf, vf_mbox_queue;
+ u64 reg0;
+
+ reg0 = octep_read_csr64(oct, CNXK_SDP_EPF_MBOX_RINT(0));
+ if (reg0) {
+ active_vfs = CFG_GET_ACTIVE_VFS(oct->conf);
+ active_rings_per_vf = CFG_GET_ACTIVE_RPVF(oct->conf);
+ for (vf = 0; vf < active_vfs; vf++) {
+ vf_mbox_queue = vf * active_rings_per_vf;
+ if (!(reg0 & (0x1UL << vf_mbox_queue)))
+ continue;
+
+ if (!oct->mbox[vf_mbox_queue]) {
+ dev_err(&oct->pdev->dev, "bad mbox vf %d\n", vf);
+ continue;
+ }
+ schedule_work(&oct->mbox[vf_mbox_queue]->wk.work);
+ }
+ if (reg0)
+ octep_write_csr64(oct, CNXK_SDP_EPF_MBOX_RINT(0), reg0);
+ }
+}
+
+static irqreturn_t octep_pfvf_mbox_intr_handler_cnxk_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+
+ octep_poll_pfvf_mailbox_cnxk_pf(oct);
+ return IRQ_HANDLED;
+}
+
+/* Poll OEI events like heartbeat */
+static void octep_poll_oei_cnxk_pf(struct octep_device *oct)
+{
+ u64 reg0;
+
+ /* Check for OEI INTR */
+ reg0 = octep_read_csr64(oct, CNXK_SDP_EPF_OEI_RINT);
+ if (reg0) {
+ octep_write_csr64(oct, CNXK_SDP_EPF_OEI_RINT, reg0);
+ if (reg0 & CNXK_SDP_EPF_OEI_RINT_DATA_BIT_MBOX)
+ queue_work(octep_wq, &oct->ctrl_mbox_task);
+ if (reg0 & CNXK_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT)
+ atomic_set(&oct->hb_miss_cnt, 0);
+ }
+}
+
+/* OEI interrupt handler */
+static irqreturn_t octep_oei_intr_handler_cnxk_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+
+ octep_poll_oei_cnxk_pf(oct);
+ return IRQ_HANDLED;
+}
+
+/* Process non-ioq interrupts required to keep pf interface running.
+ * OEI_RINT is needed for control mailbox
+ * MBOX_RINT is needed for pfvf mailbox
+ */
+static void octep_poll_non_ioq_interrupts_cnxk_pf(struct octep_device *oct)
+{
+ octep_poll_pfvf_mailbox_cnxk_pf(oct);
+ octep_poll_oei_cnxk_pf(oct);
+}
+
+/* Interrupt handler for input ring error interrupts. */
+static irqreturn_t octep_ire_intr_handler_cnxk_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
+ int i = 0;
+
+ /* Check for IRERR INTR */
+ reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_IRERR_RINT);
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "received IRERR_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CNXK_SDP_EPF_IRERR_RINT, reg_val);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ reg_val = octep_read_csr64(oct,
+ CNXK_SDP_R_ERR_TYPE(i));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received err type on IQ-%d: 0x%llx\n",
+ i, reg_val);
+ octep_write_csr64(oct, CNXK_SDP_R_ERR_TYPE(i),
+ reg_val);
+ }
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for output ring error interrupts. */
+static irqreturn_t octep_ore_intr_handler_cnxk_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
+ int i = 0;
+
+ /* Check for ORERR INTR */
+ reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_ORERR_RINT);
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received ORERR_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CNXK_SDP_EPF_ORERR_RINT, reg_val);
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ reg_val = octep_read_csr64(oct, CNXK_SDP_R_ERR_TYPE(i));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received err type on OQ-%d: 0x%llx\n",
+ i, reg_val);
+ octep_write_csr64(oct, CNXK_SDP_R_ERR_TYPE(i),
+ reg_val);
+ }
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for vf input ring error interrupts. */
+static irqreturn_t octep_vfire_intr_handler_cnxk_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
+
+ /* Check for VFIRE INTR */
+ reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_VFIRE_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received VFIRE_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CNXK_SDP_EPF_VFIRE_RINT(0), reg_val);
+ }
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for vf output ring error interrupts. */
+static irqreturn_t octep_vfore_intr_handler_cnxk_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
+
+ /* Check for VFORE INTR */
+ reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_VFORE_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received VFORE_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CNXK_SDP_EPF_VFORE_RINT(0), reg_val);
+ }
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for dpi dma related interrupts. */
+static irqreturn_t octep_dma_intr_handler_cnxk_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ u64 reg_val = 0;
+
+ /* Check for DMA INTR */
+ reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_DMA_RINT);
+ if (reg_val)
+ octep_write_csr64(oct, CNXK_SDP_EPF_DMA_RINT, reg_val);
+
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for dpi dma transaction error interrupts for VFs */
+static irqreturn_t octep_dma_vf_intr_handler_cnxk_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
+
+ /* Check for DMA VF INTR */
+ reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_DMA_VF_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received DMA_VF_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CNXK_SDP_EPF_DMA_VF_RINT(0), reg_val);
+ }
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for pp transaction error interrupts for VFs */
+static irqreturn_t octep_pp_vf_intr_handler_cnxk_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
+
+ /* Check for PPVF INTR */
+ reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_PP_VF_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received PP_VF_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CNXK_SDP_EPF_PP_VF_RINT(0), reg_val);
+ }
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for mac related interrupts. */
+static irqreturn_t octep_misc_intr_handler_cnxk_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
+
+ /* Check for MISC INTR */
+ reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_MISC_RINT);
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received MISC_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CNXK_SDP_EPF_MISC_RINT, reg_val);
+ }
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for all reserved interrupts. */
+static irqreturn_t octep_rsvd_intr_handler_cnxk_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+
+ dev_info(&pdev->dev, "Reserved interrupts raised; Ignore\n");
+ return IRQ_HANDLED;
+}
+
+/* Tx/Rx queue interrupt handler */
+static irqreturn_t octep_ioq_intr_handler_cnxk_pf(void *data)
+{
+ struct octep_ioq_vector *vector = (struct octep_ioq_vector *)data;
+ struct octep_oq *oq = vector->oq;
+
+ napi_schedule_irqoff(oq->napi);
+ return IRQ_HANDLED;
+}
+
+/* soft reset */
+static int octep_soft_reset_cnxk_pf(struct octep_device *oct)
+{
+ dev_info(&oct->pdev->dev, "CNXKXX: Doing soft reset\n");
+
+ octep_write_csr64(oct, CNXK_SDP_WIN_WR_MASK_REG, 0xFF);
+
+ /* Firmware status CSR is supposed to be cleared by
+ * core domain reset, but due to a hw bug, it is not.
+ * Set it to RUNNING right before reset so that it is not
+ * left in READY (1) state after a reset. This is required
+ * in addition to the early setting to handle the case where
+ * the OcteonTX is unexpectedly reset, reboots, and then
+ * the module is removed.
+ */
+ OCTEP_PCI_WIN_WRITE(oct, CNXK_PEMX_PFX_CSX_PFCFGX(0, 0, CNXK_PCIEEP_VSECST_CTL),
+ FW_STATUS_RUNNING);
+
+ /* Set chip domain reset bit */
+ OCTEP_PCI_WIN_WRITE(oct, CNXK_RST_CHIP_DOMAIN_W1S, 1);
+ /* Wait till Octeon resets. */
+ mdelay(10);
+ /* restore the reset value */
+ octep_write_csr64(oct, CNXK_SDP_WIN_WR_MASK_REG, 0xFF);
+
+ return 0;
+}
+
+/* Re-initialize Octeon hardware registers */
+static void octep_reinit_regs_cnxk_pf(struct octep_device *oct)
+{
+ u32 i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_iq_regs(oct, i);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_oq_regs(oct, i);
+
+ oct->hw_ops.enable_interrupts(oct);
+ oct->hw_ops.enable_io_queues(oct);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
+}
+
+/* Enable all interrupts */
+static void octep_enable_interrupts_cnxk_pf(struct octep_device *oct)
+{
+ u64 intr_mask = 0ULL;
+ int srn, num_rings, i;
+
+ srn = CFG_GET_PORTS_PF_SRN(oct->conf);
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+ for (i = 0; i < num_rings; i++)
+ intr_mask |= (0x1ULL << (srn + i));
+
+ octep_write_csr64(oct, CNXK_SDP_EPF_IRERR_RINT_ENA_W1S, intr_mask);
+ octep_write_csr64(oct, CNXK_SDP_EPF_ORERR_RINT_ENA_W1S, intr_mask);
+ octep_write_csr64(oct, CNXK_SDP_EPF_OEI_RINT_ENA_W1S, -1ULL);
+
+ octep_write_csr64(oct, CNXK_SDP_EPF_VFIRE_RINT_ENA_W1S(0), -1ULL);
+ octep_write_csr64(oct, CNXK_SDP_EPF_VFORE_RINT_ENA_W1S(0), -1ULL);
+
+ octep_write_csr64(oct, CNXK_SDP_EPF_MISC_RINT_ENA_W1S, intr_mask);
+ octep_write_csr64(oct, CNXK_SDP_EPF_DMA_RINT_ENA_W1S, intr_mask);
+ octep_write_csr64(oct, CNXK_SDP_EPF_MBOX_RINT_ENA_W1S(0), -1ULL);
+
+ octep_write_csr64(oct, CNXK_SDP_EPF_DMA_VF_RINT_ENA_W1S(0), -1ULL);
+ octep_write_csr64(oct, CNXK_SDP_EPF_PP_VF_RINT_ENA_W1S(0), -1ULL);
+}
+
+/* Disable all interrupts */
+static void octep_disable_interrupts_cnxk_pf(struct octep_device *oct)
+{
+ u64 intr_mask = 0ULL;
+ int srn, num_rings, i;
+
+ srn = CFG_GET_PORTS_PF_SRN(oct->conf);
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+ for (i = 0; i < num_rings; i++)
+ intr_mask |= (0x1ULL << (srn + i));
+
+ octep_write_csr64(oct, CNXK_SDP_EPF_IRERR_RINT_ENA_W1C, intr_mask);
+ octep_write_csr64(oct, CNXK_SDP_EPF_ORERR_RINT_ENA_W1C, intr_mask);
+ octep_write_csr64(oct, CNXK_SDP_EPF_OEI_RINT_ENA_W1C, -1ULL);
+
+ octep_write_csr64(oct, CNXK_SDP_EPF_VFIRE_RINT_ENA_W1C(0), -1ULL);
+ octep_write_csr64(oct, CNXK_SDP_EPF_VFORE_RINT_ENA_W1C(0), -1ULL);
+
+ octep_write_csr64(oct, CNXK_SDP_EPF_MISC_RINT_ENA_W1C, intr_mask);
+ octep_write_csr64(oct, CNXK_SDP_EPF_DMA_RINT_ENA_W1C, intr_mask);
+ octep_write_csr64(oct, CNXK_SDP_EPF_MBOX_RINT_ENA_W1C(0), -1ULL);
+
+ octep_write_csr64(oct, CNXK_SDP_EPF_DMA_VF_RINT_ENA_W1C(0), -1ULL);
+ octep_write_csr64(oct, CNXK_SDP_EPF_PP_VF_RINT_ENA_W1C(0), -1ULL);
+}
+
+/* Get new Octeon Read Index: index of descriptor that Octeon reads next. */
+static u32 octep_update_iq_read_index_cnxk_pf(struct octep_iq *iq)
+{
+ u32 pkt_in_done = readl(iq->inst_cnt_reg);
+ u32 last_done, new_idx;
+
+ last_done = pkt_in_done - iq->pkt_in_done;
+ iq->pkt_in_done = pkt_in_done;
+
+ new_idx = (iq->octep_read_index + last_done) % iq->max_count;
+
+ return new_idx;
+}
+
+/* Enable a hardware Tx Queue */
+static void octep_enable_iq_cnxk_pf(struct octep_device *oct, int iq_no)
+{
+ u64 loop = HZ;
+ u64 reg_val;
+
+ iq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+
+ octep_write_csr64(oct, CNXK_SDP_R_IN_INSTR_DBELL(iq_no), 0xFFFFFFFF);
+
+ while (octep_read_csr64(oct, CNXK_SDP_R_IN_INSTR_DBELL(iq_no)) &&
+ loop--) {
+ schedule_timeout_interruptible(1);
+ }
+
+ reg_val = octep_read_csr64(oct, CNXK_SDP_R_IN_INT_LEVELS(iq_no));
+ reg_val |= (0x1ULL << 62);
+ octep_write_csr64(oct, CNXK_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+
+ reg_val = octep_read_csr64(oct, CNXK_SDP_R_IN_ENABLE(iq_no));
+ reg_val |= 0x1ULL;
+ octep_write_csr64(oct, CNXK_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Enable a hardware Rx Queue */
+static void octep_enable_oq_cnxk_pf(struct octep_device *oct, int oq_no)
+{
+ u64 reg_val = 0ULL;
+
+ oq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+
+ reg_val = octep_read_csr64(oct, CNXK_SDP_R_OUT_INT_LEVELS(oq_no));
+ reg_val |= (0x1ULL << 62);
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_DBELL(oq_no), 0xFFFFFFFF);
+
+ reg_val = octep_read_csr64(oct, CNXK_SDP_R_OUT_ENABLE(oq_no));
+ reg_val |= 0x1ULL;
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Enable all hardware Tx/Rx Queues assigned to PF */
+static void octep_enable_io_queues_cnxk_pf(struct octep_device *oct)
+{
+ u8 q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_enable_iq_cnxk_pf(oct, q);
+ octep_enable_oq_cnxk_pf(oct, q);
+ }
+}
+
+/* Disable a hardware Tx Queue assigned to PF */
+static void octep_disable_iq_cnxk_pf(struct octep_device *oct, int iq_no)
+{
+ u64 reg_val = 0ULL;
+
+ iq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+
+ reg_val = octep_read_csr64(oct, CNXK_SDP_R_IN_ENABLE(iq_no));
+ reg_val &= ~0x1ULL;
+ octep_write_csr64(oct, CNXK_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Disable a hardware Rx Queue assigned to PF */
+static void octep_disable_oq_cnxk_pf(struct octep_device *oct, int oq_no)
+{
+ u64 reg_val = 0ULL;
+
+ oq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+ reg_val = octep_read_csr64(oct, CNXK_SDP_R_OUT_ENABLE(oq_no));
+ reg_val &= ~0x1ULL;
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Disable all hardware Tx/Rx Queues assigned to PF */
+static void octep_disable_io_queues_cnxk_pf(struct octep_device *oct)
+{
+ int q = 0;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_disable_iq_cnxk_pf(oct, q);
+ octep_disable_oq_cnxk_pf(oct, q);
+ }
+}
+
+/* Dump hardware registers (including Tx/Rx queues) for debugging. */
+static void octep_dump_registers_cnxk_pf(struct octep_device *oct)
+{
+ u8 srn, num_rings, q;
+
+ srn = CFG_GET_PORTS_PF_SRN(oct->conf);
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+ for (q = srn; q < srn + num_rings; q++)
+ cnxk_dump_regs(oct, q);
+}
+
+/**
+ * octep_device_setup_cnxk_pf() - Setup Octeon device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * - initialize hardware operations.
+ * - get target side pcie port number for the device.
+ * - setup window access to hardware registers.
+ * - set initial configuration and max limits.
+ * - setup hardware mapping of rings to the PF device.
+ */
+void octep_device_setup_cnxk_pf(struct octep_device *oct)
+{
+ oct->hw_ops.setup_iq_regs = octep_setup_iq_regs_cnxk_pf;
+ oct->hw_ops.setup_oq_regs = octep_setup_oq_regs_cnxk_pf;
+ oct->hw_ops.setup_mbox_regs = octep_setup_mbox_regs_cnxk_pf;
+
+ oct->hw_ops.mbox_intr_handler = octep_pfvf_mbox_intr_handler_cnxk_pf;
+ oct->hw_ops.oei_intr_handler = octep_oei_intr_handler_cnxk_pf;
+ oct->hw_ops.ire_intr_handler = octep_ire_intr_handler_cnxk_pf;
+ oct->hw_ops.ore_intr_handler = octep_ore_intr_handler_cnxk_pf;
+ oct->hw_ops.vfire_intr_handler = octep_vfire_intr_handler_cnxk_pf;
+ oct->hw_ops.vfore_intr_handler = octep_vfore_intr_handler_cnxk_pf;
+ oct->hw_ops.dma_intr_handler = octep_dma_intr_handler_cnxk_pf;
+ oct->hw_ops.dma_vf_intr_handler = octep_dma_vf_intr_handler_cnxk_pf;
+ oct->hw_ops.pp_vf_intr_handler = octep_pp_vf_intr_handler_cnxk_pf;
+ oct->hw_ops.misc_intr_handler = octep_misc_intr_handler_cnxk_pf;
+ oct->hw_ops.rsvd_intr_handler = octep_rsvd_intr_handler_cnxk_pf;
+ oct->hw_ops.ioq_intr_handler = octep_ioq_intr_handler_cnxk_pf;
+ oct->hw_ops.soft_reset = octep_soft_reset_cnxk_pf;
+ oct->hw_ops.reinit_regs = octep_reinit_regs_cnxk_pf;
+
+ oct->hw_ops.enable_interrupts = octep_enable_interrupts_cnxk_pf;
+ oct->hw_ops.disable_interrupts = octep_disable_interrupts_cnxk_pf;
+ oct->hw_ops.poll_non_ioq_interrupts = octep_poll_non_ioq_interrupts_cnxk_pf;
+
+ oct->hw_ops.update_iq_read_idx = octep_update_iq_read_index_cnxk_pf;
+
+ oct->hw_ops.enable_iq = octep_enable_iq_cnxk_pf;
+ oct->hw_ops.enable_oq = octep_enable_oq_cnxk_pf;
+ oct->hw_ops.enable_io_queues = octep_enable_io_queues_cnxk_pf;
+
+ oct->hw_ops.disable_iq = octep_disable_iq_cnxk_pf;
+ oct->hw_ops.disable_oq = octep_disable_oq_cnxk_pf;
+ oct->hw_ops.disable_io_queues = octep_disable_io_queues_cnxk_pf;
+ oct->hw_ops.reset_io_queues = octep_reset_io_queues_cnxk_pf;
+
+ oct->hw_ops.dump_registers = octep_dump_registers_cnxk_pf;
+
+ octep_setup_pci_window_regs_cnxk_pf(oct);
+
+ oct->pcie_port = octep_read_csr64(oct, CNXK_SDP_MAC_NUMBER) & 0xff;
+ dev_info(&oct->pdev->dev,
+ "Octeon device using PCIE Port %d\n", oct->pcie_port);
+
+ octep_init_config_cnxk_pf(oct);
+ octep_configure_ring_mapping_cnxk_pf(oct);
+
+ /* Firmware status CSR is supposed to be cleared by
+ * core domain reset, but due to IPBUPEM-38842, it is not.
+ * Set it to RUNNING early in boot, so that unexpected resets
+ * leave it in a state that is not READY (1).
+ */
+ OCTEP_PCI_WIN_WRITE(oct, CNXK_PEMX_PFX_CSX_PFCFGX(0, 0, CNXK_PCIEEP_VSECST_CTL),
+ FW_STATUS_RUNNING);
+}
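One subtlety in the new file: octep_update_iq_read_index_cnxk_pf() leans on unsigned 32-bit wrap-around, so pkt_in_done - iq->pkt_in_done yields the number of newly consumed commands even after the hardware counter wraps. A worked fragment with assumed values (read_idx and max_count stand in for the iq fields):

    uint32_t hw_cnt = 0x00000005;           /* counter after wrapping past 2^32 */
    uint32_t cached = 0xfffffffe;           /* value saved on the previous poll */
    uint32_t done = hw_cnt - cached;        /* == 7, modulo-2^32 subtraction */
    uint32_t new_idx = (read_idx + done) % max_count;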
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_config.h b/drivers/net/ethernet/marvell/octeon_ep/octep_config.h
index 1622a6ebf0..1627660175 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_config.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_config.h
@@ -13,12 +13,16 @@
#define OCTEP_64BYTE_INSTR 64
/* Tx Queue: maximum descriptors per ring */
+/* This needs to be a power of 2 */
#define OCTEP_IQ_MAX_DESCRIPTORS 1024
/* Minimum input (Tx) requests to be enqueued to ring doorbell */
-#define OCTEP_DB_MIN 1
+#define OCTEP_DB_MIN 8
/* Packet threshold for Tx queue interrupt */
#define OCTEP_IQ_INTR_THRESHOLD 0x0
+/* Minimum watermark for backpressure */
+#define OCTEP_OQ_WMARK_MIN 256
+
/* Rx Queue: maximum descriptors per ring */
#define OCTEP_OQ_MAX_DESCRIPTORS 1024
@@ -44,8 +48,6 @@
/* Minimum MTU supported by Octeon network interface */
#define OCTEP_MIN_MTU ETH_MIN_MTU
-/* Maximum MTU supported by Octeon interface*/
-#define OCTEP_MAX_MTU (10000 - (ETH_HLEN + ETH_FCS_LEN))
/* Default MTU */
#define OCTEP_DEFAULT_MTU 1500
@@ -58,7 +60,6 @@
#define CFG_GET_IQ_CFG(cfg) ((cfg)->iq)
#define CFG_GET_IQ_NUM_DESC(cfg) ((cfg)->iq.num_descs)
#define CFG_GET_IQ_INSTR_TYPE(cfg) ((cfg)->iq.instr_type)
-#define CFG_GET_IQ_PKIND(cfg) ((cfg)->iq.pkind)
#define CFG_GET_IQ_INSTR_SIZE(cfg) (64)
#define CFG_GET_IQ_DB_MIN(cfg) ((cfg)->iq.db_min)
#define CFG_GET_IQ_INTR_THRESHOLD(cfg) ((cfg)->iq.intr_threshold)
@@ -68,12 +69,12 @@
#define CFG_GET_OQ_REFILL_THRESHOLD(cfg) ((cfg)->oq.refill_threshold)
#define CFG_GET_OQ_INTR_PKT(cfg) ((cfg)->oq.oq_intr_pkt)
#define CFG_GET_OQ_INTR_TIME(cfg) ((cfg)->oq.oq_intr_time)
+#define CFG_GET_OQ_WMARK(cfg) ((cfg)->oq.wmark)
#define CFG_GET_PORTS_MAX_IO_RINGS(cfg) ((cfg)->pf_ring_cfg.max_io_rings)
#define CFG_GET_PORTS_ACTIVE_IO_RINGS(cfg) ((cfg)->pf_ring_cfg.active_io_rings)
#define CFG_GET_PORTS_PF_SRN(cfg) ((cfg)->pf_ring_cfg.srn)
-#define CFG_GET_DPI_PKIND(cfg) ((cfg)->core_cfg.dpi_pkind)
#define CFG_GET_CORE_TICS_PER_US(cfg) ((cfg)->core_cfg.core_tics_per_us)
#define CFG_GET_COPROC_TICS_PER_US(cfg) ((cfg)->core_cfg.coproc_tics_per_us)
@@ -97,9 +98,6 @@ struct octep_iq_config {
/* Command size - 32 or 64 bytes */
u16 instr_type;
- /* pkind for packets sent to Octeon */
- u16 pkind;
-
/* Minimum number of commands pending to be posted to Octeon before driver
* hits the Input queue doorbell.
*/
@@ -137,6 +135,12 @@ struct octep_oq_config {
* default. The time is specified in microseconds.
*/
u32 oq_intr_time;
+
+ /* Water mark for backpressure.
+ * Output queue sends backpressure signal to source when
+ * free buffer count falls below wmark.
+ */
+ u32 wmark;
};
/* Tx/Rx configuration */
@@ -189,11 +193,37 @@ struct octep_ctrl_mbox_config {
/* Info from firmware */
struct octep_fw_info {
/* interface pkind */
- u16 pkind;
+ u8 pkind;
+
+ /* front size data */
+ u8 fsz;
+
/* heartbeat interval in milliseconds */
u16 hb_interval;
+
/* heartbeat miss count */
u16 hb_miss_count;
+
+ /* reserved */
+ u16 reserved1;
+
+ /* supported rx offloads OCTEP_ETH_RX_OFFLOAD_* */
+ u16 rx_ol_flags;
+
+ /* supported tx offloads OCTEP_ETH_TX_OFFLOAD_* */
+ u16 tx_ol_flags;
+
+ /* reserved */
+ u32 reserved_offloads;
+
+ /* extra offload flags */
+ u64 ext_ol_flags;
+
+ /* supported features */
+ u64 features[2];
+
+ /* reserved */
+ u64 reserved2[3];
};
/* Data Structure to hold configuration limits and active config */
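The new wmark field encodes when the output queue asserts backpressure; per the comment above, the hardware behaviour amounts to this hypothetical predicate (OCTEP_OQ_WMARK_MIN, 256, is the default programmed by the PF code):

    static bool example_oq_asserts_backpressure(u32 free_rx_buffers, u32 wmark)
    {
            /* the source is throttled once free buffers fall below wmark */
            return free_rx_buffers < wmark;
    }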
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h
index 7f8135788e..6da32d40f9 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h
@@ -16,10 +16,12 @@
* |reserved (4 bytes)                         |
* |-------------------------------------------|
* |host version (8 bytes)                     |
+ * |    low 32 bits                            |
* |host status (8 bytes)                      |
* |host reserved (104 bytes)                  |
* |-------------------------------------------|
- * |fw version (8 bytes)                       |
+ * |fw versions (8 bytes)                      |
+ * |    min=high 32 bits, max=low 32 bits      |
* |fw status (8 bytes)                        |
* |fw reserved (104 bytes)                    |
* |===========================================|
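Per the layout note added above, the 8-byte fw version field now carries two 32-bit versions. A pair of accessors matching that description — helper names are ours, not from the driver:

    static inline u32 example_fw_version_min(u64 fw_versions)
    {
            return (u32)(fw_versions >> 32);          /* min = high 32 bits */
    }

    static inline u32 example_fw_version_max(u64 fw_versions)
    {
            return (u32)(fw_versions & 0xffffffff);   /* max = low 32 bits */
    }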
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
index 0594607a25..01b7be154c 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
@@ -13,6 +13,7 @@
#include "octep_config.h"
#include "octep_main.h"
#include "octep_ctrl_net.h"
+#include "octep_pfvf_mbox.h"
/* Control plane version */
#define OCTEP_CP_VERSION_CURRENT OCTEP_CP_VERSION(1, 0, 0)
@@ -22,12 +23,15 @@ static const u32 mtu_sz = sizeof(struct octep_ctrl_net_h2f_req_cmd_mtu);
static const u32 mac_sz = sizeof(struct octep_ctrl_net_h2f_req_cmd_mac);
static const u32 state_sz = sizeof(struct octep_ctrl_net_h2f_req_cmd_state);
static const u32 link_info_sz = sizeof(struct octep_ctrl_net_link_info);
+static const u32 offloads_sz = sizeof(struct octep_ctrl_net_offloads);
static atomic_t ctrl_net_msg_id;
/* Control plane version in which OCTEP_CTRL_NET_H2F_CMD was added */
static const u32 octep_ctrl_net_h2f_cmd_versions[OCTEP_CTRL_NET_H2F_CMD_MAX] = {
- [OCTEP_CTRL_NET_H2F_CMD_INVALID ... OCTEP_CTRL_NET_H2F_CMD_GET_INFO] =
- OCTEP_CP_VERSION(1, 0, 0)
+ [OCTEP_CTRL_NET_H2F_CMD_INVALID ... OCTEP_CTRL_NET_H2F_CMD_DEV_REMOVE] =
+ OCTEP_CP_VERSION(1, 0, 0),
+ [OCTEP_CTRL_NET_H2F_CMD_OFFLOADS] = OCTEP_CP_VERSION(1, 0, 1)
+
};
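This per-command table lets the driver gate each request on the control-plane version that introduced it; conceptually the check reduces to the following (hypothetical helper — the real comparison lives in the send/notify paths):

    static bool example_h2f_cmd_supported(u32 cmd, u32 peer_cp_version)
    {
            return cmd < OCTEP_CTRL_NET_H2F_CMD_MAX &&
                   peer_cp_version >= octep_ctrl_net_h2f_cmd_versions[cmd];
    }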
/* Control plane version in which OCTEP_CTRL_NET_F2H_CMD was added */
@@ -122,7 +126,7 @@ int octep_ctrl_net_init(struct octep_device *oct)
int octep_ctrl_net_get_link_status(struct octep_device *oct, int vfid)
{
- struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_wait_data d = {};
struct octep_ctrl_net_h2f_req *req = &d.data.req;
int err;
@@ -139,7 +143,7 @@ int octep_ctrl_net_get_link_status(struct octep_device *oct, int vfid)
int octep_ctrl_net_set_link_status(struct octep_device *oct, int vfid, bool up,
bool wait_for_response)
{
- struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_wait_data d = {};
struct octep_ctrl_net_h2f_req *req = &d.data.req;
init_send_req(&d.msg, req, state_sz, vfid);
@@ -154,7 +158,7 @@ int octep_ctrl_net_set_link_status(struct octep_device *oct, int vfid, bool up,
int octep_ctrl_net_set_rx_state(struct octep_device *oct, int vfid, bool up,
bool wait_for_response)
{
- struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_wait_data d = {};
struct octep_ctrl_net_h2f_req *req = &d.data.req;
init_send_req(&d.msg, req, state_sz, vfid);
@@ -168,7 +172,7 @@ int octep_ctrl_net_set_rx_state(struct octep_device *oct, int vfid, bool up,
int octep_ctrl_net_get_mac_addr(struct octep_device *oct, int vfid, u8 *addr)
{
- struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_wait_data d = {};
struct octep_ctrl_net_h2f_req *req = &d.data.req;
int err;
@@ -187,7 +191,7 @@ int octep_ctrl_net_get_mac_addr(struct octep_device *oct, int vfid, u8 *addr)
int octep_ctrl_net_set_mac_addr(struct octep_device *oct, int vfid, u8 *addr,
bool wait_for_response)
{
- struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_wait_data d = {};
struct octep_ctrl_net_h2f_req *req = &d.data.req;
init_send_req(&d.msg, req, mac_sz, vfid);
@@ -198,10 +202,28 @@ int octep_ctrl_net_set_mac_addr(struct octep_device *oct, int vfid, u8 *addr,
return octep_send_mbox_req(oct, &d, wait_for_response);
}
+int octep_ctrl_net_get_mtu(struct octep_device *oct, int vfid)
+{
+ struct octep_ctrl_net_wait_data d = {};
+ struct octep_ctrl_net_h2f_req *req;
+ int err;
+
+ req = &d.data.req;
+ init_send_req(&d.msg, req, mtu_sz, vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_MTU;
+ req->mtu.cmd = OCTEP_CTRL_NET_CMD_GET;
+
+ err = octep_send_mbox_req(oct, &d, true);
+ if (err < 0)
+ return err;
+
+ return d.data.resp.mtu.val;
+}
+
int octep_ctrl_net_set_mtu(struct octep_device *oct, int vfid, int mtu,
bool wait_for_response)
{
- struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_wait_data d = {};
struct octep_ctrl_net_h2f_req *req = &d.data.req;
init_send_req(&d.msg, req, mtu_sz, vfid);
@@ -216,7 +238,7 @@ int octep_ctrl_net_get_if_stats(struct octep_device *oct, int vfid,
struct octep_iface_rx_stats *rx_stats,
struct octep_iface_tx_stats *tx_stats)
{
- struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_wait_data d = {};
struct octep_ctrl_net_h2f_req *req = &d.data.req;
struct octep_ctrl_net_h2f_resp *resp;
int err;
@@ -236,7 +258,7 @@ int octep_ctrl_net_get_if_stats(struct octep_device *oct, int vfid,
int octep_ctrl_net_get_link_info(struct octep_device *oct, int vfid,
struct octep_iface_link_info *link_info)
{
- struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_wait_data d = {};
struct octep_ctrl_net_h2f_req *req = &d.data.req;
struct octep_ctrl_net_h2f_resp *resp;
int err;
@@ -262,7 +284,7 @@ int octep_ctrl_net_set_link_info(struct octep_device *oct, int vfid,
struct octep_iface_link_info *link_info,
bool wait_for_response)
{
- struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_wait_data d = {};
struct octep_ctrl_net_h2f_req *req = &d.data.req;
init_send_req(&d.msg, req, link_info_sz, vfid);
@@ -308,6 +330,11 @@ static int process_mbox_notify(struct octep_device *oct,
octep_ctrl_net_f2h_cmd_versions[cmd] < OCTEP_CP_VERSION_CURRENT)
return -EOPNOTSUPP;
+ if (msg->hdr.s.is_vf) {
+ octep_pfvf_notify(oct, msg);
+ return 0;
+ }
+
switch (cmd) {
case OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS:
if (netif_running(netdev)) {
@@ -331,8 +358,8 @@ static int process_mbox_notify(struct octep_device *oct,
void octep_ctrl_net_recv_fw_messages(struct octep_device *oct)
{
static u16 msg_sz = sizeof(union octep_ctrl_net_max_data);
- union octep_ctrl_net_max_data data = {0};
- struct octep_ctrl_mbox_msg msg = {0};
+ union octep_ctrl_net_max_data data = {};
+ struct octep_ctrl_mbox_msg msg = {};
int ret;
msg.hdr.s.sz = msg_sz;
@@ -356,7 +383,7 @@ void octep_ctrl_net_recv_fw_messages(struct octep_device *oct)
int octep_ctrl_net_get_info(struct octep_device *oct, int vfid,
struct octep_fw_info *info)
{
- struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_wait_data d = {};
struct octep_ctrl_net_h2f_resp *resp;
struct octep_ctrl_net_h2f_req *req;
int err;
@@ -375,10 +402,41 @@ int octep_ctrl_net_get_info(struct octep_device *oct, int vfid,
return 0;
}
+int octep_ctrl_net_dev_remove(struct octep_device *oct, int vfid)
+{
+ struct octep_ctrl_net_wait_data d = {};
+ struct octep_ctrl_net_h2f_req *req;
+
+ req = &d.data.req;
+ dev_dbg(&oct->pdev->dev, "Sending dev_unload msg to fw\n");
+ init_send_req(&d.msg, req, sizeof(int), vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_DEV_REMOVE;
+
+ return octep_send_mbox_req(oct, &d, false);
+}
+
+int octep_ctrl_net_set_offloads(struct octep_device *oct, int vfid,
+ struct octep_ctrl_net_offloads *offloads,
+ bool wait_for_response)
+{
+ struct octep_ctrl_net_wait_data d = {};
+ struct octep_ctrl_net_h2f_req *req;
+
+ req = &d.data.req;
+ init_send_req(&d.msg, req, offloads_sz, vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_OFFLOADS;
+ req->offloads.cmd = OCTEP_CTRL_NET_CMD_SET;
+ req->offloads.offloads = *offloads;
+
+ return octep_send_mbox_req(oct, &d, wait_for_response);
+}
+
int octep_ctrl_net_uninit(struct octep_device *oct)
{
struct octep_ctrl_net_wait_data *pos, *n;
+ octep_ctrl_net_dev_remove(oct, OCTEP_CTRL_NET_INVALID_VFID);
+
list_for_each_entry_safe(pos, n, &oct->ctrl_req_wait_list, list)
pos->done = 1;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
index b330f37013..0b823bea9c 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
@@ -42,6 +42,8 @@ enum octep_ctrl_net_h2f_cmd {
OCTEP_CTRL_NET_H2F_CMD_RX_STATE,
OCTEP_CTRL_NET_H2F_CMD_LINK_INFO,
OCTEP_CTRL_NET_H2F_CMD_GET_INFO,
+ OCTEP_CTRL_NET_H2F_CMD_DEV_REMOVE,
+ OCTEP_CTRL_NET_H2F_CMD_OFFLOADS,
OCTEP_CTRL_NET_H2F_CMD_MAX
};
@@ -112,6 +114,26 @@ struct octep_ctrl_net_h2f_req_cmd_link_info {
struct octep_ctrl_net_link_info info;
};
+/* offloads */
+struct octep_ctrl_net_offloads {
+ /* supported rx offloads OCTEP_RX_OFFLOAD_* */
+ u16 rx_offloads;
+ /* supported tx offloads OCTEP_TX_OFFLOAD_* */
+ u16 tx_offloads;
+ /* reserved */
+ u32 reserved_offloads;
+ /* extra offloads */
+ u64 ext_offloads;
+};
+
+/* get/set offloads */
+struct octep_ctrl_net_h2f_req_cmd_offloads {
+ /* enum octep_ctrl_net_cmd */
+ u16 cmd;
+ /* struct octep_ctrl_net_offloads */
+ struct octep_ctrl_net_offloads offloads;
+};
+
/* Host to fw request data */
struct octep_ctrl_net_h2f_req {
union octep_ctrl_net_req_hdr hdr;
@@ -121,6 +143,7 @@ struct octep_ctrl_net_h2f_req {
struct octep_ctrl_net_h2f_req_cmd_state link;
struct octep_ctrl_net_h2f_req_cmd_state rx;
struct octep_ctrl_net_h2f_req_cmd_link_info link_info;
+ struct octep_ctrl_net_h2f_req_cmd_offloads offloads;
};
} __packed;
@@ -178,6 +201,7 @@ struct octep_ctrl_net_h2f_resp {
struct octep_ctrl_net_h2f_resp_cmd_state rx;
struct octep_ctrl_net_link_info link_info;
struct octep_ctrl_net_h2f_resp_cmd_get_info info;
+ struct octep_ctrl_net_offloads offloads;
};
} __packed;
@@ -218,87 +242,105 @@ struct octep_ctrl_net_wait_data {
} data;
};
-/** Initialize data for ctrl net.
+/**
+ * octep_ctrl_net_init() - Initialize data for ctrl net.
*
- * @param oct: non-null pointer to struct octep_device.
+ * @oct: non-null pointer to struct octep_device.
*
* return value: 0 on success, -errno on error.
*/
int octep_ctrl_net_init(struct octep_device *oct);
-/** Get link status from firmware.
+/**
+ * octep_ctrl_net_get_link_status() - Get link status from firmware.
*
- * @param oct: non-null pointer to struct octep_device.
- * @param vfid: Index of virtual function.
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
*
* return value: link status 0=down, 1=up.
*/
int octep_ctrl_net_get_link_status(struct octep_device *oct, int vfid);
-/** Set link status in firmware.
+/**
+ * octep_ctrl_net_set_link_status() - Set link status in firmware.
*
- * @param oct: non-null pointer to struct octep_device.
- * @param vfid: Index of virtual function.
- * @param up: boolean status.
- * @param wait_for_response: poll for response.
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
+ * @up: boolean status.
+ * @wait_for_response: poll for response.
*
* return value: 0 on success, -errno on failure
*/
int octep_ctrl_net_set_link_status(struct octep_device *oct, int vfid, bool up,
bool wait_for_response);
-/** Set rx state in firmware.
+/**
+ * octep_ctrl_net_set_rx_state() - Set rx state in firmware.
*
- * @param oct: non-null pointer to struct octep_device.
- * @param vfid: Index of virtual function.
- * @param up: boolean status.
- * @param wait_for_response: poll for response.
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
+ * @up: boolean status.
+ * @wait_for_response: poll for response.
*
* return value: 0 on success, -errno on failure.
*/
int octep_ctrl_net_set_rx_state(struct octep_device *oct, int vfid, bool up,
bool wait_for_response);
-/** Get mac address from firmware.
+/**
+ * octep_ctrl_net_get_mac_addr() - Get mac address from firmware.
*
- * @param oct: non-null pointer to struct octep_device.
- * @param vfid: Index of virtual function.
- * @param addr: non-null pointer to mac address.
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
+ * @addr: non-null pointer to mac address.
*
* return value: 0 on success, -errno on failure.
*/
int octep_ctrl_net_get_mac_addr(struct octep_device *oct, int vfid, u8 *addr);
-/** Set mac address in firmware.
+/**
+ * octep_ctrl_net_set_mac_addr() - Set mac address in firmware.
*
- * @param oct: non-null pointer to struct octep_device.
- * @param vfid: Index of virtual function.
- * @param addr: non-null pointer to mac address.
- * @param wait_for_response: poll for response.
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
+ * @addr: non-null pointer to mac address.
+ * @wait_for_response: poll for response.
*
* return value: 0 on success, -errno on failure.
*/
int octep_ctrl_net_set_mac_addr(struct octep_device *oct, int vfid, u8 *addr,
bool wait_for_response);
-/** Set mtu in firmware.
+/**
+ * octep_ctrl_net_get_mtu() - Get max MTU from firmware.
*
- * @param oct: non-null pointer to struct octep_device.
- * @param vfid: Index of virtual function.
- * @param mtu: mtu.
- * @param wait_for_response: poll for response.
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
+ *
+ * return value: mtu on success, -errno on failure.
+ */
+int octep_ctrl_net_get_mtu(struct octep_device *oct, int vfid);
+
+/**
+ * octep_ctrl_net_set_mtu() - Set mtu in firmware.
+ *
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
+ * @mtu: mtu.
+ * @wait_for_response: poll for response.
*
* return value: 0 on success, -errno on failure.
*/
int octep_ctrl_net_set_mtu(struct octep_device *oct, int vfid, int mtu,
bool wait_for_response);
-/** Get interface statistics from firmware.
+/**
+ * octep_ctrl_net_get_if_stats() - Get interface statistics from firmware.
*
- * @param oct: non-null pointer to struct octep_device.
- * @param vfid: Index of virtual function.
- * @param rx_stats: non-null pointer struct octep_iface_rx_stats.
- * @param tx_stats: non-null pointer struct octep_iface_tx_stats.
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
+ * @rx_stats: non-null pointer struct octep_iface_rx_stats.
+ * @tx_stats: non-null pointer struct octep_iface_tx_stats.
*
* return value: 0 on success, -errno on failure.
*/
@@ -306,23 +348,25 @@ int octep_ctrl_net_get_if_stats(struct octep_device *oct, int vfid,
struct octep_iface_rx_stats *rx_stats,
struct octep_iface_tx_stats *tx_stats);
-/** Get link info from firmware.
+/**
+ * octep_ctrl_net_get_link_info() - Get link info from firmware.
*
- * @param oct: non-null pointer to struct octep_device.
- * @param vfid: Index of virtual function.
- * @param link_info: non-null pointer to struct octep_iface_link_info.
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
+ * @link_info: non-null pointer to struct octep_iface_link_info.
*
* return value: 0 on success, -errno on failure.
*/
int octep_ctrl_net_get_link_info(struct octep_device *oct, int vfid,
struct octep_iface_link_info *link_info);
-/** Set link info in firmware.
+/**
+ * octep_ctrl_net_set_link_info() - Set link info in firmware.
*
- * @param oct: non-null pointer to struct octep_device.
- * @param vfid: Index of virtual function.
- * @param link_info: non-null pointer to struct octep_iface_link_info.
- * @param wait_for_response: poll for response.
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
+ * @link_info: non-null pointer to struct octep_iface_link_info.
+ * @wait_for_response: poll for response.
*
* return value: 0 on success, -errno on failure.
*/
@@ -331,26 +375,53 @@ int octep_ctrl_net_set_link_info(struct octep_device *oct,
struct octep_iface_link_info *link_info,
bool wait_for_response);
-/** Poll for firmware messages and process them.
+/**
+ * octep_ctrl_net_recv_fw_messages() - Poll for firmware messages and process them.
*
- * @param oct: non-null pointer to struct octep_device.
+ * @oct: non-null pointer to struct octep_device.
*/
void octep_ctrl_net_recv_fw_messages(struct octep_device *oct);
-/** Get info from firmware.
+/**
+ * octep_ctrl_net_get_info() - Get info from firmware.
*
- * @param oct: non-null pointer to struct octep_device.
- * @param vfid: Index of virtual function.
- * @param info: non-null pointer to struct octep_fw_info.
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
+ * @info: non-null pointer to struct octep_fw_info.
*
* return value: 0 on success, -errno on failure.
*/
int octep_ctrl_net_get_info(struct octep_device *oct, int vfid,
struct octep_fw_info *info);
-/** Uninitialize data for ctrl net.
+/**
+ * octep_ctrl_net_dev_remove() - Indicate to firmware that a device unload has happened.
+ *
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_net_dev_remove(struct octep_device *oct, int vfid);
+
+/**
+ * octep_ctrl_net_set_offloads() - Set offloads in firmware.
+ *
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
+ * @offloads: non-null pointer to struct octep_ctrl_net_offloads.
+ * @wait_for_response: poll for response.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_net_set_offloads(struct octep_device *oct, int vfid,
+ struct octep_ctrl_net_offloads *offloads,
+ bool wait_for_response);
+
+/**
+ * octep_ctrl_net_uninit() - Uninitialize data for ctrl net.
*
- * @param oct: non-null pointer to struct octep_device.
+ * @oct: non-null pointer to struct octep_device.
*
* return value: 0 on success, -errno on error.
*/
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index a9bdf3283a..7c9faa714a 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -16,14 +16,20 @@
#include "octep_config.h"
#include "octep_main.h"
#include "octep_ctrl_net.h"
+#include "octep_pfvf_mbox.h"
#define OCTEP_INTR_POLL_TIME_MSECS 100
struct workqueue_struct *octep_wq;
/* Supported Devices */
static const struct pci_device_id octep_pci_id_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN98_PF)},
{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN93_PF)},
{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF95N_PF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN10KA_PF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF10KA_PF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF10KB_PF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN10KB_PF)},
{0, },
};
MODULE_DEVICE_TABLE(pci, octep_pci_id_tbl);
@@ -155,6 +161,21 @@ static void octep_disable_msix(struct octep_device *oct)
}
/**
+ * octep_mbox_intr_handler() - common handler for pfvf mbox interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data.
+ *
+ * This is the common handler for all PF-VF mailbox interrupts.
+ */
+static irqreturn_t octep_mbox_intr_handler(int irq, void *data)
+{
+ struct octep_device *oct = data;
+
+ return oct->hw_ops.mbox_intr_handler(oct);
+}
+
+/**
* octep_oei_intr_handler() - common handler for output endpoint interrupts.
*
* @irq: Interrupt number.
@@ -357,8 +378,12 @@ static int octep_request_irqs(struct octep_device *oct)
snprintf(irq_name, OCTEP_MSIX_NAME_SIZE,
"%s-%s", netdev->name, non_ioq_msix_names[i]);
- if (!strncmp(non_ioq_msix_names[i], "epf_oei_rint",
- strlen("epf_oei_rint"))) {
+ if (!strncmp(non_ioq_msix_names[i], "epf_mbox_rint", strlen("epf_mbox_rint"))) {
+ ret = request_irq(msix_entry->vector,
+ octep_mbox_intr_handler, 0,
+ irq_name, oct);
+ } else if (!strncmp(non_ioq_msix_names[i], "epf_oei_rint",
+ strlen("epf_oei_rint"))) {
ret = request_irq(msix_entry->vector,
octep_oei_intr_handler, 0,
irq_name, oct);
@@ -777,17 +802,24 @@ static int octep_stop(struct net_device *netdev)
*/
static inline int octep_iq_full_check(struct octep_iq *iq)
{
- if (likely((iq->max_count - atomic_read(&iq->instr_pending)) >=
+ if (likely((IQ_INSTR_SPACE(iq)) >
OCTEP_WAKE_QUEUE_THRESHOLD))
return 0;
/* Stop the queue if unable to send */
netif_stop_subqueue(iq->netdev, iq->q_no);
+ /* Allow pending ring-index updates from iq_process_completion()
+ * on other CPUs to become visible, in case the queue has gained
+ * free entries.
+ */
+ smp_mb();
+
/* check again and restart the queue, in case NAPI has just freed
* enough Tx ring entries.
*/
- if (unlikely((iq->max_count - atomic_read(&iq->instr_pending)) >=
+ if (unlikely(IQ_INSTR_SPACE(iq) >
OCTEP_WAKE_QUEUE_THRESHOLD)) {
netif_start_subqueue(iq->netdev, iq->q_no);
iq->stats.restart_cnt++;
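The smp_mb() in this path pairs with the index update on the completion side: the queue-stop must be globally visible before the free space is re-checked, otherwise a completion on another CPU could free entries between the first check and the stop and never wake the queue. In outline (a sketch of the required ordering, not new driver code):

	/* xmit path (this CPU)              completion path (other CPU)
	 *   netif_stop_subqueue()            advance iq->flush_index
	 *   smp_mb()                         memory barrier
	 *   re-check IQ_INSTR_SPACE(iq)      wake queue if it was stopped
	 */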
@@ -810,6 +842,7 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct octep_device *oct = netdev_priv(netdev);
+ netdev_features_t feat = netdev->features;
struct octep_tx_sglist_desc *sglist;
struct octep_tx_buffer *tx_buffer;
struct octep_tx_desc_hw *hw_desc;
@@ -818,8 +851,12 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
struct octep_iq *iq;
skb_frag_t *frag;
u16 nr_frags, si;
+ int xmit_more;
u16 q_no, wi;
+ if (skb_put_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+
q_no = skb_get_queue_mapping(skb);
if (q_no >= oct->num_iqs) {
netdev_err(netdev, "Invalid Tx skb->queue_mapping=%d\n", q_no);
@@ -827,10 +864,6 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
}
iq = oct->iq[q_no];
- if (octep_iq_full_check(iq)) {
- iq->stats.tx_busy++;
- return NETDEV_TX_BUSY;
- }
shinfo = skb_shinfo(skb);
nr_frags = shinfo->nr_frags;
@@ -843,8 +876,9 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
tx_buffer->skb = skb;
ih = &hw_desc->ih;
- ih->tlen = skb->len;
- ih->pkind = oct->pkind;
+ ih->pkind = oct->conf->fw_info.pkind;
+ ih->fsz = oct->conf->fw_info.fsz;
+ ih->tlen = skb->len + ih->fsz;
if (!nr_frags) {
tx_buffer->gather = 0;
@@ -869,9 +903,6 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
if (dma_mapping_error(iq->dev, dma))
goto dma_map_err;
- dma_sync_single_for_cpu(iq->dev, tx_buffer->sglist_dma,
- OCTEP_SGLIST_SIZE_PER_PKT,
- DMA_TO_DEVICE);
memset(sglist, 0, OCTEP_SGLIST_SIZE_PER_PKT);
sglist[0].len[3] = len;
sglist[0].dma_ptr[0] = dma;
@@ -891,26 +922,46 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
frag++;
si++;
}
- dma_sync_single_for_device(iq->dev, tx_buffer->sglist_dma,
- OCTEP_SGLIST_SIZE_PER_PKT,
- DMA_TO_DEVICE);
-
hw_desc->dptr = tx_buffer->sglist_dma;
}
- netdev_tx_sent_queue(iq->netdev_q, skb->len);
+ if (oct->conf->fw_info.tx_ol_flags) {
+ if ((feat & (NETIF_F_TSO)) && (skb_is_gso(skb))) {
+ hw_desc->txm.ol_flags = OCTEP_TX_OFFLOAD_CKSUM;
+ hw_desc->txm.ol_flags |= OCTEP_TX_OFFLOAD_TSO;
+ hw_desc->txm.gso_size = skb_shinfo(skb)->gso_size;
+ hw_desc->txm.gso_segs = skb_shinfo(skb)->gso_segs;
+ } else if (feat & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
+ hw_desc->txm.ol_flags = OCTEP_TX_OFFLOAD_CKSUM;
+ }
+ /* due to ESR, txm will be byte-swapped by the hardware */
+ hw_desc->txm64[0] = (__force u64)cpu_to_be64(hw_desc->txm64[0]);
+ }
+
+ xmit_more = netdev_xmit_more();
+
+ __netdev_tx_sent_queue(iq->netdev_q, skb->len, xmit_more);
+
skb_tx_timestamp(skb);
- atomic_inc(&iq->instr_pending);
+ iq->fill_cnt++;
wi++;
- if (wi == iq->max_count)
- wi = 0;
- iq->host_write_index = wi;
+ iq->host_write_index = wi & iq->ring_size_mask;
+
+ /* octep_iq_full_check() stops the queue and returns true if
+ * inserting the current packet has made the queue full; in
+ * that case, skip batching and ring the doorbell right away.
+ */
+ if (!octep_iq_full_check(iq) && xmit_more &&
+ iq->fill_cnt < iq->fill_threshold)
+ return NETDEV_TX_OK;
+
/* Flush the hw descriptor before writing to doorbell */
wmb();
-
- /* Ring Doorbell to notify the NIC there is a new packet */
- writel(1, iq->doorbell_reg);
- iq->stats.instr_posted++;
+ /* Ring Doorbell to notify the NIC of new packets */
+ writel(iq->fill_cnt, iq->doorbell_reg);
+ iq->stats.instr_posted += iq->fill_cnt;
+ iq->fill_cnt = 0;
return NETDEV_TX_OK;
dma_map_sg_err:
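Two things changed in the xmit path above: tlen now accounts for the fsz front-segment bytes the hardware expects, and doorbell writes are batched. The batching decision condenses to the following (a restatement of the logic above, for illustration only):

	iq->fill_cnt++;
	if (!octep_iq_full_check(iq) && netdev_xmit_more() &&
	    iq->fill_cnt < iq->fill_threshold)
		return NETDEV_TX_OK;	/* hold the doorbell; more packets coming */

	wmb();					/* descriptors visible before doorbell */
	writel(iq->fill_cnt, iq->doorbell_reg);	/* post the whole batch at once */
	iq->fill_cnt = 0;

So with, say, fill_threshold = 16, a 40-packet burst rings the doorbell three times instead of forty.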
@@ -1051,6 +1102,41 @@ static int octep_change_mtu(struct net_device *netdev, int new_mtu)
return err;
}
+static int octep_set_features(struct net_device *dev, netdev_features_t features)
+{
+ struct octep_ctrl_net_offloads offloads = { 0 };
+ struct octep_device *oct = netdev_priv(dev);
+ int err;
+
+ /* We only support features received from firmware */
+ if ((features & dev->hw_features) != features)
+ return -EINVAL;
+
+ if (features & NETIF_F_TSO)
+ offloads.tx_offloads |= OCTEP_TX_OFFLOAD_TSO;
+
+ if (features & NETIF_F_TSO6)
+ offloads.tx_offloads |= OCTEP_TX_OFFLOAD_TSO;
+
+ if (features & NETIF_F_IP_CSUM)
+ offloads.tx_offloads |= OCTEP_TX_OFFLOAD_CKSUM;
+
+ if (features & NETIF_F_IPV6_CSUM)
+ offloads.tx_offloads |= OCTEP_TX_OFFLOAD_CKSUM;
+
+ if (features & NETIF_F_RXCSUM)
+ offloads.rx_offloads |= OCTEP_RX_OFFLOAD_CKSUM;
+
+ err = octep_ctrl_net_set_offloads(oct,
+ OCTEP_CTRL_NET_INVALID_VFID,
+ &offloads,
+ true);
+ if (!err)
+ dev->features = features;
+
+ return err;
+}
+
static const struct net_device_ops octep_netdev_ops = {
.ndo_open = octep_open,
.ndo_stop = octep_stop,
@@ -1059,6 +1145,7 @@ static const struct net_device_ops octep_netdev_ops = {
.ndo_tx_timeout = octep_tx_timeout,
.ndo_set_mac_address = octep_set_mac,
.ndo_change_mtu = octep_change_mtu,
+ .ndo_set_features = octep_set_features,
};
/**
@@ -1132,10 +1219,20 @@ static void octep_ctrl_mbox_task(struct work_struct *work)
static const char *octep_devid_to_str(struct octep_device *oct)
{
switch (oct->chip_id) {
+ case OCTEP_PCI_DEVICE_ID_CN98_PF:
+ return "CN98XX";
case OCTEP_PCI_DEVICE_ID_CN93_PF:
return "CN93XX";
case OCTEP_PCI_DEVICE_ID_CNF95N_PF:
return "CNF95N";
+ case OCTEP_PCI_DEVICE_ID_CN10KA_PF:
+ return "CN10KA";
+ case OCTEP_PCI_DEVICE_ID_CNF10KA_PF:
+ return "CNF10KA";
+ case OCTEP_PCI_DEVICE_ID_CNF10KB_PF:
+ return "CNF10KB";
+ case OCTEP_PCI_DEVICE_ID_CN10KB_PF:
+ return "CN10KB";
default:
return "Unsupported";
}
@@ -1174,6 +1271,7 @@ int octep_device_setup(struct octep_device *oct)
dev_info(&pdev->dev, "chip_id = 0x%x\n", pdev->device);
switch (oct->chip_id) {
+ case OCTEP_PCI_DEVICE_ID_CN98_PF:
case OCTEP_PCI_DEVICE_ID_CN93_PF:
case OCTEP_PCI_DEVICE_ID_CNF95N_PF:
dev_info(&pdev->dev, "Setting up OCTEON %s PF PASS%d.%d\n",
@@ -1181,13 +1279,20 @@ int octep_device_setup(struct octep_device *oct)
OCTEP_MINOR_REV(oct));
octep_device_setup_cn93_pf(oct);
break;
+ case OCTEP_PCI_DEVICE_ID_CNF10KA_PF:
+ case OCTEP_PCI_DEVICE_ID_CN10KA_PF:
+ case OCTEP_PCI_DEVICE_ID_CNF10KB_PF:
+ case OCTEP_PCI_DEVICE_ID_CN10KB_PF:
+ dev_info(&pdev->dev, "Setting up OCTEON %s PF PASS%d.%d\n",
+ octep_devid_to_str(oct), OCTEP_MAJOR_REV(oct), OCTEP_MINOR_REV(oct));
+ octep_device_setup_cnxk_pf(oct);
+ break;
default:
dev_err(&pdev->dev,
"%s: unsupported device\n", __func__);
goto unsupported_dev;
}
- oct->pkind = CFG_GET_IQ_PKIND(oct->conf);
ret = octep_ctrl_net_init(oct);
if (ret)
@@ -1237,6 +1342,7 @@ static void octep_device_cleanup(struct octep_device *oct)
oct->mbox[i] = NULL;
}
+ octep_delete_pfvf_mbox(oct);
octep_ctrl_net_uninit(oct);
cancel_delayed_work_sync(&oct->hb_task);
@@ -1284,6 +1390,7 @@ static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct octep_device *octep_dev = NULL;
struct net_device *netdev;
+ int max_rx_pktlen;
int err;
err = pci_enable_device(pdev);
@@ -1333,6 +1440,12 @@ static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_octep_config;
}
+ err = octep_setup_pfvf_mbox(octep_dev);
+ if (err) {
+ dev_err(&pdev->dev, "PF-VF mailbox setup failed\n");
+ goto register_dev_err;
+ }
+
err = octep_ctrl_net_get_info(octep_dev, OCTEP_CTRL_NET_INVALID_VFID,
&octep_dev->conf->fw_info);
if (err) {
@@ -1350,11 +1463,29 @@ static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_carrier_off(netdev);
netdev->hw_features = NETIF_F_SG;
- netdev->features |= netdev->hw_features;
+ if (OCTEP_TX_IP_CSUM(octep_dev->conf->fw_info.tx_ol_flags))
+ netdev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+
+ if (OCTEP_RX_IP_CSUM(octep_dev->conf->fw_info.rx_ol_flags))
+ netdev->hw_features |= NETIF_F_RXCSUM;
+
+ max_rx_pktlen = octep_ctrl_net_get_mtu(octep_dev, OCTEP_CTRL_NET_INVALID_VFID);
+ if (max_rx_pktlen < 0) {
+ dev_err(&octep_dev->pdev->dev,
+ "Failed to get max receive packet size; err = %d\n", max_rx_pktlen);
+ err = max_rx_pktlen;
+ goto register_dev_err;
+ }
netdev->min_mtu = OCTEP_MIN_MTU;
- netdev->max_mtu = OCTEP_MAX_MTU;
+ netdev->max_mtu = max_rx_pktlen - (ETH_HLEN + ETH_FCS_LEN);
netdev->mtu = OCTEP_DEFAULT_MTU;
+ if (OCTEP_TX_TSO(octep_dev->conf->fw_info.tx_ol_flags)) {
+ netdev->hw_features |= NETIF_F_TSO;
+ netif_set_tso_max_size(netdev, netdev->max_mtu);
+ }
+
+ netdev->features |= netdev->hw_features;
err = octep_ctrl_net_get_mac_addr(octep_dev, OCTEP_CTRL_NET_INVALID_VFID,
octep_dev->mac_addr);
if (err) {
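Worked example of the max_mtu computation above: if the firmware reports a max receive packet length of 16380 bytes (the OCTEON_SDP_16K_HW_FRS value defined elsewhere in this patch), then:

	netdev->max_mtu = 16380 - (ETH_HLEN + ETH_FCS_LEN);	/* 16380 - (14 + 4) = 16362 */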
@@ -1383,6 +1514,21 @@ err_dma_mask:
return err;
}
+static int octep_sriov_disable(struct octep_device *oct)
+{
+ struct pci_dev *pdev = oct->pdev;
+
+ if (pci_vfs_assigned(oct->pdev)) {
+ dev_warn(&pdev->dev, "Can't disable SRIOV while VFs are assigned\n");
+ return -EPERM;
+ }
+
+ pci_disable_sriov(pdev);
+ CFG_GET_ACTIVE_VFS(oct->conf) = 0;
+
+ return 0;
+}
+
/**
* octep_remove() - Remove Octeon PCI device from driver control.
*
@@ -1400,6 +1546,7 @@ static void octep_remove(struct pci_dev *pdev)
return;
netdev = oct->netdev;
+ octep_sriov_disable(oct);
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
@@ -1410,11 +1557,47 @@ static void octep_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+static int octep_sriov_enable(struct octep_device *oct, int num_vfs)
+{
+ struct pci_dev *pdev = oct->pdev;
+ int err;
+
+ CFG_GET_ACTIVE_VFS(oct->conf) = num_vfs;
+ err = pci_enable_sriov(pdev, num_vfs);
+ if (err) {
+ dev_warn(&pdev->dev, "Failed to enable SRIOV err=%d\n", err);
+ CFG_GET_ACTIVE_VFS(oct->conf) = 0;
+ return err;
+ }
+
+ return num_vfs;
+}
+
+static int octep_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct octep_device *oct = pci_get_drvdata(pdev);
+ int max_nvfs;
+
+ if (num_vfs == 0)
+ return octep_sriov_disable(oct);
+
+ max_nvfs = CFG_GET_MAX_VFS(oct->conf);
+
+ if (num_vfs > max_nvfs) {
+ dev_err(&pdev->dev, "Invalid VF count Max supported VFs = %d\n",
+ max_nvfs);
+ return -EINVAL;
+ }
+
+ return octep_sriov_enable(oct, num_vfs);
+}
+
static struct pci_driver octep_driver = {
.name = OCTEP_DRV_NAME,
.id_table = octep_pci_id_tbl,
.probe = octep_probe,
.remove = octep_remove,
+ .sriov_configure = octep_sriov_configure,
};
/**
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
index 6df902ebb7..fee59e0e01 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
@@ -18,11 +18,17 @@
#define OCTEP_PCIID_CN93_PF 0xB200177d
#define OCTEP_PCIID_CN93_VF 0xB203177d
+#define OCTEP_PCI_DEVICE_ID_CN98_PF 0xB100
#define OCTEP_PCI_DEVICE_ID_CN93_PF 0xB200
#define OCTEP_PCI_DEVICE_ID_CN93_VF 0xB203
#define OCTEP_PCI_DEVICE_ID_CNF95N_PF 0xB400 //95N PF
+#define OCTEP_PCI_DEVICE_ID_CN10KA_PF 0xB900 //CN10KA PF
+#define OCTEP_PCI_DEVICE_ID_CNF10KA_PF 0xBA00 //CNF10KA PF
+#define OCTEP_PCI_DEVICE_ID_CNF10KB_PF 0xBC00 //CNF10KB PF
+#define OCTEP_PCI_DEVICE_ID_CN10KB_PF 0xBD00 //CN10KB PF
+
#define OCTEP_MAX_QUEUES 63
#define OCTEP_MAX_IQ OCTEP_MAX_QUEUES
#define OCTEP_MAX_OQ OCTEP_MAX_QUEUES
@@ -40,6 +46,15 @@
#define OCTEP_OQ_INTR_RESEND_BIT 59
#define OCTEP_MMIO_REGIONS 3
+
+#define IQ_INSTR_PENDING(iq) ({ typeof(iq) iq__ = (iq); \
+ ((iq__)->host_write_index - (iq__)->flush_index) & \
+ (iq__)->ring_size_mask; \
+ })
+#define IQ_INSTR_SPACE(iq) ({ typeof(iq) iq_ = (iq); \
+ (iq_)->max_count - IQ_INSTR_PENDING(iq_); \
+ })
+
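These macros are standard power-of-two ring arithmetic; the mask makes the subtraction safe across wrap-around. A worked example, assuming a 1024-entry ring (ring_size_mask = 1023):

	/* host_write_index = 10, flush_index = 1020 (producer has wrapped) */
	pending = (10 - 1020) & 1023;	/* = 14 instructions still in flight */
	space   = 1024 - pending;	/* = 1010 free descriptor slots */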
/* PCI address space mapping information.
* Each of the 3 address spaces given by BAR0, BAR2 and BAR4 of
* Octeon gets mapped to different physical address spaces in
@@ -65,6 +80,7 @@ struct octep_hw_ops {
void (*setup_oq_regs)(struct octep_device *oct, int q);
void (*setup_mbox_regs)(struct octep_device *oct, int mbox);
+ irqreturn_t (*mbox_intr_handler)(void *ioq_vector);
irqreturn_t (*oei_intr_handler)(void *ioq_vector);
irqreturn_t (*ire_intr_handler)(void *ioq_vector);
irqreturn_t (*ore_intr_handler)(void *ioq_vector);
@@ -103,28 +119,27 @@ struct octep_mbox_data {
u64 *data;
};
+#define MAX_VF_PF_MBOX_DATA_SIZE 384
+/* wrappers around work structs */
+struct octep_pfvf_mbox_wk {
+ struct work_struct work;
+ void *ctxptr;
+ u64 ctxul;
+};
+
/* Octeon device mailbox */
struct octep_mbox {
- /* A spinlock to protect access to this q_mbox. */
- spinlock_t lock;
-
- u32 q_no;
- u32 state;
-
- /* SLI_MAC_PF_MBOX_INT for PF, SLI_PKT_MBOX_INT for VF. */
- u8 __iomem *mbox_int_reg;
-
- /* SLI_PKT_PF_VF_MBOX_SIG(0) for PF,
- * SLI_PKT_PF_VF_MBOX_SIG(1) for VF.
- */
- u8 __iomem *mbox_write_reg;
-
- /* SLI_PKT_PF_VF_MBOX_SIG(1) for PF,
- * SLI_PKT_PF_VF_MBOX_SIG(0) for VF.
- */
- u8 __iomem *mbox_read_reg;
-
+ /* A mutex to protect access to this q_mbox. */
+ struct mutex lock;
+ u32 vf_id;
+ u32 config_data_index;
+ u32 message_len;
+ u8 __iomem *pf_vf_data_reg;
+ u8 __iomem *vf_pf_data_reg;
+ struct octep_pfvf_mbox_wk wk;
+ struct octep_device *oct;
struct octep_mbox_data mbox_data;
+ u8 config_data[MAX_VF_PF_MBOX_DATA_SIZE];
};
/* Tx/Rx queue vector per interrupt. */
@@ -202,6 +217,12 @@ struct octep_iface_link_info {
u8 oper_up;
};
+/* The Octeon VF device specific info data structure.*/
+struct octep_pfvf_info {
+ u8 mac_addr[ETH_ALEN];
+ u32 mbox_version;
+};
+
/* The Octeon device specific private data structure.
* Each Octeon device has this structure to represent all its components.
*/
@@ -232,8 +253,7 @@ struct octep_device {
/* Tx queues (IQ: Instruction Queue) */
u16 num_iqs;
- /* pkind value to be used in every Tx hardware descriptor */
- u8 pkind;
+
/* Pointers to Octeon Tx queues */
struct octep_iq *iq[OCTEP_MAX_IQ];
@@ -268,6 +288,8 @@ struct octep_device {
/* Mailbox to talk to VFs */
struct octep_mbox *mbox[OCTEP_MAX_VF];
+ /* VFs info */
+ struct octep_pfvf_info vf_info[OCTEP_MAX_VF];
/* Work entry to handle Tx timeout */
struct work_struct tx_timeout_task;
@@ -377,6 +399,7 @@ int octep_setup_oqs(struct octep_device *oct);
void octep_free_oqs(struct octep_device *oct);
void octep_oq_dbell_init(struct octep_device *oct);
void octep_device_setup_cn93_pf(struct octep_device *oct);
+void octep_device_setup_cnxk_pf(struct octep_device *oct);
int octep_iq_process_completions(struct octep_iq *iq, u16 budget);
int octep_oq_process_rx(struct octep_oq *oq, int budget);
void octep_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
new file mode 100644
index 0000000000..2e2c3be8a0
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
@@ -0,0 +1,449 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mutex.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+#include "octep_pfvf_mbox.h"
+#include "octep_ctrl_net.h"
+
+/* When a new command is implemented, the table below should be updated
+ * with the new command and its version info.
+ */
+static u32 pfvf_cmd_versions[OCTEP_PFVF_MBOX_CMD_MAX] = {
+ [0 ... OCTEP_PFVF_MBOX_CMD_DEV_REMOVE] = OCTEP_PFVF_MBOX_VERSION_V1,
+ [OCTEP_PFVF_MBOX_CMD_GET_FW_INFO ... OCTEP_PFVF_MBOX_NOTIF_LINK_STATUS] =
+ OCTEP_PFVF_MBOX_VERSION_V2
+};
+
+static void octep_pfvf_validate_version(struct octep_device *oct, u32 vf_id,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ u32 vf_version = (u32)cmd.s_version.version;
+
+ dev_dbg(&oct->pdev->dev, "VF id:%d VF version:%d PF version:%d\n",
+ vf_id, vf_version, OCTEP_PFVF_MBOX_VERSION_CURRENT);
+ if (vf_version < OCTEP_PFVF_MBOX_VERSION_CURRENT)
+ rsp->s_version.version = vf_version;
+ else
+ rsp->s_version.version = OCTEP_PFVF_MBOX_VERSION_CURRENT;
+
+ oct->vf_info[vf_id].mbox_version = rsp->s_version.version;
+ dev_dbg(&oct->pdev->dev, "VF id:%d negotiated VF version:%d\n",
+ vf_id, oct->vf_info[vf_id].mbox_version);
+
+ rsp->s_version.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+}
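Negotiation settles on the lower of the two versions, so a VF advertising OCTEP_PFVF_MBOX_VERSION_V1 against a PF at OCTEP_PFVF_MBOX_VERSION_V2 is pinned at V1, and V2-only commands such as OCTEP_PFVF_MBOX_CMD_GET_FW_INFO are rejected for it. The rule is effectively:

	negotiated = min(vf_version, (u32)OCTEP_PFVF_MBOX_VERSION_CURRENT);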
+
+static void octep_pfvf_get_link_status(struct octep_device *oct, u32 vf_id,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ int status;
+
+ status = octep_ctrl_net_get_link_status(oct, vf_id);
+ if (status < 0) {
+ rsp->s_link_status.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ dev_err(&oct->pdev->dev, "Get VF link status failed via host control Mbox\n");
+ return;
+ }
+ rsp->s_link_status.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+ rsp->s_link_status.status = status;
+}
+
+static void octep_pfvf_set_link_status(struct octep_device *oct, u32 vf_id,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ int err;
+
+ err = octep_ctrl_net_set_link_status(oct, vf_id, cmd.s_link_status.status, true);
+ if (err) {
+ rsp->s_link_status.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ dev_err(&oct->pdev->dev, "Set VF link status failed via host control Mbox\n");
+ return;
+ }
+ rsp->s_link_status.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+}
+
+static void octep_pfvf_set_rx_state(struct octep_device *oct, u32 vf_id,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ int err;
+
+ err = octep_ctrl_net_set_rx_state(oct, vf_id, cmd.s_link_state.state, true);
+ if (err) {
+ rsp->s_link_state.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ dev_err(&oct->pdev->dev, "Set VF Rx link state failed via host control Mbox\n");
+ return;
+ }
+ rsp->s_link_state.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+}
+
+static int octep_send_notification(struct octep_device *oct, u32 vf_id,
+ union octep_pfvf_mbox_word cmd)
+{
+ u32 max_rings_per_vf, vf_mbox_queue;
+ struct octep_mbox *mbox;
+
+ /* check if VF PF Mailbox is compatible for this notification */
+ if (pfvf_cmd_versions[cmd.s.opcode] > oct->vf_info[vf_id].mbox_version) {
+ dev_dbg(&oct->pdev->dev, "VF Mbox doesn't support Notification:%d on VF ver:%d\n",
+ cmd.s.opcode, oct->vf_info[vf_id].mbox_version);
+ return -EOPNOTSUPP;
+ }
+
+ max_rings_per_vf = CFG_GET_MAX_RPVF(oct->conf);
+ vf_mbox_queue = vf_id * max_rings_per_vf;
+ if (!oct->mbox[vf_mbox_queue]) {
+ dev_err(&oct->pdev->dev, "Notif obtained for bad mbox vf %d\n", vf_id);
+ return -EINVAL;
+ }
+ mbox = oct->mbox[vf_mbox_queue];
+
+ mutex_lock(&mbox->lock);
+ writeq(cmd.u64, mbox->pf_vf_data_reg);
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+}
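Each VF owns a contiguous block of rings, and its mailbox sits on the first ring of that block. For example, with CFG_GET_MAX_RPVF() returning 8:

	vf_mbox_queue = vf_id * max_rings_per_vf;	/* VF 3 -> ring 24 */
	mbox = oct->mbox[vf_mbox_queue];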
+
+static void octep_pfvf_set_mtu(struct octep_device *oct, u32 vf_id,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ int err;
+
+ err = octep_ctrl_net_set_mtu(oct, vf_id, cmd.s_set_mtu.mtu, true);
+ if (err) {
+ rsp->s_set_mtu.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ dev_err(&oct->pdev->dev, "Set VF MTU failed via host control Mbox\n");
+ return;
+ }
+ rsp->s_set_mtu.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+}
+
+static void octep_pfvf_get_mtu(struct octep_device *oct, u32 vf_id,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ int max_rx_pktlen = oct->netdev->max_mtu + (ETH_HLEN + ETH_FCS_LEN);
+
+ rsp->s_set_mtu.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+ rsp->s_get_mtu.mtu = max_rx_pktlen;
+}
+
+static void octep_pfvf_set_mac_addr(struct octep_device *oct, u32 vf_id,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ int err;
+
+ err = octep_ctrl_net_set_mac_addr(oct, vf_id, cmd.s_set_mac.mac_addr, true);
+ if (err) {
+ rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ dev_err(&oct->pdev->dev, "Set VF MAC address failed via host control Mbox\n");
+ return;
+ }
+ rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+}
+
+static void octep_pfvf_get_mac_addr(struct octep_device *oct, u32 vf_id,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ int err;
+
+ err = octep_ctrl_net_get_mac_addr(oct, vf_id, rsp->s_set_mac.mac_addr);
+ if (err) {
+ rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ dev_err(&oct->pdev->dev, "Get VF MAC address failed via host control Mbox\n");
+ return;
+ }
+ rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+}
+
+static void octep_pfvf_dev_remove(struct octep_device *oct, u32 vf_id,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ int err;
+
+ err = octep_ctrl_net_dev_remove(oct, vf_id);
+ if (err) {
+ rsp->s.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ dev_err(&oct->pdev->dev, "Failed to acknowledge fw of vf %d removal\n",
+ vf_id);
+ return;
+ }
+ rsp->s.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+}
+
+static void octep_pfvf_get_fw_info(struct octep_device *oct, u32 vf_id,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ struct octep_fw_info fw_info;
+ int err;
+
+ err = octep_ctrl_net_get_info(oct, vf_id, &fw_info);
+ if (err) {
+ rsp->s_fw_info.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ dev_err(&oct->pdev->dev, "Get VF info failed via host control Mbox\n");
+ return;
+ }
+
+ rsp->s_fw_info.pkind = fw_info.pkind;
+ rsp->s_fw_info.fsz = fw_info.fsz;
+ rsp->s_fw_info.rx_ol_flags = fw_info.rx_ol_flags;
+ rsp->s_fw_info.tx_ol_flags = fw_info.tx_ol_flags;
+
+ rsp->s_fw_info.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+}
+
+static void octep_pfvf_set_offloads(struct octep_device *oct, u32 vf_id,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ struct octep_ctrl_net_offloads offloads = {
+ .rx_offloads = cmd.s_offloads.rx_ol_flags,
+ .tx_offloads = cmd.s_offloads.tx_ol_flags
+ };
+ int err;
+
+ err = octep_ctrl_net_set_offloads(oct, vf_id, &offloads, true);
+ if (err) {
+ rsp->s_offloads.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ dev_err(&oct->pdev->dev, "Set VF offloads failed via host control Mbox\n");
+ return;
+ }
+ rsp->s_offloads.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+}
+
+int octep_setup_pfvf_mbox(struct octep_device *oct)
+{
+ int i = 0, num_vfs = 0, rings_per_vf = 0;
+ int ring = 0;
+
+ num_vfs = oct->conf->sriov_cfg.active_vfs;
+ rings_per_vf = oct->conf->sriov_cfg.max_rings_per_vf;
+
+ for (i = 0; i < num_vfs; i++) {
+ ring = rings_per_vf * i;
+ oct->mbox[ring] = vzalloc(sizeof(*oct->mbox[ring]));
+
+ if (!oct->mbox[ring])
+ goto free_mbox;
+
+ memset(oct->mbox[ring], 0, sizeof(struct octep_mbox));
+ memset(&oct->vf_info[i], 0, sizeof(struct octep_pfvf_info));
+ mutex_init(&oct->mbox[ring]->lock);
+ INIT_WORK(&oct->mbox[ring]->wk.work, octep_pfvf_mbox_work);
+ oct->mbox[ring]->wk.ctxptr = oct->mbox[ring];
+ oct->mbox[ring]->oct = oct;
+ oct->mbox[ring]->vf_id = i;
+ oct->hw_ops.setup_mbox_regs(oct, ring);
+ }
+ return 0;
+
+free_mbox:
+ while (i) {
+ i--;
+ ring = rings_per_vf * i;
+ cancel_work_sync(&oct->mbox[ring]->wk.work);
+ mutex_destroy(&oct->mbox[ring]->lock);
+ vfree(oct->mbox[ring]);
+ oct->mbox[ring] = NULL;
+ }
+ return -ENOMEM;
+}
+
+void octep_delete_pfvf_mbox(struct octep_device *oct)
+{
+ int rings_per_vf = oct->conf->sriov_cfg.max_rings_per_vf;
+ int num_vfs = oct->conf->sriov_cfg.active_vfs;
+ int i = 0, ring = 0, vf_srn = 0;
+
+ for (i = 0; i < num_vfs; i++) {
+ ring = vf_srn + rings_per_vf * i;
+ if (!oct->mbox[ring])
+ continue;
+
+ if (work_pending(&oct->mbox[ring]->wk.work))
+ cancel_work_sync(&oct->mbox[ring]->wk.work);
+
+ mutex_destroy(&oct->mbox[ring]->lock);
+ vfree(oct->mbox[ring]);
+ oct->mbox[ring] = NULL;
+ }
+}
+
+static void octep_pfvf_pf_get_data(struct octep_device *oct,
+ struct octep_mbox *mbox, int vf_id,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ int length = 0;
+ int i = 0;
+ int err;
+ struct octep_iface_link_info link_info;
+ struct octep_iface_rx_stats rx_stats;
+ struct octep_iface_tx_stats tx_stats;
+
+ rsp->s_data.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+
+ if (cmd.s_data.frag != OCTEP_PFVF_MBOX_MORE_FRAG_FLAG) {
+ mbox->config_data_index = 0;
+ memset(mbox->config_data, 0, MAX_VF_PF_MBOX_DATA_SIZE);
+ /* Based on the opcode, the corresponding PF driver
+ * API is called to fetch the requested data.
+ */
+ switch (cmd.s.opcode) {
+ case OCTEP_PFVF_MBOX_CMD_GET_LINK_INFO:
+ memset(&link_info, 0, sizeof(link_info));
+ err = octep_ctrl_net_get_link_info(oct, vf_id, &link_info);
+ if (!err) {
+ mbox->message_len = sizeof(link_info);
+ *((int32_t *)rsp->s_data.data) = mbox->message_len;
+ memcpy(mbox->config_data, (u8 *)&link_info, sizeof(link_info));
+ } else {
+ rsp->s_data.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ return;
+ }
+ break;
+ case OCTEP_PFVF_MBOX_CMD_GET_STATS:
+ memset(&rx_stats, 0, sizeof(rx_stats));
+ memset(&tx_stats, 0, sizeof(tx_stats));
+ err = octep_ctrl_net_get_if_stats(oct, vf_id, &rx_stats, &tx_stats);
+ if (!err) {
+ mbox->message_len = sizeof(rx_stats) + sizeof(tx_stats);
+ *((int32_t *)rsp->s_data.data) = mbox->message_len;
+ memcpy(mbox->config_data, (u8 *)&rx_stats, sizeof(rx_stats));
+ memcpy(mbox->config_data + sizeof(rx_stats), (u8 *)&tx_stats,
+ sizeof(tx_stats));
+
+ } else {
+ rsp->s_data.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ return;
+ }
+ break;
+ }
+ *((int32_t *)rsp->s_data.data) = mbox->message_len;
+ return;
+ }
+
+ if (mbox->message_len > OCTEP_PFVF_MBOX_MAX_DATA_SIZE)
+ length = OCTEP_PFVF_MBOX_MAX_DATA_SIZE;
+ else
+ length = mbox->message_len;
+
+ mbox->message_len -= length;
+
+ for (i = 0; i < length; i++) {
+ rsp->s_data.data[i] =
+ mbox->config_data[mbox->config_data_index];
+ mbox->config_data_index++;
+ }
+}
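The PF returns at most OCTEP_PFVF_MBOX_MAX_DATA_SIZE (6) bytes per mailbox word, so a VF reading a larger object (link info, stats) must keep requesting with the MORE_FRAG flag until message_len bytes have arrived. A hypothetical VF-side reassembly loop under that assumption (send_mbox_cmd() and buf are illustrative, not the VF driver's code):

	int total = first_rsp_len;	/* length taken from the first response word */
	int copied = 0;

	while (copied < total) {
		int chunk = min(total - copied, OCTEP_PFVF_MBOX_MAX_DATA_SIZE);

		cmd.s_data.frag = OCTEP_PFVF_MBOX_MORE_FRAG_FLAG;
		send_mbox_cmd(cmd, &rsp);		/* hypothetical helper */
		memcpy(buf + copied, rsp.s_data.data, chunk);
		copied += chunk;
	}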
+
+void octep_pfvf_notify(struct octep_device *oct, struct octep_ctrl_mbox_msg *msg)
+{
+ union octep_pfvf_mbox_word notif = { 0 };
+ struct octep_ctrl_net_f2h_req *req;
+
+ req = (struct octep_ctrl_net_f2h_req *)msg->sg_list[0].msg;
+ switch (req->hdr.s.cmd) {
+ case OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS:
+ notif.s_link_status.opcode = OCTEP_PFVF_MBOX_NOTIF_LINK_STATUS;
+ notif.s_link_status.status = req->link.state;
+ break;
+ default:
+ pr_info("Unknown mbox notif for vf: %u\n",
+ req->hdr.s.cmd);
+ return;
+ }
+
+ notif.s.type = OCTEP_PFVF_MBOX_TYPE_CMD;
+ octep_send_notification(oct, msg->hdr.s.vf_idx, notif);
+}
+
+void octep_pfvf_mbox_work(struct work_struct *work)
+{
+ struct octep_pfvf_mbox_wk *wk = container_of(work, struct octep_pfvf_mbox_wk, work);
+ union octep_pfvf_mbox_word cmd = { 0 };
+ union octep_pfvf_mbox_word rsp = { 0 };
+ struct octep_mbox *mbox = NULL;
+ struct octep_device *oct = NULL;
+ int vf_id;
+
+ mbox = (struct octep_mbox *)wk->ctxptr;
+ oct = (struct octep_device *)mbox->oct;
+ vf_id = mbox->vf_id;
+
+ mutex_lock(&mbox->lock);
+ cmd.u64 = readq(mbox->vf_pf_data_reg);
+ rsp.u64 = 0;
+
+ switch (cmd.s.opcode) {
+ case OCTEP_PFVF_MBOX_CMD_VERSION:
+ octep_pfvf_validate_version(oct, vf_id, cmd, &rsp);
+ break;
+ case OCTEP_PFVF_MBOX_CMD_GET_LINK_STATUS:
+ octep_pfvf_get_link_status(oct, vf_id, cmd, &rsp);
+ break;
+ case OCTEP_PFVF_MBOX_CMD_SET_LINK_STATUS:
+ octep_pfvf_set_link_status(oct, vf_id, cmd, &rsp);
+ break;
+ case OCTEP_PFVF_MBOX_CMD_SET_RX_STATE:
+ octep_pfvf_set_rx_state(oct, vf_id, cmd, &rsp);
+ break;
+ case OCTEP_PFVF_MBOX_CMD_SET_MTU:
+ octep_pfvf_set_mtu(oct, vf_id, cmd, &rsp);
+ break;
+ case OCTEP_PFVF_MBOX_CMD_SET_MAC_ADDR:
+ octep_pfvf_set_mac_addr(oct, vf_id, cmd, &rsp);
+ break;
+ case OCTEP_PFVF_MBOX_CMD_GET_MAC_ADDR:
+ octep_pfvf_get_mac_addr(oct, vf_id, cmd, &rsp);
+ break;
+ case OCTEP_PFVF_MBOX_CMD_GET_LINK_INFO:
+ case OCTEP_PFVF_MBOX_CMD_GET_STATS:
+ octep_pfvf_pf_get_data(oct, mbox, vf_id, cmd, &rsp);
+ break;
+ case OCTEP_PFVF_MBOX_CMD_GET_MTU:
+ octep_pfvf_get_mtu(oct, vf_id, cmd, &rsp);
+ break;
+ case OCTEP_PFVF_MBOX_CMD_DEV_REMOVE:
+ octep_pfvf_dev_remove(oct, vf_id, cmd, &rsp);
+ break;
+ case OCTEP_PFVF_MBOX_CMD_GET_FW_INFO:
+ octep_pfvf_get_fw_info(oct, vf_id, cmd, &rsp);
+ break;
+ case OCTEP_PFVF_MBOX_CMD_SET_OFFLOADS:
+ octep_pfvf_set_offloads(oct, vf_id, cmd, &rsp);
+ break;
+ default:
+ dev_err(&oct->pdev->dev, "PF-VF mailbox: invalid opcode %d\n", cmd.s.opcode);
+ rsp.s.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ break;
+ }
+ writeq(rsp.u64, mbox->vf_pf_data_reg);
+ mutex_unlock(&mbox->lock);
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h
new file mode 100644
index 0000000000..0dc6eead29
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_PFVF_MBOX_H_
+#define _OCTEP_PFVF_MBOX_H_
+
+/* VF flags */
+#define OCTEON_PFVF_FLAG_MAC_SET_BY_PF BIT_ULL(0) /* PF has set VF MAC address */
+#define OCTEON_SDP_16K_HW_FRS 16380UL
+#define OCTEON_SDP_64K_HW_FRS 65531UL
+
+/* When a new command is implemented, the PF mailbox version should be bumped.
+ */
+enum octep_pfvf_mbox_version {
+ OCTEP_PFVF_MBOX_VERSION_V0,
+ OCTEP_PFVF_MBOX_VERSION_V1,
+ OCTEP_PFVF_MBOX_VERSION_V2,
+};
+
+#define OCTEP_PFVF_MBOX_VERSION_CURRENT OCTEP_PFVF_MBOX_VERSION_V2
+
+enum octep_pfvf_mbox_opcode {
+ OCTEP_PFVF_MBOX_CMD_VERSION,
+ OCTEP_PFVF_MBOX_CMD_SET_MTU,
+ OCTEP_PFVF_MBOX_CMD_SET_MAC_ADDR,
+ OCTEP_PFVF_MBOX_CMD_GET_MAC_ADDR,
+ OCTEP_PFVF_MBOX_CMD_GET_LINK_INFO,
+ OCTEP_PFVF_MBOX_CMD_GET_STATS,
+ OCTEP_PFVF_MBOX_CMD_SET_RX_STATE,
+ OCTEP_PFVF_MBOX_CMD_SET_LINK_STATUS,
+ OCTEP_PFVF_MBOX_CMD_GET_LINK_STATUS,
+ OCTEP_PFVF_MBOX_CMD_GET_MTU,
+ OCTEP_PFVF_MBOX_CMD_DEV_REMOVE,
+ OCTEP_PFVF_MBOX_CMD_GET_FW_INFO,
+ OCTEP_PFVF_MBOX_CMD_SET_OFFLOADS,
+ OCTEP_PFVF_MBOX_NOTIF_LINK_STATUS,
+ OCTEP_PFVF_MBOX_CMD_MAX,
+};
+
+enum octep_pfvf_mbox_word_type {
+ OCTEP_PFVF_MBOX_TYPE_CMD,
+ OCTEP_PFVF_MBOX_TYPE_RSP_ACK,
+ OCTEP_PFVF_MBOX_TYPE_RSP_NACK,
+};
+
+enum octep_pfvf_mbox_cmd_status {
+ OCTEP_PFVF_MBOX_CMD_STATUS_NOT_SETUP = 1,
+ OCTEP_PFVF_MBOX_CMD_STATUS_TIMEDOUT = 2,
+ OCTEP_PFVF_MBOX_CMD_STATUS_NACK = 3,
+ OCTEP_PFVF_MBOX_CMD_STATUS_BUSY = 4
+};
+
+enum octep_pfvf_mbox_state {
+ OCTEP_PFVF_MBOX_STATE_IDLE = 0,
+ OCTEP_PFVF_MBOX_STATE_BUSY = 1,
+};
+
+enum octep_pfvf_link_status {
+ OCTEP_PFVF_LINK_STATUS_DOWN,
+ OCTEP_PFVF_LINK_STATUS_UP,
+};
+
+enum octep_pfvf_link_speed {
+ OCTEP_PFVF_LINK_SPEED_NONE,
+ OCTEP_PFVF_LINK_SPEED_1000,
+ OCTEP_PFVF_LINK_SPEED_10000,
+ OCTEP_PFVF_LINK_SPEED_25000,
+ OCTEP_PFVF_LINK_SPEED_40000,
+ OCTEP_PFVF_LINK_SPEED_50000,
+ OCTEP_PFVF_LINK_SPEED_100000,
+ OCTEP_PFVF_LINK_SPEED_LAST,
+};
+
+enum octep_pfvf_link_duplex {
+ OCTEP_PFVF_LINK_HALF_DUPLEX,
+ OCTEP_PFVF_LINK_FULL_DUPLEX,
+};
+
+enum octep_pfvf_link_autoneg {
+ OCTEP_PFVF_LINK_AUTONEG,
+ OCTEP_PFVF_LINK_FIXED,
+};
+
+#define OCTEP_PFVF_MBOX_TIMEOUT_MS 500
+#define OCTEP_PFVF_MBOX_MAX_RETRIES 2
+#define OCTEP_PFVF_MBOX_MAX_DATA_SIZE 6
+#define OCTEP_PFVF_MBOX_MORE_FRAG_FLAG 1
+#define OCTEP_PFVF_MBOX_WRITE_WAIT_TIME msecs_to_jiffies(1)
+
+union octep_pfvf_mbox_word {
+ u64 u64;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:6;
+ u64 data:48;
+ } s;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 frag:1;
+ u64 rsvd:5;
+ u8 data[6];
+ } s_data;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:6;
+ u64 version:48;
+ } s_version;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:6;
+ u8 mac_addr[6];
+ } s_set_mac;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:6;
+ u64 mtu:48;
+ } s_set_mtu;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:6;
+ u64 mtu:48;
+ } s_get_mtu;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 state:1;
+ u64 rsvd:53;
+ } s_link_state;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 status:1;
+ u64 rsvd:53;
+ } s_link_status;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 pkind:8;
+ u64 fsz:8;
+ u64 rx_ol_flags:16;
+ u64 tx_ol_flags:16;
+ u64 rsvd:6;
+ } s_fw_info;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:22;
+ u64 rx_ol_flags:16;
+ u64 tx_ol_flags:16;
+ } s_offloads;
+} __packed;
+
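Every command travels as one 64-bit word; a request is built by filling the view that matches the opcode and writing the raw u64. For example, a SET_MTU request (the register pointer is whichever side's data register applies):

	union octep_pfvf_mbox_word w = { .u64 = 0 };

	w.s_set_mtu.opcode = OCTEP_PFVF_MBOX_CMD_SET_MTU;
	w.s_set_mtu.type = OCTEP_PFVF_MBOX_TYPE_CMD;
	w.s_set_mtu.mtu = 9000;
	writeq(w.u64, mbox->vf_pf_data_reg);	/* VF-to-PF data register */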
+void octep_pfvf_mbox_work(struct work_struct *work);
+int octep_setup_pfvf_mbox(struct octep_device *oct);
+void octep_delete_pfvf_mbox(struct octep_device *oct);
+void octep_pfvf_notify(struct octep_device *oct, struct octep_ctrl_mbox_msg *msg);
+#endif
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
index 0a43983e91..ca473502d7 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
@@ -208,6 +208,9 @@
#define CN93_SDP_R_MBOX_PF_VF_INT_START 0x10220
#define CN93_SDP_R_MBOX_VF_PF_DATA_START 0x10230
+#define CN93_SDP_MBOX_VF_PF_DATA_START 0x24000
+#define CN93_SDP_MBOX_PF_VF_DATA_START 0x22000
+
#define CN93_SDP_R_MBOX_PF_VF_DATA(ring) \
(CN93_SDP_R_MBOX_PF_VF_DATA_START + ((ring) * CN93_RING_OFFSET))
@@ -217,6 +220,12 @@
#define CN93_SDP_R_MBOX_VF_PF_DATA(ring) \
(CN93_SDP_R_MBOX_VF_PF_DATA_START + ((ring) * CN93_RING_OFFSET))
+#define CN93_SDP_MBOX_VF_PF_DATA(ring) \
+ (CN93_SDP_MBOX_VF_PF_DATA_START + ((ring) * CN93_EPVF_RING_OFFSET))
+
+#define CN93_SDP_MBOX_PF_VF_DATA(ring) \
+ (CN93_SDP_MBOX_PF_VF_DATA_START + ((ring) * CN93_EPVF_RING_OFFSET))
+
/* ##################### Interrupt Registers ########################## */
#define CN93_SDP_R_ERR_TYPE_START 0x10400
@@ -362,6 +371,10 @@
#define CN93_SDP_MAC_PF_RING_CTL_SRN(val) (((val) >> 8) & 0xFF)
#define CN93_SDP_MAC_PF_RING_CTL_RPPF(val) (((val) >> 16) & 0x3F)
+#define CN98_SDP_MAC_PF_RING_CTL_NPFS(val) (((val) >> 48) & 0xF)
+#define CN98_SDP_MAC_PF_RING_CTL_SRN(val) ((val) & 0xFF)
+#define CN98_SDP_MAC_PF_RING_CTL_RPPF(val) (((val) >> 32) & 0x3F)
+
/* Number of non-queue interrupts in CN93xx */
#define CN93_NUM_NON_IOQ_INTR 16
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cnxk_pf.h b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cnxk_pf.h
new file mode 100644
index 0000000000..e637d7c822
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cnxk_pf.h
@@ -0,0 +1,416 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_REGS_CNXK_PF_H_
+#define _OCTEP_REGS_CNXK_PF_H_
+
+/* ############################ RST ######################### */
+#define CNXK_RST_BOOT 0x000087E006001600ULL
+#define CNXK_RST_CHIP_DOMAIN_W1S 0x000087E006001810ULL
+#define CNXK_RST_CORE_DOMAIN_W1S 0x000087E006001820ULL
+#define CNXK_RST_CORE_DOMAIN_W1C 0x000087E006001828ULL
+
+#define CNXK_CONFIG_XPANSION_BAR 0x38
+#define CNXK_CONFIG_PCIE_CAP 0x70
+#define CNXK_CONFIG_PCIE_DEVCAP 0x74
+#define CNXK_CONFIG_PCIE_DEVCTL 0x78
+#define CNXK_CONFIG_PCIE_LINKCAP 0x7C
+#define CNXK_CONFIG_PCIE_LINKCTL 0x80
+#define CNXK_CONFIG_PCIE_SLOTCAP 0x84
+#define CNXK_CONFIG_PCIE_SLOTCTL 0x88
+
+#define CNXK_PCIE_SRIOV_FDL 0x188 /* 0x98 */
+#define CNXK_PCIE_SRIOV_FDL_BIT_POS 0x10
+#define CNXK_PCIE_SRIOV_FDL_MASK 0xFF
+
+#define CNXK_CONFIG_PCIE_FLTMSK 0x720
+
+/* ################# Offsets of RING, EPF, MAC ######################### */
+#define CNXK_RING_OFFSET (0x1ULL << 17)
+#define CNXK_EPF_OFFSET (0x1ULL << 25)
+#define CNXK_MAC_OFFSET (0x1ULL << 4)
+#define CNXK_BIT_ARRAY_OFFSET (0x1ULL << 4)
+#define CNXK_EPVF_RING_OFFSET (0x1ULL << 4)
+
+/* ################# Scratch Registers ######################### */
+#define CNXK_SDP_EPF_SCRATCH 0x209E0
+
+/* ################# Window Registers ######################### */
+#define CNXK_SDP_WIN_WR_ADDR64 0x20000
+#define CNXK_SDP_WIN_RD_ADDR64 0x20010
+#define CNXK_SDP_WIN_WR_DATA64 0x20020
+#define CNXK_SDP_WIN_WR_MASK_REG 0x20030
+#define CNXK_SDP_WIN_RD_DATA64 0x20040
+
+#define CNXK_SDP_MAC_NUMBER 0x2C100
+
+/* ################# Global Privileged registers ######################### */
+#define CNXK_SDP_EPF_RINFO 0x209F0
+
+#define CNXK_SDP_EPF_RINFO_SRN(val) ((val) & 0x7F)
+#define CNXK_SDP_EPF_RINFO_RPVF(val) (((val) >> 32) & 0xF)
+#define CNXK_SDP_EPF_RINFO_NVFS(val) (((val) >> 48) & 0x7F)
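A sketch of recovering the ring layout from CNXK_SDP_EPF_RINFO (mmio_base stands in for the mapped BAR and is an assumption here):

	u64 rinfo = readq(mmio_base + CNXK_SDP_EPF_RINFO);
	u32 srn  = CNXK_SDP_EPF_RINFO_SRN(rinfo);	/* starting ring number */
	u32 rpvf = CNXK_SDP_EPF_RINFO_RPVF(rinfo);	/* rings per VF */
	u32 nvfs = CNXK_SDP_EPF_RINFO_NVFS(rinfo);	/* number of VFs */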
+
+/* SDP Function select */
+#define CNXK_SDP_FUNC_SEL_EPF_BIT_POS 7
+#define CNXK_SDP_FUNC_SEL_FUNC_BIT_POS 0
+
+/* ##### RING IN (Into device from PCI: Tx Ring) REGISTERS #### */
+#define CNXK_SDP_R_IN_CONTROL_START 0x10000
+#define CNXK_SDP_R_IN_ENABLE_START 0x10010
+#define CNXK_SDP_R_IN_INSTR_BADDR_START 0x10020
+#define CNXK_SDP_R_IN_INSTR_RSIZE_START 0x10030
+#define CNXK_SDP_R_IN_INSTR_DBELL_START 0x10040
+#define CNXK_SDP_R_IN_CNTS_START 0x10050
+#define CNXK_SDP_R_IN_INT_LEVELS_START 0x10060
+#define CNXK_SDP_R_IN_PKT_CNT_START 0x10080
+#define CNXK_SDP_R_IN_BYTE_CNT_START 0x10090
+
+#define CNXK_SDP_R_IN_CONTROL(ring) \
+ (CNXK_SDP_R_IN_CONTROL_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_IN_ENABLE(ring) \
+ (CNXK_SDP_R_IN_ENABLE_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_IN_INSTR_BADDR(ring) \
+ (CNXK_SDP_R_IN_INSTR_BADDR_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_IN_INSTR_RSIZE(ring) \
+ (CNXK_SDP_R_IN_INSTR_RSIZE_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_IN_INSTR_DBELL(ring) \
+ (CNXK_SDP_R_IN_INSTR_DBELL_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_IN_CNTS(ring) \
+ (CNXK_SDP_R_IN_CNTS_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_IN_INT_LEVELS(ring) \
+ (CNXK_SDP_R_IN_INT_LEVELS_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_IN_PKT_CNT(ring) \
+ (CNXK_SDP_R_IN_PKT_CNT_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_IN_BYTE_CNT(ring) \
+ (CNXK_SDP_R_IN_BYTE_CNT_START + ((ring) * CNXK_RING_OFFSET))
+
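Per-ring registers are spaced CNXK_RING_OFFSET (1 << 17 = 128 KB) apart, so each macro resolves to base + ring * stride. For example:

	/* CNXK_SDP_R_IN_INSTR_DBELL(5)
	 *   = 0x10040 + 5 * 0x20000
	 *   = 0xB0040
	 */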
+/* Rings per Virtual Function */
+#define CNXK_R_IN_CTL_RPVF_MASK (0xF)
+#define CNXK_R_IN_CTL_RPVF_POS (48)
+
+/* Number of instructions to be read in one MAC read request;
+ * set to the max value (4).
+ */
+#define CNXK_R_IN_CTL_IDLE (0x1ULL << 28)
+#define CNXK_R_IN_CTL_RDSIZE (0x3ULL << 25)
+#define CNXK_R_IN_CTL_IS_64B (0x1ULL << 24)
+#define CNXK_R_IN_CTL_D_NSR (0x1ULL << 8)
+#define CNXK_R_IN_CTL_D_ESR (0x1ULL << 6)
+#define CNXK_R_IN_CTL_D_ROR (0x1ULL << 5)
+#define CNXK_R_IN_CTL_NSR (0x1ULL << 3)
+#define CNXK_R_IN_CTL_ESR (0x1ULL << 1)
+#define CNXK_R_IN_CTL_ROR (0x1ULL << 0)
+
+#define CNXK_R_IN_CTL_MASK (CNXK_R_IN_CTL_RDSIZE | CNXK_R_IN_CTL_IS_64B)
+
+/* ##### RING OUT (out from device to PCI host: Rx Ring) REGISTERS #### */
+#define CNXK_SDP_R_OUT_CNTS_START 0x10100
+#define CNXK_SDP_R_OUT_INT_LEVELS_START 0x10110
+#define CNXK_SDP_R_OUT_SLIST_BADDR_START 0x10120
+#define CNXK_SDP_R_OUT_SLIST_RSIZE_START 0x10130
+#define CNXK_SDP_R_OUT_SLIST_DBELL_START 0x10140
+#define CNXK_SDP_R_OUT_CONTROL_START 0x10150
+#define CNXK_SDP_R_OUT_WMARK_START 0x10160
+#define CNXK_SDP_R_OUT_ENABLE_START 0x10170
+#define CNXK_SDP_R_OUT_PKT_CNT_START 0x10180
+#define CNXK_SDP_R_OUT_BYTE_CNT_START 0x10190
+
+#define CNXK_SDP_R_OUT_CONTROL(ring) \
+ (CNXK_SDP_R_OUT_CONTROL_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_ENABLE(ring) \
+ (CNXK_SDP_R_OUT_ENABLE_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_SLIST_BADDR(ring) \
+ (CNXK_SDP_R_OUT_SLIST_BADDR_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_SLIST_RSIZE(ring) \
+ (CNXK_SDP_R_OUT_SLIST_RSIZE_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_SLIST_DBELL(ring) \
+ (CNXK_SDP_R_OUT_SLIST_DBELL_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_WMARK(ring) \
+ (CNXK_SDP_R_OUT_WMARK_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_CNTS(ring) \
+ (CNXK_SDP_R_OUT_CNTS_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_INT_LEVELS(ring) \
+ (CNXK_SDP_R_OUT_INT_LEVELS_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_PKT_CNT(ring) \
+ (CNXK_SDP_R_OUT_PKT_CNT_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_BYTE_CNT(ring) \
+ (CNXK_SDP_R_OUT_BYTE_CNT_START + ((ring) * CNXK_RING_OFFSET))
+
+/*------------------ R_OUT Masks ----------------*/
+#define CNXK_R_OUT_INT_LEVELS_BMODE BIT_ULL(63)
+#define CNXK_R_OUT_INT_LEVELS_TIMET (32)
+
+#define CNXK_R_OUT_CTL_IDLE BIT_ULL(40)
+#define CNXK_R_OUT_CTL_ES_I BIT_ULL(34)
+#define CNXK_R_OUT_CTL_NSR_I BIT_ULL(33)
+#define CNXK_R_OUT_CTL_ROR_I BIT_ULL(32)
+#define CNXK_R_OUT_CTL_ES_D BIT_ULL(30)
+#define CNXK_R_OUT_CTL_NSR_D BIT_ULL(29)
+#define CNXK_R_OUT_CTL_ROR_D BIT_ULL(28)
+#define CNXK_R_OUT_CTL_ES_P BIT_ULL(26)
+#define CNXK_R_OUT_CTL_NSR_P BIT_ULL(25)
+#define CNXK_R_OUT_CTL_ROR_P BIT_ULL(24)
+#define CNXK_R_OUT_CTL_IMODE BIT_ULL(23)
+
+/* ############### Interrupt Moderation Registers ############### */
+#define CNXK_SDP_R_IN_INT_MDRT_CTL0_START 0x10280
+#define CNXK_SDP_R_IN_INT_MDRT_CTL1_START 0x102A0
+#define CNXK_SDP_R_IN_INT_MDRT_DBG_START 0x102C0
+
+#define CNXK_SDP_R_OUT_INT_MDRT_CTL0_START 0x10380
+#define CNXK_SDP_R_OUT_INT_MDRT_CTL1_START 0x103A0
+#define CNXK_SDP_R_OUT_INT_MDRT_DBG_START 0x103C0
+
+#define CNXK_SDP_R_MBOX_ISM_START 0x10500
+#define CNXK_SDP_R_OUT_CNTS_ISM_START 0x10510
+#define CNXK_SDP_R_IN_CNTS_ISM_START 0x10520
+
+#define CNXK_SDP_R_IN_INT_MDRT_CTL0(ring) \
+ (CNXK_SDP_R_IN_INT_MDRT_CTL0_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_IN_INT_MDRT_CTL1(ring) \
+ (CNXK_SDP_R_IN_INT_MDRT_CTL1_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_IN_INT_MDRT_DBG(ring) \
+ (CNXK_SDP_R_IN_INT_MDRT_DBG_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_INT_MDRT_CTL0(ring) \
+ (CNXK_SDP_R_OUT_INT_MDRT_CTL0_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_INT_MDRT_CTL1(ring) \
+ (CNXK_SDP_R_OUT_INT_MDRT_CTL1_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_INT_MDRT_DBG(ring) \
+ (CNXK_SDP_R_OUT_INT_MDRT_DBG_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_MBOX_ISM(ring) \
+ (CNXK_SDP_R_MBOX_ISM_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_CNTS_ISM(ring) \
+ (CNXK_SDP_R_OUT_CNTS_ISM_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_IN_CNTS_ISM(ring) \
+ (CNXK_SDP_R_IN_CNTS_ISM_START + ((ring) * CNXK_RING_OFFSET))
+
+/* ##################### Mail Box Registers ########################## */
+/* INT register for VF. When an MBOX write from the PF happens to a VF,
+ * the corresponding bit is set in this register as well as in the
+ * PF_VF_INT register.
+ *
+ * This is an RO register; the interrupt can be cleared by writing 1 to
+ * PF_VF_INT.
+ */
+/* Basically the first three are from PF to VF; the last one is data from VF to PF. */
+#define CNXK_SDP_R_MBOX_PF_VF_DATA_START 0x10210
+#define CNXK_SDP_R_MBOX_PF_VF_INT_START 0x10220
+#define CNXK_SDP_R_MBOX_VF_PF_DATA_START 0x10230
+
+#define CNXK_SDP_MBOX_VF_PF_DATA_START 0x24000
+#define CNXK_SDP_MBOX_PF_VF_DATA_START 0x22000
+
+#define CNXK_SDP_R_MBOX_PF_VF_DATA(ring) \
+ (CNXK_SDP_R_MBOX_PF_VF_DATA_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_MBOX_PF_VF_INT(ring) \
+ (CNXK_SDP_R_MBOX_PF_VF_INT_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_MBOX_VF_PF_DATA(ring) \
+ (CNXK_SDP_R_MBOX_VF_PF_DATA_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_MBOX_VF_PF_DATA(ring) \
+ (CNXK_SDP_MBOX_VF_PF_DATA_START + ((ring) * CNXK_EPVF_RING_OFFSET))
+
+#define CNXK_SDP_MBOX_PF_VF_DATA(ring) \
+ (CNXK_SDP_MBOX_PF_VF_DATA_START + ((ring) * CNXK_EPVF_RING_OFFSET))
+
+/* ##################### Interrupt Registers ########################## */
+#define CNXK_SDP_R_ERR_TYPE_START 0x10400
+
+#define CNXK_SDP_R_ERR_TYPE(ring) \
+ (CNXK_SDP_R_ERR_TYPE_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_EPF_MBOX_RINT_START 0x20100
+#define CNXK_SDP_EPF_MBOX_RINT_W1S_START 0x20120
+#define CNXK_SDP_EPF_MBOX_RINT_ENA_W1C_START 0x20140
+#define CNXK_SDP_EPF_MBOX_RINT_ENA_W1S_START 0x20160
+
+#define CNXK_SDP_EPF_VFIRE_RINT_START 0x20180
+#define CNXK_SDP_EPF_VFIRE_RINT_W1S_START 0x201A0
+#define CNXK_SDP_EPF_VFIRE_RINT_ENA_W1C_START 0x201C0
+#define CNXK_SDP_EPF_VFIRE_RINT_ENA_W1S_START 0x201E0
+
+#define CNXK_SDP_EPF_IRERR_RINT 0x20200
+#define CNXK_SDP_EPF_IRERR_RINT_W1S 0x20210
+#define CNXK_SDP_EPF_IRERR_RINT_ENA_W1C 0x20220
+#define CNXK_SDP_EPF_IRERR_RINT_ENA_W1S 0x20230
+
+#define CNXK_SDP_EPF_VFORE_RINT_START 0x20240
+#define CNXK_SDP_EPF_VFORE_RINT_W1S_START 0x20260
+#define CNXK_SDP_EPF_VFORE_RINT_ENA_W1C_START 0x20280
+#define CNXK_SDP_EPF_VFORE_RINT_ENA_W1S_START 0x202A0
+
+#define CNXK_SDP_EPF_ORERR_RINT 0x20320
+#define CNXK_SDP_EPF_ORERR_RINT_W1S 0x20330
+#define CNXK_SDP_EPF_ORERR_RINT_ENA_W1C 0x20340
+#define CNXK_SDP_EPF_ORERR_RINT_ENA_W1S 0x20350
+
+#define CNXK_SDP_EPF_OEI_RINT 0x20400
+#define CNXK_SDP_EPF_OEI_RINT_W1S 0x20500
+#define CNXK_SDP_EPF_OEI_RINT_ENA_W1C 0x20600
+#define CNXK_SDP_EPF_OEI_RINT_ENA_W1S 0x20700
+
+#define CNXK_SDP_EPF_DMA_RINT 0x20800
+#define CNXK_SDP_EPF_DMA_RINT_W1S 0x20810
+#define CNXK_SDP_EPF_DMA_RINT_ENA_W1C 0x20820
+#define CNXK_SDP_EPF_DMA_RINT_ENA_W1S 0x20830
+
+#define CNXK_SDP_EPF_DMA_INT_LEVEL_START 0x20840
+#define CNXK_SDP_EPF_DMA_CNT_START 0x20860
+#define CNXK_SDP_EPF_DMA_TIM_START 0x20880
+
+#define CNXK_SDP_EPF_MISC_RINT 0x208A0
+#define CNXK_SDP_EPF_MISC_RINT_W1S 0x208B0
+#define CNXK_SDP_EPF_MISC_RINT_ENA_W1C 0x208C0
+#define CNXK_SDP_EPF_MISC_RINT_ENA_W1S 0x208D0
+
+#define CNXK_SDP_EPF_DMA_VF_RINT_START 0x208E0
+#define CNXK_SDP_EPF_DMA_VF_RINT_W1S_START 0x20900
+#define CNXK_SDP_EPF_DMA_VF_RINT_ENA_W1C_START 0x20920
+#define CNXK_SDP_EPF_DMA_VF_RINT_ENA_W1S_START 0x20940
+
+#define CNXK_SDP_EPF_PP_VF_RINT_START 0x20960
+#define CNXK_SDP_EPF_PP_VF_RINT_W1S_START 0x20980
+#define CNXK_SDP_EPF_PP_VF_RINT_ENA_W1C_START 0x209A0
+#define CNXK_SDP_EPF_PP_VF_RINT_ENA_W1S_START 0x209C0
+
+#define CNXK_SDP_EPF_MBOX_RINT(index) \
+ (CNXK_SDP_EPF_MBOX_RINT_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_MBOX_RINT_W1S(index) \
+ (CNXK_SDP_EPF_MBOX_RINT_W1S_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_MBOX_RINT_ENA_W1C(index) \
+ (CNXK_SDP_EPF_MBOX_RINT_ENA_W1C_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_MBOX_RINT_ENA_W1S(index) \
+ (CNXK_SDP_EPF_MBOX_RINT_ENA_W1S_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+
+#define CNXK_SDP_EPF_VFIRE_RINT(index) \
+ (CNXK_SDP_EPF_VFIRE_RINT_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_VFIRE_RINT_W1S(index) \
+ (CNXK_SDP_EPF_VFIRE_RINT_W1S_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_VFIRE_RINT_ENA_W1C(index) \
+ (CNXK_SDP_EPF_VFIRE_RINT_ENA_W1C_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_VFIRE_RINT_ENA_W1S(index) \
+ (CNXK_SDP_EPF_VFIRE_RINT_ENA_W1S_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+
+#define CNXK_SDP_EPF_VFORE_RINT(index) \
+ (CNXK_SDP_EPF_VFORE_RINT_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_VFORE_RINT_W1S(index) \
+ (CNXK_SDP_EPF_VFORE_RINT_W1S_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_VFORE_RINT_ENA_W1C(index) \
+ (CNXK_SDP_EPF_VFORE_RINT_ENA_W1C_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_VFORE_RINT_ENA_W1S(index) \
+ (CNXK_SDP_EPF_VFORE_RINT_ENA_W1S_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+
+#define CNXK_SDP_EPF_DMA_VF_RINT(index) \
+ (CNXK_SDP_EPF_DMA_VF_RINT_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_DMA_VF_RINT_W1S(index) \
+ (CNXK_SDP_EPF_DMA_VF_RINT_W1S_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_DMA_VF_RINT_ENA_W1C(index) \
+ (CNXK_SDP_EPF_DMA_VF_RINT_ENA_W1C_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_DMA_VF_RINT_ENA_W1S(index) \
+ (CNXK_SDP_EPF_DMA_VF_RINT_ENA_W1S_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+
+#define CNXK_SDP_EPF_PP_VF_RINT(index) \
+ (CNXK_SDP_EPF_PP_VF_RINT_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_PP_VF_RINT_W1S(index) \
+ (CNXK_SDP_EPF_PP_VF_RINT_W1S_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_PP_VF_RINT_ENA_W1C(index) \
+ (CNXK_SDP_EPF_PP_VF_RINT_ENA_W1C_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_PP_VF_RINT_ENA_W1S(index) \
+ (CNXK_SDP_EPF_PP_VF_RINT_ENA_W1S_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+
+/*------------------ Interrupt Masks ----------------*/
+#define CNXK_INTR_R_SEND_ISM BIT_ULL(63)
+#define CNXK_INTR_R_OUT_INT BIT_ULL(62)
+#define CNXK_INTR_R_IN_INT BIT_ULL(61)
+#define CNXK_INTR_R_MBOX_INT BIT_ULL(60)
+#define CNXK_INTR_R_RESEND BIT_ULL(59)
+#define CNXK_INTR_R_CLR_TIM BIT_ULL(58)
+
+/* ####################### Ring Mapping Registers ################################## */
+#define CNXK_SDP_EPVF_RING_START 0x26000
+#define CNXK_SDP_IN_RING_TB_MAP_START 0x28000
+#define CNXK_SDP_IN_RATE_LIMIT_START 0x2A000
+#define CNXK_SDP_MAC_PF_RING_CTL_START 0x2C000
+
+#define CNXK_SDP_EPVF_RING(ring) \
+ (CNXK_SDP_EPVF_RING_START + ((ring) * CNXK_EPVF_RING_OFFSET))
+#define CNXK_SDP_IN_RING_TB_MAP(ring) \
+ (CNXK_SDP_IN_RING_TB_MAP_START + ((ring) * CNXK_EPVF_RING_OFFSET))
+#define CNXK_SDP_IN_RATE_LIMIT(ring) \
+ (CNXK_SDP_IN_RATE_LIMIT_START + ((ring) * CNXK_EPVF_RING_OFFSET))
+#define CNXK_SDP_MAC_PF_RING_CTL(mac) \
+ (CNXK_SDP_MAC_PF_RING_CTL_START + ((mac) * CNXK_MAC_OFFSET))
+
+#define CNXK_SDP_MAC_PF_RING_CTL_NPFS(val) ((val) & 0x3)
+#define CNXK_SDP_MAC_PF_RING_CTL_SRN(val) (((val) >> 8) & 0x7F)
+#define CNXK_SDP_MAC_PF_RING_CTL_RPPF(val) (((val) >> 16) & 0x3F)
+
+/* Number of non-queue interrupts in CNXKxx */
+#define CNXK_NUM_NON_IOQ_INTR 32
+
+/* bit 0 for control mbox interrupt */
+#define CNXK_SDP_EPF_OEI_RINT_DATA_BIT_MBOX BIT_ULL(0)
+/* bit 1 for firmware heartbeat interrupt */
+#define CNXK_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT BIT_ULL(1)
+#define FW_STATUS_RUNNING 2ULL
+#define CNXK_PEMX_PFX_CSX_PFCFGX(pem, pf, offset) ({ typeof(offset) _off = (offset); \
+ ((0x8e0000008000 | \
+ (uint64_t)(pem) << 36 | \
+ (pf) << 18 | \
+ ((_off >> 16) & 1) << 16 | \
+ (_off >> 3) << 3) + \
+ (((_off >> 2) & 1) << 2)); \
+ })
+
+/* Register defines for use with CNXK_PEMX_PFX_CSX_PFCFGX */
+#define CNXK_PCIEEP_VSECST_CTL 0x418
+
+#define CNXK_PEM_BAR4_INDEX 7
+#define CNXK_PEM_BAR4_INDEX_SIZE 0x400000ULL
+#define CNXK_PEM_BAR4_INDEX_OFFSET (CNXK_PEM_BAR4_INDEX * CNXK_PEM_BAR4_INDEX_SIZE)
+
+#endif /* _OCTEP_REGS_CNXK_PF_H_ */
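All the per-ring accessors in this header follow one pattern: a fixed base
offset plus the ring index times a stride. A minimal standalone sketch of
that address arithmetic, assuming CNXK_RING_OFFSET (defined elsewhere in
this header) is a 0x10000-byte stride:

    #include <stdint.h>
    #include <stdio.h>

    #define CNXK_RING_OFFSET                0x10000ULL /* assumed stride */
    #define CNXK_SDP_R_IN_INSTR_DBELL_START 0x10040
    #define CNXK_SDP_R_IN_INSTR_DBELL(ring) \
            (CNXK_SDP_R_IN_INSTR_DBELL_START + ((ring) * CNXK_RING_OFFSET))

    int main(void)
    {
            /* Ring 3's Tx doorbell: 0x10040 + 3 * 0x10000 = 0x40040 */
            printf("ring 3 doorbell offset: 0x%llx\n",
                   (unsigned long long)CNXK_SDP_R_IN_INSTR_DBELL(3));
            return 0;
    }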
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
index 3c43f80785..4746a6b258 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
@@ -143,7 +143,7 @@ static int octep_setup_oq(struct octep_device *oct, int q_no)
* additional header is filled in by Octeon after the length field in
* Rx packets. This header contains additional packet information.
*/
- if (oct->caps_enabled)
+ if (oct->conf->fw_info.rx_ol_flags)
oq->max_single_buffer_size -= OCTEP_OQ_RESP_HW_EXT_SIZE;
oq->refill_threshold = CFG_GET_OQ_REFILL_THRESHOLD(oct->conf);
@@ -353,11 +353,13 @@ static int __octep_oq_process_rx(struct octep_device *oct,
struct octep_oq *oq, u16 pkts_to_process)
{
struct octep_oq_resp_hw_ext *resp_hw_ext = NULL;
+ netdev_features_t feat = oq->netdev->features;
struct octep_rx_buffer *buff_info;
struct octep_oq_resp_hw *resp_hw;
u32 pkt, rx_bytes, desc_used;
struct sk_buff *skb;
u16 data_offset;
+ u16 rx_ol_flags;
u32 read_idx;
read_idx = oq->host_read_idx;
@@ -372,7 +374,7 @@ static int __octep_oq_process_rx(struct octep_device *oct,
/* Convert the length field from big-endian to CPU byte order */
buff_info->len = be64_to_cpu(resp_hw->length);
- if (oct->caps_enabled & OCTEP_CAP_RX_CHECKSUM) {
+ if (oct->conf->fw_info.rx_ol_flags) {
/* Extended response header is immediately after
* response header (resp_hw)
*/
@@ -384,11 +386,13 @@ static int __octep_oq_process_rx(struct octep_device *oct,
*/
data_offset = OCTEP_OQ_RESP_HW_SIZE +
OCTEP_OQ_RESP_HW_EXT_SIZE;
+ rx_ol_flags = resp_hw_ext->rx_ol_flags;
} else {
/* Data is immediately after
* Hardware Rx response header.
*/
data_offset = OCTEP_OQ_RESP_HW_SIZE;
+ rx_ol_flags = 0;
}
rx_bytes += buff_info->len;
@@ -444,8 +448,8 @@ static int __octep_oq_process_rx(struct octep_device *oct,
skb->dev = oq->netdev;
skb->protocol = eth_type_trans(skb, skb->dev);
- if (resp_hw_ext &&
- resp_hw_ext->csum_verified == OCTEP_CSUM_VERIFIED)
+ if (feat & NETIF_F_RXCSUM &&
+ OCTEP_RX_CSUM_VERIFIED(rx_ol_flags))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb->ip_summed = CHECKSUM_NONE;
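The checksum decision above reduces to a single predicate: mark the skb
CHECKSUM_UNNECESSARY only when the netdev has RXCSUM enabled and the
hardware set a verified bit in the per-packet rx_ol_flags. A hedged
standalone sketch (the feature bit value is a placeholder, not the real
NETIF_F_RXCSUM):

    #include <stdbool.h>
    #include <stdint.h>

    #define OCTEP_RX_CSUM_IP_VERIFIED (1U << 1)
    #define OCTEP_RX_CSUM_L4_VERIFIED (1U << 2)
    #define OCTEP_RX_CSUM_VERIFIED(f) ((f) & (OCTEP_RX_CSUM_L4_VERIFIED | \
                                              OCTEP_RX_CSUM_IP_VERIFIED))
    #define FEAT_RXCSUM               (1ULL << 2) /* placeholder bit */

    /* True when checksum validation can be skipped for this packet. */
    static bool rx_csum_unnecessary(uint64_t features, uint16_t rx_ol_flags)
    {
            return (features & FEAT_RXCSUM) &&
                   OCTEP_RX_CSUM_VERIFIED(rx_ol_flags);
    }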
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
index 49feae80d7..3b08e2d560 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
@@ -20,13 +20,33 @@ struct octep_oq_desc_hw {
dma_addr_t buffer_ptr;
u64 info_ptr;
};
+
static_assert(sizeof(struct octep_oq_desc_hw) == 16);
#define OCTEP_OQ_DESC_SIZE (sizeof(struct octep_oq_desc_hw))
-#define OCTEP_CSUM_L4_VERIFIED 0x1
-#define OCTEP_CSUM_IP_VERIFIED 0x2
-#define OCTEP_CSUM_VERIFIED (OCTEP_CSUM_L4_VERIFIED | OCTEP_CSUM_IP_VERIFIED)
+/* Rx offload flags */
+#define OCTEP_RX_OFFLOAD_VLAN_STRIP BIT(0)
+#define OCTEP_RX_OFFLOAD_IPV4_CKSUM BIT(1)
+#define OCTEP_RX_OFFLOAD_UDP_CKSUM BIT(2)
+#define OCTEP_RX_OFFLOAD_TCP_CKSUM BIT(3)
+
+#define OCTEP_RX_OFFLOAD_CKSUM (OCTEP_RX_OFFLOAD_IPV4_CKSUM | \
+ OCTEP_RX_OFFLOAD_UDP_CKSUM | \
+ OCTEP_RX_OFFLOAD_TCP_CKSUM)
+
+#define OCTEP_RX_IP_CSUM(flags) ((flags) & \
+ (OCTEP_RX_OFFLOAD_IPV4_CKSUM | \
+ OCTEP_RX_OFFLOAD_TCP_CKSUM | \
+ OCTEP_RX_OFFLOAD_UDP_CKSUM))
+
+/* bit 0 is vlan strip */
+#define OCTEP_RX_CSUM_IP_VERIFIED BIT(1)
+#define OCTEP_RX_CSUM_L4_VERIFIED BIT(2)
+
+#define OCTEP_RX_CSUM_VERIFIED(flags) ((flags) & \
+ (OCTEP_RX_CSUM_L4_VERIFIED | \
+ OCTEP_RX_CSUM_IP_VERIFIED))
/* Extended Response Header in packet data received from Hardware.
* Includes metadata like checksum status.
@@ -35,11 +55,12 @@ static_assert(sizeof(struct octep_oq_desc_hw) == 16);
*/
struct octep_oq_resp_hw_ext {
/* Reserved. */
- u64 reserved:62;
+ u64 rsvd:48;
- /* checksum verified. */
- u64 csum_verified:2;
+ /* offload flags */
+ u16 rx_ol_flags;
};
+
static_assert(sizeof(struct octep_oq_resp_hw_ext) == 8);
#define OCTEP_OQ_RESP_HW_EXT_SIZE (sizeof(struct octep_oq_resp_hw_ext))
@@ -52,6 +73,7 @@ struct octep_oq_resp_hw {
/* The Length of the packet. */
__be64 length;
};
+
static_assert(sizeof(struct octep_oq_resp_hw) == 8);
#define OCTEP_OQ_RESP_HW_SIZE (sizeof(struct octep_oq_resp_hw))
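Note the two bit namespaces introduced here: OCTEP_RX_OFFLOAD_* describes
capabilities negotiated with the firmware, while the OCTEP_RX_CSUM_*_VERIFIED
bits are per-packet status carried in rx_ol_flags. A hedged sketch of the
extended-header layout that the static_assert above guards, with member
placement as on common little-endian ABIs:

    #include <assert.h>
    #include <stdint.h>

    struct oq_resp_hw_ext_sketch {
            uint64_t rsvd:48;       /* bits 0-47, bytes 0-5 */
            uint16_t rx_ol_flags;   /* bytes 6-7: per-packet status bits */
    };

    static_assert(sizeof(struct oq_resp_hw_ext_sketch) == 8,
                  "extended Rx response header must stay 8 bytes");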
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
index d0adb82d65..06851b78aa 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
@@ -21,7 +21,6 @@ static void octep_iq_reset_indices(struct octep_iq *iq)
iq->flush_index = 0;
iq->pkts_processed = 0;
iq->pkt_in_done = 0;
- atomic_set(&iq->instr_pending, 0);
}
/**
@@ -82,7 +81,6 @@ int octep_iq_process_completions(struct octep_iq *iq, u16 budget)
}
iq->pkts_processed += compl_pkts;
- atomic_sub(compl_pkts, &iq->instr_pending);
iq->stats.instr_completed += compl_pkts;
iq->stats.bytes_sent += compl_bytes;
iq->stats.sgentry_sent += compl_sg;
@@ -91,7 +89,7 @@ int octep_iq_process_completions(struct octep_iq *iq, u16 budget)
netdev_tx_completed_queue(iq->netdev_q, compl_pkts, compl_bytes);
if (unlikely(__netif_subqueue_stopped(iq->netdev, iq->q_no)) &&
- ((iq->max_count - atomic_read(&iq->instr_pending)) >
+ (IQ_INSTR_SPACE(iq) >
OCTEP_WAKE_QUEUE_THRESHOLD))
netif_wake_subqueue(iq->netdev, iq->q_no);
return !budget;
@@ -144,7 +142,6 @@ static void octep_iq_free_pending(struct octep_iq *iq)
dev_kfree_skb_any(skb);
}
- atomic_set(&iq->instr_pending, 0);
iq->flush_index = fi;
netdev_tx_reset_queue(netdev_get_tx_queue(iq->netdev, iq->q_no));
}
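IQ_INSTR_SPACE() is defined in octep_main.h, outside this hunk; the point of
the change is that pending work can be derived from the producer and consumer
indices alone, so the separate atomic counter was redundant. A minimal sketch
of that power-of-two ring arithmetic, with assumed field names:

    #include <stdint.h>

    struct iq_sketch {
            uint32_t host_write_index;  /* producer index */
            uint32_t flush_index;       /* consumer index */
            uint32_t max_count;         /* ring size, power of two */
            uint32_t ring_size_mask;    /* max_count - 1 */
    };

    /* Entries submitted but not yet completed; the mask handles wrap. */
    static uint32_t iq_instr_pending(const struct iq_sketch *iq)
    {
            return (iq->host_write_index - iq->flush_index) &
                   iq->ring_size_mask;
    }

    /* Free descriptor slots left in the ring. */
    static uint32_t iq_instr_space(const struct iq_sketch *iq)
    {
            return iq->max_count - iq_instr_pending(iq);
    }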
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
index 86c98b13fc..875a2c3409 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
@@ -36,6 +36,7 @@ struct octep_tx_sglist_desc {
u16 len[4];
dma_addr_t dma_ptr[4];
};
+
static_assert(sizeof(struct octep_tx_sglist_desc) == 40);
/* Each Scatter/Gather entry sent to hardware holds four pointers.
@@ -60,6 +61,18 @@ struct octep_tx_buffer {
/* Hardware interface Tx statistics */
struct octep_iface_tx_stats {
+ /* Total frames sent on the interface */
+ u64 pkts;
+
+ /* Total octets sent on the interface */
+ u64 octs;
+
+ /* Packets sent to a broadcast DMAC */
+ u64 bcst;
+
+ /* Packets sent to the multicast DMAC */
+ u64 mcst;
+
/* Packets dropped due to excessive collisions */
u64 xscol;
@@ -76,12 +89,6 @@ struct octep_iface_tx_stats {
*/
u64 scol;
- /* Total octets sent on the interface */
- u64 octs;
-
- /* Total frames sent on the interface */
- u64 pkts;
-
/* Packets sent with an octet count < 64 */
u64 hist_lt64;
@@ -106,12 +113,6 @@ struct octep_iface_tx_stats {
/* Packets sent with an octet count of > 1518 */
u64 hist_gt1518;
- /* Packets sent to a broadcast DMAC */
- u64 bcst;
-
- /* Packets sent to the multicast DMAC */
- u64 mcst;
-
/* Packets sent that experienced a transmit underflow and were
* truncated
*/
@@ -172,9 +173,6 @@ struct octep_iq {
/* Statistics for this input queue. */
struct octep_iq_stats stats;
- /* This field keeps track of the instructions pending in this queue. */
- atomic_t instr_pending;
-
/* Pointer to the Virtual Base addr of the input ring. */
struct octep_tx_desc_hw *desc_ring;
@@ -240,32 +238,53 @@ struct octep_instr_hdr {
/* Reserved3 */
u64 reserved3:1;
};
+
static_assert(sizeof(struct octep_instr_hdr) == 8);
-/* Hardware Tx completion response header */
-struct octep_instr_resp_hdr {
- /* Request ID */
- u64 rid:16;
+/* Tx offload flags */
+#define OCTEP_TX_OFFLOAD_VLAN_INSERT BIT(0)
+#define OCTEP_TX_OFFLOAD_IPV4_CKSUM BIT(1)
+#define OCTEP_TX_OFFLOAD_UDP_CKSUM BIT(2)
+#define OCTEP_TX_OFFLOAD_TCP_CKSUM BIT(3)
+#define OCTEP_TX_OFFLOAD_SCTP_CKSUM BIT(4)
+#define OCTEP_TX_OFFLOAD_TCP_TSO BIT(5)
+#define OCTEP_TX_OFFLOAD_UDP_TSO BIT(6)
+
+#define OCTEP_TX_OFFLOAD_CKSUM (OCTEP_TX_OFFLOAD_IPV4_CKSUM | \
+ OCTEP_TX_OFFLOAD_UDP_CKSUM | \
+ OCTEP_TX_OFFLOAD_TCP_CKSUM)
+
+#define OCTEP_TX_OFFLOAD_TSO (OCTEP_TX_OFFLOAD_TCP_TSO | \
+ OCTEP_TX_OFFLOAD_UDP_TSO)
+
+#define OCTEP_TX_IP_CSUM(flags) ((flags) & \
+ (OCTEP_TX_OFFLOAD_IPV4_CKSUM | \
+ OCTEP_TX_OFFLOAD_TCP_CKSUM | \
+ OCTEP_TX_OFFLOAD_UDP_CKSUM))
- /* PCIe port to use for response */
- u64 pcie_port:3;
+#define OCTEP_TX_TSO(flags) ((flags) & \
+ (OCTEP_TX_OFFLOAD_TCP_TSO | \
+ OCTEP_TX_OFFLOAD_UDP_TSO))
- /* Scatter indicator 1=scatter */
- u64 scatter:1;
+struct tx_mdata {
- /* Size of Expected result OR no. of entries in scatter list */
- u64 rlenssz:14;
+ /* offload flags */
+ u16 ol_flags;
- /* Desired destination port for result */
- u64 dport:6;
+ /* gso size */
+ u16 gso_size;
- /* Opcode Specific parameters */
- u64 param:8;
+ /* gso flags */
+ u16 gso_segs;
- /* Opcode for the return packet */
- u64 opcode:16;
+ /* reserved */
+ u16 rsvd1;
+
+ /* reserved */
+ u64 rsvd2;
};
-static_assert(sizeof(struct octep_instr_hdr) == 8);
+
+static_assert(sizeof(struct tx_mdata) == 16);
/* 64-byte Tx instruction format.
* Format of instruction for a 64-byte mode input queue.
@@ -284,18 +303,14 @@ struct octep_tx_desc_hw {
struct octep_instr_hdr ih;
u64 ih64;
};
-
- /* Pointer where the response for a RAW mode packet will be written
- * by Octeon.
- */
- u64 rptr;
-
- /* Input Instruction Response Header. */
- struct octep_instr_resp_hdr irh;
-
+ union {
+ u64 txm64[2];
+ struct tx_mdata txm;
+ };
/* Additional headers available in a 64-byte instruction. */
- u64 exhdr[4];
+ u64 exthdr[4];
};
+
static_assert(sizeof(struct octep_tx_desc_hw) == 64);
#define OCTEP_IQ_DESC_SIZE (sizeof(struct octep_tx_desc_hw))
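tx_mdata replaces the old RAW-mode response header with per-packet offload
metadata carried inside the 64-byte instruction. A hedged sketch of filling
it for a TSO packet (the helper and its inputs are illustrative, not the
driver's actual xmit path):

    #include <stdint.h>
    #include <string.h>

    #define OCTEP_TX_OFFLOAD_TCP_CKSUM (1U << 3)
    #define OCTEP_TX_OFFLOAD_TCP_TSO   (1U << 5)

    struct tx_mdata_sketch {
            uint16_t ol_flags;  /* OCTEP_TX_OFFLOAD_* bits */
            uint16_t gso_size;  /* MSS for segmentation */
            uint16_t gso_segs;  /* expected segment count */
            uint16_t rsvd1;
            uint64_t rsvd2;
    };

    /* Request TCP segmentation offload plus inner checksum offload. */
    static void fill_tso_mdata(struct tx_mdata_sketch *txm,
                               uint16_t mss, uint16_t segs)
    {
            memset(txm, 0, sizeof(*txm));
            txm->ol_flags = OCTEP_TX_OFFLOAD_TCP_TSO |
                            OCTEP_TX_OFFLOAD_TCP_CKSUM;
            txm->gso_size = mss;
            txm->gso_segs = segs;
    }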
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 3c0f55b3e4..b86f3224f0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -808,6 +808,11 @@ static int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id,
if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cfg |= rx_pause ? CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK : 0x0;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
index 7d741e3ba8..1e5aa53975 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -452,4 +452,5 @@ const char *otx2_mbox_id2name(u16 id)
EXPORT_SYMBOL(otx2_mbox_id2name);
MODULE_AUTHOR("Marvell.");
+MODULE_DESCRIPTION("Marvell RVU NIC Mbox helpers");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index bd4b9661ee..98e203a0e2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -310,6 +310,13 @@ M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \
nix_bandprof_get_hwinfo_rsp) \
M(NIX_READ_INLINE_IPSEC_CFG, 0x8023, nix_read_inline_ipsec_cfg, \
msg_req, nix_inline_ipsec_cfg) \
+M(NIX_MCAST_GRP_CREATE, 0x802b, nix_mcast_grp_create, nix_mcast_grp_create_req, \
+ nix_mcast_grp_create_rsp) \
+M(NIX_MCAST_GRP_DESTROY, 0x802c, nix_mcast_grp_destroy, nix_mcast_grp_destroy_req, \
+ msg_rsp) \
+M(NIX_MCAST_GRP_UPDATE, 0x802d, nix_mcast_grp_update, \
+ nix_mcast_grp_update_req, \
+ nix_mcast_grp_update_rsp) \
/* MCS mbox IDs (range 0xA000 - 0xBFFF) */ \
M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req, \
mcs_alloc_rsrc_rsp) \
@@ -836,6 +843,9 @@ enum nix_af_status {
NIX_AF_ERR_CQ_CTX_WRITE_ERR = -429,
NIX_AF_ERR_AQ_CTX_RETRY_WRITE = -430,
NIX_AF_ERR_LINK_CREDITS = -431,
+ NIX_AF_ERR_INVALID_MCAST_GRP = -436,
+ NIX_AF_ERR_INVALID_MCAST_DEL_REQ = -437,
+ NIX_AF_ERR_NON_CONTIG_MCE_LIST = -438,
};
/* For NIX RX vtag action */
@@ -1210,6 +1220,68 @@ struct nix_bp_cfg_rsp {
u8 chan_cnt; /* Number of channel for which bpids are assigned */
};
+struct nix_mcast_grp_create_req {
+ struct mbox_msghdr hdr;
+#define NIX_MCAST_INGRESS 0
+#define NIX_MCAST_EGRESS 1
+ u8 dir;
+ u8 reserved[11];
+ /* Reserving a few bytes for future requirements */
+};
+
+struct nix_mcast_grp_create_rsp {
+ struct mbox_msghdr hdr;
+ /* This mcast_grp_idx should be passed in the MCAM
+ * write entry request for multicast. AF will identify the
+ * corresponding multicast table index associated
+ * with the group id and program it into the MCAM entry.
+ * This group id is also needed in group delete
+ * and update requests.
+ */
+ u32 mcast_grp_idx;
+};
+
+struct nix_mcast_grp_destroy_req {
+ struct mbox_msghdr hdr;
+ /* Group id returned by nix_mcast_grp_create_rsp */
+ u32 mcast_grp_idx;
+ /* If the AF is requesting the destroy, set
+ * this to '1'; otherwise keep it '0'.
+ */
+ u8 is_af;
+};
+
+struct nix_mcast_grp_update_req {
+ struct mbox_msghdr hdr;
+ /* Group id returned by nix_mcast_grp_create_rsp */
+ u32 mcast_grp_idx;
+ /* Number of multicast/mirror entries requested */
+ u32 num_mce_entry;
+#define NIX_MCE_ENTRY_MAX 64
+#define NIX_RX_RQ 0
+#define NIX_RX_RSS 1
+ /* Receive queue or RSS index within pf_func */
+ u32 rq_rss_index[NIX_MCE_ENTRY_MAX];
+ /* pcifunc is required for both ingress and egress multicast */
+ u16 pcifunc[NIX_MCE_ENTRY_MAX];
+ /* channel is required for egress multicast */
+ u16 channel[NIX_MCE_ENTRY_MAX];
+#define NIX_MCAST_OP_ADD_ENTRY 0
+#define NIX_MCAST_OP_DEL_ENTRY 1
+ /* Destination type. 0:Receive queue, 1:RSS */
+ u8 dest_type[NIX_MCE_ENTRY_MAX];
+ u8 op;
+ /* If the AF is requesting the update, set
+ * this to '1'; otherwise keep it '0'.
+ */
+ u8 is_af;
+};
+
+struct nix_mcast_grp_update_rsp {
+ struct mbox_msghdr hdr;
+ u32 mce_start_index;
+};
+
/* Global NIX inline IPSec configuration */
struct nix_inline_ipsec_cfg {
struct mbox_msghdr hdr;
@@ -1485,6 +1557,8 @@ struct flow_msg {
#define OTX2_FLOWER_MASK_MPLS_TTL GENMASK(7, 0)
#define OTX2_FLOWER_MASK_MPLS_NON_TTL GENMASK(31, 8)
u32 mpls_lse[4];
+ u8 icmp_type;
+ u8 icmp_code;
};
struct npc_install_flow_req {
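The lifecycle of the three new NIX_MCAST_GRP messages: create returns a
mcast_grp_idx, update adds or deletes MCE entries against that index, and
destroy tears the group down (AF-initiated calls set is_af). A hedged sketch
of filling an update request to add two ingress destinations; only the field
assignment is shown, and the actual transport goes through the otx2 mbox
machinery:

    /* Assumes the struct and constants defined above are in scope. */
    static void add_two_rx_destinations(struct nix_mcast_grp_update_req *req,
                                        u32 grp_idx, u16 pf_a, u16 pf_b)
    {
            req->mcast_grp_idx = grp_idx;   /* from nix_mcast_grp_create_rsp */
            req->op = NIX_MCAST_OP_ADD_ENTRY;
            req->num_mce_entry = 2;
            req->pcifunc[0] = pf_a;
            req->dest_type[0] = NIX_RX_RQ;  /* deliver to a receive queue */
            req->rq_rss_index[0] = 0;       /* RQ index within pf_a */
            req->pcifunc[1] = pf_b;
            req->dest_type[1] = NIX_RX_RSS; /* deliver via an RSS group */
            req->rq_rss_index[1] = 0;       /* RSS index within pf_b */
    }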
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index 8c0732c9a7..b0b4dea548 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -214,6 +214,8 @@ enum key_fields {
NPC_MPLS3_TTL,
NPC_MPLS4_LBTCBOS,
NPC_MPLS4_TTL,
+ NPC_TYPE_ICMP,
+ NPC_CODE_ICMP,
NPC_HEADER_FIELDS_MAX,
NPC_CHAN = NPC_HEADER_FIELDS_MAX, /* Valid when Rx */
NPC_PF_FUNC, /* Valid when Tx */
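These two key fields let NPC classify on the ICMP header's type and code
bytes; the tc flower offload changes elsewhere in this series map filters
such as "ip_proto icmp type 8 code 0" onto them. A hedged sketch of
populating the corresponding flow_msg packet/mask pair for an ICMP
echo-request match:

    /* Only the ICMP members of struct flow_msg are shown. */
    struct flow_msg_icmp_sketch {
            unsigned char icmp_type;
            unsigned char icmp_code;
    };

    static void match_icmp_echo(struct flow_msg_icmp_sketch *pkt,
                                struct flow_msg_icmp_sketch *mask)
    {
            pkt->icmp_type = 8;      /* ICMP echo request */
            mask->icmp_type = 0xff;  /* exact match on type */
            pkt->icmp_code = 0;
            mask->icmp_code = 0xff;  /* exact match on code */
    }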
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 32645aefd5..6a911ea0cf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -156,7 +156,7 @@ int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc)
return start;
}
-static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
+void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
{
if (!rsrc->bmap)
return;
@@ -935,6 +935,9 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
hw->total_vfs = (cfg >> 20) & 0xFFF;
hw->max_vfs_per_pf = (cfg >> 40) & 0xFF;
+ if (!is_rvu_otx2(rvu))
+ rvu_apr_block_cn10k_init(rvu);
+
/* Init NPA LF's bitmap */
block = &hw->block[BLKADDR_NPA];
if (!block->implemented)
@@ -2633,6 +2636,10 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
* 2. Flush and reset SSO/SSOW
* 3. Cleanup pools (NPA)
*/
+
+ /* Free multicast/mirror node associated with the 'pcifunc' */
+ rvu_nix_mcast_flr_free_entries(rvu, pcifunc);
+
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX1);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 185c296eaa..d44a400e1b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -116,11 +116,12 @@ struct rvu_block {
};
struct nix_mcast {
- struct qmem *mce_ctx;
- struct qmem *mcast_buf;
- int replay_pkind;
- int next_free_mce;
- struct mutex mce_lock; /* Serialize MCE updates */
+ struct qmem *mce_ctx;
+ struct qmem *mcast_buf;
+ int replay_pkind;
+ struct rsrc_bmap mce_counter[2];
+ /* Counters for both ingress and egress mcast lists */
+ struct mutex mce_lock; /* Serialize MCE updates */
};
struct nix_mce_list {
@@ -129,6 +130,23 @@ struct nix_mce_list {
int max;
};
+struct nix_mcast_grp_elem {
+ struct nix_mce_list mcast_mce_list;
+ u32 mcast_grp_idx;
+ u32 pcifunc;
+ int mcam_index;
+ int mce_start_index;
+ struct list_head list;
+ u8 dir;
+};
+
+struct nix_mcast_grp {
+ struct list_head mcast_grp_head;
+ int count;
+ int next_grp_index;
+ struct mutex mcast_grp_lock; /* Serialize MCE updates */
+};
+
/* layer metadata to uniquely identify a packet header field */
struct npc_layer_mdata {
u8 lid;
@@ -339,6 +357,7 @@ struct nix_hw {
struct rvu *rvu;
struct nix_txsch txsch[NIX_TXSCH_LVL_CNT]; /* Tx schedulers */
struct nix_mcast mcast;
+ struct nix_mcast_grp mcast_grp;
struct nix_flowkey flowkey;
struct nix_mark_format mark_format;
struct nix_lso lso;
@@ -744,6 +763,7 @@ void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
bool is_rsrc_free(struct rsrc_bmap *rsrc, int id);
int rvu_rsrc_free_count(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc);
+void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start);
bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc);
u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr);
int rvu_get_pf(u16 pcifunc);
@@ -850,6 +870,11 @@ u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu);
u32 convert_bytes_to_dwrr_mtu(u32 bytes);
void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc,
struct nix_txsch *txsch, bool enable);
+void rvu_nix_mcast_flr_free_entries(struct rvu *rvu, u16 pcifunc);
+int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc,
+ u32 mcast_grp_idx);
+int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc,
+ u32 mcast_grp_idx, u16 mcam_index);
/* NPC APIs */
void rvu_npc_freemem(struct rvu *rvu);
@@ -898,6 +923,10 @@ void npc_mcam_enable_flows(struct rvu *rvu, u16 target);
void npc_mcam_disable_flows(struct rvu *rvu, u16 target);
void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, int index, bool enable);
+u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index);
+void npc_set_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index, u64 cfg);
void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, u16 src, struct mcam_entry *entry,
u8 *intf, u8 *ena);
@@ -923,6 +952,8 @@ int npc_install_mcam_drop_rule(struct rvu *rvu, int mcam_idx, u16 *counter_idx,
u64 bcast_mcast_val, u64 bcast_mcast_mask);
void npc_mcam_rsrcs_reserve(struct rvu *rvu, int blkaddr, int entry_idx);
bool npc_is_feature_supported(struct rvu *rvu, u64 features, u8 intf);
+int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr);
+void npc_mcam_rsrcs_deinit(struct rvu *rvu);
/* CPT APIs */
int rvu_cpt_register_interrupts(struct rvu *rvu);
@@ -944,6 +975,7 @@ void rvu_nix_block_cn10k_init(struct rvu *rvu, struct nix_hw *nix_hw);
/* CN10K RVU - LMT*/
void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc);
+void rvu_apr_block_cn10k_init(struct rvu *rvu);
#ifdef CONFIG_DEBUG_FS
void rvu_dbg_init(struct rvu *rvu);
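struct nix_mcast now tracks MCE slots with a resource bitmap per direction
instead of a bump allocator, so runs freed by group destroy or FLR can be
reused. A condensed sketch of the allocate/free pairing this enables,
mirroring nix_alloc_mce_list()/nix_free_mce_list() in the rvu_nix.c hunk
further down (locking omitted; assumes the declarations above are in scope):

    static int reserve_mce_run(struct nix_mcast *mcast, int count, u8 dir)
    {
            struct rsrc_bmap *ctr = &mcast->mce_counter[dir];

            if (!rvu_rsrc_check_contig(ctr, count))
                    return -ENOSPC;                       /* no contiguous run */

            return rvu_alloc_rsrc_contig(ctr, count);     /* start index */
    }

    static void release_mce_run(struct nix_mcast *mcast, int count,
                                int start, u8 dir)
    {
            rvu_free_rsrc_contig(&mcast->mce_counter[dir], count, start);
    }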
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 72e060cf6b..e9bf9231b0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -160,6 +160,8 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
continue;
lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
+ if (iter >= MAX_LMAC_COUNT)
+ continue;
lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
iter);
rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
index 0e74c5a223..7fa98aeb36 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
@@ -559,3 +559,12 @@ void rvu_nix_block_cn10k_init(struct rvu *rvu, struct nix_hw *nix_hw)
cfg |= BIT_ULL(1) | BIT_ULL(2);
rvu_write64(rvu, blkaddr, NIX_AF_CFG, cfg);
}
+
+void rvu_apr_block_cn10k_init(struct rvu *rvu)
+{
+ u64 reg;
+
+ reg = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG);
+ reg |= FIELD_PREP(LMTST_THROTTLE_MASK, LMTST_WR_PEND_MAX);
+ rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CFG, reg);
+}
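FIELD_PREP() shifts a value into the bit positions selected by a mask, so
the write above programs only the LMTST throttle field (the OR relies on
the field being zero at reset). A worked example with an assumed 4-bit
field at bits 35:32:

    #include <linux/bits.h>      /* GENMASK_ULL */
    #include <linux/bitfield.h>  /* FIELD_PREP */
    #include <linux/types.h>

    #define EXAMPLE_THROTTLE_MASK GENMASK_ULL(35, 32) /* assumed position */

    /* FIELD_PREP(EXAMPLE_THROTTLE_MASK, 0x9) evaluates to 0x9ULL << 32. */
    static u64 set_throttle(u64 reg, u64 pend_max)
    {
            reg &= ~EXAMPLE_THROTTLE_MASK; /* clear the field first */
            reg |= FIELD_PREP(EXAMPLE_THROTTLE_MASK, pend_max);
            return reg;
    }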
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index bd817ee887..e6d7914ce6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -1825,6 +1825,8 @@ static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
struct nix_cq_ctx_s *cq_ctx = &rsp->cq;
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);
@@ -1836,6 +1838,16 @@ static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
cq_ctx->bpid, cq_ctx->bp_ena);
+ if (!is_rvu_otx2(rvu)) {
+ seq_printf(m, "W1: lbpid_high \t\t\t0x%03x\n", cq_ctx->lbpid_high);
+ seq_printf(m, "W1: lbpid_med \t\t\t0x%03x\n", cq_ctx->lbpid_med);
+ seq_printf(m, "W1: lbpid_low \t\t\t0x%03x\n", cq_ctx->lbpid_low);
+ seq_printf(m, "(W1: lbpid) \t\t\t0x%03x\n",
+ cq_ctx->lbpid_high << 6 | cq_ctx->lbpid_med << 3 |
+ cq_ctx->lbpid_low);
+ seq_printf(m, "W1: lbp_ena \t\t\t\t%d\n\n", cq_ctx->lbp_ena);
+ }
+
seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
cq_ctx->update_time, cq_ctx->avg_level);
seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
@@ -1847,6 +1859,11 @@ static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
cq_ctx->qsize, cq_ctx->caching);
seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
cq_ctx->substream, cq_ctx->ena);
+ if (!is_rvu_otx2(rvu)) {
+ seq_printf(m, "W3: lbp_frac \t\t\t%d\n", cq_ctx->lbp_frac);
+ seq_printf(m, "W3: cpt_drop_err_en \t\t\t%d\n",
+ cq_ctx->cpt_drop_err_en);
+ }
seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
cq_ctx->drop_ena, cq_ctx->drop);
seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
@@ -2889,6 +2906,14 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[3],
rule->mask.mpls_lse[3]);
break;
+ case NPC_TYPE_ICMP:
+ seq_printf(s, "%d ", rule->packet.icmp_type);
+ seq_printf(s, "mask 0x%x\n", rule->mask.icmp_type);
+ break;
+ case NPC_CODE_ICMP:
+ seq_printf(s, "%d ", rule->packet.icmp_code);
+ seq_printf(s, "mask 0x%x\n", rule->mask.icmp_code);
+ break;
default:
seq_puts(s, "\n");
break;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index 21b5d71c1e..96c04f7d93 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -5,7 +5,7 @@
*
*/
-#include<linux/bitfield.h>
+#include <linux/bitfield.h>
#include "rvu.h"
#include "rvu_reg.h"
@@ -1235,8 +1235,9 @@ static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id,
enum rvu_af_dl_param_id {
RVU_AF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
- RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
+ RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
+ RVU_AF_DEVLINK_PARAM_ID_NIX_MAXLF,
};
static int rvu_af_npc_exact_feature_get(struct devlink *devlink, u32 id,
@@ -1354,12 +1355,97 @@ static int rvu_af_dl_npc_mcam_high_zone_percent_validate(struct devlink *devlink
return 0;
}
+static int rvu_af_dl_nix_maxlf_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+
+ ctx->val.vu16 = (u16)rvu_get_nixlf_count(rvu);
+
+ return 0;
+}
+
+static int rvu_af_dl_nix_maxlf_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ struct rvu_block *block;
+ int blkaddr = 0;
+
+ npc_mcam_rsrcs_deinit(rvu);
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ while (blkaddr) {
+ block = &rvu->hw->block[blkaddr];
+ block->lf.max = ctx->val.vu16;
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ }
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ npc_mcam_rsrcs_init(rvu, blkaddr);
+
+ return 0;
+}
+
+static int rvu_af_dl_nix_maxlf_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ u16 max_nix0_lf, max_nix1_lf;
+ struct npc_mcam *mcam;
+ u64 cfg;
+
+ cfg = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST2);
+ max_nix0_lf = cfg & 0xFFF;
+ cfg = rvu_read64(rvu, BLKADDR_NIX1, NIX_AF_CONST2);
+ max_nix1_lf = cfg & 0xFFF;
+
+ /* Do not allow the user to modify the maximum number of NIX LFs
+ * while MCAM entries have already been assigned.
+ */
+ mcam = &rvu->hw->mcam;
+ if (mcam->bmap_fcnt < mcam->bmap_entries) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "mcam entries have already been assigned, can't resize");
+ return -EPERM;
+ }
+
+ if (max_nix0_lf && val.vu16 > max_nix0_lf) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "requested nixlf is greater than the max supported nix0_lf");
+ return -EPERM;
+ }
+
+ if (max_nix1_lf && val.vu16 > max_nix1_lf) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "requested nixlf is greater than the max supported nix1_lf");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct devlink_param rvu_af_dl_params[] = {
DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
"dwrr_mtu", DEVLINK_PARAM_TYPE_U32,
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set,
rvu_af_dl_dwrr_mtu_validate),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
+ "npc_mcam_high_zone_percent", DEVLINK_PARAM_TYPE_U8,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_npc_mcam_high_zone_percent_get,
+ rvu_af_dl_npc_mcam_high_zone_percent_set,
+ rvu_af_dl_npc_mcam_high_zone_percent_validate),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NIX_MAXLF,
+ "nix_maxlf", DEVLINK_PARAM_TYPE_U16,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_nix_maxlf_get,
+ rvu_af_dl_nix_maxlf_set,
+ rvu_af_dl_nix_maxlf_validate),
};
static const struct devlink_param rvu_af_dl_param_exact_match[] = {
@@ -1369,12 +1455,6 @@ static const struct devlink_param rvu_af_dl_param_exact_match[] = {
rvu_af_npc_exact_feature_get,
rvu_af_npc_exact_feature_disable,
rvu_af_npc_exact_feature_validate),
- DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
- "npc_mcam_high_zone_percent", DEVLINK_PARAM_TYPE_U8,
- BIT(DEVLINK_PARAM_CMODE_RUNTIME),
- rvu_af_dl_npc_mcam_high_zone_percent_get,
- rvu_af_dl_npc_mcam_high_zone_percent_set,
- rvu_af_dl_npc_mcam_high_zone_percent_validate),
};
/* Devlink switch mode */
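Once registered, the new parameter is exercised through the standard devlink
runtime-param flow, e.g. "devlink dev param set pci/0002:01:00.0 name
nix_maxlf value 16 cmode runtime" (device address illustrative). The
validate hook above rejects the request while MCAM entries are in use and
caps the value at what NIX_AF_CONST2 reports for each NIX block, since the
set hook reinitializes the MCAM allocator around the LF-count change.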
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 58744313f0..42db213fb6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -72,12 +72,19 @@ enum nix_makr_fmt_indexes {
/* For now considering MC resources needed for broadcast
* pkt replication only. i.e 256 HWVFs + 12 PFs.
*/
-#define MC_TBL_SIZE MC_TBL_SZ_512
-#define MC_BUF_CNT MC_BUF_CNT_128
+#define MC_TBL_SIZE MC_TBL_SZ_2K
+#define MC_BUF_CNT MC_BUF_CNT_1024
+
+#define MC_TX_MAX 2048
struct mce {
struct hlist_node node;
+ u32 rq_rss_index;
u16 pcifunc;
+ u16 channel;
+ u8 dest_type;
+ u8 is_active;
+ u8 reserved[2];
};
int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
@@ -165,18 +172,33 @@ static void nix_mce_list_init(struct nix_mce_list *list, int max)
list->max = max;
}
-static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
+static int nix_alloc_mce_list(struct nix_mcast *mcast, int count, u8 dir)
{
+ struct rsrc_bmap *mce_counter;
int idx;
if (!mcast)
- return 0;
+ return -EINVAL;
- idx = mcast->next_free_mce;
- mcast->next_free_mce += count;
+ mce_counter = &mcast->mce_counter[dir];
+ if (!rvu_rsrc_check_contig(mce_counter, count))
+ return -ENOSPC;
+
+ idx = rvu_alloc_rsrc_contig(mce_counter, count);
return idx;
}
+static void nix_free_mce_list(struct nix_mcast *mcast, int count, int start, u8 dir)
+{
+ struct rsrc_bmap *mce_counter;
+
+ if (!mcast)
+ return;
+
+ mce_counter = &mcast->mce_counter[dir];
+ rvu_free_rsrc_contig(mce_counter, count, start);
+}
+
struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
{
int nix_blkaddr = 0, i = 0;
@@ -2956,7 +2978,8 @@ int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
}
static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
- int mce, u8 op, u16 pcifunc, int next, bool eol)
+ int mce, u8 op, u16 pcifunc, int next,
+ int index, u8 mce_op, bool eol)
{
struct nix_aq_enq_req aq_req;
int err;
@@ -2967,8 +2990,8 @@ static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
aq_req.qidx = mce;
/* Use RSS with RSS index 0 */
- aq_req.mce.op = 1;
- aq_req.mce.index = 0;
+ aq_req.mce.op = mce_op;
+ aq_req.mce.index = index;
aq_req.mce.eol = eol;
aq_req.mce.pf_func = pcifunc;
aq_req.mce.next = next;
@@ -2985,6 +3008,206 @@ static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
return 0;
}
+static void nix_delete_mcast_mce_list(struct nix_mce_list *mce_list)
+{
+ struct hlist_node *tmp;
+ struct mce *mce;
+
+ /* Scan through the current list */
+ hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) {
+ hlist_del(&mce->node);
+ kfree(mce);
+ }
+
+ mce_list->count = 0;
+ mce_list->max = 0;
+}
+
+static int nix_get_last_mce_list_index(struct nix_mcast_grp_elem *elem)
+{
+ return elem->mce_start_index + elem->mcast_mce_list.count - 1;
+}
+
+static int nix_update_ingress_mce_list_hw(struct rvu *rvu,
+ struct nix_hw *nix_hw,
+ struct nix_mcast_grp_elem *elem)
+{
+ int idx, last_idx, next_idx, err;
+ struct nix_mce_list *mce_list;
+ struct mce *mce, *prev_mce;
+
+ mce_list = &elem->mcast_mce_list;
+ idx = elem->mce_start_index;
+ last_idx = nix_get_last_mce_list_index(elem);
+ hlist_for_each_entry(mce, &mce_list->head, node) {
+ if (idx > last_idx)
+ break;
+
+ if (!mce->is_active) {
+ if (idx == elem->mce_start_index) {
+ idx++;
+ prev_mce = mce;
+ elem->mce_start_index = idx;
+ continue;
+ } else if (idx == last_idx) {
+ err = nix_blk_setup_mce(rvu, nix_hw, idx - 1, NIX_AQ_INSTOP_WRITE,
+ prev_mce->pcifunc, next_idx,
+ prev_mce->rq_rss_index,
+ prev_mce->dest_type,
+ false);
+ if (err)
+ return err;
+
+ break;
+ }
+ }
+
+ next_idx = idx + 1;
+ /* EOL should be set in last MCE */
+ err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
+ mce->pcifunc, next_idx,
+ mce->rq_rss_index, mce->dest_type,
+ (next_idx > last_idx) ? true : false);
+ if (err)
+ return err;
+
+ idx++;
+ prev_mce = mce;
+ }
+
+ return 0;
+}
+
+static void nix_update_egress_mce_list_hw(struct rvu *rvu,
+ struct nix_hw *nix_hw,
+ struct nix_mcast_grp_elem *elem)
+{
+ struct nix_mce_list *mce_list;
+ int idx, last_idx, next_idx;
+ struct mce *mce, *prev_mce;
+ u64 regval;
+ u8 eol;
+
+ mce_list = &elem->mcast_mce_list;
+ idx = elem->mce_start_index;
+ last_idx = nix_get_last_mce_list_index(elem);
+ hlist_for_each_entry(mce, &mce_list->head, node) {
+ if (idx > last_idx)
+ break;
+
+ if (!mce->is_active) {
+ if (idx == elem->mce_start_index) {
+ idx++;
+ prev_mce = mce;
+ elem->mce_start_index = idx;
+ continue;
+ } else if (idx == last_idx) {
+ regval = (next_idx << 16) | (1 << 12) | prev_mce->channel;
+ rvu_write64(rvu, nix_hw->blkaddr,
+ NIX_AF_TX_MCASTX(idx - 1),
+ regval);
+ break;
+ }
+ }
+
+ eol = 0;
+ next_idx = idx + 1;
+ /* EOL should be set in last MCE */
+ if (next_idx > last_idx)
+ eol = 1;
+
+ regval = (next_idx << 16) | (eol << 12) | mce->channel;
+ rvu_write64(rvu, nix_hw->blkaddr,
+ NIX_AF_TX_MCASTX(idx),
+ regval);
+ idx++;
+ prev_mce = mce;
+ }
+}
+
+static int nix_del_mce_list_entry(struct rvu *rvu,
+ struct nix_hw *nix_hw,
+ struct nix_mcast_grp_elem *elem,
+ struct nix_mcast_grp_update_req *req)
+{
+ u32 num_entry = req->num_mce_entry;
+ struct nix_mce_list *mce_list;
+ struct mce *mce;
+ bool is_found;
+ int i;
+
+ mce_list = &elem->mcast_mce_list;
+ for (i = 0; i < num_entry; i++) {
+ is_found = false;
+ hlist_for_each_entry(mce, &mce_list->head, node) {
+ /* If already exists, then delete */
+ if (mce->pcifunc == req->pcifunc[i]) {
+ hlist_del(&mce->node);
+ kfree(mce);
+ mce_list->count--;
+ is_found = true;
+ break;
+ }
+ }
+
+ if (!is_found)
+ return NIX_AF_ERR_INVALID_MCAST_DEL_REQ;
+ }
+
+ mce_list->max = mce_list->count;
+ /* Dump the updated list to HW */
+ if (elem->dir == NIX_MCAST_INGRESS)
+ return nix_update_ingress_mce_list_hw(rvu, nix_hw, elem);
+
+ nix_update_egress_mce_list_hw(rvu, nix_hw, elem);
+ return 0;
+}
+
+static int nix_add_mce_list_entry(struct rvu *rvu,
+ struct nix_hw *nix_hw,
+ struct nix_mcast_grp_elem *elem,
+ struct nix_mcast_grp_update_req *req)
+{
+ u32 num_entry = req->num_mce_entry;
+ struct nix_mce_list *mce_list;
+ struct hlist_node *tmp;
+ struct mce *mce;
+ int i;
+
+ mce_list = &elem->mcast_mce_list;
+ for (i = 0; i < num_entry; i++) {
+ mce = kzalloc(sizeof(*mce), GFP_KERNEL);
+ if (!mce)
+ goto free_mce;
+
+ mce->pcifunc = req->pcifunc[i];
+ mce->channel = req->channel[i];
+ mce->rq_rss_index = req->rq_rss_index[i];
+ mce->dest_type = req->dest_type[i];
+ mce->is_active = 1;
+ hlist_add_head(&mce->node, &mce_list->head);
+ mce_list->count++;
+ }
+
+ mce_list->max += num_entry;
+
+ /* Dump the updated list to HW */
+ if (elem->dir == NIX_MCAST_INGRESS)
+ return nix_update_ingress_mce_list_hw(rvu, nix_hw, elem);
+
+ nix_update_egress_mce_list_hw(rvu, nix_hw, elem);
+ return 0;
+
+free_mce:
+ hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) {
+ hlist_del(&mce->node);
+ kfree(mce);
+ mce_list->count--;
+ }
+
+ return -ENOMEM;
+}
+
static int nix_update_mce_list_entry(struct nix_mce_list *mce_list,
u16 pcifunc, bool add)
{
@@ -3080,6 +3303,7 @@ int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
/* EOL should be set in last MCE */
err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
mce->pcifunc, next_idx,
+ 0, 1,
(next_idx > last_idx) ? true : false);
if (err)
goto end;
@@ -3160,6 +3384,16 @@ static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
return err;
}
+static void nix_setup_mcast_grp(struct nix_hw *nix_hw)
+{
+ struct nix_mcast_grp *mcast_grp = &nix_hw->mcast_grp;
+
+ INIT_LIST_HEAD(&mcast_grp->mcast_grp_head);
+ mutex_init(&mcast_grp->mcast_grp_lock);
+ mcast_grp->next_grp_index = 1;
+ mcast_grp->count = 0;
+}
+
static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
{
struct nix_mcast *mcast = &nix_hw->mcast;
@@ -3184,15 +3418,15 @@ static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
continue;
/* save start idx of broadcast mce list */
- pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
+ pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS);
nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
/* save start idx of multicast mce list */
- pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
+ pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS);
nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1);
/* save the start idx of promisc mce list */
- pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
+ pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS);
nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1);
for (idx = 0; idx < (numvfs + 1); idx++) {
@@ -3207,7 +3441,7 @@ static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
err = nix_blk_setup_mce(rvu, nix_hw,
pfvf->bcast_mce_idx + idx,
NIX_AQ_INSTOP_INIT,
- pcifunc, 0, true);
+ pcifunc, 0, 0, 1, true);
if (err)
return err;
@@ -3215,7 +3449,7 @@ static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
err = nix_blk_setup_mce(rvu, nix_hw,
pfvf->mcast_mce_idx + idx,
NIX_AQ_INSTOP_INIT,
- pcifunc, 0, true);
+ pcifunc, 0, 0, 1, true);
if (err)
return err;
@@ -3223,7 +3457,7 @@ static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
err = nix_blk_setup_mce(rvu, nix_hw,
pfvf->promisc_mce_idx + idx,
NIX_AQ_INSTOP_INIT,
- pcifunc, 0, true);
+ pcifunc, 0, 0, 1, true);
if (err)
return err;
}
@@ -3238,13 +3472,30 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
int err, size;
size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
- size = (1ULL << size);
+ size = BIT_ULL(size);
+
+ /* Allocate bitmap for rx mce entries */
+ mcast->mce_counter[NIX_MCAST_INGRESS].max = 256UL << MC_TBL_SIZE;
+ err = rvu_alloc_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]);
+ if (err)
+ return -ENOMEM;
+
+ /* Allocate bitmap for tx mce entries */
+ mcast->mce_counter[NIX_MCAST_EGRESS].max = MC_TX_MAX;
+ err = rvu_alloc_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]);
+ if (err) {
+ rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]);
+ return -ENOMEM;
+ }
/* Alloc memory for multicast/mirror replication entries */
err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
- (256UL << MC_TBL_SIZE), size);
- if (err)
+ mcast->mce_counter[NIX_MCAST_INGRESS].max, size);
+ if (err) {
+ rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]);
+ rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]);
return -ENOMEM;
+ }
rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
(u64)mcast->mce_ctx->iova);
@@ -3257,8 +3508,11 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
(8UL << MC_BUF_CNT), size);
- if (err)
+ if (err) {
+ rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]);
+ rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]);
return -ENOMEM;
+ }
rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
(u64)mcast->mcast_buf->iova);
@@ -3272,6 +3526,8 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
mutex_init(&mcast->mce_lock);
+ nix_setup_mcast_grp(nix_hw);
+
return nix_setup_mce_tables(rvu, nix_hw);
}
@@ -4465,18 +4721,18 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
*/
rvu_write64(rvu, blkaddr, NIX_AF_CFG,
rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
+ }
- /* Set chan/link to backpressure TL3 instead of TL2 */
- rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
+ /* Set chan/link to backpressure TL3 instead of TL2 */
+ rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
- /* Disable SQ manager's sticky mode operation (set TM6 = 0)
- * This sticky mode is known to cause SQ stalls when multiple
- * SQs are mapped to same SMQ and transmitting pkts at a time.
- */
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
- cfg &= ~BIT_ULL(15);
- rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
- }
+ /* Disable SQ manager's sticky mode operation (set TM6 = 0)
+ * This sticky mode is known to cause SQ stalls when multiple
+ * SQs are mapped to same SMQ and transmitting pkts at a time.
+ */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
+ cfg &= ~BIT_ULL(15);
+ rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
ltdefs = rvu->kpu.lt_def;
/* Calibrate X2P bus to check if CGX/LBK links are fine */
@@ -4698,6 +4954,74 @@ void rvu_nix_freemem(struct rvu *rvu)
}
}
+static void nix_mcast_update_action(struct rvu *rvu,
+ struct nix_mcast_grp_elem *elem)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct nix_rx_action rx_action = { 0 };
+ struct nix_tx_action tx_action = { 0 };
+ int npc_blkaddr;
+
+ npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (elem->dir == NIX_MCAST_INGRESS) {
+ *(u64 *)&rx_action = npc_get_mcam_action(rvu, mcam,
+ npc_blkaddr,
+ elem->mcam_index);
+ rx_action.index = elem->mce_start_index;
+ npc_set_mcam_action(rvu, mcam, npc_blkaddr, elem->mcam_index,
+ *(u64 *)&rx_action);
+ } else {
+ *(u64 *)&tx_action = npc_get_mcam_action(rvu, mcam,
+ npc_blkaddr,
+ elem->mcam_index);
+ tx_action.index = elem->mce_start_index;
+ npc_set_mcam_action(rvu, mcam, npc_blkaddr, elem->mcam_index,
+ *(u64 *)&tx_action);
+ }
+}
+
+static void nix_mcast_update_mce_entry(struct rvu *rvu, u16 pcifunc, u8 is_active)
+{
+ struct nix_mcast_grp_elem *elem;
+ struct nix_mcast_grp *mcast_grp;
+ struct nix_hw *nix_hw;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return;
+
+ mcast_grp = &nix_hw->mcast_grp;
+
+ mutex_lock(&mcast_grp->mcast_grp_lock);
+ list_for_each_entry(elem, &mcast_grp->mcast_grp_head, list) {
+ struct nix_mce_list *mce_list;
+ struct mce *mce;
+
+ /* Iterate the group elements and update the state of the
+ * element which received the enable/disable request.
+ */
+ mce_list = &elem->mcast_mce_list;
+ hlist_for_each_entry(mce, &mce_list->head, node) {
+ if (mce->pcifunc == pcifunc) {
+ mce->is_active = is_active;
+ break;
+ }
+ }
+
+ /* Dump the updated list to HW */
+ if (elem->dir == NIX_MCAST_INGRESS)
+ nix_update_ingress_mce_list_hw(rvu, nix_hw, elem);
+ else
+ nix_update_egress_mce_list_hw(rvu, nix_hw, elem);
+
+ /* Update the multicast index in NPC rule */
+ nix_mcast_update_action(rvu, elem);
+ }
+ mutex_unlock(&mcast_grp->mcast_grp_lock);
+}
+
int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
@@ -4709,6 +5033,9 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
if (err)
return err;
+ /* Enable the interface if it is in any multicast list */
+ nix_mcast_update_mce_entry(rvu, pcifunc, 1);
+
rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
npc_mcam_enable_flows(rvu, pcifunc);
@@ -4733,6 +5060,9 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
return err;
rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+ /* Disable the interface if it is in any multicast list */
+ nix_mcast_update_mce_entry(rvu, pcifunc, 0);
+
pfvf = rvu_get_pfvf(rvu, pcifunc);
clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
@@ -5707,3 +6037,361 @@ int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *re
return 0;
}
+
+static struct nix_mcast_grp_elem *rvu_nix_mcast_find_grp_elem(struct nix_mcast_grp *mcast_grp,
+ u32 mcast_grp_idx)
+{
+ struct nix_mcast_grp_elem *iter;
+ bool is_found = false;
+
+ list_for_each_entry(iter, &mcast_grp->mcast_grp_head, list) {
+ if (iter->mcast_grp_idx == mcast_grp_idx) {
+ is_found = true;
+ break;
+ }
+ }
+
+ if (is_found)
+ return iter;
+
+ return NULL;
+}
+
+int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc, u32 mcast_grp_idx)
+{
+ struct nix_mcast_grp_elem *elem;
+ struct nix_mcast_grp *mcast_grp;
+ struct nix_hw *nix_hw;
+ int blkaddr, ret;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ mcast_grp = &nix_hw->mcast_grp;
+ mutex_lock(&mcast_grp->mcast_grp_lock);
+ elem = rvu_nix_mcast_find_grp_elem(mcast_grp, mcast_grp_idx);
+ if (!elem)
+ ret = NIX_AF_ERR_INVALID_MCAST_GRP;
+ else
+ ret = elem->mce_start_index;
+
+ mutex_unlock(&mcast_grp->mcast_grp_lock);
+ return ret;
+}
+
+void rvu_nix_mcast_flr_free_entries(struct rvu *rvu, u16 pcifunc)
+{
+ struct nix_mcast_grp_destroy_req dreq = { 0 };
+ struct nix_mcast_grp_update_req ureq = { 0 };
+ struct nix_mcast_grp_update_rsp ursp = { 0 };
+ struct nix_mcast_grp_elem *elem, *tmp;
+ struct nix_mcast_grp *mcast_grp;
+ struct nix_hw *nix_hw;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return;
+
+ mcast_grp = &nix_hw->mcast_grp;
+
+ mutex_lock(&mcast_grp->mcast_grp_lock);
+ list_for_each_entry_safe(elem, tmp, &mcast_grp->mcast_grp_head, list) {
+ struct nix_mce_list *mce_list;
+ struct hlist_node *tmp;
+ struct mce *mce;
+
+ /* If the pcifunc which created the multicast/mirror
+ * group received an FLR, then delete the entire group.
+ */
+ if (elem->pcifunc == pcifunc) {
+ /* Delete group */
+ dreq.hdr.pcifunc = elem->pcifunc;
+ dreq.mcast_grp_idx = elem->mcast_grp_idx;
+ dreq.is_af = 1;
+ rvu_mbox_handler_nix_mcast_grp_destroy(rvu, &dreq, NULL);
+ continue;
+ }
+
+ /* Iterate the group elements and delete the element which
+ * received the FLR.
+ */
+ mce_list = &elem->mcast_mce_list;
+ hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) {
+ if (mce->pcifunc == pcifunc) {
+ ureq.hdr.pcifunc = pcifunc;
+ ureq.num_mce_entry = 1;
+ ureq.mcast_grp_idx = elem->mcast_grp_idx;
+ ureq.op = NIX_MCAST_OP_DEL_ENTRY;
+ ureq.pcifunc[0] = pcifunc;
+ ureq.is_af = 1;
+ rvu_mbox_handler_nix_mcast_grp_update(rvu, &ureq, &ursp);
+ break;
+ }
+ }
+ }
+ mutex_unlock(&mcast_grp->mcast_grp_lock);
+}
+
+int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc,
+ u32 mcast_grp_idx, u16 mcam_index)
+{
+ struct nix_mcast_grp_elem *elem;
+ struct nix_mcast_grp *mcast_grp;
+ struct nix_hw *nix_hw;
+ int blkaddr, ret = 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ mcast_grp = &nix_hw->mcast_grp;
+ mutex_lock(&mcast_grp->mcast_grp_lock);
+ elem = rvu_nix_mcast_find_grp_elem(mcast_grp, mcast_grp_idx);
+ if (!elem)
+ ret = NIX_AF_ERR_INVALID_MCAST_GRP;
+ else
+ elem->mcam_index = mcam_index;
+
+ mutex_unlock(&mcast_grp->mcast_grp_lock);
+ return ret;
+}
+
+int rvu_mbox_handler_nix_mcast_grp_create(struct rvu *rvu,
+ struct nix_mcast_grp_create_req *req,
+ struct nix_mcast_grp_create_rsp *rsp)
+{
+ struct nix_mcast_grp_elem *elem;
+ struct nix_mcast_grp *mcast_grp;
+ struct nix_hw *nix_hw;
+ int blkaddr, err;
+
+ err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mcast_grp = &nix_hw->mcast_grp;
+ elem = kzalloc(sizeof(*elem), GFP_KERNEL);
+ if (!elem)
+ return -ENOMEM;
+
+ INIT_HLIST_HEAD(&elem->mcast_mce_list.head);
+ elem->mcam_index = -1;
+ elem->mce_start_index = -1;
+ elem->pcifunc = req->hdr.pcifunc;
+ elem->dir = req->dir;
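+
+ /* Assign the group index and add the element while holding the
+ * group lock.
+ */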
+ mutex_lock(&mcast_grp->mcast_grp_lock);
+ elem->mcast_grp_idx = mcast_grp->next_grp_index++;
+ list_add_tail(&elem->list, &mcast_grp->mcast_grp_head);
+ mcast_grp->count++;
+ mutex_unlock(&mcast_grp->mcast_grp_lock);
+
+ rsp->mcast_grp_idx = elem->mcast_grp_idx;
+ return 0;
+}
+
+int rvu_mbox_handler_nix_mcast_grp_destroy(struct rvu *rvu,
+ struct nix_mcast_grp_destroy_req *req,
+ struct msg_rsp *rsp)
+{
+ struct npc_delete_flow_req uninstall_req = { 0 };
+ struct npc_delete_flow_rsp uninstall_rsp = { 0 };
+ struct nix_mcast_grp_elem *elem;
+ struct nix_mcast_grp *mcast_grp;
+ int blkaddr, err, ret = 0;
+ struct nix_mcast *mcast;
+ struct nix_hw *nix_hw;
+
+ err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mcast_grp = &nix_hw->mcast_grp;
+
+ /* If the AF itself requested the deletion,
+ * it already holds the group lock.
+ */
+ if (!req->is_af)
+ mutex_lock(&mcast_grp->mcast_grp_lock);
+
+ elem = rvu_nix_mcast_find_grp_elem(mcast_grp, req->mcast_grp_idx);
+ if (!elem) {
+ ret = NIX_AF_ERR_INVALID_MCAST_GRP;
+ goto unlock_grp;
+ }
+
+ /* If no mce entries are associated with the group
+ * then just remove it from the global list.
+ */
+ if (!elem->mcast_mce_list.count)
+ goto delete_grp;
+
+ /* Delete the associated mcam entry and
+ * remove all mce entries from the group
+ */
+ mcast = &nix_hw->mcast;
+ mutex_lock(&mcast->mce_lock);
+ if (elem->mcam_index != -1) {
+ uninstall_req.hdr.pcifunc = req->hdr.pcifunc;
+ uninstall_req.entry = elem->mcam_index;
+ rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp);
+ }
+
+ nix_free_mce_list(mcast, elem->mcast_mce_list.count,
+ elem->mce_start_index, elem->dir);
+ nix_delete_mcast_mce_list(&elem->mcast_mce_list);
+ mutex_unlock(&mcast->mce_lock);
+
+delete_grp:
+ list_del(&elem->list);
+ kfree(elem);
+ mcast_grp->count--;
+
+unlock_grp:
+ if (!req->is_af)
+ mutex_unlock(&mcast_grp->mcast_grp_lock);
+
+ return ret;
+}
+
+int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
+ struct nix_mcast_grp_update_req *req,
+ struct nix_mcast_grp_update_rsp *rsp)
+{
+ struct nix_mcast_grp_destroy_req dreq = { 0 };
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct nix_mcast_grp_elem *elem;
+ struct nix_mcast_grp *mcast_grp;
+ int blkaddr, err, npc_blkaddr;
+ u16 prev_count, new_count;
+ struct nix_mcast *mcast;
+ struct nix_hw *nix_hw;
+ int i, ret;
+
+ if (!req->num_mce_entry)
+ return 0;
+
+ err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mcast_grp = &nix_hw->mcast_grp;
+
+ /* If the AF itself requested the update,
+ * it already holds the group lock.
+ */
+ if (!req->is_af)
+ mutex_lock(&mcast_grp->mcast_grp_lock);
+
+ elem = rvu_nix_mcast_find_grp_elem(mcast_grp, req->mcast_grp_idx);
+ if (!elem) {
+ ret = NIX_AF_ERR_INVALID_MCAST_GRP;
+ goto unlock_grp;
+ }
+
+ /* If any pcifunc matches the group's pcifunc, then we can
+ * delete the entire group.
+ */
+ if (req->op == NIX_MCAST_OP_DEL_ENTRY) {
+ for (i = 0; i < req->num_mce_entry; i++) {
+ if (elem->pcifunc == req->pcifunc[i]) {
+ /* Delete group */
+ dreq.hdr.pcifunc = elem->pcifunc;
+ dreq.mcast_grp_idx = elem->mcast_grp_idx;
+ dreq.is_af = 1;
+ rvu_mbox_handler_nix_mcast_grp_destroy(rvu, &dreq, NULL);
+ ret = 0;
+ goto unlock_grp;
+ }
+ }
+ }
+
+ mcast = &nix_hw->mcast;
+ mutex_lock(&mcast->mce_lock);
+ npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
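+ /* Keep any MCAM entry pointing at this group disabled while the
+ * MCE list is rebuilt, so traffic never hits a stale index.
+ */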
+ if (elem->mcam_index != -1)
+ npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, elem->mcam_index, false);
+
+ prev_count = elem->mcast_mce_list.count;
+ if (req->op == NIX_MCAST_OP_ADD_ENTRY) {
+ new_count = prev_count + req->num_mce_entry;
+ if (prev_count)
+ nix_free_mce_list(mcast, prev_count, elem->mce_start_index, elem->dir);
+
+ elem->mce_start_index = nix_alloc_mce_list(mcast, new_count, elem->dir);
+
+ /* It is possible not to get contiguous memory */
+ if (elem->mce_start_index < 0) {
+ if (elem->mcam_index != -1)
+ npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
+ elem->mcam_index, true);
+ ret = NIX_AF_ERR_NON_CONTIG_MCE_LIST;
+ goto unlock_mce;
+ }
+
+ ret = nix_add_mce_list_entry(rvu, nix_hw, elem, req);
+ if (ret) {
+ nix_free_mce_list(mcast, new_count, elem->mce_start_index, elem->dir);
+ if (prev_count)
+ elem->mce_start_index = nix_alloc_mce_list(mcast,
+ prev_count,
+ elem->dir);
+
+ if (elem->mcam_index != -1)
+ npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
+ elem->mcam_index, true);
+
+ goto unlock_mce;
+ }
+ } else {
+ if (!prev_count || prev_count < req->num_mce_entry) {
+ if (elem->mcam_index != -1)
+ npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
+ elem->mcam_index, true);
+ ret = NIX_AF_ERR_INVALID_MCAST_DEL_REQ;
+ goto unlock_mce;
+ }
+
+ nix_free_mce_list(mcast, prev_count, elem->mce_start_index, elem->dir);
+ new_count = prev_count - req->num_mce_entry;
+ elem->mce_start_index = nix_alloc_mce_list(mcast, new_count, elem->dir);
+ ret = nix_del_mce_list_entry(rvu, nix_hw, elem, req);
+ if (ret) {
+ nix_free_mce_list(mcast, new_count, elem->mce_start_index, elem->dir);
+ elem->mce_start_index = nix_alloc_mce_list(mcast, prev_count, elem->dir);
+ if (elem->mcam_index != -1)
+ npc_enable_mcam_entry(rvu, mcam,
+ npc_blkaddr,
+ elem->mcam_index,
+ true);
+
+ goto unlock_mce;
+ }
+ }
+
+ if (elem->mcam_index == -1) {
+ rsp->mce_start_index = elem->mce_start_index;
+ ret = 0;
+ goto unlock_mce;
+ }
+
+ nix_mcast_update_action(rvu, elem);
+ npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, elem->mcam_index, true);
+ rsp->mce_start_index = elem->mce_start_index;
+ ret = 0;
+
+unlock_mce:
+ mutex_unlock(&mcast->mce_lock);
+
+unlock_grp:
+ if (!req->is_af)
+ mutex_unlock(&mcast_grp->mcast_grp_lock);
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 55639c133d..d94b7b88e1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -61,28 +61,6 @@ int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena)
return 0;
}
-static int npc_mcam_verify_pf_func(struct rvu *rvu,
- struct mcam_entry *entry_data, u8 intf,
- u16 pcifunc)
-{
- u16 pf_func, pf_func_mask;
-
- if (is_npc_intf_rx(intf))
- return 0;
-
- pf_func_mask = (entry_data->kw_mask[0] >> 32) &
- NPC_KEX_PF_FUNC_MASK;
- pf_func = (entry_data->kw[0] >> 32) & NPC_KEX_PF_FUNC_MASK;
-
- pf_func = be16_to_cpu((__force __be16)pf_func);
- if (pf_func_mask != NPC_KEX_PF_FUNC_MASK ||
- ((pf_func & ~RVU_PFVF_FUNC_MASK) !=
- (pcifunc & ~RVU_PFVF_FUNC_MASK)))
- return -EINVAL;
-
- return 0;
-}
-
void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf)
{
int blkaddr;
@@ -599,8 +577,8 @@ static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
NPC_AF_MCAMEX_BANKX_CFG(dest, dbank), cfg);
}
-static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
- int blkaddr, int index)
+u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index)
{
int bank = npc_get_bank(mcam, index);
@@ -609,6 +587,16 @@ static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
}
+void npc_set_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index, u64 cfg)
+{
+ int bank = npc_get_bank(mcam, index);
+
+ index &= (mcam->banksize - 1);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, bank), cfg);
+}
+
void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan, u8 *mac_addr)
{
@@ -1669,7 +1657,7 @@ static int npc_fwdb_detect_load_prfl_img(struct rvu *rvu, uint64_t prfl_sz,
struct npc_coalesced_kpu_prfl *img_data = NULL;
int i = 0, rc = -EINVAL;
void __iomem *kpu_prfl_addr;
- u16 offset;
+ u32 offset;
img_data = (struct npc_coalesced_kpu_prfl __force *)rvu->kpu_prfl_addr;
if (le64_to_cpu(img_data->signature) == KPU_SIGN &&
@@ -1840,7 +1828,21 @@ static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
npc_program_kpu_profile(rvu, blkaddr, idx, &rvu->kpu.kpu[idx]);
}
-static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
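+/* Free all MCAM bookkeeping allocated by npc_mcam_rsrcs_init() */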
+void npc_mcam_rsrcs_deinit(struct rvu *rvu)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+
+ bitmap_free(mcam->bmap);
+ bitmap_free(mcam->bmap_reverse);
+ kfree(mcam->entry2pfvf_map);
+ kfree(mcam->cntr2pfvf_map);
+ kfree(mcam->entry2cntr_map);
+ kfree(mcam->cntr_refcnt);
+ kfree(mcam->entry2target_pffunc);
+ kfree(mcam->counters.bmap);
+}
+
+int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
{
int nixlf_count = rvu_get_nixlf_count(rvu);
struct npc_mcam *mcam = &rvu->hw->mcam;
@@ -1884,24 +1886,22 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
mcam->pf_offset = mcam->nixlf_offset + nixlf_count;
/* Allocate bitmaps for managing MCAM entries */
- mcam->bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(mcam->bmap_entries),
- sizeof(long), GFP_KERNEL);
+ mcam->bmap = bitmap_zalloc(mcam->bmap_entries, GFP_KERNEL);
if (!mcam->bmap)
return -ENOMEM;
- mcam->bmap_reverse = devm_kcalloc(rvu->dev,
- BITS_TO_LONGS(mcam->bmap_entries),
- sizeof(long), GFP_KERNEL);
+ mcam->bmap_reverse = bitmap_zalloc(mcam->bmap_entries, GFP_KERNEL);
if (!mcam->bmap_reverse)
- return -ENOMEM;
+ goto free_bmap;
mcam->bmap_fcnt = mcam->bmap_entries;
/* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
- mcam->entry2pfvf_map = devm_kcalloc(rvu->dev, mcam->bmap_entries,
- sizeof(u16), GFP_KERNEL);
+ mcam->entry2pfvf_map = kcalloc(mcam->bmap_entries, sizeof(u16),
+ GFP_KERNEL);
+
if (!mcam->entry2pfvf_map)
- return -ENOMEM;
+ goto free_bmap_reverse;
/* Reserve 1/8th of MCAM entries at the bottom for low priority
* allocations and another 1/8th at the top for high priority
@@ -1920,31 +1920,31 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
*/
err = rvu_alloc_bitmap(&mcam->counters);
if (err)
- return err;
+ goto free_entry_map;
- mcam->cntr2pfvf_map = devm_kcalloc(rvu->dev, mcam->counters.max,
- sizeof(u16), GFP_KERNEL);
+ mcam->cntr2pfvf_map = kcalloc(mcam->counters.max, sizeof(u16),
+ GFP_KERNEL);
if (!mcam->cntr2pfvf_map)
- goto free_mem;
+ goto free_cntr_bmap;
/* Alloc memory for MCAM entry to counter mapping and for tracking
* counter's reference count.
*/
- mcam->entry2cntr_map = devm_kcalloc(rvu->dev, mcam->bmap_entries,
- sizeof(u16), GFP_KERNEL);
+ mcam->entry2cntr_map = kcalloc(mcam->bmap_entries, sizeof(u16),
+ GFP_KERNEL);
if (!mcam->entry2cntr_map)
- goto free_mem;
+ goto free_cntr_map;
- mcam->cntr_refcnt = devm_kcalloc(rvu->dev, mcam->counters.max,
- sizeof(u16), GFP_KERNEL);
+ mcam->cntr_refcnt = kcalloc(mcam->counters.max, sizeof(u16),
+ GFP_KERNEL);
if (!mcam->cntr_refcnt)
- goto free_mem;
+ goto free_entry_cntr_map;
/* Alloc memory for saving target device of mcam rule */
- mcam->entry2target_pffunc = devm_kcalloc(rvu->dev, mcam->total_entries,
- sizeof(u16), GFP_KERNEL);
+ mcam->entry2target_pffunc = kmalloc_array(mcam->total_entries,
+ sizeof(u16), GFP_KERNEL);
if (!mcam->entry2target_pffunc)
- goto free_mem;
+ goto free_cntr_refcnt;
for (index = 0; index < mcam->bmap_entries; index++) {
mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP;
@@ -1958,8 +1958,21 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
return 0;
-free_mem:
+free_cntr_refcnt:
+ kfree(mcam->cntr_refcnt);
+free_entry_cntr_map:
+ kfree(mcam->entry2cntr_map);
+free_cntr_map:
+ kfree(mcam->cntr2pfvf_map);
+free_cntr_bmap:
kfree(mcam->counters.bmap);
+free_entry_map:
+ kfree(mcam->entry2pfvf_map);
+free_bmap_reverse:
+ bitmap_free(mcam->bmap_reverse);
+free_bmap:
+ bitmap_free(mcam->bmap);
+
return -ENOMEM;
}
@@ -2167,7 +2180,7 @@ void rvu_npc_freemem(struct rvu *rvu)
struct npc_mcam *mcam = &rvu->hw->mcam;
kfree(pkind->rsrc.bmap);
- kfree(mcam->counters.bmap);
+ npc_mcam_rsrcs_deinit(rvu);
if (rvu->kpu_prfl_addr)
iounmap(rvu->kpu_prfl_addr);
else
@@ -2819,12 +2832,6 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
else
nix_intf = pfvf->nix_rx_intf;
- if (!is_pffunc_af(pcifunc) &&
- npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, pcifunc)) {
- rc = NPC_MCAM_INVALID_REQ;
- goto exit;
- }
-
/* For AF installed rules, the nix_intf should be set to target NIX */
if (is_pffunc_af(req->hdr.pcifunc))
nix_intf = req->intf;
@@ -3176,10 +3183,6 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
if (!is_npc_interface_valid(rvu, req->intf))
return NPC_MCAM_INVALID_REQ;
- if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf,
- req->hdr.pcifunc))
- return NPC_MCAM_INVALID_REQ;
-
/* Try to allocate a MCAM entry */
entry_req.hdr.pcifunc = req->hdr.pcifunc;
entry_req.contig = true;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index 114e4ec218..c75669c8fd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -51,6 +51,8 @@ static const char * const npc_flow_names[] = {
[NPC_MPLS3_TTL] = "lse depth 3 ttl",
[NPC_MPLS4_LBTCBOS] = "lse depth 4 label tc bos",
[NPC_MPLS4_TTL] = "lse depth 4",
+ [NPC_TYPE_ICMP] = "icmp type",
+ [NPC_CODE_ICMP] = "icmp code",
[NPC_UNKNOWN] = "unknown",
};
@@ -526,6 +528,8 @@ do { \
NPC_SCAN_HDR(NPC_DPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 2, 2);
NPC_SCAN_HDR(NPC_SPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 0, 2);
NPC_SCAN_HDR(NPC_DPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 2, 2);
+ NPC_SCAN_HDR(NPC_TYPE_ICMP, NPC_LID_LD, NPC_LT_LD_ICMP, 0, 1);
+ NPC_SCAN_HDR(NPC_CODE_ICMP, NPC_LID_LD, NPC_LT_LD_ICMP, 1, 1);
NPC_SCAN_HDR(NPC_ETYPE_ETHER, NPC_LID_LA, NPC_LT_LA_ETHER, 12, 2);
NPC_SCAN_HDR(NPC_ETYPE_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 4, 2);
NPC_SCAN_HDR(NPC_ETYPE_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8, 2);
@@ -555,7 +559,7 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
u64 *features = &mcam->rx_features;
- u64 tcp_udp_sctp;
+ u64 proto_flags;
int hdr;
if (is_npc_intf_tx(intf))
@@ -566,18 +570,21 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
*features |= BIT_ULL(hdr);
}
- tcp_udp_sctp = BIT_ULL(NPC_SPORT_TCP) | BIT_ULL(NPC_SPORT_UDP) |
+ proto_flags = BIT_ULL(NPC_SPORT_TCP) | BIT_ULL(NPC_SPORT_UDP) |
BIT_ULL(NPC_DPORT_TCP) | BIT_ULL(NPC_DPORT_UDP) |
- BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP);
+ BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP) |
+ BIT_ULL(NPC_TYPE_ICMP) | BIT_ULL(NPC_CODE_ICMP);
/* for tcp/udp/sctp corresponding layer type should be in the key */
- if (*features & tcp_udp_sctp) {
+ if (*features & proto_flags) {
if (!npc_check_field(rvu, blkaddr, NPC_LD, intf))
- *features &= ~tcp_udp_sctp;
+ *features &= ~proto_flags;
else
*features |= BIT_ULL(NPC_IPPROTO_TCP) |
BIT_ULL(NPC_IPPROTO_UDP) |
- BIT_ULL(NPC_IPPROTO_SCTP);
+ BIT_ULL(NPC_IPPROTO_SCTP) |
+ BIT_ULL(NPC_IPPROTO_ICMP);
}
/* for AH/ICMP/ICMPv6/, check if corresponding layer type is present in the key */
@@ -971,6 +978,10 @@ do { \
ntohs(mask->sport), 0);
NPC_WRITE_FLOW(NPC_DPORT_SCTP, dport, ntohs(pkt->dport), 0,
ntohs(mask->dport), 0);
+ NPC_WRITE_FLOW(NPC_TYPE_ICMP, icmp_type, pkt->icmp_type, 0,
+ mask->icmp_type, 0);
+ NPC_WRITE_FLOW(NPC_CODE_ICMP, icmp_code, pkt->icmp_code, 0,
+ mask->icmp_code, 0);
NPC_WRITE_FLOW(NPC_IPSEC_SPI, spi, ntohl(pkt->spi), 0,
ntohl(mask->spi), 0);
@@ -1106,13 +1117,40 @@ static void rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc,
}
}
-static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
- struct mcam_entry *entry,
- struct npc_install_flow_req *req,
- u16 target, bool pf_set_vfs_mac)
+static int npc_mcast_update_action_index(struct rvu *rvu, struct npc_install_flow_req *req,
+ u64 op, void *action)
+{
+ int mce_index;
+
+ /* If a PF/VF is installing a multicast rule, it must have already
+ * created a group for the multicast/mirror list; otherwise reject
+ * the configuration. In that case req->index carries the
+ * multicast/mirror group index.
+ */
+ if (req->hdr.pcifunc &&
+ (op == NIX_RX_ACTIONOP_MCAST || op == NIX_TX_ACTIONOP_MCAST)) {
+ mce_index = rvu_nix_mcast_get_mce_index(rvu, req->hdr.pcifunc, req->index);
+ if (mce_index < 0)
+ return mce_index;
+
+ if (op == NIX_RX_ACTIONOP_MCAST)
+ ((struct nix_rx_action *)action)->index = mce_index;
+ else
+ ((struct nix_tx_action *)action)->index = mce_index;
+ }
+
+ return 0;
+}
+
+static int npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct mcam_entry *entry,
+ struct npc_install_flow_req *req,
+ u16 target, bool pf_set_vfs_mac)
{
struct rvu_switch *rswitch = &rvu->rswitch;
struct nix_rx_action action;
+ int ret;
if (rswitch->mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && pf_set_vfs_mac)
req->chan_mask = 0x0; /* Do not care channel */
@@ -1124,6 +1162,11 @@ static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
action.pf_func = target;
action.op = req->op;
action.index = req->index;
+
+ ret = npc_mcast_update_action_index(rvu, req, action.op, (void *)&action);
+ if (ret)
+ return ret;
+
action.match_id = req->match_id;
action.flow_key_alg = req->flow_key_alg;
@@ -1155,14 +1198,17 @@ static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
FIELD_PREP(RX_VTAG1_TYPE_MASK, req->vtag1_type) |
FIELD_PREP(RX_VTAG1_LID_MASK, NPC_LID_LB) |
FIELD_PREP(RX_VTAG1_RELPTR_MASK, 4);
+
+ return 0;
}
-static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
- struct mcam_entry *entry,
- struct npc_install_flow_req *req, u16 target)
+static int npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct mcam_entry *entry,
+ struct npc_install_flow_req *req, u16 target)
{
struct nix_tx_action action;
u64 mask = ~0ULL;
+ int ret;
/* If AF is installing then do not care about
* PF_FUNC in Send Descriptor
@@ -1176,6 +1222,11 @@ static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
*(u64 *)&action = 0x00;
action.op = req->op;
action.index = req->index;
+
+ ret = npc_mcast_update_action_index(rvu, req, action.op, (void *)&action);
+ if (ret)
+ return ret;
+
action.match_id = req->match_id;
entry->action = *(u64 *)&action;
@@ -1191,6 +1242,8 @@ static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
FIELD_PREP(TX_VTAG1_OP_MASK, req->vtag1_op) |
FIELD_PREP(TX_VTAG1_LID_MASK, NPC_LID_LA) |
FIELD_PREP(TX_VTAG1_RELPTR_MASK, 24);
+
+ return 0;
}
static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
@@ -1220,10 +1273,15 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
npc_update_flow(rvu, entry, features, &req->packet, &req->mask, &dummy,
req->intf, blkaddr);
- if (is_npc_intf_rx(req->intf))
- npc_update_rx_entry(rvu, pfvf, entry, req, target, pf_set_vfs_mac);
- else
- npc_update_tx_entry(rvu, pfvf, entry, req, target);
+ if (is_npc_intf_rx(req->intf)) {
+ err = npc_update_rx_entry(rvu, pfvf, entry, req, target, pf_set_vfs_mac);
+ if (err)
+ return err;
+ } else {
+ err = npc_update_tx_entry(rvu, pfvf, entry, req, target);
+ if (err)
+ return err;
+ }
/* Default unicast rules do not exist for TX */
if (is_npc_intf_tx(req->intf))
@@ -1340,6 +1398,10 @@ find_rule:
return rvu_nix_setup_ratelimit_aggr(rvu, req->hdr.pcifunc,
req->index, req->match_id);
+ if (owner && req->op == NIX_RX_ACTIONOP_MCAST)
+ return rvu_nix_mcast_update_mcam_entry(rvu, req->hdr.pcifunc,
+ req->index, entry_index);
+
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 18c1c9f361..6f73ad9807 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -734,5 +734,7 @@
#define APR_LMT_MAP_ENT_DIS_SCH_CMP_SHIFT 23
#define APR_LMT_MAP_ENT_SCH_ENA_SHIFT 22
#define APR_LMT_MAP_ENT_DIS_LINE_PREF_SHIFT 21
+#define LMTST_THROTTLE_MASK GENMASK_ULL(38, 35)
+#define LMTST_WR_PEND_MAX 15
#endif /* RVU_REG_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index edc9367b1b..5ef406c7e8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -340,11 +340,12 @@ struct nix_aq_res_s {
/* NIX Completion queue context structure */
struct nix_cq_ctx_s {
u64 base;
- u64 rsvd_64_67 : 4;
+ u64 lbp_ena : 1;
+ u64 lbpid_low : 3;
u64 bp_ena : 1;
- u64 rsvd_69_71 : 3;
+ u64 lbpid_med : 3;
u64 bpid : 9;
- u64 rsvd_81_83 : 3;
+ u64 lbpid_high : 3;
u64 qint_idx : 7;
u64 cq_err : 1;
u64 cint_idx : 7;
@@ -358,10 +359,14 @@ struct nix_cq_ctx_s {
u64 drop : 8;
u64 drop_ena : 1;
u64 ena : 1;
- u64 rsvd_210_211 : 2;
- u64 substream : 20;
+ u64 cpt_drop_err_en : 1;
+ u64 rsvd_211 : 1;
+ u64 substream : 12;
+ u64 stash_thresh : 4;
+ u64 lbp_frac : 4;
u64 caching : 1;
- u64 rsvd_233_235 : 3;
+ u64 stashing : 1;
+ u64 rsvd_234_235 : 2;
u64 qsize : 4;
u64 cq_err_int : 8;
u64 cq_err_int_ena : 8;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 8b7fc0af91..7f786de610 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -834,21 +834,26 @@ static int otx2_rss_ctx_create(struct otx2_nic *pfvf,
return 0;
}
-/* RSS context configuration */
-static int otx2_set_rxfh_context(struct net_device *dev, const u32 *indir,
- const u8 *hkey, const u8 hfunc,
- u32 *rss_context, bool delete)
+/* Configure RSS table and hash key */
+static int otx2_set_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
+ u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP;
struct otx2_nic *pfvf = netdev_priv(dev);
struct otx2_rss_ctx *rss_ctx;
struct otx2_rss_info *rss;
int ret, idx;
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (*rss_context != ETH_RXFH_CONTEXT_ALLOC &&
- *rss_context >= MAX_RSS_GROUPS)
+ if (rxfh->rss_context)
+ rss_context = rxfh->rss_context;
+
+ if (rss_context != ETH_RXFH_CONTEXT_ALLOC &&
+ rss_context >= MAX_RSS_GROUPS)
return -EINVAL;
rss = &pfvf->hw.rss_info;
@@ -858,40 +863,45 @@ static int otx2_set_rxfh_context(struct net_device *dev, const u32 *indir,
return -EIO;
}
- if (hkey) {
- memcpy(rss->key, hkey, sizeof(rss->key));
+ if (rxfh->key) {
+ memcpy(rss->key, rxfh->key, sizeof(rss->key));
otx2_set_rss_key(pfvf);
}
- if (delete)
- return otx2_rss_ctx_delete(pfvf, *rss_context);
+ if (rxfh->rss_delete)
+ return otx2_rss_ctx_delete(pfvf, rss_context);
- if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
- ret = otx2_rss_ctx_create(pfvf, rss_context);
+ if (rss_context == ETH_RXFH_CONTEXT_ALLOC) {
+ ret = otx2_rss_ctx_create(pfvf, &rss_context);
+ rxfh->rss_context = rss_context;
if (ret)
return ret;
}
- if (indir) {
- rss_ctx = rss->rss_ctx[*rss_context];
+ if (rxfh->indir) {
+ rss_ctx = rss->rss_ctx[rss_context];
for (idx = 0; idx < rss->rss_size; idx++)
- rss_ctx->ind_tbl[idx] = indir[idx];
+ rss_ctx->ind_tbl[idx] = rxfh->indir[idx];
}
- otx2_set_rss_table(pfvf, *rss_context);
+ otx2_set_rss_table(pfvf, rss_context);
return 0;
}
-static int otx2_get_rxfh_context(struct net_device *dev, u32 *indir,
- u8 *hkey, u8 *hfunc, u32 rss_context)
+/* Get RSS configuration */
+static int otx2_get_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh)
{
+ u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP;
struct otx2_nic *pfvf = netdev_priv(dev);
struct otx2_rss_ctx *rss_ctx;
struct otx2_rss_info *rss;
+ u32 *indir = rxfh->indir;
int idx, rx_queues;
rss = &pfvf->hw.rss_info;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (rxfh->rss_context)
+ rss_context = rxfh->rss_context;
if (!indir)
return 0;
@@ -913,30 +923,12 @@ static int otx2_get_rxfh_context(struct net_device *dev, u32 *indir,
for (idx = 0; idx < rss->rss_size; idx++)
indir[idx] = rss_ctx->ind_tbl[idx];
}
- if (hkey)
- memcpy(hkey, rss->key, sizeof(rss->key));
+ if (rxfh->key)
+ memcpy(rxfh->key, rss->key, sizeof(rss->key));
return 0;
}
-/* Get RSS configuration */
-static int otx2_get_rxfh(struct net_device *dev, u32 *indir,
- u8 *hkey, u8 *hfunc)
-{
- return otx2_get_rxfh_context(dev, indir, hkey, hfunc,
- DEFAULT_RSS_CONTEXT_GROUP);
-}
-
-/* Configure RSS table and hash key */
-static int otx2_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *hkey, const u8 hfunc)
-{
-
- u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP;
-
- return otx2_set_rxfh_context(dev, indir, hkey, hfunc, &rss_context, 0);
-}
-
static u32 otx2_get_msglevel(struct net_device *netdev)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
@@ -1317,6 +1309,7 @@ static void otx2_get_fec_stats(struct net_device *netdev,
}
static const struct ethtool_ops otx2_ethtool_ops = {
+ .cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE,
@@ -1339,8 +1332,6 @@ static const struct ethtool_ops otx2_ethtool_ops = {
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
.set_rxfh = otx2_set_rxfh,
- .get_rxfh_context = otx2_get_rxfh_context,
- .set_rxfh_context = otx2_set_rxfh_context,
.get_msglevel = otx2_get_msglevel,
.set_msglevel = otx2_set_msglevel,
.get_pauseparam = otx2_get_pauseparam,
@@ -1440,6 +1431,7 @@ static int otx2vf_get_link_ksettings(struct net_device *netdev,
}
static const struct ethtool_ops otx2vf_ethtool_ops = {
+ .cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE,
@@ -1458,8 +1450,6 @@ static const struct ethtool_ops otx2vf_ethtool_ops = {
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
.set_rxfh = otx2_set_rxfh,
- .get_rxfh_context = otx2_get_rxfh_context,
- .set_rxfh_context = otx2_set_rxfh_context,
.get_ringparam = otx2_get_ringparam,
.set_ringparam = otx2_set_ringparam,
.get_coalesce = otx2_get_coalesce,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index b40bd0e467..3f46d5e0fb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1933,7 +1933,7 @@ int otx2_open(struct net_device *netdev)
* mcam entries are enabled to receive the packets. Hence disable the
* packet I/O.
*/
- if (err == EIO)
+ if (err == -EIO)
goto err_disable_rxtx;
else if (err)
goto err_tx_stop_queues;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index db1e0e0e81..60ee7ae2c4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -29,6 +29,8 @@
#define OTX2_UNSUPP_LSE_DEPTH GENMASK(6, 4)
+#define MCAST_INVALID_GRP (-1U)
+
struct otx2_tc_flow_stats {
u64 bytes;
u64 pkts;
@@ -47,6 +49,7 @@ struct otx2_tc_flow {
bool is_act_police;
u32 prio;
struct npc_install_flow_req req;
+ u32 mcast_grp_idx;
u64 rate;
u32 burst;
bool is_pps;
@@ -355,22 +358,96 @@ static int otx2_tc_act_set_police(struct otx2_nic *nic,
return rc;
}
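+
+/* Create an ingress multicast/mirror group via the AF mbox, then add all
+ * mirror destinations collected from the tc actions in one update request.
+ * On success the flow's action is rewritten to NIX_RX_ACTIONOP_MCAST.
+ */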
+static int otx2_tc_update_mcast(struct otx2_nic *nic,
+ struct npc_install_flow_req *req,
+ struct netlink_ext_ack *extack,
+ struct otx2_tc_flow *node,
+ struct nix_mcast_grp_update_req *ureq,
+ u8 num_intf)
+{
+ struct nix_mcast_grp_update_req *grp_update_req;
+ struct nix_mcast_grp_create_req *creq;
+ struct nix_mcast_grp_create_rsp *crsp;
+ u32 grp_index;
+ int rc;
+
+ mutex_lock(&nic->mbox.lock);
+ creq = otx2_mbox_alloc_msg_nix_mcast_grp_create(&nic->mbox);
+ if (!creq) {
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ creq->dir = NIX_MCAST_INGRESS;
+ /* Send message to AF */
+ rc = otx2_sync_mbox_msg(&nic->mbox);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to create multicast group");
+ goto error;
+ }
+
+ crsp = (struct nix_mcast_grp_create_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
+ 0,
+ &creq->hdr);
+ if (IS_ERR(crsp)) {
+ rc = PTR_ERR(crsp);
+ goto error;
+ }
+
+ grp_index = crsp->mcast_grp_idx;
+ grp_update_req = otx2_mbox_alloc_msg_nix_mcast_grp_update(&nic->mbox);
+ if (!grp_update_req) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to update multicast group");
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ ureq->op = NIX_MCAST_OP_ADD_ENTRY;
+ ureq->mcast_grp_idx = grp_index;
+ ureq->num_mce_entry = num_intf;
+ ureq->pcifunc[0] = nic->pcifunc;
+ ureq->channel[0] = nic->hw.tx_chan_base;
+
+ ureq->dest_type[0] = NIX_RX_RSS;
+ ureq->rq_rss_index[0] = 0;
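+ /* Graft the allocator-initialised mbox header onto the stack request,
+ * then copy the populated request over the message body.
+ */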
+ memcpy(&ureq->hdr, &grp_update_req->hdr, sizeof(struct mbox_msghdr));
+ memcpy(grp_update_req, ureq, sizeof(struct nix_mcast_grp_update_req));
+
+ /* Send message to AF */
+ rc = otx2_sync_mbox_msg(&nic->mbox);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to update multicast group");
+ goto error;
+ }
+
+ mutex_unlock(&nic->mbox.lock);
+ req->op = NIX_RX_ACTIONOP_MCAST;
+ req->index = grp_index;
+ node->mcast_grp_idx = grp_index;
+ return 0;
+
+error:
+ mutex_unlock(&nic->mbox.lock);
+ return rc;
+}
+
static int otx2_tc_parse_actions(struct otx2_nic *nic,
struct flow_action *flow_action,
struct npc_install_flow_req *req,
struct flow_cls_offload *f,
struct otx2_tc_flow *node)
{
+ struct nix_mcast_grp_update_req dummy_grp_update_req = { 0 };
struct netlink_ext_ack *extack = f->common.extack;
+ bool pps = false, mcast = false;
struct flow_action_entry *act;
struct net_device *target;
struct otx2_nic *priv;
u32 burst, mark = 0;
u8 nr_police = 0;
- bool pps = false;
+ u8 num_intf = 1;
+ int err, i;
u64 rate;
- int err;
- int i;
if (!flow_action_has_entries(flow_action)) {
NL_SET_ERR_MSG_MOD(extack, "no tc actions specified");
@@ -442,11 +519,30 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
req->index = act->rx_queue;
break;
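+ /* Collect each mirror destination here; the multicast/mirror
+ * group itself is created once all actions have been parsed.
+ */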
+ case FLOW_ACTION_MIRRED_INGRESS:
+ target = act->dev;
+ priv = netdev_priv(target);
+ dummy_grp_update_req.pcifunc[num_intf] = priv->pcifunc;
+ dummy_grp_update_req.channel[num_intf] = priv->hw.tx_chan_base;
+ dummy_grp_update_req.dest_type[num_intf] = NIX_RX_RSS;
+ dummy_grp_update_req.rq_rss_index[num_intf] = 0;
+ mcast = true;
+ num_intf++;
+ break;
+
default:
return -EOPNOTSUPP;
}
}
+ if (mcast) {
+ err = otx2_tc_update_mcast(nic, req, extack, node,
+ &dummy_grp_update_req,
+ num_intf);
+ if (err)
+ return err;
+ }
+
if (nr_police > 1) {
NL_SET_ERR_MSG_MOD(extack,
"rate limit police offload requires a single action");
@@ -541,6 +637,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
BIT(FLOW_DISSECTOR_KEY_IPSEC) |
BIT_ULL(FLOW_DISSECTOR_KEY_MPLS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ICMP) |
BIT_ULL(FLOW_DISSECTOR_KEY_IP)))) {
netdev_info(nic->netdev, "unsupported flow used key 0x%llx",
dissector->used_keys);
@@ -591,6 +688,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
struct flow_match_control match;
+ u32 val;
flow_rule_match_control(rule, &match);
if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
@@ -599,12 +697,14 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
}
if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ val = match.key->flags & FLOW_DIS_IS_FRAGMENT;
if (ntohs(flow_spec->etype) == ETH_P_IP) {
- flow_spec->ip_flag = IPV4_FLAG_MORE;
+ flow_spec->ip_flag = val ? IPV4_FLAG_MORE : 0;
flow_mask->ip_flag = IPV4_FLAG_MORE;
req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
} else if (ntohs(flow_spec->etype) == ETH_P_IPV6) {
- flow_spec->next_header = IPPROTO_FRAGMENT;
+ flow_spec->next_header = val ?
+ IPPROTO_FRAGMENT : 0;
flow_mask->next_header = 0xff;
req->features |= BIT_ULL(NPC_IPFRAG_IPV6);
} else {
@@ -815,6 +915,19 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
}
}
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) {
+ struct flow_match_icmp match;
+
+ flow_rule_match_icmp(rule, &match);
+
+ flow_spec->icmp_type = match.key->type;
+ flow_mask->icmp_type = match.mask->type;
+ req->features |= BIT_ULL(NPC_TYPE_ICMP);
+
+ flow_spec->icmp_code = match.key->code;
+ flow_mask->icmp_code = match.mask->code;
+ req->features |= BIT_ULL(NPC_CODE_ICMP);
+ }
return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
}
@@ -1052,6 +1165,7 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
struct flow_cls_offload *tc_flow_cmd)
{
struct otx2_flow_config *flow_cfg = nic->flow_cfg;
+ struct nix_mcast_grp_destroy_req *grp_destroy_req;
struct otx2_tc_flow *flow_node;
int err;
@@ -1085,6 +1199,15 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
mutex_unlock(&nic->mbox.lock);
}
+ /* Remove the multicast/mirror related nodes */
+ if (flow_node->mcast_grp_idx != MCAST_INVALID_GRP) {
+ mutex_lock(&nic->mbox.lock);
+ grp_destroy_req = otx2_mbox_alloc_msg_nix_mcast_grp_destroy(&nic->mbox);
+ if (grp_destroy_req) {
+ grp_destroy_req->mcast_grp_idx = flow_node->mcast_grp_idx;
+ otx2_sync_mbox_msg(&nic->mbox);
+ }
+ mutex_unlock(&nic->mbox.lock);
+ }
+
free_mcam_flow:
otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL);
@@ -1124,6 +1247,7 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
spin_lock_init(&new_node->lock);
new_node->cookie = tc_flow_cmd->cookie;
new_node->prio = tc_flow_cmd->common.prio;
+ new_node->mcast_grp_idx = MCAST_INVALID_GRP;
memset(&dummy, 0, sizeof(struct npc_install_flow_req));
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
index 1e77bbf5d2..1723e9912a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
@@ -382,6 +382,7 @@ static void otx2_qos_read_txschq_cfg_tl(struct otx2_qos_node *parent,
otx2_qos_read_txschq_cfg_tl(node, cfg);
cnt = cfg->static_node_pos[node->level];
cfg->schq_contig_list[node->level][cnt] = node->schq;
+ cfg->schq_index_used[node->level][cnt] = true;
cfg->schq_contig[node->level]++;
cfg->static_node_pos[node->level]++;
otx2_qos_read_txschq_cfg_schq(node, cfg);