author     Daniel Baumann <daniel.baumann@progress-linux.org> 2024-05-18 18:50:12 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org> 2024-05-18 18:50:12 +0000
commit     8665bd53f2f2e27e5511d90428cb3f60e6d0ce15 (patch)
tree       8d58900dc0ebd4a3011f92c128d2fe45bc7c4bf2 /drivers/net/ethernet/broadcom
parent     Adding debian version 6.7.12-1. (diff)
Merging upstream version 6.8.9.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/broadcom')
-rw-r--r-- drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c 52
-rw-r--r-- drivers/net/ethernet/broadcom/b44.c 14
-rw-r--r-- drivers/net/ethernet/broadcom/bcm4908_enet.c 1
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c 1
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac-bcma.c 1
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac-platform.c 1
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.c 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 9
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c 27
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c 14
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c 2758
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h 503
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c 10
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 740
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h 521
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c 38
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 8
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c 16
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c 39
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.c 22
23 files changed, 3445 insertions, 1339 deletions
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
index 9cae5a3090..529172a87a 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
@@ -391,7 +391,9 @@ static void umac_reset(struct bcmasp_intf *intf)
umac_wl(intf, 0x0, UMC_CMD);
umac_wl(intf, UMC_CMD_SW_RESET, UMC_CMD);
usleep_range(10, 100);
- umac_wl(intf, 0x0, UMC_CMD);
+ /* We hold the umac in reset and bring it out of
+ * reset when phy link is up.
+ */
}
static void umac_set_hw_addr(struct bcmasp_intf *intf,
@@ -411,6 +413,8 @@ static void umac_enable_set(struct bcmasp_intf *intf, u32 mask,
u32 reg;
reg = umac_rl(intf, UMC_CMD);
+ if (reg & UMC_CMD_SW_RESET)
+ return;
if (enable)
reg |= mask;
else
@@ -429,13 +433,10 @@ static void umac_init(struct bcmasp_intf *intf)
umac_wl(intf, 0x800, UMC_FRM_LEN);
umac_wl(intf, 0xffff, UMC_PAUSE_CNTRL);
umac_wl(intf, 0x800, UMC_RX_MAX_PKT_SZ);
- umac_enable_set(intf, UMC_CMD_PROMISC, 1);
}
-static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
+static int bcmasp_tx_reclaim(struct bcmasp_intf *intf)
{
- struct bcmasp_intf *intf =
- container_of(napi, struct bcmasp_intf, tx_napi);
struct bcmasp_intf_stats64 *stats = &intf->stats64;
struct device *kdev = &intf->parent->pdev->dev;
unsigned long read, released = 0;
@@ -478,10 +479,16 @@ static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
DESC_RING_COUNT);
}
- /* Ensure all descriptors have been written to DRAM for the hardware
- * to see updated contents.
- */
- wmb();
+ return released;
+}
+
+static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct bcmasp_intf *intf =
+ container_of(napi, struct bcmasp_intf, tx_napi);
+ int released = 0;
+
+ released = bcmasp_tx_reclaim(intf);
napi_complete(&intf->tx_napi);
@@ -656,6 +663,12 @@ static void bcmasp_adj_link(struct net_device *dev)
UMC_CMD_HD_EN | UMC_CMD_RX_PAUSE_IGNORE |
UMC_CMD_TX_PAUSE_IGNORE);
reg |= cmd_bits;
+ if (reg & UMC_CMD_SW_RESET) {
+ reg &= ~UMC_CMD_SW_RESET;
+ umac_wl(intf, reg, UMC_CMD);
+ udelay(2);
+ reg |= UMC_CMD_TX_EN | UMC_CMD_RX_EN | UMC_CMD_PROMISC;
+ }
umac_wl(intf, reg, UMC_CMD);
intf->eee.eee_active = phy_init_eee(phydev, 0) >= 0;
@@ -684,6 +697,8 @@ static int bcmasp_init_rx(struct bcmasp_intf *intf)
intf->rx_buf_order = get_order(RING_BUFFER_SIZE);
buffer_pg = alloc_pages(GFP_KERNEL, intf->rx_buf_order);
+ if (!buffer_pg)
+ return -ENOMEM;
dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE,
DMA_FROM_DEVICE);
@@ -785,6 +800,7 @@ static int bcmasp_init_tx(struct bcmasp_intf *intf)
intf->tx_spb_index = 0;
intf->tx_spb_clean_index = 0;
+ memset(intf->tx_cbs, 0, sizeof(struct bcmasp_tx_cb) * DESC_RING_COUNT);
netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll);
@@ -895,6 +911,8 @@ static void bcmasp_netif_deinit(struct net_device *dev)
} while (timeout-- > 0);
tx_spb_dma_wl(intf, 0x0, TX_SPB_DMA_FIFO_CTRL);
+ bcmasp_tx_reclaim(intf);
+
umac_enable_set(intf, UMC_CMD_TX_EN, 0);
phy_stop(dev->phydev);
@@ -1061,9 +1079,6 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
umac_init(intf);
- /* Disable the UniMAC RX/TX */
- umac_enable_set(intf, (UMC_CMD_RX_EN | UMC_CMD_TX_EN), 0);
-
umac_set_hw_addr(intf, dev->dev_addr);
intf->old_duplex = -1;
@@ -1083,9 +1098,6 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
bcmasp_enable_rx(intf, 1);
- /* Turn on UniMAC TX/RX */
- umac_enable_set(intf, (UMC_CMD_RX_EN | UMC_CMD_TX_EN), 1);
-
intf->crc_fwd = !!(umac_rl(intf, UMC_CMD) & UMC_CMD_CRC_FWD);
bcmasp_netif_start(dev);
@@ -1095,6 +1107,7 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
return 0;
err_reclaim_tx:
+ netif_napi_del(&intf->tx_napi);
bcmasp_reclaim_free_all_tx(intf);
err_phy_disconnect:
if (phydev)
@@ -1321,7 +1334,14 @@ static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf)
if (intf->wolopts & WAKE_FILTER)
bcmasp_netfilt_suspend(intf);
- /* UniMAC receive needs to be turned on */
+ /* Bring UniMAC out of reset if needed and enable RX */
+ reg = umac_rl(intf, UMC_CMD);
+ if (reg & UMC_CMD_SW_RESET)
+ reg &= ~UMC_CMD_SW_RESET;
+
+ reg |= UMC_CMD_RX_EN | UMC_CMD_PROMISC;
+ umac_wl(intf, reg, UMC_CMD);
+
umac_enable_set(intf, UMC_CMD_RX_EN, 1);
if (intf->parent->wol_irq > 0) {
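The bcmasp hunks above change the reset sequencing: umac_reset() now leaves the UniMAC in software reset, umac_enable_set() refuses to touch a block still held in reset, and bcmasp_adj_link() releases the reset only once the PHY reports link. A standalone C sketch of that release-then-enable pattern (the register access and bit positions are illustrative stand-ins for the driver's umac_rl()/umac_wl() helpers, not the in-tree API):

    #include <stdint.h>

    #define UMC_CMD_TX_EN     (1u << 0)   /* bit positions are illustrative */
    #define UMC_CMD_RX_EN     (1u << 1)
    #define UMC_CMD_SW_RESET  (1u << 13)

    struct mac { volatile uint32_t *umc_cmd; };

    static void mac_link_up(struct mac *m)
    {
            uint32_t reg = *m->umc_cmd;

            if (reg & UMC_CMD_SW_RESET) {
                    reg &= ~UMC_CMD_SW_RESET;   /* release reset first */
                    *m->umc_cmd = reg;          /* let the block settle */
                    reg |= UMC_CMD_TX_EN | UMC_CMD_RX_EN;
            }
            *m->umc_cmd = reg;                  /* then enable RX/TX */
    }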
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 3e4fb3c3e8..1be6d14030 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2009,12 +2009,14 @@ static int b44_set_pauseparam(struct net_device *dev,
bp->flags |= B44_FLAG_TX_PAUSE;
else
bp->flags &= ~B44_FLAG_TX_PAUSE;
- if (bp->flags & B44_FLAG_PAUSE_AUTO) {
- b44_halt(bp);
- b44_init_rings(bp);
- b44_init_hw(bp, B44_FULL_RESET);
- } else {
- __b44_set_flow_ctrl(bp, bp->flags);
+ if (netif_running(dev)) {
+ if (bp->flags & B44_FLAG_PAUSE_AUTO) {
+ b44_halt(bp);
+ b44_init_rings(bp);
+ b44_init_hw(bp, B44_FULL_RESET);
+ } else {
+ __b44_set_flow_ctrl(bp, bp->flags);
+ }
}
spin_unlock_irq(&bp->lock);
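The b44 fix guards the pause-parameter reprogramming with netif_running(), so ethtool can store new flow-control flags while the interface is down without resetting hardware that is not initialized. A minimal sketch of the guard (the hw_* helpers are hypothetical stand-ins for b44_halt()/b44_init_hw() and __b44_set_flow_ctrl()):

    #include <stdbool.h>

    struct nic { bool running; bool pause_autoneg; };

    static void hw_full_reset(struct nic *n)    { (void)n; /* halt + re-init rings/hw */ }
    static void hw_set_flow_ctrl(struct nic *n) { (void)n; /* lightweight register update */ }

    static void set_pauseparam(struct nic *n)
    {
            /* Only touch the hardware while the interface is up; the
             * saved flags are applied on the next open otherwise.
             */
            if (!n->running)
                    return;
            if (n->pause_autoneg)
                    hw_full_reset(n);
            else
                    hw_set_flow_ctrl(n);
    }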
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
index 3e7c8671cd..72df1bb101 100644
--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -793,5 +793,6 @@ static struct platform_driver bcm4908_enet_driver = {
};
module_platform_driver(bcm4908_enet_driver);
+MODULE_DESCRIPTION("Broadcom BCM4908 Gigabit Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, bcm4908_enet_of_match);
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
index 9b83d53616..50b8e97a81 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
@@ -260,4 +260,5 @@ void bcma_mdio_mii_unregister(struct mii_bus *mii_bus)
EXPORT_SYMBOL_GPL(bcma_mdio_mii_unregister);
MODULE_AUTHOR("Rafał Miłecki");
+MODULE_DESCRIPTION("Broadcom iProc GBit BCMA MDIO helpers");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index 6e4f36aaf5..36f9bad28e 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -362,4 +362,5 @@ module_init(bgmac_init)
module_exit(bgmac_exit)
MODULE_AUTHOR("Rafał Miłecki");
+MODULE_DESCRIPTION("Broadcom iProc GBit BCMA interface driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index 0b21fd5bd4..77425c7a32 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -298,4 +298,5 @@ static struct platform_driver bgmac_enet_driver = {
};
module_platform_driver(bgmac_enet_driver);
+MODULE_DESCRIPTION("Broadcom iProc GBit platform interface driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 448a1b90de..6ffdc42294 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1626,4 +1626,5 @@ int bgmac_enet_resume(struct bgmac *bgmac)
EXPORT_SYMBOL_GPL(bgmac_enet_resume);
MODULE_AUTHOR("Rafał Miłecki");
+MODULE_DESCRIPTION("Broadcom iProc GBit driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e9c1e1bb55..528441b28c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -147,10 +147,11 @@ void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
phy_fw_ver[0] = '\0';
bnx2x_get_ext_phy_fw_version(&bp->link_params,
- phy_fw_ver, PHY_FW_VER_LEN);
- strscpy(buf, bp->fw_ver, buf_len);
- snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
- "bc %d.%d.%d%s%s",
+ phy_fw_ver, sizeof(phy_fw_ver));
+ /* This may become truncated. */
+ scnprintf(buf, buf_len,
+ "%sbc %d.%d.%d%s%s",
+ bp->fw_ver,
(bp->common.bc_ver & 0xff0000) >> 16,
(bp->common.bc_ver & 0xff00) >> 8,
(bp->common.bc_ver & 0xff),
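The bnx2x_cmn.c hunk replaces a strscpy() plus a follow-up snprintf() that assumed a 32-byte buffer with a single scnprintf() bounded by the caller's buf_len, so an undersized buffer now yields truncation instead of an overrun. The same shape in plain C (snprintf standing in for the kernel's scnprintf, which differs mainly in returning the number of bytes actually written):

    #include <stdio.h>

    static void fill_fw_str(char *buf, size_t buf_len,
                            const char *fw_ver, unsigned int bc_ver)
    {
            /* One bounded write: output may be truncated, never overrun. */
            snprintf(buf, buf_len, "%sbc %u.%u.%u",
                     fw_ver,
                     (bc_ver & 0xff0000) >> 16,
                     (bc_ver & 0xff00) >> 8,
                     bc_ver & 0xff);
    }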
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index bda3ccc28e..0bc7690cde 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1132,7 +1132,7 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
}
memset(version, 0, sizeof(version));
- bnx2x_fill_fw_str(bp, version, ETHTOOL_FWVERS_LEN);
+ bnx2x_fill_fw_str(bp, version, sizeof(version));
strlcat(info->fw_version, version, sizeof(info->fw_version));
strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
@@ -3486,16 +3486,15 @@ static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
return T_ETH_INDIRECTION_TABLE_SIZE;
}
-static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int bnx2x_get_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh)
{
struct bnx2x *bp = netdev_priv(dev);
u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
size_t i;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
- if (!indir)
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (!rxfh->indir)
return 0;
/* Get the current configuration of the RSS indirection table */
@@ -3511,13 +3510,14 @@ static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
* queue.
*/
for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
- indir[i] = ind_table[i] - bp->fp->cl_id;
+ rxfh->indir[i] = ind_table[i] - bp->fp->cl_id;
return 0;
}
-static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int bnx2x_set_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct bnx2x *bp = netdev_priv(dev);
size_t i;
@@ -3525,11 +3525,12 @@ static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
/* We require at least one supported parameter to be changed and no
* change in any of the unsupported parameters
*/
- if (key ||
- (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ if (rxfh->key ||
+ (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP))
return -EOPNOTSUPP;
- if (!indir)
+ if (!rxfh->indir)
return 0;
for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
@@ -3542,7 +3543,7 @@ static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
* align the received table to the Client ID of the leading RSS
* queue
*/
- bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id;
+ bp->rss_conf_obj.ind_table[i] = rxfh->indir[i] + bp->fp->cl_id;
}
if (bp->state == BNX2X_STATE_OPEN)
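The bnx2x_ethtool.c hunks track the 6.8 ethtool API change that folded the loose indir/key/hfunc arguments into a single struct ethtool_rxfh_param and gave set_rxfh a netlink_ext_ack. A driver-side skeleton of the new callback shape (field names follow the conversion shown above; the table fill-in is elided):

    static int drv_get_rxfh(struct net_device *dev,
                            struct ethtool_rxfh_param *rxfh)
    {
            rxfh->hfunc = ETH_RSS_HASH_TOP;   /* always report the hash function */
            if (!rxfh->indir)                 /* caller may only want hfunc */
                    return 0;
            /* fill rxfh->indir[] from the device's indirection table */
            return 0;
    }

    static int drv_set_rxfh(struct net_device *dev,
                            struct ethtool_rxfh_param *rxfh,
                            struct netlink_ext_ack *extack)
    {
            if (rxfh->key ||
                (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
                 rxfh->hfunc != ETH_RSS_HASH_TOP))
                    return -EOPNOTSUPP;       /* reject unsupported parameters */
            /* apply rxfh->indir if non-NULL */
            return 0;
    }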
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 02808513ff..ea310057fe 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6163,8 +6163,8 @@ static void bnx2x_link_int_ack(struct link_params *params,
static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
{
- str[0] = '\0';
- (*len)--;
+ if (*len)
+ str[0] = '\0';
return 0;
}
@@ -6173,7 +6173,7 @@ static int bnx2x_format_ver(u32 num, u8 *str, u16 *len)
u16 ret;
if (*len < 10) {
- /* Need more than 10chars for this format */
+ /* Need more than 10 chars for this format */
bnx2x_null_format_ver(num, str, len);
return -EINVAL;
}
@@ -6188,8 +6188,8 @@ static int bnx2x_3_seq_format_ver(u32 num, u8 *str, u16 *len)
{
u16 ret;
- if (*len < 10) {
- /* Need more than 10chars for this format */
+ if (*len < 9) {
+ /* Need more than 9 chars for this format */
bnx2x_null_format_ver(num, str, len);
return -EINVAL;
}
@@ -6208,7 +6208,7 @@ int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 *version,
int status = 0;
u8 *ver_p = version;
u16 remain_len = len;
- if (version == NULL || params == NULL)
+ if (version == NULL || params == NULL || len == 0)
return -EINVAL;
bp = params->bp;
@@ -11546,7 +11546,7 @@ static int bnx2x_7101_format_ver(u32 spirom_ver, u8 *str, u16 *len)
str[2] = (spirom_ver & 0xFF0000) >> 16;
str[3] = (spirom_ver & 0xFF000000) >> 24;
str[4] = '\0';
- *len -= 5;
+ *len -= 4;
return 0;
}
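The bnx2x_link.c hunks fix the remaining-length bookkeeping in the version formatters: a zero length is rejected up front in bnx2x_get_ext_phy_fw_version(), the null formatter no longer decrements *len, and the 7101 formatter stops counting the NUL terminator against the budget. A standalone illustration of the corrected contract (an assumption here: the caller reserves one extra byte for the terminator, which *len does not count):

    #include <stdint.h>

    /* Write four version bytes plus a NUL; *len tracks remaining
     * capacity excluding the terminator, as in the fixed accounting.
     */
    static int format_ver(uint32_t v, char *str, uint16_t *len)
    {
            if (*len < 4)
                    return -1;                 /* not enough room */
            str[0] = v & 0xFF;
            str[1] = (v >> 8) & 0xFF;
            str[2] = (v >> 16) & 0xFF;
            str[3] = (v >> 24) & 0xFF;
            str[4] = '\0';
            *len -= 4;                         /* the NUL is not counted */
            return 0;
    }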
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 22c8bfb5ed..e133a412a1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -120,6 +120,10 @@ static const struct {
[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
+ [BCM57608] = { "Broadcom BCM57608 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
+ [BCM57604] = { "Broadcom BCM57604 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
+ [BCM57602] = { "Broadcom BCM57602 NetXtreme-E 10Gb/25Gb/50Gb/100Gb Ethernet" },
+ [BCM57601] = { "Broadcom BCM57601 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
[BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
[BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
[BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
@@ -174,6 +178,10 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
+ { PCI_VDEVICE(BROADCOM, 0x1760), .driver_data = BCM57608 },
+ { PCI_VDEVICE(BROADCOM, 0x1761), .driver_data = BCM57604 },
+ { PCI_VDEVICE(BROADCOM, 0x1762), .driver_data = BCM57602 },
+ { PCI_VDEVICE(BROADCOM, 0x1763), .driver_data = BCM57601 },
{ PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
@@ -254,22 +262,28 @@ static bool bnxt_vf_pciid(enum board_idx idx)
writel(DB_CP_IRQ_DIS_FLAGS, db)
#define BNXT_DB_CQ(db, idx) \
- writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)
+ writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
#define BNXT_DB_NQ_P5(db, idx) \
- bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), \
+ bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\
(db)->doorbell)
+#define BNXT_DB_NQ_P7(db, idx) \
+ bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_MASK | \
+ DB_RING_IDX(db, idx), (db)->doorbell)
+
#define BNXT_DB_CQ_ARM(db, idx) \
- writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)
+ writel(DB_CP_REARM_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
#define BNXT_DB_NQ_ARM_P5(db, idx) \
- bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx),\
- (db)->doorbell)
+ bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | \
+ DB_RING_IDX(db, idx), (db)->doorbell)
static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P7)
+ BNXT_DB_NQ_P7(db, idx);
+ else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
BNXT_DB_NQ_P5(db, idx);
else
BNXT_DB_CQ(db, idx);
@@ -277,7 +291,7 @@ static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
BNXT_DB_NQ_ARM_P5(db, idx);
else
BNXT_DB_CQ_ARM(db, idx);
@@ -285,9 +299,9 @@ static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
- RING_CMP(idx), db->doorbell);
+ DB_RING_IDX(db, idx), db->doorbell);
else
BNXT_DB_CQ(db, idx);
}
@@ -321,7 +335,7 @@ static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
if (!rxr->bnapi->in_reset) {
rxr->bnapi->in_reset = true;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
else
set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
@@ -331,16 +345,16 @@ static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
}
void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
- int idx)
+ u16 curr)
{
struct bnxt_napi *bnapi = txr->bnapi;
if (bnapi->tx_fault)
return;
- netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_pkts:%d cons:%u prod:%u i:%d)",
- txr->txq_index, bnapi->tx_pkts,
- txr->tx_cons, txr->tx_prod, idx);
+ netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)",
+ txr->txq_index, txr->tx_hw_cons,
+ txr->tx_cons, txr->tx_prod, curr);
WARN_ON_ONCE(1);
bnapi->tx_fault = 1;
bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
@@ -381,6 +395,8 @@ static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
u16 prod)
{
+ /* Sync BD data before updating doorbell */
+ wmb();
bnxt_db_write(bp, &txr->tx_db, prod);
txr->kick_pending = 0;
}
@@ -388,7 +404,7 @@ static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
- struct tx_bd *txbd;
+ struct tx_bd *txbd, *txbd0;
struct tx_bd_ext *txbd1;
struct netdev_queue *txq;
int i;
@@ -430,11 +446,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
len = skb_headlen(skb);
last_frag = skb_shinfo(skb)->nr_frags;
- txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+ txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
- txbd->tx_bd_opaque = prod;
-
- tx_buf = &txr->tx_buf_ring[prod];
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
tx_buf->skb = skb;
tx_buf->nr_frags = last_frag;
@@ -519,12 +533,15 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
txbd->tx_bd_haddr = txr->data_mapping;
+ txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2);
prod = NEXT_TX(prod);
- txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+ tx_push->tx_bd_opaque = txbd->tx_bd_opaque;
+ txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
memcpy(txbd, tx_push1, sizeof(*txbd));
prod = NEXT_TX(prod);
tx_push->doorbell =
- cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
+ cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH |
+ DB_RING_IDX(&txr->tx_db, prod));
WRITE_ONCE(txr->tx_prod, prod);
tx_buf->is_push = 1;
@@ -562,19 +579,29 @@ normal_tx:
((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
txbd->tx_bd_haddr = cpu_to_le64(mapping);
+ txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag);
prod = NEXT_TX(prod);
txbd1 = (struct tx_bd_ext *)
- &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+ &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
txbd1->tx_bd_hsize_lflags = lflags;
if (skb_is_gso(skb)) {
+ bool udp_gso = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4);
u32 hdr_len;
- if (skb->encapsulation)
- hdr_len = skb_inner_tcp_all_headers(skb);
- else
+ if (skb->encapsulation) {
+ if (udp_gso)
+ hdr_len = skb_inner_transport_offset(skb) +
+ sizeof(struct udphdr);
+ else
+ hdr_len = skb_inner_tcp_all_headers(skb);
+ } else if (udp_gso) {
+ hdr_len = skb_transport_offset(skb) +
+ sizeof(struct udphdr);
+ } else {
hdr_len = skb_tcp_all_headers(skb);
+ }
txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
TX_BD_FLAGS_T_IPID |
@@ -601,11 +628,12 @@ normal_tx:
txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
txbd1->tx_bd_cfa_action =
cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
+ txbd0 = txbd;
for (i = 0; i < last_frag; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
prod = NEXT_TX(prod);
- txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+ txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
len = skb_frag_size(frag);
mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
@@ -614,7 +642,7 @@ normal_tx:
if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
goto tx_dma_error;
- tx_buf = &txr->tx_buf_ring[prod];
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
dma_unmap_addr_set(tx_buf, mapping, mapping);
txbd->tx_bd_haddr = cpu_to_le64(mapping);
@@ -632,22 +660,26 @@ normal_tx:
skb_tx_timestamp(skb);
- /* Sync BD data before updating doorbell */
- wmb();
-
prod = NEXT_TX(prod);
WRITE_ONCE(txr->tx_prod, prod);
- if (!netdev_xmit_more() || netif_xmit_stopped(txq))
+ if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
bnxt_txr_db_kick(bp, txr, prod);
- else
+ } else {
+ if (free_size >= bp->tx_wake_thresh)
+ txbd0->tx_bd_len_flags_type |=
+ cpu_to_le32(TX_BD_FLAGS_NO_CMPL);
txr->kick_pending = 1;
+ }
tx_done:
if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
- if (netdev_xmit_more() && !tx_buf->is_push)
+ if (netdev_xmit_more() && !tx_buf->is_push) {
+ txbd0->tx_bd_len_flags_type &=
+ cpu_to_le32(~TX_BD_FLAGS_NO_CMPL);
bnxt_txr_db_kick(bp, txr, prod);
+ }
netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
bp->tx_wake_thresh);
@@ -662,7 +694,7 @@ tx_dma_error:
/* start back at beginning and unmap skb */
prod = txr->tx_prod;
- tx_buf = &txr->tx_buf_ring[prod];
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
skb_headlen(skb), DMA_TO_DEVICE);
prod = NEXT_TX(prod);
@@ -670,7 +702,7 @@ tx_dma_error:
/* unmap remaining mapped pages */
for (i = 0; i < last_frag; i++) {
prod = NEXT_TX(prod);
- tx_buf = &txr->tx_buf_ring[prod];
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
skb_frag_size(&skb_shinfo(skb)->frags[i]),
DMA_TO_DEVICE);
@@ -686,31 +718,32 @@ tx_kick_pending:
return NETDEV_TX_OK;
}
-static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
+static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+ int budget)
{
- struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
- u16 cons = txr->tx_cons;
struct pci_dev *pdev = bp->pdev;
- int nr_pkts = bnapi->tx_pkts;
- int i;
+ u16 hw_cons = txr->tx_hw_cons;
unsigned int tx_bytes = 0;
+ u16 cons = txr->tx_cons;
+ int tx_pkts = 0;
- for (i = 0; i < nr_pkts; i++) {
+ while (RING_TX(bp, cons) != hw_cons) {
struct bnxt_sw_tx_bd *tx_buf;
struct sk_buff *skb;
int j, last;
- tx_buf = &txr->tx_buf_ring[cons];
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
cons = NEXT_TX(cons);
skb = tx_buf->skb;
tx_buf->skb = NULL;
if (unlikely(!skb)) {
- bnxt_sched_reset_txr(bp, txr, i);
+ bnxt_sched_reset_txr(bp, txr, cons);
return;
}
+ tx_pkts++;
tx_bytes += skb->len;
if (tx_buf->is_push) {
@@ -724,7 +757,7 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
for (j = 0; j < last; j++) {
cons = NEXT_TX(cons);
- tx_buf = &txr->tx_buf_ring[cons];
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
dma_unmap_page(
&pdev->dev,
dma_unmap_addr(tx_buf, mapping),
@@ -732,7 +765,7 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
DMA_TO_DEVICE);
}
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (BNXT_CHIP_P5(bp)) {
/* PTP worker takes ownership of the skb */
if (!bnxt_get_tx_ts_p5(bp, skb))
skb = NULL;
@@ -747,14 +780,25 @@ next_tx_int:
dev_consume_skb_any(skb);
}
- bnapi->tx_pkts = 0;
WRITE_ONCE(txr->tx_cons, cons);
- __netif_txq_completed_wake(txq, nr_pkts, tx_bytes,
+ __netif_txq_completed_wake(txq, tx_pkts, tx_bytes,
bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);
}
+static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
+{
+ struct bnxt_tx_ring_info *txr;
+ int i;
+
+ bnxt_for_each_napi_tx(i, bnapi, txr) {
+ if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons))
+ __bnxt_tx_int(bp, txr, budget);
+ }
+ bnapi->events &= ~BNXT_TX_CMP_EVENT;
+}
+
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
struct bnxt_rx_ring_info *rxr,
unsigned int *offset,
@@ -803,8 +847,8 @@ static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
u16 prod, gfp_t gfp)
{
- struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
- struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
+ struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
+ struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
dma_addr_t mapping;
if (BNXT_RX_PAGE_MODE(bp)) {
@@ -837,9 +881,10 @@ void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
u16 prod = rxr->rx_prod;
struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
+ struct bnxt *bp = rxr->bnapi->bp;
struct rx_bd *cons_bd, *prod_bd;
- prod_rx_buf = &rxr->rx_buf_ring[prod];
+ prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
cons_rx_buf = &rxr->rx_buf_ring[cons];
prod_rx_buf->data = data;
@@ -847,8 +892,8 @@ void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
prod_rx_buf->mapping = cons_rx_buf->mapping;
- prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
- cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
+ prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
+ cons_bd = &rxr->rx_desc_ring[RX_RING(bp, cons)][RX_IDX(cons)];
prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}
@@ -868,7 +913,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
u16 prod, gfp_t gfp)
{
struct rx_bd *rxbd =
- &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+ &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
struct bnxt_sw_rx_agg_bd *rx_agg_buf;
struct page *page;
dma_addr_t mapping;
@@ -885,7 +930,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
__set_bit(sw_prod, rxr->rx_agg_bmap);
rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
- rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
+ rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
rx_agg_buf->page = page;
rx_agg_buf->offset = offset;
@@ -927,7 +972,7 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
bool p5_tpa = false;
u32 i;
- if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
+ if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
p5_tpa = true;
for (i = 0; i < agg_bufs; i++) {
@@ -961,13 +1006,13 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
prod_rx_buf->mapping = cons_rx_buf->mapping;
- prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+ prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
prod_bd->rx_bd_opaque = sw_prod;
prod = NEXT_RX_AGG(prod);
- sw_prod = NEXT_RX_AGG(sw_prod);
+ sw_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
}
rxr->rx_agg_prod = prod;
rxr->rx_sw_agg_prod = sw_prod;
@@ -1094,7 +1139,7 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
u32 i, total_frag_len = 0;
bool p5_tpa = false;
- if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
+ if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
p5_tpa = true;
for (i = 0; i < agg_bufs; i++) {
@@ -1249,7 +1294,7 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
struct rx_tpa_end_cmp *tpa_end = cmp;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return 0;
agg_bufs = TPA_END_AGG_BUFS(tpa_end);
@@ -1290,8 +1335,39 @@ static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
return map->agg_id_tbl[agg_id];
}
+static void bnxt_tpa_metadata(struct bnxt_tpa_info *tpa_info,
+ struct rx_tpa_start_cmp *tpa_start,
+ struct rx_tpa_start_cmp_ext *tpa_start1)
+{
+ tpa_info->cfa_code_valid = 1;
+ tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
+ tpa_info->vlan_valid = 0;
+ if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
+ tpa_info->vlan_valid = 1;
+ tpa_info->metadata =
+ le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
+ }
+}
+
+static void bnxt_tpa_metadata_v2(struct bnxt_tpa_info *tpa_info,
+ struct rx_tpa_start_cmp *tpa_start,
+ struct rx_tpa_start_cmp_ext *tpa_start1)
+{
+ tpa_info->vlan_valid = 0;
+ if (TPA_START_VLAN_VALID(tpa_start)) {
+ u32 tpid_sel = TPA_START_VLAN_TPID_SEL(tpa_start);
+ u32 vlan_proto = ETH_P_8021Q;
+
+ tpa_info->vlan_valid = 1;
+ if (tpid_sel == RX_TPA_START_METADATA1_TPID_8021AD)
+ vlan_proto = ETH_P_8021AD;
+ tpa_info->metadata = vlan_proto << 16 |
+ TPA_START_METADATA0_TCI(tpa_start1);
+ }
+}
+
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
- struct rx_tpa_start_cmp *tpa_start,
+ u8 cmp_type, struct rx_tpa_start_cmp *tpa_start,
struct rx_tpa_start_cmp_ext *tpa_start1)
{
struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
@@ -1300,7 +1376,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
struct rx_bd *prod_bd;
dma_addr_t mapping;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
agg_id = TPA_START_AGG_ID_P5(tpa_start);
agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
} else {
@@ -1309,7 +1385,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
cons = tpa_start->rx_tpa_start_cmp_opaque;
prod = rxr->rx_prod;
cons_rx_buf = &rxr->rx_buf_ring[cons];
- prod_rx_buf = &rxr->rx_buf_ring[prod];
+ prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
tpa_info = &rxr->rx_tpa[agg_id];
if (unlikely(cons != rxr->rx_next_cons ||
@@ -1320,17 +1396,13 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
bnxt_sched_reset_rxr(bp, rxr);
return;
}
- /* Store cfa_code in tpa_info to use in tpa_end
- * completion processing.
- */
- tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
prod_rx_buf->data = tpa_info->data;
prod_rx_buf->data_ptr = tpa_info->data_ptr;
mapping = tpa_info->mapping;
prod_rx_buf->mapping = mapping;
- prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+ prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
@@ -1343,12 +1415,13 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
RX_TPA_START_CMP_LEN_SHIFT;
if (likely(TPA_START_HASH_VALID(tpa_start))) {
- u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
-
tpa_info->hash_type = PKT_HASH_TYPE_L4;
tpa_info->gso_type = SKB_GSO_TCPV4;
+ if (TPA_START_IS_IPV6(tpa_start1))
+ tpa_info->gso_type = SKB_GSO_TCPV6;
/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
- if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
+ else if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP &&
+ TPA_START_HASH_TYPE(tpa_start) == 3)
tpa_info->gso_type = SKB_GSO_TCPV6;
tpa_info->rss_hash =
le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
@@ -1358,13 +1431,16 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
}
tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
- tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
+ if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP)
+ bnxt_tpa_metadata(tpa_info, tpa_start, tpa_start1);
+ else
+ bnxt_tpa_metadata_v2(tpa_info, tpa_start, tpa_start1);
tpa_info->agg_count = 0;
rxr->rx_prod = NEXT_RX(prod);
- cons = NEXT_RX(cons);
- rxr->rx_next_cons = NEXT_RX(cons);
+ cons = RING_RX(bp, NEXT_RX(cons));
+ rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
cons_rx_buf = &rxr->rx_buf_ring[cons];
bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
@@ -1563,7 +1639,7 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
skb_shinfo(skb)->gso_size =
le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
skb_shinfo(skb)->gso_type = tpa_info->gso_type;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
else
payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
@@ -1594,6 +1670,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
{
struct bnxt_napi *bnapi = cpr->bnapi;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+ struct net_device *dev = bp->dev;
u8 *data_ptr, agg_bufs;
unsigned int len;
struct bnxt_tpa_info *tpa_info;
@@ -1611,7 +1688,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
return NULL;
}
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
agg_id = TPA_END_AGG_ID_P5(tpa_end);
agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
@@ -1658,7 +1735,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
if (!skb) {
bnxt_abort_tpa(cpr, idx, agg_bufs);
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
return NULL;
}
} else {
@@ -1668,7 +1745,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
if (!new_data) {
bnxt_abort_tpa(cpr, idx, agg_bufs);
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
return NULL;
}
@@ -1684,7 +1761,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
if (!skb) {
skb_free_frag(data);
bnxt_abort_tpa(cpr, idx, agg_bufs);
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
return NULL;
}
skb_reserve(skb, bp->rx_offset);
@@ -1695,19 +1772,20 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
if (!skb) {
/* Page reuse already handled by bnxt_rx_pages(). */
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
return NULL;
}
}
- skb->protocol =
- eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
+ if (tpa_info->cfa_code_valid)
+ dev = bnxt_get_pkt_dev(bp, tpa_info->cfa_code);
+ skb->protocol = eth_type_trans(skb, dev);
if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
- if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
- (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
+ if (tpa_info->vlan_valid &&
+ (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
__be16 vlan_proto = htons(tpa_info->metadata >>
RX_CMP_FLAGS2_METADATA_TPID_SFT);
u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
@@ -1774,6 +1852,64 @@ ts_valid:
return true;
}
+static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type,
+ struct rx_cmp *rxcmp,
+ struct rx_cmp_ext *rxcmp1)
+{
+ __be16 vlan_proto;
+ u16 vtag;
+
+ if (cmp_type == CMP_TYPE_RX_L2_CMP) {
+ __le32 flags2 = rxcmp1->rx_cmp_flags2;
+ u32 meta_data;
+
+ if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)))
+ return skb;
+
+ meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
+ vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
+ vlan_proto = htons(meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT);
+ if (eth_type_vlan(vlan_proto))
+ __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
+ else
+ goto vlan_err;
+ } else if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
+ if (RX_CMP_VLAN_VALID(rxcmp)) {
+ u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp);
+
+ if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q)
+ vlan_proto = htons(ETH_P_8021Q);
+ else if (tpid_sel == RX_CMP_METADATA1_TPID_8021AD)
+ vlan_proto = htons(ETH_P_8021AD);
+ else
+ goto vlan_err;
+ vtag = RX_CMP_METADATA0_TCI(rxcmp1);
+ __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
+ }
+ }
+ return skb;
+vlan_err:
+ dev_kfree_skb(skb);
+ return NULL;
+}
+
+static enum pkt_hash_types bnxt_rss_ext_op(struct bnxt *bp,
+ struct rx_cmp *rxcmp)
+{
+ u8 ext_op;
+
+ ext_op = RX_CMP_V3_HASH_TYPE(bp, rxcmp);
+ switch (ext_op) {
+ case EXT_OP_INNER_4:
+ case EXT_OP_OUTER_4:
+ case EXT_OP_INNFL_3:
+ case EXT_OP_OUTFL_3:
+ return PKT_HASH_TYPE_L4;
+ default:
+ return PKT_HASH_TYPE_L3;
+ }
+}
+
/* returns the following:
* 1 - 1 packet successfully received
* 0 - successful TPA_START, packet not completed yet
@@ -1790,7 +1926,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
struct rx_cmp *rxcmp;
struct rx_cmp_ext *rxcmp1;
u32 tmp_raw_cons = *raw_cons;
- u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
+ u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
struct bnxt_sw_rx_bd *rx_buf;
unsigned int len;
u8 *data_ptr, agg_bufs, cmp_type;
@@ -1827,8 +1963,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
dma_rmb();
prod = rxr->rx_prod;
- if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
- bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
+ if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP ||
+ cmp_type == CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
+ bnxt_tpa_start(bp, rxr, cmp_type,
+ (struct rx_tpa_start_cmp *)rxcmp,
(struct rx_tpa_start_cmp_ext *)rxcmp1);
*event |= BNXT_RX_EVENT;
@@ -1893,7 +2031,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rc = -EIO;
if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
!(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
netdev_warn_once(bp->dev, "RX buffer error %x\n",
rx_err);
@@ -1913,11 +2051,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
cp_cons, agg_bufs,
false);
- if (!frag_len) {
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
- }
+ if (!frag_len)
+ goto oom_next_rx;
}
xdp_active = true;
}
@@ -1940,9 +2075,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
else
bnxt_xdp_buff_frags_free(rxr, &xdp);
}
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
+ goto oom_next_rx;
}
} else {
u32 payload;
@@ -1953,60 +2086,52 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
payload = 0;
skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
payload | len);
- if (!skb) {
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
- }
+ if (!skb)
+ goto oom_next_rx;
}
if (agg_bufs) {
if (!xdp_active) {
skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
- if (!skb) {
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
- }
+ if (!skb)
+ goto oom_next_rx;
} else {
skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
if (!skb) {
/* we should be able to free the old skb here */
bnxt_xdp_buff_frags_free(rxr, &xdp);
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
+ goto oom_next_rx;
}
}
}
if (RX_CMP_HASH_VALID(rxcmp)) {
- u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
- enum pkt_hash_types type = PKT_HASH_TYPE_L4;
+ enum pkt_hash_types type;
- /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
- if (hash_type != 1 && hash_type != 3)
- type = PKT_HASH_TYPE_L3;
+ if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
+ type = bnxt_rss_ext_op(bp, rxcmp);
+ } else {
+ u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
+
+ /* RSS profiles 1 and 3 with extract code 0 for inner
+ * 4-tuple
+ */
+ if (hash_type != 1 && hash_type != 3)
+ type = PKT_HASH_TYPE_L3;
+ else
+ type = PKT_HASH_TYPE_L4;
+ }
skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
}
- cfa_code = RX_CMP_CFA_CODE(rxcmp1);
- skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
-
- if ((rxcmp1->rx_cmp_flags2 &
- cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
- (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
- u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
- u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
- __be16 vlan_proto = htons(meta_data >>
- RX_CMP_FLAGS2_METADATA_TPID_SFT);
+ if (cmp_type == CMP_TYPE_RX_L2_CMP)
+ dev = bnxt_get_pkt_dev(bp, RX_CMP_CFA_CODE(rxcmp1));
+ skb->protocol = eth_type_trans(skb, dev);
- if (eth_type_vlan(vlan_proto)) {
- __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
- } else {
- dev_kfree_skb(skb);
+ if (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) {
+ skb = bnxt_rx_vlan(skb, cmp_type, rxcmp, rxcmp1);
+ if (!skb)
goto next_rx;
- }
}
skb_checksum_none_assert(skb);
@@ -2023,7 +2148,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) {
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
u64 ns, ts;
if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
@@ -2047,12 +2172,17 @@ next_rx:
next_rx_no_len:
rxr->rx_prod = NEXT_RX(prod);
- rxr->rx_next_cons = NEXT_RX(cons);
+ rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
next_rx_no_prod_no_len:
*raw_cons = tmp_raw_cons;
return rc;
+
+oom_next_rx:
+ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
+ rc = -ENOMEM;
+ goto next_rx;
}
/* In netpoll mode, if we are using a combined completion ring, we need to
@@ -2086,7 +2216,8 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
*/
dma_rmb();
cmp_type = RX_CMP_TYPE(rxcmp);
- if (cmp_type == CMP_TYPE_RX_L2_CMP) {
+ if (cmp_type == CMP_TYPE_RX_L2_CMP ||
+ cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
rxcmp1->rx_cmp_cfa_code_errors_v2 |=
cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
@@ -2098,7 +2229,7 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
}
rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
if (rc && rc != -EBUSY)
- cpr->sw_stats.rx.rx_netpoll_discards += 1;
+ cpr->bnapi->cp_ring.sw_stats.rx.rx_netpoll_discards += 1;
return rc;
}
@@ -2146,6 +2277,10 @@ static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info)
{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
+ return link_info->force_link_speed2;
if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4)
return link_info->force_pam4_link_speed;
return link_info->force_link_speed;
@@ -2153,6 +2288,28 @@ static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info)
static void bnxt_set_force_speed(struct bnxt_link_info *link_info)
{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ link_info->req_link_speed = link_info->force_link_speed2;
+ link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
+ switch (link_info->req_link_speed) {
+ case BNXT_LINK_SPEED_50GB_PAM4:
+ case BNXT_LINK_SPEED_100GB_PAM4:
+ case BNXT_LINK_SPEED_200GB_PAM4:
+ case BNXT_LINK_SPEED_400GB_PAM4:
+ link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
+ break;
+ case BNXT_LINK_SPEED_100GB_PAM4_112:
+ case BNXT_LINK_SPEED_200GB_PAM4_112:
+ case BNXT_LINK_SPEED_400GB_PAM4_112:
+ link_info->req_signal_mode = BNXT_SIG_MODE_PAM4_112;
+ break;
+ default:
+ link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
+ }
+ return;
+ }
link_info->req_link_speed = link_info->force_link_speed;
link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
if (link_info->force_pam4_link_speed) {
@@ -2163,12 +2320,25 @@ static void bnxt_set_force_speed(struct bnxt_link_info *link_info)
static void bnxt_set_auto_speed(struct bnxt_link_info *link_info)
{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ link_info->advertising = link_info->auto_link_speeds2;
+ return;
+ }
link_info->advertising = link_info->auto_link_speeds;
link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
}
static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info)
{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ if (link_info->req_link_speed != link_info->force_link_speed2)
+ return true;
+ return false;
+ }
if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
link_info->req_link_speed != link_info->force_link_speed)
return true;
@@ -2180,6 +2350,13 @@ static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info)
static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info)
{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ if (link_info->advertising != link_info->auto_link_speeds2)
+ return true;
+ return false;
+ }
if (link_info->advertising != link_info->auto_link_speeds ||
link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
return true;
@@ -2430,7 +2607,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr;
u16 grp_idx;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
goto async_event_process_exit;
netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
@@ -2603,7 +2780,6 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
struct bnxt_napi *bnapi = cpr->bnapi;
u32 raw_cons = cpr->cp_raw_cons;
u32 cons;
- int tx_pkts = 0;
int rx_pkts = 0;
u8 event = 0;
struct tx_cmp *txcmp;
@@ -2611,6 +2787,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
cpr->has_more_work = 0;
cpr->had_work_done = 1;
while (1) {
+ u8 cmp_type;
int rc;
cons = RING_CMP(raw_cons);
@@ -2623,17 +2800,31 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
* reading any further.
*/
dma_rmb();
- if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
- tx_pkts++;
+ cmp_type = TX_CMP_TYPE(txcmp);
+ if (cmp_type == CMP_TYPE_TX_L2_CMP ||
+ cmp_type == CMP_TYPE_TX_L2_COAL_CMP) {
+ u32 opaque = txcmp->tx_cmp_opaque;
+ struct bnxt_tx_ring_info *txr;
+ u16 tx_freed;
+
+ txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)];
+ event |= BNXT_TX_CMP_EVENT;
+ if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP)
+ txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp);
+ else
+ txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque);
+ tx_freed = (txr->tx_hw_cons - txr->tx_cons) &
+ bp->tx_ring_mask;
/* return full budget so NAPI will complete. */
- if (unlikely(tx_pkts >= bp->tx_wake_thresh)) {
+ if (unlikely(tx_freed >= bp->tx_wake_thresh)) {
rx_pkts = budget;
raw_cons = NEXT_RAW_CMP(raw_cons);
if (budget)
cpr->has_more_work = 1;
break;
}
- } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
+ } else if (cmp_type >= CMP_TYPE_RX_L2_CMP &&
+ cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
if (likely(budget))
rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
else
@@ -2650,12 +2841,9 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rx_pkts++;
else if (rc == -EBUSY) /* partial completion */
break;
- } else if (unlikely((TX_CMP_TYPE(txcmp) ==
- CMPL_BASE_TYPE_HWRM_DONE) ||
- (TX_CMP_TYPE(txcmp) ==
- CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
- (TX_CMP_TYPE(txcmp) ==
- CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
+ } else if (unlikely(cmp_type == CMPL_BASE_TYPE_HWRM_DONE ||
+ cmp_type == CMPL_BASE_TYPE_HWRM_FWD_REQ ||
+ cmp_type == CMPL_BASE_TYPE_HWRM_ASYNC_EVENT)) {
bnxt_hwrm_handler(bp, txcmp);
}
raw_cons = NEXT_RAW_CMP(raw_cons);
@@ -2670,7 +2858,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
xdp_do_flush();
if (event & BNXT_TX_EVENT) {
- struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
+ struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
u16 prod = txr->tx_prod;
/* Sync BD data before updating doorbell */
@@ -2680,7 +2868,6 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
cpr->cp_raw_cons = raw_cons;
- bnapi->tx_pkts += tx_pkts;
bnapi->events |= event;
return rx_pkts;
}
@@ -2688,7 +2875,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
int budget)
{
- if (bnapi->tx_pkts && !bnapi->tx_fault)
+ if ((bnapi->events & BNXT_TX_CMP_EVENT) && !bnapi->tx_fault)
bnapi->tx_int(bp, bnapi, budget);
if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
@@ -2701,7 +2888,7 @@ static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
}
- bnapi->events = 0;
+ bnapi->events &= BNXT_TX_CMP_EVENT;
}
static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
@@ -2841,10 +3028,10 @@ static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
int i, work_done = 0;
- for (i = 0; i < 2; i++) {
- struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
+ for (i = 0; i < cpr->cp_ring_count; i++) {
+ struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
- if (cpr2) {
+ if (cpr2->had_nqe_notify) {
work_done += __bnxt_poll_work(bp, cpr2,
budget - work_done);
cpr->has_more_work |= cpr2->has_more_work;
@@ -2859,14 +3046,22 @@ static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
int i;
- for (i = 0; i < 2; i++) {
- struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
+ for (i = 0; i < cpr->cp_ring_count; i++) {
+ struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
struct bnxt_db_info *db;
- if (cpr2 && cpr2->had_work_done) {
+ if (cpr2->had_work_done) {
+ u32 tgl = 0;
+
+ if (dbr_type == DBR_TYPE_CQ_ARMALL) {
+ cpr2->had_nqe_notify = 0;
+ tgl = cpr2->toggle;
+ }
db = &cpr2->cp_db;
- bnxt_writeq(bp, db->db_key64 | dbr_type |
- RING_CMP(cpr2->cp_raw_cons), db->doorbell);
+ bnxt_writeq(bp,
+ db->db_key64 | dbr_type | DB_TOGGLE(tgl) |
+ DB_RING_IDX(db, cpr2->cp_raw_cons),
+ db->doorbell);
cpr2->had_work_done = 0;
}
}
@@ -2893,6 +3088,8 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
work_done = __bnxt_poll_cqs(bp, bnapi, budget);
}
while (1) {
+ u16 type;
+
cons = RING_CMP(raw_cons);
nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
@@ -2914,15 +3111,21 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
*/
dma_rmb();
- if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
+ type = le16_to_cpu(nqcmp->type);
+ if (NQE_CN_TYPE(type) == NQ_CN_TYPE_CQ_NOTIFICATION) {
u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
+ u32 cq_type = BNXT_NQ_HDL_TYPE(idx);
struct bnxt_cp_ring_info *cpr2;
/* No more budget for RX work */
- if (budget && work_done >= budget && idx == BNXT_RX_HDL)
+ if (budget && work_done >= budget &&
+ cq_type == BNXT_NQ_HDL_TYPE_RX)
break;
- cpr2 = cpr->cp_ring_arr[idx];
+ idx = BNXT_NQ_HDL_IDX(idx);
+ cpr2 = &cpr->cp_ring_arr[idx];
+ cpr2->had_nqe_notify = 1;
+ cpr2->toggle = NQE_CN_TOGGLE(type);
work_done += __bnxt_poll_work(bp, cpr2,
budget - work_done);
cpr->has_more_work |= cpr2->has_more_work;
@@ -2937,8 +3140,9 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
}
poll_done:
- cpr_rx = cpr->cp_ring_arr[BNXT_RX_HDL];
- if (cpr_rx && (bp->flags & BNXT_FLAG_DIM)) {
+ cpr_rx = &cpr->cp_ring_arr[0];
+ if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX &&
+ (bp->flags & BNXT_FLAG_DIM)) {
struct dim_sample dim_sample = {};
dim_update_sample(cpr->event_ctr,
@@ -3112,20 +3316,20 @@ static void bnxt_free_skbs(struct bnxt *bp)
bnxt_free_rx_skbs(bp);
}
-static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len)
+static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len)
{
- u8 init_val = mem_init->init_val;
- u16 offset = mem_init->offset;
+ u8 init_val = ctxm->init_value;
+ u16 offset = ctxm->init_offset;
u8 *p2 = p;
int i;
if (!init_val)
return;
- if (offset == BNXT_MEM_INVALID_OFFSET) {
+ if (offset == BNXT_CTX_INIT_INVALID_OFFSET) {
memset(p, init_val, len);
return;
}
- for (i = 0; i < len; i += mem_init->size)
+ for (i = 0; i < len; i += ctxm->entry_size)
*(p2 + i + offset) = init_val;
}
@@ -3192,8 +3396,8 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
if (!rmem->pg_arr[i])
return -ENOMEM;
- if (rmem->mem_init)
- bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i],
+ if (rmem->ctx_mem)
+ bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i],
rmem->page_size);
if (rmem->nr_pages > 1 || rmem->depth > 0) {
if (i == rmem->nr_pages - 2 &&
@@ -3240,7 +3444,7 @@ static int bnxt_alloc_tpa_info(struct bnxt *bp)
int i, j;
bp->max_tpa = MAX_TPA;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
if (!bp->max_tpa_v2)
return 0;
bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
@@ -3255,7 +3459,7 @@ static int bnxt_alloc_tpa_info(struct bnxt *bp)
if (!rxr->rx_tpa)
return -ENOMEM;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
continue;
for (j = 0; j < bp->max_tpa; j++) {
agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
@@ -3313,6 +3517,7 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
pp.pool_size += bp->rx_ring_size;
pp.nid = dev_to_node(&bp->pdev->dev);
pp.napi = &rxr->bnapi->napi;
+ pp.netdev = bp->dev;
pp.dev = &bp->pdev->dev;
pp.dma_dir = bp->rx_dir;
pp.max_len = PAGE_SIZE;
@@ -3410,6 +3615,15 @@ static void bnxt_free_tx_rings(struct bnxt *bp)
}
}
+#define BNXT_TC_TO_RING_BASE(bp, tc) \
+ ((tc) * (bp)->tx_nr_rings_per_tc)
+
+#define BNXT_RING_TO_TC_OFF(bp, tx) \
+ ((tx) % (bp)->tx_nr_rings_per_tc)
+
+#define BNXT_RING_TO_TC(bp, tx) \
+ ((tx) / (bp)->tx_nr_rings_per_tc)
+
static int bnxt_alloc_tx_rings(struct bnxt *bp)
{
int i, j, rc;
@@ -3465,7 +3679,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
spin_lock_init(&txr->xdp_tx_lock);
if (i < bp->tx_nr_rings_xdp)
continue;
- if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
+ if (BNXT_RING_TO_TC_OFF(bp, i) == (bp->tx_nr_rings_per_tc - 1))
j++;
}
return 0;
@@ -3548,36 +3762,33 @@ static void bnxt_free_cp_rings(struct bnxt *bp)
bnxt_free_ring(bp, &ring->ring_mem);
- for (j = 0; j < 2; j++) {
- struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
+ if (!cpr->cp_ring_arr)
+ continue;
- if (cpr2) {
- ring = &cpr2->cp_ring_struct;
- bnxt_free_ring(bp, &ring->ring_mem);
- bnxt_free_cp_arrays(cpr2);
- kfree(cpr2);
- cpr->cp_ring_arr[j] = NULL;
- }
+ for (j = 0; j < cpr->cp_ring_count; j++) {
+ struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
+
+ ring = &cpr2->cp_ring_struct;
+ bnxt_free_ring(bp, &ring->ring_mem);
+ bnxt_free_cp_arrays(cpr2);
}
+ kfree(cpr->cp_ring_arr);
+ cpr->cp_ring_arr = NULL;
+ cpr->cp_ring_count = 0;
}
}
-static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
+static int bnxt_alloc_cp_sub_ring(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr)
{
struct bnxt_ring_mem_info *rmem;
struct bnxt_ring_struct *ring;
- struct bnxt_cp_ring_info *cpr;
int rc;
- cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
- if (!cpr)
- return NULL;
-
rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
if (rc) {
bnxt_free_cp_arrays(cpr);
- kfree(cpr);
- return NULL;
+ return -ENOMEM;
}
ring = &cpr->cp_ring_struct;
rmem = &ring->ring_mem;
@@ -3590,23 +3801,26 @@ static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
if (rc) {
bnxt_free_ring(bp, rmem);
bnxt_free_cp_arrays(cpr);
- kfree(cpr);
- cpr = NULL;
}
- return cpr;
+ return rc;
}
static int bnxt_alloc_cp_rings(struct bnxt *bp)
{
bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
- int i, rc, ulp_base_vec, ulp_msix;
+ int i, j, rc, ulp_base_vec, ulp_msix;
+ int tcs = bp->num_tc;
+ if (!tcs)
+ tcs = 1;
ulp_msix = bnxt_get_ulp_msix_num(bp);
ulp_base_vec = bnxt_get_ulp_msix_base(bp);
- for (i = 0; i < bp->cp_nr_rings; i++) {
+ for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
- struct bnxt_cp_ring_info *cpr;
+ struct bnxt_cp_ring_info *cpr, *cpr2;
struct bnxt_ring_struct *ring;
+ int cp_count = 0, k;
+ int rx = 0, tx = 0;
if (!bnapi)
continue;
@@ -3624,35 +3838,55 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
else
ring->map_idx = i;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
continue;
if (i < bp->rx_nr_rings) {
- struct bnxt_cp_ring_info *cpr2 =
- bnxt_alloc_cp_sub_ring(bp);
-
- cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
- if (!cpr2)
- return -ENOMEM;
- cpr2->bnapi = bnapi;
+ cp_count++;
+ rx = 1;
+ }
+ if (i < bp->tx_nr_rings_xdp) {
+ cp_count++;
+ tx = 1;
+ } else if ((sh && i < bp->tx_nr_rings) ||
+ (!sh && i >= bp->rx_nr_rings)) {
+ cp_count += tcs;
+ tx = 1;
}
- if ((sh && i < bp->tx_nr_rings) ||
- (!sh && i >= bp->rx_nr_rings)) {
- struct bnxt_cp_ring_info *cpr2 =
- bnxt_alloc_cp_sub_ring(bp);
- cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
- if (!cpr2)
- return -ENOMEM;
+ cpr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr),
+ GFP_KERNEL);
+ if (!cpr->cp_ring_arr)
+ return -ENOMEM;
+ cpr->cp_ring_count = cp_count;
+
+ for (k = 0; k < cp_count; k++) {
+ cpr2 = &cpr->cp_ring_arr[k];
+ rc = bnxt_alloc_cp_sub_ring(bp, cpr2);
+ if (rc)
+ return rc;
cpr2->bnapi = bnapi;
+ cpr2->cp_idx = k;
+ if (!k && rx) {
+ bp->rx_ring[i].rx_cpr = cpr2;
+ cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_RX;
+ } else {
+ int n, tc = k - rx;
+
+ n = BNXT_TC_TO_RING_BASE(bp, tc) + j;
+ bp->tx_ring[n].tx_cpr = cpr2;
+ cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_TX;
+ }
}
+ if (tx)
+ j++;
}
return 0;
}
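
The sub-ring count per NQ follows directly from the branches above: one completion ring if the NQ services RX, one if it services XDP TX, and otherwise one per traffic class if it services regular TX. A minimal sketch of the same counting, with example parameters (8 shared rings, 16 TX rings, 2 TCs, no XDP):

#include <stdio.h>

static int nq_sub_rings(int i, int rx_nr, int tx_nr, int tx_xdp,
			int tcs, int shared)
{
	int cnt = 0;

	if (i < rx_nr)
		cnt++;			/* one RX completion ring */
	if (i < tx_xdp)
		cnt++;			/* one XDP TX completion ring */
	else if ((shared && i < tx_nr) || (!shared && i >= rx_nr))
		cnt += tcs;		/* one TX completion ring per TC */
	return cnt;
}

int main(void)
{
	printf("%d\n", nq_sub_rings(0, 8, 16, 0, 2, 1));	/* 1 RX + 2 TX = 3 */
	return 0;
}
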
static void bnxt_init_ring_struct(struct bnxt *bp)
{
- int i;
+ int i, j;
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
@@ -3697,18 +3931,16 @@ static void bnxt_init_ring_struct(struct bnxt *bp)
rmem->vmem = (void **)&rxr->rx_agg_ring;
skip_rx:
- txr = bnapi->tx_ring;
- if (!txr)
- continue;
-
- ring = &txr->tx_ring_struct;
- rmem = &ring->ring_mem;
- rmem->nr_pages = bp->tx_nr_pages;
- rmem->page_size = HW_RXBD_RING_SIZE;
- rmem->pg_arr = (void **)txr->tx_desc_ring;
- rmem->dma_arr = txr->tx_desc_mapping;
- rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
- rmem->vmem = (void **)&txr->tx_buf_ring;
+ bnxt_for_each_napi_tx(j, bnapi, txr) {
+ ring = &txr->tx_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bp->tx_nr_pages;
+ rmem->page_size = HW_TXBD_RING_SIZE;
+ rmem->pg_arr = (void **)txr->tx_desc_ring;
+ rmem->dma_arr = txr->tx_desc_mapping;
+ rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
+ rmem->vmem = (void **)&txr->tx_buf_ring;
+ }
}
}
@@ -3799,6 +4031,9 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
ring = &rxr->rx_ring_struct;
bnxt_init_rxbd_pages(ring, type);
+ netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
+ &rxr->bnapi->napi);
+
if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
bpf_prog_add(bp->xdp_prog, 1);
rxr->xdp_prog = bp->xdp_prog;
@@ -3829,11 +4064,10 @@ static void bnxt_init_cp_rings(struct bnxt *bp)
ring->fw_ring_id = INVALID_HW_RING_ID;
cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
- for (j = 0; j < 2; j++) {
- struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
-
- if (!cpr2)
- continue;
+ if (!cpr->cp_ring_arr)
+ continue;
+ for (j = 0; j < cpr->cp_ring_count; j++) {
+ struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
ring = &cpr2->cp_ring_struct;
ring->fw_ring_id = INVALID_HW_RING_ID;
@@ -3876,6 +4110,11 @@ static int bnxt_init_tx_rings(struct bnxt *bp)
struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
ring->fw_ring_id = INVALID_HW_RING_ID;
+
+ if (i >= bp->tx_nr_rings_xdp)
+ netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp,
+ NETDEV_QUEUE_TYPE_TX,
+ &txr->bnapi->napi);
}
return 0;
@@ -3921,7 +4160,7 @@ static int bnxt_alloc_vnics(struct bnxt *bp)
int num_vnics = 1;
#ifdef CONFIG_RFS_ACCEL
- if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
+ if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5_PLUS)) == BNXT_FLAG_RFS)
num_vnics += bp->rx_nr_rings;
#endif
@@ -3952,13 +4191,22 @@ static void bnxt_init_vnics(struct bnxt *bp)
vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
if (bp->vnic_info[i].rss_hash_key) {
- if (i == 0)
+ if (!i) {
+ u8 *key = (void *)vnic->rss_hash_key;
+ int k;
+
+ bp->toeplitz_prefix = 0;
get_random_bytes(vnic->rss_hash_key,
HW_HASH_KEY_SIZE);
- else
+ for (k = 0; k < 8; k++) {
+ bp->toeplitz_prefix <<= 8;
+ bp->toeplitz_prefix |= key[k];
+ }
+ } else {
memcpy(vnic->rss_hash_key,
bp->vnic_info[0].rss_hash_key,
HW_HASH_KEY_SIZE);
+ }
}
}
}
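
The new toeplitz_prefix is simply the first eight hash-key bytes packed big-endian into a u64. A quick standalone check, using the first eight bytes of the well-known Microsoft RSS test key as example input:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t key[8] = { 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2 };
	uint64_t prefix = 0;
	int k;

	for (k = 0; k < 8; k++)
		prefix = (prefix << 8) | key[k];	/* big-endian pack */
	printf("%016llx\n", (unsigned long long)prefix);
	/* -> 6d5a56da255b0ec2 */
	return 0;
}
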
@@ -4194,7 +4442,7 @@ static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
}
}
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
goto vnic_skip_grps;
if (vnic->flags & BNXT_VNIC_RSS_FLAG)
@@ -4208,13 +4456,13 @@ static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
goto out;
}
vnic_skip_grps:
- if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
+ if ((bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) &&
!(vnic->flags & BNXT_VNIC_RSS_FLAG))
continue;
/* Allocate rss table and hash key */
size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
@@ -4324,7 +4572,7 @@ static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
int rc;
if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
- !(bp->flags & BNXT_FLAG_CHIP_P5))
+ !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
return -EOPNOTSUPP;
rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
@@ -4362,7 +4610,7 @@ static void bnxt_init_stats(struct bnxt *bp)
stats = &cpr->stats;
rc = bnxt_hwrm_func_qstat_ext(bp, stats);
if (rc) {
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
mask = (1ULL << 48) - 1;
else
mask = -1ULL;
@@ -4508,7 +4756,7 @@ alloc_tx_ext_stats:
static void bnxt_clear_ring_indices(struct bnxt *bp)
{
- int i;
+ int i, j;
if (!bp->bnapi)
return;
@@ -4525,10 +4773,10 @@ static void bnxt_clear_ring_indices(struct bnxt *bp)
cpr = &bnapi->cp_ring;
cpr->cp_raw_cons = 0;
- txr = bnapi->tx_ring;
- if (txr) {
+ bnxt_for_each_napi_tx(j, bnapi, txr) {
txr->tx_prod = 0;
txr->tx_cons = 0;
+ txr->tx_hw_cons = 0;
}
rxr = bnapi->rx_ring;
@@ -4538,12 +4786,12 @@ static void bnxt_clear_ring_indices(struct bnxt *bp)
rxr->rx_sw_agg_prod = 0;
rxr->rx_next_cons = 0;
}
+ bnapi->events = 0;
}
}
-static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
+static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
{
-#ifdef CONFIG_RFS_ACCEL
int i;
/* Under rtnl_lock and all our NAPIs have been disabled. It's
@@ -4555,40 +4803,73 @@ static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
struct bnxt_ntuple_filter *fltr;
head = &bp->ntp_fltr_hash_tbl[i];
- hlist_for_each_entry_safe(fltr, tmp, head, hash) {
- hlist_del(&fltr->hash);
+ hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
+ bnxt_del_l2_filter(bp, fltr->l2_fltr);
+ if (!all && (fltr->base.flags & BNXT_ACT_FUNC_DST))
+ continue;
+ hlist_del(&fltr->base.hash);
+ clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
+ bp->ntp_fltr_count--;
kfree(fltr);
}
}
- if (irq_reinit) {
- bitmap_free(bp->ntp_fltr_bmap);
- bp->ntp_fltr_bmap = NULL;
- }
+ if (!all)
+ return;
+
+ bitmap_free(bp->ntp_fltr_bmap);
+ bp->ntp_fltr_bmap = NULL;
bp->ntp_fltr_count = 0;
-#endif
}
static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
{
-#ifdef CONFIG_RFS_ACCEL
int i, rc = 0;
- if (!(bp->flags & BNXT_FLAG_RFS))
+ if (!(bp->flags & BNXT_FLAG_RFS) || bp->ntp_fltr_bmap)
return 0;
for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
bp->ntp_fltr_count = 0;
- bp->ntp_fltr_bmap = bitmap_zalloc(BNXT_NTP_FLTR_MAX_FLTR, GFP_KERNEL);
+ bp->ntp_fltr_bmap = bitmap_zalloc(BNXT_MAX_FLTR, GFP_KERNEL);
if (!bp->ntp_fltr_bmap)
rc = -ENOMEM;
return rc;
-#else
- return 0;
-#endif
+}
+
+static void bnxt_free_l2_filters(struct bnxt *bp, bool all)
+{
+ int i;
+
+ for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) {
+ struct hlist_head *head;
+ struct hlist_node *tmp;
+ struct bnxt_l2_filter *fltr;
+
+ head = &bp->l2_fltr_hash_tbl[i];
+ hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
+ if (!all && (fltr->base.flags & BNXT_ACT_FUNC_DST))
+ continue;
+ hlist_del(&fltr->base.hash);
+ if (fltr->base.flags) {
+ clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
+ bp->ntp_fltr_count--;
+ }
+ kfree(fltr);
+ }
+ }
+}
+
+static void bnxt_init_l2_fltr_tbl(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++)
+ INIT_HLIST_HEAD(&bp->l2_fltr_hash_tbl[i]);
+ get_random_bytes(&bp->hash_seed, sizeof(bp->hash_seed));
}
static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
@@ -4598,7 +4879,8 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
bnxt_free_rx_rings(bp);
bnxt_free_cp_rings(bp);
bnxt_free_all_cp_arrays(bp);
- bnxt_free_ntp_fltrs(bp, irq_re_init);
+ bnxt_free_ntp_fltrs(bp, false);
+ bnxt_free_l2_filters(bp, false);
if (irq_re_init) {
bnxt_free_ring_stats(bp);
if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
@@ -4641,7 +4923,7 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
bp->bnapi[i] = bnapi;
bp->bnapi[i]->index = i;
bp->bnapi[i]->bp = bp;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
struct bnxt_cp_ring_info *cpr =
&bp->bnapi[i]->cp_ring;
@@ -4659,11 +4941,13 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
rxr->rx_ring_struct.ring_mem.flags =
BNXT_RMEM_RING_PTE_FLAG;
rxr->rx_agg_ring_struct.ring_mem.flags =
BNXT_RMEM_RING_PTE_FLAG;
+ } else {
+ rxr->rx_cpr = &bp->bnapi[i]->cp_ring;
}
rxr->bnapi = bp->bnapi[i];
bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
@@ -4686,22 +4970,33 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
else
j = bp->rx_nr_rings;
- for (i = 0; i < bp->tx_nr_rings; i++, j++) {
+ for (i = 0; i < bp->tx_nr_rings; i++) {
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
+ struct bnxt_napi *bnapi2;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
txr->tx_ring_struct.ring_mem.flags =
BNXT_RMEM_RING_PTE_FLAG;
- txr->bnapi = bp->bnapi[j];
- bp->bnapi[j]->tx_ring = txr;
bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
if (i >= bp->tx_nr_rings_xdp) {
+ int k = j + BNXT_RING_TO_TC_OFF(bp, i);
+
+ bnapi2 = bp->bnapi[k];
txr->txq_index = i - bp->tx_nr_rings_xdp;
- bp->bnapi[j]->tx_int = bnxt_tx_int;
+ txr->tx_napi_idx =
+ BNXT_RING_TO_TC(bp, txr->txq_index);
+ bnapi2->tx_ring[txr->tx_napi_idx] = txr;
+ bnapi2->tx_int = bnxt_tx_int;
} else {
- bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
- bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
+ bnapi2 = bp->bnapi[j];
+ bnapi2->flags |= BNXT_NAPI_FLAG_XDP;
+ bnapi2->tx_ring[0] = txr;
+ bnapi2->tx_int = bnxt_tx_int_xdp;
+ j++;
}
+ txr->bnapi = bnapi2;
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
+ txr->tx_cpr = &bnapi2->cp_ring;
}
rc = bnxt_alloc_stats(bp);
@@ -4913,6 +5208,8 @@ int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
return hwrm_req_send(bp, req);
}
+static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa);
+
static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
{
struct hwrm_tunnel_dst_port_free_input *req;
@@ -4942,6 +5239,11 @@ static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
bp->nge_port = 0;
bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
break;
+ case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE:
+ req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_gpe_fw_dst_port_id);
+ bp->vxlan_gpe_port = 0;
+ bp->vxlan_gpe_fw_dst_port_id = INVALID_HW_RING_ID;
+ break;
default:
break;
}
@@ -4950,6 +5252,8 @@ static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
if (rc)
netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
rc);
+ if (bp->flags & BNXT_FLAG_TPA)
+ bnxt_set_tpa(bp, true);
return rc;
}
@@ -4985,9 +5289,16 @@ static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
bp->nge_port = port;
bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
break;
+ case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE:
+ bp->vxlan_gpe_port = port;
+ bp->vxlan_gpe_fw_dst_port_id =
+ le16_to_cpu(resp->tunnel_dst_port_id);
+ break;
default:
break;
}
+ if (bp->flags & BNXT_FLAG_TPA)
+ bnxt_set_tpa(bp, true);
err_out:
hwrm_req_drop(bp, req);
@@ -5013,25 +5324,301 @@ static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
return hwrm_req_send_silent(bp, req);
}
+void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr)
+{
+ if (!atomic_dec_and_test(&fltr->refcnt))
+ return;
+ spin_lock_bh(&bp->ntp_fltr_lock);
+ if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ return;
+ }
+ hlist_del_rcu(&fltr->base.hash);
+ if (fltr->base.flags) {
+ clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
+ bp->ntp_fltr_count--;
+ }
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ kfree_rcu(fltr, base.rcu);
+}
+
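
The delete path pairs a refcount with an INSERTED state bit so that only the final reference holder, and only once, unlinks the filter. A simplified C11 analog of that pattern; the driver additionally defers the free with kfree_rcu() so lockless readers stay safe, which this sketch omits:

#include <stdio.h>
#include <stdatomic.h>
#include <stdbool.h>

struct filt {
	atomic_int refcnt;
	atomic_bool inserted;
};

static bool put_filter(struct filt *f)
{
	if (atomic_fetch_sub(&f->refcnt, 1) != 1)
		return false;		/* still referenced elsewhere */
	if (!atomic_exchange(&f->inserted, false))
		return false;		/* someone else already unlinked it */
	/* unlink from the hash table and free the node here */
	return true;
}

int main(void)
{
	struct filt f = { 1, true };

	printf("%s\n", put_filter(&f) ? "unlinked" : "kept");
	return 0;
}
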
+static struct bnxt_l2_filter *__bnxt_lookup_l2_filter(struct bnxt *bp,
+ struct bnxt_l2_key *key,
+ u32 idx)
+{
+ struct hlist_head *head = &bp->l2_fltr_hash_tbl[idx];
+ struct bnxt_l2_filter *fltr;
+
+ hlist_for_each_entry_rcu(fltr, head, base.hash) {
+ struct bnxt_l2_key *l2_key = &fltr->l2_key;
+
+ if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) &&
+ l2_key->vlan == key->vlan)
+ return fltr;
+ }
+ return NULL;
+}
+
+static struct bnxt_l2_filter *bnxt_lookup_l2_filter(struct bnxt *bp,
+ struct bnxt_l2_key *key,
+ u32 idx)
+{
+ struct bnxt_l2_filter *fltr = NULL;
+
+ rcu_read_lock();
+ fltr = __bnxt_lookup_l2_filter(bp, key, idx);
+ if (fltr)
+ atomic_inc(&fltr->refcnt);
+ rcu_read_unlock();
+ return fltr;
+}
+
+#define BNXT_IPV4_4TUPLE(bp, fkeys) \
+ (((fkeys)->basic.ip_proto == IPPROTO_TCP && \
+ (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) || \
+ ((fkeys)->basic.ip_proto == IPPROTO_UDP && \
+ (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4))
+
+#define BNXT_IPV6_4TUPLE(bp, fkeys) \
+ (((fkeys)->basic.ip_proto == IPPROTO_TCP && \
+ (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) || \
+ ((fkeys)->basic.ip_proto == IPPROTO_UDP && \
+ (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6))
+
+static u32 bnxt_get_rss_flow_tuple_len(struct bnxt *bp, struct flow_keys *fkeys)
+{
+ if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
+ if (BNXT_IPV4_4TUPLE(bp, fkeys))
+ return sizeof(fkeys->addrs.v4addrs) +
+ sizeof(fkeys->ports);
+
+ if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
+ return sizeof(fkeys->addrs.v4addrs);
+ }
+
+ if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
+ if (BNXT_IPV6_4TUPLE(bp, fkeys))
+ return sizeof(fkeys->addrs.v6addrs) +
+ sizeof(fkeys->ports);
+
+ if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
+ return sizeof(fkeys->addrs.v6addrs);
+ }
+
+ return 0;
+}
+
+static u32 bnxt_toeplitz(struct bnxt *bp, struct flow_keys *fkeys,
+ const unsigned char *key)
+{
+ u64 prefix = bp->toeplitz_prefix, hash = 0;
+ struct bnxt_ipv4_tuple tuple4;
+ struct bnxt_ipv6_tuple tuple6;
+ int i, j, len = 0;
+ u8 *four_tuple;
+
+ len = bnxt_get_rss_flow_tuple_len(bp, fkeys);
+ if (!len)
+ return 0;
+
+ if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
+ tuple4.v4addrs = fkeys->addrs.v4addrs;
+ tuple4.ports = fkeys->ports;
+ four_tuple = (unsigned char *)&tuple4;
+ } else {
+ tuple6.v6addrs = fkeys->addrs.v6addrs;
+ tuple6.ports = fkeys->ports;
+ four_tuple = (unsigned char *)&tuple6;
+ }
+
+ for (i = 0, j = 8; i < len; i++, j++) {
+ u8 byte = four_tuple[i];
+ int bit;
+
+ for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) {
+ if (byte & 0x80)
+ hash ^= prefix;
+ }
+ prefix |= (j < HW_HASH_KEY_SIZE) ? key[j] : 0;
+ }
+
+ /* The valid part of the hash is in the upper 32 bits. */
+ return (hash >> 32) & BNXT_NTP_FLTR_HASH_MASK;
+}
+
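
bnxt_toeplitz() is a bit-serial form of the standard Toeplitz hash: prefix is a sliding 64-bit window into the RSS key, preloaded from toeplitz_prefix, and the valid result sits in the upper 32 bits. A self-contained userspace equivalent, without the final BNXT_NTP_FLTR_HASH_MASK masking and assuming a key of at least 8 bytes; the key and flow bytes below are made up:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint32_t toeplitz(const uint8_t *key, size_t keylen,
			 const uint8_t *data, size_t len)
{
	uint64_t prefix = 0, hash = 0;
	size_t i, j;
	int bit;

	for (i = 0; i < 8; i++)		/* preload, like bp->toeplitz_prefix */
		prefix = (prefix << 8) | key[i];
	for (i = 0, j = 8; i < len; i++, j++) {
		uint8_t byte = data[i];

		for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) {
			if (byte & 0x80)
				hash ^= prefix;	/* XOR the key window in */
		}
		prefix |= (j < keylen) ? key[j] : 0;	/* refill low byte */
	}
	return hash >> 32;		/* valid part is the upper 32 bits */
}

int main(void)
{
	uint8_t key[12] = { 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b,
			    0x0e, 0xc2, 0x41, 0x67, 0x25, 0x3d };
	uint8_t tuple[4] = { 192, 168, 1, 2 };

	printf("%08x\n", toeplitz(key, sizeof(key), tuple, sizeof(tuple)));
	return 0;
}
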
#ifdef CONFIG_RFS_ACCEL
-static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
- struct bnxt_ntuple_filter *fltr)
+static struct bnxt_l2_filter *
+bnxt_lookup_l2_filter_from_key(struct bnxt *bp, struct bnxt_l2_key *key)
+{
+ struct bnxt_l2_filter *fltr;
+ u32 idx;
+
+ idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
+ BNXT_L2_FLTR_HASH_MASK;
+ fltr = bnxt_lookup_l2_filter(bp, key, idx);
+ return fltr;
+}
+#endif
+
+static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr,
+ struct bnxt_l2_key *key, u32 idx)
+{
+ struct hlist_head *head;
+
+ ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr);
+ fltr->l2_key.vlan = key->vlan;
+ fltr->base.type = BNXT_FLTR_TYPE_L2;
+ if (fltr->base.flags) {
+ int bit_id;
+
+ bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
+ BNXT_MAX_FLTR, 0);
+ if (bit_id < 0)
+ return -ENOMEM;
+ fltr->base.sw_id = (u16)bit_id;
+ }
+ head = &bp->l2_fltr_hash_tbl[idx];
+ hlist_add_head_rcu(&fltr->base.hash, head);
+ set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
+ atomic_set(&fltr->refcnt, 1);
+ return 0;
+}
+
+static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp,
+ struct bnxt_l2_key *key,
+ gfp_t gfp)
+{
+ struct bnxt_l2_filter *fltr;
+ u32 idx;
+ int rc;
+
+ idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
+ BNXT_L2_FLTR_HASH_MASK;
+ fltr = bnxt_lookup_l2_filter(bp, key, idx);
+ if (fltr)
+ return fltr;
+
+ fltr = kzalloc(sizeof(*fltr), gfp);
+ if (!fltr)
+ return ERR_PTR(-ENOMEM);
+ spin_lock_bh(&bp->ntp_fltr_lock);
+ rc = bnxt_init_l2_filter(bp, fltr, key, idx);
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ if (rc) {
+ bnxt_del_l2_filter(bp, fltr);
+ fltr = ERR_PTR(rc);
+ }
+ return fltr;
+}
+
+static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx)
+{
+#ifdef CONFIG_BNXT_SRIOV
+ struct bnxt_vf_info *vf = &pf->vf[vf_idx];
+
+ return vf->fw_fid;
+#else
+ return INVALID_HW_RING_ID;
+#endif
+}
+
+int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr)
+{
+ struct hwrm_cfa_l2_filter_free_input *req;
+ u16 target_id = 0xffff;
+ int rc;
+
+ if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
+ struct bnxt_pf_info *pf = &bp->pf;
+
+ if (fltr->base.vf_idx >= pf->active_vfs)
+ return -EINVAL;
+
+ target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
+ if (target_id == INVALID_HW_RING_ID)
+ return -EINVAL;
+ }
+
+ rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
+ if (rc)
+ return rc;
+
+ req->target_id = cpu_to_le16(target_id);
+ req->l2_filter_id = fltr->base.filter_id;
+ return hwrm_req_send(bp, req);
+}
+
+int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr)
+{
+ struct hwrm_cfa_l2_filter_alloc_output *resp;
+ struct hwrm_cfa_l2_filter_alloc_input *req;
+ u16 target_id = 0xffff;
+ int rc;
+
+ if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
+ struct bnxt_pf_info *pf = &bp->pf;
+
+ if (fltr->base.vf_idx >= pf->active_vfs)
+ return -EINVAL;
+
+ target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
+ }
+ rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
+ if (rc)
+ return rc;
+
+ req->target_id = cpu_to_le16(target_id);
+ req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
+
+ if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
+ req->flags |=
+ cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
+ req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id);
+ req->enables =
+ cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
+ ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr);
+ eth_broadcast_addr(req->l2_addr_mask);
+
+ if (fltr->l2_key.vlan) {
+ req->enables |=
+ cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS);
+ req->num_vlans = 1;
+ req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan);
+ req->l2_ivlan_mask = cpu_to_le16(0xfff);
+ }
+
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (!rc) {
+ fltr->base.filter_id = resp->l2_filter_id;
+ set_bit(BNXT_FLTR_VALID, &fltr->base.state);
+ }
+ hwrm_req_drop(bp, req);
+ return rc;
+}
+
+int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
+ struct bnxt_ntuple_filter *fltr)
{
struct hwrm_cfa_ntuple_filter_free_input *req;
int rc;
+ set_bit(BNXT_FLTR_FW_DELETED, &fltr->base.state);
rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
if (rc)
return rc;
- req->ntuple_filter_id = fltr->filter_id;
+ req->ntuple_filter_id = fltr->base.filter_id;
return hwrm_req_send(bp, req);
}
#define BNXT_NTP_FLTR_FLAGS \
(CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
- CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
@@ -5047,12 +5634,21 @@ static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
#define BNXT_NTP_TUNNEL_FLTR_FLAG \
CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
-static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
- struct bnxt_ntuple_filter *fltr)
+void bnxt_fill_ipv6_mask(__be32 mask[4])
+{
+ int i;
+
+ for (i = 0; i < 4; i++)
+ mask[i] = cpu_to_be32(~0);
+}
+
+int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
+ struct bnxt_ntuple_filter *fltr)
{
struct hwrm_cfa_ntuple_filter_alloc_output *resp;
struct hwrm_cfa_ntuple_filter_alloc_input *req;
struct flow_keys *keys = &fltr->fkeys;
+ struct bnxt_l2_filter *l2_fltr;
struct bnxt_vnic_info *vnic;
u32 flags = 0;
int rc;
@@ -5061,42 +5657,47 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
if (rc)
return rc;
- req->l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
+ l2_fltr = fltr->l2_fltr;
+ req->l2_filter_id = l2_fltr->base.filter_id;
+
if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
- req->dst_id = cpu_to_le16(fltr->rxq);
+ req->dst_id = cpu_to_le16(fltr->base.rxq);
} else {
- vnic = &bp->vnic_info[fltr->rxq + 1];
+ vnic = &bp->vnic_info[fltr->base.rxq + 1];
req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
}
req->flags = cpu_to_le32(flags);
req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
req->ethertype = htons(ETH_P_IP);
- memcpy(req->src_macaddr, fltr->src_mac_addr, ETH_ALEN);
req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
req->ip_protocol = keys->basic.ip_proto;
if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
- int i;
-
req->ethertype = htons(ETH_P_IPV6);
req->ip_addr_type =
CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
- *(struct in6_addr *)&req->src_ipaddr[0] =
- keys->addrs.v6addrs.src;
- *(struct in6_addr *)&req->dst_ipaddr[0] =
- keys->addrs.v6addrs.dst;
- for (i = 0; i < 4; i++) {
- req->src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
- req->dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
+ *(struct in6_addr *)&req->src_ipaddr[0] =
+ keys->addrs.v6addrs.src;
+ bnxt_fill_ipv6_mask(req->src_ipaddr_mask);
+ }
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
+ *(struct in6_addr *)&req->dst_ipaddr[0] =
+ keys->addrs.v6addrs.dst;
+ bnxt_fill_ipv6_mask(req->dst_ipaddr_mask);
}
} else {
- req->src_ipaddr[0] = keys->addrs.v4addrs.src;
- req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
- req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
- req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
+ req->src_ipaddr[0] = keys->addrs.v4addrs.src;
+ req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+ }
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
+ req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
+ req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+ }
}
if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
@@ -5104,80 +5705,85 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
}
- req->src_port = keys->ports.src;
- req->src_port_mask = cpu_to_be16(0xffff);
- req->dst_port = keys->ports.dst;
- req->dst_port_mask = cpu_to_be16(0xffff);
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) {
+ req->src_port = keys->ports.src;
+ req->src_port_mask = cpu_to_be16(0xffff);
+ }
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) {
+ req->dst_port = keys->ports.dst;
+ req->dst_port_mask = cpu_to_be16(0xffff);
+ }
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
if (!rc)
- fltr->filter_id = resp->ntuple_filter_id;
+ fltr->base.filter_id = resp->ntuple_filter_id;
hwrm_req_drop(bp, req);
return rc;
}
-#endif
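
With the new per-field gating, a partially specified rule programs only the fields whose BNXT_NTUPLE_MATCH_* bits are set and leaves the rest wildcarded. A toy illustration with made-up bit values (the real flags live in bnxt.h), modeling a rule that matches only the destination port:

#include <stdio.h>

enum {
	MATCH_SRC_IP   = 1,	/* illustrative stand-ins, not the */
	MATCH_DST_IP   = 2,	/* real BNXT_NTUPLE_MATCH_* values */
	MATCH_SRC_PORT = 4,
	MATCH_DST_PORT = 8,
};

int main(void)
{
	unsigned int ntuple_flags = MATCH_DST_PORT;

	printf("src ip %s, dst ip %s, src port %s, dst port %s\n",
	       ntuple_flags & MATCH_SRC_IP ? "exact" : "any",
	       ntuple_flags & MATCH_DST_IP ? "exact" : "any",
	       ntuple_flags & MATCH_SRC_PORT ? "exact" : "any",
	       ntuple_flags & MATCH_DST_PORT ? "exact" : "any");
	return 0;
}
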
static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
const u8 *mac_addr)
{
- struct hwrm_cfa_l2_filter_alloc_output *resp;
- struct hwrm_cfa_l2_filter_alloc_input *req;
+ struct bnxt_l2_filter *fltr;
+ struct bnxt_l2_key key;
int rc;
- rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
- if (rc)
- return rc;
+ ether_addr_copy(key.dst_mac_addr, mac_addr);
+ key.vlan = 0;
+ fltr = bnxt_alloc_l2_filter(bp, &key, GFP_KERNEL);
+ if (IS_ERR(fltr))
+ return PTR_ERR(fltr);
- req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
- if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
- req->flags |=
- cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
- req->dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
- req->enables =
- cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
- CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
- CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
- memcpy(req->l2_addr, mac_addr, ETH_ALEN);
- req->l2_addr_mask[0] = 0xff;
- req->l2_addr_mask[1] = 0xff;
- req->l2_addr_mask[2] = 0xff;
- req->l2_addr_mask[3] = 0xff;
- req->l2_addr_mask[4] = 0xff;
- req->l2_addr_mask[5] = 0xff;
-
- resp = hwrm_req_hold(bp, req);
- rc = hwrm_req_send(bp, req);
- if (!rc)
- bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
- resp->l2_filter_id;
- hwrm_req_drop(bp, req);
+ fltr->base.fw_vnic_id = bp->vnic_info[vnic_id].fw_vnic_id;
+ rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
+ if (rc)
+ bnxt_del_l2_filter(bp, fltr);
+ else
+ bp->vnic_info[vnic_id].l2_filters[idx] = fltr;
return rc;
}
-static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
+static void bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
{
- struct hwrm_cfa_l2_filter_free_input *req;
u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
- int rc;
/* Any associated ntuple filters will also be cleared by firmware. */
- rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
- if (rc)
- return rc;
- hwrm_req_hold(bp, req);
for (i = 0; i < num_of_vnics; i++) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
for (j = 0; j < vnic->uc_filter_count; j++) {
- req->l2_filter_id = vnic->fw_l2_filter_id[j];
+ struct bnxt_l2_filter *fltr = vnic->l2_filters[j];
- rc = hwrm_req_send(bp, req);
+ bnxt_hwrm_l2_filter_free(bp, fltr);
+ bnxt_del_l2_filter(bp, fltr);
}
vnic->uc_filter_count = 0;
}
- hwrm_req_drop(bp, req);
- return rc;
+}
+
+#define BNXT_DFLT_TUNL_TPA_BMAP \
+ (VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE | \
+ VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 | \
+ VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6)
+
+static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp,
+ struct hwrm_vnic_tpa_cfg_input *req)
+{
+ u32 tunl_tpa_bmap = BNXT_DFLT_TUNL_TPA_BMAP;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_VNIC_TUNNEL_TPA))
+ return;
+
+ if (bp->vxlan_port)
+ tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN;
+ if (bp->vxlan_gpe_port)
+ tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE;
+ if (bp->nge_port)
+ tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE;
+
+ req->enables |= cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN);
+ req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap);
}
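
Assuming illustrative bit values (the real ones are the VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_* constants), the bitmap composition above reduces to a few conditional ORs over the default set:

#include <stdio.h>

enum { GRE = 1, IPV4 = 2, IPV6 = 4, VXLAN = 8, GENEVE = 16, GPE = 32 };

int main(void)
{
	unsigned int bmap = GRE | IPV4 | IPV6;	/* default bitmap */
	int vxlan_port = 4789, nge_port = 6081, gpe_port = 0;

	if (vxlan_port)
		bmap |= VXLAN;
	if (gpe_port)
		bmap |= GPE;
	if (nge_port)
		bmap |= GENEVE;
	printf("tnl_tpa_en_bitmap = 0x%x\n", bmap);	/* 0x1f */
	return 0;
}
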
static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
@@ -5226,7 +5832,7 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
nsegs = (MAX_SKB_FRAGS - n) / n;
}
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
segs = MAX_TPA_SEGS_P5;
max_aggs = bp->max_tpa;
} else {
@@ -5236,6 +5842,7 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
req->max_aggs = cpu_to_le16(max_aggs);
req->min_agg_len = cpu_to_le32(512);
+ bnxt_hwrm_vnic_update_tunl_tpa(bp, req);
}
req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
@@ -5252,35 +5859,25 @@ static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- struct bnxt_napi *bnapi = rxr->bnapi;
- struct bnxt_cp_ring_info *cpr;
-
- cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
- return cpr->cp_ring_struct.fw_ring_id;
- } else {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ return rxr->rx_cpr->cp_ring_struct.fw_ring_id;
+ else
return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
- }
}
static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- struct bnxt_napi *bnapi = txr->bnapi;
- struct bnxt_cp_ring_info *cpr;
-
- cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
- return cpr->cp_ring_struct.fw_ring_id;
- } else {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ return txr->tx_cpr->cp_ring_struct.fw_ring_id;
+ else
return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
- }
}
static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
{
int entries;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
else
entries = HW_HASH_INDEX_SIZE;
@@ -5330,8 +5927,12 @@ static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5)
- return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ if (!rx_rings)
+ return 0;
+ return bnxt_calc_nr_ring_pages(rx_rings - 1,
+ BNXT_RSS_TABLE_ENTRIES_P5);
+ }
if (BNXT_CHIP_TYPE_NITRO_A0(bp))
return 2;
return 1;
@@ -5376,7 +5977,7 @@ static void
__bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
struct bnxt_vnic_info *vnic)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
bnxt_fill_hw_rss_tbl_p5(bp, vnic);
else
bnxt_fill_hw_rss_tbl(bp, vnic);
@@ -5401,7 +6002,7 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
struct hwrm_vnic_rss_cfg_input *req;
int rc;
- if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
+ if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) ||
vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
return 0;
@@ -5566,7 +6167,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
if (rc)
return rc;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
req->default_rx_ring_id =
@@ -5666,7 +6267,7 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
if (rc)
return rc;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
goto vnic_no_ring_grps;
/* map ring groups to this vnic */
@@ -5701,7 +6302,8 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
int rc;
bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
- bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
+ bp->flags &= ~BNXT_FLAG_ROCE_MIRROR_CAP;
+ bp->rss_cap &= ~BNXT_RSS_CAP_NEW_RSS_CAP;
if (bp->hwrm_spec_code < 0x10600)
return 0;
@@ -5714,9 +6316,9 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
if (!rc) {
u32 flags = le32_to_cpu(resp->flags);
- if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
(flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
- bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
+ bp->rss_cap |= BNXT_RSS_CAP_NEW_RSS_CAP;
if (flags &
VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
@@ -5725,18 +6327,22 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
* VLAN_STRIP_CAP properly.
*/
if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
- (BNXT_CHIP_P5_THOR(bp) &&
+ (BNXT_CHIP_P5(bp) &&
!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP)
- bp->fw_cap |= BNXT_FW_CAP_RSS_HASH_TYPE_DELTA;
+ bp->rss_cap |= BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED)
+ bp->rss_cap |= BNXT_RSS_CAP_RSS_TCAM;
bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
if (bp->max_tpa_v2) {
- if (BNXT_CHIP_P5_THOR(bp))
+ if (BNXT_CHIP_P5(bp))
bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
else
- bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
+ bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P7;
}
+ if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP)
+ bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA;
}
hwrm_req_drop(bp, req);
return rc;
@@ -5749,7 +6355,7 @@ static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
int rc;
u16 i;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return 0;
rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
@@ -5782,7 +6388,7 @@ static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
struct hwrm_ring_grp_free_input *req;
u16 i;
- if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
return;
if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
@@ -5842,12 +6448,15 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
req->length = cpu_to_le32(bp->tx_ring_mask + 1);
req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
req->queue_id = cpu_to_le16(ring->queue_id);
+ if (bp->flags & BNXT_FLAG_TX_COAL_CMPL)
+ req->cmpl_coal_cnt =
+ RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64;
break;
}
case HWRM_RING_ALLOC_RX:
req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
req->length = cpu_to_le32(bp->rx_ring_mask + 1);
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
u16 flags = 0;
/* Association of rx ring with stats context */
@@ -5862,7 +6471,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
}
break;
case HWRM_RING_ALLOC_AGG:
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
/* Association of agg ring with rx ring */
grp_info = &bp->grp_info[ring->grp_idx];
@@ -5880,7 +6489,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
case HWRM_RING_ALLOC_CMPL:
req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
req->length = cpu_to_le32(bp->cp_ring_mask + 1);
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
/* Association of cp ring with nq */
grp_info = &bp->grp_info[map_index];
req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
@@ -5948,14 +6557,34 @@ static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
}
}
+static void bnxt_set_db_mask(struct bnxt *bp, struct bnxt_db_info *db,
+ u32 ring_type)
+{
+ switch (ring_type) {
+ case HWRM_RING_ALLOC_TX:
+ db->db_ring_mask = bp->tx_ring_mask;
+ break;
+ case HWRM_RING_ALLOC_RX:
+ db->db_ring_mask = bp->rx_ring_mask;
+ break;
+ case HWRM_RING_ALLOC_AGG:
+ db->db_ring_mask = bp->rx_agg_ring_mask;
+ break;
+ case HWRM_RING_ALLOC_CMPL:
+ case HWRM_RING_ALLOC_NQ:
+ db->db_ring_mask = bp->cp_ring_mask;
+ break;
+ }
+ if (bp->flags & BNXT_FLAG_CHIP_P7) {
+ db->db_epoch_mask = db->db_ring_mask + 1;
+ db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
+ }
+}
+
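
On P7 the doorbell value carries an epoch bit that flips each time the producer index wraps the ring, which is what the db_epoch_mask/db_epoch_shift pair computed above encodes. A sketch of that math with made-up field positions; the real layout comes from the DBR_* definitions:

#include <stdio.h>
#include <stdint.h>

static uint32_t db_value(uint32_t raw_prod, uint32_t ring_mask,
			 uint32_t epoch_mask, int epoch_shift)
{
	uint32_t idx = raw_prod & ring_mask;		/* in-ring index */
	uint32_t epoch = !!(raw_prod & epoch_mask);	/* flips per wrap */

	return idx | (epoch << epoch_shift);
}

int main(void)
{
	/* 2048-entry ring: ring_mask 0x7ff, epoch_mask 0x800 */
	printf("%08x %08x\n",
	       db_value(2047, 0x7ff, 0x800, 24),	/* epoch 0, idx 0x7ff */
	       db_value(2048, 0x7ff, 0x800, 24));	/* epoch 1, idx 0 */
	return 0;
}
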
static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
u32 map_idx, u32 xid)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- if (BNXT_PF(bp))
- db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
- else
- db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
switch (ring_type) {
case HWRM_RING_ALLOC_TX:
db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
@@ -5972,6 +6601,11 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
break;
}
db->db_key64 |= (u64)xid << DBR_XID_SFT;
+
+ if (bp->flags & BNXT_FLAG_CHIP_P7)
+ db->db_key64 |= DBR_VALID;
+
+ db->doorbell = bp->bar1 + bp->db_offset;
} else {
db->doorbell = bp->bar1 + map_idx * 0x80;
switch (ring_type) {
@@ -5987,6 +6621,7 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
break;
}
}
+ bnxt_set_db_mask(bp, db, ring_type);
}
static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
@@ -5995,7 +6630,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
int i, rc = 0;
u32 type;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
type = HWRM_RING_ALLOC_NQ;
else
type = HWRM_RING_ALLOC_CMPL;
@@ -6031,15 +6666,13 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
struct bnxt_ring_struct *ring;
u32 map_idx;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ struct bnxt_cp_ring_info *cpr2 = txr->tx_cpr;
struct bnxt_napi *bnapi = txr->bnapi;
- struct bnxt_cp_ring_info *cpr, *cpr2;
u32 type2 = HWRM_RING_ALLOC_CMPL;
- cpr = &bnapi->cp_ring;
- cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
ring = &cpr2->cp_ring_struct;
- ring->handle = BNXT_TX_HDL;
+ ring->handle = BNXT_SET_NQ_HDL(cpr2);
map_idx = bnapi->index;
rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
if (rc)
@@ -6071,14 +6704,12 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
if (!agg_rings)
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ struct bnxt_cp_ring_info *cpr2 = rxr->rx_cpr;
u32 type2 = HWRM_RING_ALLOC_CMPL;
- struct bnxt_cp_ring_info *cpr2;
- cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
ring = &cpr2->cp_ring_struct;
- ring->handle = BNXT_RX_HDL;
+ ring->handle = BNXT_SET_NQ_HDL(cpr2);
rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
if (rc)
goto err_out;
@@ -6186,7 +6817,7 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
}
}
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
type = RING_FREE_REQ_RING_TYPE_RX_AGG;
else
type = RING_FREE_REQ_RING_TYPE_RX;
@@ -6213,7 +6844,7 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
*/
bnxt_disable_int_sync(bp);
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
type = RING_FREE_REQ_RING_TYPE_NQ;
else
type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
@@ -6223,18 +6854,16 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
struct bnxt_ring_struct *ring;
int j;
- for (j = 0; j < 2; j++) {
- struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
+ for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++) {
+ struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
- if (cpr2) {
- ring = &cpr2->cp_ring_struct;
- if (ring->fw_ring_id == INVALID_HW_RING_ID)
- continue;
- hwrm_ring_free_send_msg(bp, ring,
- RING_FREE_REQ_RING_TYPE_L2_CMPL,
- INVALID_HW_RING_ID);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- }
+ ring = &cpr2->cp_ring_struct;
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ continue;
+ hwrm_ring_free_send_msg(bp, ring,
+ RING_FREE_REQ_RING_TYPE_L2_CMPL,
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
}
ring = &cpr->cp_ring_struct;
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
@@ -6246,6 +6875,8 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
}
}
+static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
+ bool shared);
static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
bool shared);
@@ -6282,14 +6913,16 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
cp = le16_to_cpu(resp->alloc_cmpl_rings);
stats = le16_to_cpu(resp->alloc_stat_ctx);
hw_resc->resv_irqs = cp;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
int rx = hw_resc->resv_rx_rings;
int tx = hw_resc->resv_tx_rings;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx >>= 1;
if (cp < (rx + tx)) {
- bnxt_trim_rings(bp, &rx, &tx, cp, false);
+ rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false);
+ if (rc)
+ goto get_rings_exit;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
hw_resc->resv_rx_rings = rx;
@@ -6301,8 +6934,9 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
hw_resc->resv_cp_rings = cp;
hw_resc->resv_stat_ctxs = stats;
}
+get_rings_exit:
hwrm_req_drop(bp, req);
- return 0;
+ return rc;
}
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
@@ -6346,7 +6980,7 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
if (BNXT_NEW_RM(bp)) {
enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
enables |= tx_rings + ring_grps ?
FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
@@ -6362,16 +6996,17 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
req->num_rx_rings = cpu_to_le16(rx_rings);
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps);
+
req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
req->num_msix = cpu_to_le16(cp_rings);
- req->num_rsscos_ctxs =
- cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
+ req->num_rsscos_ctxs = cpu_to_le16(rss_ctx);
} else {
req->num_cmpl_rings = cpu_to_le16(cp_rings);
req->num_hw_ring_grps = cpu_to_le16(ring_grps);
req->num_rsscos_ctxs = cpu_to_le16(1);
- if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
+ if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) &&
bnxt_rfs_supported(bp))
req->num_rsscos_ctxs =
cpu_to_le16(ring_grps + 1);
@@ -6397,7 +7032,7 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
enables |= tx_rings + ring_grps ?
FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
} else {
@@ -6412,9 +7047,11 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
req->num_tx_rings = cpu_to_le16(tx_rings);
req->num_rx_rings = cpu_to_le16(rx_rings);
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps);
+
req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
- req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
+ req->num_rsscos_ctxs = cpu_to_le16(rss_ctx);
} else {
req->num_cmpl_rings = cpu_to_le16(cp_rings);
req->num_hw_ring_grps = cpu_to_le16(ring_grps);
@@ -6508,7 +7145,7 @@ static int bnxt_cp_rings_in_use(struct bnxt *bp)
{
int cp;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
return bnxt_nq_rings_in_use(bp);
cp = bp->tx_nr_rings + bp->rx_nr_rings;
@@ -6565,7 +7202,8 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
bnxt_check_rss_tbl_no_rmgr(bp);
return false;
}
- if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
+ if ((bp->flags & BNXT_FLAG_RFS) &&
+ !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
vnic = rx + 1;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
@@ -6573,9 +7211,9 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
(hw_resc->resv_hw_ring_grps != grp &&
- !(bp->flags & BNXT_FLAG_CHIP_P5)))
+ !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)))
return true;
- if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
+ if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) &&
hw_resc->resv_irqs != nq)
return true;
return false;
@@ -6590,13 +7228,15 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
int grp, rx_rings, rc;
int vnic = 1, stat;
bool sh = false;
+ int tx_cp;
if (!bnxt_need_reserve_rings(bp))
return 0;
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
sh = true;
- if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
+ if ((bp->flags & BNXT_FLAG_RFS) &&
+ !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
vnic = rx + 1;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
@@ -6639,7 +7279,8 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx = rx_rings << 1;
- cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
+ tx_cp = bnxt_num_tx_to_cp(bp, tx);
+ cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
bp->tx_nr_rings = tx;
/* If we cannot reserve all the RX rings, reset the RSS map only
@@ -6686,7 +7327,7 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
req->flags = cpu_to_le32(flags);
@@ -6708,7 +7349,7 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
else
@@ -6902,10 +7543,40 @@ int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
return hwrm_req_send(bp, req_rx);
}
+static int
+bnxt_hwrm_set_rx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
+{
+ u16 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
+
+ req->ring_id = cpu_to_le16(ring_id);
+ return hwrm_req_send(bp, req);
+}
+
+static int
+bnxt_hwrm_set_tx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
+{
+ struct bnxt_tx_ring_info *txr;
+ int i, rc;
+
+ bnxt_for_each_napi_tx(i, bnapi, txr) {
+ u16 ring_id;
+
+ ring_id = bnxt_cp_ring_for_tx(bp, txr);
+ req->ring_id = cpu_to_le16(ring_id);
+ rc = hwrm_req_send(bp, req);
+ if (rc)
+ return rc;
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
+ return 0;
+ }
+ return 0;
+}
+
int bnxt_hwrm_set_coal(struct bnxt *bp)
{
- struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx,
- *req;
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx;
int i, rc;
rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
@@ -6926,29 +7597,19 @@ int bnxt_hwrm_set_coal(struct bnxt *bp)
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_coal *hw_coal;
- u16 ring_id;
- req = req_rx;
- if (!bnapi->rx_ring) {
- ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
- req = req_tx;
- } else {
- ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
- }
- req->ring_id = cpu_to_le16(ring_id);
-
- rc = hwrm_req_send(bp, req);
+ if (!bnapi->rx_ring)
+ rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
+ else
+ rc = bnxt_hwrm_set_rx_coal(bp, bnapi, req_rx);
if (rc)
break;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
continue;
- if (bnapi->rx_ring && bnapi->tx_ring) {
- req = req_tx;
- ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
- req->ring_id = cpu_to_le16(ring_id);
- rc = hwrm_req_send(bp, req);
+ if (bnapi->rx_ring && bnapi->tx_ring[0]) {
+ rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
if (rc)
break;
}
@@ -7044,7 +7705,6 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
struct hwrm_func_qcfg_output *resp;
struct hwrm_func_qcfg_input *req;
- u32 min_db_offset = 0;
u16 flags;
int rc;
@@ -7102,16 +7762,17 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
if (bp->db_size)
goto func_qcfg_exit;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ bp->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024;
+ if (BNXT_CHIP_P5(bp)) {
if (BNXT_PF(bp))
- min_db_offset = DB_PF_OFFSET_P5;
+ bp->db_offset = DB_PF_OFFSET_P5;
else
- min_db_offset = DB_VF_OFFSET_P5;
+ bp->db_offset = DB_VF_OFFSET_P5;
}
bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
1024);
if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
- bp->db_size <= min_db_offset)
+ bp->db_size <= bp->db_offset)
bp->db_size = pci_resource_len(bp->pdev, 2);
func_qcfg_exit:
@@ -7119,37 +7780,99 @@ func_qcfg_exit:
return rc;
}
-static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
- struct hwrm_func_backing_store_qcaps_output *resp)
+static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm,
+ u8 init_val, u8 init_offset,
+ bool init_mask_set)
{
- struct bnxt_mem_init *mem_init;
- u16 init_mask;
- u8 init_val;
- u8 *offset;
- int i;
+ ctxm->init_value = init_val;
+ ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET;
+ if (init_mask_set)
+ ctxm->init_offset = init_offset * 4;
+ else
+ ctxm->init_value = 0;
+}
+
+static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max)
+{
+ struct bnxt_ctx_mem_info *ctx = bp->ctx;
+ u16 type;
+
+ for (type = 0; type < ctx_max; type++) {
+ struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
+ int n = 1;
- init_val = resp->ctx_kind_initializer;
- init_mask = le16_to_cpu(resp->ctx_init_mask);
- offset = &resp->qp_init_offset;
- mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
- for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
- mem_init->init_val = init_val;
- mem_init->offset = BNXT_MEM_INVALID_OFFSET;
- if (!init_mask)
+ if (!ctxm->max_entries)
continue;
- if (i == BNXT_CTX_MEM_INIT_STAT)
- offset = &resp->stat_init_offset;
- if (init_mask & (1 << i))
- mem_init->offset = *offset * 4;
- else
- mem_init->init_val = 0;
+
+ if (ctxm->instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL);
+ if (!ctxm->pg_info)
+ return -ENOMEM;
}
- ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
- ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
- ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
- ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
- ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
- ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
+ return 0;
+}
+
+#define BNXT_CTX_INIT_VALID(flags) \
+ (!!((flags) & \
+ FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT))
+
+static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
+{
+ struct hwrm_func_backing_store_qcaps_v2_output *resp;
+ struct hwrm_func_backing_store_qcaps_v2_input *req;
+ struct bnxt_ctx_mem_info *ctx;
+ u16 type;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2);
+ if (rc)
+ return rc;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ bp->ctx = ctx;
+
+ resp = hwrm_req_hold(bp, req);
+
+ for (type = 0; type < BNXT_CTX_V2_MAX; ) {
+ struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
+ u8 init_val, init_off, i;
+ __le32 *p;
+ u32 flags;
+
+ req->type = cpu_to_le16(type);
+ rc = hwrm_req_send(bp, req);
+ if (rc)
+ goto ctx_done;
+ flags = le32_to_cpu(resp->flags);
+ type = le16_to_cpu(resp->next_valid_type);
+ if (!(flags & FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID))
+ continue;
+
+ ctxm->type = le16_to_cpu(resp->type);
+ ctxm->entry_size = le16_to_cpu(resp->entry_size);
+ ctxm->flags = flags;
+ ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
+ ctxm->entry_multiple = resp->entry_multiple;
+ ctxm->max_entries = le32_to_cpu(resp->max_num_entries);
+ ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
+ init_val = resp->ctx_init_value;
+ init_off = resp->ctx_init_offset;
+ bnxt_init_ctx_initializer(ctxm, init_val, init_off,
+ BNXT_CTX_INIT_VALID(flags));
+ ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt,
+ BNXT_MAX_SPLIT_ENTRY);
+ for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt;
+ i++, p++)
+ ctxm->split[i] = le32_to_cpu(*p);
+ }
+ rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_V2_MAX);
+
+ctx_done:
+ hwrm_req_drop(bp, req);
+ return rc;
+}
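
The v2 query walks a firmware-defined type list: each response names next_valid_type, so the loop advances before filtering and unknown or unsupported types are skipped without the driver hard-coding the full set. The same walk in a self-contained toy, with a hypothetical table in place of the firmware:

#include <stdio.h>

struct caps { int valid; int next; };

int main(void)
{
	/* hypothetical firmware table: types 0, 2 and 5 are backed */
	struct caps tbl[8] = {
		[0] = { 1, 2 }, [1] = { 0, 2 }, [2] = { 1, 5 },
		[3] = { 0, 5 }, [4] = { 0, 5 }, [5] = { 1, 8 },
	};
	int type;

	for (type = 0; type < 8; ) {
		struct caps c = tbl[type];
		int cur = type;

		type = c.next;		/* advance before filtering, as above */
		if (!c.valid)
			continue;
		printf("type %d is valid\n", cur);
	}
	return 0;
}
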
static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
@@ -7161,6 +7884,9 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
return 0;
+ if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
+ return bnxt_hwrm_func_backing_store_qcaps_v2(bp);
+
rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
if (rc)
return rc;
@@ -7168,48 +7894,84 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send_silent(bp, req);
if (!rc) {
- struct bnxt_ctx_pg_info *ctx_pg;
+ struct bnxt_ctx_mem_type *ctxm;
struct bnxt_ctx_mem_info *ctx;
- int i, tqm_rings;
+ u8 init_val, init_idx = 0;
+ u16 init_mask;
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ ctx = bp->ctx;
if (!ctx) {
- rc = -ENOMEM;
- goto ctx_err;
- }
- ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
- ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
- ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
- ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
- ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
- ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
- ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
- ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
- ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
- ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
- ctx->vnic_max_vnic_entries =
- le16_to_cpu(resp->vnic_max_vnic_entries);
- ctx->vnic_max_ring_table_entries =
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ rc = -ENOMEM;
+ goto ctx_err;
+ }
+ bp->ctx = ctx;
+ }
+ init_val = resp->ctx_kind_initializer;
+ init_mask = le16_to_cpu(resp->ctx_init_mask);
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
+ ctxm->max_entries = le32_to_cpu(resp->qp_max_entries);
+ ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
+ ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
+ ctxm->qp_fast_qpmd_entries = le16_to_cpu(resp->fast_qpmd_qp_num_entries);
+ ctxm->entry_size = le16_to_cpu(resp->qp_entry_size);
+ bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset,
+ (init_mask & (1 << init_idx++)) != 0);
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
+ ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
+ ctxm->max_entries = le32_to_cpu(resp->srq_max_entries);
+ ctxm->entry_size = le16_to_cpu(resp->srq_entry_size);
+ bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset,
+ (init_mask & (1 << init_idx++)) != 0);
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
+ ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
+ ctxm->max_entries = le32_to_cpu(resp->cq_max_entries);
+ ctxm->entry_size = le16_to_cpu(resp->cq_entry_size);
+ bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset,
+ (init_mask & (1 << init_idx++)) != 0);
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
+ ctxm->vnic_entries = le16_to_cpu(resp->vnic_max_vnic_entries);
+ ctxm->max_entries = ctxm->vnic_entries +
le16_to_cpu(resp->vnic_max_ring_table_entries);
- ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
- ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
- ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
- ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
- ctx->tqm_min_entries_per_ring =
- le32_to_cpu(resp->tqm_min_entries_per_ring);
- ctx->tqm_max_entries_per_ring =
- le32_to_cpu(resp->tqm_max_entries_per_ring);
- ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
- if (!ctx->tqm_entries_multiple)
- ctx->tqm_entries_multiple = 1;
- ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
- ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
- ctx->mrav_num_entries_units =
+ ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size);
+ bnxt_init_ctx_initializer(ctxm, init_val,
+ resp->vnic_init_offset,
+ (init_mask & (1 << init_idx++)) != 0);
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
+ ctxm->max_entries = le32_to_cpu(resp->stat_max_entries);
+ ctxm->entry_size = le16_to_cpu(resp->stat_entry_size);
+ bnxt_init_ctx_initializer(ctxm, init_val,
+ resp->stat_init_offset,
+ (init_mask & (1 << init_idx++)) != 0);
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
+ ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size);
+ ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring);
+ ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring);
+ ctxm->entry_multiple = resp->tqm_entries_multiple;
+ if (!ctxm->entry_multiple)
+ ctxm->entry_multiple = 1;
+
+ memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm));
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
+ ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries);
+ ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size);
+ ctxm->mrav_num_entries_units =
le16_to_cpu(resp->mrav_num_entries_units);
- ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
- ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
+ bnxt_init_ctx_initializer(ctxm, init_val,
+ resp->mrav_init_offset,
+ (init_mask & (1 << init_idx++)) != 0);
- bnxt_init_ctx_initializer(ctx, resp);
+ ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
+ ctxm->entry_size = le16_to_cpu(resp->tim_entry_size);
+ ctxm->max_entries = le32_to_cpu(resp->tim_max_entries);
ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
if (!ctx->tqm_fp_rings_count)
@@ -7217,16 +7979,11 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
- tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
- ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
- if (!ctx_pg) {
- kfree(ctx);
- rc = -ENOMEM;
- goto ctx_err;
- }
- for (i = 0; i < tqm_rings; i++, ctx_pg++)
- ctx->tqm_mem[i] = ctx_pg;
- bp->ctx = ctx;
+ ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
+ memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm));
+ ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1;
+
+ rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_MAX);
} else {
rc = 0;
}
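/* Editorial note (not part of the patch): the V1 qcaps response carries a
 * single ctx_kind_initializer byte plus a ctx_init_mask bitmap.  init_idx
 * is post-incremented once per context type in the fixed order QP, SRQ,
 * CQ, VNIC, STAT, MRAV, so bit N of init_mask says whether the Nth type
 * in that order wants its backing memory pre-initialized to init_val.
 * TIM takes no initializer, and the V2 path above bypasses this parsing
 * entirely.
 */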
@@ -7265,6 +8022,7 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
struct hwrm_func_backing_store_cfg_input *req;
struct bnxt_ctx_mem_info *ctx = bp->ctx;
struct bnxt_ctx_pg_info *ctx_pg;
+ struct bnxt_ctx_mem_type *ctxm;
void **__req = (void **)&req;
u32 req_len = sizeof(*req);
__le32 *num_entries;
@@ -7286,82 +8044,102 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
req->enables = cpu_to_le32(enables);
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
- ctx_pg = &ctx->qp_mem;
+ ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
+ ctx_pg = ctxm->pg_info;
req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
- req->qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
- req->qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
- req->qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
+ req->qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries);
+ req->qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries);
+ req->qp_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req->qpc_pg_size_qpc_lvl,
&req->qpc_page_dir);
+
+ if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD)
+ req->qp_num_fast_qpmd_entries = cpu_to_le16(ctxm->qp_fast_qpmd_entries);
}
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
- ctx_pg = &ctx->srq_mem;
+ ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
+ ctx_pg = ctxm->pg_info;
req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
- req->srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
- req->srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
+ req->srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries);
+ req->srq_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req->srq_pg_size_srq_lvl,
&req->srq_page_dir);
}
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
- ctx_pg = &ctx->cq_mem;
+ ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
+ ctx_pg = ctxm->pg_info;
req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
- req->cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
- req->cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
+ req->cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries);
+ req->cq_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req->cq_pg_size_cq_lvl,
&req->cq_page_dir);
}
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
- ctx_pg = &ctx->vnic_mem;
- req->vnic_num_vnic_entries =
- cpu_to_le16(ctx->vnic_max_vnic_entries);
+ ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
+ ctx_pg = ctxm->pg_info;
+ req->vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries);
req->vnic_num_ring_table_entries =
- cpu_to_le16(ctx->vnic_max_ring_table_entries);
- req->vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
+ cpu_to_le16(ctxm->max_entries - ctxm->vnic_entries);
+ req->vnic_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req->vnic_pg_size_vnic_lvl,
&req->vnic_page_dir);
}
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
- ctx_pg = &ctx->stat_mem;
- req->stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
- req->stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
+ ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
+ ctx_pg = ctxm->pg_info;
+ req->stat_num_entries = cpu_to_le32(ctxm->max_entries);
+ req->stat_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req->stat_pg_size_stat_lvl,
&req->stat_page_dir);
}
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
- ctx_pg = &ctx->mrav_mem;
+ u32 units;
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
+ ctx_pg = ctxm->pg_info;
req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
- if (ctx->mrav_num_entries_units)
- flags |=
- FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
- req->mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
+ units = ctxm->mrav_num_entries_units;
+ if (units) {
+ u32 num_mr, num_ah = ctxm->mrav_av_entries;
+ u32 entries;
+
+ num_mr = ctx_pg->entries - num_ah;
+ entries = ((num_mr / units) << 16) | (num_ah / units);
+ req->mrav_num_entries = cpu_to_le32(entries);
+ flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
+ }
+ req->mrav_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req->mrav_pg_size_mrav_lvl,
&req->mrav_page_dir);
}
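/* Editorial note (not part of the patch): the reservation split above
 * packs both counts into one 32-bit field as
 * ((num_mr / units) << 16) | (num_ah / units); e.g. units = 128,
 * num_mr = 256K, num_ah = 128K gives (2048 << 16) | 1024 = 0x08000400.
 */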
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
- ctx_pg = &ctx->tim_mem;
+ ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
+ ctx_pg = ctxm->pg_info;
req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
- req->tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
+ req->tim_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req->tim_pg_size_tim_lvl,
&req->tim_page_dir);
}
+ ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
for (i = 0, num_entries = &req->tqm_sp_num_entries,
pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
pg_dir = &req->tqm_sp_page_dir,
- ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
+ ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP,
+ ctx_pg = ctxm->pg_info;
i < BNXT_MAX_TQM_RINGS;
+ ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i],
i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
if (!(enables & ena))
continue;
- req->tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
- ctx_pg = ctx->tqm_mem[i];
+ req->tqm_entry_size = cpu_to_le16(ctxm->entry_size);
*num_entries = cpu_to_le32(ctx_pg->entries);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
}
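/* Editorial note (not part of the patch): this loop is deliberately
 * lopsided: on the first pass ctx_pg points at the slow-path STQM
 * pg_info, and the increment expression then repoints it at the FTQM
 * per-ring pg_info array for the fast-path iterations, while
 * tqm_entry_size is taken from the STQM ctxm for all rings.
 */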
@@ -7385,7 +8163,7 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
- u8 depth, struct bnxt_mem_init *mem_init)
+ u8 depth, struct bnxt_ctx_mem_type *ctxm)
{
struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
int rc;
@@ -7423,7 +8201,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
rmem->depth = 1;
rmem->nr_pages = MAX_CTX_PAGES;
- rmem->mem_init = mem_init;
+ rmem->ctx_mem = ctxm;
if (i == (nr_tbls - 1)) {
int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
@@ -7438,7 +8216,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
if (rmem->nr_pages > 1 || depth)
rmem->depth = 1;
- rmem->mem_init = mem_init;
+ rmem->ctx_mem = ctxm;
rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
}
return rc;
@@ -7473,41 +8251,144 @@ static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
ctx_pg->nr_pages = 0;
}
+static int bnxt_setup_ctxm_pg_tbls(struct bnxt *bp,
+ struct bnxt_ctx_mem_type *ctxm, u32 entries,
+ u8 pg_lvl)
+{
+ struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
+ int i, rc = 0, n = 1;
+ u32 mem_size;
+
+ if (!ctxm->entry_size || !ctx_pg)
+ return -EINVAL;
+ if (ctxm->instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ if (ctxm->entry_multiple)
+ entries = roundup(entries, ctxm->entry_multiple);
+ entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries);
+ mem_size = entries * ctxm->entry_size;
+ for (i = 0; i < n && !rc; i++) {
+ ctx_pg[i].entries = entries;
+ rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl,
+ ctxm->init_value ? ctxm : NULL);
+ }
+ return rc;
+}
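/* Editorial sketch (not part of the patch; helper name hypothetical):
 * the sizing math above in isolation.  With entry_multiple = 32,
 * min_entries = 64, max_entries = 4096 and a request for 1000 entries
 * this returns 1024, so 1024 * entry_size bytes are allocated per
 * instance (one instance per bit set in instance_bmap).
 */
static inline u32 bnxt_example_ctxm_entries(u32 entries, u32 mult,
					    u32 min, u32 max)
{
	/* round up to the firmware's multiple first, then clamp */
	if (mult)
		entries = roundup(entries, mult);
	return clamp_t(u32, entries, min, max);
}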
+
+static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
+ struct bnxt_ctx_mem_type *ctxm,
+ bool last)
+{
+ struct hwrm_func_backing_store_cfg_v2_input *req;
+ u32 instance_bmap = ctxm->instance_bmap;
+ int i, j, rc = 0, n = 1;
+ __le32 *p;
+
+ if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
+ return 0;
+
+ if (instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ else
+ instance_bmap = 1;
+
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2);
+ if (rc)
+ return rc;
+ hwrm_req_hold(bp, req);
+ req->type = cpu_to_le16(ctxm->type);
+ req->entry_size = cpu_to_le16(ctxm->entry_size);
+ req->subtype_valid_cnt = ctxm->split_entry_cnt;
+ for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++)
+ p[i] = cpu_to_le32(ctxm->split[i]);
+ for (i = 0, j = 0; j < n && !rc; i++) {
+ struct bnxt_ctx_pg_info *ctx_pg;
+
+ if (!(instance_bmap & (1 << i)))
+ continue;
+ req->instance = cpu_to_le16(i);
+ ctx_pg = &ctxm->pg_info[j++];
+ if (!ctx_pg->entries)
+ continue;
+ req->num_entries = cpu_to_le32(ctx_pg->entries);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req->page_size_pbl_level,
+ &req->page_dir);
+ if (last && j == n)
+ req->flags =
+ cpu_to_le32(FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE);
+ rc = hwrm_req_send(bp, req);
+ }
+ hwrm_req_drop(bp, req);
+ return rc;
+}
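/* Editorial note (not part of the patch): at most one FW message is sent
 * per set bit in instance_bmap; e.g. a bitmap of 0xb (instances 0, 1 and
 * 3) gives n = hweight32(0xb) = 3, with req->instance walking the set bit
 * positions while pg_info[] is indexed 0..n-1.  Instances with no entries
 * are skipped, and only the final message of the last valid type carries
 * BS_CFG_ALL_DONE to tell the firmware that configuration is complete.
 */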
+
+static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena)
+{
+ struct bnxt_ctx_mem_info *ctx = bp->ctx;
+ struct bnxt_ctx_mem_type *ctxm;
+ u16 last_type;
+ int rc = 0;
+ u16 type;
+
+ if (!ena)
+ return 0;
+ else if (ena & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM)
+ last_type = BNXT_CTX_MAX - 1;
+ else
+ last_type = BNXT_CTX_L2_MAX - 1;
+ ctx->ctx_arr[last_type].last = 1;
+
+ for (type = 0; type < BNXT_CTX_V2_MAX; type++) {
+ ctxm = &ctx->ctx_arr[type];
+
+ rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
void bnxt_free_ctx_mem(struct bnxt *bp)
{
struct bnxt_ctx_mem_info *ctx = bp->ctx;
- int i;
+ u16 type;
if (!ctx)
return;
- if (ctx->tqm_mem[0]) {
- for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
- bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
- kfree(ctx->tqm_mem[0]);
- ctx->tqm_mem[0] = NULL;
+ for (type = 0; type < BNXT_CTX_V2_MAX; type++) {
+ struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
+ struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
+ int i, n = 1;
+
+ if (!ctx_pg)
+ continue;
+ if (ctxm->instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ for (i = 0; i < n; i++)
+ bnxt_free_ctx_pg_tbls(bp, &ctx_pg[i]);
+
+ kfree(ctx_pg);
+ ctxm->pg_info = NULL;
}
- bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
- bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
- bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
- bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
- bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
- bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
- bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
ctx->flags &= ~BNXT_CTX_FLAG_INITED;
+ kfree(ctx);
+ bp->ctx = NULL;
}
static int bnxt_alloc_ctx_mem(struct bnxt *bp)
{
- struct bnxt_ctx_pg_info *ctx_pg;
+ struct bnxt_ctx_mem_type *ctxm;
struct bnxt_ctx_mem_info *ctx;
- struct bnxt_mem_init *init;
- u32 mem_size, ena, entries;
- u32 entries_sp, min;
+ u32 l2_qps, qp1_qps, max_qps;
+ u32 ena, entries_sp, entries;
+ u32 srqs, max_srqs, min;
u32 num_mr, num_ah;
u32 extra_srqs = 0;
u32 extra_qps = 0;
+ u32 fast_qpmd_qps;
u8 pg_lvl = 1;
int i, rc;
@@ -7521,120 +8402,98 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
return 0;
+ ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
+ l2_qps = ctxm->qp_l2_entries;
+ qp1_qps = ctxm->qp_qp1_entries;
+ fast_qpmd_qps = ctxm->qp_fast_qpmd_entries;
+ max_qps = ctxm->max_entries;
+ ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
+ srqs = ctxm->srq_l2_entries;
+ max_srqs = ctxm->max_entries;
+ ena = 0;
if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
pg_lvl = 2;
- extra_qps = 65536;
- extra_srqs = 8192;
+ extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps);
+ /* allocate extra QPs if the fw supports the RoCE fast QP destroy feature */
+ extra_qps += fast_qpmd_qps;
+ extra_srqs = min_t(u32, 8192, max_srqs - srqs);
+ if (fast_qpmd_qps)
+ ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
}
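/* Editorial note (not part of the patch): the RoCE overhead is no longer
 * a fixed 64K QPs / 8K SRQs; both are now clamped against the maxima the
 * firmware reported in qcaps, and the fast-QPMD QPs only contribute (and
 * the QP_FAST_QPMD enable bit is only set) when the firmware advertises
 * that capability.
 */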
- ctx_pg = &ctx->qp_mem;
- ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
- extra_qps;
- if (ctx->qp_entry_size) {
- mem_size = ctx->qp_entry_size * ctx_pg->entries;
- init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
- if (rc)
- return rc;
- }
+ ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps,
+ pg_lvl);
+ if (rc)
+ return rc;
- ctx_pg = &ctx->srq_mem;
- ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
- if (ctx->srq_entry_size) {
- mem_size = ctx->srq_entry_size * ctx_pg->entries;
- init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ];
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
- if (rc)
- return rc;
- }
+ ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, srqs + extra_srqs, pg_lvl);
+ if (rc)
+ return rc;
- ctx_pg = &ctx->cq_mem;
- ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
- if (ctx->cq_entry_size) {
- mem_size = ctx->cq_entry_size * ctx_pg->entries;
- init = &ctx->mem_init[BNXT_CTX_MEM_INIT_CQ];
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
- if (rc)
- return rc;
- }
+ ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->cq_l2_entries +
+ extra_qps * 2, pg_lvl);
+ if (rc)
+ return rc;
- ctx_pg = &ctx->vnic_mem;
- ctx_pg->entries = ctx->vnic_max_vnic_entries +
- ctx->vnic_max_ring_table_entries;
- if (ctx->vnic_entry_size) {
- mem_size = ctx->vnic_entry_size * ctx_pg->entries;
- init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC];
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
- if (rc)
- return rc;
- }
+ ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
+ if (rc)
+ return rc;
- ctx_pg = &ctx->stat_mem;
- ctx_pg->entries = ctx->stat_max_entries;
- if (ctx->stat_entry_size) {
- mem_size = ctx->stat_entry_size * ctx_pg->entries;
- init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT];
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
- if (rc)
- return rc;
- }
+ ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
+ if (rc)
+ return rc;
- ena = 0;
if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
goto skip_rdma;
- ctx_pg = &ctx->mrav_mem;
+ ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
/* 128K extra is needed to accommodate static AH context
* allocation by f/w.
*/
- num_mr = 1024 * 256;
- num_ah = 1024 * 128;
- ctx_pg->entries = num_mr + num_ah;
- if (ctx->mrav_entry_size) {
- mem_size = ctx->mrav_entry_size * ctx_pg->entries;
- init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV];
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
- if (rc)
- return rc;
- }
- ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
- if (ctx->mrav_num_entries_units)
- ctx_pg->entries =
- ((num_mr / ctx->mrav_num_entries_units) << 16) |
- (num_ah / ctx->mrav_num_entries_units);
-
- ctx_pg = &ctx->tim_mem;
- ctx_pg->entries = ctx->qp_mem.entries;
- if (ctx->tim_entry_size) {
- mem_size = ctx->tim_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
- if (rc)
- return rc;
- }
+ num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
+ num_ah = min_t(u32, num_mr, 1024 * 128);
+ ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
+ if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
+ ctxm->mrav_av_entries = num_ah;
+
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2);
+ if (rc)
+ return rc;
+ ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1);
+ if (rc)
+ return rc;
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
skip_rdma:
- min = ctx->tqm_min_entries_per_ring;
- entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
- 2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
- entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
- entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
- entries = roundup(entries, ctx->tqm_entries_multiple);
- entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
- for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
- ctx_pg = ctx->tqm_mem[i];
- ctx_pg->entries = i ? entries : entries_sp;
- if (ctx->tqm_entry_size) {
- mem_size = ctx->tqm_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
- NULL);
- if (rc)
- return rc;
- }
+ ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
+ min = ctxm->min_entries;
+ entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps +
+ 2 * (extra_qps + qp1_qps) + min;
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries_sp, 2);
+ if (rc)
+ return rc;
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
+ entries = l2_qps + 2 * (extra_qps + qp1_qps);
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries, 2);
+ if (rc)
+ return rc;
+ for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
- }
ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
- rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
+
+ if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
+ rc = bnxt_backing_store_cfg_v2(bp, ena);
+ else
+ rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
if (rc) {
netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
rc);
@@ -7682,7 +8541,7 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
u16 max_msix = le16_to_cpu(resp->max_msix);
hw_resc->max_nqs = max_msix;
@@ -7711,7 +8570,7 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
u8 flags;
int rc;
- if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_THOR(bp)) {
+ if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5(bp)) {
rc = -ENODEV;
goto no_ptp;
}
@@ -7743,7 +8602,7 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
- } else if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ } else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
} else {
@@ -7815,10 +8674,16 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
+ if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;
+ if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP)
+ bp->flags |= BNXT_FLAG_TX_COAL_CMPL;
flags_ext2 = le32_to_cpu(resp->flags_ext2);
if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
+ if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED)
+ bp->flags |= BNXT_FLAG_UDP_GSO_CAP;
bp->tx_push_thresh = 0;
if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
@@ -8033,7 +8898,7 @@ static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
BNXT_FW_HEALTH_WIN_BASE +
BNXT_GRC_REG_CHIP_NUM);
}
- if (!BNXT_CHIP_P5(bp))
+ if (!BNXT_CHIP_P5_PLUS(bp))
return;
status_loc = BNXT_GRC_REG_STATUS_P5 |
@@ -8450,7 +9315,7 @@ static void bnxt_accumulate_all_stats(struct bnxt *bp)
int i;
/* Chip bug. Counter intermittently becomes 0. */
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
ignore_zero = true;
for (i = 0; i < bp->cp_nr_rings; i++) {
@@ -8644,7 +9509,7 @@ static void bnxt_clear_vnic(struct bnxt *bp)
return;
bnxt_hwrm_clear_vnic_filter(bp);
- if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) {
/* clear all RSS settings before freeing the vnic ctx */
bnxt_hwrm_clear_vnic_rss(bp);
bnxt_hwrm_vnic_ctx_free(bp);
@@ -8653,7 +9518,7 @@ static void bnxt_clear_vnic(struct bnxt *bp)
if (bp->flags & BNXT_FLAG_TPA)
bnxt_set_tpa(bp, false);
bnxt_hwrm_vnic_free(bp);
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
bnxt_hwrm_vnic_ctx_free(bp);
}
@@ -8810,7 +9675,7 @@ static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return __bnxt_setup_vnic_p5(bp, vnic_id);
else
return __bnxt_setup_vnic(bp, vnic_id);
@@ -8818,10 +9683,9 @@ static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
{
-#ifdef CONFIG_RFS_ACCEL
int i, rc = 0;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return 0;
for (i = 0; i < bp->rx_nr_rings; i++) {
@@ -8834,7 +9698,7 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
vnic = &bp->vnic_info[vnic_id];
vnic->flags |= BNXT_VNIC_RFS_FLAG;
- if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
+ if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
if (rc) {
@@ -8847,9 +9711,6 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
break;
}
return rc;
-#else
- return 0;
-#endif
}
/* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
@@ -8928,7 +9789,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
rc = bnxt_setup_vnic(bp, 0);
if (rc)
goto err_out;
- if (bp->fw_cap & BNXT_FW_CAP_RSS_HASH_TYPE_DELTA)
+ if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
bnxt_hwrm_update_rss_hash_cfg(bp);
if (bp->flags & BNXT_FLAG_RFS) {
@@ -9046,8 +9907,8 @@ static int bnxt_set_real_num_queues(struct bnxt *bp)
return rc;
}
-static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
- bool shared)
+static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
+ bool shared)
{
int _rx = *rx, _tx = *tx;
@@ -9070,19 +9931,59 @@ static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
return 0;
}
+static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp)
+{
+ return (tx - tx_xdp) / tx_sets + tx_xdp;
+}
+
+int bnxt_num_tx_to_cp(struct bnxt *bp, int tx)
+{
+ int tcs = bp->num_tc;
+
+ if (!tcs)
+ tcs = 1;
+ return __bnxt_num_tx_to_cp(bp, tx, tcs, bp->tx_nr_rings_xdp);
+}
+
+static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp)
+{
+ int tcs = bp->num_tc;
+
+ return (tx_cp - bp->tx_nr_rings_xdp) * tcs +
+ bp->tx_nr_rings_xdp;
+}
+
+static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
+ bool sh)
+{
+ int tx_cp = bnxt_num_tx_to_cp(bp, *tx);
+
+ if (tx_cp != *tx) {
+ int tx_saved = tx_cp, rc;
+
+ rc = __bnxt_trim_rings(bp, rx, &tx_cp, max, sh);
+ if (rc)
+ return rc;
+ if (tx_cp != tx_saved)
+ *tx = bnxt_num_cp_to_tx(bp, tx_cp);
+ return 0;
+ }
+ return __bnxt_trim_rings(bp, rx, tx, max, sh);
+}
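/* Editorial note (not part of the patch): the TX-ring <-> completion-ring
 * mapping above round-trips; e.g. with num_tc = 2 and tx_nr_rings_xdp = 4:
 *	tx = 20    -> tx_cp = (20 - 4) / 2 + 4 = 12
 *	tx_cp = 12 -> tx    = (12 - 4) * 2 + 4 = 20
 * because the TX rings of different TCs on the same queue share one
 * completion ring while each XDP ring keeps its own.
 */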
+
static void bnxt_setup_msix(struct bnxt *bp)
{
const int len = sizeof(bp->irq_tbl[0].name);
struct net_device *dev = bp->dev;
int tcs, i;
- tcs = netdev_get_num_tc(dev);
+ tcs = bp->num_tc;
if (tcs) {
int i, off, count;
for (i = 0; i < tcs; i++) {
count = bp->tx_nr_rings_per_tc;
- off = i * count;
+ off = BNXT_TC_TO_RING_BASE(bp, i);
netdev_set_tc_queue(dev, i, count, off);
}
}
@@ -9108,8 +10009,10 @@ static void bnxt_setup_inta(struct bnxt *bp)
{
const int len = sizeof(bp->irq_tbl[0].name);
- if (netdev_get_num_tc(bp->dev))
+ if (bp->num_tc) {
netdev_reset_tc(bp->dev);
+ bp->num_tc = 0;
+ }
snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
0);
@@ -9137,7 +10040,6 @@ static int bnxt_setup_int_mode(struct bnxt *bp)
return rc;
}
-#ifdef CONFIG_RFS_ACCEL
static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
{
return bp->hw_resc.max_rsscos_ctxs;
@@ -9147,7 +10049,6 @@ static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
{
return bp->hw_resc.max_vnics;
}
-#endif
unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
{
@@ -9163,7 +10064,7 @@ static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
{
unsigned int cp = bp->hw_resc.max_cp_rings;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
cp -= bnxt_get_ulp_msix_num(bp);
return cp;
@@ -9173,7 +10074,7 @@ static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
@@ -9189,7 +10090,7 @@ unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
unsigned int cp;
cp = bnxt_get_max_func_cp_rings_for_en(bp);
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return cp - bp->rx_nr_rings - bp->tx_nr_rings;
else
return cp - bp->cp_nr_rings;
@@ -9208,7 +10109,7 @@ int bnxt_get_avail_msix(struct bnxt *bp, int num)
int max_idx, avail_msix;
max_idx = bp->total_irqs;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
max_idx = min_t(int, bp->total_irqs, max_cp);
avail_msix = max_idx - bp->cp_nr_rings;
if (!BNXT_NEW_RM(bp) || avail_msix >= num)
@@ -9232,7 +10133,7 @@ static int bnxt_get_num_msix(struct bnxt *bp)
static int bnxt_init_msix(struct bnxt *bp)
{
- int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
+ int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp;
struct msix_entry *msix_ent;
total_vecs = bnxt_get_num_msix(bp);
@@ -9274,9 +10175,10 @@ static int bnxt_init_msix(struct bnxt *bp)
if (rc)
goto msix_setup_exit;
+ tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
bp->cp_nr_rings = (min == 1) ?
- max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
- bp->tx_nr_rings + bp->rx_nr_rings;
+ max_t(int, tx_cp, bp->rx_nr_rings) :
+ tx_cp + bp->rx_nr_rings;
} else {
rc = -ENOMEM;
@@ -9336,8 +10238,8 @@ static void bnxt_clear_int_mode(struct bnxt *bp)
int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
{
- int tcs = netdev_get_num_tc(bp->dev);
bool irq_cleared = false;
+ int tcs = bp->num_tc;
int rc;
if (!bnxt_need_reserve_rings(bp))
@@ -9363,6 +10265,7 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
netdev_err(bp->dev, "tx ring reservation failure\n");
netdev_reset_tc(bp->dev);
+ bp->num_tc = 0;
if (bp->tx_nr_rings_xdp)
bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
else
@@ -9439,6 +10342,7 @@ static int bnxt_request_irq(struct bnxt *bp)
if (rc)
break;
+ netif_napi_set_irq(&bp->bnapi[i]->napi, irq->vector);
irq->requested = 1;
if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
@@ -9466,6 +10370,11 @@ static void bnxt_del_napi(struct bnxt *bp)
if (!bp->bnapi)
return;
+ for (i = 0; i < bp->rx_nr_rings; i++)
+ netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL);
+ for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++)
+ netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL);
+
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
@@ -9486,7 +10395,7 @@ static void bnxt_init_napi(struct bnxt *bp)
if (bp->flags & BNXT_FLAG_USING_MSIX) {
int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
poll_fn = bnxt_poll_p5;
else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
cp_nr_rings--;
@@ -9542,8 +10451,6 @@ static void bnxt_enable_napi(struct bnxt *bp)
cpr = &bnapi->cp_ring;
bnapi->in_reset = false;
- bnapi->tx_pkts = 0;
-
if (bnapi->rx_ring) {
INIT_WORK(&cpr->dim.work, bnxt_dim_work);
cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
@@ -9647,7 +10554,10 @@ void bnxt_report_link(struct bnxt *bp)
signal = "(NRZ) ";
break;
case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
- signal = "(PAM4) ";
+ signal = "(PAM4 56Gbps) ";
+ break;
+ case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112:
+ signal = "(PAM4 112Gbps) ";
break;
default:
break;
@@ -9675,7 +10585,9 @@ static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
if (!resp->supported_speeds_auto_mode &&
!resp->supported_speeds_force_mode &&
!resp->supported_pam4_speeds_auto_mode &&
- !resp->supported_pam4_speeds_force_mode)
+ !resp->supported_pam4_speeds_force_mode &&
+ !resp->supported_speeds2_auto_mode &&
+ !resp->supported_speeds2_force_mode)
return true;
return false;
}
@@ -9721,6 +10633,7 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
/* Phy re-enabled, reprobe the speeds */
link_info->support_auto_speeds = 0;
link_info->support_pam4_auto_speeds = 0;
+ link_info->support_auto_speeds2 = 0;
}
}
if (resp->supported_speeds_auto_mode)
@@ -9729,6 +10642,9 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
if (resp->supported_pam4_speeds_auto_mode)
link_info->support_pam4_auto_speeds =
le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
+ if (resp->supported_speeds2_auto_mode)
+ link_info->support_auto_speeds2 =
+ le16_to_cpu(resp->supported_speeds2_auto_mode);
bp->port_count = resp->port_cnt;
@@ -9746,9 +10662,19 @@ static bool bnxt_support_dropped(u16 advertising, u16 supported)
static bool bnxt_support_speed_dropped(struct bnxt_link_info *link_info)
{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+
/* Check if any advertised speeds are no longer supported. The caller
* holds the link_lock mutex, so we can modify link_info settings.
*/
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ if (bnxt_support_dropped(link_info->advertising,
+ link_info->support_auto_speeds2)) {
+ link_info->advertising = link_info->support_auto_speeds2;
+ return true;
+ }
+ return false;
+ }
if (bnxt_support_dropped(link_info->advertising,
link_info->support_auto_speeds)) {
link_info->advertising = link_info->support_auto_speeds;
@@ -9797,18 +10723,25 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
link_info->lp_pause = resp->link_partner_adv_pause;
link_info->force_pause_setting = resp->force_pause;
link_info->duplex_setting = resp->duplex_cfg;
- if (link_info->phy_link_status == BNXT_LINK_LINK)
+ if (link_info->phy_link_status == BNXT_LINK_LINK) {
link_info->link_speed = le16_to_cpu(resp->link_speed);
- else
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
+ link_info->active_lanes = resp->active_lanes;
+ } else {
link_info->link_speed = 0;
+ link_info->active_lanes = 0;
+ }
link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
link_info->force_pam4_link_speed =
le16_to_cpu(resp->force_pam4_link_speed);
+ link_info->force_link_speed2 = le16_to_cpu(resp->force_link_speeds2);
link_info->support_speeds = le16_to_cpu(resp->support_speeds);
link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
+ link_info->support_speeds2 = le16_to_cpu(resp->support_speeds2);
link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
link_info->auto_pam4_link_speeds =
le16_to_cpu(resp->auto_pam4_link_speed_mask);
+ link_info->auto_link_speeds2 = le16_to_cpu(resp->auto_link_speeds2);
link_info->lp_auto_link_speeds =
le16_to_cpu(resp->link_partner_adv_speeds);
link_info->lp_auto_pam4_link_speeds =
@@ -9947,7 +10880,11 @@ static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_
{
if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
- if (bp->link_info.advertising) {
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ req->enables |=
+ cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK);
+ req->auto_link_speeds2_mask = cpu_to_le16(bp->link_info.advertising);
+ } else if (bp->link_info.advertising) {
req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
}
@@ -9961,7 +10898,12 @@ static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_
req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
} else {
req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
- if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ req->force_link_speeds2 = cpu_to_le16(bp->link_info.req_link_speed);
+ req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2);
+ netif_info(bp, link, bp->dev, "Forcing FW speed2: %d\n",
+ (u32)bp->link_info.req_link_speed);
+ } else if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
} else {
@@ -10226,8 +11168,6 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
bnxt_ulp_stop(bp);
bnxt_free_ctx_mem(bp);
- kfree(bp->ctx);
- bp->ctx = NULL;
bnxt_dcb_free(bp);
rc = bnxt_fw_init_one(bp);
if (rc) {
@@ -10578,6 +11518,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
/* VF-reps may need to be re-opened after the PF is re-opened */
if (BNXT_PF(bp))
bnxt_vf_reps_open(bp);
+ if (bp->ptp_cfg)
+ atomic_set(&bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS);
bnxt_ptp_init_rtc(bp, true);
bnxt_ptp_cfg_tstamp_filters(bp);
return 0;
@@ -11115,7 +12057,6 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
{
struct net_device *dev = bp->dev;
struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
- struct hwrm_cfa_l2_filter_free_input *req;
struct netdev_hw_addr *ha;
int i, off = 0, rc;
bool uc_update;
@@ -11127,16 +12068,12 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
if (!uc_update)
goto skip_uc;
- rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
- if (rc)
- return rc;
- hwrm_req_hold(bp, req);
for (i = 1; i < vnic->uc_filter_count; i++) {
- req->l2_filter_id = vnic->fw_l2_filter_id[i];
+ struct bnxt_l2_filter *fltr = vnic->l2_filters[i];
- rc = hwrm_req_send(bp, req);
+ bnxt_hwrm_l2_filter_free(bp, fltr);
+ bnxt_del_l2_filter(bp, fltr);
}
- hwrm_req_drop(bp, req);
vnic->uc_filter_count = 1;
@@ -11213,7 +12150,7 @@ static bool bnxt_can_reserve_rings(struct bnxt *bp)
/* If the chip and firmware support RFS */
static bool bnxt_rfs_supported(struct bnxt *bp)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
return true;
return false;
@@ -11223,7 +12160,7 @@ static bool bnxt_rfs_supported(struct bnxt *bp)
return false;
if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
return true;
- if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
+ if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
return true;
return false;
}
@@ -11231,10 +12168,9 @@ static bool bnxt_rfs_supported(struct bnxt *bp)
/* If runtime conditions support RFS */
static bool bnxt_rfs_capable(struct bnxt *bp)
{
-#ifdef CONFIG_RFS_ACCEL
int vnics, max_vnics, max_rss_ctxs;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return bnxt_rfs_supported(bp);
if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
return false;
@@ -11244,7 +12180,7 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
/* RSS contexts not a limiting factor */
- if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
+ if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
max_rss_ctxs = max_vnics;
if (vnics > max_vnics || vnics > max_rss_ctxs) {
if (bp->rx_nr_rings > 1)
@@ -11267,9 +12203,6 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
return false;
-#else
- return false;
-#endif
}
static netdev_features_t bnxt_fix_features(struct net_device *dev,
@@ -11336,7 +12269,7 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
update_tpa = true;
if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
(flags & BNXT_FLAG_TPA) == 0 ||
- (bp->flags & BNXT_FLAG_CHIP_P5))
+ (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
re_init = true;
}
@@ -11445,9 +12378,10 @@ static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
struct udphdr *uh = udp_hdr(skb);
__be16 udp_port = uh->dest;
- if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
+ if (udp_port != bp->vxlan_port && udp_port != bp->nge_port &&
+ udp_port != bp->vxlan_gpe_port)
return false;
- if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) {
+ if (skb->inner_protocol == htons(ETH_P_TEB)) {
struct ethhdr *eh = inner_eth_hdr(skb);
switch (eh->h_proto) {
@@ -11458,6 +12392,11 @@ static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
skb_inner_network_offset(skb),
NULL);
}
+ } else if (skb->inner_protocol == htons(ETH_P_IP)) {
+ return true;
+ } else if (skb->inner_protocol == htons(ETH_P_IPV6)) {
+ return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
+ NULL);
}
return false;
}
@@ -11578,15 +12517,13 @@ static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
{
- struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
- int i = bnapi->index;
-
- if (!txr)
- return;
+ struct bnxt_tx_ring_info *txr;
+ int i = bnapi->index, j;
- netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
- i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
- txr->tx_cons);
+ bnxt_for_each_napi_tx(j, bnapi, txr)
+ netdev_info(bnapi->bp->dev, "[%d.%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
+ i, j, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
+ txr->tx_cons);
}
static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
@@ -11749,8 +12686,7 @@ static void bnxt_timer(struct timer_list *t)
if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
- if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
- netif_carrier_ok(dev))
+ if (BNXT_CHIP_P5(bp) && !bp->chip_rev && netif_carrier_ok(dev))
bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT);
bnxt_restart_timer:
@@ -11832,6 +12768,16 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
bnxt_rtnl_unlock_sp(bp);
}
+static void bnxt_fw_fatal_close(struct bnxt *bp)
+{
+ bnxt_tx_disable(bp);
+ bnxt_disable_napi(bp);
+ bnxt_disable_int_sync(bp);
+ bnxt_free_irq(bp);
+ bnxt_clear_int_mode(bp);
+ pci_disable_device(bp->pdev);
+}
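/* Editorial note (not part of the patch): the teardown order matters
 * here: quiesce TX first so no new work is queued, then NAPI so polling
 * stops, then sync and free the IRQs, and only then disable the PCI
 * device.  This sequence is shared by the fatal-error and reset-close
 * paths below.
 */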
+
static void bnxt_fw_reset_close(struct bnxt *bp)
{
bnxt_ulp_stop(bp);
@@ -11845,12 +12791,7 @@ static void bnxt_fw_reset_close(struct bnxt *bp)
pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
if (val == 0xffff)
bp->fw_reset_min_dsecs = 0;
- bnxt_tx_disable(bp);
- bnxt_disable_napi(bp);
- bnxt_disable_int_sync(bp);
- bnxt_free_irq(bp);
- bnxt_clear_int_mode(bp);
- pci_disable_device(bp->pdev);
+ bnxt_fw_fatal_close(bp);
}
__bnxt_close_nic(bp, true, false);
bnxt_vf_reps_free(bp);
@@ -11859,8 +12800,6 @@ static void bnxt_fw_reset_close(struct bnxt *bp)
if (pci_is_enabled(bp->pdev))
pci_disable_device(bp->pdev);
bnxt_free_ctx_mem(bp);
- kfree(bp->ctx);
- bp->ctx = NULL;
}
static bool is_bnxt_fw_ok(struct bnxt *bp)
@@ -12003,7 +12942,7 @@ static void bnxt_chk_missed_irq(struct bnxt *bp)
{
int i;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
return;
for (i = 0; i < bp->cp_nr_rings; i++) {
@@ -12016,12 +12955,11 @@ static void bnxt_chk_missed_irq(struct bnxt *bp)
continue;
cpr = &bnapi->cp_ring;
- for (j = 0; j < 2; j++) {
- struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
+ for (j = 0; j < cpr->cp_ring_count; j++) {
+ struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
u32 val[2];
- if (!cpr2 || cpr2->has_more_work ||
- !bnxt_has_work(bp, cpr2))
+ if (cpr2->has_more_work || !bnxt_has_work(bp, cpr2))
continue;
if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
@@ -12182,36 +13120,42 @@ static void bnxt_sp_task(struct work_struct *work)
clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
}
+static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
+ int *max_cp);
+
/* Under rtnl_lock */
int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp)
{
- int max_rx, max_tx, tx_sets = 1;
+ int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp;
int tx_rings_needed, stats;
int rx_rings = rx;
- int cp, vnics, rc;
+ int cp, vnics;
if (tcs)
tx_sets = tcs;
- rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
- if (rc)
- return rc;
+ _bnxt_get_max_rings(bp, &max_rx, &max_tx, &max_cp);
- if (max_rx < rx)
+ if (max_rx < rx_rings)
return -ENOMEM;
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ rx_rings <<= 1;
+
tx_rings_needed = tx * tx_sets + tx_xdp;
if (max_tx < tx_rings_needed)
return -ENOMEM;
vnics = 1;
- if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
- vnics += rx_rings;
+ if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5_PLUS)) ==
+ BNXT_FLAG_RFS)
+ vnics += rx;
- if (bp->flags & BNXT_FLAG_AGG_RINGS)
- rx_rings <<= 1;
- cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
+ tx_cp = __bnxt_num_tx_to_cp(bp, tx_rings_needed, tx_sets, tx_xdp);
+ cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx;
+ if (max_cp < cp)
+ return -ENOMEM;
stats = cp;
if (BNXT_NEW_RM(bp)) {
cp += bnxt_get_ulp_msix_num(bp);
@@ -12286,10 +13230,10 @@ static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp)
{
u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp);
- if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
(fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18)))
return true;
- if ((bp->flags & BNXT_FLAG_CHIP_P5) &&
+ if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
(fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172)))
return true;
return false;
@@ -12372,15 +13316,15 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp)
static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
{
- bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
+ bp->rss_cap &= ~BNXT_RSS_CAP_UDP_RSS_CAP;
bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
- if (bp->fw_cap & BNXT_FW_CAP_RSS_HASH_TYPE_DELTA)
+ if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
bp->rss_hash_delta = bp->rss_hash_cfg;
if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
- bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
+ bp->rss_cap |= BNXT_RSS_CAP_UDP_RSS_CAP;
bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
}
@@ -12850,7 +13794,7 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
{
struct bnxt *bp = netdev_priv(dev);
bool sh = false;
- int rc;
+ int rc, tx_cp;
if (tc > bp->max_tc) {
netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
@@ -12858,7 +13802,7 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
return -EINVAL;
}
- if (netdev_get_num_tc(dev) == tc)
+ if (bp->num_tc == tc)
return 0;
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
@@ -12876,13 +13820,16 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
if (tc) {
bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
netdev_set_num_tc(dev, tc);
+ bp->num_tc = tc;
} else {
bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
netdev_reset_tc(dev);
+ bp->num_tc = 0;
}
bp->tx_nr_rings += bp->tx_nr_rings_xdp;
- bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
- bp->tx_nr_rings + bp->rx_nr_rings;
+ tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
+ bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
+ tx_cp + bp->rx_nr_rings;
if (netif_running(bp->dev))
return bnxt_open_nic(bp, true, false);
@@ -12932,38 +13879,102 @@ static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
}
}
-#ifdef CONFIG_RFS_ACCEL
+u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
+ const struct sk_buff *skb)
+{
+ struct bnxt_vnic_info *vnic;
+
+ if (skb)
+ return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
+
+ vnic = &bp->vnic_info[0];
+ return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key);
+}
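/* Editorial note (not part of the patch): two hash sources feed the same
 * filter table: aRFS callers pass the skb and reuse its raw hash (the
 * NIC-computed RSS hash on receive), while skb-less callers such as
 * user-added ntuple rules recompute a Toeplitz hash over the flow keys
 * using VNIC 0's RSS hash key.
 */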
+
+int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
+ u32 idx)
+{
+ struct hlist_head *head;
+ int bit_id;
+
+ spin_lock_bh(&bp->ntp_fltr_lock);
+ bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, BNXT_MAX_FLTR, 0);
+ if (bit_id < 0) {
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ return -ENOMEM;
+ }
+
+ fltr->base.sw_id = (u16)bit_id;
+ fltr->base.type = BNXT_FLTR_TYPE_NTUPLE;
+ fltr->base.flags |= BNXT_ACT_RING_DST;
+ head = &bp->ntp_fltr_hash_tbl[idx];
+ hlist_add_head_rcu(&fltr->base.hash, head);
+ set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
+ bp->ntp_fltr_count++;
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ return 0;
+}
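/* Editorial note (not part of the patch): insertion takes ntp_fltr_lock
 * (a BH-safe spinlock) to carve a sw_id out of ntp_fltr_bmap and link the
 * node with hlist_add_head_rcu(); readers walk the chain under
 * rcu_read_lock() only (see bnxt_lookup_ntp_filter_from_idx() below), and
 * bnxt_del_ntp_filter() later unlinks under the same lock and defers the
 * free with kfree_rcu().
 */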
+
static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
struct bnxt_ntuple_filter *f2)
{
struct flow_keys *keys1 = &f1->fkeys;
struct flow_keys *keys2 = &f2->fkeys;
+ if (f1->ntuple_flags != f2->ntuple_flags)
+ return false;
+
if (keys1->basic.n_proto != keys2->basic.n_proto ||
keys1->basic.ip_proto != keys2->basic.ip_proto)
return false;
if (keys1->basic.n_proto == htons(ETH_P_IP)) {
- if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
- keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
+ if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) &&
+ keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src) ||
+ ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) &&
+ keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst))
return false;
} else {
- if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
- sizeof(keys1->addrs.v6addrs.src)) ||
- memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
- sizeof(keys1->addrs.v6addrs.dst)))
+ if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) &&
+ memcmp(&keys1->addrs.v6addrs.src,
+ &keys2->addrs.v6addrs.src,
+ sizeof(keys1->addrs.v6addrs.src))) ||
+ ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) &&
+ memcmp(&keys1->addrs.v6addrs.dst,
+ &keys2->addrs.v6addrs.dst,
+ sizeof(keys1->addrs.v6addrs.dst))))
return false;
}
- if (keys1->ports.ports == keys2->ports.ports &&
- keys1->control.flags == keys2->control.flags &&
- ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
- ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
+ if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) &&
+ keys1->ports.src != keys2->ports.src) ||
+ ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) &&
+ keys1->ports.dst != keys2->ports.dst))
+ return false;
+
+ if (keys1->control.flags == keys2->control.flags &&
+ f1->l2_fltr == f2->l2_fltr)
return true;
return false;
}
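/* Editorial note (not part of the patch): bnxt_fltr_match() first insists
 * that both filters carry the same set of ntuple_flags, then compares
 * only the flagged tuple fields (src/dst IP, src/dst port); the L2
 * context is compared by l2_fltr pointer identity rather than raw MAC
 * addresses.  This lets partially-specified rules (e.g. dst-port-only)
 * share the hash table with fully-specified aRFS filters.
 */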
+struct bnxt_ntuple_filter *
+bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp,
+ struct bnxt_ntuple_filter *fltr, u32 idx)
+{
+ struct bnxt_ntuple_filter *f;
+ struct hlist_head *head;
+
+ head = &bp->ntp_fltr_hash_tbl[idx];
+ hlist_for_each_entry_rcu(f, head, base.hash) {
+ if (bnxt_fltr_match(f, fltr))
+ return f;
+ }
+ return NULL;
+}
+
+#ifdef CONFIG_RFS_ACCEL
static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id)
{
@@ -12971,29 +13982,31 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
struct bnxt_ntuple_filter *fltr, *new_fltr;
struct flow_keys *fkeys;
struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
- int rc = 0, idx, bit_id, l2_idx = 0;
- struct hlist_head *head;
+ struct bnxt_l2_filter *l2_fltr;
+ int rc = 0, idx;
u32 flags;
- if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
- struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
- int off = 0, j;
+ if (ether_addr_equal(dev->dev_addr, eth->h_dest)) {
+ l2_fltr = bp->vnic_info[0].l2_filters[0];
+ atomic_inc(&l2_fltr->refcnt);
+ } else {
+ struct bnxt_l2_key key;
- netif_addr_lock_bh(dev);
- for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
- if (ether_addr_equal(eth->h_dest,
- vnic->uc_list + off)) {
- l2_idx = j + 1;
- break;
- }
- }
- netif_addr_unlock_bh(dev);
- if (!l2_idx)
+ ether_addr_copy(key.dst_mac_addr, eth->h_dest);
+ key.vlan = 0;
+ l2_fltr = bnxt_lookup_l2_filter_from_key(bp, &key);
+ if (!l2_fltr)
return -EINVAL;
+ if (l2_fltr->base.flags & BNXT_ACT_FUNC_DST) {
+ bnxt_del_l2_filter(bp, l2_fltr);
+ return -EINVAL;
+ }
}
new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
- if (!new_fltr)
+ if (!new_fltr) {
+ bnxt_del_l2_filter(bp, l2_fltr);
return -ENOMEM;
+ }
fkeys = &new_fltr->fkeys;
if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
@@ -13020,49 +14033,52 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
goto err_free;
}
- memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
- memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
+ new_fltr->l2_fltr = l2_fltr;
+ new_fltr->ntuple_flags = BNXT_NTUPLE_MATCH_ALL;
- idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
- head = &bp->ntp_fltr_hash_tbl[idx];
+ idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb);
rcu_read_lock();
- hlist_for_each_entry_rcu(fltr, head, hash) {
- if (bnxt_fltr_match(fltr, new_fltr)) {
- rc = fltr->sw_id;
- rcu_read_unlock();
- goto err_free;
- }
- }
- rcu_read_unlock();
-
- spin_lock_bh(&bp->ntp_fltr_lock);
- bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
- BNXT_NTP_FLTR_MAX_FLTR, 0);
- if (bit_id < 0) {
- spin_unlock_bh(&bp->ntp_fltr_lock);
- rc = -ENOMEM;
+ fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
+ if (fltr) {
+ rc = fltr->base.sw_id;
+ rcu_read_unlock();
goto err_free;
}
+ rcu_read_unlock();
- new_fltr->sw_id = (u16)bit_id;
new_fltr->flow_id = flow_id;
- new_fltr->l2_fltr_idx = l2_idx;
- new_fltr->rxq = rxq_index;
- hlist_add_head_rcu(&new_fltr->hash, head);
- bp->ntp_fltr_count++;
- spin_unlock_bh(&bp->ntp_fltr_lock);
-
- bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
-
- return new_fltr->sw_id;
+ new_fltr->base.rxq = rxq_index;
+ rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
+ if (!rc) {
+ bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
+ return new_fltr->base.sw_id;
+ }
err_free:
+ bnxt_del_l2_filter(bp, l2_fltr);
kfree(new_fltr);
return rc;
}
+#endif
+
+void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr)
+{
+ spin_lock_bh(&bp->ntp_fltr_lock);
+ if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ return;
+ }
+ hlist_del_rcu(&fltr->base.hash);
+ bp->ntp_fltr_count--;
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ bnxt_del_l2_filter(bp, fltr->l2_fltr);
+ clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
+ kfree_rcu(fltr, base.rcu);
+}
static void bnxt_cfg_ntp_filters(struct bnxt *bp)
{
+#ifdef CONFIG_RFS_ACCEL
int i;
for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
@@ -13072,13 +14088,15 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
int rc;
head = &bp->ntp_fltr_hash_tbl[i];
- hlist_for_each_entry_safe(fltr, tmp, head, hash) {
+ hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
bool del = false;
- if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
- if (rps_may_expire_flow(bp->dev, fltr->rxq,
+ if (test_bit(BNXT_FLTR_VALID, &fltr->base.state)) {
+ if (fltr->base.flags & BNXT_ACT_NO_AGING)
+ continue;
+ if (rps_may_expire_flow(bp->dev, fltr->base.rxq,
fltr->flow_id,
- fltr->sw_id)) {
+ fltr->base.sw_id)) {
bnxt_hwrm_cfa_ntuple_filter_free(bp,
fltr);
del = true;
@@ -13089,30 +14107,16 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
if (rc)
del = true;
else
- set_bit(BNXT_FLTR_VALID, &fltr->state);
+ set_bit(BNXT_FLTR_VALID, &fltr->base.state);
}
- if (del) {
- spin_lock_bh(&bp->ntp_fltr_lock);
- hlist_del_rcu(&fltr->hash);
- bp->ntp_fltr_count--;
- spin_unlock_bh(&bp->ntp_fltr_lock);
- synchronize_rcu();
- clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
- kfree(fltr);
- }
+ if (del)
+ bnxt_del_ntp_filter(bp, fltr);
}
}
+#endif
}
-#else
-
-static void bnxt_cfg_ntp_filters(struct bnxt *bp)
-{
-}
-
-#endif /* CONFIG_RFS_ACCEL */
-
static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
unsigned int entry, struct udp_tunnel_info *ti)
{
@@ -13120,9 +14124,11 @@ static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int tabl
unsigned int cmd;
if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
- cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
+ cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
+ else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
+ cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE;
else
- cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
+ cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE;
return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
}
@@ -13135,8 +14141,10 @@ static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int ta
if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
- else
+ else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
+ else
+ cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE;
return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
}
@@ -13150,6 +14158,16 @@ static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
},
+}, bnxt_udp_tunnels_p7 = {
+ .set_port = bnxt_udp_tunnel_set_port,
+ .unset_port = bnxt_udp_tunnel_unset_port,
+ .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
+ UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .tables = {
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, },
+ },
};
static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
@@ -13257,6 +14275,8 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_ptp_clear(bp);
unregister_netdev(dev);
+ bnxt_free_l2_filters(bp, true);
+ bnxt_free_ntp_fltrs(bp, true);
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
/* Flush any pending tasks */
cancel_work_sync(&bp->sp_task);
@@ -13279,8 +14299,6 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
bnxt_free_ctx_mem(bp);
- kfree(bp->ctx);
- bp->ctx = NULL;
kfree(bp->rss_indir_tbl);
bp->rss_indir_tbl = NULL;
bnxt_free_port_stats(bp);
@@ -13349,7 +14367,7 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
bnxt_get_ulp_msix_num(bp),
hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
*max_cp = min_t(int, *max_cp, max_irq);
max_ring_grps = hw_resc->max_hw_ring_grps;
if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
@@ -13358,8 +14376,14 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
}
if (bp->flags & BNXT_FLAG_AGG_RINGS)
*max_rx >>= 1;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ int rc;
+
+ rc = __bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
+ if (rc) {
+ *max_rx = 0;
+ *max_tx = 0;
+ }
/* On P5 chips, the max_cp output param should be the available NQs */
*max_cp = max_irq;
}
@@ -13660,7 +14684,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
max_irqs = bnxt_get_max_irq(pdev);
- dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
+ dev = alloc_etherdev_mqs(sizeof(*bp), max_irqs * BNXT_MAX_QUEUE,
+ max_irqs);
if (!dev)
return -ENOMEM;
@@ -13702,10 +14727,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (BNXT_PF(bp))
bnxt_vpd_read_info(bp);
- if (BNXT_CHIP_P5(bp)) {
- bp->flags |= BNXT_FLAG_CHIP_P5;
- if (BNXT_CHIP_SR2(bp))
- bp->flags |= BNXT_FLAG_CHIP_SR2;
+ if (BNXT_CHIP_P5_PLUS(bp)) {
+ bp->flags |= BNXT_FLAG_CHIP_P5_PLUS;
+ if (BNXT_CHIP_P7(bp))
+ bp->flags |= BNXT_FLAG_CHIP_P7;
}
rc = bnxt_alloc_rss_indir_tbl(bp);
@@ -13730,6 +14755,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
NETIF_F_RXCSUM | NETIF_F_GRO;
+ if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
+ dev->hw_features |= NETIF_F_GSO_UDP_L4;
if (BNXT_SUPPORTS_TPA(bp))
dev->hw_features |= NETIF_F_LRO;
@@ -13740,7 +14767,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
- dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
+ if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
+ dev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
+ if (bp->flags & BNXT_FLAG_CHIP_P7)
+ dev->udp_tunnel_nic_info = &bnxt_udp_tunnels_p7;
+ else
+ dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
NETIF_F_GSO_GRE_CSUM;
@@ -13768,7 +14800,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bp->gro_func = bnxt_gro_func_5730x;
if (BNXT_CHIP_P4(bp))
bp->gro_func = bnxt_gro_func_5731x;
- else if (BNXT_CHIP_P5(bp))
+ else if (BNXT_CHIP_P5_PLUS(bp))
bp->gro_func = bnxt_gro_func_5750x;
}
if (!BNXT_CHIP_P4_PLUS(bp))
@@ -13794,6 +14826,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
+ bnxt_init_l2_fltr_tbl(bp);
bnxt_set_rx_skb_mode(bp, false);
bnxt_set_tpa_flags(bp);
bnxt_set_ring_params(bp);
@@ -13876,8 +14909,6 @@ init_err_pci_clean:
bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
bnxt_free_ctx_mem(bp);
- kfree(bp->ctx);
- bp->ctx = NULL;
kfree(bp->rss_indir_tbl);
bp->rss_indir_tbl = NULL;
@@ -13930,8 +14961,6 @@ static int bnxt_suspend(struct device *device)
bnxt_hwrm_func_drv_unrgtr(bp);
pci_disable_device(bp->pdev);
bnxt_free_ctx_mem(bp);
- kfree(bp->ctx);
- bp->ctx = NULL;
rtnl_unlock();
return rc;
}
@@ -14008,6 +15037,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(netdev);
+ bool abort = false;
netdev_info(netdev, "PCI I/O error detected\n");
@@ -14016,22 +15046,31 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
bnxt_ulp_stop(bp);
- if (state == pci_channel_io_perm_failure) {
+ if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+ netdev_err(bp->dev, "Firmware reset already in progress\n");
+ abort = true;
+ }
+
+ if (abort || state == pci_channel_io_perm_failure) {
rtnl_unlock();
return PCI_ERS_RESULT_DISCONNECT;
}
- if (state == pci_channel_io_frozen)
+ /* Link is not reliable anymore if state is pci_channel_io_frozen
+ * so we disable bus master to prevent any potential bad DMAs before
+ * freeing kernel memory.
+ */
+ if (state == pci_channel_io_frozen) {
set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
+ bnxt_fw_fatal_close(bp);
+ }
if (netif_running(netdev))
- bnxt_close(netdev);
+ __bnxt_close_nic(bp, true, true);
if (pci_is_enabled(pdev))
pci_disable_device(pdev);
bnxt_free_ctx_mem(bp);
- kfree(bp->ctx);
- bp->ctx = NULL;
rtnl_unlock();
 /* Request a slot reset. */
@@ -14111,6 +15150,7 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
}
reset_exit:
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
bnxt_clear_reservations(bp, true);
rtnl_unlock();
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index a7d7b09ea1..47338b48ca 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -18,7 +18,7 @@
*/
#define DRV_VER_MAJ 1
#define DRV_VER_MIN 10
-#define DRV_VER_UPD 2
+#define DRV_VER_UPD 3
#include <linux/ethtool.h>
#include <linux/interrupt.h>
@@ -61,6 +61,24 @@ struct tx_bd {
__le64 tx_bd_haddr;
} __packed;
+#define TX_OPAQUE_IDX_MASK 0x0000ffff
+#define TX_OPAQUE_BDS_MASK 0x00ff0000
+#define TX_OPAQUE_BDS_SHIFT 16
+#define TX_OPAQUE_RING_MASK 0xff000000
+#define TX_OPAQUE_RING_SHIFT 24
+
+#define SET_TX_OPAQUE(bp, txr, idx, bds) \
+ (((txr)->tx_napi_idx << TX_OPAQUE_RING_SHIFT) | \
+ ((bds) << TX_OPAQUE_BDS_SHIFT) | ((idx) & (bp)->tx_ring_mask))
+
+#define TX_OPAQUE_IDX(opq) ((opq) & TX_OPAQUE_IDX_MASK)
+#define TX_OPAQUE_RING(opq) (((opq) & TX_OPAQUE_RING_MASK) >> \
+ TX_OPAQUE_RING_SHIFT)
+#define TX_OPAQUE_BDS(opq) (((opq) & TX_OPAQUE_BDS_MASK) >> \
+ TX_OPAQUE_BDS_SHIFT)
+#define TX_OPAQUE_PROD(bp, opq) ((TX_OPAQUE_IDX(opq) + TX_OPAQUE_BDS(opq)) &\
+ (bp)->tx_ring_mask)
+
struct tx_bd_ext {
__le32 tx_bd_hsize_lflags;
#define TX_BD_FLAGS_TCP_UDP_CHKSUM (1 << 0)
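The new opaque word packs three fields so the TX completion path can recover the
ring, the BD count, and the next producer index without any per-packet state. A
minimal sketch of the round trip, using the masks defined above and an
illustrative 512-entry ring (mask 0x1ff):

#include <assert.h>
#include <stdint.h>

#define TX_OPAQUE_IDX_MASK   0x0000ffff
#define TX_OPAQUE_BDS_MASK   0x00ff0000
#define TX_OPAQUE_BDS_SHIFT  16
#define TX_OPAQUE_RING_MASK  0xff000000
#define TX_OPAQUE_RING_SHIFT 24

int main(void)
{
        uint32_t ring_mask = 0x1ff;             /* illustrative 512-entry ring */
        uint32_t napi_idx = 3, prod = 0x1fe, bds = 4;

        /* SET_TX_OPAQUE(): pack ring id, BD count and masked producer */
        uint32_t opq = (napi_idx << TX_OPAQUE_RING_SHIFT) |
                       (bds << TX_OPAQUE_BDS_SHIFT) |
                       (prod & ring_mask);

        assert(((opq & TX_OPAQUE_RING_MASK) >> TX_OPAQUE_RING_SHIFT) == napi_idx);
        assert(((opq & TX_OPAQUE_BDS_MASK) >> TX_OPAQUE_BDS_SHIFT) == bds);
        /* TX_OPAQUE_PROD(): idx + bds wraps through the ring mask */
        assert((((opq & TX_OPAQUE_IDX_MASK) + bds) & ring_mask) == 0x2);
        return 0;
}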
@@ -121,11 +139,15 @@ struct tx_cmp {
__le32 tx_cmp_flags_type;
#define CMP_TYPE (0x3f << 0)
#define CMP_TYPE_TX_L2_CMP 0
+ #define CMP_TYPE_TX_L2_COAL_CMP 2
+ #define CMP_TYPE_TX_L2_PKT_TS_CMP 4
#define CMP_TYPE_RX_L2_CMP 17
#define CMP_TYPE_RX_AGG_CMP 18
#define CMP_TYPE_RX_L2_TPA_START_CMP 19
#define CMP_TYPE_RX_L2_TPA_END_CMP 21
#define CMP_TYPE_RX_TPA_AGG_CMP 22
+ #define CMP_TYPE_RX_L2_V3_CMP 23
+ #define CMP_TYPE_RX_L2_TPA_START_V3_CMP 25
#define CMP_TYPE_STATUS_CMP 32
#define CMP_TYPE_REMOTE_DRIVER_REQ 34
#define CMP_TYPE_REMOTE_DRIVER_RESP 36
@@ -152,9 +174,13 @@ struct tx_cmp {
#define TX_CMP_ERRORS_DMA_ERROR (1 << 6)
#define TX_CMP_ERRORS_HINT_TOO_SHORT (1 << 7)
- __le32 tx_cmp_unsed_3;
+ __le32 sq_cons_idx;
+ #define TX_CMP_SQ_CONS_IDX_MASK 0x00ffffff
};
+#define TX_CMP_SQ_CONS_IDX(txcmp) \
+ (le32_to_cpu((txcmp)->sq_cons_idx) & TX_CMP_SQ_CONS_IDX_MASK)
+
struct rx_cmp {
__le32 rx_cmp_len_flags_type;
#define RX_CMP_CMP_TYPE (0x3f << 0)
@@ -182,8 +208,20 @@ struct rx_cmp {
#define RX_CMP_AGG_BUFS_SHIFT 1
#define RX_CMP_RSS_HASH_TYPE (0x7f << 9)
#define RX_CMP_RSS_HASH_TYPE_SHIFT 9
+ #define RX_CMP_V3_RSS_EXT_OP_LEGACY (0xf << 12)
+ #define RX_CMP_V3_RSS_EXT_OP_LEGACY_SHIFT 12
+ #define RX_CMP_V3_RSS_EXT_OP_NEW (0xf << 8)
+ #define RX_CMP_V3_RSS_EXT_OP_NEW_SHIFT 8
#define RX_CMP_PAYLOAD_OFFSET (0xff << 16)
#define RX_CMP_PAYLOAD_OFFSET_SHIFT 16
+ #define RX_CMP_SUB_NS_TS (0xf << 16)
+ #define RX_CMP_SUB_NS_TS_SHIFT 16
+ #define RX_CMP_METADATA1 (0xf << 28)
+ #define RX_CMP_METADATA1_SHIFT 28
+ #define RX_CMP_METADATA1_TPID_SEL (0x7 << 28)
+ #define RX_CMP_METADATA1_TPID_8021Q (0x1 << 28)
+ #define RX_CMP_METADATA1_TPID_8021AD (0x0 << 28)
+ #define RX_CMP_METADATA1_VALID (0x8 << 28)
__le32 rx_cmp_rss_hash;
};
@@ -203,6 +241,30 @@ struct rx_cmp {
(((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_RSS_HASH_TYPE) >>\
RX_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
+#define RX_CMP_V3_HASH_TYPE_LEGACY(rxcmp) \
+ ((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_V3_RSS_EXT_OP_LEGACY) >>\
+ RX_CMP_V3_RSS_EXT_OP_LEGACY_SHIFT)
+
+#define RX_CMP_V3_HASH_TYPE_NEW(rxcmp) \
+ ((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_V3_RSS_EXT_OP_NEW) >>\
+ RX_CMP_V3_RSS_EXT_OP_NEW_SHIFT)
+
+#define RX_CMP_V3_HASH_TYPE(bp, rxcmp) \
+ (((bp)->rss_cap & BNXT_RSS_CAP_RSS_TCAM) ? \
+ RX_CMP_V3_HASH_TYPE_NEW(rxcmp) : \
+ RX_CMP_V3_HASH_TYPE_LEGACY(rxcmp))
+
+#define EXT_OP_INNER_4 0x0
+#define EXT_OP_OUTER_4 0x2
+#define EXT_OP_INNFL_3 0x8
+#define EXT_OP_OUTFL_3 0xa
+
+#define RX_CMP_VLAN_VALID(rxcmp) \
+ ((rxcmp)->rx_cmp_misc_v1 & cpu_to_le32(RX_CMP_METADATA1_VALID))
+
+#define RX_CMP_VLAN_TPID_SEL(rxcmp) \
+ (le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_METADATA1_TPID_SEL)
+
struct rx_cmp_ext {
__le32 rx_cmp_flags2;
#define RX_CMP_FLAGS2_IP_CS_CALC 0x1
@@ -250,6 +312,9 @@ struct rx_cmp_ext {
#define RX_CMPL_CFA_CODE_MASK (0xffff << 16)
#define RX_CMPL_CFA_CODE_SFT 16
+ #define RX_CMPL_METADATA0_TCI_MASK (0xffff << 16)
+ #define RX_CMPL_METADATA0_VID_MASK (0x0fff << 16)
+ #define RX_CMPL_METADATA0_SFT 16
__le32 rx_cmp_timestamp;
};
@@ -275,6 +340,10 @@ struct rx_cmp_ext {
((le32_to_cpu((rxcmpl1)->rx_cmp_cfa_code_errors_v2) & \
RX_CMPL_CFA_CODE_MASK) >> RX_CMPL_CFA_CODE_SFT)
+#define RX_CMP_METADATA0_TCI(rxcmp1) \
+ ((le32_to_cpu((rxcmp1)->rx_cmp_cfa_code_errors_v2) & \
+ RX_CMPL_METADATA0_TCI_MASK) >> RX_CMPL_METADATA0_SFT)
+
struct rx_agg_cmp {
__le32 rx_agg_cmp_len_flags_type;
#define RX_AGG_CMP_TYPE (0x3f << 0)
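The metadata words added above move VLAN reporting into the completion itself:
metadata1 (bits 31:28 of rx_cmp_misc_v1) carries a valid bit plus a TPID select,
and metadata0 (bits 31:16 of the second completion word) carries the TCI. A
hedged decode sketch, with host-endian values standing in for __le32:

#include <stdint.h>
#include <stdio.h>

#define RX_CMP_METADATA1_VALID      (0x8u << 28)
#define RX_CMP_METADATA1_TPID_SEL   (0x7u << 28)
#define RX_CMP_METADATA1_TPID_8021Q (0x1u << 28)
#define RX_CMP_METADATA0_TCI_MASK   (0xffffu << 16)
#define RX_CMP_METADATA0_SFT        16

int main(void)
{
        uint32_t misc_v1 = (0x8u | 0x1u) << 28;   /* valid, 802.1Q */
        uint32_t errors_v2 = 0x0064u << 16;       /* TCI = VLAN 100 */

        if (misc_v1 & RX_CMP_METADATA1_VALID) {
                uint16_t tci = (errors_v2 & RX_CMP_METADATA0_TCI_MASK) >>
                               RX_CMP_METADATA0_SFT;
                int is_8021q = (misc_v1 & RX_CMP_METADATA1_TPID_SEL) ==
                               RX_CMP_METADATA1_TPID_8021Q;

                printf("vlan %u, tpid %s\n", tci & 0xfff,
                       is_8021q ? "0x8100" : "0x88a8");
        }
        return 0;
}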
@@ -317,10 +386,18 @@ struct rx_tpa_start_cmp {
#define RX_TPA_START_CMP_V1 (0x1 << 0)
#define RX_TPA_START_CMP_RSS_HASH_TYPE (0x7f << 9)
#define RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT 9
+ #define RX_TPA_START_CMP_V3_RSS_HASH_TYPE (0x1ff << 7)
+ #define RX_TPA_START_CMP_V3_RSS_HASH_TYPE_SHIFT 7
#define RX_TPA_START_CMP_AGG_ID (0x7f << 25)
#define RX_TPA_START_CMP_AGG_ID_SHIFT 25
#define RX_TPA_START_CMP_AGG_ID_P5 (0xffff << 16)
#define RX_TPA_START_CMP_AGG_ID_SHIFT_P5 16
+ #define RX_TPA_START_CMP_METADATA1 (0xf << 28)
+ #define RX_TPA_START_CMP_METADATA1_SHIFT 28
+ #define RX_TPA_START_METADATA1_TPID_SEL (0x7 << 28)
+ #define RX_TPA_START_METADATA1_TPID_8021Q (0x1 << 28)
+ #define RX_TPA_START_METADATA1_TPID_8021AD (0x0 << 28)
+ #define RX_TPA_START_METADATA1_VALID (0x8 << 28)
__le32 rx_tpa_start_cmp_rss_hash;
};
@@ -334,6 +411,11 @@ struct rx_tpa_start_cmp {
RX_TPA_START_CMP_RSS_HASH_TYPE) >> \
RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
+#define TPA_START_V3_HASH_TYPE(rx_tpa_start) \
+ (((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
+ RX_TPA_START_CMP_V3_RSS_HASH_TYPE) >> \
+ RX_TPA_START_CMP_V3_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
+
#define TPA_START_AGG_ID(rx_tpa_start) \
((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
RX_TPA_START_CMP_AGG_ID) >> RX_TPA_START_CMP_AGG_ID_SHIFT)
@@ -346,6 +428,14 @@ struct rx_tpa_start_cmp {
((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type & \
cpu_to_le32(RX_TPA_START_CMP_FLAGS_ERROR))
+#define TPA_START_VLAN_VALID(rx_tpa_start) \
+ ((rx_tpa_start)->rx_tpa_start_cmp_misc_v1 & \
+ cpu_to_le32(RX_TPA_START_METADATA1_VALID))
+
+#define TPA_START_VLAN_TPID_SEL(rx_tpa_start) \
+ (le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
+ RX_TPA_START_METADATA1_TPID_SEL)
+
struct rx_tpa_start_cmp_ext {
__le32 rx_tpa_start_cmp_flags2;
#define RX_TPA_START_CMP_FLAGS2_IP_CS_CALC (0x1 << 0)
@@ -356,6 +446,8 @@ struct rx_tpa_start_cmp_ext {
#define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_VALID (0x1 << 9)
#define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT (0x3 << 10)
#define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT_SHIFT 10
+ #define RX_TPA_START_CMP_V3_FLAGS2_T_IP_TYPE (0x1 << 10)
+ #define RX_TPA_START_CMP_V3_FLAGS2_AGG_GRO (0x1 << 11)
#define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL (0xffff << 16)
#define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_SHIFT 16
@@ -369,6 +461,9 @@ struct rx_tpa_start_cmp_ext {
#define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_FLUSH (0x5 << 1)
#define RX_TPA_START_CMP_CFA_CODE (0xffff << 16)
#define RX_TPA_START_CMPL_CFA_CODE_SHIFT 16
+ #define RX_TPA_START_CMP_METADATA0_TCI_MASK (0xffff << 16)
+ #define RX_TPA_START_CMP_METADATA0_VID_MASK (0x0fff << 16)
+ #define RX_TPA_START_CMP_METADATA0_SFT 16
__le32 rx_tpa_start_cmp_hdr_info;
};
@@ -385,6 +480,11 @@ struct rx_tpa_start_cmp_ext {
RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_MASK) >> \
RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_SHIFT)
+#define TPA_START_METADATA0_TCI(rx_tpa_start) \
+ ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) & \
+ RX_TPA_START_CMP_METADATA0_TCI_MASK) >> \
+ RX_TPA_START_CMP_METADATA0_SFT)
+
struct rx_tpa_end_cmp {
__le32 rx_tpa_end_cmp_len_flags_type;
#define RX_TPA_END_CMP_TYPE (0x3f << 0)
@@ -529,6 +629,8 @@ struct nqe_cn {
#define NQ_CN_TYPE_SFT 0
#define NQ_CN_TYPE_CQ_NOTIFICATION 0x30UL
#define NQ_CN_TYPE_LAST NQ_CN_TYPE_CQ_NOTIFICATION
+ #define NQ_CN_TOGGLE_MASK 0xc0UL
+ #define NQ_CN_TOGGLE_SFT 6
__le16 reserved16;
__le32 cq_handle_low;
__le32 v;
@@ -536,6 +638,23 @@ struct nqe_cn {
__le32 cq_handle_high;
};
+#define BNXT_NQ_HDL_IDX_MASK 0x00ffffff
+#define BNXT_NQ_HDL_TYPE_MASK 0xff000000
+#define BNXT_NQ_HDL_TYPE_SHIFT 24
+#define BNXT_NQ_HDL_TYPE_RX 0x00
+#define BNXT_NQ_HDL_TYPE_TX 0x01
+
+#define BNXT_NQ_HDL_IDX(hdl) ((hdl) & BNXT_NQ_HDL_IDX_MASK)
+#define BNXT_NQ_HDL_TYPE(hdl) (((hdl) & BNXT_NQ_HDL_TYPE_MASK) >> \
+ BNXT_NQ_HDL_TYPE_SHIFT)
+
+#define BNXT_SET_NQ_HDL(cpr) \
+ (((cpr)->cp_ring_type << BNXT_NQ_HDL_TYPE_SHIFT) | (cpr)->cp_idx)
+
+#define NQE_CN_TYPE(type) ((type) & NQ_CN_TYPE_MASK)
+#define NQE_CN_TOGGLE(type) (((type) & NQ_CN_TOGGLE_MASK) >> \
+ NQ_CN_TOGGLE_SFT)
+
#define DB_IDX_MASK 0xffffff
#define DB_IDX_VALID (0x1 << 26)
#define DB_IRQ_DIS (0x1 << 27)
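The NQ handle encoding above lets a single 32-bit cq_handle identify both which
kind of completion ring fired (top byte) and its index within the NAPI (low 24
bits), replacing the old fixed two-slot cp_ring_arr. A small pack/unpack sketch:

#include <assert.h>
#include <stdint.h>

#define BNXT_NQ_HDL_IDX_MASK   0x00ffffff
#define BNXT_NQ_HDL_TYPE_MASK  0xff000000
#define BNXT_NQ_HDL_TYPE_SHIFT 24
#define BNXT_NQ_HDL_TYPE_RX    0x00
#define BNXT_NQ_HDL_TYPE_TX    0x01

int main(void)
{
        uint8_t cp_ring_type = BNXT_NQ_HDL_TYPE_TX;
        uint8_t cp_idx = 2;

        /* BNXT_SET_NQ_HDL(): type in the top byte, index below it */
        uint32_t hdl = ((uint32_t)cp_ring_type << BNXT_NQ_HDL_TYPE_SHIFT) | cp_idx;

        assert((hdl & BNXT_NQ_HDL_IDX_MASK) == 2);
        assert(((hdl & BNXT_NQ_HDL_TYPE_MASK) >> BNXT_NQ_HDL_TYPE_SHIFT) ==
               BNXT_NQ_HDL_TYPE_TX);
        return 0;
}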
@@ -551,9 +670,14 @@ struct nqe_cn {
/* 64-bit doorbell */
#define DBR_INDEX_MASK 0x0000000000ffffffULL
+#define DBR_EPOCH_MASK 0x01000000UL
+#define DBR_EPOCH_SFT 24
+#define DBR_TOGGLE_MASK 0x06000000UL
+#define DBR_TOGGLE_SFT 25
#define DBR_XID_MASK 0x000fffff00000000ULL
#define DBR_XID_SFT 32
#define DBR_PATH_L2 (0x1ULL << 56)
+#define DBR_VALID (0x1ULL << 58)
#define DBR_TYPE_SQ (0x0ULL << 60)
#define DBR_TYPE_RQ (0x1ULL << 60)
#define DBR_TYPE_SRQ (0x2ULL << 60)
@@ -566,6 +690,7 @@ struct nqe_cn {
#define DBR_TYPE_CQ_CUTOFF_ACK (0x9ULL << 60)
#define DBR_TYPE_NQ (0xaULL << 60)
#define DBR_TYPE_NQ_ARM (0xbULL << 60)
+#define DBR_TYPE_NQ_MASK (0xeULL << 60)
#define DBR_TYPE_NULL (0xfULL << 60)
#define DB_PF_OFFSET_P5 0x10000
@@ -661,10 +786,12 @@ struct nqe_cn {
*/
#define BNXT_MIN_TX_DESC_CNT (MAX_SKB_FRAGS + 2)
-#define RX_RING(x) (((x) & ~(RX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
+#define RX_RING(bp, x) (((x) & (bp)->rx_ring_mask) >> (BNXT_PAGE_SHIFT - 4))
+#define RX_AGG_RING(bp, x) (((x) & (bp)->rx_agg_ring_mask) >> \
+ (BNXT_PAGE_SHIFT - 4))
#define RX_IDX(x) ((x) & (RX_DESC_CNT - 1))
-#define TX_RING(x) (((x) & ~(TX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
+#define TX_RING(bp, x) (((x) & (bp)->tx_ring_mask) >> (BNXT_PAGE_SHIFT - 4))
#define TX_IDX(x) ((x) & (TX_DESC_CNT - 1))
#define CP_RING(x) (((x) & ~(CP_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
@@ -691,11 +818,14 @@ struct nqe_cn {
#define RX_CMP_TYPE(rxcmp) \
(le32_to_cpu((rxcmp)->rx_cmp_len_flags_type) & RX_CMP_CMP_TYPE)
-#define NEXT_RX(idx) (((idx) + 1) & bp->rx_ring_mask)
+#define RING_RX(bp, idx) ((idx) & (bp)->rx_ring_mask)
+#define NEXT_RX(idx) ((idx) + 1)
-#define NEXT_RX_AGG(idx) (((idx) + 1) & bp->rx_agg_ring_mask)
+#define RING_RX_AGG(bp, idx) ((idx) & (bp)->rx_agg_ring_mask)
+#define NEXT_RX_AGG(idx) ((idx) + 1)
-#define NEXT_TX(idx) (((idx) + 1) & bp->tx_ring_mask)
+#define RING_TX(bp, idx) ((idx) & (bp)->tx_ring_mask)
+#define NEXT_TX(idx) ((idx) + 1)
#define ADV_RAW_CMP(idx, n) ((idx) + (n))
#define NEXT_RAW_CMP(idx) ADV_RAW_CMP(idx, 1)
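Note the shape of this change: NEXT_RX/NEXT_TX no longer mask, so producer and
consumer indices now count monotonically, and masking is deferred to the new
RING_RX()/RING_TX() helpers at the point of array access. That leaves the bit
just above the mask free to act as a wrap-parity (epoch) bit for the doorbell,
as sketched below under illustrative sizes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t ring_mask = 0xff;      /* illustrative 256-entry ring */
        uint16_t prod = 0;

        for (int pkt = 0; pkt < 600; pkt++)
                prod++;                 /* NEXT_TX(): no masking */

        printf("slot  = %u\n", prod & ring_mask);   /* RING_TX() */
        printf("epoch = %u\n", (prod >> 8) & 1);    /* wrap parity */
        return 0;
}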
@@ -708,6 +838,7 @@ struct nqe_cn {
#define BNXT_AGG_EVENT 2
#define BNXT_TX_EVENT 4
#define BNXT_REDIRECT_EVENT 8
+#define BNXT_TX_CMP_EVENT 0x10
struct bnxt_sw_tx_bd {
union {
@@ -736,13 +867,6 @@ struct bnxt_sw_rx_agg_bd {
dma_addr_t mapping;
};
-struct bnxt_mem_init {
- u8 init_val;
- u16 offset;
-#define BNXT_MEM_INVALID_OFFSET 0xffff
- u16 size;
-};
-
struct bnxt_ring_mem_info {
int nr_pages;
int page_size;
@@ -752,7 +876,7 @@ struct bnxt_ring_mem_info {
#define BNXT_RMEM_USE_FULL_PAGE_FLAG 4
u16 depth;
- struct bnxt_mem_init *mem_init;
+ struct bnxt_ctx_mem_type *ctx_mem;
void **pg_arr;
dma_addr_t *dma_arr;
@@ -794,13 +918,27 @@ struct bnxt_db_info {
u64 db_key64;
u32 db_key32;
};
+ u32 db_ring_mask;
+ u32 db_epoch_mask;
+ u8 db_epoch_shift;
};
+#define DB_EPOCH(db, idx) (((idx) & (db)->db_epoch_mask) << \
+ ((db)->db_epoch_shift))
+
+#define DB_TOGGLE(tgl) ((tgl) << DBR_TOGGLE_SFT)
+
+#define DB_RING_IDX(db, idx) (((idx) & (db)->db_ring_mask) | \
+ DB_EPOCH(db, idx))
+
struct bnxt_tx_ring_info {
struct bnxt_napi *bnapi;
+ struct bnxt_cp_ring_info *tx_cpr;
u16 tx_prod;
u16 tx_cons;
+ u16 tx_hw_cons;
u16 txq_index;
+ u8 tx_napi_idx;
u8 kick_pending;
struct bnxt_db_info tx_db;
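DB_RING_IDX() above builds the doorbell payload from the masked ring index plus
an epoch bit lifted from the first bit above the mask, so the hardware can tell
a full-ring wrap from a no-op write. A hedged sketch under assumed masks (a
256-entry ring, epoch landing at bit DBR_EPOCH_SFT):

#include <stdint.h>
#include <stdio.h>

#define DBR_EPOCH_SFT 24

struct db_info {
        uint32_t db_ring_mask;
        uint32_t db_epoch_mask;
        uint8_t  db_epoch_shift;
};

static uint32_t db_ring_idx(const struct db_info *db, uint32_t idx)
{
        uint32_t epoch = (idx & db->db_epoch_mask) << db->db_epoch_shift;

        return (idx & db->db_ring_mask) | epoch;
}

int main(void)
{
        /* 256-entry ring: mask 0xff, bit 8 shifted up to bit 24 */
        struct db_info db = { 0xff, 0x100, DBR_EPOCH_SFT - 8 };

        printf("idx 0x0ff -> db 0x%x\n", db_ring_idx(&db, 0x0ff));
        printf("idx 0x100 -> db 0x%x\n", db_ring_idx(&db, 0x100));
        return 0;
}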
@@ -895,6 +1033,8 @@ struct bnxt_tpa_info {
u16 cfa_code; /* cfa_code in TPA start compl */
u8 agg_count;
+ u8 vlan_valid:1;
+ u8 cfa_code_valid:1;
struct rx_agg_cmp *agg_arr;
};
@@ -907,6 +1047,7 @@ struct bnxt_tpa_idx_map {
struct bnxt_rx_ring_info {
struct bnxt_napi *bnapi;
+ struct bnxt_cp_ring_info *rx_cpr;
u16 rx_prod;
u16 rx_agg_prod;
u16 rx_sw_agg_prod;
@@ -986,6 +1127,11 @@ struct bnxt_cp_ring_info {
u8 had_work_done:1;
u8 has_more_work:1;
+ u8 had_nqe_notify:1;
+ u8 toggle;
+
+ u8 cp_ring_type;
+ u8 cp_idx;
u32 last_cp_raw_cons;
@@ -1010,11 +1156,18 @@ struct bnxt_cp_ring_info {
struct bnxt_ring_struct cp_ring_struct;
- struct bnxt_cp_ring_info *cp_ring_arr[2];
-#define BNXT_RX_HDL 0
-#define BNXT_TX_HDL 1
+ int cp_ring_count;
+ struct bnxt_cp_ring_info *cp_ring_arr;
};
+#define BNXT_MAX_QUEUE 8
+#define BNXT_MAX_TXR_PER_NAPI BNXT_MAX_QUEUE
+
+#define bnxt_for_each_napi_tx(iter, bnapi, txr) \
+ for (iter = 0, txr = (bnapi)->tx_ring[0]; txr; \
+ txr = (iter < BNXT_MAX_TXR_PER_NAPI - 1) ? \
+ (bnapi)->tx_ring[++iter] : NULL)
+
struct bnxt_napi {
struct napi_struct napi;
struct bnxt *bp;
@@ -1022,11 +1175,10 @@ struct bnxt_napi {
int index;
struct bnxt_cp_ring_info cp_ring;
struct bnxt_rx_ring_info *rx_ring;
- struct bnxt_tx_ring_info *tx_ring;
+ struct bnxt_tx_ring_info *tx_ring[BNXT_MAX_TXR_PER_NAPI];
void (*tx_int)(struct bnxt *, struct bnxt_napi *,
int budget);
- int tx_pkts;
u8 events;
u8 tx_fault:1;
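With tx_ring now an array, bnxt_for_each_napi_tx() walks the NAPI's rings until
the first NULL slot or the per-NAPI limit. A self-contained stand-in (the macro
body is a local copy of the one above; the structs are trimmed to what it
touches):

#include <stdio.h>

#define BNXT_MAX_TXR_PER_NAPI 8

struct tx_ring { int id; };
struct napi_ctx { struct tx_ring *tx_ring[BNXT_MAX_TXR_PER_NAPI]; };

#define for_each_napi_tx(iter, bnapi, txr)                              \
        for (iter = 0, txr = (bnapi)->tx_ring[0]; txr;                  \
             txr = (iter < BNXT_MAX_TXR_PER_NAPI - 1) ?                 \
                   (bnapi)->tx_ring[++iter] : NULL)

int main(void)
{
        struct tx_ring a = { 0 }, b = { 1 };
        struct napi_ctx napi = { { &a, &b } };  /* remaining slots NULL */
        struct tx_ring *txr;
        int i;

        for_each_napi_tx(i, &napi, txr)
                printf("reclaiming tx ring %d\n", txr->id);
        return 0;
}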
@@ -1067,7 +1219,7 @@ struct bnxt_vnic_info {
u16 fw_rss_cos_lb_ctx[BNXT_MAX_CTX_PER_VNIC];
u16 fw_l2_ctx_id;
#define BNXT_MAX_UC_ADDRS 4
- __le64 fw_l2_filter_id[BNXT_MAX_UC_ADDRS];
+ struct bnxt_l2_filter *l2_filters[BNXT_MAX_UC_ADDRS];
/* index 0 always dev_addr */
u16 uc_filter_count;
u8 *uc_list;
@@ -1180,19 +1332,71 @@ struct bnxt_pf_info {
struct bnxt_vf_info *vf;
};
-struct bnxt_ntuple_filter {
+struct bnxt_filter_base {
struct hlist_node hash;
- u8 dst_mac_addr[ETH_ALEN];
- u8 src_mac_addr[ETH_ALEN];
- struct flow_keys fkeys;
__le64 filter_id;
+ u8 type;
+#define BNXT_FLTR_TYPE_NTUPLE 1
+#define BNXT_FLTR_TYPE_L2 2
+ u8 flags;
+#define BNXT_ACT_DROP 1
+#define BNXT_ACT_RING_DST 2
+#define BNXT_ACT_FUNC_DST 4
+#define BNXT_ACT_NO_AGING 8
u16 sw_id;
- u8 l2_fltr_idx;
u16 rxq;
- u32 flow_id;
+ u16 fw_vnic_id;
+ u16 vf_idx;
unsigned long state;
#define BNXT_FLTR_VALID 0
-#define BNXT_FLTR_UPDATE 1
+#define BNXT_FLTR_INSERTED 1
+#define BNXT_FLTR_FW_DELETED 2
+
+ struct rcu_head rcu;
+};
+
+struct bnxt_ntuple_filter {
+ struct bnxt_filter_base base;
+ struct flow_keys fkeys;
+ struct bnxt_l2_filter *l2_fltr;
+ u32 ntuple_flags;
+#define BNXT_NTUPLE_MATCH_SRC_IP 1
+#define BNXT_NTUPLE_MATCH_DST_IP 2
+#define BNXT_NTUPLE_MATCH_SRC_PORT 4
+#define BNXT_NTUPLE_MATCH_DST_PORT 8
+#define BNXT_NTUPLE_MATCH_ALL (BNXT_NTUPLE_MATCH_SRC_IP | \
+ BNXT_NTUPLE_MATCH_DST_IP | \
+ BNXT_NTUPLE_MATCH_SRC_PORT | \
+ BNXT_NTUPLE_MATCH_DST_PORT)
+ u32 flow_id;
+};
+
+struct bnxt_l2_key {
+ union {
+ struct {
+ u8 dst_mac_addr[ETH_ALEN];
+ u16 vlan;
+ };
+ u32 filter_key;
+ };
+};
+
+struct bnxt_ipv4_tuple {
+ struct flow_dissector_key_ipv4_addrs v4addrs;
+ struct flow_dissector_key_ports ports;
+};
+
+struct bnxt_ipv6_tuple {
+ struct flow_dissector_key_ipv6_addrs v6addrs;
+ struct flow_dissector_key_ports ports;
+};
+
+#define BNXT_L2_KEY_SIZE (sizeof(struct bnxt_l2_key) / 4)
+
+struct bnxt_l2_filter {
+ struct bnxt_filter_base base;
+ struct bnxt_l2_key l2_key;
+ atomic_t refcnt;
};
struct bnxt_link_info {
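The vnic now holds pointers to shared, refcounted bnxt_l2_filter objects rather
than raw firmware filter IDs, and each ntuple rule pins its underlying L2 filter
(see the atomic_inc on l2_fltr->refcnt in the ethtool path further down). A
hedged user-space analogue of that ownership model, not the driver code itself:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct l2_key { unsigned char dst_mac[6]; unsigned short vlan; };

struct l2_filter {
        struct l2_key key;
        atomic_int refcnt;
};

static struct l2_filter *l2_filter_get(struct l2_filter *f)
{
        atomic_fetch_add(&f->refcnt, 1);
        return f;
}

static void l2_filter_put(struct l2_filter *f)
{
        if (atomic_fetch_sub(&f->refcnt, 1) == 1)
                free(f);        /* last reference dropped */
}

int main(void)
{
        struct l2_filter *f = calloc(1, sizeof(*f));

        atomic_init(&f->refcnt, 1);     /* owner: the vnic */
        l2_filter_get(f);               /* owner: an ntuple rule */
        l2_filter_put(f);               /* ntuple rule deleted */
        l2_filter_put(f);               /* vnic torn down, freed */
        printf("done\n");
        return 0;
}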
@@ -1214,6 +1418,7 @@ struct bnxt_link_info {
#define BNXT_LINK_STATE_DOWN 1
#define BNXT_LINK_STATE_UP 2
#define BNXT_LINK_IS_UP(bp) ((bp)->link_info.link_state == BNXT_LINK_STATE_UP)
+ u8 active_lanes;
u8 duplex;
#define BNXT_LINK_DUPLEX_HALF PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF
#define BNXT_LINK_DUPLEX_FULL PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
@@ -1248,8 +1453,11 @@ struct bnxt_link_info {
#define BNXT_LINK_SPEED_50GB PORT_PHY_QCFG_RESP_LINK_SPEED_50GB
#define BNXT_LINK_SPEED_100GB PORT_PHY_QCFG_RESP_LINK_SPEED_100GB
#define BNXT_LINK_SPEED_200GB PORT_PHY_QCFG_RESP_LINK_SPEED_200GB
+#define BNXT_LINK_SPEED_400GB PORT_PHY_QCFG_RESP_LINK_SPEED_400GB
u16 support_speeds;
u16 support_pam4_speeds;
+ u16 support_speeds2;
+
u16 auto_link_speeds; /* fw adv setting */
#define BNXT_LINK_SPEED_MSK_100MB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB
#define BNXT_LINK_SPEED_MSK_1GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB
@@ -1265,12 +1473,52 @@ struct bnxt_link_info {
#define BNXT_LINK_PAM4_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_50G
#define BNXT_LINK_PAM4_SPEED_MSK_100GB PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_100G
#define BNXT_LINK_PAM4_SPEED_MSK_200GB PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_200G
+ u16 auto_link_speeds2;
+#define BNXT_LINK_SPEEDS2_MSK_1GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_1GB
+#define BNXT_LINK_SPEEDS2_MSK_10GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_10GB
+#define BNXT_LINK_SPEEDS2_MSK_25GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_25GB
+#define BNXT_LINK_SPEEDS2_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_40GB
+#define BNXT_LINK_SPEEDS2_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB
+#define BNXT_LINK_SPEEDS2_MSK_100GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB
+#define BNXT_LINK_SPEEDS2_MSK_50GB_PAM4 \
+ PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB_PAM4_56
+#define BNXT_LINK_SPEEDS2_MSK_100GB_PAM4 \
+ PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_56
+#define BNXT_LINK_SPEEDS2_MSK_200GB_PAM4 \
+ PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_56
+#define BNXT_LINK_SPEEDS2_MSK_400GB_PAM4 \
+ PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_56
+#define BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112 \
+ PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_112
+#define BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112 \
+ PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_112
+#define BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112 \
+ PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_112
+
u16 support_auto_speeds;
u16 support_pam4_auto_speeds;
+ u16 support_auto_speeds2;
+
u16 lp_auto_link_speeds;
u16 lp_auto_pam4_link_speeds;
u16 force_link_speed;
u16 force_pam4_link_speed;
+ u16 force_link_speed2;
+#define BNXT_LINK_SPEED_50GB_PAM4 \
+ PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB_PAM4_56
+#define BNXT_LINK_SPEED_100GB_PAM4 \
+ PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_56
+#define BNXT_LINK_SPEED_200GB_PAM4 \
+ PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_56
+#define BNXT_LINK_SPEED_400GB_PAM4 \
+ PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_56
+#define BNXT_LINK_SPEED_100GB_PAM4_112 \
+ PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_112
+#define BNXT_LINK_SPEED_200GB_PAM4_112 \
+ PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_112
+#define BNXT_LINK_SPEED_400GB_PAM4_112 \
+ PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112
+
u32 preemphasis;
u8 module_status;
u8 active_fec_sig_mode;
@@ -1301,6 +1549,7 @@ struct bnxt_link_info {
u8 req_signal_mode;
#define BNXT_SIG_MODE_NRZ PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ
#define BNXT_SIG_MODE_PAM4 PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4
+#define BNXT_SIG_MODE_PAM4_112 PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112
#define BNXT_SIG_MODE_MAX (PORT_PHY_QCFG_RESP_SIGNAL_MODE_LAST + 1)
u8 req_duplex;
u8 req_flow_ctrl;
@@ -1361,8 +1610,6 @@ struct bnxt_link_info {
(PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE | \
BNXT_FEC_RS_OFF(link_info))
-#define BNXT_MAX_QUEUE 8
-
struct bnxt_queue_info {
u8 queue_id;
u8 queue_profile;
@@ -1391,7 +1638,7 @@ struct bnxt_test_info {
};
#define CHIMP_REG_VIEW_ADDR \
- ((bp->flags & BNXT_FLAG_CHIP_P5) ? 0x80000000 : 0xb1000000)
+ ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) ? 0x80000000 : 0xb1000000)
#define BNXT_GRCPF_REG_CHIMP_COMM 0x0
#define BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER 0x100
@@ -1515,53 +1762,73 @@ do { \
attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K; \
} while (0)
+struct bnxt_ctx_mem_type {
+ u16 type;
+ u16 entry_size;
+ u32 flags;
+#define BNXT_CTX_MEM_TYPE_VALID FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID
+ u32 instance_bmap;
+ u8 init_value;
+ u8 entry_multiple;
+ u16 init_offset;
+#define BNXT_CTX_INIT_INVALID_OFFSET 0xffff
+ u32 max_entries;
+ u32 min_entries;
+ u8 last:1;
+ u8 split_entry_cnt;
+#define BNXT_MAX_SPLIT_ENTRY 4
+ union {
+ struct {
+ u32 qp_l2_entries;
+ u32 qp_qp1_entries;
+ u32 qp_fast_qpmd_entries;
+ };
+ u32 srq_l2_entries;
+ u32 cq_l2_entries;
+ u32 vnic_entries;
+ struct {
+ u32 mrav_av_entries;
+ u32 mrav_num_entries_units;
+ };
+ u32 split[BNXT_MAX_SPLIT_ENTRY];
+ };
+ struct bnxt_ctx_pg_info *pg_info;
+};
+
+#define BNXT_CTX_MRAV_AV_SPLIT_ENTRY 0
+
+#define BNXT_CTX_QP FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP
+#define BNXT_CTX_SRQ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ
+#define BNXT_CTX_CQ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ
+#define BNXT_CTX_VNIC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC
+#define BNXT_CTX_STAT FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT
+#define BNXT_CTX_STQM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING
+#define BNXT_CTX_FTQM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING
+#define BNXT_CTX_MRAV FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV
+#define BNXT_CTX_TIM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM
+#define BNXT_CTX_TKC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TKC
+#define BNXT_CTX_RKC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RKC
+#define BNXT_CTX_MTQM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING
+#define BNXT_CTX_SQDBS FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW
+#define BNXT_CTX_RQDBS FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW
+#define BNXT_CTX_SRQDBS FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW
+#define BNXT_CTX_CQDBS FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW
+#define BNXT_CTX_QTKC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_TKC
+#define BNXT_CTX_QRKC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_RKC
+#define BNXT_CTX_TBLSC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE
+#define BNXT_CTX_XPAR FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION
+
+#define BNXT_CTX_MAX (BNXT_CTX_TIM + 1)
+#define BNXT_CTX_L2_MAX (BNXT_CTX_FTQM + 1)
+#define BNXT_CTX_V2_MAX (BNXT_CTX_XPAR + 1)
+#define BNXT_CTX_INV ((u16)-1)
+
struct bnxt_ctx_mem_info {
- u32 qp_max_entries;
- u16 qp_min_qp1_entries;
- u16 qp_max_l2_entries;
- u16 qp_entry_size;
- u16 srq_max_l2_entries;
- u32 srq_max_entries;
- u16 srq_entry_size;
- u16 cq_max_l2_entries;
- u32 cq_max_entries;
- u16 cq_entry_size;
- u16 vnic_max_vnic_entries;
- u16 vnic_max_ring_table_entries;
- u16 vnic_entry_size;
- u32 stat_max_entries;
- u16 stat_entry_size;
- u16 tqm_entry_size;
- u32 tqm_min_entries_per_ring;
- u32 tqm_max_entries_per_ring;
- u32 mrav_max_entries;
- u16 mrav_entry_size;
- u16 tim_entry_size;
- u32 tim_max_entries;
- u16 mrav_num_entries_units;
- u8 tqm_entries_multiple;
u8 tqm_fp_rings_count;
u32 flags;
#define BNXT_CTX_FLAG_INITED 0x01
-
- struct bnxt_ctx_pg_info qp_mem;
- struct bnxt_ctx_pg_info srq_mem;
- struct bnxt_ctx_pg_info cq_mem;
- struct bnxt_ctx_pg_info vnic_mem;
- struct bnxt_ctx_pg_info stat_mem;
- struct bnxt_ctx_pg_info mrav_mem;
- struct bnxt_ctx_pg_info tim_mem;
- struct bnxt_ctx_pg_info *tqm_mem[BNXT_MAX_TQM_RINGS];
-
-#define BNXT_CTX_MEM_INIT_QP 0
-#define BNXT_CTX_MEM_INIT_SRQ 1
-#define BNXT_CTX_MEM_INIT_CQ 2
-#define BNXT_CTX_MEM_INIT_VNIC 3
-#define BNXT_CTX_MEM_INIT_STAT 4
-#define BNXT_CTX_MEM_INIT_MRAV 5
-#define BNXT_CTX_MEM_INIT_MAX 6
- struct bnxt_mem_init mem_init[BNXT_CTX_MEM_INIT_MAX];
+ struct bnxt_ctx_mem_type ctx_arr[BNXT_CTX_V2_MAX];
};
enum bnxt_health_severity {
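The rewrite above replaces a field per context type with one table-driven
ctx_arr[] indexed by the BNXT_CTX_* type, which is what lets the V2 backing
store grow without touching the struct again. A sketch of consuming that
layout; BNXT_CTX_V2_MAX's value and the trimmed struct are illustrative:

#include <stdint.h>
#include <stdio.h>

#define BNXT_CTX_V2_MAX 25              /* illustrative bound */
#define BNXT_CTX_MEM_TYPE_VALID 0x1

struct ctx_mem_type {
        uint16_t entry_size;
        uint32_t flags;
        uint32_t max_entries;
};

int main(void)
{
        struct ctx_mem_type ctx_arr[BNXT_CTX_V2_MAX] = {
                [0] = { 64, BNXT_CTX_MEM_TYPE_VALID, 1024 },    /* e.g. QP */
                [2] = { 32, BNXT_CTX_MEM_TYPE_VALID, 512 },     /* e.g. CQ */
        };

        for (int type = 0; type < BNXT_CTX_V2_MAX; type++) {
                struct ctx_mem_type *ctxm = &ctx_arr[type];

                /* only act on types the firmware marked valid */
                if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID))
                        continue;
                printf("type %d: %u entries of %u bytes\n",
                       type, ctxm->max_entries, ctxm->entry_size);
        }
        return 0;
}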
@@ -1697,6 +1964,10 @@ enum board_idx {
BCM57508_NPAR,
BCM57504_NPAR,
BCM57502_NPAR,
+ BCM57608,
+ BCM57604,
+ BCM57602,
+ BCM57601,
BCM58802,
BCM58804,
BCM58808,
@@ -1744,14 +2015,14 @@ struct bnxt {
#define CHIP_NUM_57504 0x1751
#define CHIP_NUM_57502 0x1752
+#define CHIP_NUM_57608 0x1760
+
#define CHIP_NUM_58802 0xd802
#define CHIP_NUM_58804 0xd804
#define CHIP_NUM_58808 0xd808
u8 chip_rev;
-#define CHIP_NUM_58818 0xd818
-
#define BNXT_CHIP_NUM_5730X(chip_num) \
((chip_num) >= CHIP_NUM_57301 && \
(chip_num) <= CHIP_NUM_57304)
@@ -1801,7 +2072,7 @@ struct bnxt {
atomic_t intr_sem;
u32 flags;
- #define BNXT_FLAG_CHIP_P5 0x1
+ #define BNXT_FLAG_CHIP_P5_PLUS 0x1
#define BNXT_FLAG_VF 0x2
#define BNXT_FLAG_LRO 0x4
#ifdef CONFIG_INET
@@ -1820,8 +2091,6 @@ struct bnxt {
#define BNXT_FLAG_RFS 0x100
#define BNXT_FLAG_SHARED_RINGS 0x200
#define BNXT_FLAG_PORT_STATS 0x400
- #define BNXT_FLAG_UDP_RSS_CAP 0x800
- #define BNXT_FLAG_NEW_RSS_CAP 0x2000
#define BNXT_FLAG_WOL_CAP 0x4000
#define BNXT_FLAG_ROCEV1_CAP 0x8000
#define BNXT_FLAG_ROCEV2_CAP 0x10000
@@ -1829,13 +2098,15 @@ struct bnxt {
BNXT_FLAG_ROCEV2_CAP)
#define BNXT_FLAG_NO_AGG_RINGS 0x20000
#define BNXT_FLAG_RX_PAGE_MODE 0x40000
- #define BNXT_FLAG_CHIP_SR2 0x80000
+ #define BNXT_FLAG_CHIP_P7 0x80000
#define BNXT_FLAG_MULTI_HOST 0x100000
#define BNXT_FLAG_DSN_VALID 0x200000
#define BNXT_FLAG_DOUBLE_DB 0x400000
+ #define BNXT_FLAG_UDP_GSO_CAP 0x800000
#define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
#define BNXT_FLAG_DIM 0x2000000
#define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000
+ #define BNXT_FLAG_TX_COAL_CMPL 0x8000000
#define BNXT_FLAG_PORT_STATS_EXT 0x10000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
@@ -1855,21 +2126,21 @@ struct bnxt {
#define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
#define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
#define BNXT_SUPPORTS_TPA(bp) (!BNXT_CHIP_TYPE_NITRO_A0(bp) && \
- (!((bp)->flags & BNXT_FLAG_CHIP_P5) || \
+ (!((bp)->flags & BNXT_FLAG_CHIP_P5_PLUS) ||\
(bp)->max_tpa_v2) && !is_kdump_kernel())
#define BNXT_RX_JUMBO_MODE(bp) ((bp)->flags & BNXT_FLAG_JUMBO)
-#define BNXT_CHIP_SR2(bp) \
- ((bp)->chip_num == CHIP_NUM_58818)
+#define BNXT_CHIP_P7(bp) \
+ ((bp)->chip_num == CHIP_NUM_57608)
-#define BNXT_CHIP_P5_THOR(bp) \
+#define BNXT_CHIP_P5(bp) \
((bp)->chip_num == CHIP_NUM_57508 || \
(bp)->chip_num == CHIP_NUM_57504 || \
(bp)->chip_num == CHIP_NUM_57502)
/* Chip class phase 5 and later */
-#define BNXT_CHIP_P5(bp) \
- (BNXT_CHIP_P5_THOR(bp) || BNXT_CHIP_SR2(bp))
+#define BNXT_CHIP_P5_PLUS(bp) \
+ (BNXT_CHIP_P5(bp) || BNXT_CHIP_P7(bp))
/* Chip class phase 4.x */
#define BNXT_CHIP_P4(bp) \
@@ -1880,7 +2151,7 @@ struct bnxt {
!BNXT_CHIP_TYPE_NITRO_A0(bp)))
#define BNXT_CHIP_P4_PLUS(bp) \
- (BNXT_CHIP_P4(bp) || BNXT_CHIP_P5(bp))
+ (BNXT_CHIP_P4(bp) || BNXT_CHIP_P5_PLUS(bp))
struct bnxt_aux_priv *aux_priv;
struct bnxt_en_dev *edev;
@@ -1941,6 +2212,11 @@ struct bnxt {
u16 rss_indir_tbl_entries;
u32 rss_hash_cfg;
u32 rss_hash_delta;
+ u32 rss_cap;
+#define BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA BIT(0)
+#define BNXT_RSS_CAP_UDP_RSS_CAP BIT(1)
+#define BNXT_RSS_CAP_NEW_RSS_CAP BIT(2)
+#define BNXT_RSS_CAP_RSS_TCAM BIT(3)
u16 max_mtu;
u8 max_tc;
@@ -1949,6 +2225,7 @@ struct bnxt {
u8 tc_to_qidx[BNXT_MAX_QUEUE];
u8 q_ids[BNXT_MAX_QUEUE];
u8 max_q;
+ u8 num_tc;
unsigned int current_interval;
#define BNXT_TIMER_INTERVAL HZ
@@ -2006,7 +2283,6 @@ struct bnxt {
#define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2 BIT_ULL(16)
#define BNXT_FW_CAP_PCIE_STATS_SUPPORTED BIT_ULL(17)
#define BNXT_FW_CAP_EXT_STATS_SUPPORTED BIT_ULL(18)
- #define BNXT_FW_CAP_RSS_HASH_TYPE_DELTA BIT_ULL(19)
#define BNXT_FW_CAP_ERR_RECOVER_RELOAD BIT_ULL(20)
#define BNXT_FW_CAP_HOT_RESET BIT_ULL(21)
#define BNXT_FW_CAP_PTP_RTC BIT_ULL(22)
@@ -2023,6 +2299,8 @@ struct bnxt {
#define BNXT_FW_CAP_THRESHOLD_TEMP_SUPPORTED BIT_ULL(33)
#define BNXT_FW_CAP_DFLT_VLAN_TPID_PCP BIT_ULL(34)
#define BNXT_FW_CAP_PRE_RESV_VNICS BIT_ULL(35)
+ #define BNXT_FW_CAP_BACKING_STORE_V2 BIT_ULL(36)
+ #define BNXT_FW_CAP_VNIC_TUNNEL_TPA BIT_ULL(37)
u32 fw_dbg_cap;
@@ -2067,8 +2345,10 @@ struct bnxt {
u16 vxlan_fw_dst_port_id;
u16 nge_fw_dst_port_id;
+ u16 vxlan_gpe_fw_dst_port_id;
__be16 vxlan_port;
__be16 nge_port;
+ __be16 vxlan_gpe_port;
u8 port_partition_type;
u8 port_count;
u16 br_mode;
@@ -2136,9 +2416,11 @@ struct bnxt {
/* ensure atomic 64-bit doorbell writes on 32-bit systems. */
spinlock_t db_lock;
#endif
+ int db_offset; /* db_offset within db_size */
int db_size;
#define BNXT_NTP_FLTR_MAX_FLTR 4096
+#define BNXT_MAX_FLTR (BNXT_NTP_FLTR_MAX_FLTR + BNXT_L2_FLTR_MAX_FLTR)
#define BNXT_NTP_FLTR_HASH_SIZE 512
#define BNXT_NTP_FLTR_HASH_MASK (BNXT_NTP_FLTR_HASH_SIZE - 1)
struct hlist_head ntp_fltr_hash_tbl[BNXT_NTP_FLTR_HASH_SIZE];
@@ -2147,6 +2429,14 @@ struct bnxt {
unsigned long *ntp_fltr_bmap;
int ntp_fltr_count;
+#define BNXT_L2_FLTR_MAX_FLTR 1024
+#define BNXT_L2_FLTR_HASH_SIZE 32
+#define BNXT_L2_FLTR_HASH_MASK (BNXT_L2_FLTR_HASH_SIZE - 1)
+ struct hlist_head l2_fltr_hash_tbl[BNXT_L2_FLTR_HASH_SIZE];
+
+ u32 hash_seed;
+ u64 toeplitz_prefix;
+
/* To protect link related settings during link changes and
* ethtool settings changes.
*/
@@ -2169,6 +2459,7 @@ struct bnxt {
#define BNXT_PHY_FL_NO_PAUSE (PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED << 8)
#define BNXT_PHY_FL_NO_PFC (PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED << 8)
#define BNXT_PHY_FL_BANK_SEL (PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED << 8)
+#define BNXT_PHY_FL_SPEEDS2 (PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED << 8)
u8 num_tests;
struct bnxt_test_info *test_info;
@@ -2212,15 +2503,15 @@ struct bnxt {
#define BNXT_NUM_TX_RING_STATS 8
#define BNXT_NUM_TPA_RING_STATS 4
#define BNXT_NUM_TPA_RING_STATS_P5 5
-#define BNXT_NUM_TPA_RING_STATS_P5_SR2 6
+#define BNXT_NUM_TPA_RING_STATS_P7 6
#define BNXT_RING_STATS_SIZE_P5 \
((BNXT_NUM_RX_RING_STATS + BNXT_NUM_TX_RING_STATS + \
BNXT_NUM_TPA_RING_STATS_P5) * 8)
-#define BNXT_RING_STATS_SIZE_P5_SR2 \
+#define BNXT_RING_STATS_SIZE_P7 \
((BNXT_NUM_RX_RING_STATS + BNXT_NUM_TX_RING_STATS + \
- BNXT_NUM_TPA_RING_STATS_P5_SR2) * 8)
+ BNXT_NUM_TPA_RING_STATS_P7) * 8)
#define BNXT_GET_RING_STATS64(sw, counter) \
(*((sw) + offsetof(struct ctx_hw_stats, counter) / 8))
@@ -2303,10 +2594,11 @@ static inline void bnxt_writeq_relaxed(struct bnxt *bp, u64 val,
static inline void bnxt_db_write_relaxed(struct bnxt *bp,
struct bnxt_db_info *db, u32 idx)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- bnxt_writeq_relaxed(bp, db->db_key64 | idx, db->doorbell);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ bnxt_writeq_relaxed(bp, db->db_key64 | DB_RING_IDX(db, idx),
+ db->doorbell);
} else {
- u32 db_val = db->db_key32 | idx;
+ u32 db_val = db->db_key32 | DB_RING_IDX(db, idx);
writel_relaxed(db_val, db->doorbell);
if (bp->flags & BNXT_FLAG_DOUBLE_DB)
@@ -2318,10 +2610,11 @@ static inline void bnxt_db_write_relaxed(struct bnxt *bp,
static inline void bnxt_db_write(struct bnxt *bp, struct bnxt_db_info *db,
u32 idx)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- bnxt_writeq(bp, db->db_key64 | idx, db->doorbell);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ bnxt_writeq(bp, db->db_key64 | DB_RING_IDX(db, idx),
+ db->doorbell);
} else {
- u32 db_val = db->db_key32 | idx;
+ u32 db_val = db->db_key32 | DB_RING_IDX(db, idx);
writel(db_val, db->doorbell);
if (bp->flags & BNXT_FLAG_DOUBLE_DB)
@@ -2351,12 +2644,21 @@ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap,
int bmap_size, bool async_only);
int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp);
+void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr);
+int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr);
+int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr);
+int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
+ struct bnxt_ntuple_filter *fltr);
+int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
+ struct bnxt_ntuple_filter *fltr);
+void bnxt_fill_ipv6_mask(__be32 mask[4]);
int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings);
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id);
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings);
int bnxt_nq_rings_in_use(struct bnxt *bp);
int bnxt_hwrm_set_coal(struct bnxt *);
void bnxt_free_ctx_mem(struct bnxt *bp);
+int bnxt_num_tx_to_cp(struct bnxt *bp, int tx);
unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp);
unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
@@ -2366,7 +2668,7 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init);
void bnxt_tx_disable(struct bnxt *bp);
void bnxt_tx_enable(struct bnxt *bp);
void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
- int idx);
+ u16 curr);
void bnxt_report_link(struct bnxt *bp);
int bnxt_update_link(struct bnxt *bp, bool chng_link_state);
int bnxt_hwrm_set_pause(struct bnxt *);
@@ -2393,6 +2695,13 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int bnxt_fw_init_one(struct bnxt *bp);
bool bnxt_hwrm_reset_permitted(struct bnxt *bp);
int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
+struct bnxt_ntuple_filter *bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp,
+ struct bnxt_ntuple_filter *fltr, u32 idx);
+u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
+ const struct sk_buff *skb);
+int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
+ u32 idx);
+void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr);
int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
int bnxt_restore_pf_fw_resources(struct bnxt *bp);
int bnxt_get_port_parent_id(struct net_device *dev,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 63e0670383..0dbb880a7a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -228,7 +228,7 @@ static int bnxt_queue_remap(struct bnxt *bp, unsigned int lltc_mask)
}
}
if (bp->ieee_ets) {
- int tc = netdev_get_num_tc(bp->dev);
+ int tc = bp->num_tc;
if (!tc)
tc = 1;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 89809f1b12..ae4529c043 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -462,8 +462,6 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
}
bnxt_cancel_reservations(bp, false);
bnxt_free_ctx_mem(bp);
- kfree(bp->ctx);
- bp->ctx = NULL;
break;
}
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: {
@@ -734,7 +732,7 @@ static int bnxt_hwrm_get_nvm_cfg_ver(struct bnxt *bp, u32 *nvm_cfg_ver)
}
/* earlier devices present as an array of raw bytes */
- if (!BNXT_CHIP_P5(bp)) {
+ if (!BNXT_CHIP_P5_PLUS(bp)) {
dim = 0;
i = 0;
bits *= 3; /* array of 3 version components */
@@ -754,7 +752,7 @@ static int bnxt_hwrm_get_nvm_cfg_ver(struct bnxt *bp, u32 *nvm_cfg_ver)
goto exit;
bnxt_copy_from_nvm_data(&ver, data, bits, bytes);
- if (BNXT_CHIP_P5(bp)) {
+ if (BNXT_CHIP_P5_PLUS(bp)) {
*nvm_cfg_ver <<= 8;
*nvm_cfg_ver |= ver.vu8;
} else {
@@ -774,7 +772,7 @@ static int bnxt_dl_info_put(struct bnxt *bp, struct devlink_info_req *req,
if (!strlen(buf))
return 0;
- if ((bp->flags & BNXT_FLAG_CHIP_P5) &&
+ if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
(!strcmp(key, DEVLINK_INFO_VERSION_GENERIC_FW_NCSI) ||
!strcmp(key, DEVLINK_INFO_VERSION_GENERIC_FW_ROCE)))
return 0;
@@ -1000,7 +998,7 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
if (rc)
return rc;
- if (BNXT_CHIP_P5(bp)) {
+ if (BNXT_CHIP_P5_PLUS(bp)) {
rc = bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_SRT_PATCH);
if (rc)
return rc;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 5f67a7f94e..dc4ca706b0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -460,6 +460,7 @@ static const struct {
BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES,
BNXT_RX_STATS_EXT_ENTRY(rx_fec_corrected_blocks),
BNXT_RX_STATS_EXT_ENTRY(rx_fec_uncorrectable_blocks),
+ BNXT_RX_STATS_EXT_ENTRY(rx_filter_miss),
};
static const struct {
@@ -510,9 +511,9 @@ static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
{
if (BNXT_SUPPORTS_TPA(bp)) {
if (bp->max_tpa_v2) {
- if (BNXT_CHIP_P5_THOR(bp))
+ if (BNXT_CHIP_P5(bp))
return BNXT_NUM_TPA_RING_STATS_P5;
- return BNXT_NUM_TPA_RING_STATS_P5_SR2;
+ return BNXT_NUM_TPA_RING_STATS_P7;
}
return BNXT_NUM_TPA_RING_STATS;
}
@@ -527,7 +528,8 @@ static int bnxt_get_num_ring_stats(struct bnxt *bp)
bnxt_get_num_tpa_ring_stats(bp);
tx = NUM_RING_TX_HW_STATS;
cmn = NUM_RING_CMN_SW_STATS;
- return rx * bp->rx_nr_rings + tx * bp->tx_nr_rings +
+ return rx * bp->rx_nr_rings +
+ tx * (bp->tx_nr_rings_xdp + bp->tx_nr_rings_per_tc) +
cmn * bp->cp_nr_rings;
}
@@ -882,7 +884,7 @@ static void bnxt_get_channels(struct net_device *dev,
if (max_tx_sch_inputs)
max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
- tcs = netdev_get_num_tc(dev);
+ tcs = bp->num_tc;
tx_grps = max(tcs, 1);
if (bp->tx_nr_rings_xdp)
tx_grps++;
@@ -922,6 +924,7 @@ static int bnxt_set_channels(struct net_device *dev,
bool sh = false;
int tx_xdp = 0;
int rc = 0;
+ int tx_cp;
if (channel->other_count)
return -EINVAL;
@@ -941,7 +944,7 @@ static int bnxt_set_channels(struct net_device *dev,
if (channel->combined_count)
sh = true;
- tcs = netdev_get_num_tc(dev);
+ tcs = bp->num_tc;
req_tx_rings = sh ? channel->combined_count : channel->tx_count;
req_rx_rings = sh ? channel->combined_count : channel->rx_count;
@@ -988,8 +991,9 @@ static int bnxt_set_channels(struct net_device *dev,
if (tcs > 1)
bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp;
- bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
- bp->tx_nr_rings + bp->rx_nr_rings;
+ tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
+ bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
+ tx_cp + bp->rx_nr_rings;
/* After changing number of rx channels, update NTUPLE feature. */
netdev_update_features(dev);
@@ -1007,29 +1011,60 @@ static int bnxt_set_channels(struct net_device *dev,
return rc;
}
-#ifdef CONFIG_RFS_ACCEL
-static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
- u32 *rule_locs)
+static u32 bnxt_get_all_fltr_ids_rcu(struct bnxt *bp, struct hlist_head tbl[],
+ int tbl_size, u32 *ids, u32 start,
+ u32 id_cnt)
{
- int i, j = 0;
+ int i, j = start;
- cmd->data = bp->ntp_fltr_count;
- for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+ if (j >= id_cnt)
+ return j;
+ for (i = 0; i < tbl_size; i++) {
struct hlist_head *head;
- struct bnxt_ntuple_filter *fltr;
+ struct bnxt_filter_base *fltr;
- head = &bp->ntp_fltr_hash_tbl[i];
- rcu_read_lock();
+ head = &tbl[i];
hlist_for_each_entry_rcu(fltr, head, hash) {
- if (j == cmd->rule_cnt)
- break;
- rule_locs[j++] = fltr->sw_id;
+ if (!fltr->flags ||
+ test_bit(BNXT_FLTR_FW_DELETED, &fltr->state))
+ continue;
+ ids[j++] = fltr->sw_id;
+ if (j == id_cnt)
+ return j;
+ }
+ }
+ return j;
+}
+
+static struct bnxt_filter_base *bnxt_get_one_fltr_rcu(struct bnxt *bp,
+ struct hlist_head tbl[],
+ int tbl_size, u32 id)
+{
+ int i;
+
+ for (i = 0; i < tbl_size; i++) {
+ struct hlist_head *head;
+ struct bnxt_filter_base *fltr;
+
+ head = &tbl[i];
+ hlist_for_each_entry_rcu(fltr, head, hash) {
+ if (fltr->flags && fltr->sw_id == id)
+ return fltr;
}
- rcu_read_unlock();
- if (j == cmd->rule_cnt)
- break;
}
- cmd->rule_cnt = j;
+ return NULL;
+}
+
+static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ cmd->data = bp->ntp_fltr_count;
+ rcu_read_lock();
+ cmd->rule_cnt = bnxt_get_all_fltr_ids_rcu(bp, bp->ntp_fltr_hash_tbl,
+ BNXT_NTP_FLTR_HASH_SIZE,
+ rule_locs, 0, cmd->rule_cnt);
+ rcu_read_unlock();
+
return 0;
}
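These two helpers replace the open-coded hash walks: collect up to id_cnt
filter IDs across all buckets, skipping soft-deleted entries, or find one entry
by ID. A user-space analogue of the collector, with plain singly linked lists
standing in for the RCU-protected hlists:

#include <stdint.h>
#include <stdio.h>

struct fltr {
        uint32_t sw_id;
        int deleted;
        struct fltr *next;
};

static uint32_t get_all_ids(struct fltr *tbl[], int tbl_size,
                            uint32_t *ids, uint32_t start, uint32_t id_cnt)
{
        uint32_t j = start;

        for (int i = 0; i < tbl_size && j < id_cnt; i++)
                for (struct fltr *f = tbl[i]; f; f = f->next) {
                        if (f->deleted)         /* BNXT_FLTR_FW_DELETED */
                                continue;
                        ids[j++] = f->sw_id;
                        if (j == id_cnt)
                                break;          /* caller's buffer full */
                }
        return j;
}

int main(void)
{
        struct fltr b = { 7, 0, NULL }, a = { 3, 1, &b };   /* id 3 deleted */
        struct fltr *tbl[2] = { &a, NULL };
        uint32_t ids[4];

        uint32_t n = get_all_ids(tbl, 2, ids, 0, 4);
        printf("%u id(s), first %u\n", n, ids[0]);          /* 1, 7 */
        return 0;
}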
@@ -1037,27 +1072,24 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
struct ethtool_rx_flow_spec *fs =
(struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct bnxt_filter_base *fltr_base;
struct bnxt_ntuple_filter *fltr;
struct flow_keys *fkeys;
- int i, rc = -EINVAL;
+ int rc = -EINVAL;
if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
return rc;
- for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
- struct hlist_head *head;
-
- head = &bp->ntp_fltr_hash_tbl[i];
- rcu_read_lock();
- hlist_for_each_entry_rcu(fltr, head, hash) {
- if (fltr->sw_id == fs->location)
- goto fltr_found;
- }
+ rcu_read_lock();
+ fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
+ BNXT_NTP_FLTR_HASH_SIZE,
+ fs->location);
+ if (!fltr_base) {
rcu_read_unlock();
+ return rc;
}
- return rc;
+ fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base);
-fltr_found:
fkeys = &fltr->fkeys;
if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
if (fkeys->basic.ip_proto == IPPROTO_TCP)
@@ -1067,20 +1099,23 @@ fltr_found:
else
goto fltr_err;
- fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
- fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
-
- fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
- fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
-
- fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
- fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
-
- fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
- fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
+ fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
+ fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
+ }
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
+ fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
+ fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
+ }
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) {
+ fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
+ fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
+ }
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) {
+ fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
+ fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
+ }
} else {
- int i;
-
if (fkeys->basic.ip_proto == IPPROTO_TCP)
fs->flow_type = TCP_V6_FLOW;
else if (fkeys->basic.ip_proto == IPPROTO_UDP)
@@ -1088,22 +1123,27 @@ fltr_found:
else
goto fltr_err;
- *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
- fkeys->addrs.v6addrs.src;
- *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
- fkeys->addrs.v6addrs.dst;
- for (i = 0; i < 4; i++) {
- fs->m_u.tcp_ip6_spec.ip6src[i] = cpu_to_be32(~0);
- fs->m_u.tcp_ip6_spec.ip6dst[i] = cpu_to_be32(~0);
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
+ *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
+ fkeys->addrs.v6addrs.src;
+ bnxt_fill_ipv6_mask(fs->m_u.tcp_ip6_spec.ip6src);
+ }
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
+ *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
+ fkeys->addrs.v6addrs.dst;
+ bnxt_fill_ipv6_mask(fs->m_u.tcp_ip6_spec.ip6dst);
+ }
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) {
+ fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
+ fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);
+ }
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) {
+ fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
+ fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
}
- fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
- fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);
-
- fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
- fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
}
- fs->ring_cookie = fltr->rxq;
+ fs->ring_cookie = fltr->base.rxq;
rc = 0;
fltr_err:
@@ -1111,7 +1151,221 @@ fltr_err:
return rc;
}
-#endif
+
+#define IPV4_ALL_MASK ((__force __be32)~0)
+#define L4_PORT_ALL_MASK ((__force __be16)~0)
+
+static bool ipv6_mask_is_full(__be32 mask[4])
+{
+ return (mask[0] & mask[1] & mask[2] & mask[3]) == IPV4_ALL_MASK;
+}
+
+static bool ipv6_mask_is_zero(__be32 mask[4])
+{
+ return !(mask[0] | mask[1] | mask[2] | mask[3]);
+}
+
+static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
+ struct ethtool_rx_flow_spec *fs)
+{
+ u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
+ u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
+ struct bnxt_ntuple_filter *new_fltr, *fltr;
+ struct bnxt_l2_filter *l2_fltr;
+ u32 flow_type = fs->flow_type;
+ struct flow_keys *fkeys;
+ u32 idx;
+ int rc;
+
+ if (!bp->vnic_info)
+ return -EAGAIN;
+
+ if ((flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || vf)
+ return -EOPNOTSUPP;
+
+ new_fltr = kzalloc(sizeof(*new_fltr), GFP_KERNEL);
+ if (!new_fltr)
+ return -ENOMEM;
+
+ l2_fltr = bp->vnic_info[0].l2_filters[0];
+ atomic_inc(&l2_fltr->refcnt);
+ new_fltr->l2_fltr = l2_fltr;
+ fkeys = &new_fltr->fkeys;
+
+ rc = -EOPNOTSUPP;
+ switch (flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW: {
+ struct ethtool_tcpip4_spec *ip_spec = &fs->h_u.tcp_ip4_spec;
+ struct ethtool_tcpip4_spec *ip_mask = &fs->m_u.tcp_ip4_spec;
+
+ fkeys->basic.ip_proto = IPPROTO_TCP;
+ if (flow_type == UDP_V4_FLOW)
+ fkeys->basic.ip_proto = IPPROTO_UDP;
+ fkeys->basic.n_proto = htons(ETH_P_IP);
+
+ if (ip_mask->ip4src == IPV4_ALL_MASK) {
+ fkeys->addrs.v4addrs.src = ip_spec->ip4src;
+ new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_IP;
+ } else if (ip_mask->ip4src) {
+ goto ntuple_err;
+ }
+ if (ip_mask->ip4dst == IPV4_ALL_MASK) {
+ fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
+ new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_IP;
+ } else if (ip_mask->ip4dst) {
+ goto ntuple_err;
+ }
+
+ if (ip_mask->psrc == L4_PORT_ALL_MASK) {
+ fkeys->ports.src = ip_spec->psrc;
+ new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_PORT;
+ } else if (ip_mask->psrc) {
+ goto ntuple_err;
+ }
+ if (ip_mask->pdst == L4_PORT_ALL_MASK) {
+ fkeys->ports.dst = ip_spec->pdst;
+ new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_PORT;
+ } else if (ip_mask->pdst) {
+ goto ntuple_err;
+ }
+ break;
+ }
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW: {
+ struct ethtool_tcpip6_spec *ip_spec = &fs->h_u.tcp_ip6_spec;
+ struct ethtool_tcpip6_spec *ip_mask = &fs->m_u.tcp_ip6_spec;
+
+ fkeys->basic.ip_proto = IPPROTO_TCP;
+ if (flow_type == UDP_V6_FLOW)
+ fkeys->basic.ip_proto = IPPROTO_UDP;
+ fkeys->basic.n_proto = htons(ETH_P_IPV6);
+
+ if (ipv6_mask_is_full(ip_mask->ip6src)) {
+ fkeys->addrs.v6addrs.src =
+ *(struct in6_addr *)&ip_spec->ip6src;
+ new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_IP;
+ } else if (!ipv6_mask_is_zero(ip_mask->ip6src)) {
+ goto ntuple_err;
+ }
+ if (ipv6_mask_is_full(ip_mask->ip6dst)) {
+ fkeys->addrs.v6addrs.dst =
+ *(struct in6_addr *)&ip_spec->ip6dst;
+ new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_IP;
+ } else if (!ipv6_mask_is_zero(ip_mask->ip6dst)) {
+ goto ntuple_err;
+ }
+
+ if (ip_mask->psrc == L4_PORT_ALL_MASK) {
+ fkeys->ports.src = ip_spec->psrc;
+ new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_PORT;
+ } else if (ip_mask->psrc) {
+ goto ntuple_err;
+ }
+ if (ip_mask->pdst == L4_PORT_ALL_MASK) {
+ fkeys->ports.dst = ip_spec->pdst;
+ new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_PORT;
+ } else if (ip_mask->pdst) {
+ goto ntuple_err;
+ }
+ break;
+ }
+ default:
+ rc = -EOPNOTSUPP;
+ goto ntuple_err;
+ }
+ if (!new_fltr->ntuple_flags)
+ goto ntuple_err;
+
+ idx = bnxt_get_ntp_filter_idx(bp, fkeys, NULL);
+ rcu_read_lock();
+ fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
+ if (fltr) {
+ rcu_read_unlock();
+ rc = -EEXIST;
+ goto ntuple_err;
+ }
+ rcu_read_unlock();
+
+ new_fltr->base.rxq = ring;
+ new_fltr->base.flags = BNXT_ACT_NO_AGING;
+ __set_bit(BNXT_FLTR_VALID, &new_fltr->base.state);
+ rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
+ if (!rc) {
+ rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, new_fltr);
+ if (rc) {
+ bnxt_del_ntp_filter(bp, new_fltr);
+ return rc;
+ }
+ fs->location = new_fltr->base.sw_id;
+ return 0;
+ }
+
+ntuple_err:
+ atomic_dec(&l2_fltr->refcnt);
+ kfree(new_fltr);
+ return rc;
+}
+
+static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fs = &cmd->fs;
+ u32 ring, flow_type;
+ int rc;
+ u8 vf;
+
+ if (!netif_running(bp->dev))
+ return -EAGAIN;
+ if (!(bp->flags & BNXT_FLAG_RFS))
+ return -EPERM;
+ if (fs->location != RX_CLS_LOC_ANY)
+ return -EINVAL;
+
+ ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
+ vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
+ if (BNXT_VF(bp) && vf)
+ return -EINVAL;
+ if (BNXT_PF(bp) && vf > bp->pf.active_vfs)
+ return -EINVAL;
+ if (!vf && ring >= bp->rx_nr_rings)
+ return -EINVAL;
+
+ flow_type = fs->flow_type;
+ if (flow_type & (FLOW_MAC_EXT | FLOW_RSS))
+ return -EINVAL;
+ flow_type &= ~FLOW_EXT;
+ if (flow_type == ETHER_FLOW)
+ rc = -EOPNOTSUPP;
+ else
+ rc = bnxt_add_ntuple_cls_rule(bp, fs);
+ return rc;
+}
+
+static int bnxt_srxclsrldel(struct bnxt *bp, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fs = &cmd->fs;
+ struct bnxt_filter_base *fltr_base;
+ struct bnxt_ntuple_filter *fltr;
+
+ rcu_read_lock();
+ fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
+ BNXT_NTP_FLTR_HASH_SIZE,
+ fs->location);
+ if (!fltr_base) {
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+
+ fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base);
+ if (!(fltr->base.flags & BNXT_ACT_NO_AGING)) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+ rcu_read_unlock();
+ bnxt_hwrm_cfa_ntuple_filter_free(bp, fltr);
+ bnxt_del_ntp_filter(bp, fltr);
+ return 0;
+}
static u64 get_ethtool_ipv4_rss(struct bnxt *bp)
{
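The rule-insert path above only accepts fields that are either fully specified
or fully wildcarded, and the two mask helpers express that for 128-bit IPv6
masks: ANDing the four words equals ~0 only when every bit is set, ORing them
is zero only when none is. A quick standalone check, with plain uint32_t
modelling __be32:

#include <stdint.h>
#include <stdio.h>

static int ipv6_mask_is_full(const uint32_t mask[4])
{
        return (mask[0] & mask[1] & mask[2] & mask[3]) == UINT32_MAX;
}

static int ipv6_mask_is_zero(const uint32_t mask[4])
{
        return !(mask[0] | mask[1] | mask[2] | mask[3]);
}

int main(void)
{
        uint32_t full[4] = { ~0u, ~0u, ~0u, ~0u };
        uint32_t partial[4] = { ~0u, ~0u, 0, 0 };   /* a /64: rejected */

        printf("full:    full=%d zero=%d\n",
               ipv6_mask_is_full(full), ipv6_mask_is_zero(full));
        printf("partial: full=%d zero=%d\n",
               ipv6_mask_is_full(partial), ipv6_mask_is_zero(partial));
        return 0;
}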
@@ -1194,7 +1448,7 @@ static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
if (tuple == 4)
rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
} else if (cmd->flow_type == UDP_V4_FLOW) {
- if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
+ if (tuple == 4 && !(bp->rss_cap & BNXT_RSS_CAP_UDP_RSS_CAP))
return -EINVAL;
rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
if (tuple == 4)
@@ -1204,7 +1458,7 @@ static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
if (tuple == 4)
rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
} else if (cmd->flow_type == UDP_V6_FLOW) {
- if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
+ if (tuple == 4 && !(bp->rss_cap & BNXT_RSS_CAP_UDP_RSS_CAP))
return -EINVAL;
rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
if (tuple == 4)
@@ -1244,7 +1498,7 @@ static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
if (bp->rss_hash_cfg == rss_hash_cfg)
return 0;
- if (bp->fw_cap & BNXT_FW_CAP_RSS_HASH_TYPE_DELTA)
+ if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
bp->rss_hash_delta = bp->rss_hash_cfg ^ rss_hash_cfg;
bp->rss_hash_cfg = rss_hash_cfg;
if (netif_running(bp->dev)) {
@@ -1261,14 +1515,13 @@ static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
int rc = 0;
switch (cmd->cmd) {
-#ifdef CONFIG_RFS_ACCEL
case ETHTOOL_GRXRINGS:
cmd->data = bp->rx_nr_rings;
break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = bp->ntp_fltr_count;
- cmd->data = BNXT_NTP_FLTR_MAX_FLTR;
+ cmd->data = BNXT_NTP_FLTR_MAX_FLTR | RX_CLS_LOC_SPECIAL;
break;
case ETHTOOL_GRXCLSRLALL:
@@ -1278,7 +1531,6 @@ static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
case ETHTOOL_GRXCLSRULE:
rc = bnxt_grxclsrule(bp, cmd);
break;
-#endif
case ETHTOOL_GRXFH:
rc = bnxt_grxfh(bp, cmd);
@@ -1302,6 +1554,14 @@ static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
rc = bnxt_srxfh(bp, cmd);
break;
+ case ETHTOOL_SRXCLSRLINS:
+ rc = bnxt_srxclsrlins(bp, cmd);
+ break;
+
+ case ETHTOOL_SRXCLSRLDEL:
+ rc = bnxt_srxclsrldel(bp, cmd);
+ break;
+
default:
rc = -EOPNOTSUPP;
break;
@@ -1313,8 +1573,9 @@ u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
- if (bp->flags & BNXT_FLAG_CHIP_P5)
- return ALIGN(bp->rx_nr_rings, BNXT_RSS_TABLE_ENTRIES_P5);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ return bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) *
+ BNXT_RSS_TABLE_ENTRIES_P5;
return HW_HASH_INDEX_SIZE;
}
@@ -1323,49 +1584,49 @@ static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
return HW_HASH_KEY_SIZE;
}
-static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int bnxt_get_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_vnic_info *vnic;
u32 i, tbl_size;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
if (!bp->vnic_info)
return 0;
vnic = &bp->vnic_info[0];
- if (indir && bp->rss_indir_tbl) {
+ if (rxfh->indir && bp->rss_indir_tbl) {
tbl_size = bnxt_get_rxfh_indir_size(dev);
for (i = 0; i < tbl_size; i++)
- indir[i] = bp->rss_indir_tbl[i];
+ rxfh->indir[i] = bp->rss_indir_tbl[i];
}
- if (key && vnic->rss_hash_key)
- memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
+ if (rxfh->key && vnic->rss_hash_key)
+ memcpy(rxfh->key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
return 0;
}
-static int bnxt_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int bnxt_set_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
- if (hfunc && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (key)
+ if (rxfh->key)
return -EOPNOTSUPP;
- if (indir) {
+ if (rxfh->indir) {
u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev);
for (i = 0; i < tbl_size; i++)
- bp->rss_indir_tbl[i] = indir[i];
+ bp->rss_indir_tbl[i] = rxfh->indir[i];
pad = bp->rss_indir_tbl_entries - tbl_size;
if (pad)
memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
@@ -1568,6 +1829,22 @@ static const enum bnxt_media_type bnxt_phy_types[] = {
[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2] = BNXT_MEDIA_SR,
[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2] = BNXT_MEDIA_LR_ER_FR,
[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR] = BNXT_MEDIA_CR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR] = BNXT_MEDIA_SR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR2] = BNXT_MEDIA_CR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR2] = BNXT_MEDIA_SR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR2] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER2] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR8] = BNXT_MEDIA_CR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR8] = BNXT_MEDIA_SR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR8] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER8] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR4] = BNXT_MEDIA_CR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4] = BNXT_MEDIA_SR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
};
static enum bnxt_media_type
@@ -1595,6 +1872,7 @@ enum bnxt_link_speed_indices {
BNXT_LINK_SPEED_50GB_IDX,
BNXT_LINK_SPEED_100GB_IDX,
BNXT_LINK_SPEED_200GB_IDX,
+ BNXT_LINK_SPEED_400GB_IDX,
__BNXT_LINK_SPEED_END
};
@@ -1606,9 +1884,21 @@ static enum bnxt_link_speed_indices bnxt_fw_speed_idx(u16 speed)
case BNXT_LINK_SPEED_10GB: return BNXT_LINK_SPEED_10GB_IDX;
case BNXT_LINK_SPEED_25GB: return BNXT_LINK_SPEED_25GB_IDX;
case BNXT_LINK_SPEED_40GB: return BNXT_LINK_SPEED_40GB_IDX;
- case BNXT_LINK_SPEED_50GB: return BNXT_LINK_SPEED_50GB_IDX;
- case BNXT_LINK_SPEED_100GB: return BNXT_LINK_SPEED_100GB_IDX;
- case BNXT_LINK_SPEED_200GB: return BNXT_LINK_SPEED_200GB_IDX;
+ case BNXT_LINK_SPEED_50GB:
+ case BNXT_LINK_SPEED_50GB_PAM4:
+ return BNXT_LINK_SPEED_50GB_IDX;
+ case BNXT_LINK_SPEED_100GB:
+ case BNXT_LINK_SPEED_100GB_PAM4:
+ case BNXT_LINK_SPEED_100GB_PAM4_112:
+ return BNXT_LINK_SPEED_100GB_IDX;
+ case BNXT_LINK_SPEED_200GB:
+ case BNXT_LINK_SPEED_200GB_PAM4:
+ case BNXT_LINK_SPEED_200GB_PAM4_112:
+ return BNXT_LINK_SPEED_200GB_IDX;
+ case BNXT_LINK_SPEED_400GB:
+ case BNXT_LINK_SPEED_400GB_PAM4:
+ case BNXT_LINK_SPEED_400GB_PAM4_112:
+ return BNXT_LINK_SPEED_400GB_IDX;
default: return BNXT_LINK_SPEED_UNKNOWN;
}
}
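
Each ethtool speed keeps a single table row regardless of signaling, so every PAM4 variant collapses onto the same index as its NRZ counterpart, for example:

	idx = bnxt_fw_speed_idx(BNXT_LINK_SPEED_100GB);		/* NRZ */
	idx = bnxt_fw_speed_idx(BNXT_LINK_SPEED_100GB_PAM4_112);	/* PAM4-112 */
	/* both return BNXT_LINK_SPEED_100GB_IDX */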
@@ -1681,6 +1971,12 @@ bnxt_link_modes[__BNXT_LINK_SPEED_END][BNXT_SIG_MODE_MAX][__BNXT_MEDIA_END] = {
[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
},
+ [BNXT_SIG_MODE_PAM4_112] = {
+ [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR_Full_BIT,
+ [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR_Full_BIT,
+ [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR_Full_BIT,
+ [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT,
+ },
},
[BNXT_LINK_SPEED_200GB_IDX] = {
[BNXT_SIG_MODE_PAM4] = {
@@ -1689,6 +1985,26 @@ bnxt_link_modes[__BNXT_LINK_SPEED_END][BNXT_SIG_MODE_MAX][__BNXT_MEDIA_END] = {
[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
},
+ [BNXT_SIG_MODE_PAM4_112] = {
+ [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT,
+ [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT,
+ [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT,
+ [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT,
+ },
+ },
+ [BNXT_LINK_SPEED_400GB_IDX] = {
+ [BNXT_SIG_MODE_PAM4] = {
+ [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT,
+ [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
+ [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
+ [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
+ },
+ [BNXT_SIG_MODE_PAM4_112] = {
+ [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT,
+ [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT,
+ [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT,
+ [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT,
+ },
},
};
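
The table is indexed by speed, signaling mode, and media, as in the bnxt_update_speed() calls further below; for instance, 400G copper with PAM4-112 signaling resolves to the 4-lane link mode:

	mode = bnxt_link_modes[BNXT_LINK_SPEED_400GB_IDX]
			      [BNXT_SIG_MODE_PAM4_112]
			      [BNXT_MEDIA_CR];
	/* == ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT */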
@@ -1753,7 +2069,8 @@ static void bnxt_get_ethtool_modes(struct bnxt_link_info *link_info,
lk_ksettings->link_modes.supported);
}
- if (link_info->support_auto_speeds || link_info->support_pam4_auto_speeds)
+ if (link_info->support_auto_speeds || link_info->support_auto_speeds2 ||
+ link_info->support_pam4_auto_speeds)
linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
lk_ksettings->link_modes.supported);
@@ -1789,22 +2106,60 @@ static const u16 bnxt_pam4_speed_masks[] = {
[BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_50GB,
[BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_100GB,
[BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_200GB,
+ [__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
+};
+
+static const u16 bnxt_nrz_speeds2_masks[] = {
+ [BNXT_LINK_SPEED_1GB_IDX] = BNXT_LINK_SPEEDS2_MSK_1GB,
+ [BNXT_LINK_SPEED_10GB_IDX] = BNXT_LINK_SPEEDS2_MSK_10GB,
+ [BNXT_LINK_SPEED_25GB_IDX] = BNXT_LINK_SPEEDS2_MSK_25GB,
+ [BNXT_LINK_SPEED_40GB_IDX] = BNXT_LINK_SPEEDS2_MSK_40GB,
+ [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEEDS2_MSK_50GB,
+ [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB,
+ [__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
+};
+
+static const u16 bnxt_pam4_speeds2_masks[] = {
+ [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEEDS2_MSK_50GB_PAM4,
+ [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4,
+ [BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4,
+ [BNXT_LINK_SPEED_400GB_IDX] = BNXT_LINK_SPEEDS2_MSK_400GB_PAM4,
+};
+
+static const u16 bnxt_pam4_112_speeds2_masks[] = {
+ [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112,
+ [BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112,
+ [BNXT_LINK_SPEED_400GB_IDX] = BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112,
};
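
Designated initializers zero any gaps, so an unsupported speed simply carries a zero mask and never matches; the explicit [__BNXT_LINK_SPEED_END - 1] = 0 entries only pad ARRAY_SIZE() for tables whose highest real entry sits below the 400G index, which is why the PAM4 speeds2 table, already ending at 400G, needs no sentinel. A hypothetical compile-time check of that convention:

	static_assert(ARRAY_SIZE(bnxt_nrz_speeds2_masks) == __BNXT_LINK_SPEED_END,
		      "every speed index must be a valid lookup");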
static enum bnxt_link_speed_indices
-bnxt_encoding_speed_idx(u8 sig_mode, u16 speed_msk)
+bnxt_encoding_speed_idx(u8 sig_mode, u16 phy_flags, u16 speed_msk)
{
const u16 *speeds;
int idx, len;
switch (sig_mode) {
case BNXT_SIG_MODE_NRZ:
- speeds = bnxt_nrz_speed_masks;
- len = ARRAY_SIZE(bnxt_nrz_speed_masks);
+ if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ speeds = bnxt_nrz_speeds2_masks;
+ len = ARRAY_SIZE(bnxt_nrz_speeds2_masks);
+ } else {
+ speeds = bnxt_nrz_speed_masks;
+ len = ARRAY_SIZE(bnxt_nrz_speed_masks);
+ }
break;
case BNXT_SIG_MODE_PAM4:
- speeds = bnxt_pam4_speed_masks;
- len = ARRAY_SIZE(bnxt_pam4_speed_masks);
+ if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ speeds = bnxt_pam4_speeds2_masks;
+ len = ARRAY_SIZE(bnxt_pam4_speeds2_masks);
+ } else {
+ speeds = bnxt_pam4_speed_masks;
+ len = ARRAY_SIZE(bnxt_pam4_speed_masks);
+ }
+ break;
+ case BNXT_SIG_MODE_PAM4_112:
+ speeds = bnxt_pam4_112_speeds2_masks;
+ len = ARRAY_SIZE(bnxt_pam4_112_speeds2_masks);
break;
default:
return BNXT_LINK_SPEED_UNKNOWN;
@@ -1822,14 +2177,14 @@ bnxt_encoding_speed_idx(u8 sig_mode, u16 speed_msk)
static void
__bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media,
- u8 sig_mode, unsigned long *et_mask)
+ u8 sig_mode, u16 phy_flags, unsigned long *et_mask)
{
enum ethtool_link_mode_bit_indices link_mode;
enum bnxt_link_speed_indices speed;
u8 bit;
for_each_set_bit(bit, &fw_mask, BNXT_FW_SPEED_MSK_BITS) {
- speed = bnxt_encoding_speed_idx(sig_mode, 1 << bit);
+ speed = bnxt_encoding_speed_idx(sig_mode, phy_flags, 1 << bit);
if (!speed)
continue;
@@ -1843,16 +2198,83 @@ __bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media,
static void
bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media,
- u8 sig_mode, unsigned long *et_mask)
+ u8 sig_mode, u16 phy_flags, unsigned long *et_mask)
{
if (media) {
- __bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, et_mask);
+ __bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, phy_flags,
+ et_mask);
return;
}
/* list speeds for all media if unknown */
for (media = 1; media < __BNXT_MEDIA_END; media++)
- __bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, et_mask);
+ __bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, phy_flags,
+ et_mask);
+}
+
+static void
+bnxt_get_all_ethtool_support_speeds(struct bnxt_link_info *link_info,
+ enum bnxt_media_type media,
+ struct ethtool_link_ksettings *lk_ksettings)
+{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+ u16 sp_nrz, sp_pam4, sp_pam4_112 = 0;
+ u16 phy_flags = bp->phy_flags;
+
+ if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ sp_nrz = link_info->support_speeds2;
+ sp_pam4 = link_info->support_speeds2;
+ sp_pam4_112 = link_info->support_speeds2;
+ } else {
+ sp_nrz = link_info->support_speeds;
+ sp_pam4 = link_info->support_pam4_speeds;
+ }
+ bnxt_get_ethtool_speeds(sp_nrz, media, BNXT_SIG_MODE_NRZ, phy_flags,
+ lk_ksettings->link_modes.supported);
+ bnxt_get_ethtool_speeds(sp_pam4, media, BNXT_SIG_MODE_PAM4, phy_flags,
+ lk_ksettings->link_modes.supported);
+ bnxt_get_ethtool_speeds(sp_pam4_112, media, BNXT_SIG_MODE_PAM4_112,
+ phy_flags, lk_ksettings->link_modes.supported);
+}
+
+static void
+bnxt_get_all_ethtool_adv_speeds(struct bnxt_link_info *link_info,
+ enum bnxt_media_type media,
+ struct ethtool_link_ksettings *lk_ksettings)
+{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+ u16 sp_nrz, sp_pam4, sp_pam4_112 = 0;
+ u16 phy_flags = bp->phy_flags;
+
+ sp_nrz = link_info->advertising;
+ if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ sp_pam4 = link_info->advertising;
+ sp_pam4_112 = link_info->advertising;
+ } else {
+ sp_pam4 = link_info->advertising_pam4;
+ }
+ bnxt_get_ethtool_speeds(sp_nrz, media, BNXT_SIG_MODE_NRZ, phy_flags,
+ lk_ksettings->link_modes.advertising);
+ bnxt_get_ethtool_speeds(sp_pam4, media, BNXT_SIG_MODE_PAM4, phy_flags,
+ lk_ksettings->link_modes.advertising);
+ bnxt_get_ethtool_speeds(sp_pam4_112, media, BNXT_SIG_MODE_PAM4_112,
+ phy_flags, lk_ksettings->link_modes.advertising);
+}
+
+static void
+bnxt_get_all_ethtool_lp_speeds(struct bnxt_link_info *link_info,
+ enum bnxt_media_type media,
+ struct ethtool_link_ksettings *lk_ksettings)
+{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+ u16 phy_flags = bp->phy_flags;
+
+ bnxt_get_ethtool_speeds(link_info->lp_auto_link_speeds, media,
+ BNXT_SIG_MODE_NRZ, phy_flags,
+ lk_ksettings->link_modes.lp_advertising);
+ bnxt_get_ethtool_speeds(link_info->lp_auto_pam4_link_speeds, media,
+ BNXT_SIG_MODE_PAM4, phy_flags,
+ lk_ksettings->link_modes.lp_advertising);
}
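
With BNXT_PHY_FL_SPEEDS2 set, a single firmware bitmap carries the NRZ, PAM4-56, and PAM4-112 bits at once, which is why the support and advertising helpers above read all three modes from the same field; the per-mode mask tables then select the relevant bits, e.g.:

	u16 adv = link_info->advertising;	/* speeds2 namespace */
	bool nrz_100g  = adv & BNXT_LINK_SPEEDS2_MSK_100GB;
	bool pam4_100g = adv & BNXT_LINK_SPEEDS2_MSK_100GB_PAM4;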
static void bnxt_update_speed(u32 *delta, bool installed_media, u16 *speeds,
@@ -1881,22 +2303,42 @@ static void bnxt_update_speed(u32 *delta, bool installed_media, u16 *speeds,
static void bnxt_set_ethtool_speeds(struct bnxt_link_info *link_info,
const unsigned long *et_mask)
{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+ u16 const *sp_msks, *sp_pam4_msks, *sp_pam4_112_msks;
enum bnxt_media_type media = bnxt_get_media(link_info);
+ u16 *adv, *adv_pam4, *adv_pam4_112 = NULL;
+ u32 delta_pam4_112 = 0;
u32 delta_pam4 = 0;
u32 delta_nrz = 0;
int i, m;
+ adv = &link_info->advertising;
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ adv_pam4 = &link_info->advertising;
+ adv_pam4_112 = &link_info->advertising;
+ sp_msks = bnxt_nrz_speeds2_masks;
+ sp_pam4_msks = bnxt_pam4_speeds2_masks;
+ sp_pam4_112_msks = bnxt_pam4_112_speeds2_masks;
+ } else {
+ adv_pam4 = &link_info->advertising_pam4;
+ sp_msks = bnxt_nrz_speed_masks;
+ sp_pam4_msks = bnxt_pam4_speed_masks;
+ }
for (i = 1; i < __BNXT_LINK_SPEED_END; i++) {
/* accept any legal media from user */
for (m = 1; m < __BNXT_MEDIA_END; m++) {
bnxt_update_speed(&delta_nrz, m == media,
- &link_info->advertising,
- bnxt_nrz_speed_masks[i], et_mask,
+ adv, sp_msks[i], et_mask,
bnxt_link_modes[i][BNXT_SIG_MODE_NRZ][m]);
bnxt_update_speed(&delta_pam4, m == media,
- &link_info->advertising_pam4,
- bnxt_pam4_speed_masks[i], et_mask,
+ adv_pam4, sp_pam4_msks[i], et_mask,
bnxt_link_modes[i][BNXT_SIG_MODE_PAM4][m]);
+ if (!adv_pam4_112)
+ continue;
+
+ bnxt_update_speed(&delta_pam4_112, m == media,
+ adv_pam4_112, sp_pam4_112_msks[i], et_mask,
+ bnxt_link_modes[i][BNXT_SIG_MODE_PAM4_112][m]);
}
}
}
@@ -1961,11 +2403,20 @@ u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
case BNXT_LINK_SPEED_40GB:
return SPEED_40000;
case BNXT_LINK_SPEED_50GB:
+ case BNXT_LINK_SPEED_50GB_PAM4:
return SPEED_50000;
case BNXT_LINK_SPEED_100GB:
+ case BNXT_LINK_SPEED_100GB_PAM4:
+ case BNXT_LINK_SPEED_100GB_PAM4_112:
return SPEED_100000;
case BNXT_LINK_SPEED_200GB:
+ case BNXT_LINK_SPEED_200GB_PAM4:
+ case BNXT_LINK_SPEED_200GB_PAM4_112:
return SPEED_200000;
+ case BNXT_LINK_SPEED_400GB:
+ case BNXT_LINK_SPEED_400GB_PAM4:
+ case BNXT_LINK_SPEED_400GB_PAM4_112:
+ return SPEED_400000;
default:
return SPEED_UNKNOWN;
}
@@ -1981,6 +2432,7 @@ static void bnxt_get_default_speeds(struct ethtool_link_ksettings *lk_ksettings,
base->duplex = DUPLEX_HALF;
if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
base->duplex = DUPLEX_FULL;
+ lk_ksettings->lanes = link_info->active_lanes;
} else if (!link_info->autoneg) {
base->speed = bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
base->duplex = DUPLEX_HALF;
@@ -2008,12 +2460,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
mutex_lock(&bp->link_lock);
bnxt_get_ethtool_modes(link_info, lk_ksettings);
media = bnxt_get_media(link_info);
- bnxt_get_ethtool_speeds(link_info->support_speeds,
- media, BNXT_SIG_MODE_NRZ,
- lk_ksettings->link_modes.supported);
- bnxt_get_ethtool_speeds(link_info->support_pam4_speeds,
- media, BNXT_SIG_MODE_PAM4,
- lk_ksettings->link_modes.supported);
+ bnxt_get_all_ethtool_support_speeds(link_info, media, lk_ksettings);
bnxt_fw_to_ethtool_support_fec(link_info, lk_ksettings);
link_mode = bnxt_get_link_mode(link_info);
if (link_mode != BNXT_LINK_MODE_UNKNOWN)
@@ -2026,20 +2473,10 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
lk_ksettings->link_modes.advertising);
base->autoneg = AUTONEG_ENABLE;
- bnxt_get_ethtool_speeds(link_info->advertising,
- media, BNXT_SIG_MODE_NRZ,
- lk_ksettings->link_modes.advertising);
- bnxt_get_ethtool_speeds(link_info->advertising_pam4,
- media, BNXT_SIG_MODE_PAM4,
- lk_ksettings->link_modes.advertising);
- if (link_info->phy_link_status == BNXT_LINK_LINK) {
- bnxt_get_ethtool_speeds(link_info->lp_auto_link_speeds,
- media, BNXT_SIG_MODE_NRZ,
- lk_ksettings->link_modes.lp_advertising);
- bnxt_get_ethtool_speeds(link_info->lp_auto_pam4_link_speeds,
- media, BNXT_SIG_MODE_PAM4,
- lk_ksettings->link_modes.lp_advertising);
- }
+ bnxt_get_all_ethtool_adv_speeds(link_info, media, lk_ksettings);
+ if (link_info->phy_link_status == BNXT_LINK_LINK)
+ bnxt_get_all_ethtool_lp_speeds(link_info, media,
+ lk_ksettings);
} else {
base->autoneg = AUTONEG_DISABLE;
}
@@ -2074,6 +2511,7 @@ bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes)
struct bnxt *bp = netdev_priv(dev);
struct bnxt_link_info *link_info = &bp->link_info;
u16 support_pam4_spds = link_info->support_pam4_speeds;
+ u16 support_spds2 = link_info->support_speeds2;
u16 support_spds = link_info->support_speeds;
u8 sig_mode = BNXT_SIG_MODE_NRZ;
u32 lanes_needed = 1;
@@ -2085,7 +2523,8 @@ bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes)
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB;
break;
case SPEED_1000:
- if (support_spds & BNXT_LINK_SPEED_MSK_1GB)
+ if ((support_spds & BNXT_LINK_SPEED_MSK_1GB) ||
+ (support_spds2 & BNXT_LINK_SPEEDS2_MSK_1GB))
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
break;
case SPEED_2500:
@@ -2093,7 +2532,8 @@ bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes)
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB;
break;
case SPEED_10000:
- if (support_spds & BNXT_LINK_SPEED_MSK_10GB)
+ if ((support_spds & BNXT_LINK_SPEED_MSK_10GB) ||
+ (support_spds2 & BNXT_LINK_SPEEDS2_MSK_10GB))
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
break;
case SPEED_20000:
@@ -2103,26 +2543,34 @@ bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes)
}
break;
case SPEED_25000:
- if (support_spds & BNXT_LINK_SPEED_MSK_25GB)
+ if ((support_spds & BNXT_LINK_SPEED_MSK_25GB) ||
+ (support_spds2 & BNXT_LINK_SPEEDS2_MSK_25GB))
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
break;
case SPEED_40000:
- if (support_spds & BNXT_LINK_SPEED_MSK_40GB) {
+ if ((support_spds & BNXT_LINK_SPEED_MSK_40GB) ||
+ (support_spds2 & BNXT_LINK_SPEEDS2_MSK_40GB)) {
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
lanes_needed = 4;
}
break;
case SPEED_50000:
- if ((support_spds & BNXT_LINK_SPEED_MSK_50GB) && lanes != 1) {
+ if (((support_spds & BNXT_LINK_SPEED_MSK_50GB) ||
+ (support_spds2 & BNXT_LINK_SPEEDS2_MSK_50GB)) &&
+ lanes != 1) {
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
lanes_needed = 2;
} else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_50GB) {
fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB;
sig_mode = BNXT_SIG_MODE_PAM4;
+ } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_50GB_PAM4) {
+ fw_speed = BNXT_LINK_SPEED_50GB_PAM4;
+ sig_mode = BNXT_SIG_MODE_PAM4;
}
break;
case SPEED_100000:
- if ((support_spds & BNXT_LINK_SPEED_MSK_100GB) &&
+ if (((support_spds & BNXT_LINK_SPEED_MSK_100GB) ||
+ (support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB)) &&
lanes != 2 && lanes != 1) {
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB;
lanes_needed = 4;
@@ -2130,6 +2578,14 @@ bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes)
fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB;
sig_mode = BNXT_SIG_MODE_PAM4;
lanes_needed = 2;
+ } else if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB_PAM4) &&
+ lanes != 1) {
+ fw_speed = BNXT_LINK_SPEED_100GB_PAM4;
+ sig_mode = BNXT_SIG_MODE_PAM4;
+ lanes_needed = 2;
+ } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112) {
+ fw_speed = BNXT_LINK_SPEED_100GB_PAM4_112;
+ sig_mode = BNXT_SIG_MODE_PAM4_112;
}
break;
case SPEED_200000:
@@ -2137,6 +2593,27 @@ bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes)
fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB;
sig_mode = BNXT_SIG_MODE_PAM4;
lanes_needed = 4;
+ } else if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_200GB_PAM4) &&
+ lanes != 2) {
+ fw_speed = BNXT_LINK_SPEED_200GB_PAM4;
+ sig_mode = BNXT_SIG_MODE_PAM4;
+ lanes_needed = 4;
+ } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112) {
+ fw_speed = BNXT_LINK_SPEED_200GB_PAM4_112;
+ sig_mode = BNXT_SIG_MODE_PAM4_112;
+ lanes_needed = 2;
+ }
+ break;
+ case SPEED_400000:
+ if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_400GB_PAM4) &&
+ lanes != 4) {
+ fw_speed = BNXT_LINK_SPEED_400GB_PAM4;
+ sig_mode = BNXT_SIG_MODE_PAM4;
+ lanes_needed = 8;
+ } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112) {
+ fw_speed = BNXT_LINK_SPEED_400GB_PAM4_112;
+ sig_mode = BNXT_SIG_MODE_PAM4_112;
+ lanes_needed = 4;
}
break;
}
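
Where several signaling modes share one ethtool speed, the user-supplied lane count picks the encoding (lanes_needed defaults to 1):

	/* 100G: lanes 4 -> NRZ, lanes 2 -> PAM4-56, lanes 1 -> PAM4-112
	 * 200G: lanes 4 -> PAM4-56, lanes 2 -> PAM4-112
	 * 400G: lanes 8 -> PAM4-56, lanes 4 -> PAM4-112
	 */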
@@ -3910,7 +4387,8 @@ static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
* reading any further.
*/
dma_rmb();
- if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP) {
+ if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP ||
+ TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_V3_CMP) {
rc = bnxt_rx_loopback(bp, cpr, raw_cons, pkt_size);
raw_cons = NEXT_RAW_CMP(raw_cons);
raw_cons = NEXT_RAW_CMP(raw_cons);
@@ -3934,8 +4412,8 @@ static int bnxt_run_loopback(struct bnxt *bp)
int rc;
cpr = &rxr->bnapi->cp_ring;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
- cpr = cpr->cp_ring_arr[BNXT_RX_HDL];
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ cpr = rxr->rx_cpr;
pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
skb = netdev_alloc_skb(bp->dev, pkt_size);
if (!skb)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index d5fad5a3cd..e957abd704 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -40,6 +40,8 @@ struct hwrm_resp_hdr {
#define TLV_TYPE_ROCE_SP_COMMAND 0x3UL
#define TLV_TYPE_QUERY_ROCE_CC_GEN1 0x4UL
#define TLV_TYPE_MODIFY_ROCE_CC_GEN1 0x5UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN2 0x6UL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN2 0x7UL
#define TLV_TYPE_ENGINE_CKV_ALIAS_ECC_PUBLIC_KEY 0x8001UL
#define TLV_TYPE_ENGINE_CKV_IV 0x8003UL
#define TLV_TYPE_ENGINE_CKV_AUTH_TAG 0x8004UL
@@ -196,6 +198,9 @@ struct cmd_nums {
#define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG 0x8aUL
#define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG 0x8bUL
#define HWRM_QUEUE_QCAPS 0x8cUL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_QCFG 0x8dUL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG 0x8eUL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_QCFG 0x8fUL
#define HWRM_CFA_L2_FILTER_ALLOC 0x90UL
#define HWRM_CFA_L2_FILTER_FREE 0x91UL
#define HWRM_CFA_L2_FILTER_CFG 0x92UL
@@ -214,6 +219,7 @@ struct cmd_nums {
#define HWRM_TUNNEL_DST_PORT_QUERY 0xa0UL
#define HWRM_TUNNEL_DST_PORT_ALLOC 0xa1UL
#define HWRM_TUNNEL_DST_PORT_FREE 0xa2UL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG 0xa3UL
#define HWRM_STAT_CTX_ENG_QUERY 0xafUL
#define HWRM_STAT_CTX_ALLOC 0xb0UL
#define HWRM_STAT_CTX_FREE 0xb1UL
@@ -261,6 +267,7 @@ struct cmd_nums {
#define HWRM_PORT_EP_TX_CFG 0xdbUL
#define HWRM_PORT_CFG 0xdcUL
#define HWRM_PORT_QCFG 0xddUL
+ #define HWRM_PORT_MAC_QCAPS 0xdfUL
#define HWRM_TEMP_MONITOR_QUERY 0xe0UL
#define HWRM_REG_POWER_QUERY 0xe1UL
#define HWRM_CORE_FREQUENCY_QUERY 0xe2UL
@@ -392,6 +399,10 @@ struct cmd_nums {
#define HWRM_FUNC_KEY_CTX_FREE 0x1adUL
#define HWRM_FUNC_LAG_MODE_CFG 0x1aeUL
#define HWRM_FUNC_LAG_MODE_QCFG 0x1afUL
+ #define HWRM_FUNC_LAG_CREATE 0x1b0UL
+ #define HWRM_FUNC_LAG_UPDATE 0x1b1UL
+ #define HWRM_FUNC_LAG_FREE 0x1b2UL
+ #define HWRM_FUNC_LAG_QCFG 0x1b3UL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -406,9 +417,9 @@ struct cmd_nums {
#define HWRM_MFG_FRU_EEPROM_READ 0x20bUL
#define HWRM_MFG_SOC_IMAGE 0x20cUL
#define HWRM_MFG_SOC_QSTATUS 0x20dUL
- #define HWRM_MFG_PARAM_SEEPROM_SYNC 0x20eUL
- #define HWRM_MFG_PARAM_SEEPROM_READ 0x20fUL
- #define HWRM_MFG_PARAM_SEEPROM_HEALTH 0x210UL
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_FINALIZE 0x20eUL
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_READ 0x20fUL
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_HEALTH 0x210UL
#define HWRM_MFG_PRVSN_EXPORT_CSR 0x211UL
#define HWRM_MFG_PRVSN_IMPORT_CERT 0x212UL
#define HWRM_MFG_PRVSN_GET_STATE 0x213UL
@@ -418,6 +429,16 @@ struct cmd_nums {
#define HWRM_MFG_SELFTEST_EXEC 0x217UL
#define HWRM_STAT_GENERIC_QSTATS 0x218UL
#define HWRM_MFG_PRVSN_EXPORT_CERT 0x219UL
+ #define HWRM_STAT_DB_ERROR_QSTATS 0x21aUL
+ #define HWRM_UDCC_QCAPS 0x258UL
+ #define HWRM_UDCC_CFG 0x259UL
+ #define HWRM_UDCC_QCFG 0x25aUL
+ #define HWRM_UDCC_SESSION_CFG 0x25bUL
+ #define HWRM_UDCC_SESSION_QCFG 0x25cUL
+ #define HWRM_UDCC_SESSION_QUERY 0x25dUL
+ #define HWRM_UDCC_COMP_CFG 0x25eUL
+ #define HWRM_UDCC_COMP_QCFG 0x25fUL
+ #define HWRM_UDCC_COMP_QUERY 0x260UL
#define HWRM_TF 0x2bcUL
#define HWRM_TF_VERSION_GET 0x2bdUL
#define HWRM_TF_SESSION_OPEN 0x2c6UL
@@ -582,9 +603,9 @@ struct hwrm_err_output {
#define HWRM_TARGET_ID_TOOLS 0xFFFD
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
-#define HWRM_VERSION_UPDATE 2
-#define HWRM_VERSION_RSVD 171
-#define HWRM_VERSION_STR "1.10.2.171"
+#define HWRM_VERSION_UPDATE 3
+#define HWRM_VERSION_RSVD 15
+#define HWRM_VERSION_STR "1.10.3.15"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -816,7 +837,8 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE 0x48UL
#define ASYNC_EVENT_CMPL_EVENT_ID_HW_DOORBELL_RECOVERY_READ_ERROR 0x49UL
#define ASYNC_EVENT_CMPL_EVENT_ID_CTX_ERROR 0x4aUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x4bUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_UDCC_SESSION_CHANGE 0x4bUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x4cUL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -1632,7 +1654,7 @@ struct hwrm_func_qcaps_input {
u8 unused_0[6];
};
-/* hwrm_func_qcaps_output (size:896b/112B) */
+/* hwrm_func_qcaps_output (size:1088b/136B) */
struct hwrm_func_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -1736,21 +1758,29 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_PRIMATE 0x10UL
__le16 max_key_ctxs_alloc;
__le32 flags_ext2;
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED 0x1UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_QUIC_SUPPORTED 0x2UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_KDNET_SUPPORTED 0x4UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED 0x8UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_DBR_DROP_RECOVERY_SUPPORTED 0x10UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_GENERIC_STATS_SUPPORTED 0x20UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED 0x40UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_SYNCE_SUPPORTED 0x80UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_V0_SUPPORTED 0x100UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED 0x200UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_HW_LAG_SUPPORTED 0x400UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_ON_CHIP_CTX_SUPPORTED 0x800UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_STEERING_TAG_SUPPORTED 0x1000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_ENHANCED_VF_SCALE_SUPPORTED 0x2000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_KEY_XID_PARTITION_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_QUIC_SUPPORTED 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_KDNET_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_DBR_DROP_RECOVERY_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_GENERIC_STATS_SUPPORTED 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SYNCE_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_V0_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_HW_LAG_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_ON_CHIP_CTX_SUPPORTED 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_STEERING_TAG_SUPPORTED 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_ENHANCED_VF_SCALE_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_KEY_XID_PARTITION_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_CONCURRENT_KTLS_QUIC_SUPPORTED 0x8000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_CROSS_TC_CAP_SUPPORTED 0x10000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_PER_TC_CAP_SUPPORTED 0x20000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_PER_TC_RESERVATION_SUPPORTED 0x40000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_DB_ERROR_STATS_SUPPORTED 0x80000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED 0x100000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_UDCC_SUPPORTED 0x200000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TIMED_TX_SO_TXTIME_SUPPORTED 0x400000UL
__le16 tunnel_disable_flag;
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL
@@ -1760,15 +1790,21 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_IPINIP 0x20UL
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_MPLS 0x40UL
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_PPPOE 0x80UL
- u8 key_xid_partition_cap;
- #define FUNC_QCAPS_RESP_KEY_XID_PARTITION_CAP_TKC 0x1UL
- #define FUNC_QCAPS_RESP_KEY_XID_PARTITION_CAP_RKC 0x2UL
- #define FUNC_QCAPS_RESP_KEY_XID_PARTITION_CAP_QUIC_TKC 0x4UL
- #define FUNC_QCAPS_RESP_KEY_XID_PARTITION_CAP_QUIC_RKC 0x8UL
- u8 unused_1;
+ __le16 xid_partition_cap;
+ #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_KTLS_TKC 0x1UL
+ #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_KTLS_RKC 0x2UL
+ #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_QUIC_TKC 0x4UL
+ #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_QUIC_RKC 0x8UL
u8 device_serial_number[8];
__le16 ctxs_per_partition;
- u8 unused_2[5];
+ u8 unused_2[2];
+ __le32 roce_vf_max_av;
+ __le32 roce_vf_max_cq;
+ __le32 roce_vf_max_mrw;
+ __le32 roce_vf_max_qp;
+ __le32 roce_vf_max_srq;
+ __le32 roce_vf_max_gid;
+ u8 unused_3[3];
u8 valid;
};
@@ -1783,7 +1819,7 @@ struct hwrm_func_qcfg_input {
u8 unused_0[6];
};
-/* hwrm_func_qcfg_output (size:1024b/128B) */
+/* hwrm_func_qcfg_output (size:1280b/160B) */
struct hwrm_func_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -1892,7 +1928,7 @@ struct hwrm_func_qcfg_output {
__le16 alloc_msix;
__le16 registered_vfs;
__le16 l2_doorbell_bar_size_kb;
- u8 unused_1;
+ u8 active_endpoints;
u8 always_1;
__le32 reset_addr_poll;
__le16 legacy_l2_db_size_kb;
@@ -1952,15 +1988,26 @@ struct hwrm_func_qcfg_output {
u8 kdnet_pcie_function;
__le16 port_kdnet_fid;
u8 unused_5[2];
- __le32 alloc_tx_key_ctxs;
- __le32 alloc_rx_key_ctxs;
+ __le32 num_ktls_tx_key_ctxs;
+ __le32 num_ktls_rx_key_ctxs;
u8 lag_id;
u8 parif;
- u8 unused_6[5];
+ u8 fw_lag_id;
+ u8 unused_6;
+ __le32 num_quic_tx_key_ctxs;
+ __le32 num_quic_rx_key_ctxs;
+ __le32 roce_max_av_per_vf;
+ __le32 roce_max_cq_per_vf;
+ __le32 roce_max_mrw_per_vf;
+ __le32 roce_max_qp_per_vf;
+ __le32 roce_max_srq_per_vf;
+ __le32 roce_max_gid_per_vf;
+ __le16 xid_partition_cfg;
+ u8 unused_7;
u8 valid;
};
-/* hwrm_func_cfg_input (size:1088b/136B) */
+/* hwrm_func_cfg_input (size:1280b/160B) */
struct hwrm_func_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -1996,7 +2043,6 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x10000000UL
#define FUNC_CFG_REQ_FLAGS_BD_METADATA_ENABLE 0x20000000UL
#define FUNC_CFG_REQ_FLAGS_BD_METADATA_DISABLE 0x40000000UL
- #define FUNC_CFG_REQ_FLAGS_KEY_CTX_ASSETS_TEST 0x80000000UL
__le32 enables;
#define FUNC_CFG_REQ_ENABLES_ADMIN_MTU 0x1UL
#define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
@@ -2028,8 +2074,8 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_ENABLES_PARTITION_MAX_BW 0x8000000UL
#define FUNC_CFG_REQ_ENABLES_TPID 0x10000000UL
#define FUNC_CFG_REQ_ENABLES_HOST_MTU 0x20000000UL
- #define FUNC_CFG_REQ_ENABLES_TX_KEY_CTXS 0x40000000UL
- #define FUNC_CFG_REQ_ENABLES_RX_KEY_CTXS 0x80000000UL
+ #define FUNC_CFG_REQ_ENABLES_KTLS_TX_KEY_CTXS 0x40000000UL
+ #define FUNC_CFG_REQ_ENABLES_KTLS_RX_KEY_CTXS 0x80000000UL
__le16 admin_mtu;
__le16 mru;
__le16 num_rsscos_ctxs;
@@ -2139,10 +2185,21 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
__be16 tpid;
__le16 host_mtu;
- u8 unused_0[4];
+ __le32 flags2;
+ #define FUNC_CFG_REQ_FLAGS2_KTLS_KEY_CTX_ASSETS_TEST 0x1UL
+ #define FUNC_CFG_REQ_FLAGS2_QUIC_KEY_CTX_ASSETS_TEST 0x2UL
__le32 enables2;
- #define FUNC_CFG_REQ_ENABLES2_KDNET 0x1UL
- #define FUNC_CFG_REQ_ENABLES2_DB_PAGE_SIZE 0x2UL
+ #define FUNC_CFG_REQ_ENABLES2_KDNET 0x1UL
+ #define FUNC_CFG_REQ_ENABLES2_DB_PAGE_SIZE 0x2UL
+ #define FUNC_CFG_REQ_ENABLES2_QUIC_TX_KEY_CTXS 0x4UL
+ #define FUNC_CFG_REQ_ENABLES2_QUIC_RX_KEY_CTXS 0x8UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_AV_PER_VF 0x10UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_CQ_PER_VF 0x20UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_MRW_PER_VF 0x40UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_QP_PER_VF 0x80UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_SRQ_PER_VF 0x100UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_GID_PER_VF 0x200UL
+ #define FUNC_CFG_REQ_ENABLES2_XID_PARTITION_CFG 0x400UL
u8 port_kdnet_mode;
#define FUNC_CFG_REQ_PORT_KDNET_MODE_DISABLED 0x0UL
#define FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED 0x1UL
@@ -2165,7 +2222,18 @@ struct hwrm_func_cfg_input {
__le32 num_ktls_rx_key_ctxs;
__le32 num_quic_tx_key_ctxs;
__le32 num_quic_rx_key_ctxs;
- __le32 unused_2;
+ __le32 roce_max_av_per_vf;
+ __le32 roce_max_cq_per_vf;
+ __le32 roce_max_mrw_per_vf;
+ __le32 roce_max_qp_per_vf;
+ __le32 roce_max_srq_per_vf;
+ __le32 roce_max_gid_per_vf;
+ __le16 xid_partition_cfg;
+ #define FUNC_CFG_REQ_XID_PARTITION_CFG_KTLS_TKC 0x1UL
+ #define FUNC_CFG_REQ_XID_PARTITION_CFG_KTLS_RKC 0x2UL
+ #define FUNC_CFG_REQ_XID_PARTITION_CFG_QUIC_TKC 0x4UL
+ #define FUNC_CFG_REQ_XID_PARTITION_CFG_QUIC_RKC 0x8UL
+ __le16 unused_2;
};
/* hwrm_func_cfg_output (size:128b/16B) */
@@ -2604,7 +2672,7 @@ struct hwrm_func_vf_resource_cfg_input {
__le32 max_quic_rx_key_ctxs;
};
-/* hwrm_func_vf_resource_cfg_output (size:320b/40B) */
+/* hwrm_func_vf_resource_cfg_output (size:384b/48B) */
struct hwrm_func_vf_resource_cfg_output {
__le16 error_code;
__le16 req_type;
@@ -2618,8 +2686,10 @@ struct hwrm_func_vf_resource_cfg_output {
__le16 reserved_vnics;
__le16 reserved_stat_ctx;
__le16 reserved_hw_ring_grps;
- __le32 reserved_tx_key_ctxs;
- __le32 reserved_rx_key_ctxs;
+ __le32 reserved_ktls_tx_key_ctxs;
+ __le32 reserved_ktls_rx_key_ctxs;
+ __le32 reserved_quic_tx_key_ctxs;
+ __le32 reserved_quic_rx_key_ctxs;
u8 unused_0[7];
u8 valid;
};
@@ -3441,7 +3511,8 @@ struct hwrm_func_ptp_cfg_input {
#define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_4K 0x1UL
#define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_8K 0x2UL
#define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_10M 0x3UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_LAST FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_10M
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_25M 0x4UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_LAST FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_25M
u8 unused_0[3];
__le32 ptp_freq_adj_ext_period;
__le32 ptp_freq_adj_ext_up;
@@ -3627,28 +3698,28 @@ struct hwrm_func_backing_store_qcfg_v2_input {
__le16 target_id;
__le64 resp_addr;
__le16 type;
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TKC 0x13UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RKC 0x14UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_TKC 0x1aUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_RKC 0x1bUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TKC 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_TKC 0x1aUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_RKC 0x1bUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_XID_PARTITION_TABLE 0x1dUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID
__le16 instance;
u8 rsvd[4];
};
@@ -3744,6 +3815,15 @@ struct mrav_split_entries {
__le32 rsvd2[2];
};
+/* ts_split_entries (size:128b/16B) */
+struct ts_split_entries {
+ __le32 region_num_entries;
+ u8 tsid;
+ u8 lkup_static_bkt_cnt_exp[2];
+ u8 rsvd;
+ __le32 rsvd2[2];
+};
+
/* hwrm_func_backing_store_qcaps_v2_input (size:192b/24B) */
struct hwrm_func_backing_store_qcaps_v2_input {
__le16 req_type;
@@ -3761,8 +3841,8 @@ struct hwrm_func_backing_store_qcaps_v2_input {
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TKC 0x13UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_KTLS_TKC 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_KTLS_RKC 0x14UL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
@@ -3793,8 +3873,8 @@ struct hwrm_func_backing_store_qcaps_v2_output {
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TKC 0x13UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_KTLS_TKC 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_KTLS_RKC 0x14UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL
@@ -3838,56 +3918,55 @@ struct hwrm_func_backing_store_qcaps_v2_output {
/* hwrm_func_dbr_pacing_qcfg_input (size:128b/16B) */
struct hwrm_func_dbr_pacing_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
};
/* hwrm_func_dbr_pacing_qcfg_output (size:512b/64B) */
struct hwrm_func_dbr_pacing_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
-#define FUNC_DBR_PACING_QCFG_RESP_FLAGS_DBR_NQ_EVENT_ENABLED 0x1UL
- u8 unused_0[7];
- __le32 dbr_stat_db_fifo_reg;
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK 0x3UL
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_SFT 0
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_PCIE_CFG 0x0UL
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_GRC 0x1UL
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR0 0x2UL
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1 0x3UL
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_LAST \
- FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_MASK 0xfffffffcUL
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SFT 2
- __le32 dbr_stat_db_fifo_reg_watermark_mask;
- u8 dbr_stat_db_fifo_reg_watermark_shift;
- u8 unused_1[3];
- __le32 dbr_stat_db_fifo_reg_fifo_room_mask;
- u8 dbr_stat_db_fifo_reg_fifo_room_shift;
- u8 unused_2[3];
- __le32 dbr_throttling_aeq_arm_reg;
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_MASK 0x3UL
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_SFT 0
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_PCIE_CFG 0x0UL
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_GRC 0x1UL
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR0 0x2UL
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1 0x3UL
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_LAST \
- FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_MASK 0xfffffffcUL
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SFT 2
- u8 dbr_throttling_aeq_arm_reg_val;
- u8 unused_3[7];
- __le32 primary_nq_id;
- __le32 pacing_threshold;
- u8 unused_4[7];
- u8 valid;
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define FUNC_DBR_PACING_QCFG_RESP_FLAGS_DBR_NQ_EVENT_ENABLED 0x1UL
+ u8 unused_0[7];
+ __le32 dbr_stat_db_fifo_reg;
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK 0x3UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_SFT 0
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_GRC 0x1UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR0 0x2UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1 0x3UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_LAST FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_MASK 0xfffffffcUL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SFT 2
+ __le32 dbr_stat_db_fifo_reg_watermark_mask;
+ u8 dbr_stat_db_fifo_reg_watermark_shift;
+ u8 unused_1[3];
+ __le32 dbr_stat_db_fifo_reg_fifo_room_mask;
+ u8 dbr_stat_db_fifo_reg_fifo_room_shift;
+ u8 unused_2[3];
+ __le32 dbr_throttling_aeq_arm_reg;
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_MASK 0x3UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_SFT 0
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_GRC 0x1UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR0 0x2UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1 0x3UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_LAST FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_MASK 0xfffffffcUL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SFT 2
+ u8 dbr_throttling_aeq_arm_reg_val;
+ u8 unused_3[3];
+ __le32 dbr_stat_db_max_fifo_depth;
+ __le32 primary_nq_id;
+ __le32 pacing_threshold;
+ u8 unused_4[7];
+ u8 valid;
};
/* hwrm_func_drv_if_change_input (size:192b/24B) */
@@ -3915,7 +3994,7 @@ struct hwrm_func_drv_if_change_output {
u8 valid;
};
-/* hwrm_port_phy_cfg_input (size:448b/56B) */
+/* hwrm_port_phy_cfg_input (size:512b/64B) */
struct hwrm_port_phy_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -3960,6 +4039,8 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
#define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED 0x800UL
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK 0x1000UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2 0x2000UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK 0x4000UL
__le16 port_id;
__le16 force_link_speed;
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL
@@ -3990,7 +4071,9 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL
#define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL
#define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
- u8 unused_0;
+ u8 mgmt_flag;
+ #define PORT_PHY_CFG_REQ_MGMT_FLAG_LINK_RELEASE 0x1UL
+ #define PORT_PHY_CFG_REQ_MGMT_FLAG_MGMT_VALID 0x80UL
__le16 auto_link_speed;
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB 0x1UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB 0xaUL
@@ -4054,7 +4137,36 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_50G 0x1UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_100G 0x2UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_200G 0x4UL
- u8 unused_2[2];
+ __le16 force_link_speeds2;
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB_PAM4_56 0x1f5UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_56 0x3e9UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_56 0x7d1UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_56 0xfa1UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112
+ __le16 auto_link_speeds2_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_1GB 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_10GB 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_25GB 0x4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_40GB 0x8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_50GB 0x10UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB 0x20UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_112 0x1000UL
+ u8 unused_2[6];
};
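
The new force_link_speeds2 values follow a readable pattern: the speed in 100 Mb/s units plus a signaling suffix in the last decimal digit (0 = NRZ, 1 = PAM4-56, 2 = PAM4-112), so 0x3e8/0x3e9/0x3ea are 100G in the three modes. A hypothetical decode helper built on that observation:

static inline u32 fw_speeds2_100mbps(u16 v)
{
	return v - (v % 10);	/* strip the signaling suffix */
}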
/* hwrm_port_phy_cfg_output (size:128b/16B) */
@@ -4104,7 +4216,8 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_SIGNAL_MODE_SFT 0
#define PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ 0x0UL
#define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4 0x1UL
- #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_LAST PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112 0x2UL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_LAST PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112
#define PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK 0xf0UL
#define PORT_PHY_QCFG_RESP_ACTIVE_FEC_SFT 4
#define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE (0x0UL << 4)
@@ -4127,6 +4240,7 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_200GB 0x7d0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_400GB 0xfa0UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB
u8 duplex_cfg;
@@ -4270,7 +4384,23 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2 0x25UL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2 0x26UL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2 0x27UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR 0x28UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR 0x29UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR 0x2aUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER 0x2bUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR2 0x2cUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR2 0x2dUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR2 0x2eUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER2 0x2fUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR8 0x30UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR8 0x31UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR8 0x32UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER8 0x33UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR4 0x34UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4 0x35UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4 0x36UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4 0x37UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4
u8 media_type;
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
@@ -4366,6 +4496,7 @@ struct hwrm_port_phy_qcfg_output {
u8 option_flags;
#define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT 0x1UL
#define PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN 0x2UL
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_SPEEDS2_SUPPORTED 0x4UL
char phy_vendor_name[16];
char phy_vendor_partnumber[16];
__le16 support_pam4_speeds;
@@ -4387,7 +4518,53 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_200GB 0x4UL
u8 link_down_reason;
#define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_RF 0x1UL
- u8 unused_0[7];
+ __le16 support_speeds2;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_1GB 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_10GB 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_25GB 0x4UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_40GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_800GB_PAM4_112 0x2000UL
+ __le16 force_link_speeds2;
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_50GB_PAM4_56 0x1f5UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB_PAM4_56 0x3e9UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_200GB_PAM4_56 0x7d1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_400GB_PAM4_56 0xfa1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_112 0x1f42UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_112
+ __le16 auto_link_speeds2;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_1GB 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_10GB 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_25GB 0x4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_40GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_50GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_800GB_PAM4_112 0x2000UL
+ u8 active_lanes;
u8 valid;
};
@@ -4426,6 +4603,7 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL
#define PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB 0x200UL
#define PORT_MAC_CFG_REQ_ENABLES_PTP_ADJ_PHASE 0x400UL
+ #define PORT_MAC_CFG_REQ_ENABLES_PTP_LOAD_CONTROL 0x800UL
__le16 port_id;
u8 ipg;
u8 lpbk;
@@ -4459,7 +4637,12 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
u8 unused_0[3];
__le32 ptp_freq_adj_ppb;
- u8 unused_1[4];
+ u8 unused_1[3];
+ u8 ptp_load_control;
+ #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_NONE 0x0UL
+ #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_IMMEDIATE 0x1UL
+ #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_PPS_EVENT 0x2UL
+ #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_LAST PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_PPS_EVENT
__le64 ptp_adj_phase;
};
@@ -4504,6 +4687,7 @@ struct hwrm_port_mac_ptp_qcfg_output {
#define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x8UL
#define PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK 0x10UL
#define PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED 0x20UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME 0x40UL
u8 unused_0[3];
__le32 rx_ts_reg_off_lower;
__le32 rx_ts_reg_off_upper;
@@ -4968,7 +5152,7 @@ struct hwrm_port_phy_qcaps_input {
u8 unused_0[6];
};
-/* hwrm_port_phy_qcaps_output (size:256b/32B) */
+/* hwrm_port_phy_qcaps_output (size:320b/40B) */
struct hwrm_port_phy_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -5051,7 +5235,40 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL
#define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL
#define PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED 0x4UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED 0x8UL
u8 internal_port_cnt;
+ u8 unused_0;
+ __le16 supported_speeds2_force_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_1GB 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_10GB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_25GB 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_40GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_50GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_800GB_PAM4_112 0x2000UL
+ __le16 supported_speeds2_auto_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_1GB 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_10GB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_25GB 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_40GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_50GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_800GB_PAM4_112 0x2000UL
+ u8 unused_1[3];
u8 valid;
};
@@ -5472,6 +5689,30 @@ struct hwrm_port_led_qcaps_output {
u8 valid;
};
+/* hwrm_port_mac_qcaps_input (size:192b/24B) */
+struct hwrm_port_mac_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_mac_qcaps_output (size:128b/16B) */
+struct hwrm_port_mac_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define PORT_MAC_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED 0x1UL
+ #define PORT_MAC_QCAPS_RESP_FLAGS_REMOTE_LPBK_SUPPORTED 0x2UL
+ u8 unused_0[6];
+ u8 valid;
+};
+
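
This pairs with the new HWRM_PORT_MAC_QCAPS opcode (0xdf) added to cmd_nums above. A minimal sketch of issuing the query from the driver with bnxt's hwrm_req_* helpers; the no_lpbk local is illustrative:

	struct hwrm_port_mac_qcaps_output *resp;
	struct hwrm_port_mac_qcaps_input *req;
	bool no_lpbk = false;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_QCAPS);
	if (rc)
		return rc;
	req->port_id = cpu_to_le16(bp->pf.port_id);
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc)
		no_lpbk = resp->flags &
			  PORT_MAC_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED;
	hwrm_req_drop(bp, req);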
/* hwrm_queue_qportcfg_input (size:192b/24B) */
struct hwrm_queue_qportcfg_input {
__le16 req_type;
@@ -7488,7 +7729,7 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_RSVD 0xffUL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_RSVD
__le16 dst_id;
- __le16 mirror_vnic_id;
+ __le16 rfs_ring_tbl_idx;
u8 tunnel_type;
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
@@ -8201,6 +8442,7 @@ struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_NO_L2CTX_SUPPORTED 0x40000UL
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NIC_FLOW_STATS_SUPPORTED 0x80000UL
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED 0x100000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED 0x200000UL
u8 unused_0[3];
u8 valid;
};
@@ -8223,7 +8465,8 @@ struct hwrm_tunnel_dst_port_query_input {
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI 0xeUL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_SRV6 0xfUL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE
u8 tunnel_next_proto;
u8 unused_0[6];
};
@@ -8245,7 +8488,10 @@ struct hwrm_tunnel_dst_port_query_output {
#define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR5 0x20UL
#define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR6 0x40UL
#define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR7 0x80UL
- u8 unused_0[2];
+ u8 status;
+ #define TUNNEL_DST_PORT_QUERY_RESP_STATUS_CHIP_LEVEL 0x1UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_STATUS_FUNC_LEVEL 0x2UL
+ u8 unused_0;
u8 valid;
};
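
The formerly-unused byte now reports the scope at which the queried tunnel port is programmed, which, together with the GRE tunnel type added throughout this file, lets management software tell chip-wide allocations from per-function ones. A self-contained decode sketch (values per the defines above):

    #include <stdint.h>
    #include <stdio.h>

    #define STATUS_CHIP_LEVEL 0x1u	/* ..._RESP_STATUS_CHIP_LEVEL */
    #define STATUS_FUNC_LEVEL 0x2u	/* ..._RESP_STATUS_FUNC_LEVEL */

    static void print_tunnel_scope(uint8_t status)
    {
    	if (status & STATUS_CHIP_LEVEL)
    		puts("tunnel port programmed chip-wide");
    	if (status & STATUS_FUNC_LEVEL)
    		puts("tunnel port programmed for this function only");
    	if (!status)
    		puts("tunnel port not programmed");
    }

    int main(void)
    {
    	print_tunnel_scope(STATUS_CHIP_LEVEL);
    	return 0;
    }
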
@@ -8267,7 +8513,8 @@ struct hwrm_tunnel_dst_port_alloc_input {
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI 0xeUL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_SRV6 0xfUL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE
u8 tunnel_next_proto;
__be16 tunnel_dst_port_val;
u8 unused_0[4];
@@ -8284,7 +8531,8 @@ struct hwrm_tunnel_dst_port_alloc_output {
#define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_SUCCESS 0x0UL
#define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ALLOCATED 0x1UL
#define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_NO_RESOURCE 0x2UL
- #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_LAST TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_NO_RESOURCE
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ENABLED 0x3UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_LAST TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ENABLED
u8 upar_in_use;
#define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR0 0x1UL
#define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR1 0x2UL
@@ -8316,7 +8564,8 @@ struct hwrm_tunnel_dst_port_free_input {
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI 0xeUL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_SRV6 0xfUL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE
u8 tunnel_next_proto;
__le16 tunnel_dst_port_id;
u8 unused_0[4];
@@ -9717,7 +9966,7 @@ struct hwrm_nvm_get_dev_info_input {
__le64 resp_addr;
};
-/* hwrm_nvm_get_dev_info_output (size:640b/80B) */
+/* hwrm_nvm_get_dev_info_output (size:704b/88B) */
struct hwrm_nvm_get_dev_info_output {
__le16 error_code;
__le16 req_type;
@@ -9747,6 +9996,10 @@ struct hwrm_nvm_get_dev_info_output {
__le16 roce_fw_minor;
__le16 roce_fw_build;
__le16 roce_fw_patch;
+ __le16 netctrl_fw_major;
+ __le16 netctrl_fw_minor;
+ __le16 netctrl_fw_build;
+ __le16 netctrl_fw_patch;
u8 unused_0[7];
u8 valid;
};
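
The response grows four words describing the network-control firmware, in the same major/minor/build/patch layout as the neighbouring version fields. A trivial formatting sketch (demo type and values invented):

    #include <stdint.h>
    #include <stdio.h>

    struct fw_ver { uint16_t major, minor, build, patch; };	/* demo type */

    int main(void)
    {
    	struct fw_ver netctrl = { 228, 0, 1, 0 };	/* example values */

    	printf("netctrl fw %u.%u.%u.%u\n",
    	       netctrl.major, netctrl.minor, netctrl.build, netctrl.patch);
    	return 0;
    }
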
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index 6e3da3362b..cc07660330 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -129,7 +129,7 @@ static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts)
}
resp = hwrm_req_hold(bp, req);
- rc = hwrm_req_send(bp, req);
+ rc = hwrm_req_send_silent(bp, req);
if (!rc)
*ts = le64_to_cpu(resp->ptp_msg_ts);
hwrm_req_drop(bp, req);
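
hwrm_req_send_silent() is the pre-existing variant that skips the HWRM layer's default error logging; it suits this query, which can fail benignly when no TX timestamp has been latched yet. The general pattern, sketched as a fragment:

    /* Sketch: silent send for requests allowed to fail in normal
     * operation; log at the call site, at lower severity, if at all. */
    rc = hwrm_req_send_silent(bp, req);
    if (rc)
    	netdev_dbg(bp->dev, "query failed benignly, rc = %d\n", rc);
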
@@ -319,15 +319,17 @@ static int bnxt_ptp_cfg_event(struct bnxt *bp, u8 event)
return hwrm_req_send(bp, req);
}
-void bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp)
+int bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
struct hwrm_port_mac_cfg_input *req;
+ int rc;
if (!ptp || !ptp->tstamp_filters)
- return;
+ return -EIO;
- if (hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG))
+ rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
+ if (rc)
goto out;
if (!(bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS) && (ptp->tstamp_filters &
@@ -342,15 +344,17 @@ void bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp)
req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
req->rx_ts_capture_ptp_msg_type = cpu_to_le16(ptp->rxctl);
- if (!hwrm_req_send(bp, req)) {
+ rc = hwrm_req_send(bp, req);
+ if (!rc) {
bp->ptp_all_rx_tstamp = !!(ptp->tstamp_filters &
PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE);
- return;
+ return 0;
}
ptp->tstamp_filters = 0;
out:
bp->ptp_all_rx_tstamp = 0;
netdev_warn(bp->dev, "Failed to configure HW packet timestamp filters\n");
+ return rc;
}
void bnxt_ptp_reapply_pps(struct bnxt *bp)
@@ -494,7 +498,6 @@ static int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
u32 flags = 0;
- int rc = 0;
switch (ptp->rx_filter) {
case HWTSTAMP_FILTER_ALL:
@@ -519,18 +522,7 @@ static int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
ptp->tstamp_filters = flags;
- if (netif_running(bp->dev)) {
- if (ptp->rx_filter == HWTSTAMP_FILTER_ALL) {
- bnxt_close_nic(bp, false, false);
- rc = bnxt_open_nic(bp, false, false);
- } else {
- bnxt_ptp_cfg_tstamp_filters(bp);
- }
- if (!rc && !ptp->tstamp_filters)
- rc = -EIO;
- }
-
- return rc;
+ return bnxt_ptp_cfg_tstamp_filters(bp);
}
int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
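
With the filters applied through HWRM at runtime and the error code propagated, the diff drops the close/open cycle for HWTSTAMP_FILTER_ALL and simply reports what bnxt_ptp_cfg_tstamp_filters() returns. A caller-side fragment of the resulting pattern (sketch, not the driver's code):

    /* Propagate the HWRM status directly instead of re-checking
     * ptp->tstamp_filters after the fact. */
    int rc = bnxt_ptp_cfg_tstamp_filters(bp);

    if (rc)
    	return rc;	/* HWRM failure now surfaces to the caller */
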
@@ -649,7 +641,7 @@ static int bnxt_map_ptp_regs(struct bnxt *bp)
int rc, i;
reg_arr = ptp->refclk_regs;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (BNXT_CHIP_P5(bp)) {
rc = bnxt_map_regs(bp, reg_arr, 2, BNXT_PTP_GRC_WIN);
if (rc)
return rc;
@@ -692,8 +684,8 @@ static void bnxt_stamp_tx_skb(struct bnxt *bp, struct sk_buff *skb)
timestamp.hwtstamp = ns_to_ktime(ns);
skb_tstamp_tx(ptp->tx_skb, &timestamp);
} else {
- netdev_err(bp->dev, "TS query for TX timer failed rc = %x\n",
- rc);
+ netdev_warn_once(bp->dev,
+ "TS query for TX timer failed rc = %x\n", rc);
}
dev_kfree_skb_any(ptp->tx_skb);
@@ -966,7 +958,7 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
rc = err;
goto out;
}
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (BNXT_CHIP_P5(bp)) {
spin_lock_bh(&ptp->ptp_lock);
bnxt_refclk_read(bp, NULL, &ptp->current_time);
WRITE_ONCE(ptp->old_time, ptp->current_time);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
index 34162e07a1..fce8dc39a7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
@@ -137,7 +137,7 @@ do { \
int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off);
void bnxt_ptp_update_current_time(struct bnxt *bp);
void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2);
-void bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp);
+int bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp);
void bnxt_ptp_reapply_pps(struct bnxt *bp);
int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index c722b3b417..175192ebaa 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -536,7 +536,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
if (rc)
return rc;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp);
vf_ring_grps = 0;
} else {
@@ -565,7 +565,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
req->min_l2_ctxs = cpu_to_le16(min);
req->min_vnics = cpu_to_le16(min);
req->min_stat_ctx = cpu_to_le16(min);
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
req->min_hw_ring_grps = cpu_to_le16(min);
} else {
vf_cp_rings /= num_vfs;
@@ -602,7 +602,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
req->max_stat_ctx = cpu_to_le16(vf_stat_ctx);
req->max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
req->max_rsscos_ctx = cpu_to_le16(vf_rss);
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
req->max_msix = cpu_to_le16(vf_msix / num_vfs);
hwrm_req_hold(bp, req);
@@ -630,7 +630,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
le16_to_cpu(req->min_rsscos_ctx) * n;
hw_resc->max_stat_ctxs -= le16_to_cpu(req->min_stat_ctx) * n;
hw_resc->max_vnics -= le16_to_cpu(req->min_vnics) * n;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
hw_resc->max_nqs -= vf_msix;
rc = pf->active_vfs;
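
The BNXT_FLAG_CHIP_P5 conversions follow one rule: paths shared by P5 and the newer P7 silicon test BNXT_FLAG_CHIP_P5_PLUS (as in the SR-IOV code above), while genuinely P5-only blocks, such as the PTP refclock mapping earlier, switch to the chip-specific BNXT_CHIP_P5() test. A toy model of the split (bit values invented; the real driver derives the chip class from chip_num):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FLAG_CHIP_P5		0x1u
    #define FLAG_CHIP_P7		0x2u
    #define FLAG_CHIP_P5_PLUS	(FLAG_CHIP_P5 | FLAG_CHIP_P7)

    static bool is_p5(uint32_t f)      { return (f & FLAG_CHIP_P5_PLUS) == FLAG_CHIP_P5; }
    static bool is_p5_plus(uint32_t f) { return f & FLAG_CHIP_P5_PLUS; }

    int main(void)
    {
    	uint32_t p7 = FLAG_CHIP_P7;

    	printf("P7 device: p5-only path %d, p5-plus path %d\n",
    	       is_p5(p7), is_p5_plus(p7));
    	return 0;
    }
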
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 6ba2b93986..195c02dc06 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -42,13 +42,10 @@ static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
for (i = 0; i < num_msix; i++) {
ent[i].vector = bp->irq_tbl[idx + i].vector;
ent[i].ring_idx = idx + i;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- ent[i].db_offset = DB_PF_OFFSET_P5;
- if (BNXT_VF(bp))
- ent[i].db_offset = DB_VF_OFFSET_P5;
- } else {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ ent[i].db_offset = bp->db_offset;
+ else
ent[i].db_offset = (idx + i) * 0x80;
- }
}
}
@@ -213,6 +210,9 @@ void bnxt_ulp_start(struct bnxt *bp, int err)
if (err)
return;
+ if (edev->ulp_tbl->msix_requested)
+ bnxt_fill_msix_vecs(bp, edev->msix_entries);
+
if (aux_priv) {
struct auxiliary_device *adev;
@@ -333,6 +333,7 @@ static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
edev->pdev = bp->pdev;
edev->l2_db_size = bp->db_size;
edev->l2_db_size_nc = bp->db_size;
+ edev->l2_db_offset = bp->db_offset;
if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
@@ -394,12 +395,13 @@ void bnxt_rdma_aux_device_init(struct bnxt *bp)
if (!edev)
goto aux_dev_uninit;
+ aux_priv->edev = edev;
+
ulp = kzalloc(sizeof(*ulp), GFP_KERNEL);
if (!ulp)
goto aux_dev_uninit;
edev->ulp_tbl = ulp;
- aux_priv->edev = edev;
bp->edev = edev;
bnxt_set_edev_info(edev, bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 6ff77f082e..b9e73de14b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -73,6 +73,10 @@ struct bnxt_en_dev {
* bytes mapped as non-
* cacheable.
*/
+ int l2_db_offset; /* Doorbell offset in
+ * bytes within
+ * l2_db_size_nc.
+ */
u16 chip_num;
u16 hw_ring_stats_size;
u16 pf_port_id;
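
The new member tells the RoCE auxiliary driver where its doorbells start inside the non-cacheable region the L2 driver describes. A hedged consumer sketch; the BAR number and size arithmetic are assumptions for illustration, not the aux driver's code:

    /* Hypothetical aux-driver use of the new field: map only the
     * doorbell window the L2 driver advertises. */
    static void __iomem *map_roce_db_sketch(struct bnxt_en_dev *edev)
    {
    	resource_size_t base = pci_resource_start(edev->pdev, 2);

    	return ioremap(base + edev->l2_db_offset,
    		       edev->l2_db_size_nc - edev->l2_db_offset);
    }
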
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 8cb9a99154..4079538bc3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -42,17 +42,17 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
/* fill up the first buffer */
prod = txr->tx_prod;
- tx_buf = &txr->tx_buf_ring[prod];
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
tx_buf->nr_frags = num_frags;
if (xdp)
tx_buf->page = virt_to_head_page(xdp->data);
- txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+ txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
flags = (len << TX_BD_LEN_SHIFT) |
((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) |
bnxt_lhint_arr[len >> 9];
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
- txbd->tx_bd_opaque = prod;
+ txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 1 + num_frags);
txbd->tx_bd_haddr = cpu_to_le64(mapping);
/* now let us fill up the frags into the next buffers */
@@ -66,10 +66,10 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
WRITE_ONCE(txr->tx_prod, prod);
/* first fill up the first buffer */
- frag_tx_buf = &txr->tx_buf_ring[prod];
+ frag_tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
frag_tx_buf->page = skb_frag_page(frag);
- txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+ txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
frag_len = skb_frag_size(frag);
flags = frag_len << TX_BD_LEN_SHIFT;
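
RING_TX() now takes bp because the ring mask is per-device rather than a compile-time constant: the free-running 16-bit producer index is masked down to a slot on every buffer-ring access, and SET_TX_OPAQUE() packs the producer plus BD count into the completion opaque. The masking idiom, reduced to a standalone demo (names invented):

    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 256u			/* must be a power of two */
    #define RING_MASK (RING_SIZE - 1)

    static uint16_t ring_tx(uint16_t prod)
    {
    	return prod & RING_MASK;	/* mirrors RING_TX(bp, prod) */
    }

    int main(void)
    {
    	uint16_t prod = 65534;		/* free-running counter near wrap */

    	for (int i = 0; i < 4; i++, prod++)
    		printf("counter %5u -> slot %3u\n", prod, ring_tx(prod));
    	return 0;
    }
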
@@ -120,20 +120,20 @@ static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
- struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
+ struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+ u16 tx_hw_cons = txr->tx_hw_cons;
bool rx_doorbell_needed = false;
- int nr_pkts = bnapi->tx_pkts;
struct bnxt_sw_tx_bd *tx_buf;
u16 tx_cons = txr->tx_cons;
u16 last_tx_cons = tx_cons;
- int i, j, frags;
+ int j, frags;
if (!budget)
return;
- for (i = 0; i < nr_pkts; i++) {
- tx_buf = &txr->tx_buf_ring[tx_cons];
+ while (RING_TX(bp, tx_cons) != tx_hw_cons) {
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, tx_cons)];
if (tx_buf->action == XDP_REDIRECT) {
struct pci_dev *pdev = bp->pdev;
@@ -153,20 +153,20 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
frags = tx_buf->nr_frags;
for (j = 0; j < frags; j++) {
tx_cons = NEXT_TX(tx_cons);
- tx_buf = &txr->tx_buf_ring[tx_cons];
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, tx_cons)];
page_pool_recycle_direct(rxr->page_pool, tx_buf->page);
}
} else {
- bnxt_sched_reset_txr(bp, txr, i);
+ bnxt_sched_reset_txr(bp, txr, tx_cons);
return;
}
tx_cons = NEXT_TX(tx_cons);
}
- bnapi->tx_pkts = 0;
+ bnapi->events &= ~BNXT_TX_CMP_EVENT;
WRITE_ONCE(txr->tx_cons, tx_cons);
if (rx_doorbell_needed) {
- tx_buf = &txr->tx_buf_ring[last_tx_cons];
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, last_tx_cons)];
bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod);
}
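
Reclaim is now driven by the hardware consumer index carried in the TX completion instead of a per-NAPI packet count, which also works when several TX rings feed one completion ring. The loop shape, reduced to a runnable demo:

    #include <stdint.h>
    #include <stdio.h>

    #define RING_MASK 255u

    int main(void)
    {
    	uint16_t sw_cons = 250, hw_cons = 4;	/* hw index is pre-masked */
    	unsigned int freed = 0;

    	while ((sw_cons & RING_MASK) != hw_cons) {
    		freed++;	/* free tx_buf_ring[sw_cons & RING_MASK] here */
    		sw_cons++;	/* NEXT_TX() */
    	}
    	printf("reclaimed %u slots, sw_cons now %u\n", freed, sw_cons);
    	return 0;
    }
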
@@ -242,7 +242,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
pdev = bp->pdev;
offset = bp->rx_offset;
- txr = rxr->bnapi->tx_ring;
+ txr = rxr->bnapi->tx_ring[0];
/* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
orig_data = xdp.data;
@@ -268,7 +268,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
case XDP_TX:
rx_buf = &rxr->rx_buf_ring[cons];
mapping = rx_buf->mapping - bp->rx_dma_offset;
- *event = 0;
+ *event &= BNXT_TX_CMP_EVENT;
if (unlikely(xdp_buff_has_frags(&xdp))) {
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(&xdp);
@@ -391,7 +391,7 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
{
struct net_device *dev = bp->dev;
- int tx_xdp = 0, rc, tc;
+ int tx_xdp = 0, tx_cp, rc, tc;
struct bpf_prog *old;
if (prog && !prog->aux->xdp_has_frags &&
@@ -407,7 +407,7 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
if (prog)
tx_xdp = bp->rx_nr_rings;
- tc = netdev_get_num_tc(dev);
+ tc = bp->num_tc;
if (!tc)
tc = 1;
rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
@@ -439,7 +439,8 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
}
bp->tx_nr_rings_xdp = tx_xdp;
bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
- bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
+ tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
+ bp->cp_nr_rings = max_t(int, tx_cp, bp->rx_nr_rings);
bnxt_set_tpa_flags(bp);
bnxt_set_ring_params(bp);
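
On chips where multiple TX rings share a completion ring, the CP ring count can no longer be assumed equal to the TX ring count, so bnxt_num_tx_to_cp() converts one to the other. A sketch of the presumed arithmetic, with per-TC rings of a queue set sharing one CP ring and XDP rings keeping their own; treat the formula as an assumption, not the driver's implementation:

    #include <stdio.h>

    /* Assumed model of bnxt_num_tx_to_cp(): not copied from the driver. */
    static int num_tx_to_cp(int tx, int tcs, int tx_xdp)
    {
    	return (tx - tx_xdp) / tcs + tx_xdp;
    }

    int main(void)
    {
    	/* 8 rings per TC x 2 TCs + 4 XDP rings = 20 TX rings */
    	printf("cp rings needed: %d\n", num_tx_to_cp(20, 2, 4));	/* 12 */
    	return 0;
    }
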
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index f52830dfb2..04964bbe08 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12745,24 +12745,23 @@ static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
return size;
}
-static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
+static int tg3_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh)
{
struct tg3 *tp = netdev_priv(dev);
int i;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
- if (!indir)
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (!rxfh->indir)
return 0;
for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
- indir[i] = tp->rss_ind_tbl[i];
+ rxfh->indir[i] = tp->rss_ind_tbl[i];
return 0;
}
-static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
- const u8 hfunc)
+static int tg3_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct tg3 *tp = netdev_priv(dev);
size_t i;
@@ -12770,15 +12769,16 @@ static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
/* We require at least one supported parameter to be changed and no
* change in any of the unsupported parameters
*/
- if (key ||
- (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ if (rxfh->key ||
+ (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP))
return -EOPNOTSUPP;
- if (!indir)
+ if (!rxfh->indir)
return 0;
for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
- tp->rss_ind_tbl[i] = indir[i];
+ tp->rss_ind_tbl[i] = rxfh->indir[i];
if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
return 0;
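
For reference, the tg3 conversion above follows the kernel-wide ethtool API change in this release that bundles the RSS get/set arguments into one parameter struct. A simplified rendering of its fields; see include/linux/ethtool.h in this tree for the authoritative definition:

    struct ethtool_rxfh_param {
    	u8	hfunc;		/* ETH_RSS_HASH_* */
    	u32	indir_size;
    	u32	*indir;		/* indirection table */
    	u8	key_size;
    	u8	*key;		/* hash key */
    	u32	rss_context;
    	u8	rss_delete;
    	u8	input_xfrm;
    };
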