author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:18:06 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:18:06 +0000
commit     638a9e433ecd61e64761352dbec1fa4f5874c941 (patch)
tree       fdbff74a238d7a5a7d1cef071b7230bc064b9f25 /drivers/net/ethernet/broadcom
parent     Releasing progress-linux version 6.9.12-1~progress7.99u1. (diff)
Merging upstream version 6.10.3.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/broadcom')
18 files changed, 1289 insertions, 536 deletions
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 1be6d14030..e5809ad5eb 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -1042,13 +1042,13 @@ static int b44_change_mtu(struct net_device *dev, int new_mtu)
 		/* We'll just catch it later when the
 		 * device is up'd.
 		 */
-		dev->mtu = new_mtu;
+		WRITE_ONCE(dev->mtu, new_mtu);
 		return 0;
 	}
 
 	spin_lock_irq(&bp->lock);
 	b44_halt(bp);
-	dev->mtu = new_mtu;
+	WRITE_ONCE(dev->mtu, new_mtu);
 	b44_init_rings(bp);
 	b44_init_hw(bp, B44_FULL_RESET);
 	spin_unlock_irq(&bp->lock);
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 3196c4dea0..3c0e3b9828 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1652,7 +1652,7 @@ static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
 	priv->rx_frag_size = SKB_DATA_ALIGN(priv->rx_buf_offset +
 					    priv->rx_buf_size) +
 			    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
-	dev->mtu = new_mtu;
+	WRITE_ONCE(dev->mtu, new_mtu);
 	return 0;
 }
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index b65b8592ad..6ec773e611 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -7912,7 +7912,7 @@ bnx2_change_mtu(struct net_device *dev, int new_mtu)
 {
 	struct bnx2 *bp = netdev_priv(dev);
 
-	dev->mtu = new_mtu;
+	WRITE_ONCE(dev->mtu, new_mtu);
 	return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
 				     false);
 }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index c9b6acd8c8..a8e07e5141 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -4902,7 +4902,7 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
 	 * because the actual alloc size is
 	 * only updated as part of load
 	 */
-	dev->mtu = new_mtu;
+	WRITE_ONCE(dev->mtu, new_mtu);
 
 	if (!bnx2x_mtu_allows_gro(new_mtu))
 		dev->features &= ~NETIF_F_GRO_HW;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 2b7936b3fb..43952689bf 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -76,7 +76,7 @@ NETIF_MSG_TX_ERR)
 
 MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
+MODULE_DESCRIPTION("Broadcom NetXtreme network driver");
 
 #define BNXT_RX_OFFSET		(NET_SKB_PAD + NET_IP_ALIGN)
 #define BNXT_RX_DMA_OFFSET	NET_SKB_PAD
@@ -137,6 +137,7 @@ static const struct {
 	[NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
 	[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
 	[NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
+	[NETXTREME_E_P7_VF] = { "Broadcom BCM5760X Virtual Function" },
 };
 
 static const struct pci_device_id bnxt_pci_tbl[] = {
@@ -211,6 +212,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
 	{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
 	{ PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
 	{ PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
+	{ PCI_VDEVICE(BROADCOM, 0x1819), .driver_data = NETXTREME_E_P7_VF },
 	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
 #endif
 	{ 0 }
@@ -294,7 +296,7 @@ static bool bnxt_vf_pciid(enum board_idx idx)
 	return (idx ==
NETXTREME_C_VF || idx == NETXTREME_E_VF || idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV || idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF || - idx == NETXTREME_E_P5_VF_HV); + idx == NETXTREME_E_P5_VF_HV || idx == NETXTREME_E_P7_VF); } #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID) @@ -1295,9 +1297,9 @@ static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, return RX_AGG_CMP_VALID(agg, *raw_cons); } -static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data, - unsigned int len, - dma_addr_t mapping) +static struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data, + unsigned int len, + dma_addr_t mapping) { struct bnxt *bp = bnapi->bp; struct pci_dev *pdev = bp->pdev; @@ -1317,6 +1319,39 @@ static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data, bp->rx_dir); skb_put(skb, len); + + return skb; +} + +static struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data, + unsigned int len, + dma_addr_t mapping) +{ + return bnxt_copy_data(bnapi, data, len, mapping); +} + +static struct sk_buff *bnxt_copy_xdp(struct bnxt_napi *bnapi, + struct xdp_buff *xdp, + unsigned int len, + dma_addr_t mapping) +{ + unsigned int metasize = 0; + u8 *data = xdp->data; + struct sk_buff *skb; + + len = xdp->data_end - xdp->data_meta; + metasize = xdp->data - xdp->data_meta; + data = xdp->data_meta; + + skb = bnxt_copy_data(bnapi, data, len, mapping); + if (!skb) + return skb; + + if (metasize) { + skb_metadata_set(skb, metasize); + __skb_pull(skb, metasize); + } + return skb; } @@ -1777,7 +1812,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping); if (!skb) { bnxt_abort_tpa(cpr, idx, agg_bufs); - cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1; + cpr->sw_stats->rx.rx_oom_discards += 1; return NULL; } } else { @@ -1787,7 +1822,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC); if (!new_data) { bnxt_abort_tpa(cpr, idx, agg_bufs); - cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1; + cpr->sw_stats->rx.rx_oom_discards += 1; return NULL; } @@ -1803,7 +1838,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, if (!skb) { skb_free_frag(data); bnxt_abort_tpa(cpr, idx, agg_bufs); - cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1; + cpr->sw_stats->rx.rx_oom_discards += 1; return NULL; } skb_reserve(skb, bp->rx_offset); @@ -1814,7 +1849,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true); if (!skb) { /* Page reuse already handled by bnxt_rx_pages(). 
*/ - cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1; + cpr->sw_stats->rx.rx_oom_discards += 1; return NULL; } } @@ -2072,7 +2107,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, rc = -EIO; if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) { - bnapi->cp_ring.sw_stats.rx.rx_buf_errors++; + bnapi->cp_ring.sw_stats->rx.rx_buf_errors++; if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) { netdev_warn_once(bp->dev, "RX buffer error %x\n", @@ -2100,14 +2135,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, } if (xdp_active) { - if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &data_ptr, &len, event)) { + if (bnxt_rx_xdp(bp, rxr, cons, &xdp, data, &data_ptr, &len, event)) { rc = 1; goto next_rx; } } if (len <= bp->rx_copy_thresh) { - skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr); + if (!xdp_active) + skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr); + else + skb = bnxt_copy_xdp(bnapi, &xdp, len, dma_addr); bnxt_reuse_rx_data(rxr, cons, data); if (!skb) { if (agg_bufs) { @@ -2185,7 +2223,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, } else { if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) { if (dev->features & NETIF_F_RXCSUM) - bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++; + bnapi->cp_ring.sw_stats->rx.rx_l4_csum_errors++; } } @@ -2222,7 +2260,7 @@ next_rx_no_prod_no_len: return rc; oom_next_rx: - cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1; + cpr->sw_stats->rx.rx_oom_discards += 1; rc = -ENOMEM; goto next_rx; } @@ -2271,7 +2309,7 @@ static int bnxt_force_rx_discard(struct bnxt *bp, } rc = bnxt_rx_pkt(bp, cpr, raw_cons, event); if (rc && rc != -EBUSY) - cpr->bnapi->cp_ring.sw_stats.rx.rx_netpoll_discards += 1; + cpr->sw_stats->rx.rx_netpoll_discards += 1; return rc; } @@ -2480,6 +2518,9 @@ static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2) } return false; } + case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED: + netdev_warn(bp->dev, "Speed change not supported with dual rate transceivers on this board\n"); + break; default: netdev_err(bp->dev, "FW reported unknown error type %u\n", err_type); @@ -3550,14 +3591,15 @@ static void bnxt_free_rx_rings(struct bnxt *bp) } static int bnxt_alloc_rx_page_pool(struct bnxt *bp, - struct bnxt_rx_ring_info *rxr) + struct bnxt_rx_ring_info *rxr, + int numa_node) { struct page_pool_params pp = { 0 }; pp.pool_size = bp->rx_agg_ring_size; if (BNXT_RX_PAGE_MODE(bp)) pp.pool_size += bp->rx_ring_size; - pp.nid = dev_to_node(&bp->pdev->dev); + pp.nid = numa_node; pp.napi = &rxr->bnapi->napi; pp.netdev = bp->dev; pp.dev = &bp->pdev->dev; @@ -3577,7 +3619,8 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp, static int bnxt_alloc_rx_rings(struct bnxt *bp) { - int i, rc = 0, agg_rings = 0; + int numa_node = dev_to_node(&bp->pdev->dev); + int i, rc = 0, agg_rings = 0, cpu; if (!bp->rx_ring) return -ENOMEM; @@ -3588,10 +3631,15 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp) for (i = 0; i < bp->rx_nr_rings; i++) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; struct bnxt_ring_struct *ring; + int cpu_node; ring = &rxr->rx_ring_struct; - rc = bnxt_alloc_rx_page_pool(bp, rxr); + cpu = cpumask_local_spread(i, numa_node); + cpu_node = cpu_to_node(cpu); + netdev_dbg(bp->dev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n", + i, cpu_node); + rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node); if (rc) return rc; @@ -3850,13 +3898,12 @@ static int 
bnxt_alloc_cp_sub_ring(struct bnxt *bp, static int bnxt_alloc_cp_rings(struct bnxt *bp) { bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS); - int i, j, rc, ulp_base_vec, ulp_msix; + int i, j, rc, ulp_msix; int tcs = bp->num_tc; if (!tcs) tcs = 1; ulp_msix = bnxt_get_ulp_msix_num(bp); - ulp_base_vec = bnxt_get_ulp_msix_base(bp); for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr, *cpr2; @@ -3875,10 +3922,7 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp) if (rc) return rc; - if (ulp_msix && i >= ulp_base_vec) - ring->map_idx = i + ulp_msix; - else - ring->map_idx = i; + ring->map_idx = ulp_msix + i; if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) continue; @@ -3908,6 +3952,7 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp) if (rc) return rc; cpr2->bnapi = bnapi; + cpr2->sw_stats = cpr->sw_stats; cpr2->cp_idx = k; if (!k && rx) { bp->rx_ring[i].rx_cpr = cpr2; @@ -4232,6 +4277,7 @@ static void bnxt_init_vnics(struct bnxt *bp) int j; vnic->fw_vnic_id = INVALID_HW_RING_ID; + vnic->vnic_id = i; for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID; @@ -4748,6 +4794,9 @@ static void bnxt_free_ring_stats(struct bnxt *bp) struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; bnxt_free_stats_mem(bp, &cpr->stats); + + kfree(cpr->sw_stats); + cpr->sw_stats = NULL; } } @@ -4762,6 +4811,10 @@ static int bnxt_alloc_stats(struct bnxt *bp) struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + cpr->sw_stats = kzalloc(sizeof(*cpr->sw_stats), GFP_KERNEL); + if (!cpr->sw_stats) + return -ENOMEM; + cpr->stats.len = size; rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i); if (rc) @@ -5779,8 +5832,22 @@ void bnxt_fill_ipv6_mask(__be32 mask[4]) static void bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp, struct hwrm_cfa_ntuple_filter_alloc_input *req, - u16 rxq) + struct bnxt_ntuple_filter *fltr) { + struct bnxt_rss_ctx *rss_ctx, *tmp; + u16 rxq = fltr->base.rxq; + + if (fltr->base.flags & BNXT_ACT_RSS_CTX) { + list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) { + if (rss_ctx->index == fltr->base.fw_vnic_id) { + struct bnxt_vnic_info *vnic = &rss_ctx->vnic; + + req->dst_id = cpu_to_le16(vnic->fw_vnic_id); + break; + } + } + return; + } if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) { struct bnxt_vnic_info *vnic; u32 enables; @@ -5821,7 +5888,7 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, req->flags = cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP); } else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) { - bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr->base.rxq); + bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr); } else { vnic = &bp->vnic_info[fltr->base.rxq + 1]; req->dst_id = cpu_to_le16(vnic->fw_vnic_id); @@ -5929,9 +5996,9 @@ static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp, req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap); } -static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) +int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic, + u32 tpa_flags) { - struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX; struct hwrm_vnic_tpa_cfg_input *req; int rc; @@ -6016,9 +6083,10 @@ static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr) return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct); } -static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp) +int bnxt_alloc_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx) { int entries; + u16 *tbl; if 
(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5; @@ -6026,16 +6094,22 @@ static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp) entries = HW_HASH_INDEX_SIZE; bp->rss_indir_tbl_entries = entries; - bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), - GFP_KERNEL); - if (!bp->rss_indir_tbl) + tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL); + if (!tbl) return -ENOMEM; + + if (rss_ctx) + rss_ctx->rss_indir_tbl = tbl; + else + bp->rss_indir_tbl = tbl; + return 0; } -static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp) +void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx) { u16 max_rings, max_entries, pad, i; + u16 *rss_indir_tbl; if (!bp->rx_nr_rings) return; @@ -6046,13 +6120,17 @@ static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp) max_rings = bp->rx_nr_rings; max_entries = bnxt_get_rxfh_indir_size(bp->dev); + if (rss_ctx) + rss_indir_tbl = &rss_ctx->rss_indir_tbl[0]; + else + rss_indir_tbl = &bp->rss_indir_tbl[0]; for (i = 0; i < max_entries; i++) - bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings); + rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings); pad = bp->rss_indir_tbl_entries - max_entries; if (pad) - memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16)); + memset(&rss_indir_tbl[i], 0, pad * sizeof(u16)); } static u16 bnxt_get_max_rss_ring(struct bnxt *bp) @@ -6068,6 +6146,24 @@ static u16 bnxt_get_max_rss_ring(struct bnxt *bp) return max_ring; } +u16 bnxt_get_max_rss_ctx_ring(struct bnxt *bp) +{ + u16 i, tbl_size, max_ring = 0; + struct bnxt_rss_ctx *rss_ctx; + + if (!BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) + return 0; + + tbl_size = bnxt_get_rxfh_indir_size(bp->dev); + + list_for_each_entry(rss_ctx, &bp->rss_ctx_list, list) { + for (i = 0; i < tbl_size; i++) + max_ring = max(max_ring, rss_ctx->rss_indir_tbl[i]); + } + + return max_ring; +} + int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings) { if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { @@ -6108,6 +6204,8 @@ static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp, if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG) j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings); + else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG) + j = vnic->rss_ctx->rss_indir_tbl[i]; else j = bp->rss_indir_tbl[i]; rxr = &bp->rx_ring[j]; @@ -6145,9 +6243,9 @@ __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req, req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); } -static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) +static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct bnxt_vnic_info *vnic, + bool set_rss) { - struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; struct hwrm_vnic_rss_cfg_input *req; int rc; @@ -6165,9 +6263,9 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) return hwrm_req_send(bp, req); } -static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss) +static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, + struct bnxt_vnic_info *vnic, bool set_rss) { - struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; struct hwrm_vnic_rss_cfg_input *req; dma_addr_t ring_tbl_map; u32 i, nr_ctxs; @@ -6220,9 +6318,8 @@ static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp) hwrm_req_drop(bp, req); } -static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id) +static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic) { - struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; struct 
hwrm_vnic_plcmodes_cfg_input *req; int rc; @@ -6247,7 +6344,8 @@ static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id) return hwrm_req_send(bp, req); } -static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id, +static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, + struct bnxt_vnic_info *vnic, u16 ctx_idx) { struct hwrm_vnic_rss_cos_lb_ctx_free_input *req; @@ -6256,10 +6354,10 @@ static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id, return; req->rss_cos_lb_ctx_id = - cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]); + cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]); hwrm_req_send(bp, req); - bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID; + vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID; } static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) @@ -6271,13 +6369,14 @@ static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) { if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID) - bnxt_hwrm_vnic_ctx_free_one(bp, i, j); + bnxt_hwrm_vnic_ctx_free_one(bp, vnic, j); } } bp->rsscos_nr_ctxs = 0; } -static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx) +static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, + struct bnxt_vnic_info *vnic, u16 ctx_idx) { struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp; struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req; @@ -6290,7 +6389,7 @@ static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx) resp = hwrm_req_hold(bp, req); rc = hwrm_req_send(bp, req); if (!rc) - bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = + vnic->fw_rss_cos_lb_ctx[ctx_idx] = le16_to_cpu(resp->rss_cos_lb_ctx_id); hwrm_req_drop(bp, req); @@ -6304,10 +6403,9 @@ static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp) return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE; } -int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) +int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic) { struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT]; - struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; struct hwrm_vnic_cfg_input *req; unsigned int ring = 0, grp_idx; u16 def_vlan = 0; @@ -6355,8 +6453,8 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) if (vnic->flags & BNXT_VNIC_RSS_FLAG) ring = 0; else if (vnic->flags & BNXT_VNIC_RFS_FLAG) - ring = vnic_id - 1; - else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp)) + ring = vnic->vnic_id - 1; + else if ((vnic->vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp)) ring = bp->rx_nr_rings - 1; grp_idx = bp->rx_ring[ring].bnapi->index; @@ -6372,25 +6470,25 @@ vnic_mru: #endif if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan) req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); - if (!vnic_id && bnxt_ulp_registered(bp->edev)) + if (vnic->vnic_id == BNXT_VNIC_DEFAULT && bnxt_ulp_registered(bp->edev)) req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp)); return hwrm_req_send(bp, req); } -static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id) +static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, + struct bnxt_vnic_info *vnic) { - if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) { + if (vnic->fw_vnic_id != INVALID_HW_RING_ID) { struct hwrm_vnic_free_input *req; if (hwrm_req_init(bp, req, HWRM_VNIC_FREE)) return; - req->vnic_id = - cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id); + req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); hwrm_req_send(bp, req); - bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID; + vnic->fw_vnic_id = 
INVALID_HW_RING_ID; } } @@ -6399,15 +6497,14 @@ static void bnxt_hwrm_vnic_free(struct bnxt *bp) u16 i; for (i = 0; i < bp->nr_vnics; i++) - bnxt_hwrm_vnic_free_one(bp, i); + bnxt_hwrm_vnic_free_one(bp, &bp->vnic_info[i]); } -static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, - unsigned int start_rx_ring_idx, - unsigned int nr_rings) +int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic, + unsigned int start_rx_ring_idx, + unsigned int nr_rings) { unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings; - struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; struct hwrm_vnic_alloc_output *resp; struct hwrm_vnic_alloc_input *req; int rc; @@ -6433,7 +6530,7 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, vnic_no_ring_grps: for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID; - if (vnic_id == BNXT_VNIC_DEFAULT) + if (vnic->vnic_id == BNXT_VNIC_DEFAULT) req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); resp = hwrm_req_hold(bp, req); @@ -7265,17 +7362,7 @@ static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) int bnxt_nq_rings_in_use(struct bnxt *bp) { - int cp = bp->cp_nr_rings; - int ulp_msix, ulp_base; - - ulp_msix = bnxt_get_ulp_msix_num(bp); - if (ulp_msix) { - ulp_base = bnxt_get_ulp_msix_base(bp); - cp += ulp_msix; - if ((ulp_base + ulp_msix) > cp) - cp = ulp_base + ulp_msix; - } - return cp; + return bp->cp_nr_rings + bnxt_get_ulp_msix_num(bp); } static int bnxt_cp_rings_in_use(struct bnxt *bp) @@ -7291,16 +7378,7 @@ static int bnxt_cp_rings_in_use(struct bnxt *bp) static int bnxt_get_func_stat_ctxs(struct bnxt *bp) { - int ulp_stat = bnxt_get_ulp_stat_ctxs(bp); - int cp = bp->cp_nr_rings; - - if (!ulp_stat) - return cp; - - if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp)) - return bnxt_get_ulp_msix_base(bp) + ulp_stat; - - return cp + ulp_stat; + return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp); } static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr) @@ -7332,7 +7410,7 @@ static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp) if (hw_resc->resv_rx_rings != bp->rx_nr_rings) { hw_resc->resv_rx_rings = bp->rx_nr_rings; if (!netif_is_rxfh_configured(bp->dev)) - bnxt_set_dflt_rss_indir_tbl(bp); + bnxt_set_dflt_rss_indir_tbl(bp, NULL); } } @@ -7340,7 +7418,7 @@ static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings) { if (bp->flags & BNXT_FLAG_RFS) { if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) - return 2; + return 2 + bp->num_rss_ctx; if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) return rx_rings + 1; } @@ -7408,17 +7486,32 @@ static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr) hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)); } +static int bnxt_get_avail_msix(struct bnxt *bp, int num); + static int __bnxt_reserve_rings(struct bnxt *bp) { struct bnxt_hw_rings hwr = {0}; + int cp = bp->cp_nr_rings; int rx_rings, rc; + int ulp_msix = 0; bool sh = false; int tx_cp; if (!bnxt_need_reserve_rings(bp)) return 0; - hwr.cp = bnxt_nq_rings_in_use(bp); + if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) { + ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want); + if (!ulp_msix) + bnxt_set_ulp_stat_ctxs(bp, 0); + + if (ulp_msix > bp->ulp_num_msix_want) + ulp_msix = bp->ulp_num_msix_want; + hwr.cp = cp + ulp_msix; + } else { + hwr.cp = bnxt_nq_rings_in_use(bp); + } + hwr.tx = bp->tx_nr_rings; hwr.rx = bp->rx_nr_rings; if (bp->flags & BNXT_FLAG_SHARED_RINGS) @@ -7488,7 +7581,20 @@ static int 
__bnxt_reserve_rings(struct bnxt *bp) return -ENOMEM; if (!netif_is_rxfh_configured(bp->dev)) - bnxt_set_dflt_rss_indir_tbl(bp); + bnxt_set_dflt_rss_indir_tbl(bp, NULL); + + if (!bnxt_ulp_registered(bp->edev) && BNXT_NEW_RM(bp)) { + int resv_msix, resv_ctx, ulp_ctxs; + struct bnxt_hw_resc *hw_resc; + + hw_resc = &bp->hw_resc; + resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings; + ulp_msix = min_t(int, resv_msix, ulp_msix); + bnxt_set_ulp_msix_num(bp, ulp_msix); + resv_ctx = hw_resc->resv_stat_ctxs - bp->cp_nr_rings; + ulp_ctxs = min(resv_ctx, bnxt_get_ulp_stat_ctxs(bp)); + bnxt_set_ulp_stat_ctxs(bp, ulp_ctxs); + } return rc; } @@ -8907,6 +9013,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN); #endif } + bp->tso_max_segs = le16_to_cpu(resp->max_tso_segs); hwrm_func_qcaps_exit: hwrm_req_drop(bp, req); @@ -9667,7 +9774,7 @@ static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) else if (BNXT_NO_FW_ACCESS(bp)) return 0; for (i = 0; i < bp->nr_vnics; i++) { - rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags); + rc = bnxt_hwrm_vnic_set_tpa(bp, &bp->vnic_info[i], tpa_flags); if (rc) { netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n", i, rc); @@ -9682,7 +9789,7 @@ static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp) int i; for (i = 0; i < bp->nr_vnics; i++) - bnxt_hwrm_vnic_set_rss(bp, i, false); + bnxt_hwrm_vnic_set_rss(bp, &bp->vnic_info[i], false); } static void bnxt_clear_vnic(struct bnxt *bp) @@ -9760,28 +9867,27 @@ static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size) return hwrm_req_send(bp, req); } -static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) +static int __bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic) { - struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; int rc; if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) goto skip_rss_ctx; /* allocate context for vnic */ - rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0); + rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0); if (rc) { netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", - vnic_id, rc); + vnic->vnic_id, rc); goto vnic_setup_err; } bp->rsscos_nr_ctxs++; if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { - rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1); + rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 1); if (rc) { netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n", - vnic_id, rc); + vnic->vnic_id, rc); goto vnic_setup_err; } bp->rsscos_nr_ctxs++; @@ -9789,26 +9895,26 @@ static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) skip_rss_ctx: /* configure default vnic, ring grp */ - rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); + rc = bnxt_hwrm_vnic_cfg(bp, vnic); if (rc) { netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", - vnic_id, rc); + vnic->vnic_id, rc); goto vnic_setup_err; } /* Enable RSS hashing on vnic */ - rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true); + rc = bnxt_hwrm_vnic_set_rss(bp, vnic, true); if (rc) { netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n", - vnic_id, rc); + vnic->vnic_id, rc); goto vnic_setup_err; } if (bp->flags & BNXT_FLAG_AGG_RINGS) { - rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); + rc = bnxt_hwrm_vnic_set_hds(bp, vnic); if (rc) { netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", - vnic_id, rc); + vnic->vnic_id, rc); } } @@ -9816,16 +9922,33 @@ vnic_setup_err: return rc; } -static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id) +int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + int rc; + + rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true); + if 
(rc) { + netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n", + vnic->vnic_id, rc); + return rc; + } + rc = bnxt_hwrm_vnic_cfg(bp, vnic); + if (rc) + netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", + vnic->vnic_id, rc); + return rc; +} + +int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic) { int rc, i, nr_ctxs; nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); for (i = 0; i < nr_ctxs; i++) { - rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i); + rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, i); if (rc) { netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n", - vnic_id, i, rc); + vnic->vnic_id, i, rc); break; } bp->rsscos_nr_ctxs++; @@ -9833,63 +9956,57 @@ static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id) if (i < nr_ctxs) return -ENOMEM; - rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true); - if (rc) { - netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n", - vnic_id, rc); - return rc; - } - rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); - if (rc) { - netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", - vnic_id, rc); + rc = bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic); + if (rc) return rc; - } + if (bp->flags & BNXT_FLAG_AGG_RINGS) { - rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); + rc = bnxt_hwrm_vnic_set_hds(bp, vnic); if (rc) { netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", - vnic_id, rc); + vnic->vnic_id, rc); } } return rc; } -static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) +static int bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic) { if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) - return __bnxt_setup_vnic_p5(bp, vnic_id); + return __bnxt_setup_vnic_p5(bp, vnic); else - return __bnxt_setup_vnic(bp, vnic_id); + return __bnxt_setup_vnic(bp, vnic); } -static int bnxt_alloc_and_setup_vnic(struct bnxt *bp, u16 vnic_id, +static int bnxt_alloc_and_setup_vnic(struct bnxt *bp, + struct bnxt_vnic_info *vnic, u16 start_rx_ring_idx, int rx_rings) { int rc; - rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, start_rx_ring_idx, rx_rings); + rc = bnxt_hwrm_vnic_alloc(bp, vnic, start_rx_ring_idx, rx_rings); if (rc) { netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", - vnic_id, rc); + vnic->vnic_id, rc); return rc; } - return bnxt_setup_vnic(bp, vnic_id); + return bnxt_setup_vnic(bp, vnic); } static int bnxt_alloc_rfs_vnics(struct bnxt *bp) { + struct bnxt_vnic_info *vnic; int i, rc = 0; - if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) - return bnxt_alloc_and_setup_vnic(bp, BNXT_VNIC_NTUPLE, 0, - bp->rx_nr_rings); + if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) { + vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE]; + return bnxt_alloc_and_setup_vnic(bp, vnic, 0, bp->rx_nr_rings); + } if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) return 0; for (i = 0; i < bp->rx_nr_rings; i++) { - struct bnxt_vnic_info *vnic; u16 vnic_id = i + 1; u16 ring_id = i; @@ -9900,12 +10017,104 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp) vnic->flags |= BNXT_VNIC_RFS_FLAG; if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG; - if (bnxt_alloc_and_setup_vnic(bp, vnic_id, ring_id, 1)) + if (bnxt_alloc_and_setup_vnic(bp, &bp->vnic_info[vnic_id], ring_id, 1)) break; } return rc; } +void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx, + bool all) +{ + struct bnxt_vnic_info *vnic = &rss_ctx->vnic; + struct bnxt_filter_base *usr_fltr, *tmp; + struct bnxt_ntuple_filter *ntp_fltr; + int i; + + bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic); + for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) { + if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID) + 
bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i); + } + if (!all) + return; + + list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) { + if ((usr_fltr->flags & BNXT_ACT_RSS_CTX) && + usr_fltr->fw_vnic_id == rss_ctx->index) { + ntp_fltr = container_of(usr_fltr, + struct bnxt_ntuple_filter, + base); + bnxt_hwrm_cfa_ntuple_filter_free(bp, ntp_fltr); + bnxt_del_ntp_filter(bp, ntp_fltr); + bnxt_del_one_usr_fltr(bp, usr_fltr); + } + } + + if (vnic->rss_table) + dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size, + vnic->rss_table, + vnic->rss_table_dma_addr); + kfree(rss_ctx->rss_indir_tbl); + list_del(&rss_ctx->list); + bp->num_rss_ctx--; + clear_bit(rss_ctx->index, bp->rss_ctx_bmap); + kfree(rss_ctx); +} + +static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp) +{ + bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA); + struct bnxt_rss_ctx *rss_ctx, *tmp; + + list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) { + struct bnxt_vnic_info *vnic = &rss_ctx->vnic; + + if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) || + bnxt_hwrm_vnic_set_tpa(bp, vnic, set_tpa) || + __bnxt_setup_vnic_p5(bp, vnic)) { + netdev_err(bp->dev, "Failed to restore RSS ctx %d\n", + rss_ctx->index); + bnxt_del_one_rss_ctx(bp, rss_ctx, true); + } + } +} + +struct bnxt_rss_ctx *bnxt_alloc_rss_ctx(struct bnxt *bp) +{ + struct bnxt_rss_ctx *rss_ctx = NULL; + + rss_ctx = kzalloc(sizeof(*rss_ctx), GFP_KERNEL); + if (rss_ctx) { + rss_ctx->vnic.rss_ctx = rss_ctx; + list_add_tail(&rss_ctx->list, &bp->rss_ctx_list); + bp->num_rss_ctx++; + } + return rss_ctx; +} + +void bnxt_clear_rss_ctxs(struct bnxt *bp, bool all) +{ + struct bnxt_rss_ctx *rss_ctx, *tmp; + + list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) + bnxt_del_one_rss_ctx(bp, rss_ctx, all); + + if (all) + bitmap_free(bp->rss_ctx_bmap); +} + +static void bnxt_init_multi_rss_ctx(struct bnxt *bp) +{ + bp->rss_ctx_bmap = bitmap_zalloc(BNXT_RSS_CTX_BMAP_LEN, GFP_KERNEL); + if (bp->rss_ctx_bmap) { + /* burn index 0 since we cannot have context 0 */ + __set_bit(0, bp->rss_ctx_bmap); + INIT_LIST_HEAD(&bp->rss_ctx_list); + bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX; + } +} + /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */ static bool bnxt_promisc_ok(struct bnxt *bp) { @@ -9918,16 +10127,17 @@ static bool bnxt_promisc_ok(struct bnxt *bp) static int bnxt_setup_nitroa0_vnic(struct bnxt *bp) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[1]; unsigned int rc = 0; - rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1); + rc = bnxt_hwrm_vnic_alloc(bp, vnic, bp->rx_nr_rings - 1, 1); if (rc) { netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", rc); return rc; } - rc = bnxt_hwrm_vnic_cfg(bp, 1); + rc = bnxt_hwrm_vnic_cfg(bp, vnic); if (rc) { netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", rc); @@ -9970,7 +10180,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) rx_nr_rings--; /* default vnic 0 */ - rc = bnxt_hwrm_vnic_alloc(bp, BNXT_VNIC_DEFAULT, 0, rx_nr_rings); + rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, rx_nr_rings); if (rc) { netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc); goto err_out; @@ -9979,7 +10189,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) if (BNXT_VF(bp)) bnxt_hwrm_func_qcfg(bp); - rc = bnxt_setup_vnic(bp, BNXT_VNIC_DEFAULT); + rc = bnxt_setup_vnic(bp, vnic); if (rc) goto err_out; if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA) @@ -10294,19 +10504,10 @@ unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp) 
return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp); } -int bnxt_get_avail_msix(struct bnxt *bp, int num) +static int bnxt_get_avail_msix(struct bnxt *bp, int num) { - int max_cp = bnxt_get_max_func_cp_rings(bp); int max_irq = bnxt_get_max_func_irqs(bp); int total_req = bp->cp_nr_rings + num; - int max_idx, avail_msix; - - max_idx = bp->total_irqs; - if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) - max_idx = min_t(int, bp->total_irqs, max_cp); - avail_msix = max_idx - bp->cp_nr_rings; - if (!BNXT_NEW_RM(bp) || avail_msix >= num) - return avail_msix; if (max_irq < total_req) { num = max_irq - bp->cp_nr_rings; @@ -10433,13 +10634,23 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init) { bool irq_cleared = false; int tcs = bp->num_tc; + int irqs_required; int rc; if (!bnxt_need_reserve_rings(bp)) return 0; - if (irq_re_init && BNXT_NEW_RM(bp) && - bnxt_get_num_msix(bp) != bp->total_irqs) { + if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) { + int ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want); + + if (ulp_msix > bp->ulp_num_msix_want) + ulp_msix = bp->ulp_num_msix_want; + irqs_required = ulp_msix + bp->cp_nr_rings; + } else { + irqs_required = bnxt_get_num_msix(bp); + } + + if (irq_re_init && BNXT_NEW_RM(bp) && irqs_required != bp->total_irqs) { bnxt_ulp_irq_stop(bp); bnxt_clear_int_mode(bp); irq_cleared = true; @@ -10621,9 +10832,9 @@ static void bnxt_disable_napi(struct bnxt *bp) cpr = &bnapi->cp_ring; if (bnapi->tx_fault) - cpr->sw_stats.tx.tx_resets++; + cpr->sw_stats->tx.tx_resets++; if (bnapi->in_reset) - cpr->sw_stats.rx.rx_resets++; + cpr->sw_stats->rx.rx_resets++; napi_disable(&bnapi->napi); if (bnapi->rx_ring) cancel_work_sync(&cpr->dim.work); @@ -11358,7 +11569,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) if (fw_reset) { set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) - bnxt_ulp_stop(bp); + bnxt_ulp_irq_stop(bp); bnxt_free_ctx_mem(bp); bnxt_dcb_free(bp); rc = bnxt_fw_init_one(bp); @@ -11665,6 +11876,46 @@ static void bnxt_cfg_usr_fltrs(struct bnxt *bp) bnxt_cfg_one_usr_fltr(bp, usr_fltr); } +static int bnxt_set_xps_mapping(struct bnxt *bp) +{ + int numa_node = dev_to_node(&bp->pdev->dev); + unsigned int q_idx, map_idx, cpu, i; + const struct cpumask *cpu_mask_ptr; + int nr_cpus = num_online_cpus(); + cpumask_t *q_map; + int rc = 0; + + q_map = kcalloc(bp->tx_nr_rings_per_tc, sizeof(*q_map), GFP_KERNEL); + if (!q_map) + return -ENOMEM; + + /* Create CPU mask for all TX queues across MQPRIO traffic classes. + * Each TC has the same number of TX queues. The nth TX queue for each + * TC will have the same CPU mask. 
+ */ + for (i = 0; i < nr_cpus; i++) { + map_idx = i % bp->tx_nr_rings_per_tc; + cpu = cpumask_local_spread(i, numa_node); + cpu_mask_ptr = get_cpu_mask(cpu); + cpumask_or(&q_map[map_idx], &q_map[map_idx], cpu_mask_ptr); + } + + /* Register CPU mask for each TX queue except the ones marked for XDP */ + for (q_idx = 0; q_idx < bp->dev->real_num_tx_queues; q_idx++) { + map_idx = q_idx % bp->tx_nr_rings_per_tc; + rc = netif_set_xps_queue(bp->dev, &q_map[map_idx], q_idx); + if (rc) { + netdev_warn(bp->dev, "Error setting XPS for q:%d\n", + q_idx); + break; + } + } + + kfree(q_map); + + return rc; +} + static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) { int rc = 0; @@ -11727,8 +11978,12 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) } } - if (irq_re_init) + if (irq_re_init) { udp_tunnel_nic_reset_ntf(bp->dev); + rc = bnxt_set_xps_mapping(bp); + if (rc) + netdev_warn(bp->dev, "failed to set xps mapping\n"); + } if (bp->tx_nr_rings_xdp < num_possible_cpus()) { if (!static_key_enabled(&bnxt_xdp_locking_key)) @@ -11753,6 +12008,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) atomic_set(&bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS); bnxt_ptp_init_rtc(bp, true); bnxt_ptp_cfg_tstamp_filters(bp); + if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) + bnxt_hwrm_realloc_rss_ctx_vnic(bp); bnxt_cfg_usr_fltrs(bp); return 0; @@ -11867,10 +12124,9 @@ static int bnxt_open(struct net_device *dev) bnxt_hwrm_if_change(bp, false); } else { if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) { - if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { - bnxt_ulp_start(bp, 0); - bnxt_reenable_sriov(bp); - } + if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) + bnxt_queue_sp_work(bp, + BNXT_RESTART_ULP_SP_EVENT); } } @@ -11901,6 +12157,8 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, while (bnxt_drv_busy(bp)) msleep(20); + if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) + bnxt_clear_rss_ctxs(bp, false); /* Flush rings and disable interrupts */ bnxt_shutdown_nic(bp, irq_re_init); @@ -12100,8 +12358,8 @@ static void bnxt_get_ring_stats(struct bnxt *bp, stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts); stats->rx_dropped += - cpr->sw_stats.rx.rx_netpoll_discards + - cpr->sw_stats.rx.rx_oom_discards; + cpr->sw_stats->rx.rx_netpoll_discards + + cpr->sw_stats->rx.rx_oom_discards; } } @@ -12168,7 +12426,7 @@ static void bnxt_get_one_ring_err_stats(struct bnxt *bp, struct bnxt_total_ring_err_stats *stats, struct bnxt_cp_ring_info *cpr) { - struct bnxt_sw_stats *sw_stats = &cpr->sw_stats; + struct bnxt_sw_stats *sw_stats = cpr->sw_stats; u64 *hw_stats = cpr->stats.sw_stats; stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors; @@ -12398,33 +12656,26 @@ static bool bnxt_rfs_supported(struct bnxt *bp) } /* If runtime conditions support RFS */ -static bool bnxt_rfs_capable(struct bnxt *bp) +bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx) { struct bnxt_hw_rings hwr = {0}; int max_vnics, max_rss_ctxs; - hwr.rss_ctx = 1; - if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) { - /* 2 VNICS: default + Ntuple */ - hwr.vnic = 2; - hwr.rss_ctx = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) * - hwr.vnic; - goto check_reserve_vnic; - } - if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && + !BNXT_SUPPORTS_NTUPLE_VNIC(bp)) return bnxt_rfs_supported(bp); + if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings) return false; - hwr.vnic = 1 + bp->rx_nr_rings; 
-check_reserve_vnic: + hwr.grp = bp->rx_nr_rings; + hwr.vnic = bnxt_get_total_vnics(bp, bp->rx_nr_rings); + if (new_rss_ctx) + hwr.vnic++; + hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr); max_vnics = bnxt_get_max_func_vnics(bp); max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp); - if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && - !(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)) - hwr.rss_ctx = hwr.vnic; - if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) { if (bp->rx_nr_rings > 1) netdev_warn(bp->dev, @@ -12462,7 +12713,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev, struct bnxt *bp = netdev_priv(dev); netdev_features_t vlan_features; - if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp)) + if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp, false)) features &= ~NETIF_F_NTUPLE; if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog) @@ -12854,17 +13105,8 @@ static void bnxt_reset_task(struct bnxt *bp, bool silent) if (!silent) bnxt_dbg_dump_states(bp); if (netif_running(bp->dev)) { - int rc; - - if (silent) { - bnxt_close_nic(bp, false, false); - bnxt_open_nic(bp, false, false); - } else { - bnxt_ulp_stop(bp); - bnxt_close_nic(bp, true, false); - rc = bnxt_open_nic(bp, true, false); - bnxt_ulp_start(bp, rc); - } + bnxt_close_nic(bp, !silent, false); + bnxt_open_nic(bp, !silent, false); } } @@ -13022,7 +13264,7 @@ static void bnxt_rx_ring_reset(struct bnxt *bp) rxr->bnapi->in_reset = false; bnxt_alloc_one_rx_ring(bp, i); cpr = &rxr->bnapi->cp_ring; - cpr->sw_stats.rx.rx_resets++; + cpr->sw_stats->rx.rx_resets++; if (bp->flags & BNXT_FLAG_AGG_RINGS) bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); @@ -13044,7 +13286,6 @@ static void bnxt_fw_fatal_close(struct bnxt *bp) static void bnxt_fw_reset_close(struct bnxt *bp) { - bnxt_ulp_stop(bp); /* When firmware is in fatal state, quiesce device and disable * bus master to prevent any potential bad DMAs before freeing * kernel memory. 
@@ -13125,6 +13366,7 @@ void bnxt_fw_exception(struct bnxt *bp) { netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n"); set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); + bnxt_ulp_stop(bp); bnxt_rtnl_lock_sp(bp); bnxt_force_fw_reset(bp); bnxt_rtnl_unlock_sp(bp); @@ -13156,6 +13398,7 @@ static int bnxt_get_registered_vfs(struct bnxt *bp) void bnxt_fw_reset(struct bnxt *bp) { + bnxt_ulp_stop(bp); bnxt_rtnl_lock_sp(bp); if (test_bit(BNXT_STATE_OPEN, &bp->state) && !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { @@ -13234,7 +13477,7 @@ static void bnxt_chk_missed_irq(struct bnxt *bp) bnxt_dbg_hwrm_ring_info_get(bp, DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL, fw_ring_id, &val[0], &val[1]); - cpr->sw_stats.cmn.missed_irqs++; + cpr->sw_stats->cmn.missed_irqs++; } } } @@ -13280,6 +13523,12 @@ static void bnxt_fw_echo_reply(struct bnxt *bp) hwrm_req_send(bp, req); } +static void bnxt_ulp_restart(struct bnxt *bp) +{ + bnxt_ulp_stop(bp); + bnxt_ulp_start(bp, 0); +} + static void bnxt_sp_task(struct work_struct *work) { struct bnxt *bp = container_of(work, struct bnxt, sp_task); @@ -13291,6 +13540,11 @@ static void bnxt_sp_task(struct work_struct *work) return; } + if (test_and_clear_bit(BNXT_RESTART_ULP_SP_EVENT, &bp->sp_event)) { + bnxt_ulp_restart(bp); + bnxt_reenable_sriov(bp); + } + if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) bnxt_cfg_rx_mode(bp); @@ -13419,8 +13673,8 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, return -ENOMEM; hwr.stat = hwr.cp; if (BNXT_NEW_RM(bp)) { - hwr.cp += bnxt_get_ulp_msix_num(bp); - hwr.stat += bnxt_get_ulp_stat_ctxs(bp); + hwr.cp += bnxt_get_ulp_msix_num_in_use(bp); + hwr.stat += bnxt_get_ulp_stat_ctxs_in_use(bp); hwr.grp = rx; hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr); } @@ -13603,7 +13857,7 @@ static void bnxt_set_dflt_rfs(struct bnxt *bp) bp->flags &= ~BNXT_FLAG_RFS; if (bnxt_rfs_supported(bp)) { dev->hw_features |= NETIF_F_NTUPLE; - if (bnxt_rfs_capable(bp)) { + if (bnxt_rfs_capable(bp, false)) { bp->flags |= BNXT_FLAG_RFS; dev->features |= NETIF_F_NTUPLE; } @@ -13747,10 +14001,8 @@ static bool bnxt_fw_reset_timeout(struct bnxt *bp) static void bnxt_fw_reset_abort(struct bnxt *bp, int rc) { clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); - if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) { - bnxt_ulp_start(bp, rc); + if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) bnxt_dl_health_fw_status_update(bp, false); - } bp->fw_reset_state = 0; dev_close(bp->dev); } @@ -13781,7 +14033,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) bp->fw_reset_state = 0; netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n", n); - return; + goto ulp_start; } bnxt_queue_fw_reset_work(bp, HZ / 10); return; @@ -13791,7 +14043,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { bnxt_fw_reset_abort(bp, rc); rtnl_unlock(); - return; + goto ulp_start; } bnxt_fw_reset_close(bp); if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { @@ -13884,7 +14136,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) netdev_err(bp->dev, "bnxt_open() failed during FW reset\n"); bnxt_fw_reset_abort(bp, rc); rtnl_unlock(); - return; + goto ulp_start; } if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && @@ -13896,10 +14148,6 @@ static void bnxt_fw_reset_task(struct work_struct *work) /* Make sure fw_reset_state is 0 before clearing the flag */ smp_mb__before_atomic(); clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); - 
bnxt_ulp_start(bp, 0); - bnxt_reenable_sriov(bp); - bnxt_vf_reps_alloc(bp); - bnxt_vf_reps_open(bp); bnxt_ptp_reapply_pps(bp); clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state); if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) { @@ -13907,6 +14155,12 @@ static void bnxt_fw_reset_task(struct work_struct *work) bnxt_dl_health_fw_status_update(bp, true); } rtnl_unlock(); + bnxt_ulp_start(bp, 0); + bnxt_reenable_sriov(bp); + rtnl_lock(); + bnxt_vf_reps_alloc(bp); + bnxt_vf_reps_open(bp); + rtnl_unlock(); break; } return; @@ -13922,6 +14176,8 @@ fw_reset_abort: rtnl_lock(); bnxt_fw_reset_abort(bp, rc); rtnl_unlock(); +ulp_start: + bnxt_ulp_start(bp, rc); } static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) @@ -14046,7 +14302,7 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu) if (netif_running(dev)) bnxt_close_nic(bp, true, false); - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); bnxt_set_ring_params(bp); if (netif_running(dev)) @@ -14456,12 +14712,9 @@ static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, if (!br_spec) return -EINVAL; - nla_for_each_nested(attr, br_spec, rem) { + nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) { u16 mode; - if (nla_type(attr) != IFLA_BRIDGE_MODE) - continue; - mode = nla_get_u16(attr); if (mode == bp->br_mode) break; @@ -14545,7 +14798,7 @@ static void bnxt_get_queue_stats_rx(struct net_device *dev, int i, stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes); stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes); - stats->alloc_fail = cpr->sw_stats.rx.rx_oom_discards; + stats->alloc_fail = cpr->sw_stats->rx.rx_oom_discards; } static void bnxt_get_queue_stats_tx(struct net_device *dev, int i, @@ -14597,12 +14850,17 @@ static void bnxt_remove_one(struct pci_dev *pdev) if (BNXT_PF(bp)) bnxt_sriov_disable(bp); - bnxt_rdma_aux_device_uninit(bp); + bnxt_rdma_aux_device_del(bp); bnxt_ptp_clear(bp); unregister_netdev(dev); + + bnxt_rdma_aux_device_uninit(bp); + bnxt_free_l2_filters(bp, true); bnxt_free_ntp_fltrs(bp, true); + if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) + bnxt_clear_rss_ctxs(bp, true); clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); /* Flush any pending tasks */ cancel_work_sync(&bp->sp_task); @@ -14691,8 +14949,9 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, *max_rx = hw_resc->max_rx_rings; *max_cp = bnxt_get_max_func_cp_rings_for_en(bp); max_irq = min_t(int, bnxt_get_max_func_irqs(bp) - - bnxt_get_ulp_msix_num(bp), - hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp)); + bnxt_get_ulp_msix_num_in_use(bp), + hw_resc->max_stat_ctxs - + bnxt_get_ulp_stat_ctxs_in_use(bp)); if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) *max_cp = min_t(int, *max_cp, max_irq); max_ring_grps = hw_resc->max_hw_ring_grps; @@ -14788,6 +15047,7 @@ static void bnxt_trim_dflt_sh_rings(struct bnxt *bp) static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) { int dflt_rings, max_rx_rings, max_tx_rings, rc; + int avail_msix; if (!bnxt_can_reserve_rings(bp)) return 0; @@ -14815,6 +15075,14 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings; bp->tx_nr_rings = bp->tx_nr_rings_per_tc; + avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings; + if (avail_msix >= BNXT_MIN_ROCE_CP_RINGS) { + int ulp_num_msix = min(avail_msix, bp->ulp_num_msix_want); + + bnxt_set_ulp_msix_num(bp, ulp_num_msix); + bnxt_set_dflt_ulp_stat_ctxs(bp); + } + rc = __bnxt_reserve_rings(bp); if (rc && rc != -ENODEV) 
netdev_warn(bp->dev, "Unable to reserve tx rings\n"); @@ -15061,7 +15329,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) bp->flags |= BNXT_FLAG_CHIP_P7; } - rc = bnxt_alloc_rss_indir_tbl(bp); + rc = bnxt_alloc_rss_indir_tbl(bp, NULL); if (rc) goto init_err_pci_clean; @@ -15117,6 +15385,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->priv_flags |= IFF_UNICAST_FLT; netif_set_tso_max_size(dev, GSO_MAX_SIZE); + if (bp->tso_max_segs) + netif_set_tso_max_segs(dev, bp->tso_max_segs); dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | NETDEV_XDP_ACT_RX_SG; @@ -15164,6 +15434,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) bnxt_set_rx_skb_mode(bp, false); bnxt_set_tpa_flags(bp); bnxt_set_ring_params(bp); + bnxt_rdma_aux_device_init(bp); rc = bnxt_set_dflt_rings(bp, true); if (rc) { if (BNXT_VF(bp) && rc == -ENODEV) { @@ -15214,13 +15485,17 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_LIST_HEAD(&bp->usr_fltr_list); + if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) + bnxt_init_multi_rss_ctx(bp); + + rc = register_netdev(dev); if (rc) goto init_err_cleanup; bnxt_dl_fw_reporters_create(bp); - bnxt_rdma_aux_device_init(bp); + bnxt_rdma_aux_device_add(bp); bnxt_print_device_info(bp); @@ -15228,12 +15503,15 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; init_err_cleanup: + bnxt_rdma_aux_device_uninit(bp); bnxt_dl_unregister(bp); init_err_dl: bnxt_shutdown_tc(bp); bnxt_clear_int_mode(bp); init_err_pci_clean: + if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) + bnxt_clear_rss_ctxs(bp, true); bnxt_hwrm_func_drv_unrgtr(bp); bnxt_free_hwrm_resources(bp); bnxt_hwmon_uninit(bp); @@ -15288,8 +15566,9 @@ static int bnxt_suspend(struct device *device) struct bnxt *bp = netdev_priv(dev); int rc = 0; - rtnl_lock(); bnxt_ulp_stop(bp); + + rtnl_lock(); if (netif_running(dev)) { netif_device_detach(dev); rc = bnxt_close(dev); @@ -15344,10 +15623,10 @@ static int bnxt_resume(struct device *device) } resume_exit: + rtnl_unlock(); bnxt_ulp_start(bp, rc); if (!rc) bnxt_reenable_sriov(bp); - rtnl_unlock(); return rc; } @@ -15377,11 +15656,11 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev, netdev_info(netdev, "PCI I/O error detected\n"); + bnxt_ulp_stop(bp); + rtnl_lock(); netif_device_detach(netdev); - bnxt_ulp_stop(bp); - if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { netdev_err(bp->dev, "Firmware reset already in progress\n"); abort = true; @@ -15433,6 +15712,10 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) netdev_info(bp->dev, "PCI Slot Reset\n"); + if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && + test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state)) + msleep(900); + rtnl_lock(); if (pci_enable_device(pdev)) { @@ -15513,13 +15796,13 @@ static void bnxt_io_resume(struct pci_dev *pdev) if (!err && netif_running(netdev)) err = bnxt_open(netdev); - bnxt_ulp_start(bp, err); - if (!err) { - bnxt_reenable_sriov(bp); + if (!err) netif_device_attach(netdev); - } rtnl_unlock(); + bnxt_ulp_start(bp, err); + if (!err) + bnxt_reenable_sriov(bp); } static const struct pci_error_handlers bnxt_err_handler = { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index c46abcccfc..6b10a09ee1 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1152,7 +1152,7 @@ struct bnxt_cp_ring_info { 
struct bnxt_stats_mem stats; u32 hw_stats_ctx_id; - struct bnxt_sw_stats sw_stats; + struct bnxt_sw_stats *sw_stats; struct bnxt_ring_struct cp_ring_struct; @@ -1256,8 +1256,22 @@ struct bnxt_vnic_info { #define BNXT_VNIC_UCAST_FLAG 8 #define BNXT_VNIC_RFS_NEW_RSS_FLAG 0x10 #define BNXT_VNIC_NTUPLE_FLAG 0x20 +#define BNXT_VNIC_RSSCTX_FLAG 0x40 + struct bnxt_rss_ctx *rss_ctx; + u32 vnic_id; }; +struct bnxt_rss_ctx { + struct list_head list; + struct bnxt_vnic_info vnic; + u16 *rss_indir_tbl; + u8 index; +}; + +#define BNXT_MAX_ETH_RSS_CTX 32 +#define BNXT_RSS_CTX_BMAP_LEN (BNXT_MAX_ETH_RSS_CTX + 1) +#define BNXT_VNIC_ID_INVALID 0xffffffff + struct bnxt_hw_rings { int tx; int rx; @@ -1360,6 +1374,7 @@ struct bnxt_filter_base { #define BNXT_ACT_RING_DST 2 #define BNXT_ACT_FUNC_DST 4 #define BNXT_ACT_NO_AGING 8 +#define BNXT_ACT_RSS_CTX 0x10 u16 sw_id; u16 rxq; u16 fw_vnic_id; @@ -2049,6 +2064,7 @@ enum board_idx { NETXTREME_E_VF_HV, NETXTREME_E_P5_VF, NETXTREME_E_P5_VF_HV, + NETXTREME_E_P7_VF, }; struct bnxt { @@ -2278,6 +2294,9 @@ struct bnxt { /* grp_info indexed by completion ring index */ struct bnxt_ring_grp_info *grp_info; struct bnxt_vnic_info *vnic_info; + struct list_head rss_ctx_list; + unsigned long *rss_ctx_bmap; + u32 num_rss_ctx; int nr_vnics; u16 *rss_indir_tbl; u16 rss_indir_tbl_entries; @@ -2292,12 +2311,14 @@ struct bnxt { #define BNXT_RSS_CAP_AH_V6_RSS_CAP BIT(5) #define BNXT_RSS_CAP_ESP_V4_RSS_CAP BIT(6) #define BNXT_RSS_CAP_ESP_V6_RSS_CAP BIT(7) +#define BNXT_RSS_CAP_MULTI_RSS_CTX BIT(8) u8 rss_hash_key[HW_HASH_KEY_SIZE]; u8 rss_hash_key_valid:1; u8 rss_hash_key_updated:1; u16 max_mtu; + u16 tso_max_segs; u8 max_tc; u8 max_lltc; /* lossless TCs */ struct bnxt_queue_info q_info[BNXT_MAX_QUEUE]; @@ -2335,6 +2356,7 @@ struct bnxt { struct bnxt_irq *irq_tbl; int total_irqs; + int ulp_num_msix_want; u8 mac_addr[ETH_ALEN]; #ifdef CONFIG_BNXT_DCB @@ -2391,6 +2413,10 @@ struct bnxt { #define BNXT_SUPPORTS_NTUPLE_VNIC(bp) \ (BNXT_PF(bp) && ((bp)->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3)) +#define BNXT_SUPPORTS_MULTI_RSS_CTX(bp) \ + (BNXT_PF(bp) && BNXT_SUPPORTS_NTUPLE_VNIC(bp) && \ + ((bp)->rss_cap & BNXT_RSS_CAP_MULTI_RSS_CTX)) + u32 hwrm_spec_code; u16 hwrm_cmd_seq; u16 hwrm_cmd_kong_seq; @@ -2467,6 +2493,7 @@ struct bnxt { #define BNXT_LINK_CFG_CHANGE_SP_EVENT 21 #define BNXT_THERMAL_THRESHOLD_SP_EVENT 22 #define BNXT_FW_ECHO_REQUEST_SP_EVENT 23 +#define BNXT_RESTART_ULP_SP_EVENT 24 struct delayed_work fw_reset_task; int fw_reset_state; @@ -2744,9 +2771,17 @@ int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, struct bnxt_ntuple_filter *fltr); int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, struct bnxt_ntuple_filter *fltr); +int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic, + u32 tpa_flags); void bnxt_fill_ipv6_mask(__be32 mask[4]); +int bnxt_alloc_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx); +void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx); +u16 bnxt_get_max_rss_ctx_ring(struct bnxt *bp); int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings); -int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id); +int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic); +int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic, + unsigned int start_rx_ring_idx, + unsigned int nr_rings); int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings); int bnxt_nq_rings_in_use(struct bnxt *bp); int bnxt_hwrm_set_coal(struct bnxt *); @@ -2756,7 +2791,6 @@ unsigned int 
bnxt_get_max_func_stat_ctxs(struct bnxt *bp); unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp); unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp); unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp); -int bnxt_get_avail_msix(struct bnxt *bp, int num); int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init); void bnxt_tx_disable(struct bnxt *bp); void bnxt_tx_enable(struct bnxt *bp); @@ -2772,6 +2806,12 @@ int bnxt_hwrm_free_wol_fltr(struct bnxt *bp); int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all); int bnxt_hwrm_func_qcaps(struct bnxt *bp); int bnxt_hwrm_fw_set_time(struct bnxt *); +int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic); +int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic); +void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx, + bool all); +struct bnxt_rss_ctx *bnxt_alloc_rss_ctx(struct bnxt *bp); +void bnxt_clear_rss_ctxs(struct bnxt *bp, bool all); int bnxt_open_nic(struct bnxt *, bool, bool); int bnxt_half_open_nic(struct bnxt *bp); void bnxt_half_close_nic(struct bnxt *bp); @@ -2779,6 +2819,7 @@ void bnxt_reenable_sriov(struct bnxt *bp); void bnxt_close_nic(struct bnxt *, bool, bool); void bnxt_get_ring_err_stats(struct bnxt *bp, struct bnxt_total_ring_err_stats *stats); +bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx); int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, u32 *reg_buf); void bnxt_fw_exception(struct bnxt *bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index ae4529c043..4cb0fabf97 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -437,18 +437,20 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change, switch (action) { case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: { + bnxt_ulp_stop(bp); rtnl_lock(); if (bnxt_sriov_cfg(bp)) { NL_SET_ERR_MSG_MOD(extack, "reload is unsupported while VFs are allocated or being configured"); rtnl_unlock(); + bnxt_ulp_start(bp, 0); return -EOPNOTSUPP; } if (bp->dev->reg_state == NETREG_UNREGISTERED) { rtnl_unlock(); + bnxt_ulp_start(bp, 0); return -ENODEV; } - bnxt_ulp_stop(bp); if (netif_running(bp->dev)) bnxt_close_nic(bp, true, true); bnxt_vf_reps_free(bp); @@ -516,7 +518,6 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti bnxt_vf_reps_alloc(bp); if (netif_running(bp->dev)) rc = bnxt_open_nic(bp, true, true); - bnxt_ulp_start(bp, rc); if (!rc) { bnxt_reenable_sriov(bp); bnxt_ptp_reapply_pps(bp); @@ -570,6 +571,8 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti dev_close(bp->dev); } rtnl_unlock(); + if (action == DEVLINK_RELOAD_ACTION_DRIVER_REINIT) + bnxt_ulp_start(bp, rc); return rc; } @@ -1096,7 +1099,8 @@ static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id, } static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct bnxt *bp = bnxt_get_bp_from_dl(dl); struct hwrm_nvm_set_variable_input *req; @@ -1145,7 +1149,8 @@ static int bnxt_remote_dev_reset_get(struct devlink *dl, u32 id, } static int bnxt_remote_dev_reset_set(struct devlink *dl, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct bnxt *bp = bnxt_get_bp_from_dl(dl); int rc; diff --git 
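The final two bnxt_devlink.c hunks track a devlink core change: the .set callback of struct devlink_param now receives a struct netlink_ext_ack *, so parameter validation can report a message straight to user space. A sketch of the new callback shape (callback name and check are illustrative):

static int example_param_set(struct devlink *dl, u32 id,
			     struct devlink_param_gset_ctx *ctx,
			     struct netlink_ext_ack *extack)
{
	if (!ctx->val.vbool) {
		NL_SET_ERR_MSG_MOD(extack, "disabling this parameter is not supported");
		return -EOPNOTSUPP;
	}
	return 0;
}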
a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 1d240a2745..79c09c1cdf 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -631,13 +631,13 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, buf[j] = sw_stats[k]; skip_tpa_ring_stats: - sw = (u64 *)&cpr->sw_stats.rx; + sw = (u64 *)&cpr->sw_stats->rx; if (is_rx_ring(bp, i)) { for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++) buf[j] = sw[k]; } - sw = (u64 *)&cpr->sw_stats.cmn; + sw = (u64 *)&cpr->sw_stats->cmn; for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++) buf[j] = sw[k]; } @@ -961,6 +961,12 @@ static int bnxt_set_channels(struct net_device *dev, return rc; } + if (req_rx_rings < bp->rx_nr_rings && + req_rx_rings <= bnxt_get_max_rss_ctx_ring(bp)) { + netdev_warn(dev, "Can't deactivate rings used by RSS contexts\n"); + return -EINVAL; + } + if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) != bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) && netif_is_rxfh_configured(dev)) { @@ -969,6 +975,8 @@ static int bnxt_set_channels(struct net_device *dev, } bnxt_clear_usr_fltrs(bp, true); + if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) + bnxt_clear_rss_ctxs(bp, false); if (netif_running(dev)) { if (BNXT_PF(bp)) { /* TODO CHIMP_FW: Send message to all VF's @@ -1205,6 +1213,36 @@ fltr_err: return rc; } +static struct bnxt_rss_ctx *bnxt_get_rss_ctx_from_index(struct bnxt *bp, + u32 index) +{ + struct bnxt_rss_ctx *rss_ctx, *tmp; + + list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) + if (rss_ctx->index == index) + return rss_ctx; + return NULL; +} + +static int bnxt_alloc_rss_ctx_rss_table(struct bnxt *bp, + struct bnxt_rss_ctx *rss_ctx) +{ + int size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5); + struct bnxt_vnic_info *vnic = &rss_ctx->vnic; + + vnic->rss_table_size = size + HW_HASH_KEY_SIZE; + vnic->rss_table = dma_alloc_coherent(&bp->pdev->dev, + vnic->rss_table_size, + &vnic->rss_table_dma_addr, + GFP_KERNEL); + if (!vnic->rss_table) + return -ENOMEM; + + vnic->rss_hash_key = ((void *)vnic->rss_table) + size; + vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size; + return 0; +} + static int bnxt_add_l2_cls_rule(struct bnxt *bp, struct ethtool_rx_flow_spec *fs) { @@ -1280,22 +1318,24 @@ static bool bnxt_verify_ntuple_ip6_flow(struct ethtool_usrip6_spec *ip_spec, } static int bnxt_add_ntuple_cls_rule(struct bnxt *bp, - struct ethtool_rx_flow_spec *fs) + struct ethtool_rxnfc *cmd) { - u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); - u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie); + struct ethtool_rx_flow_spec *fs = &cmd->fs; struct bnxt_ntuple_filter *new_fltr, *fltr; + u32 flow_type = fs->flow_type & 0xff; struct bnxt_l2_filter *l2_fltr; struct bnxt_flow_masks *fmasks; - u32 flow_type = fs->flow_type; struct flow_keys *fkeys; - u32 idx; + u32 idx, ring; int rc; + u8 vf; if (!bp->vnic_info) return -EAGAIN; - if ((flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || vf) + vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); + ring = ethtool_get_flow_spec_ring(fs->ring_cookie); + if ((fs->flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || vf) return -EOPNOTSUPP; if (flow_type == IP_USER_FLOW) { @@ -1403,6 +1443,19 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp, rcu_read_unlock(); new_fltr->base.flags = BNXT_ACT_NO_AGING; + if (fs->flow_type & FLOW_RSS) { + struct bnxt_rss_ctx *rss_ctx; + + new_fltr->base.fw_vnic_id = 0; + new_fltr->base.flags |= BNXT_ACT_RSS_CTX; + rss_ctx = bnxt_get_rss_ctx_from_index(bp, 
cmd->rss_context); + if (rss_ctx) { + new_fltr->base.fw_vnic_id = rss_ctx->index; + } else { + rc = -EINVAL; + goto ntuple_err; + } + } if (fs->ring_cookie == RX_CLS_FLOW_DISC) new_fltr->base.flags |= BNXT_ACT_DROP; else @@ -1444,12 +1497,12 @@ static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd) flow_type == IPV6_USER_FLOW) && !(bp->fw_cap & BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO)) return -EOPNOTSUPP; - if (flow_type & (FLOW_MAC_EXT | FLOW_RSS)) + if (flow_type & FLOW_MAC_EXT) return -EINVAL; flow_type &= ~FLOW_EXT; if (fs->ring_cookie == RX_CLS_FLOW_DISC && flow_type != ETHER_FLOW) - return bnxt_add_ntuple_cls_rule(bp, fs); + return bnxt_add_ntuple_cls_rule(bp, cmd); ring = ethtool_get_flow_spec_ring(fs->ring_cookie); vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); @@ -1463,7 +1516,7 @@ static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd) if (flow_type == ETHER_FLOW) rc = bnxt_add_l2_cls_rule(bp, fs); else - rc = bnxt_add_ntuple_cls_rule(bp, fs); + rc = bnxt_add_ntuple_cls_rule(bp, cmd); return rc; } @@ -1754,7 +1807,10 @@ static u32 bnxt_get_rxfh_key_size(struct net_device *dev) static int bnxt_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh) { + u32 rss_context = rxfh->rss_context; + struct bnxt_rss_ctx *rss_ctx = NULL; struct bnxt *bp = netdev_priv(dev); + u16 *indir_tbl = bp->rss_indir_tbl; struct bnxt_vnic_info *vnic; u32 i, tbl_size; @@ -1764,10 +1820,18 @@ static int bnxt_get_rxfh(struct net_device *dev, return 0; vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; - if (rxfh->indir && bp->rss_indir_tbl) { + if (rxfh->rss_context) { + rss_ctx = bnxt_get_rss_ctx_from_index(bp, rss_context); + if (!rss_ctx) + return -EINVAL; + indir_tbl = rss_ctx->rss_indir_tbl; + vnic = &rss_ctx->vnic; + } + + if (rxfh->indir && indir_tbl) { tbl_size = bnxt_get_rxfh_indir_size(dev); for (i = 0; i < tbl_size; i++) - rxfh->indir[i] = bp->rss_indir_tbl[i]; + rxfh->indir[i] = indir_tbl[i]; } if (rxfh->key && vnic->rss_hash_key) @@ -1776,6 +1840,136 @@ static int bnxt_get_rxfh(struct net_device *dev, return 0; } +static void bnxt_modify_rss(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx, + struct ethtool_rxfh_param *rxfh) +{ + if (rxfh->key) { + if (rss_ctx) { + memcpy(rss_ctx->vnic.rss_hash_key, rxfh->key, + HW_HASH_KEY_SIZE); + } else { + memcpy(bp->rss_hash_key, rxfh->key, HW_HASH_KEY_SIZE); + bp->rss_hash_key_updated = true; + } + } + if (rxfh->indir) { + u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(bp->dev); + u16 *indir_tbl = bp->rss_indir_tbl; + + if (rss_ctx) + indir_tbl = rss_ctx->rss_indir_tbl; + for (i = 0; i < tbl_size; i++) + indir_tbl[i] = rxfh->indir[i]; + pad = bp->rss_indir_tbl_entries - tbl_size; + if (pad) + memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16)); + } +} + +static int bnxt_set_rxfh_context(struct bnxt *bp, + struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) +{ + u32 *rss_context = &rxfh->rss_context; + struct bnxt_rss_ctx *rss_ctx; + struct bnxt_vnic_info *vnic; + bool modify = false; + int bit_id; + int rc; + + if (!BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) { + NL_SET_ERR_MSG_MOD(extack, "RSS contexts not supported"); + return -EOPNOTSUPP; + } + + if (!netif_running(bp->dev)) { + NL_SET_ERR_MSG_MOD(extack, "Unable to set RSS contexts when interface is down"); + return -EAGAIN; + } + + if (*rss_context != ETH_RXFH_CONTEXT_ALLOC) { + rss_ctx = bnxt_get_rss_ctx_from_index(bp, *rss_context); + if (!rss_ctx) { + NL_SET_ERR_MSG_FMT_MOD(extack, "RSS context %u not found", + *rss_context); + return -EINVAL; + } + if 
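bnxt_add_ntuple_cls_rule() above now accepts rules carrying FLOW_RSS and resolves cmd->rss_context to a driver RSS context, recording it via BNXT_ACT_RSS_CTX. A hypothetical, self-contained user-space sketch that exercises this path by steering TCP/IPv4 dst-port 80 into RSS context 1 (interface name and context id are illustrative; only standard ethtool UAPI structures are used):

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <unistd.h>

int main(void)
{
	struct ethtool_rxnfc nfc = {
		.cmd = ETHTOOL_SRXCLSRLINS,
		.rss_context = 1,		/* target RSS context */
		.fs = {
			.flow_type = TCP_V4_FLOW | FLOW_RSS,
			.location = RX_CLS_LOC_ANY,	/* let the driver pick a slot */
		},
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(80);	/* match dst port 80 */
	nfc.fs.m_u.tcp_ip4_spec.pdst = htons(0xffff);	/* exact-match mask */
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;
	if (ioctl(fd, SIOCETHTOOL, &ifr))
		perror("ETHTOOL_SRXCLSRLINS");
	else
		printf("rule installed at location %u\n", nfc.fs.location);
	close(fd);
	return 0;
}

Recent ethtool releases express the same rule as "ethtool -N eth0 flow-type tcp4 dst-port 80 context 1".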
(*rss_context && rxfh->rss_delete) { + bnxt_del_one_rss_ctx(bp, rss_ctx, true); + return 0; + } + modify = true; + vnic = &rss_ctx->vnic; + goto modify_context; + } + + if (bp->num_rss_ctx >= BNXT_MAX_ETH_RSS_CTX) { + NL_SET_ERR_MSG_FMT_MOD(extack, "Out of RSS contexts, maximum %u", + BNXT_MAX_ETH_RSS_CTX); + return -EINVAL; + } + + if (!bnxt_rfs_capable(bp, true)) { + NL_SET_ERR_MSG_MOD(extack, "Out hardware resources"); + return -ENOMEM; + } + + rss_ctx = bnxt_alloc_rss_ctx(bp); + if (!rss_ctx) + return -ENOMEM; + + vnic = &rss_ctx->vnic; + vnic->flags |= BNXT_VNIC_RSSCTX_FLAG; + vnic->vnic_id = BNXT_VNIC_ID_INVALID; + rc = bnxt_alloc_rss_ctx_rss_table(bp, rss_ctx); + if (rc) + goto out; + + rc = bnxt_alloc_rss_indir_tbl(bp, rss_ctx); + if (rc) + goto out; + + bnxt_set_dflt_rss_indir_tbl(bp, rss_ctx); + memcpy(vnic->rss_hash_key, bp->rss_hash_key, HW_HASH_KEY_SIZE); + + rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, "Unable to allocate VNIC"); + goto out; + } + + rc = bnxt_hwrm_vnic_set_tpa(bp, vnic, bp->flags & BNXT_FLAG_TPA); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA"); + goto out; + } +modify_context: + bnxt_modify_rss(bp, rss_ctx, rxfh); + + if (modify) + return bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic); + + rc = __bnxt_setup_vnic_p5(bp, vnic); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA"); + goto out; + } + + bit_id = bitmap_find_free_region(bp->rss_ctx_bmap, + BNXT_RSS_CTX_BMAP_LEN, 0); + if (bit_id < 0) { + rc = -ENOMEM; + goto out; + } + rss_ctx->index = (u16)bit_id; + *rss_context = rss_ctx->index; + + return 0; +out: + bnxt_del_one_rss_ctx(bp, rss_ctx, true); + return rc; +} + static int bnxt_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh, struct netlink_ext_ack *extack) @@ -1786,20 +1980,11 @@ static int bnxt_set_rxfh(struct net_device *dev, if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; - if (rxfh->key) { - memcpy(bp->rss_hash_key, rxfh->key, HW_HASH_KEY_SIZE); - bp->rss_hash_key_updated = true; - } + if (rxfh->rss_context) + return bnxt_set_rxfh_context(bp, rxfh, extack); - if (rxfh->indir) { - u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev); + bnxt_modify_rss(bp, NULL, rxfh); - for (i = 0; i < tbl_size; i++) - bp->rss_indir_tbl[i] = rxfh->indir[i]; - pad = bp->rss_indir_tbl_entries - tbl_size; - if (pad) - memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16)); - } bnxt_clear_usr_fltrs(bp, false); if (netif_running(bp->dev)) { bnxt_close_nic(bp, false, false); @@ -4641,6 +4826,14 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, if (!bp->num_tests || !BNXT_PF(bp)) return; + + if (etest->flags & ETH_TEST_FL_OFFLINE && + bnxt_ulp_registered(bp->edev)) { + etest->flags |= ETH_TEST_FL_FAILED; + netdev_warn(dev, "Offline tests cannot be run with RoCE driver loaded\n"); + return; + } + memset(buf, 0, sizeof(u64) * bp->num_tests); if (!netif_running(dev)) { etest->flags |= ETH_TEST_FL_FAILED; @@ -4671,7 +4864,6 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, if (!offline) { bnxt_run_fw_tests(bp, test_mask, &test_results); } else { - bnxt_ulp_stop(bp); bnxt_close_nic(bp, true, false); bnxt_run_fw_tests(bp, test_mask, &test_results); @@ -4682,7 +4874,6 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, if (rc) { bnxt_hwrm_mac_loopback(bp, false); etest->flags |= ETH_TEST_FL_FAILED; - bnxt_ulp_start(bp, rc); return; } if (bnxt_run_loopback(bp)) @@ -4709,7 +4900,6 
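bnxt_set_rxfh_context() above allocates the VNIC, RSS hash table and indirection table backing each extra context, and hands the bitmap-chosen index back through *rss_context. From user space, a context is requested with ETHTOOL_SRSSH and rss_context = ETH_RXFH_CONTEXT_ALLOC; a hedged, self-contained sketch (interface name illustrative; the default hash key and indirection table are kept):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <unistd.h>

int main(void)
{
	struct ethtool_rxfh rxfh = {
		.cmd = ETHTOOL_SRSSH,
		.rss_context = ETH_RXFH_CONTEXT_ALLOC,	/* ask for a new context */
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&rxfh;
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("allocated RSS context %u\n", rxfh.rss_context);
	close(fd);
	return 0;
}

Recent ethtool releases expose the same operation as "ethtool -X eth0 context new".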
@@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, bnxt_hwrm_phy_loopback(bp, false, false); bnxt_half_close_nic(bp); rc = bnxt_open_nic(bp, true, true); - bnxt_ulp_start(bp, rc); } if (rc || bnxt_test_irq(bp)) { buf[BNXT_IRQ_TEST_IDX] = 1; @@ -5071,6 +5261,7 @@ void bnxt_ethtool_free(struct bnxt *bp) const struct ethtool_ops bnxt_ethtool_ops = { .cap_link_lanes_supported = 1, + .cap_rss_ctx_supported = 1, .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES | ETHTOOL_COALESCE_USECS_IRQ | diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index e957abd704..f219709f95 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -2,7 +2,7 @@ * * Copyright (c) 2014-2016 Broadcom Corporation * Copyright (c) 2014-2018 Broadcom Limited - * Copyright (c) 2018-2023 Broadcom Inc. + * Copyright (c) 2018-2024 Broadcom Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -468,6 +468,10 @@ struct cmd_nums { #define HWRM_TF_GLOBAL_CFG_GET 0x2fdUL #define HWRM_TF_IF_TBL_SET 0x2feUL #define HWRM_TF_IF_TBL_GET 0x2ffUL + #define HWRM_TF_RESC_USAGE_SET 0x300UL + #define HWRM_TF_RESC_USAGE_QUERY 0x301UL + #define HWRM_TF_TBL_TYPE_ALLOC 0x302UL + #define HWRM_TF_TBL_TYPE_FREE 0x303UL #define HWRM_TFC_TBL_SCOPE_QCAPS 0x380UL #define HWRM_TFC_TBL_SCOPE_ID_ALLOC 0x381UL #define HWRM_TFC_TBL_SCOPE_CONFIG 0x382UL @@ -495,7 +499,12 @@ struct cmd_nums { #define HWRM_TFC_IF_TBL_SET 0x398UL #define HWRM_TFC_IF_TBL_GET 0x399UL #define HWRM_TFC_TBL_SCOPE_CONFIG_GET 0x39aUL + #define HWRM_TFC_RESC_USAGE_QUERY 0x39bUL + #define HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS 0x39cUL + #define HWRM_QUEUE_PFCWD_TIMEOUT_CFG 0x39dUL + #define HWRM_QUEUE_PFCWD_TIMEOUT_QCFG 0x39eUL #define HWRM_SV 0x400UL + #define HWRM_DBG_LOG_BUFFER_FLUSH 0xff0fUL #define HWRM_DBG_READ_DIRECT 0xff10UL #define HWRM_DBG_READ_INDIRECT 0xff11UL #define HWRM_DBG_WRITE_DIRECT 0xff12UL @@ -604,8 +613,8 @@ struct hwrm_err_output { #define HWRM_VERSION_MAJOR 1 #define HWRM_VERSION_MINOR 10 #define HWRM_VERSION_UPDATE 3 -#define HWRM_VERSION_RSVD 15 -#define HWRM_VERSION_STR "1.10.3.15" +#define HWRM_VERSION_RSVD 44 +#define HWRM_VERSION_STR "1.10.3.44" /* hwrm_ver_get_input (size:192b/24B) */ struct hwrm_ver_get_input { @@ -659,6 +668,7 @@ struct hwrm_ver_get_output { #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED 0x2000UL #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED 0x4000UL #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_BOOT_CAPABLE 0x8000UL + #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_SOC_CAPABLE 0x10000UL u8 roce_fw_maj_8b; u8 roce_fw_min_8b; u8 roce_fw_bld_8b; @@ -838,7 +848,9 @@ struct hwrm_async_event_cmpl { #define ASYNC_EVENT_CMPL_EVENT_ID_HW_DOORBELL_RECOVERY_READ_ERROR 0x49UL #define ASYNC_EVENT_CMPL_EVENT_ID_CTX_ERROR 0x4aUL #define ASYNC_EVENT_CMPL_EVENT_ID_UDCC_SESSION_CHANGE 0x4bUL - #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x4cUL + #define ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER 0x4cUL + #define ASYNC_EVENT_CMPL_EVENT_ID_PEER_MMAP_CHANGE 0x4dUL + #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x4eUL #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR @@ -1321,15 +1333,16 @@ struct hwrm_async_event_cmpl_error_report_base { u8 
timestamp_lo; __le16 timestamp_hi; __le32 event_data1; - #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL - #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT 0 - #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED 0x0UL - #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL - #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL - #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL - #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL - #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD 0x5UL - #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED 0x0UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD 0x5UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED 0x6UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED }; /* hwrm_async_event_cmpl_error_report_pause_storm (size:128b/16B) */ @@ -1478,6 +1491,30 @@ struct hwrm_async_event_cmpl_error_report_thermal { #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING }; +/* hwrm_async_event_cmpl_error_report_dual_data_rate_not_supported (size:128b/16B) */ +struct hwrm_async_event_cmpl_error_report_dual_data_rate_not_supported { + __le16 type; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_ERROR_REPORT 0x45UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_ERROR_REPORT + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_V 0x1UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL + #define 
ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED 0x6UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED +}; + /* hwrm_func_reset_input (size:192b/24B) */ struct hwrm_func_reset_input { __le16 req_type; @@ -1781,6 +1818,12 @@ struct hwrm_func_qcaps_output { #define FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED 0x100000UL #define FUNC_QCAPS_RESP_FLAGS_EXT2_UDCC_SUPPORTED 0x200000UL #define FUNC_QCAPS_RESP_FLAGS_EXT2_TIMED_TX_SO_TXTIME_SUPPORTED 0x400000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED 0x800000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_INGRESS_NIC_FLOW_SUPPORTED 0x1000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_LPBK_STATS_SUPPORTED 0x2000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_EGRESS_NIC_FLOW_SUPPORTED 0x4000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_MULTI_LOSSLESS_QUEUES_SUPPORTED 0x8000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_PEER_MMAP_SUPPORTED 0x10000000UL __le16 tunnel_disable_flag; #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL @@ -1791,13 +1834,11 @@ struct hwrm_func_qcaps_output { #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_MPLS 0x40UL #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_PPPOE 0x80UL __le16 xid_partition_cap; - #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_KTLS_TKC 0x1UL - #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_KTLS_RKC 0x2UL - #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_QUIC_TKC 0x4UL - #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_QUIC_RKC 0x8UL + #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_TX_CK 0x1UL + #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_RX_CK 0x2UL u8 device_serial_number[8]; __le16 ctxs_per_partition; - u8 unused_2[2]; + __le16 max_tso_segs; __le32 roce_vf_max_av; __le32 roce_vf_max_cq; __le32 roce_vf_max_mrw; @@ -1844,6 +1885,7 @@ struct hwrm_func_qcfg_output { #define FUNC_QCFG_RESP_FLAGS_FAST_RESET_ALLOWED 0x1000UL #define FUNC_QCFG_RESP_FLAGS_MULTI_ROOT 0x2000UL #define FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV 0x4000UL + #define FUNC_QCFG_RESP_FLAGS_ROCE_VNIC_ID_VALID 0x8000UL u8 mac_address[6]; __le16 pci_id; __le16 alloc_rsscos_ctx; @@ -1955,7 +1997,7 @@ struct hwrm_func_qcfg_output { #define FUNC_QCFG_RESP_DB_PAGE_SIZE_2MB 0x9UL #define FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB 0xaUL #define FUNC_QCFG_RESP_DB_PAGE_SIZE_LAST FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB - u8 unused_2[2]; + __le16 roce_vnic_id; __le32 partition_min_bw; #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_SFT 0 @@ -2003,6 +2045,8 @@ struct hwrm_func_qcfg_output { __le32 roce_max_srq_per_vf; __le32 roce_max_gid_per_vf; __le16 xid_partition_cfg; + #define FUNC_QCFG_RESP_XID_PARTITION_CFG_TX_CK 0x1UL + #define FUNC_QCFG_RESP_XID_PARTITION_CFG_RX_CK 0x2UL u8 unused_7; u8 valid; }; @@ -2229,10 +2273,8 @@ struct hwrm_func_cfg_input { __le32 roce_max_srq_per_vf; __le32 roce_max_gid_per_vf; __le16 xid_partition_cfg; - #define FUNC_CFG_REQ_XID_PARTITION_CFG_KTLS_TKC 0x1UL - #define FUNC_CFG_REQ_XID_PARTITION_CFG_KTLS_RKC 0x2UL - #define FUNC_CFG_REQ_XID_PARTITION_CFG_QUIC_TKC 0x4UL - #define FUNC_CFG_REQ_XID_PARTITION_CFG_QUIC_RKC 0x8UL + #define FUNC_CFG_REQ_XID_PARTITION_CFG_TX_CK 0x1UL + 
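The __le16 max_tso_segs field replacing unused_2 in hwrm_func_qcaps_output is what feeds the netif_set_tso_max_segs() call added in the bnxt.c probe hunk earlier in this patch. A sketch of the plausible plumbing (error handling elided; resp, bp and dev as in the surrounding driver code):

	/* in the FUNC_QCAPS response handler */
	bp->tso_max_segs = le16_to_cpu(resp->max_tso_segs);

	/* later, while setting up the netdev in bnxt_init_one() */
	if (bp->tso_max_segs)
		netif_set_tso_max_segs(dev, bp->tso_max_segs);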
#define FUNC_CFG_REQ_XID_PARTITION_CFG_RX_CK 0x2UL __le16 unused_2; }; @@ -2416,6 +2458,8 @@ struct hwrm_func_drv_rgtr_input { #define FUNC_DRV_RGTR_REQ_FLAGS_RSS_STRICT_HASH_TYPE_SUPPORT 0x100UL #define FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT 0x200UL #define FUNC_DRV_RGTR_REQ_FLAGS_ASYM_QUEUE_CFG_SUPPORT 0x400UL + #define FUNC_DRV_RGTR_REQ_FLAGS_TF_INGRESS_NIC_FLOW_MODE 0x800UL + #define FUNC_DRV_RGTR_REQ_FLAGS_TF_EGRESS_NIC_FLOW_MODE 0x1000UL __le32 enables; #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL @@ -3627,28 +3671,33 @@ struct hwrm_func_backing_store_cfg_v2_input { __le16 target_id; __le64 resp_addr; __le16 type; - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TKC 0x13UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RKC 0x14UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QUIC_TKC 0x1aUL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QUIC_RKC 0x1bUL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TX_CK 0x13UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RX_CK 0x14UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT_TRACE 0x20UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 
0x24UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID __le16 instance; __le32 flags; #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_PREBOOT_MODE 0x1UL @@ -3707,17 +3756,22 @@ struct hwrm_func_backing_store_qcfg_v2_input { #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MRAV 0xeUL #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TIM 0xfUL - #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TKC 0x13UL - #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RKC 0x14UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TX_CK 0x13UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RX_CK 0x14UL #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL - #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_TKC 0x1aUL - #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_RKC 0x1bUL #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_XID_PARTITION_TABLE 0x1dUL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CRT_TRACE 0x20UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID __le16 instance; @@ -3731,24 +3785,29 @@ struct hwrm_func_backing_store_qcfg_v2_output { __le16 seq_id; __le16 resp_len; __le16 type; - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TKC 0x13UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RKC 0x14UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_TKC 0x1aUL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_RKC 0x1bUL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL + #define 
FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TX_CK 0x13UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RX_CK 0x14UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT_TRACE 0x1eUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT2_TRACE 0x1fUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT_TRACE 0x20UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT2_TRACE 0x21UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP0_TRACE 0x22UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID __le16 instance; __le32 flags; __le64 page_dir; @@ -3832,28 +3891,33 @@ struct hwrm_func_backing_store_qcaps_v2_input { __le16 target_id; __le64 resp_addr; __le16 type; - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_KTLS_TKC 0x13UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_KTLS_RKC 0x14UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_TKC 0x1aUL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_RKC 0x1bUL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK 0x13UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK 0x14UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL + #define 
FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE 0x1eUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE 0x1fUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE 0x20UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT2_TRACE 0x21UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP0_TRACE 0x22UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID u8 rsvd[6]; }; @@ -3864,28 +3928,33 @@ struct hwrm_func_backing_store_qcaps_v2_output { __le16 seq_id; __le16 resp_len; __le16 type; - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_KTLS_TKC 0x13UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_KTLS_RKC 0x14UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QUIC_TKC 0x1aUL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QUIC_RKC 0x1bUL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TX_CK 0x13UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RX_CK 0x14UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL + #define 
FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT_TRACE 0x1eUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT2_TRACE 0x1fUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT_TRACE 0x20UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT2_TRACE 0x21UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP0_TRACE 0x22UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID __le16 entry_size; __le32 flags; #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT 0x1UL @@ -3990,6 +4059,7 @@ struct hwrm_func_drv_if_change_output { __le32 flags; #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE 0x1UL #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE 0x2UL + #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE 0x4UL u8 unused_0[3]; u8 valid; }; @@ -4151,7 +4221,8 @@ struct hwrm_port_phy_cfg_input { #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL - #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112 + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112 0x1f42UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112 __le16 auto_link_speeds2_mask; #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_1GB 0x1UL #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_10GB 0x2UL @@ -4166,6 +4237,7 @@ struct hwrm_port_phy_cfg_input { #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_112 0x400UL #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_112 0x800UL #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_112 0x1000UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_800GB_PAM4_112 0x2000UL u8 unused_2[6]; }; @@ -4241,6 +4313,7 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL #define PORT_PHY_QCFG_RESP_LINK_SPEED_200GB 0x7d0UL #define PORT_PHY_QCFG_RESP_LINK_SPEED_400GB 0xfa0UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_800GB 0x1f40UL #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL #define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB u8 duplex_cfg; @@ -4400,7 +4473,13 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4 0x35UL #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4 0x36UL #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4 0x37UL - #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4 + #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASECR8 0x38UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASESR8 0x39UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASELR8 0x3aUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEER8 0x3bUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEFR8 0x3cUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8 0x3dUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8 u8 media_type; #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL @@ -4472,7 +4551,11 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24) #define 
PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24) #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24) - #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPDD (0x18UL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP112 (0x1eUL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFPDD (0x1fUL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_CSFP (0x20UL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_CSFP __le16 fec_cfg; #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL @@ -4994,33 +5077,43 @@ struct hwrm_port_qstats_ext_output { u8 valid; }; -/* hwrm_port_lpbk_qstats_input (size:128b/16B) */ +/* hwrm_port_lpbk_qstats_input (size:256b/32B) */ struct hwrm_port_lpbk_qstats_input { __le16 req_type; __le16 cmpl_ring; __le16 seq_id; __le16 target_id; __le64 resp_addr; + __le16 lpbk_stat_size; + u8 flags; + #define PORT_LPBK_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL + u8 unused_0[5]; + __le64 lpbk_stat_host_addr; }; -/* hwrm_port_lpbk_qstats_output (size:768b/96B) */ +/* hwrm_port_lpbk_qstats_output (size:128b/16B) */ struct hwrm_port_lpbk_qstats_output { __le16 error_code; __le16 req_type; __le16 seq_id; __le16 resp_len; + __le16 lpbk_stat_size; + u8 unused_0[5]; + u8 valid; +}; + +/* port_lpbk_stats (size:640b/80B) */ +struct port_lpbk_stats { __le64 lpbk_ucast_frames; __le64 lpbk_mcast_frames; __le64 lpbk_bcast_frames; __le64 lpbk_ucast_bytes; __le64 lpbk_mcast_bytes; __le64 lpbk_bcast_bytes; - __le64 tx_stat_discard; - __le64 tx_stat_error; - __le64 rx_stat_discard; - __le64 rx_stat_error; - u8 unused_0[7]; - u8 valid; + __le64 lpbk_tx_discards; + __le64 lpbk_tx_errors; + __le64 lpbk_rx_discards; + __le64 lpbk_rx_errors; }; /* hwrm_port_ecn_qstats_input (size:256b/32B) */ @@ -5085,13 +5178,15 @@ struct hwrm_port_clr_stats_output { u8 valid; }; -/* hwrm_port_lpbk_clr_stats_input (size:128b/16B) */ +/* hwrm_port_lpbk_clr_stats_input (size:192b/24B) */ struct hwrm_port_lpbk_clr_stats_input { __le16 req_type; __le16 cmpl_ring; __le16 seq_id; __le16 target_id; __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; }; /* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */ @@ -5232,10 +5327,11 @@ struct hwrm_port_phy_qcaps_output { #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_100G 0x2UL #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_200G 0x4UL __le16 flags2; - #define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL - #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL - #define PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED 0x4UL - #define PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED 0x8UL + #define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL + #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL + #define PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED 0x4UL + #define PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED 0x8UL + #define PORT_PHY_QCAPS_RESP_FLAGS2_REMOTE_LPBK_UNSUPPORTED 0x10UL u8 internal_port_cnt; u8 unused_0; __le16 supported_speeds2_force_mode; @@ -7380,7 +7476,7 @@ struct hwrm_cfa_l2_filter_free_output { u8 valid; }; -/* hwrm_cfa_l2_filter_cfg_input (size:320b/40B) */ +/* hwrm_cfa_l2_filter_cfg_input (size:384b/48B) */ struct hwrm_cfa_l2_filter_cfg_input { __le16 req_type; __le16 cmpl_ring; @@ -7388,23 +7484,33 @@ struct hwrm_cfa_l2_filter_cfg_input { __le16 
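The hwrm_port_lpbk_qstats rework above converts an inline-response query into a DMA-based one: the request now carries lpbk_stat_size and lpbk_stat_host_addr, and the counters land in the new port_lpbk_stats structure in host memory. A sketch of issuing the reworked command, assuming the driver's usual hwrm_req_* helpers (the helper usage is patterned on other bnxt queries, not copied from this patch):

	struct hwrm_port_lpbk_qstats_input *req;
	struct port_lpbk_stats *stats;
	dma_addr_t mapping;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_PORT_LPBK_QSTATS);
	if (rc)
		return rc;
	/* carve a DMA-able slice for the firmware to write the counters into */
	stats = hwrm_req_dma_slice(bp, req, sizeof(*stats), &mapping);
	if (!stats) {
		hwrm_req_drop(bp, req);
		return -ENOMEM;
	}
	req->lpbk_stat_size = cpu_to_le16(sizeof(*stats));
	req->lpbk_stat_host_addr = cpu_to_le64(mapping);
	rc = hwrm_req_send(bp, req);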
target_id; __le64 resp_addr; __le32 flags; - #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL - #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL - #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL - #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX - #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL - #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL - #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2 - #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2) - #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2) - #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2) - #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX + #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2 + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE + #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_MASK 0x30UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_SFT 4 + #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_NO_UPDATE (0x0UL << 4) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_BYPASS_LKUP (0x1UL << 4) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_ENABLE_LKUP (0x2UL << 4) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_ENABLE_LKUP __le32 enables; #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL #define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL + #define CFA_L2_FILTER_CFG_REQ_ENABLES_PROF_FUNC 0x4UL + #define CFA_L2_FILTER_CFG_REQ_ENABLES_L2_CONTEXT_ID 0x8UL __le64 l2_filter_id; __le32 dst_id; __le32 new_mirror_vnic_id; + __le32 prof_func; + __le32 l2_context_id; }; /* hwrm_cfa_l2_filter_cfg_output (size:128b/16B) */ @@ -8455,18 +8561,26 @@ struct hwrm_tunnel_dst_port_query_input { __le16 target_id; __le64 resp_addr; u8 tunnel_type; - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI 0xeUL - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_SRV6 0xfUL - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE 0x11UL - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define 
TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI 0xeUL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_SRV6 0xfUL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE 0x11UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 u8 tunnel_next_proto; u8 unused_0[6]; }; @@ -8503,18 +8617,26 @@ struct hwrm_tunnel_dst_port_alloc_input { __le16 target_id; __le64 resp_addr; u8 tunnel_type; - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI 0xeUL - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_SRV6 0xfUL - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE 0x11UL - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI 0xeUL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_SRV6 0xfUL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE 0x11UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 u8 tunnel_next_proto; __be16 tunnel_dst_port_val; u8 unused_0[4]; @@ -8554,18 +8676,26 @@ struct hwrm_tunnel_dst_port_free_input { __le16 target_id; __le64 resp_addr; u8 tunnel_type; - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 
0x1UL - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI 0xeUL - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_SRV6 0xfUL - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE 0x11UL - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI 0xeUL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_SRV6 0xfUL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE 0x11UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 u8 tunnel_next_proto; __le16 tunnel_dst_port_id; u8 unused_0[4]; @@ -8860,7 +8990,7 @@ struct hwrm_stat_generic_qstats_output { u8 valid; }; -/* generic_sw_hw_stats (size:1408b/176B) */ +/* generic_sw_hw_stats (size:1472b/184B) */ struct generic_sw_hw_stats { __le64 pcie_statistics_tx_tlp; __le64 pcie_statistics_rx_tlp; @@ -8884,6 +9014,7 @@ struct generic_sw_hw_stats { __le64 hw_db_recov_dbs_dropped; __le64 hw_db_recov_drops_serviced; __le64 hw_db_recov_dbs_recovered; + __le64 hw_db_recov_oo_drop_count; }; /* hwrm_fw_reset_input (size:192b/24B) */ @@ -9019,6 +9150,7 @@ struct hwrm_struct_hdr { #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL #define STRUCT_HDR_STRUCT_ID_POWER_BKUP 0x427UL + #define STRUCT_HDR_STRUCT_ID_PEER_MMAP 0x429UL #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL #define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL @@ -9668,6 +9800,9 @@ struct hwrm_dbg_coredump_initiate_input { __le16 instance; __le16 unused_0; u8 seg_flags; + #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_LIVE_DATA 0x1UL + #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_CRASH_DATA 0x2UL + #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_COLLECT_CTX_L1_CACHE 0x4UL u8 unused_1[7]; }; @@ -10343,13 +10478,13 @@ struct hwrm_selftest_irq_output { /* dbc_dbc (size:64b/8B) */ struct dbc_dbc { - u32 index; + __le32 index; #define DBC_DBC_INDEX_MASK 0xffffffUL #define DBC_DBC_INDEX_SFT 0 #define 
DBC_DBC_EPOCH 0x1000000UL #define DBC_DBC_TOGGLE_MASK 0x6000000UL #define DBC_DBC_TOGGLE_SFT 25 - u32 type_path_xid; + __le32 type_path_xid; #define DBC_DBC_XID_MASK 0xfffffUL #define DBC_DBC_XID_SFT 0 #define DBC_DBC_PATH_MASK 0x3000000UL diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index cc07660330..e661ab154d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -109,7 +109,8 @@ static void bnxt_ptp_get_current_time(struct bnxt *bp) spin_unlock_bh(&ptp->ptp_lock); } -static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts) +static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts, + u32 txts_tmo) { struct hwrm_port_ts_query_output *resp; struct hwrm_port_ts_query_input *req; @@ -122,10 +123,15 @@ static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts) req->flags = cpu_to_le32(flags); if ((flags & PORT_TS_QUERY_REQ_FLAGS_PATH) == PORT_TS_QUERY_REQ_FLAGS_PATH_TX) { + u32 tmo_us = txts_tmo * 1000; + req->enables = cpu_to_le16(BNXT_PTP_QTS_TX_ENABLES); req->ptp_seq_id = cpu_to_le32(bp->ptp_cfg->tx_seqid); req->ptp_hdr_offset = cpu_to_le16(bp->ptp_cfg->tx_hdr_off); - req->ts_req_timeout = cpu_to_le16(BNXT_PTP_QTS_TIMEOUT); + if (!tmo_us) + tmo_us = BNXT_PTP_QTS_TIMEOUT; + tmo_us = min(tmo_us, BNXT_PTP_QTS_MAX_TMO_US); + req->ts_req_timeout = cpu_to_le16(tmo_us); } resp = hwrm_req_hold(bp, req); @@ -672,10 +678,17 @@ static void bnxt_stamp_tx_skb(struct bnxt *bp, struct sk_buff *skb) { struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; struct skb_shared_hwtstamps timestamp; + unsigned long now = jiffies; u64 ts = 0, ns = 0; + u32 tmo = 0; int rc; - rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_PATH_TX, &ts); + if (!ptp->txts_pending) + ptp->abs_txts_tmo = now + msecs_to_jiffies(ptp->txts_tmo); + if (!time_after_eq(now, ptp->abs_txts_tmo)) + tmo = jiffies_to_msecs(ptp->abs_txts_tmo - now); + rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_PATH_TX, &ts, + tmo); if (!rc) { memset(&timestamp, 0, sizeof(timestamp)); spin_lock_bh(&ptp->ptp_lock); @@ -684,6 +697,10 @@ static void bnxt_stamp_tx_skb(struct bnxt *bp, struct sk_buff *skb) timestamp.hwtstamp = ns_to_ktime(ns); skb_tstamp_tx(ptp->tx_skb, &timestamp); } else { + if (!time_after_eq(jiffies, ptp->abs_txts_tmo)) { + ptp->txts_pending = true; + return; + } netdev_warn_once(bp->dev, "TS query for TX timer failed rc = %x\n", rc); } @@ -691,6 +708,7 @@ static void bnxt_stamp_tx_skb(struct bnxt *bp, struct sk_buff *skb) dev_kfree_skb_any(ptp->tx_skb); ptp->tx_skb = NULL; atomic_inc(&ptp->tx_avail); + ptp->txts_pending = false; } static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info) @@ -714,6 +732,8 @@ static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info) spin_unlock_bh(&ptp->ptp_lock); ptp->next_overflow_check = now + BNXT_PHC_OVERFLOW_PERIOD; } + if (ptp->txts_pending) + return 0; return HZ; } @@ -891,7 +911,8 @@ int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg) if (rc) return rc; } else { - rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME, &ns); + rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME, + &ns, 0); if (rc) return rc; } @@ -965,6 +986,7 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg) spin_unlock_bh(&ptp->ptp_lock); ptp_schedule_worker(ptp->ptp_clock, 0); } + ptp->txts_tmo = BNXT_PTP_DFLT_TX_TMO; return 0; out: diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
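The bnxt_ptp.c hunks above give the TX timestamp query a caller-supplied budget: bnxt_stamp_tx_skb() records an absolute deadline (abs_txts_tmo) on the first attempt, passes only the remaining milliseconds on retries, and sets txts_pending so the aux worker polls again instead of warning on the first firmware miss. That path runs only when hardware TX timestamping is enabled; a self-contained user-space sketch of switching it on (interface name illustrative):

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <unistd.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ON,	/* hardware TX timestamps */
		.rx_filter = HWTSTAMP_FILTER_NONE,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;
	if (ioctl(fd, SIOCSHWTSTAMP, &ifr))
		return 1;
	close(fd);
	return 0;
}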
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
index fce8dc39a7..2c3415c8fc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
@@ -22,7 +22,9 @@
#define BNXT_LO_TIMER_MASK 0x0000ffffffffUL
#define BNXT_HI_TIMER_MASK 0xffff00000000UL

+#define BNXT_PTP_DFLT_TX_TMO 1000 /* ms */
#define BNXT_PTP_QTS_TIMEOUT 1000
+#define BNXT_PTP_QTS_MAX_TMO_US 65535U
#define BNXT_PTP_QTS_TX_ENABLES (PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID | \
PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT | \
PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET)
@@ -115,11 +117,14 @@ struct bnxt_ptp_cfg {
BNXT_PTP_MSG_PDELAY_REQ | \
BNXT_PTP_MSG_PDELAY_RESP)
u8 tx_tstamp_en:1;
+ u8 txts_pending:1;
int rx_filter;
u32 tstamp_filters;

u32 refclk_regs[2];
u32 refclk_mapped_regs[2];
+ u32 txts_tmo;
+ unsigned long abs_txts_tmo;
};

#if BITS_PER_LONG == 32

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 273c9ba48f..d2ca90407c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -370,6 +370,7 @@ static int bnxt_tc_parse_flow(struct bnxt *bp,
struct bnxt_tc_flow *flow)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(tc_flow_cmd);
+ struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
struct flow_dissector *dissector = rule->match.dissector;

/* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
@@ -380,6 +381,9 @@
return -EOPNOTSUPP;
}

+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
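
The bnxt_tc.c change rejects flows whose match carries control flags the parser does not handle, instead of silently installing a broader offload than requested (the real helper, flow_rule_match_has_control_flags(), also fills in the extack message). A hedged userspace sketch of that early-reject shape, with illustrative flag names and a hard-coded errno value:

    #include <stdio.h>

    #define FLOW_FLAG_FRAG  0x1u /* hypothetical control flag */
    #define SUPPORTED_FLAGS 0u   /* this toy parser supports none */

    static int parse_flow(unsigned int ctrl_flags)
    {
        if (ctrl_flags & ~SUPPORTED_FLAGS) {
            fprintf(stderr, "unsupported control flags 0x%x\n",
                    ctrl_flags & ~SUPPORTED_FLAGS);
            return -95; /* -EOPNOTSUPP */
        }
        /* ... continue building the offload key ... */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", parse_flow(0));              /* 0 */
        printf("%d\n", parse_flow(FLOW_FLAG_FRAG)); /* -95 */
        return 0;
    }
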
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 195c02dc06..ba3fa1c2e5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -31,21 +31,74 @@ static DEFINE_IDA(bnxt_aux_dev_ids);
static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
{
struct bnxt_en_dev *edev = bp->edev;
- int num_msix, idx, i;
+ int num_msix, i;

if (!edev->ulp_tbl->msix_requested) {
netdev_warn(bp->dev, "Requested MSI-X vectors insufficient\n");
return;
}
num_msix = edev->ulp_tbl->msix_requested;
- idx = edev->ulp_tbl->msix_base;
for (i = 0; i < num_msix; i++) {
- ent[i].vector = bp->irq_tbl[idx + i].vector;
- ent[i].ring_idx = idx + i;
+ ent[i].vector = bp->irq_tbl[i].vector;
+ ent[i].ring_idx = i;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
ent[i].db_offset = bp->db_offset;
else
- ent[i].db_offset = (idx + i) * 0x80;
+ ent[i].db_offset = i * 0x80;
+ }
+}
+
+int bnxt_get_ulp_msix_num(struct bnxt *bp)
+{
+ if (bp->edev)
+ return bp->edev->ulp_num_msix_vec;
+ return 0;
+}
+
+void bnxt_set_ulp_msix_num(struct bnxt *bp, int num)
+{
+ if (bp->edev)
+ bp->edev->ulp_num_msix_vec = num;
+}
+
+int bnxt_get_ulp_msix_num_in_use(struct bnxt *bp)
+{
+ if (bnxt_ulp_registered(bp->edev))
+ return bp->edev->ulp_num_msix_vec;
+ return 0;
+}
+
+int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
+{
+ if (bp->edev)
+ return bp->edev->ulp_num_ctxs;
+ return 0;
+}
+
+void bnxt_set_ulp_stat_ctxs(struct bnxt *bp, int num_ulp_ctx)
+{
+ if (bp->edev)
+ bp->edev->ulp_num_ctxs = num_ulp_ctx;
+}
+
+int bnxt_get_ulp_stat_ctxs_in_use(struct bnxt *bp)
+{
+ if (bnxt_ulp_registered(bp->edev))
+ return bp->edev->ulp_num_ctxs;
+ return 0;
+}
+
+void bnxt_set_dflt_ulp_stat_ctxs(struct bnxt *bp)
+{
+ if (bp->edev) {
+ bp->edev->ulp_num_ctxs = BNXT_MIN_ROCE_STAT_CTXS;
+ /* Reserve one additional stat_ctx for PF0 (except
+ * on 1-port NICs) as it also creates one stat_ctx
+ * for PF1 in case of RoCE bonding.
+ */
+ if (BNXT_PF(bp) && !bp->pf.port_id &&
+ bp->port_count > 1)
+ bp->edev->ulp_num_ctxs++;
}
}

@@ -57,25 +110,36 @@ int bnxt_register_dev(struct bnxt_en_dev *edev,
struct bnxt *bp = netdev_priv(dev);
unsigned int max_stat_ctxs;
struct bnxt_ulp *ulp;
+ int rc = 0;

+ rtnl_lock();
+ mutex_lock(&edev->en_dev_lock);
+ if (!bp->irq_tbl) {
+ rc = -ENODEV;
+ goto exit;
+ }
max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
- bp->cp_nr_rings == max_stat_ctxs)
- return -ENOMEM;
+ bp->cp_nr_rings == max_stat_ctxs) {
+ rc = -ENOMEM;
+ goto exit;
+ }

ulp = edev->ulp_tbl;
- if (!ulp)
- return -ENOMEM;
-
ulp->handle = handle;
rcu_assign_pointer(ulp->ulp_ops, ulp_ops);

if (test_bit(BNXT_STATE_OPEN, &bp->state))
- bnxt_hwrm_vnic_cfg(bp, 0);
+ bnxt_hwrm_vnic_cfg(bp, &bp->vnic_info[BNXT_VNIC_DEFAULT]);
+
+ edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp);

bnxt_fill_msix_vecs(bp, bp->edev->msix_entries);
edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
- return 0;
+exit:
+ mutex_unlock(&edev->en_dev_lock);
+ rtnl_unlock();
+ return rc;
}
EXPORT_SYMBOL(bnxt_register_dev);
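
bnxt_register_dev() now takes rtnl_lock and the new en_dev_lock up front and funnels every failure through a single exit label, so the unlock order is always the reverse of the lock order. A minimal pthread analogue of that shape (a sketch with assumed names, not driver code):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER; /* plays rtnl_lock */
    static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER; /* plays en_dev_lock */

    static int register_dev(int have_irqs, int have_ctxs)
    {
        int rc = 0;

        pthread_mutex_lock(&outer);
        pthread_mutex_lock(&inner);
        if (!have_irqs) {
            rc = -ENODEV;
            goto exit;
        }
        if (!have_ctxs) {
            rc = -ENOMEM;
            goto exit;
        }
        /* ... commit the registration while both locks are held ... */
    exit:
        pthread_mutex_unlock(&inner);
        pthread_mutex_unlock(&outer);
        return rc;
    }

    int main(void)
    {
        printf("%d %d %d\n", register_dev(1, 1), register_dev(0, 1),
               register_dev(1, 0));
        return 0;
    }
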
@@ -87,8 +151,11 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev)
int i = 0;

ulp = edev->ulp_tbl;
+ rtnl_lock();
+ mutex_lock(&edev->en_dev_lock);
if (ulp->msix_requested)
edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
+ edev->ulp_tbl->msix_requested = 0;

if (ulp->max_async_event_id)
bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);
@@ -101,11 +168,13 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev)
msleep(100);
i++;
}
+ mutex_unlock(&edev->en_dev_lock);
+ rtnl_unlock();
return;
}
EXPORT_SYMBOL(bnxt_unregister_dev);

-int bnxt_get_ulp_msix_num(struct bnxt *bp)
+static int bnxt_set_dflt_ulp_msix(struct bnxt *bp)
{
u32 roce_msix = BNXT_VF(bp) ?
BNXT_MAX_VF_ROCE_MSIX : BNXT_MAX_ROCE_MSIX;
@@ -114,29 +183,6 @@ int bnxt_get_ulp_msix_num(struct bnxt *bp)
min_t(u32, roce_msix, num_online_cpus()) : 0);
}

-int bnxt_get_ulp_msix_base(struct bnxt *bp)
-{
- if (bnxt_ulp_registered(bp->edev)) {
- struct bnxt_en_dev *edev = bp->edev;
-
- if (edev->ulp_tbl->msix_requested)
- return edev->ulp_tbl->msix_base;
- }
- return 0;
-}
-
-int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
-{
- if (bnxt_ulp_registered(bp->edev)) {
- struct bnxt_en_dev *edev = bp->edev;
-
- if (edev->ulp_tbl->msix_requested)
- return BNXT_MIN_ROCE_STAT_CTXS;
- }
-
- return 0;
-}
-
int bnxt_send_msg(struct bnxt_en_dev *edev,
struct bnxt_fw_msg *fw_msg)
{
@@ -181,6 +227,12 @@ void bnxt_ulp_stop(struct bnxt *bp)
if (!edev)
return;

+ mutex_lock(&edev->en_dev_lock);
+ if (!bnxt_ulp_registered(edev)) {
+ mutex_unlock(&edev->en_dev_lock);
+ return;
+ }
+
edev->flags |= BNXT_EN_FLAG_ULP_STOPPED;
if (aux_priv) {
struct auxiliary_device *adev;
@@ -195,6 +247,7 @@ void bnxt_ulp_stop(struct bnxt *bp)
adrv->suspend(adev, pm);
}
}
+ mutex_unlock(&edev->en_dev_lock);
}

void bnxt_ulp_start(struct bnxt *bp, int err)
@@ -210,6 +263,12 @@
if (err)
return;

+ mutex_lock(&edev->en_dev_lock);
+ if (!bnxt_ulp_registered(edev)) {
+ mutex_unlock(&edev->en_dev_lock);
+ return;
+ }
+
if (edev->ulp_tbl->msix_requested)
bnxt_fill_msix_vecs(bp, edev->msix_entries);

@@ -225,7 +284,7 @@
adrv->resume(adev);
}
}
-
+ mutex_unlock(&edev->en_dev_lock);
}

void bnxt_ulp_irq_stop(struct bnxt *bp)
@@ -309,7 +368,6 @@ void bnxt_rdma_aux_device_uninit(struct bnxt *bp)
aux_priv = bp->aux_priv;
adev = &aux_priv->aux_dev;

- auxiliary_device_delete(adev);
auxiliary_device_uninit(adev);
}

@@ -327,6 +385,14 @@ static void bnxt_aux_dev_release(struct device *dev)
bp->aux_priv = NULL;
}

+void bnxt_rdma_aux_device_del(struct bnxt *bp)
+{
+ if (!bp->edev)
+ return;
+
+ auxiliary_device_delete(&bp->aux_priv->aux_dev);
+}
+
static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
{
edev->net = bp->dev;
@@ -334,6 +400,7 @@ static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
edev->l2_db_size = bp->db_size;
edev->l2_db_size_nc = bp->db_size;
edev->l2_db_offset = bp->db_offset;
+ mutex_init(&edev->en_dev_lock);

if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
@@ -347,7 +414,23 @@ static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
edev->pf_port_id = bp->pf.port_id;
edev->en_state = bp->state;
edev->bar0 = bp->bar0;
- edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp);
+}
+
+void bnxt_rdma_aux_device_add(struct bnxt *bp)
+{
+ struct auxiliary_device *aux_dev;
+ int rc;
+
+ if (!bp->edev)
+ return;
+
+ aux_dev = &bp->aux_priv->aux_dev;
+ rc = auxiliary_device_add(aux_dev);
+ if (rc) {
+ netdev_warn(bp->dev, "Failed to add auxiliary device for ROCE\n");
+ auxiliary_device_uninit(aux_dev);
+ bp->flags &= ~BNXT_FLAG_ROCE_CAP;
+ }
}

void bnxt_rdma_aux_device_init(struct bnxt *bp)
@@ -404,13 +487,7 @@
edev->ulp_tbl = ulp;
bp->edev = edev;
bnxt_set_edev_info(edev, bp);
-
- rc = auxiliary_device_add(aux_dev);
- if (rc) {
- netdev_warn(bp->dev,
- "Failed to add auxiliary device for ROCE\n");
- goto aux_dev_uninit;
- }
+ bp->ulp_num_msix_want = bnxt_set_dflt_ulp_msix(bp);

return;
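
The refactor above splits creation into init + add and teardown into del + uninit (bnxt_rdma_aux_device_{init,add,del,uninit}), so the auxiliary RoCE device can be unplugged and re-plugged without rebuilding the edev state. A toy sketch of that two-phase lifecycle, with all names hypothetical:

    #include <stdio.h>

    struct aux_dev { int initialized; int visible; };

    static void dev_init(struct aux_dev *d)   { d->initialized = 1; }
    static void dev_add(struct aux_dev *d)    { if (d->initialized) d->visible = 1; }
    static void dev_del(struct aux_dev *d)    { d->visible = 0; }
    static void dev_uninit(struct aux_dev *d) { if (!d->visible) d->initialized = 0; }

    int main(void)
    {
        struct aux_dev d = { 0, 0 };

        dev_init(&d);   /* like bnxt_rdma_aux_device_init()   */
        dev_add(&d);    /* like bnxt_rdma_aux_device_add()    */
        printf("up: init=%d visible=%d\n", d.initialized, d.visible);
        dev_del(&d);    /* like bnxt_rdma_aux_device_del()    */
        dev_uninit(&d); /* like bnxt_rdma_aux_device_uninit() */
        printf("down: init=%d visible=%d\n", d.initialized, d.visible);
        return 0;
    }
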
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index b9e73de14b..4eafe6ec0a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -46,7 +46,6 @@ struct bnxt_ulp {
unsigned long *async_events_bmap;
u16 max_async_event_id;
u16 msix_requested;
- u16 msix_base;
atomic_t ref_count;
};

@@ -86,18 +85,28 @@ struct bnxt_en_dev {
* updated in resume.
*/
void __iomem *bar0;
+
+ u16 ulp_num_msix_vec;
+ u16 ulp_num_ctxs;
+
+ /* serialize ulp operations */
+ struct mutex en_dev_lock;
};

static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev)
{
- if (edev && edev->ulp_tbl)
+ if (edev && rcu_access_pointer(edev->ulp_tbl->ulp_ops))
return true;
return false;
}

int bnxt_get_ulp_msix_num(struct bnxt *bp);
-int bnxt_get_ulp_msix_base(struct bnxt *bp);
+int bnxt_get_ulp_msix_num_in_use(struct bnxt *bp);
+void bnxt_set_ulp_msix_num(struct bnxt *bp, int num);
int bnxt_get_ulp_stat_ctxs(struct bnxt *bp);
+void bnxt_set_ulp_stat_ctxs(struct bnxt *bp, int num_ctxs);
+int bnxt_get_ulp_stat_ctxs_in_use(struct bnxt *bp);
+void bnxt_set_dflt_ulp_stat_ctxs(struct bnxt *bp);
void bnxt_ulp_stop(struct bnxt *bp);
void bnxt_ulp_start(struct bnxt *bp, int err);
void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
@@ -105,6 +114,8 @@ void bnxt_ulp_irq_stop(struct bnxt *bp);
void bnxt_ulp_irq_restart(struct bnxt *bp, int err);
void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl);
void bnxt_rdma_aux_device_uninit(struct bnxt *bp);
+void bnxt_rdma_aux_device_del(struct bnxt *bp);
+void bnxt_rdma_aux_device_add(struct bnxt *bp);
void bnxt_rdma_aux_device_init(struct bnxt *bp);
int bnxt_register_dev(struct bnxt_en_dev *edev, struct bnxt_ulp_ops *ulp_ops,
void *handle);
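
bnxt_ulp_registered() above now keys off whether an ulp_ops pointer has been published, not merely whether the table was allocated, which matches the new locked register/unregister paths. A userspace analogue using a C11 atomic pointer in place of RCU (illustrative only, not the kernel primitives):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct ulp_ops { void (*irq_stop)(void); };

    static _Atomic(struct ulp_ops *) published_ops; /* NULL until registered */

    static bool ulp_registered(void)
    {
        return atomic_load_explicit(&published_ops, memory_order_acquire) != NULL;
    }

    static void noop(void) { }

    int main(void)
    {
        static struct ulp_ops ops = { .irq_stop = noop };

        printf("%d\n", ulp_registered()); /* 0: table may exist, no ops yet */
        atomic_store_explicit(&published_ops, &ops, memory_order_release);
        printf("%d\n", ulp_registered()); /* 1 */
        return 0;
    }
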
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 4079538bc3..345681d500 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -197,7 +197,7 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
dma_sync_single_for_cpu(&pdev->dev, mapping + offset, len, bp->rx_dir);
xdp_init_buff(xdp, buflen, &rxr->xdp_rxq);
- xdp_prepare_buff(xdp, data_ptr - offset, offset, len, false);
+ xdp_prepare_buff(xdp, data_ptr - offset, offset, len, true);
}

void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
@@ -222,7 +222,7 @@ void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
* false - packet should be passed to the stack.
*/
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
- struct xdp_buff xdp, struct page *page, u8 **data_ptr,
+ struct xdp_buff *xdp, struct page *page, u8 **data_ptr,
unsigned int *len, u8 *event)
{
struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
@@ -244,9 +244,9 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
txr = rxr->bnapi->tx_ring[0];
/* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
- orig_data = xdp.data;
+ orig_data = xdp->data;

- act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ act = bpf_prog_run_xdp(xdp_prog, xdp);

tx_avail = bnxt_tx_avail(bp, txr);
/* If the tx ring is not full, we must not update the rx producer yet
@@ -255,10 +255,10 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
if (tx_avail != bp->tx_ring_size)
*event &= ~BNXT_RX_EVENT;

- *len = xdp.data_end - xdp.data;
- if (orig_data != xdp.data) {
- offset = xdp.data - xdp.data_hard_start;
- *data_ptr = xdp.data_hard_start + offset;
+ *len = xdp->data_end - xdp->data;
+ if (orig_data != xdp->data) {
+ offset = xdp->data - xdp->data_hard_start;
+ *data_ptr = xdp->data_hard_start + offset;
}

switch (act) {
@@ -270,8 +270,8 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
mapping = rx_buf->mapping - bp->rx_dma_offset;
*event &= BNXT_TX_CMP_EVENT;

- if (unlikely(xdp_buff_has_frags(&xdp))) {
- struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(&xdp);
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);

tx_needed += sinfo->nr_frags;
*event = BNXT_AGG_EVENT;
@@ -279,7 +279,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
if (tx_avail < tx_needed) {
trace_xdp_exception(bp->dev, xdp_prog, act);
- bnxt_xdp_buff_frags_free(rxr, &xdp);
+ bnxt_xdp_buff_frags_free(rxr, xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
}
@@ -289,7 +289,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
*event |= BNXT_TX_EVENT;
__bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
- NEXT_RX(rxr->rx_prod), &xdp);
+ NEXT_RX(rxr->rx_prod), xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
case XDP_REDIRECT:
@@ -306,12 +306,12 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
/* if we are unable to allocate a new buffer, abort and reuse */
if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
trace_xdp_exception(bp->dev, xdp_prog, act);
- bnxt_xdp_buff_frags_free(rxr, &xdp);
+ bnxt_xdp_buff_frags_free(rxr, xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
}

- if (xdp_do_redirect(bp->dev, &xdp, xdp_prog)) {
+ if (xdp_do_redirect(bp->dev, xdp, xdp_prog)) {
trace_xdp_exception(bp->dev, xdp_prog, act);
page_pool_recycle_direct(rxr->page_pool, page);
return true;
@@ -326,7 +326,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
trace_xdp_exception(bp->dev, xdp_prog, act);
fallthrough;
case XDP_DROP:
- bnxt_xdp_buff_frags_free(rxr, &xdp);
+ bnxt_xdp_buff_frags_free(rxr, xdp);
bnxt_reuse_rx_data(rxr, cons, page);
break;
}
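
The xdp_buff is now threaded through bnxt_rx_xdp() by pointer; with the old by-value parameter, any xdp->data adjustment made by the BPF program (or by helpers like bpf_xdp_adjust_head) landed in a stack copy the caller never saw. A self-contained demo of the difference:

    #include <stdio.h>

    struct buff { char *data; char *data_end; };

    static void grow_headroom_by_value(struct buff b) { b.data += 4; } /* mutates a copy */
    static void grow_headroom_by_ptr(struct buff *b) { b->data += 4; } /* mutates caller's */

    int main(void)
    {
        char pkt[64];
        struct buff b = { pkt, pkt + sizeof(pkt) };

        grow_headroom_by_value(b);
        printf("by value:   offset %td\n", b.data - pkt); /* still 0 */
        grow_headroom_by_ptr(&b);
        printf("by pointer: offset %td\n", b.data - pkt); /* 4 */
        return 0;
    }
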
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
index 5e412c5655..0122782400 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -18,7 +18,7 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
struct xdp_buff *xdp);
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget);
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
- struct xdp_buff xdp, struct page *page, u8 **data_ptr,
+ struct xdp_buff *xdp, struct page *page, u8 **data_ptr,
unsigned int *len, u8 *event);
int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp);
int bnxt_xdp_xmit(struct net_device *dev, int num_frames,

diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 3d63177e7e..c2b4188a1e 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -3682,7 +3682,8 @@ static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
#if defined(CONFIG_INET)
struct rtable *rt;

- rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
+ rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0,
+ RT_SCOPE_UNIVERSE);
if (!IS_ERR(rt)) {
*dst = &rt->dst;
return 0;

diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 62ff4381ac..1589a49b87 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -4019,7 +4019,7 @@ static int tg3_power_up(struct tg3 *tp)

static int tg3_setup_phy(struct tg3 *, bool);

-static int tg3_power_down_prepare(struct tg3 *tp)
+static void tg3_power_down_prepare(struct tg3 *tp)
{
u32 misc_host_ctrl;
bool device_should_wake, do_low_power;
@@ -4263,7 +4263,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)

tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);

- return 0;
+ return;
}

static void tg3_power_down(struct tg3 *tp)
@@ -14295,7 +14295,7 @@ static void tg3_set_rx_mode(struct net_device *dev)
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
int new_mtu)
{
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);

if (new_mtu > ETH_DATA_LEN) {
if (tg3_flag(tp, 5780_CLASS)) {
@@ -18084,7 +18084,6 @@ static int tg3_suspend(struct device *device)
{
struct net_device *dev = dev_get_drvdata(device);
struct tg3 *tp = netdev_priv(dev);
- int err = 0;

rtnl_lock();
@@ -18108,32 +18107,11 @@ static int tg3_suspend(struct device *device)
tg3_flag_clear(tp, INIT_COMPLETE);
tg3_full_unlock(tp);

- err = tg3_power_down_prepare(tp);
- if (err) {
- int err2;
-
- tg3_full_lock(tp, 0);
-
- tg3_flag_set(tp, INIT_COMPLETE);
- err2 = tg3_restart_hw(tp, true);
- if (err2)
- goto out;
-
- tg3_timer_start(tp);
-
- netif_device_attach(dev);
- tg3_netif_start(tp);
-
-out:
- tg3_full_unlock(tp);
-
- if (!err2)
- tg3_phy_start(tp);
- }
+ tg3_power_down_prepare(tp);

unlock:
rtnl_unlock();
- return err;
+ return 0;
}

static int tg3_resume(struct device *device)
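
As in the earlier b44/bnx2 hunks, tg3_set_mtu() now publishes dev->mtu with WRITE_ONCE() because other paths may read it without holding the instance lock. A C11-atomics analogue of the store/load pairing (the kernel uses WRITE_ONCE/READ_ONCE; this userspace sketch is shown only by analogy):

    #include <stdatomic.h>
    #include <stdio.h>

    struct net_dev { _Atomic unsigned int mtu; };

    static void change_mtu(struct net_dev *dev, unsigned int new_mtu)
    {
        /* kernel: WRITE_ONCE(dev->mtu, new_mtu); single, untorn store */
        atomic_store_explicit(&dev->mtu, new_mtu, memory_order_relaxed);
    }

    static unsigned int read_mtu(struct net_dev *dev)
    {
        /* lockless readers pair this with READ_ONCE(dev->mtu) */
        return atomic_load_explicit(&dev->mtu, memory_order_relaxed);
    }

    int main(void)
    {
        struct net_dev dev = { 1500 };

        change_mtu(&dev, 9000);
        printf("mtu=%u\n", read_mtu(&dev));
        return 0;
    }
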