author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:03 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:03 +0000
commit     01a69402cf9d38ff180345d55c2ee51c7e89fbc7
tree       b406c5242a088c4f59c6e4b719b783f43aca6ae9 /drivers/net/ethernet/marvell/octeontx2/af
parent     Adding upstream version 6.7.12.
Adding upstream version 6.8.9.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2/af')
15 files changed, 1123 insertions, 126 deletions
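The centerpiece of this bump, as far as the AF driver goes, is a new NIX multicast/mirror group mailbox interface (NIX_MCAST_GRP_CREATE/DESTROY/UPDATE, mbox IDs 0x802b-0x802d). Before the per-file diff, here is a minimal sketch of the intended PF/VF-side call flow. It assumes the otx2_mbox_alloc_msg_* helpers that the NIC driver's M() macro table generates, plus otx2_sync_mbox_msg()/otx2_mbox_get_rsp(); names, locking, and error handling here are illustrative, not part of this patch.

/* Sketch: create an ingress multicast/mirror group, add one RQ
 * destination, then destroy the group.  Helper names follow the
 * pattern the NIC driver's M() macro table generates; treat them,
 * and the locking shown, as assumptions rather than this patch's API.
 */
static int otx2_mcast_grp_demo(struct otx2_nic *pfvf, u16 target_pcifunc)
{
	struct nix_mcast_grp_create_req *creq;
	struct nix_mcast_grp_create_rsp *crsp;
	struct nix_mcast_grp_update_req *ureq;
	struct nix_mcast_grp_destroy_req *dreq;
	u32 grp_idx;
	int err;

	mutex_lock(&pfvf->mbox.lock);

	/* 1. Create the group; AF returns the group index */
	creq = otx2_mbox_alloc_msg_nix_mcast_grp_create(&pfvf->mbox);
	if (!creq) {
		err = -ENOMEM;
		goto unlock;
	}
	creq->dir = NIX_MCAST_INGRESS;
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		goto unlock;
	crsp = (struct nix_mcast_grp_create_rsp *)
	       otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &creq->hdr);
	if (IS_ERR(crsp)) {
		err = PTR_ERR(crsp);
		goto unlock;
	}
	/* Needed for update/destroy and as req->index of the MCAM rule */
	grp_idx = crsp->mcast_grp_idx;

	/* 2. Add one MCE entry: deliver to target_pcifunc's RQ 0 */
	ureq = otx2_mbox_alloc_msg_nix_mcast_grp_update(&pfvf->mbox);
	if (!ureq) {
		err = -ENOMEM;
		goto unlock;
	}
	ureq->mcast_grp_idx = grp_idx;
	ureq->op = NIX_MCAST_OP_ADD_ENTRY;
	ureq->num_mce_entry = 1;
	ureq->pcifunc[0] = target_pcifunc;
	ureq->dest_type[0] = NIX_RX_RQ;
	ureq->rq_rss_index[0] = 0;
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		goto unlock;

	/* 3. Destroy the group (frees its MCE list in the AF) */
	dreq = otx2_mbox_alloc_msg_nix_mcast_grp_destroy(&pfvf->mbox);
	if (!dreq) {
		err = -ENOMEM;
		goto unlock;
	}
	dreq->mcast_grp_idx = grp_idx;
	err = otx2_sync_mbox_msg(&pfvf->mbox);
unlock:
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

A PF/VF that later installs an NPC rule with NIX_RX_ACTIONOP_MCAST passes this group index in req->index; the AF resolves it to the MCE list start via rvu_nix_mcast_get_mce_index() (see npc_mcast_update_action_index() in the rvu_npc_fs.c hunks below).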
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 3c0f55b3e4..b86f3224f0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -808,6 +808,11 @@ static int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id,
 	if (!is_lmac_valid(cgx, lmac_id))
 		return -ENODEV;
 
+	cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+	cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+	cfg |= rx_pause ? CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK : 0x0;
+	cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
 	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
 	cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
 	cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
index 7d741e3ba8..1e5aa53975 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -452,4 +452,5 @@ const char *otx2_mbox_id2name(u16 id)
 EXPORT_SYMBOL(otx2_mbox_id2name);
 
 MODULE_AUTHOR("Marvell.");
+MODULE_DESCRIPTION("Marvell RVU NIC Mbox helpers");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index bd4b9661ee..98e203a0e2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -310,6 +310,13 @@ M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \
 				nix_bandprof_get_hwinfo_rsp)		    \
 M(NIX_READ_INLINE_IPSEC_CFG, 0x8023, nix_read_inline_ipsec_cfg,	    \
 				msg_req, nix_inline_ipsec_cfg)		    \
+M(NIX_MCAST_GRP_CREATE, 0x802b, nix_mcast_grp_create, nix_mcast_grp_create_req, \
+				nix_mcast_grp_create_rsp)		    \
+M(NIX_MCAST_GRP_DESTROY, 0x802c, nix_mcast_grp_destroy, nix_mcast_grp_destroy_req, \
+				msg_rsp)				    \
+M(NIX_MCAST_GRP_UPDATE, 0x802d, nix_mcast_grp_update,			    \
+				nix_mcast_grp_update_req,		    \
+				nix_mcast_grp_update_rsp)		    \
 /* MCS mbox IDs (range 0xA000 - 0xBFFF) */				    \
 M(MCS_ALLOC_RESOURCES,	0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req, \
 				mcs_alloc_rsrc_rsp)			    \
@@ -836,6 +843,9 @@ enum nix_af_status {
 	NIX_AF_ERR_CQ_CTX_WRITE_ERR  = -429,
 	NIX_AF_ERR_AQ_CTX_RETRY_WRITE  = -430,
 	NIX_AF_ERR_LINK_CREDITS  = -431,
+	NIX_AF_ERR_INVALID_MCAST_GRP = -436,
+	NIX_AF_ERR_INVALID_MCAST_DEL_REQ = -437,
+	NIX_AF_ERR_NON_CONTIG_MCE_LIST = -438,
 };
 
 /* For NIX RX vtag action  */
@@ -1210,6 +1220,68 @@ struct nix_bp_cfg_rsp {
 	u8	chan_cnt; /* Number of channel for which bpids are assigned */
 };
 
+struct nix_mcast_grp_create_req {
+	struct mbox_msghdr hdr;
+#define NIX_MCAST_INGRESS	0
+#define NIX_MCAST_EGRESS	1
+	u8 dir;
+	u8 reserved[11];
+	/* Reserving few bytes for future requirement */
+};
+
+struct nix_mcast_grp_create_rsp {
+	struct mbox_msghdr hdr;
+	/* This mcast_grp_idx should be passed during MCAM
+	 * write entry for multicast. AF will identify the
+	 * corresponding multicast table index associated
+	 * with the group id and program the same to MCAM entry.
+	 * This group id is also needed during group delete
+	 * and update request.
+	 */
+	u32 mcast_grp_idx;
+};
+
+struct nix_mcast_grp_destroy_req {
+	struct mbox_msghdr hdr;
+	/* Group id returned by nix_mcast_grp_create_rsp */
+	u32 mcast_grp_idx;
+	/* If AF is requesting for destroy, then set
+	 * it to '1'.
Otherwise keep it to '0' + */ + u8 is_af; +}; + +struct nix_mcast_grp_update_req { + struct mbox_msghdr hdr; + /* Group id returned by nix_mcast_grp_create_rsp */ + u32 mcast_grp_idx; + /* Number of multicast/mirror entries requested */ + u32 num_mce_entry; +#define NIX_MCE_ENTRY_MAX 64 +#define NIX_RX_RQ 0 +#define NIX_RX_RSS 1 + /* Receive queue or RSS index within pf_func */ + u32 rq_rss_index[NIX_MCE_ENTRY_MAX]; + /* pcifunc is required for both ingress and egress multicast */ + u16 pcifunc[NIX_MCE_ENTRY_MAX]; + /* channel is required for egress multicast */ + u16 channel[NIX_MCE_ENTRY_MAX]; +#define NIX_MCAST_OP_ADD_ENTRY 0 +#define NIX_MCAST_OP_DEL_ENTRY 1 + /* Destination type. 0:Receive queue, 1:RSS*/ + u8 dest_type[NIX_MCE_ENTRY_MAX]; + u8 op; + /* If AF is requesting for update, then set + * it to '1'. Otherwise keep it to '0' + */ + u8 is_af; +}; + +struct nix_mcast_grp_update_rsp { + struct mbox_msghdr hdr; + u32 mce_start_index; +}; + /* Global NIX inline IPSec configuration */ struct nix_inline_ipsec_cfg { struct mbox_msghdr hdr; @@ -1485,6 +1557,8 @@ struct flow_msg { #define OTX2_FLOWER_MASK_MPLS_TTL GENMASK(7, 0) #define OTX2_FLOWER_MASK_MPLS_NON_TTL GENMASK(31, 8) u32 mpls_lse[4]; + u8 icmp_type; + u8 icmp_code; }; struct npc_install_flow_req { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h index 8c0732c9a7..b0b4dea548 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h @@ -214,6 +214,8 @@ enum key_fields { NPC_MPLS3_TTL, NPC_MPLS4_LBTCBOS, NPC_MPLS4_TTL, + NPC_TYPE_ICMP, + NPC_CODE_ICMP, NPC_HEADER_FIELDS_MAX, NPC_CHAN = NPC_HEADER_FIELDS_MAX, /* Valid when Rx */ NPC_PF_FUNC, /* Valid when Tx */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index 32645aefd5..6a911ea0cf 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -156,7 +156,7 @@ int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc) return start; } -static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start) +void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start) { if (!rsrc->bmap) return; @@ -935,6 +935,9 @@ static int rvu_setup_hw_resources(struct rvu *rvu) hw->total_vfs = (cfg >> 20) & 0xFFF; hw->max_vfs_per_pf = (cfg >> 40) & 0xFF; + if (!is_rvu_otx2(rvu)) + rvu_apr_block_cn10k_init(rvu); + /* Init NPA LF's bitmap */ block = &hw->block[BLKADDR_NPA]; if (!block->implemented) @@ -2633,6 +2636,10 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc) * 2. Flush and reset SSO/SSOW * 3. 
Cleanup pools (NPA) */ + + /* Free multicast/mirror node associated with the 'pcifunc' */ + rvu_nix_mcast_flr_free_entries(rvu, pcifunc); + rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0); rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX1); rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index 185c296eaa..d44a400e1b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -116,11 +116,12 @@ struct rvu_block { }; struct nix_mcast { - struct qmem *mce_ctx; - struct qmem *mcast_buf; - int replay_pkind; - int next_free_mce; - struct mutex mce_lock; /* Serialize MCE updates */ + struct qmem *mce_ctx; + struct qmem *mcast_buf; + int replay_pkind; + struct rsrc_bmap mce_counter[2]; + /* Counters for both ingress and egress mcast lists */ + struct mutex mce_lock; /* Serialize MCE updates */ }; struct nix_mce_list { @@ -129,6 +130,23 @@ struct nix_mce_list { int max; }; +struct nix_mcast_grp_elem { + struct nix_mce_list mcast_mce_list; + u32 mcast_grp_idx; + u32 pcifunc; + int mcam_index; + int mce_start_index; + struct list_head list; + u8 dir; +}; + +struct nix_mcast_grp { + struct list_head mcast_grp_head; + int count; + int next_grp_index; + struct mutex mcast_grp_lock; /* Serialize MCE updates */ +}; + /* layer metadata to uniquely identify a packet header field */ struct npc_layer_mdata { u8 lid; @@ -339,6 +357,7 @@ struct nix_hw { struct rvu *rvu; struct nix_txsch txsch[NIX_TXSCH_LVL_CNT]; /* Tx schedulers */ struct nix_mcast mcast; + struct nix_mcast_grp mcast_grp; struct nix_flowkey flowkey; struct nix_mark_format mark_format; struct nix_lso lso; @@ -744,6 +763,7 @@ void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id); bool is_rsrc_free(struct rsrc_bmap *rsrc, int id); int rvu_rsrc_free_count(struct rsrc_bmap *rsrc); int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc); +void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start); bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc); u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr); int rvu_get_pf(u16 pcifunc); @@ -850,6 +870,11 @@ u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu); u32 convert_bytes_to_dwrr_mtu(u32 bytes); void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc, struct nix_txsch *txsch, bool enable); +void rvu_nix_mcast_flr_free_entries(struct rvu *rvu, u16 pcifunc); +int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc, + u32 mcast_grp_idx); +int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc, + u32 mcast_grp_idx, u16 mcam_index); /* NPC APIs */ void rvu_npc_freemem(struct rvu *rvu); @@ -898,6 +923,10 @@ void npc_mcam_enable_flows(struct rvu *rvu, u16 target); void npc_mcam_disable_flows(struct rvu *rvu, u16 target); void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, int index, bool enable); +u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam, + int blkaddr, int index); +void npc_set_mcam_action(struct rvu *rvu, struct npc_mcam *mcam, + int blkaddr, int index, u64 cfg); void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, u16 src, struct mcam_entry *entry, u8 *intf, u8 *ena); @@ -923,6 +952,8 @@ int npc_install_mcam_drop_rule(struct rvu *rvu, int mcam_idx, u16 *counter_idx, u64 bcast_mcast_val, u64 bcast_mcast_mask); void npc_mcam_rsrcs_reserve(struct rvu *rvu, int blkaddr, int entry_idx); bool npc_is_feature_supported(struct rvu 
*rvu, u64 features, u8 intf); +int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr); +void npc_mcam_rsrcs_deinit(struct rvu *rvu); /* CPT APIs */ int rvu_cpt_register_interrupts(struct rvu *rvu); @@ -944,6 +975,7 @@ void rvu_nix_block_cn10k_init(struct rvu *rvu, struct nix_hw *nix_hw); /* CN10K RVU - LMT*/ void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc); +void rvu_apr_block_cn10k_init(struct rvu *rvu); #ifdef CONFIG_DEBUG_FS void rvu_dbg_init(struct rvu *rvu); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index 72e060cf6b..e9bf9231b0 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -160,6 +160,8 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu) continue; lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu)); for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) { + if (iter >= MAX_LMAC_COUNT) + continue; lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu), iter); rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c index 0e74c5a223..7fa98aeb36 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c @@ -559,3 +559,12 @@ void rvu_nix_block_cn10k_init(struct rvu *rvu, struct nix_hw *nix_hw) cfg |= BIT_ULL(1) | BIT_ULL(2); rvu_write64(rvu, blkaddr, NIX_AF_CFG, cfg); } + +void rvu_apr_block_cn10k_init(struct rvu *rvu) +{ + u64 reg; + + reg = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG); + reg |= FIELD_PREP(LMTST_THROTTLE_MASK, LMTST_WR_PEND_MAX); + rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CFG, reg); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index bd817ee887..e6d7914ce6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -1825,6 +1825,8 @@ static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp) static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp) { struct nix_cq_ctx_s *cq_ctx = &rsp->cq; + struct nix_hw *nix_hw = m->private; + struct rvu *rvu = nix_hw->rvu; seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base); @@ -1836,6 +1838,16 @@ static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp) seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n", cq_ctx->bpid, cq_ctx->bp_ena); + if (!is_rvu_otx2(rvu)) { + seq_printf(m, "W1: lbpid_high \t\t\t0x%03x\n", cq_ctx->lbpid_high); + seq_printf(m, "W1: lbpid_med \t\t\t0x%03x\n", cq_ctx->lbpid_med); + seq_printf(m, "W1: lbpid_low \t\t\t0x%03x\n", cq_ctx->lbpid_low); + seq_printf(m, "(W1: lbpid) \t\t\t0x%03x\n", + cq_ctx->lbpid_high << 6 | cq_ctx->lbpid_med << 3 | + cq_ctx->lbpid_low); + seq_printf(m, "W1: lbp_ena \t\t\t\t%d\n\n", cq_ctx->lbp_ena); + } + seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n", cq_ctx->update_time, cq_ctx->avg_level); seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n", @@ -1847,6 +1859,11 @@ static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp) cq_ctx->qsize, cq_ctx->caching); seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n", cq_ctx->substream, cq_ctx->ena); + if (!is_rvu_otx2(rvu)) { + seq_printf(m, "W3: lbp_frac \t\t\t%d\n", cq_ctx->lbp_frac); + seq_printf(m, "W3: cpt_drop_err_en 
\t\t\t%d\n", + cq_ctx->cpt_drop_err_en); + } seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n", cq_ctx->drop_ena, cq_ctx->drop); seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp); @@ -2889,6 +2906,14 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s, RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[3], rule->mask.mpls_lse[3]); break; + case NPC_TYPE_ICMP: + seq_printf(s, "%d ", rule->packet.icmp_type); + seq_printf(s, "mask 0x%x\n", rule->mask.icmp_type); + break; + case NPC_CODE_ICMP: + seq_printf(s, "%d ", rule->packet.icmp_code); + seq_printf(s, "mask 0x%x\n", rule->mask.icmp_code); + break; default: seq_puts(s, "\n"); break; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c index 21b5d71c1e..96c04f7d93 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c @@ -5,7 +5,7 @@ * */ -#include<linux/bitfield.h> +#include <linux/bitfield.h> #include "rvu.h" #include "rvu_reg.h" @@ -1235,8 +1235,9 @@ static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id, enum rvu_af_dl_param_id { RVU_AF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU, - RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE, RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT, + RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE, + RVU_AF_DEVLINK_PARAM_ID_NIX_MAXLF, }; static int rvu_af_npc_exact_feature_get(struct devlink *devlink, u32 id, @@ -1354,12 +1355,97 @@ static int rvu_af_dl_npc_mcam_high_zone_percent_validate(struct devlink *devlink return 0; } +static int rvu_af_dl_nix_maxlf_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct rvu_devlink *rvu_dl = devlink_priv(devlink); + struct rvu *rvu = rvu_dl->rvu; + + ctx->val.vu16 = (u16)rvu_get_nixlf_count(rvu); + + return 0; +} + +static int rvu_af_dl_nix_maxlf_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct rvu_devlink *rvu_dl = devlink_priv(devlink); + struct rvu *rvu = rvu_dl->rvu; + struct rvu_block *block; + int blkaddr = 0; + + npc_mcam_rsrcs_deinit(rvu); + blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); + while (blkaddr) { + block = &rvu->hw->block[blkaddr]; + block->lf.max = ctx->val.vu16; + blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); + } + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + npc_mcam_rsrcs_init(rvu, blkaddr); + + return 0; +} + +static int rvu_af_dl_nix_maxlf_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + struct rvu_devlink *rvu_dl = devlink_priv(devlink); + struct rvu *rvu = rvu_dl->rvu; + u16 max_nix0_lf, max_nix1_lf; + struct npc_mcam *mcam; + u64 cfg; + + cfg = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST2); + max_nix0_lf = cfg & 0xFFF; + cfg = rvu_read64(rvu, BLKADDR_NIX1, NIX_AF_CONST2); + max_nix1_lf = cfg & 0xFFF; + + /* Do not allow user to modify maximum NIX LFs while mcam entries + * have already been assigned. 
+ */ + mcam = &rvu->hw->mcam; + if (mcam->bmap_fcnt < mcam->bmap_entries) { + NL_SET_ERR_MSG_MOD(extack, + "mcam entries have already been assigned, can't resize"); + return -EPERM; + } + + if (max_nix0_lf && val.vu16 > max_nix0_lf) { + NL_SET_ERR_MSG_MOD(extack, + "requested nixlf is greater than the max supported nix0_lf"); + return -EPERM; + } + + if (max_nix1_lf && val.vu16 > max_nix1_lf) { + NL_SET_ERR_MSG_MOD(extack, + "requested nixlf is greater than the max supported nix1_lf"); + return -EINVAL; + } + + return 0; +} + static const struct devlink_param rvu_af_dl_params[] = { DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU, "dwrr_mtu", DEVLINK_PARAM_TYPE_U32, BIT(DEVLINK_PARAM_CMODE_RUNTIME), rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set, rvu_af_dl_dwrr_mtu_validate), + DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT, + "npc_mcam_high_zone_percent", DEVLINK_PARAM_TYPE_U8, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + rvu_af_dl_npc_mcam_high_zone_percent_get, + rvu_af_dl_npc_mcam_high_zone_percent_set, + rvu_af_dl_npc_mcam_high_zone_percent_validate), + DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NIX_MAXLF, + "nix_maxlf", DEVLINK_PARAM_TYPE_U16, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + rvu_af_dl_nix_maxlf_get, + rvu_af_dl_nix_maxlf_set, + rvu_af_dl_nix_maxlf_validate), }; static const struct devlink_param rvu_af_dl_param_exact_match[] = { @@ -1369,12 +1455,6 @@ static const struct devlink_param rvu_af_dl_param_exact_match[] = { rvu_af_npc_exact_feature_get, rvu_af_npc_exact_feature_disable, rvu_af_npc_exact_feature_validate), - DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT, - "npc_mcam_high_zone_percent", DEVLINK_PARAM_TYPE_U8, - BIT(DEVLINK_PARAM_CMODE_RUNTIME), - rvu_af_dl_npc_mcam_high_zone_percent_get, - rvu_af_dl_npc_mcam_high_zone_percent_set, - rvu_af_dl_npc_mcam_high_zone_percent_validate), }; /* Devlink switch mode */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 58744313f0..42db213fb6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -72,12 +72,19 @@ enum nix_makr_fmt_indexes { /* For now considering MC resources needed for broadcast * pkt replication only. i.e 256 HWVFs + 12 PFs. 
*/ -#define MC_TBL_SIZE MC_TBL_SZ_512 -#define MC_BUF_CNT MC_BUF_CNT_128 +#define MC_TBL_SIZE MC_TBL_SZ_2K +#define MC_BUF_CNT MC_BUF_CNT_1024 + +#define MC_TX_MAX 2048 struct mce { struct hlist_node node; + u32 rq_rss_index; u16 pcifunc; + u16 channel; + u8 dest_type; + u8 is_active; + u8 reserved[2]; }; int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr) @@ -165,18 +172,33 @@ static void nix_mce_list_init(struct nix_mce_list *list, int max) list->max = max; } -static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count) +static int nix_alloc_mce_list(struct nix_mcast *mcast, int count, u8 dir) { + struct rsrc_bmap *mce_counter; int idx; if (!mcast) - return 0; + return -EINVAL; - idx = mcast->next_free_mce; - mcast->next_free_mce += count; + mce_counter = &mcast->mce_counter[dir]; + if (!rvu_rsrc_check_contig(mce_counter, count)) + return -ENOSPC; + + idx = rvu_alloc_rsrc_contig(mce_counter, count); return idx; } +static void nix_free_mce_list(struct nix_mcast *mcast, int count, int start, u8 dir) +{ + struct rsrc_bmap *mce_counter; + + if (!mcast) + return; + + mce_counter = &mcast->mce_counter[dir]; + rvu_free_rsrc_contig(mce_counter, count, start); +} + struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr) { int nix_blkaddr = 0, i = 0; @@ -2956,7 +2978,8 @@ int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu, } static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw, - int mce, u8 op, u16 pcifunc, int next, bool eol) + int mce, u8 op, u16 pcifunc, int next, + int index, u8 mce_op, bool eol) { struct nix_aq_enq_req aq_req; int err; @@ -2967,8 +2990,8 @@ static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw, aq_req.qidx = mce; /* Use RSS with RSS index 0 */ - aq_req.mce.op = 1; - aq_req.mce.index = 0; + aq_req.mce.op = mce_op; + aq_req.mce.index = index; aq_req.mce.eol = eol; aq_req.mce.pf_func = pcifunc; aq_req.mce.next = next; @@ -2985,6 +3008,206 @@ static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw, return 0; } +static void nix_delete_mcast_mce_list(struct nix_mce_list *mce_list) +{ + struct hlist_node *tmp; + struct mce *mce; + + /* Scan through the current list */ + hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) { + hlist_del(&mce->node); + kfree(mce); + } + + mce_list->count = 0; + mce_list->max = 0; +} + +static int nix_get_last_mce_list_index(struct nix_mcast_grp_elem *elem) +{ + return elem->mce_start_index + elem->mcast_mce_list.count - 1; +} + +static int nix_update_ingress_mce_list_hw(struct rvu *rvu, + struct nix_hw *nix_hw, + struct nix_mcast_grp_elem *elem) +{ + int idx, last_idx, next_idx, err; + struct nix_mce_list *mce_list; + struct mce *mce, *prev_mce; + + mce_list = &elem->mcast_mce_list; + idx = elem->mce_start_index; + last_idx = nix_get_last_mce_list_index(elem); + hlist_for_each_entry(mce, &mce_list->head, node) { + if (idx > last_idx) + break; + + if (!mce->is_active) { + if (idx == elem->mce_start_index) { + idx++; + prev_mce = mce; + elem->mce_start_index = idx; + continue; + } else if (idx == last_idx) { + err = nix_blk_setup_mce(rvu, nix_hw, idx - 1, NIX_AQ_INSTOP_WRITE, + prev_mce->pcifunc, next_idx, + prev_mce->rq_rss_index, + prev_mce->dest_type, + false); + if (err) + return err; + + break; + } + } + + next_idx = idx + 1; + /* EOL should be set in last MCE */ + err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE, + mce->pcifunc, next_idx, + mce->rq_rss_index, mce->dest_type, + (next_idx > last_idx) ? 
true : false); + if (err) + return err; + + idx++; + prev_mce = mce; + } + + return 0; +} + +static void nix_update_egress_mce_list_hw(struct rvu *rvu, + struct nix_hw *nix_hw, + struct nix_mcast_grp_elem *elem) +{ + struct nix_mce_list *mce_list; + int idx, last_idx, next_idx; + struct mce *mce, *prev_mce; + u64 regval; + u8 eol; + + mce_list = &elem->mcast_mce_list; + idx = elem->mce_start_index; + last_idx = nix_get_last_mce_list_index(elem); + hlist_for_each_entry(mce, &mce_list->head, node) { + if (idx > last_idx) + break; + + if (!mce->is_active) { + if (idx == elem->mce_start_index) { + idx++; + prev_mce = mce; + elem->mce_start_index = idx; + continue; + } else if (idx == last_idx) { + regval = (next_idx << 16) | (1 << 12) | prev_mce->channel; + rvu_write64(rvu, nix_hw->blkaddr, + NIX_AF_TX_MCASTX(idx - 1), + regval); + break; + } + } + + eol = 0; + next_idx = idx + 1; + /* EOL should be set in last MCE */ + if (next_idx > last_idx) + eol = 1; + + regval = (next_idx << 16) | (eol << 12) | mce->channel; + rvu_write64(rvu, nix_hw->blkaddr, + NIX_AF_TX_MCASTX(idx), + regval); + idx++; + prev_mce = mce; + } +} + +static int nix_del_mce_list_entry(struct rvu *rvu, + struct nix_hw *nix_hw, + struct nix_mcast_grp_elem *elem, + struct nix_mcast_grp_update_req *req) +{ + u32 num_entry = req->num_mce_entry; + struct nix_mce_list *mce_list; + struct mce *mce; + bool is_found; + int i; + + mce_list = &elem->mcast_mce_list; + for (i = 0; i < num_entry; i++) { + is_found = false; + hlist_for_each_entry(mce, &mce_list->head, node) { + /* If already exists, then delete */ + if (mce->pcifunc == req->pcifunc[i]) { + hlist_del(&mce->node); + kfree(mce); + mce_list->count--; + is_found = true; + break; + } + } + + if (!is_found) + return NIX_AF_ERR_INVALID_MCAST_DEL_REQ; + } + + mce_list->max = mce_list->count; + /* Dump the updated list to HW */ + if (elem->dir == NIX_MCAST_INGRESS) + return nix_update_ingress_mce_list_hw(rvu, nix_hw, elem); + + nix_update_egress_mce_list_hw(rvu, nix_hw, elem); + return 0; +} + +static int nix_add_mce_list_entry(struct rvu *rvu, + struct nix_hw *nix_hw, + struct nix_mcast_grp_elem *elem, + struct nix_mcast_grp_update_req *req) +{ + u32 num_entry = req->num_mce_entry; + struct nix_mce_list *mce_list; + struct hlist_node *tmp; + struct mce *mce; + int i; + + mce_list = &elem->mcast_mce_list; + for (i = 0; i < num_entry; i++) { + mce = kzalloc(sizeof(*mce), GFP_KERNEL); + if (!mce) + goto free_mce; + + mce->pcifunc = req->pcifunc[i]; + mce->channel = req->channel[i]; + mce->rq_rss_index = req->rq_rss_index[i]; + mce->dest_type = req->dest_type[i]; + mce->is_active = 1; + hlist_add_head(&mce->node, &mce_list->head); + mce_list->count++; + } + + mce_list->max += num_entry; + + /* Dump the updated list to HW */ + if (elem->dir == NIX_MCAST_INGRESS) + return nix_update_ingress_mce_list_hw(rvu, nix_hw, elem); + + nix_update_egress_mce_list_hw(rvu, nix_hw, elem); + return 0; + +free_mce: + hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) { + hlist_del(&mce->node); + kfree(mce); + mce_list->count--; + } + + return -ENOMEM; +} + static int nix_update_mce_list_entry(struct nix_mce_list *mce_list, u16 pcifunc, bool add) { @@ -3080,6 +3303,7 @@ int nix_update_mce_list(struct rvu *rvu, u16 pcifunc, /* EOL should be set in last MCE */ err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE, mce->pcifunc, next_idx, + 0, 1, (next_idx > last_idx) ? 
true : false); if (err) goto end; @@ -3160,6 +3384,16 @@ static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc, return err; } +static void nix_setup_mcast_grp(struct nix_hw *nix_hw) +{ + struct nix_mcast_grp *mcast_grp = &nix_hw->mcast_grp; + + INIT_LIST_HEAD(&mcast_grp->mcast_grp_head); + mutex_init(&mcast_grp->mcast_grp_lock); + mcast_grp->next_grp_index = 1; + mcast_grp->count = 0; +} + static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw) { struct nix_mcast *mcast = &nix_hw->mcast; @@ -3184,15 +3418,15 @@ static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw) continue; /* save start idx of broadcast mce list */ - pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1); + pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS); nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1); /* save start idx of multicast mce list */ - pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1); + pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS); nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1); /* save the start idx of promisc mce list */ - pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1); + pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS); nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1); for (idx = 0; idx < (numvfs + 1); idx++) { @@ -3207,7 +3441,7 @@ static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw) err = nix_blk_setup_mce(rvu, nix_hw, pfvf->bcast_mce_idx + idx, NIX_AQ_INSTOP_INIT, - pcifunc, 0, true); + pcifunc, 0, 0, 1, true); if (err) return err; @@ -3215,7 +3449,7 @@ static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw) err = nix_blk_setup_mce(rvu, nix_hw, pfvf->mcast_mce_idx + idx, NIX_AQ_INSTOP_INIT, - pcifunc, 0, true); + pcifunc, 0, 0, 1, true); if (err) return err; @@ -3223,7 +3457,7 @@ static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw) err = nix_blk_setup_mce(rvu, nix_hw, pfvf->promisc_mce_idx + idx, NIX_AQ_INSTOP_INIT, - pcifunc, 0, true); + pcifunc, 0, 0, 1, true); if (err) return err; } @@ -3238,13 +3472,30 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) int err, size; size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F; - size = (1ULL << size); + size = BIT_ULL(size); + + /* Allocate bitmap for rx mce entries */ + mcast->mce_counter[NIX_MCAST_INGRESS].max = 256UL << MC_TBL_SIZE; + err = rvu_alloc_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]); + if (err) + return -ENOMEM; + + /* Allocate bitmap for tx mce entries */ + mcast->mce_counter[NIX_MCAST_EGRESS].max = MC_TX_MAX; + err = rvu_alloc_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]); + if (err) { + rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]); + return -ENOMEM; + } /* Alloc memory for multicast/mirror replication entries */ err = qmem_alloc(rvu->dev, &mcast->mce_ctx, - (256UL << MC_TBL_SIZE), size); - if (err) + mcast->mce_counter[NIX_MCAST_INGRESS].max, size); + if (err) { + rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]); + rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]); return -ENOMEM; + } rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE, (u64)mcast->mce_ctx->iova); @@ -3257,8 +3508,11 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF; err = qmem_alloc(rvu->dev, &mcast->mcast_buf, (8UL << MC_BUF_CNT), size); - if (err) + if (err) { + 
rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]); + rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]); return -ENOMEM; + } rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE, (u64)mcast->mcast_buf->iova); @@ -3272,6 +3526,8 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) mutex_init(&mcast->mce_lock); + nix_setup_mcast_grp(nix_hw); + return nix_setup_mce_tables(rvu, nix_hw); } @@ -4465,18 +4721,18 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw) */ rvu_write64(rvu, blkaddr, NIX_AF_CFG, rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL); + } - /* Set chan/link to backpressure TL3 instead of TL2 */ - rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01); + /* Set chan/link to backpressure TL3 instead of TL2 */ + rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01); - /* Disable SQ manager's sticky mode operation (set TM6 = 0) - * This sticky mode is known to cause SQ stalls when multiple - * SQs are mapped to same SMQ and transmitting pkts at a time. - */ - cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS); - cfg &= ~BIT_ULL(15); - rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg); - } + /* Disable SQ manager's sticky mode operation (set TM6 = 0) + * This sticky mode is known to cause SQ stalls when multiple + * SQs are mapped to same SMQ and transmitting pkts at a time. + */ + cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS); + cfg &= ~BIT_ULL(15); + rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg); ltdefs = rvu->kpu.lt_def; /* Calibrate X2P bus to check if CGX/LBK links are fine */ @@ -4698,6 +4954,74 @@ void rvu_nix_freemem(struct rvu *rvu) } } +static void nix_mcast_update_action(struct rvu *rvu, + struct nix_mcast_grp_elem *elem) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + struct nix_rx_action rx_action = { 0 }; + struct nix_tx_action tx_action = { 0 }; + int npc_blkaddr; + + npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (elem->dir == NIX_MCAST_INGRESS) { + *(u64 *)&rx_action = npc_get_mcam_action(rvu, mcam, + npc_blkaddr, + elem->mcam_index); + rx_action.index = elem->mce_start_index; + npc_set_mcam_action(rvu, mcam, npc_blkaddr, elem->mcam_index, + *(u64 *)&rx_action); + } else { + *(u64 *)&tx_action = npc_get_mcam_action(rvu, mcam, + npc_blkaddr, + elem->mcam_index); + tx_action.index = elem->mce_start_index; + npc_set_mcam_action(rvu, mcam, npc_blkaddr, elem->mcam_index, + *(u64 *)&tx_action); + } +} + +static void nix_mcast_update_mce_entry(struct rvu *rvu, u16 pcifunc, u8 is_active) +{ + struct nix_mcast_grp_elem *elem; + struct nix_mcast_grp *mcast_grp; + struct nix_hw *nix_hw; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return; + + mcast_grp = &nix_hw->mcast_grp; + + mutex_lock(&mcast_grp->mcast_grp_lock); + list_for_each_entry(elem, &mcast_grp->mcast_grp_head, list) { + struct nix_mce_list *mce_list; + struct mce *mce; + + /* Iterate the group elements and disable the element which + * received the disable request. 
+ */ + mce_list = &elem->mcast_mce_list; + hlist_for_each_entry(mce, &mce_list->head, node) { + if (mce->pcifunc == pcifunc) { + mce->is_active = is_active; + break; + } + } + + /* Dump the updated list to HW */ + if (elem->dir == NIX_MCAST_INGRESS) + nix_update_ingress_mce_list_hw(rvu, nix_hw, elem); + else + nix_update_egress_mce_list_hw(rvu, nix_hw, elem); + + /* Update the multicast index in NPC rule */ + nix_mcast_update_action(rvu, elem); + } + mutex_unlock(&mcast_grp->mcast_grp_lock); +} + int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { @@ -4709,6 +5033,9 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, if (err) return err; + /* Enable the interface if it is in any multicast list */ + nix_mcast_update_mce_entry(rvu, pcifunc, 1); + rvu_npc_enable_default_entries(rvu, pcifunc, nixlf); npc_mcam_enable_flows(rvu, pcifunc); @@ -4733,6 +5060,9 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, return err; rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); + /* Disable the interface if it is in any multicast list */ + nix_mcast_update_mce_entry(rvu, pcifunc, 0); + pfvf = rvu_get_pfvf(rvu, pcifunc); clear_bit(NIXLF_INITIALIZED, &pfvf->flags); @@ -5707,3 +6037,361 @@ int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *re return 0; } + +static struct nix_mcast_grp_elem *rvu_nix_mcast_find_grp_elem(struct nix_mcast_grp *mcast_grp, + u32 mcast_grp_idx) +{ + struct nix_mcast_grp_elem *iter; + bool is_found = false; + + list_for_each_entry(iter, &mcast_grp->mcast_grp_head, list) { + if (iter->mcast_grp_idx == mcast_grp_idx) { + is_found = true; + break; + } + } + + if (is_found) + return iter; + + return NULL; +} + +int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc, u32 mcast_grp_idx) +{ + struct nix_mcast_grp_elem *elem; + struct nix_mcast_grp *mcast_grp; + struct nix_hw *nix_hw; + int blkaddr, ret; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return NIX_AF_ERR_INVALID_NIXBLK; + + mcast_grp = &nix_hw->mcast_grp; + mutex_lock(&mcast_grp->mcast_grp_lock); + elem = rvu_nix_mcast_find_grp_elem(mcast_grp, mcast_grp_idx); + if (!elem) + ret = NIX_AF_ERR_INVALID_MCAST_GRP; + else + ret = elem->mce_start_index; + + mutex_unlock(&mcast_grp->mcast_grp_lock); + return ret; +} + +void rvu_nix_mcast_flr_free_entries(struct rvu *rvu, u16 pcifunc) +{ + struct nix_mcast_grp_destroy_req dreq = { 0 }; + struct nix_mcast_grp_update_req ureq = { 0 }; + struct nix_mcast_grp_update_rsp ursp = { 0 }; + struct nix_mcast_grp_elem *elem, *tmp; + struct nix_mcast_grp *mcast_grp; + struct nix_hw *nix_hw; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return; + + mcast_grp = &nix_hw->mcast_grp; + + mutex_lock(&mcast_grp->mcast_grp_lock); + list_for_each_entry_safe(elem, tmp, &mcast_grp->mcast_grp_head, list) { + struct nix_mce_list *mce_list; + struct hlist_node *tmp; + struct mce *mce; + + /* If the pcifunc which created the multicast/mirror + * group received an FLR, then delete the entire group. + */ + if (elem->pcifunc == pcifunc) { + /* Delete group */ + dreq.hdr.pcifunc = elem->pcifunc; + dreq.mcast_grp_idx = elem->mcast_grp_idx; + dreq.is_af = 1; + rvu_mbox_handler_nix_mcast_grp_destroy(rvu, &dreq, NULL); + continue; + } + + /* Iterate the group elements and delete the element which + * received the FLR. 
+ */ + mce_list = &elem->mcast_mce_list; + hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) { + if (mce->pcifunc == pcifunc) { + ureq.hdr.pcifunc = pcifunc; + ureq.num_mce_entry = 1; + ureq.mcast_grp_idx = elem->mcast_grp_idx; + ureq.op = NIX_MCAST_OP_DEL_ENTRY; + ureq.pcifunc[0] = pcifunc; + ureq.is_af = 1; + rvu_mbox_handler_nix_mcast_grp_update(rvu, &ureq, &ursp); + break; + } + } + } + mutex_unlock(&mcast_grp->mcast_grp_lock); +} + +int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc, + u32 mcast_grp_idx, u16 mcam_index) +{ + struct nix_mcast_grp_elem *elem; + struct nix_mcast_grp *mcast_grp; + struct nix_hw *nix_hw; + int blkaddr, ret = 0; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return NIX_AF_ERR_INVALID_NIXBLK; + + mcast_grp = &nix_hw->mcast_grp; + mutex_lock(&mcast_grp->mcast_grp_lock); + elem = rvu_nix_mcast_find_grp_elem(mcast_grp, mcast_grp_idx); + if (!elem) + ret = NIX_AF_ERR_INVALID_MCAST_GRP; + else + elem->mcam_index = mcam_index; + + mutex_unlock(&mcast_grp->mcast_grp_lock); + return ret; +} + +int rvu_mbox_handler_nix_mcast_grp_create(struct rvu *rvu, + struct nix_mcast_grp_create_req *req, + struct nix_mcast_grp_create_rsp *rsp) +{ + struct nix_mcast_grp_elem *elem; + struct nix_mcast_grp *mcast_grp; + struct nix_hw *nix_hw; + int blkaddr, err; + + err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr); + if (err) + return err; + + mcast_grp = &nix_hw->mcast_grp; + elem = kzalloc(sizeof(*elem), GFP_KERNEL); + if (!elem) + return -ENOMEM; + + INIT_HLIST_HEAD(&elem->mcast_mce_list.head); + elem->mcam_index = -1; + elem->mce_start_index = -1; + elem->pcifunc = req->hdr.pcifunc; + elem->dir = req->dir; + elem->mcast_grp_idx = mcast_grp->next_grp_index++; + + mutex_lock(&mcast_grp->mcast_grp_lock); + list_add_tail(&elem->list, &mcast_grp->mcast_grp_head); + mcast_grp->count++; + mutex_unlock(&mcast_grp->mcast_grp_lock); + + rsp->mcast_grp_idx = elem->mcast_grp_idx; + return 0; +} + +int rvu_mbox_handler_nix_mcast_grp_destroy(struct rvu *rvu, + struct nix_mcast_grp_destroy_req *req, + struct msg_rsp *rsp) +{ + struct npc_delete_flow_req uninstall_req = { 0 }; + struct npc_delete_flow_rsp uninstall_rsp = { 0 }; + struct nix_mcast_grp_elem *elem; + struct nix_mcast_grp *mcast_grp; + int blkaddr, err, ret = 0; + struct nix_mcast *mcast; + struct nix_hw *nix_hw; + + err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr); + if (err) + return err; + + mcast_grp = &nix_hw->mcast_grp; + + /* If AF is requesting for the deletion, + * then AF is already taking the lock + */ + if (!req->is_af) + mutex_lock(&mcast_grp->mcast_grp_lock); + + elem = rvu_nix_mcast_find_grp_elem(mcast_grp, req->mcast_grp_idx); + if (!elem) { + ret = NIX_AF_ERR_INVALID_MCAST_GRP; + goto unlock_grp; + } + + /* If no mce entries are associated with the group + * then just remove it from the global list. 
+ */ + if (!elem->mcast_mce_list.count) + goto delete_grp; + + /* Delete the associated mcam entry and + * remove all mce entries from the group + */ + mcast = &nix_hw->mcast; + mutex_lock(&mcast->mce_lock); + if (elem->mcam_index != -1) { + uninstall_req.hdr.pcifunc = req->hdr.pcifunc; + uninstall_req.entry = elem->mcam_index; + rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp); + } + + nix_free_mce_list(mcast, elem->mcast_mce_list.count, + elem->mce_start_index, elem->dir); + nix_delete_mcast_mce_list(&elem->mcast_mce_list); + mutex_unlock(&mcast->mce_lock); + +delete_grp: + list_del(&elem->list); + kfree(elem); + mcast_grp->count--; + +unlock_grp: + if (!req->is_af) + mutex_unlock(&mcast_grp->mcast_grp_lock); + + return ret; +} + +int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu, + struct nix_mcast_grp_update_req *req, + struct nix_mcast_grp_update_rsp *rsp) +{ + struct nix_mcast_grp_destroy_req dreq = { 0 }; + struct npc_mcam *mcam = &rvu->hw->mcam; + struct nix_mcast_grp_elem *elem; + struct nix_mcast_grp *mcast_grp; + int blkaddr, err, npc_blkaddr; + u16 prev_count, new_count; + struct nix_mcast *mcast; + struct nix_hw *nix_hw; + int i, ret; + + if (!req->num_mce_entry) + return 0; + + err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr); + if (err) + return err; + + mcast_grp = &nix_hw->mcast_grp; + + /* If AF is requesting for the updation, + * then AF is already taking the lock + */ + if (!req->is_af) + mutex_lock(&mcast_grp->mcast_grp_lock); + + elem = rvu_nix_mcast_find_grp_elem(mcast_grp, req->mcast_grp_idx); + if (!elem) { + ret = NIX_AF_ERR_INVALID_MCAST_GRP; + goto unlock_grp; + } + + /* If any pcifunc matches the group's pcifunc, then we can + * delete the entire group. + */ + if (req->op == NIX_MCAST_OP_DEL_ENTRY) { + for (i = 0; i < req->num_mce_entry; i++) { + if (elem->pcifunc == req->pcifunc[i]) { + /* Delete group */ + dreq.hdr.pcifunc = elem->pcifunc; + dreq.mcast_grp_idx = elem->mcast_grp_idx; + dreq.is_af = 1; + rvu_mbox_handler_nix_mcast_grp_destroy(rvu, &dreq, NULL); + ret = 0; + goto unlock_grp; + } + } + } + + mcast = &nix_hw->mcast; + mutex_lock(&mcast->mce_lock); + npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (elem->mcam_index != -1) + npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, elem->mcam_index, false); + + prev_count = elem->mcast_mce_list.count; + if (req->op == NIX_MCAST_OP_ADD_ENTRY) { + new_count = prev_count + req->num_mce_entry; + if (prev_count) + nix_free_mce_list(mcast, prev_count, elem->mce_start_index, elem->dir); + + elem->mce_start_index = nix_alloc_mce_list(mcast, new_count, elem->dir); + + /* It is possible not to get contiguous memory */ + if (elem->mce_start_index < 0) { + if (elem->mcam_index != -1) { + npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, + elem->mcam_index, true); + ret = NIX_AF_ERR_NON_CONTIG_MCE_LIST; + goto unlock_mce; + } + } + + ret = nix_add_mce_list_entry(rvu, nix_hw, elem, req); + if (ret) { + nix_free_mce_list(mcast, new_count, elem->mce_start_index, elem->dir); + if (prev_count) + elem->mce_start_index = nix_alloc_mce_list(mcast, + prev_count, + elem->dir); + + if (elem->mcam_index != -1) + npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, + elem->mcam_index, true); + + goto unlock_mce; + } + } else { + if (!prev_count || prev_count < req->num_mce_entry) { + if (elem->mcam_index != -1) + npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, + elem->mcam_index, true); + ret = NIX_AF_ERR_INVALID_MCAST_DEL_REQ; + goto unlock_mce; + } + + nix_free_mce_list(mcast, prev_count, 
elem->mce_start_index, elem->dir); + new_count = prev_count - req->num_mce_entry; + elem->mce_start_index = nix_alloc_mce_list(mcast, new_count, elem->dir); + ret = nix_del_mce_list_entry(rvu, nix_hw, elem, req); + if (ret) { + nix_free_mce_list(mcast, new_count, elem->mce_start_index, elem->dir); + elem->mce_start_index = nix_alloc_mce_list(mcast, prev_count, elem->dir); + if (elem->mcam_index != -1) + npc_enable_mcam_entry(rvu, mcam, + npc_blkaddr, + elem->mcam_index, + true); + + goto unlock_mce; + } + } + + if (elem->mcam_index == -1) { + rsp->mce_start_index = elem->mce_start_index; + ret = 0; + goto unlock_mce; + } + + nix_mcast_update_action(rvu, elem); + npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, elem->mcam_index, true); + rsp->mce_start_index = elem->mce_start_index; + ret = 0; + +unlock_mce: + mutex_unlock(&mcast->mce_lock); + +unlock_grp: + if (!req->is_af) + mutex_unlock(&mcast_grp->mcast_grp_lock); + + return ret; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 55639c133d..d94b7b88e1 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -61,28 +61,6 @@ int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena) return 0; } -static int npc_mcam_verify_pf_func(struct rvu *rvu, - struct mcam_entry *entry_data, u8 intf, - u16 pcifunc) -{ - u16 pf_func, pf_func_mask; - - if (is_npc_intf_rx(intf)) - return 0; - - pf_func_mask = (entry_data->kw_mask[0] >> 32) & - NPC_KEX_PF_FUNC_MASK; - pf_func = (entry_data->kw[0] >> 32) & NPC_KEX_PF_FUNC_MASK; - - pf_func = be16_to_cpu((__force __be16)pf_func); - if (pf_func_mask != NPC_KEX_PF_FUNC_MASK || - ((pf_func & ~RVU_PFVF_FUNC_MASK) != - (pcifunc & ~RVU_PFVF_FUNC_MASK))) - return -EINVAL; - - return 0; -} - void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf) { int blkaddr; @@ -599,8 +577,8 @@ static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, NPC_AF_MCAMEX_BANKX_CFG(dest, dbank), cfg); } -static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam, - int blkaddr, int index) +u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam, + int blkaddr, int index) { int bank = npc_get_bank(mcam, index); @@ -609,6 +587,16 @@ static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam, NPC_AF_MCAMEX_BANKX_ACTION(index, bank)); } +void npc_set_mcam_action(struct rvu *rvu, struct npc_mcam *mcam, + int blkaddr, int index, u64 cfg) +{ + int bank = npc_get_bank(mcam, index); + + index &= (mcam->banksize - 1); + return rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_ACTION(index, bank), cfg); +} + void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u64 chan, u8 *mac_addr) { @@ -1669,7 +1657,7 @@ static int npc_fwdb_detect_load_prfl_img(struct rvu *rvu, uint64_t prfl_sz, struct npc_coalesced_kpu_prfl *img_data = NULL; int i = 0, rc = -EINVAL; void __iomem *kpu_prfl_addr; - u16 offset; + u32 offset; img_data = (struct npc_coalesced_kpu_prfl __force *)rvu->kpu_prfl_addr; if (le64_to_cpu(img_data->signature) == KPU_SIGN && @@ -1840,7 +1828,21 @@ static void npc_parser_profile_init(struct rvu *rvu, int blkaddr) npc_program_kpu_profile(rvu, blkaddr, idx, &rvu->kpu.kpu[idx]); } -static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr) +void npc_mcam_rsrcs_deinit(struct rvu *rvu) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + + bitmap_free(mcam->bmap); + bitmap_free(mcam->bmap_reverse); + 
kfree(mcam->entry2pfvf_map); + kfree(mcam->cntr2pfvf_map); + kfree(mcam->entry2cntr_map); + kfree(mcam->cntr_refcnt); + kfree(mcam->entry2target_pffunc); + kfree(mcam->counters.bmap); +} + +int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr) { int nixlf_count = rvu_get_nixlf_count(rvu); struct npc_mcam *mcam = &rvu->hw->mcam; @@ -1884,24 +1886,22 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr) mcam->pf_offset = mcam->nixlf_offset + nixlf_count; /* Allocate bitmaps for managing MCAM entries */ - mcam->bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(mcam->bmap_entries), - sizeof(long), GFP_KERNEL); + mcam->bmap = bitmap_zalloc(mcam->bmap_entries, GFP_KERNEL); if (!mcam->bmap) return -ENOMEM; - mcam->bmap_reverse = devm_kcalloc(rvu->dev, - BITS_TO_LONGS(mcam->bmap_entries), - sizeof(long), GFP_KERNEL); + mcam->bmap_reverse = bitmap_zalloc(mcam->bmap_entries, GFP_KERNEL); if (!mcam->bmap_reverse) - return -ENOMEM; + goto free_bmap; mcam->bmap_fcnt = mcam->bmap_entries; /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */ - mcam->entry2pfvf_map = devm_kcalloc(rvu->dev, mcam->bmap_entries, - sizeof(u16), GFP_KERNEL); + mcam->entry2pfvf_map = kcalloc(mcam->bmap_entries, sizeof(u16), + GFP_KERNEL); + if (!mcam->entry2pfvf_map) - return -ENOMEM; + goto free_bmap_reverse; /* Reserve 1/8th of MCAM entries at the bottom for low priority * allocations and another 1/8th at the top for high priority @@ -1920,31 +1920,31 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr) */ err = rvu_alloc_bitmap(&mcam->counters); if (err) - return err; + goto free_entry_map; - mcam->cntr2pfvf_map = devm_kcalloc(rvu->dev, mcam->counters.max, - sizeof(u16), GFP_KERNEL); + mcam->cntr2pfvf_map = kcalloc(mcam->counters.max, sizeof(u16), + GFP_KERNEL); if (!mcam->cntr2pfvf_map) - goto free_mem; + goto free_cntr_bmap; /* Alloc memory for MCAM entry to counter mapping and for tracking * counter's reference count. 
*/ - mcam->entry2cntr_map = devm_kcalloc(rvu->dev, mcam->bmap_entries, - sizeof(u16), GFP_KERNEL); + mcam->entry2cntr_map = kcalloc(mcam->bmap_entries, sizeof(u16), + GFP_KERNEL); if (!mcam->entry2cntr_map) - goto free_mem; + goto free_cntr_map; - mcam->cntr_refcnt = devm_kcalloc(rvu->dev, mcam->counters.max, - sizeof(u16), GFP_KERNEL); + mcam->cntr_refcnt = kcalloc(mcam->counters.max, sizeof(u16), + GFP_KERNEL); if (!mcam->cntr_refcnt) - goto free_mem; + goto free_entry_cntr_map; /* Alloc memory for saving target device of mcam rule */ - mcam->entry2target_pffunc = devm_kcalloc(rvu->dev, mcam->total_entries, - sizeof(u16), GFP_KERNEL); + mcam->entry2target_pffunc = kmalloc_array(mcam->total_entries, + sizeof(u16), GFP_KERNEL); if (!mcam->entry2target_pffunc) - goto free_mem; + goto free_cntr_refcnt; for (index = 0; index < mcam->bmap_entries; index++) { mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP; @@ -1958,8 +1958,21 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr) return 0; -free_mem: +free_cntr_refcnt: + kfree(mcam->cntr_refcnt); +free_entry_cntr_map: + kfree(mcam->entry2cntr_map); +free_cntr_map: + kfree(mcam->cntr2pfvf_map); +free_cntr_bmap: kfree(mcam->counters.bmap); +free_entry_map: + kfree(mcam->entry2pfvf_map); +free_bmap_reverse: + bitmap_free(mcam->bmap_reverse); +free_bmap: + bitmap_free(mcam->bmap); + return -ENOMEM; } @@ -2167,7 +2180,7 @@ void rvu_npc_freemem(struct rvu *rvu) struct npc_mcam *mcam = &rvu->hw->mcam; kfree(pkind->rsrc.bmap); - kfree(mcam->counters.bmap); + npc_mcam_rsrcs_deinit(rvu); if (rvu->kpu_prfl_addr) iounmap(rvu->kpu_prfl_addr); else @@ -2819,12 +2832,6 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu, else nix_intf = pfvf->nix_rx_intf; - if (!is_pffunc_af(pcifunc) && - npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, pcifunc)) { - rc = NPC_MCAM_INVALID_REQ; - goto exit; - } - /* For AF installed rules, the nix_intf should be set to target NIX */ if (is_pffunc_af(req->hdr.pcifunc)) nix_intf = req->intf; @@ -3176,10 +3183,6 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu, if (!is_npc_interface_valid(rvu, req->intf)) return NPC_MCAM_INVALID_REQ; - if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, - req->hdr.pcifunc)) - return NPC_MCAM_INVALID_REQ; - /* Try to allocate a MCAM entry */ entry_req.hdr.pcifunc = req->hdr.pcifunc; entry_req.contig = true; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c index 114e4ec218..c75669c8fd 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c @@ -51,6 +51,8 @@ static const char * const npc_flow_names[] = { [NPC_MPLS3_TTL] = "lse depth 3 ttl", [NPC_MPLS4_LBTCBOS] = "lse depth 4 label tc bos", [NPC_MPLS4_TTL] = "lse depth 4", + [NPC_TYPE_ICMP] = "icmp type", + [NPC_CODE_ICMP] = "icmp code", [NPC_UNKNOWN] = "unknown", }; @@ -526,6 +528,8 @@ do { \ NPC_SCAN_HDR(NPC_DPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 2, 2); NPC_SCAN_HDR(NPC_SPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 0, 2); NPC_SCAN_HDR(NPC_DPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 2, 2); + NPC_SCAN_HDR(NPC_TYPE_ICMP, NPC_LID_LD, NPC_LT_LD_ICMP, 0, 1); + NPC_SCAN_HDR(NPC_CODE_ICMP, NPC_LID_LD, NPC_LT_LD_ICMP, 1, 1); NPC_SCAN_HDR(NPC_ETYPE_ETHER, NPC_LID_LA, NPC_LT_LA_ETHER, 12, 2); NPC_SCAN_HDR(NPC_ETYPE_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 4, 2); NPC_SCAN_HDR(NPC_ETYPE_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8, 2); @@ -555,7 +559,7 @@ static void 
npc_set_features(struct rvu *rvu, int blkaddr, u8 intf) { struct npc_mcam *mcam = &rvu->hw->mcam; u64 *features = &mcam->rx_features; - u64 tcp_udp_sctp; + u64 proto_flags; int hdr; if (is_npc_intf_tx(intf)) @@ -566,18 +570,21 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf) *features |= BIT_ULL(hdr); } - tcp_udp_sctp = BIT_ULL(NPC_SPORT_TCP) | BIT_ULL(NPC_SPORT_UDP) | + proto_flags = BIT_ULL(NPC_SPORT_TCP) | BIT_ULL(NPC_SPORT_UDP) | BIT_ULL(NPC_DPORT_TCP) | BIT_ULL(NPC_DPORT_UDP) | - BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP); + BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP) | + BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP) | + BIT_ULL(NPC_TYPE_ICMP) | BIT_ULL(NPC_CODE_ICMP); /* for tcp/udp/sctp corresponding layer type should be in the key */ - if (*features & tcp_udp_sctp) { + if (*features & proto_flags) { if (!npc_check_field(rvu, blkaddr, NPC_LD, intf)) - *features &= ~tcp_udp_sctp; + *features &= ~proto_flags; else *features |= BIT_ULL(NPC_IPPROTO_TCP) | BIT_ULL(NPC_IPPROTO_UDP) | - BIT_ULL(NPC_IPPROTO_SCTP); + BIT_ULL(NPC_IPPROTO_SCTP) | + BIT_ULL(NPC_IPPROTO_ICMP); } /* for AH/ICMP/ICMPv6/, check if corresponding layer type is present in the key */ @@ -971,6 +978,10 @@ do { \ ntohs(mask->sport), 0); NPC_WRITE_FLOW(NPC_DPORT_SCTP, dport, ntohs(pkt->dport), 0, ntohs(mask->dport), 0); + NPC_WRITE_FLOW(NPC_TYPE_ICMP, icmp_type, pkt->icmp_type, 0, + mask->icmp_type, 0); + NPC_WRITE_FLOW(NPC_CODE_ICMP, icmp_code, pkt->icmp_code, 0, + mask->icmp_code, 0); NPC_WRITE_FLOW(NPC_IPSEC_SPI, spi, ntohl(pkt->spi), 0, ntohl(mask->spi), 0); @@ -1106,13 +1117,40 @@ static void rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc, } } -static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf, - struct mcam_entry *entry, - struct npc_install_flow_req *req, - u16 target, bool pf_set_vfs_mac) +static int npc_mcast_update_action_index(struct rvu *rvu, struct npc_install_flow_req *req, + u64 op, void *action) +{ + int mce_index; + + /* If a PF/VF is installing a multicast rule then it is expected + * that the PF/VF should have created a group for the multicast/mirror + * list. Otherwise reject the configuration. + * During this scenario, req->index is set as multicast/mirror + * group index. 
+ */ + if (req->hdr.pcifunc && + (op == NIX_RX_ACTIONOP_MCAST || op == NIX_TX_ACTIONOP_MCAST)) { + mce_index = rvu_nix_mcast_get_mce_index(rvu, req->hdr.pcifunc, req->index); + if (mce_index < 0) + return mce_index; + + if (op == NIX_RX_ACTIONOP_MCAST) + ((struct nix_rx_action *)action)->index = mce_index; + else + ((struct nix_tx_action *)action)->index = mce_index; + } + + return 0; +} + +static int npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf, + struct mcam_entry *entry, + struct npc_install_flow_req *req, + u16 target, bool pf_set_vfs_mac) { struct rvu_switch *rswitch = &rvu->rswitch; struct nix_rx_action action; + int ret; if (rswitch->mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && pf_set_vfs_mac) req->chan_mask = 0x0; /* Do not care channel */ @@ -1124,6 +1162,11 @@ static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf, action.pf_func = target; action.op = req->op; action.index = req->index; + + ret = npc_mcast_update_action_index(rvu, req, action.op, (void *)&action); + if (ret) + return ret; + action.match_id = req->match_id; action.flow_key_alg = req->flow_key_alg; @@ -1155,14 +1198,17 @@ static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf, FIELD_PREP(RX_VTAG1_TYPE_MASK, req->vtag1_type) | FIELD_PREP(RX_VTAG1_LID_MASK, NPC_LID_LB) | FIELD_PREP(RX_VTAG1_RELPTR_MASK, 4); + + return 0; } -static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf, - struct mcam_entry *entry, - struct npc_install_flow_req *req, u16 target) +static int npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf, + struct mcam_entry *entry, + struct npc_install_flow_req *req, u16 target) { struct nix_tx_action action; u64 mask = ~0ULL; + int ret; /* If AF is installing then do not care about * PF_FUNC in Send Descriptor @@ -1176,6 +1222,11 @@ static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf, *(u64 *)&action = 0x00; action.op = req->op; action.index = req->index; + + ret = npc_mcast_update_action_index(rvu, req, action.op, (void *)&action); + if (ret) + return ret; + action.match_id = req->match_id; entry->action = *(u64 *)&action; @@ -1191,6 +1242,8 @@ static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf, FIELD_PREP(TX_VTAG1_OP_MASK, req->vtag1_op) | FIELD_PREP(TX_VTAG1_LID_MASK, NPC_LID_LA) | FIELD_PREP(TX_VTAG1_RELPTR_MASK, 24); + + return 0; } static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target, @@ -1220,10 +1273,15 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target, npc_update_flow(rvu, entry, features, &req->packet, &req->mask, &dummy, req->intf, blkaddr); - if (is_npc_intf_rx(req->intf)) - npc_update_rx_entry(rvu, pfvf, entry, req, target, pf_set_vfs_mac); - else - npc_update_tx_entry(rvu, pfvf, entry, req, target); + if (is_npc_intf_rx(req->intf)) { + err = npc_update_rx_entry(rvu, pfvf, entry, req, target, pf_set_vfs_mac); + if (err) + return err; + } else { + err = npc_update_tx_entry(rvu, pfvf, entry, req, target); + if (err) + return err; + } /* Default unicast rules do not exist for TX */ if (is_npc_intf_tx(req->intf)) @@ -1340,6 +1398,10 @@ find_rule: return rvu_nix_setup_ratelimit_aggr(rvu, req->hdr.pcifunc, req->index, req->match_id); + if (owner && req->op == NIX_RX_ACTIONOP_MCAST) + return rvu_nix_mcast_update_mcam_entry(rvu, req->hdr.pcifunc, + req->index, entry_index); + return 0; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h index 18c1c9f361..6f73ad9807 100644 --- 
a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -734,5 +734,7 @@
 #define APR_LMT_MAP_ENT_DIS_SCH_CMP_SHIFT	23
 #define APR_LMT_MAP_ENT_SCH_ENA_SHIFT		22
 #define APR_LMT_MAP_ENT_DIS_LINE_PREF_SHIFT	21
+#define LMTST_THROTTLE_MASK			GENMASK_ULL(38, 35)
+#define LMTST_WR_PEND_MAX			15
 
 #endif /* RVU_REG_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index edc9367b1b..5ef406c7e8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -340,11 +340,12 @@ struct nix_aq_res_s {
 /* NIX Completion queue context structure */
 struct nix_cq_ctx_s {
 	u64 base;
-	u64 rsvd_64_67		: 4;
+	u64 lbp_ena		: 1;
+	u64 lbpid_low		: 3;
 	u64 bp_ena		: 1;
-	u64 rsvd_69_71		: 3;
+	u64 lbpid_med		: 3;
 	u64 bpid		: 9;
-	u64 rsvd_81_83		: 3;
+	u64 lbpid_high		: 3;
 	u64 qint_idx		: 7;
 	u64 cq_err		: 1;
 	u64 cint_idx		: 7;
@@ -358,10 +359,14 @@ struct nix_cq_ctx_s {
 	u64 drop		: 8;
 	u64 drop_ena		: 1;
 	u64 ena			: 1;
-	u64 rsvd_210_211	: 2;
-	u64 substream		: 20;
+	u64 cpt_drop_err_en	: 1;
+	u64 rsvd_211		: 1;
+	u64 substream		: 12;
+	u64 stash_thresh	: 4;
+	u64 lbp_frac		: 4;
 	u64 caching		: 1;
-	u64 rsvd_233_235	: 3;
+	u64 stashing		: 1;
+	u64 rsvd_234_235	: 2;
 	u64 qsize		: 4;
 	u64 cq_err_int		: 8;
 	u64 cq_err_int_ena	: 8;
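One detail worth calling out from the rvu_struct.h hunk above: the former reserved holes in nix_cq_ctx_s are repurposed so the CN10K local-backpressure id fits as three 3-bit slices (lbpid_low/med/high) without moving the existing bp_ena/bpid bit positions. A small self-contained sketch of the reassembly, mirroring the shifts print_nix_cq_ctx() uses in the rvu_debugfs.c hunk of this patch:

#include <stdint.h>
#include <stdio.h>

/* The 9-bit lbpid is stored as three 3-bit slices wedged into the
 * CQ context's old reserved holes; reassemble as high:med:low.
 */
static uint16_t nix_cq_lbpid(uint8_t high, uint8_t med, uint8_t low)
{
	return ((high & 0x7) << 6) | ((med & 0x7) << 3) | (low & 0x7);
}

int main(void)
{
	/* slices 0b101, 0b010, 0b011 -> lbpid 0b101010011 = 0x153 */
	printf("lbpid = 0x%03x\n", nix_cq_lbpid(0x5, 0x2, 0x3));
	return 0;
}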